Description
stringlengths
18
161k
Code
stringlengths
15
300k
disable matplotlib plotting in test code try import matplotlib pyplot as plt mocker patch objectplt gca mocker patch objectplt show except importerror pass pytest fixturescopemodule autousetrue def teardownloadedcorpora yield first wait for the test to end import nltk corpus for name in dirnltk corpus obj getattrnltk c...
import pytest from nltk.corpus.reader import CorpusReader @pytest.fixture(autouse=True) def mock_plot(mocker): try: import matplotlib.pyplot as plt mocker.patch.object(plt, "gca") mocker.patch.object(plt, "show") except ImportError: pass @pytest.fixture(scope="module"...
probability doctest uses hmm which requires numpy skip probability doctest if numpy is not available probability doctest uses hmm which requires numpy skip probability doctest if numpy is not available
def setup_module(): import pytest pytest.importorskip("numpy")
skip a test via pytest skip if the binary executable is not found keyword arguments are passed to nltk internals findbinary import pytest try findbinarybinary args except lookuperror pytest skipfskipping test because the binary binary was not found def checkjarnamepattern str args import pytest pytest skip skipping tes...
from nltk.internals import find_binary, find_jar def check_binary(binary: str, **args): import pytest try: find_binary(binary, **args) except LookupError: pytest.skip(f"Skipping test because the {binary} binary was not found.") def check_jar(name_pattern: str, **args): imp...
natural language toolkit language model unit tests c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt tests for ngramcounter that only involve lookup no modification classmethod def setupclassself text listabcd listegdbe self trigramcounter ngramc...
import unittest import pytest from nltk import FreqDist from nltk.lm import NgramCounter from nltk.util import everygrams class TestNgramCounter: @classmethod def setup_class(self): text = [list("abcd"), list("egdbe")] self.trigram_counter = NgramCounter( everygrams(sent, m...
natural language toolkit language model unit tests c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt unseen ngrams should yield 0 unigrams should also be 0 n unigrams 14 count a 2 count y 3 ngrams seen during training ngram log score s a 1 a b 1 ...
import math from operator import itemgetter import pytest from nltk.lm import ( MLE, AbsoluteDiscountingInterpolated, KneserNeyInterpolated, Laplace, Lidstone, StupidBackoff, Vocabulary, WittenBellInterpolated, ) from nltk.lm.preprocessing import padded_everygrams @pytest.fixture(sco...
natural language toolkit language model unit tests c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt natural language toolkit language model unit tests c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com url https www nltk org for lice...
import unittest from nltk.lm.preprocessing import padded_everygram_pipeline class TestPreprocessing(unittest.TestCase): def test_padded_everygram_pipeline(self): expected_train = [ [ ("<s>",), ("<s>", "a"), ("a",), ("a", "b"), ...
natural language toolkit language model unit tests c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt tests vocabulary class classmethod def setupclasscls cls vocab vocabulary z a b c f d e g a d b e w unkcutoff2 def testtruthinessself self assert...
import unittest from collections import Counter from timeit import timeit from nltk.lm import Vocabulary class NgramModelVocabularyTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.vocab = Vocabulary( ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"], ...
test aline algorithm for aligning phonetic sequences test aline for computing the difference between two segments test aline for computing the difference between two segments
from nltk.metrics import aline def test_aline(): result = aline.align("θin", "tenwis") expected = [[("θ", "t"), ("i", "e"), ("n", "n")]] assert result == expected result = aline.align("jo", "ʒə") expected = [[("j", "ʒ"), ("o", "ə")]] assert result == expected result = aline.align("pema...
tests for brill tagger example from https github comnltknltkissues769 example from https github com nltk nltk issues 769
import unittest from nltk.corpus import treebank from nltk.tag import UnigramTagger, brill, brill_trainer from nltk.tbl import demo class TestBrill(unittest.TestCase): def test_pos_template(self): train_sents = treebank.tagged_sents()[:1000] tagger = UnigramTagger(train_sents) trainer = b...
make sure that we can still mutate cfd normally create cfd with word length as condition incrementing previously unseen key is still possible nonexistent keys shouldn t be added nonexistent keys shouldn t be added make sure that we can still mutate cfd normally create cfd with word length as condition incrementing prev...
import unittest import pytest from nltk import ConditionalFreqDist, tokenize class TestEmptyCondFreq(unittest.TestCase): def test_tabulate(self): empty = ConditionalFreqDist() self.assertEqual(empty.conditions(), []) with pytest.raises(ValueError): empty.tabulate(conditions="...
s np vp pp p np np det n np pp p vp v np vp pp vp det det a the n dog cat v chased sat p on in s np vp np vp n p vp p n dog cat p on in s np vp pp p np np det n np pp p vp v np vp pp vp det det a the n dog cat v chased sat p on in s np vp np vp n p vp p n dog cat p on in
import unittest import nltk from nltk.grammar import CFG class ChomskyNormalFormForCFGTest(unittest.TestCase): def test_simple(self): grammar = CFG.fromstring( ) self.assertFalse(grammar.is_flexible_chomsky_normal_form()) self.assertFalse(grammar.is_chomsky_normal_for...
unit tests for nltk classify see also nltktestclassify doctest unseen unseen seen 3 times labels y y x seen 1 time label x
import pytest from nltk import classify TRAIN = [ (dict(a=1, b=1, c=1), "y"), (dict(a=1, b=1, c=1), "x"), (dict(a=1, b=1, c=0), "y"), (dict(a=0, b=1, c=1), "x"), (dict(a=0, b=1, c=1), "y"), (dict(a=0, b=0, c=1), "y"), (dict(a=0, b=1, c=0), "x"), (dict(a=0, b=0, c=0), "x"), (dict(a=...
test bigram counters with discontinuous bigrams and repeated words verify that two sequences of ngram association values are within epsilon of each other test bigram counters with discontinuous bigrams and repeated words verify that two sequences of n gram association values are within _epsilon of each other
from nltk.collocations import BigramCollocationFinder from nltk.metrics import BigramAssocMeasures _EPSILON = 1e-8 SENT = "this this is is a a test test".split() def close_enough(x, y): return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y)) def test_bigram2(): b = BigramCollocationFinde...
mock test for stanford corenlp wrappers
from unittest import TestCase from unittest.mock import MagicMock import pytest from nltk.parse import corenlp from nltk.tree import Tree def setup_module(module): global server try: server = corenlp.CoreNLPServer(port=9000) except LookupError: pytest.skip("Could not instantiate CoreNLP...
corpus view regression tests check that corpus views produce the correct sequence of values check that the corpus views report the correct lengths a very short file 160 chars a relatively short file 791 chars a longer file 32k chars check that corpus views produce the correct sequence of values check that the corpus vi...
import unittest import nltk.data from nltk.corpus.reader.util import ( StreamBackedCorpusView, read_line_block, read_whitespace_block, ) class TestCorpusViews(unittest.TestCase): linetok = nltk.LineTokenizer(blanklines="keep") names = [ "corpora/inaugural/README", "corpora/inau...
class containing unit tests for nltk metrics agreement disagreement simple test based on https github comfoolswoodkrippendorffsalpharawmasterkrippendorff pdf same simple test with 1 rating removed removal of that rating should not matter kapha ignores items with only 1 rating more advanced test based on http www agrees...
import unittest from nltk.metrics.agreement import AnnotationTask class TestDisagreement(unittest.TestCase): def test_easy(self): data = [ ("coder1", "dress1", "YES"), ("coder2", "dress1", "NO"), ("coder3", "dress1", "NO"), ("coder1", "dress...
allowing transpositions reduces the number of edits required with transpositions e g abc t cba d ca 2 steps without transpositions e g abc d ab d a i ca 3 steps note a substitioncost of higher than 2 doesn t make much sense as a deletion insertion is identical and always costs 2 transpositions don t always reduce the n...
from typing import Tuple import pytest from nltk.metrics.distance import edit_distance class TestEditDistance: @pytest.mark.parametrize( "left,right,substitution_cost,expecteds", [ ("abc", "ca", 1, (2, 3)),...
test that download works properly when the parent folder of the downloaddir exists downloaddir strtmppath joinpathanotherdir downloadstatus downloadmwappdb downloaddir assert downloadstatus is true def testdownloaderusingnonexistingparentdownloaddirtmppath test that download works properly when the parent folder of the...
from nltk import download def test_downloader_using_existing_parent_download_dir(tmp_path): download_dir = str(tmp_path.joinpath("another_dir")) download_status = download("mwa_ppdb", download_dir) assert download_status is True def test_downloader_using_non_existing_parent_download_dir(tmp_path):...
example from wikipedia https en wikipedia orgwikiforwarde28093backwardalgorithm example from p 385 huang et al examples in wikipedia are normalized examples in wikipedia are normalized forwardbackward algorithm doesn t need b05 so backwardprobability doesn t compute it 0 6469 0 3531 example from wikipedia https en wiki...
import pytest from nltk.tag import hmm def _wikipedia_example_hmm(): states = ["rain", "no rain"] symbols = ["umbrella", "no umbrella"] A = [[0.7, 0.3], [0.3, 0.7]] B = [[0.9, 0.1], [0.2, 0.8]] pi = [0.5, 0.5] seq = ["umbrella", "umbrella", "no umbrella", "umbrella", "umbre...
natural language toolkit twitter client c 20012023 nltk project lorenzo rubio lrnzciggmail com url https www nltk org for license information see license txt regression tests for json2csv and json2csventities in twitter package compare two files ignoring carriage returns leading whitespace and trailing whitespace sanit...
from pathlib import Path import pytest from nltk.corpus import twitter_samples from nltk.twitter.common import json2csv, json2csv_entities def files_are_identical(pathA, pathB): f1 = [l.strip() for l in pathA.read_bytes().splitlines()] f2 = [l.strip() for l in pathB.read_bytes().splitlines()] retur...
test the likelihood ratio metric test the likelihood ratio metric
import unittest from nltk.metrics import ( BigramAssocMeasures, QuadgramAssocMeasures, TrigramAssocMeasures, ) _DELTA = 1e-8 class TestLikelihoodRatio(unittest.TestCase): def test_lr_bigram(self): self.assertAlmostEqual( BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20), ...
unit tests for nltk corpus nombank load the nombank once no of instances no of rolesets no of nouns load the nombank once no of instances no of rolesets no of nouns
import unittest from nltk.corpus import nombank nombank.nouns() class NombankDemo(unittest.TestCase): def test_numbers(self): self.assertEqual(len(nombank.instances()), 114574) self.assertEqual(len(nombank.rolesets()), 5577) self.assertEqual(len(nombank.nouns(...
tests for nltk postag test for default kwarg langnone tries to force the lang eng option test for default kwarg lang none tries to force the lang eng option
import unittest from nltk import pos_tag, word_tokenize class TestPosTag(unittest.TestCase): def test_pos_tag_eng(self): text = "John's big idea isn't all that bad." expected_tagged = [ ("John", "NNP"), ("'s", "POS"), ("big", "JJ"), ("idea", "NN"), ...
verifies that these two sentences have no alignment and hence have the lowest possible ribes score verifies that these two sentences have just one match and the ribes score for this sentence with very little correspondence is 0 verifies that these two sentences have two matches but still get the lowest possible ribes s...
from nltk.translate.ribes_score import corpus_ribes, word_rank_alignment def test_ribes_empty_worder(): hyp = "This is a nice sentence which I quite like".split() ref = "Okay well that's neat and all but the reference's different".split() assert word_rank_alignment(ref, hyp) == [] list_o...
unit tests for senna set senna executable path for tests if it is not specified as an environment variable unittest for nltk classify senna def testsennapipelineself unittest for nltk tag senna def testsennataggerself tagger sennataggersennaexecutablepath result tagger tagwhat is the airspeed of an unladen swallow spli...
import unittest from os import environ, path, sep from nltk.classify import Senna from nltk.tag import SennaChunkTagger, SennaNERTagger, SennaTagger if "SENNA" in environ: SENNA_EXECUTABLE_PATH = path.normpath(environ["SENNA"]) + sep else: SENNA_EXECUTABLE_PATH = "/usr/share/senna-v3.0" senna_is_installed =...
this unit testing for test the snowball arabic light stemmer this stemmer deals with prefixes and suffixes test where the ignorestopwordstrue test where the ignorestopwordsfalse test where create the arabic stemmer without given init value to ignorestopwords the word algue was raising an indexerror tests all words from...
import unittest from contextlib import closing from nltk import data from nltk.stem.porter import PorterStemmer from nltk.stem.snowball import SnowballStemmer class SnowballTest(unittest.TestCase): def test_arabic(self): ar_stemmer = SnowballStemmer("arabic", True) assert ar_ste...
tests for static parts of twitter package tests that twitter credentials from a file are handled correctly test that environment variable has been read correctly each of the following scenarios should raise an error an empty subdir path a subdir path of none a nonexistent directory credentials txt is not in default sub...
import os import pytest pytest.importorskip("twython") from nltk.twitter import Authenticate @pytest.fixture def auth(): return Authenticate() class TestCredentials: @classmethod def setup_class(self): self.subdir = os.path.join(os.path.dirname(__file__), "files") os.environ["TW...
form test data for tests return itera b c def testeverygramswithoutpaddingeverygraminput expectedoutput a a b a b c b b c c output listeverygramseverygraminput assert output expectedoutput def testeverygramsmaxleneverygraminput expectedoutput a a b b b c c output listeverygramseverygraminput maxlen2 assert output expec...
import pytest from nltk.util import everygrams @pytest.fixture def everygram_input(): return iter(["a", "b", "c"]) def test_everygrams_without_padding(everygram_input): expected_output = [ ("a",), ("a", "b"), ("a", "b", "c"), ("b",), ("b", "c"), ("c",), ...
unit tests for nltk corpus wordnet see also nltktestwordnet doctest not every synset as hypernyms test hyperhyponyms test root hyperhyponyms test derivationallyrelatedforms test meronyms holonyms test antonyms test misc relations test pertainyms test lch test domains test in domains path based similarities information ...
import unittest from nltk.corpus import wordnet as wn from nltk.corpus import wordnet_ic as wnic wn.ensure_loaded() S = wn.synset L = wn.lemma class WordnNetDemo(unittest.TestCase): def test_retrieve_synset(self): move_synset = S("go.v.21") self.assertEqual(move_synset.name(), "move.v.15") ...
tests for bleu translation evaluation metric examples from the original bleu paper https www aclweb organthologyp021040 pdf example 1 the the example reference sentences hypothesis sentences testing modified unigram precision with assertalmostequal at 4 place precision testing modified bigram precision example 2 the of...
import io import unittest import numpy as np from nltk.data import find from nltk.translate.bleu_score import ( SmoothingFunction, brevity_penalty, closest_ref_length, corpus_bleu, modified_precision, sentence_bleu, ) class TestBLEU(unittest.TestCase): def test_modified_precision(self): ...
tests gdfa alignments testing gdfa with first 10 eflomal outputs from issue 1829 https github comnltknltkissues1829 input expected output iterate through all 10 examples and check for expected outputs testing gdfa with first 10 eflomal outputs from issue 1829 https github com nltk nltk issues 1829 input expected output...
import unittest from nltk.translate.gdfa import grow_diag_final_and class TestGDFA(unittest.TestCase): def test_from_eflomal_outputs(self): forwards = [ "0-0 1-2", "0-0 1-1", "0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 7-8 9-9 10-10 9-11 11-12 12-13 13-14", ...
tests for ibm model 1 training methods arrange act assert expectedprob 1 0 target vocab size 1 arrange act assert examine target words that are not in the training data domain arrange act assert arrange act assert expected_prob 1 0 target vocab size 1 arrange act assert examine target words that are not in the training...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel1 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel1(unittest.TestCase): def test_set_uniform_translation_probabilities(self): corpus = [ AlignedSent(["ha...
tests for ibm model 2 training methods arrange act assert expectedprob 1 0 length of source sentence 1 arrange act assert examine i and j values that are not in the training data domain arrange act assert arrange act assert expected_prob 1 0 length of source sentence 1 arrange act assert examine i and j values that are...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel2 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel2(unittest.TestCase): def test_set_uniform_alignment_probabilities(self): corpus = [ AlignedSent(["ham"...
tests for ibm model 3 training methods arrange act assert expectedprob 1 0 length of target sentence arrange act assert examine i and j values that are not in the training data domain arrange act assert arrange act assert expected_prob 1 0 length of target sentence arrange act assert examine i and j values that are not...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel3 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel3(unittest.TestCase): def test_set_uniform_distortion_probabilities(self): corpus = [ AlignedSent(["ham...
tests for ibm model 4 training methods arrange act assert number of displacement values 2 number of words in longest target sentence 1 examine the boundary values for displacement srcclass trgclass arrange act assert examine displacement values that are not in the training data domain arrange act assert arrange act ass...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel4 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel4(unittest.TestCase): def test_set_uniform_distortion_probabilities_of_max_displacements(self): src_classes = {"sch...
tests for ibm model 5 training methods arrange act assert number of vacancy difference values 2 number of words in longest target sentence examine the boundary values for dv maxv trgclass arrange act assert examine dv and maxv values that are not in the training data domain arrange act assert arrange mock static method...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel4, IBMModel5 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel5(unittest.TestCase): def test_set_uniform_vacancy_probabilities_of_max_displacements(self): src_classes...
tests for common methods of ibm translation models arrange none and bien have zero fertility act assert arrange act force love to be pegged to jambon assert arrange bien produces 2 target words really and another really act assert arrange act assert arrange act assert arrange act assert moves swaps original alignment a...
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel(unittest.TestCase): __TEST_SRC_SENTENCE = ["j'", "aime", "bien", "jambon"] __TEST_TRG_SENTENCE = ["i", "love", "ham"] def test_vocabu...
tests for nist translation evaluation metric reads the nist scores from the mteval13a output file the order of the list corresponds to the order of the ngrams the numbers are located in the last 4th line of the file the first and 2nd item in the list are the score and system names whitespace tokenize the file note spli...
import io import unittest from nltk.data import find from nltk.translate.nist_score import corpus_nist class TestNIST(unittest.TestCase): def test_sentence_nist(self): ref_file = find("models/wmt15_eval/ref.ru") hyp_file = find("models/wmt15_eval/google.ru") mteval_output_file = find("mod...
natural language toolkit stack decoder c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt tests for stack decoder arrange act assert arrange act assert arrange act assert expansion from empty hypothesis always has zero distortion cost arrange act assert ...
import unittest from collections import defaultdict from math import log from nltk.translate import PhraseTable, StackDecoder from nltk.translate.stack_decoder import _Hypothesis, _Stack class TestStackDecoder(unittest.TestCase): def test_find_all_src_phrases(self): phrase_table = TestStackDecod...
natural language toolkit texts c 20012023 nltk project steven bird stevenbird1gmail com edward loper edlopergmail com url https www nltk org for license information see license txt this module brings together a variety of nltk functionality for text analysis and provides simple interactive interfaces functionality incl...
import re import sys from collections import Counter, defaultdict, namedtuple from functools import reduce from math import log from nltk.collocations import BigramCollocationFinder from nltk.lm import MLE from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.metrics import BigramAssocMeasures, f_measu...
natural language toolkit tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com minor additions contributors matthewmc clouds56 url https www nltk org for license information see license txt import re from nltk data import load from nltk tokenize casual import tweettokenizer c...
r import re from nltk.data import load from nltk.tokenize.casual import TweetTokenizer, casual_tokenize from nltk.tokenize.destructive import NLTKWordTokenizer from nltk.tokenize.legality_principle import LegalitySyllableTokenizer from nltk.tokenize.mwe import MWETokenizer from nltk.tokenize.punkt import PunktSentenc...
natural language toolkit tokenizer interface c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt tokenizer interface a processing interface for tokenizing a string subclasses must define tokenize or tokenizesents or both r...
from abc import ABC, abstractmethod from typing import Iterator, List, Tuple from nltk.internals import overridden from nltk.tokenize.util import string_span_tokenize class TokenizerI(ABC): @abstractmethod def tokenize(self, s: str) -> List[str]: if overridden(self.tokenize_sents): ...
natural language toolkit tokenizers c 20012023 nltk project christopher hench chris l henchgmail com alex estes url https www nltk org for license information see license txt the legality principle is a language agnostic principle maintaining that syllable onsets and codas the beginning and ends of syllables not includ...
from collections import Counter from nltk.tokenize.api import TokenizerI class LegalitySyllableTokenizer(TokenizerI): def __init__( self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 ): self.legal_frequency_threshold = legal_frequency_threshold ...
multiword expression tokenizer c 20012023 nltk project rob malouf rmaloufmail sdsu edu url https www nltk org for license information see license txt multiword expression tokenizer a mwetokenizer takes a string which has already been divided into tokens and retokenizes it merging multiword expressions into single token...
from nltk.tokenize.api import TokenizerI from nltk.util import Trie class MWETokenizer(TokenizerI): def __init__(self, mwes=None, separator="_"): if not mwes: mwes = [] self._mwes = Trie(mwes) self._separator = separator def add_mwe(self, mwe): ...
natural language toolkit python port of the mtevalv14 pl tokenizer c 20012015 nltk project liling tan ported from ftp jaguar ncsl nist govmtresourcesmtevalv14 pl contributors ozan caglayan wiktor stribizew url https www nltk org for license information see license txt this is a nltk port of the tokenizer used in the ni...
import io import re from nltk.corpus import perluniprops from nltk.tokenize.api import TokenizerI from nltk.tokenize.util import xml_unescape class NISTTokenizer(TokenizerI): STRIP_SKIP = re.compile("<skipped>"), "" STRIP_EOL_HYPHEN = re.compile("\u2028"), " " PUNCT = re.compile(r"([...
natural language toolkit tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com trevor cohn tacohncsse unimelb edu au url https www nltk org for license information see license txt import re from nltk tokenize api import tokenizeri from nltk tokenize util import regexpspantoke...
r import re from nltk.tokenize.api import TokenizerI from nltk.tokenize.util import regexp_span_tokenize class RegexpTokenizer(TokenizerI): r def __init__( self, pattern, gaps=False, discard_empty=True, flags=re.UNICODE | re.MULTILINE | re.DOTALL, ): ...
natural language toolkit interface to the repp tokenizer c 20012015 nltk project s rebecca dridan and stephan oepen contributors liling tan url https www nltk org for license information see license txt a class for word tokenization using the repp parser described in rebecca dridan and stephan oepen 2012 tokenization r...
import os import re import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir from nltk.tokenize.api import TokenizerI class ReppTokenizer(TokenizerI): def __init__(self, repp_dir, encoding="utf8"): self.repp_dir = self.find_repptokeni...
natural language toolkit tokenizers c 20012023 nltk project yoav goldberg yoavgcs bgu ac il steven bird stevenbird1gmail com minor edits url https www nltk org for license information see license txt sexpression tokenizer sexprtokenizer is used to find parenthesized expressions in a string in particular it divides a st...
import re from nltk.tokenize.api import TokenizerI class SExprTokenizer(TokenizerI): def __init__(self, parens="()", strict=True): if len(parens) != 2: raise ValueError("parens must contain exactly two strings") self._strict = strict self._open_paren = parens[0] ...
natural language toolkit simple tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt from nltk tokenize api import stringtokenizer tokenizeri from nltk tokenize util import regexpspantokenize stringspantokenize c...
r from nltk.tokenize.api import StringTokenizer, TokenizerI from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize class SpaceTokenizer(StringTokenizer): r _string = " " class TabTokenizer(StringTokenizer): r _string = "\t" class CharTokenizer(StringTokenizer): _str...
natural language toolkit tokenizers c 20012023 nltk project christopher hench chris l henchgmail com alex estes url https www nltk org for license information see license txt the sonority sequencing principle ssp is a language agnostic algorithm proposed by otto jesperson in 1904 the sonorous quality of a phoneme is ju...
import re import warnings from string import punctuation from nltk.tokenize.api import TokenizerI from nltk.util import ngrams class SyllableTokenizer(TokenizerI): def __init__(self, lang="en", sonority_hierarchy=False): if not sonority_hierarchy and lan...
natural language toolkit interface to the stanford tokenizer c 20012023 nltk project steven xu xxustudent unimelb edu au url https www nltk org for license information see license txt jar stanfordpostagger jar def init self pathtojarnone encodingutf8 optionsnone verbosefalse javaoptionsmx1000m raise deprecation warning...
import json import os import tempfile import warnings from subprocess import PIPE from nltk.internals import _java_options, config_java, find_jar, java from nltk.parse.corenlp import CoreNLPParser from nltk.tokenize.api import TokenizerI _stanford_url = "https://nlp.stanford.edu/software/tokenizer.shtml" class Stan...
usrbinenv python natural language toolkit interface to the stanford segmenter for chinese and arabic c 20012023 nltk project 52nlp 52nlpcngmail com casper lehmannstrm casperlehmanngmail com alex constantin alexkeyworder ch url https www nltk org for license information see license txt interface to the stanford segmente...
import json import os import tempfile import warnings from subprocess import PIPE from nltk.internals import ( _java_options, config_java, find_dir, find_file, find_jar, java, ) from nltk.tokenize.api import TokenizerI _stanford_url = "https://nlp.stanford.edu/software" class StanfordSegment...
natural language toolkit texttiling c 20012023 nltk project george boutsioukis url https www nltk org for license information see license txt tokenize a document into topical sections using the texttiling algorithm this algorithm detects subtopic shifts based on the analysis of lexical cooccurrence patterns the process...
import math import re try: import numpy except ImportError: pass from nltk.tokenize.api import TokenizerI BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1 LC, HC = 0, 1 DEFAULT_SMOOTHING = [0] class TextTilingTokenizer(TokenizerI): def __init__( self, w=20, k=10, s...
natural language toolkit toolbox reader c 20012023 nltk project greg aumann gregaumannsil org url https www nltk org for license information see license txt module for reading writing and manipulating toolbox databases and settings files class for reading and processing standard format marker files and strings open a s...
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find class StandardFormat: def __init__(self, filename=None, encoding=None): self._encoding = encoding if filename is not None: ...
natural language toolkit machine translation c 20012023 nltk project steven bird stevenbird1gmail com tah wei hoon hoon twgmail com url https www nltk org for license information see license txt experimental features for machine translation these interfaces are prone to change isort skipfile natural language toolkit ma...
from nltk.translate.api import AlignedSent, Alignment, PhraseTable from nltk.translate.ibm_model import IBMModel from nltk.translate.ibm1 import IBMModel1 from nltk.translate.ibm2 import IBMModel2 from nltk.translate.ibm3 import IBMModel3 from nltk.translate.ibm4 import IBMModel4 from nltk.translate.ibm5 import IBMMode...
natural language toolkit api for alignment and translation objects c 20012023 nltk project will zhang wilzzhagmail com guan gui gguistudent unimelb edu au steven bird stevenbird1gmail com tah wei hoon hoon twgmail com url https www nltk org for license information see license txt return an aligned sentence object which...
import subprocess from collections import namedtuple class AlignedSent: def __init__(self, words, mots, alignment=None): self._words = words self._mots = mots if alignment is None: self.alignment = Alignment([]) else: assert type(alignment) is Alignmen...
natural language toolkit bleu score c 20012023 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim contributors bjrn mattsson dmitrijs milajevs liling tan url https www nltk org for license information see license txt bleu score implementation import math import sys import warnings from collections im...
import math import sys import warnings from collections import Counter from fractions import Fraction from nltk.util import ngrams def sentence_bleu( references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, ): return corpus_bleu( [ref...
natural language toolkit chrf score c 20012023 nltk project s maja popovic contributors liling tan ale tamchyna memsource url https www nltk org for license information see license txt chrf score implementation import re from collections import counter defaultdict from nltk util import ngrams def sentencechrf reference...
import re from collections import Counter, defaultdict from nltk.util import ngrams def sentence_chrf( reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True ): return corpus_chrf( [reference], [hypothesis], min_len, max_len, beta=beta, ...
natural language toolkit galechurch aligner c 20012023 nltk project torsten marek marekifi uzh ch contributor cassidy laidlaw liling tan url https www nltk org for license information see license txt a port of the galechurch aligner gale church 1993 a program for aligning sentences in bilingual corpora https aclweb org...
import math try: from norm import logsf as norm_logsf from scipy.stats import norm except ImportError: def erfcc(x): z = abs(x) t = 1 / (1 + 0.5 * z) r = t * math.exp( -z * z - 1.26551223 + t * ( 1.00002368 ...
natural language toolkit gdfa word alignment symmetrization c 20012023 nltk project s liling tan url https www nltk org for license information see license txt this module symmetrisatizes the sourcetotarget and targettosource word alignment output and produces aka gdfa algorithm koehn 2005 step 1 find the intersection ...
from collections import defaultdict def grow_diag_final_and(srclen, trglen, e2f, f2e): e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()] f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()] neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)] al...
natural language toolkit gleu score c 20012023 nltk project s contributors mike schuster michael wayne goodman liling tan url https www nltk org for license information see license txt gleu score implementation from collections import counter from nltk util import everygrams ngrams def sentencegleureferences hypothesis...
from collections import Counter from nltk.util import everygrams, ngrams def sentence_gleu(references, hypothesis, min_len=1, max_len=4): return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len) def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4): as...
natural language toolkit ibm model 1 c 20012013 nltk project chin yee lee c lee32student unimelb edu au hengfeng li hengfeng12345gmail com ruxin hou r houstudent unimelb edu au calvin tanujaya lim c tanujayalimgmail com based on earlier version by will zhang wilzzhagmail com guan gui gguistudent unimelb edu au url http...
import warnings from collections import defaultdict from nltk.translate import AlignedSent, Alignment, IBMModel from nltk.translate.ibm_model import Counts class IBMModel1(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super().__init__(sentence...
natural language toolkit ibm model 2 c 20012013 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim url https www nltk org for license information see license txt lexical translation model that considers word order ibm model 2 improves on model 1 by accounting for word order an alignment probability i...
import warnings from collections import defaultdict from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1 from nltk.translate.ibm_model import Counts class IBMModel2(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super().__init...
natural language toolkit ibm model 3 c 20012013 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim url https www nltk org for license information see license txt translation model that considers how a word can be aligned to multiple words in another language ibm model 3 improves on model 2 by directl...
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel2 from nltk.translate.ibm_model import Counts class IBMModel3(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): ...
natural language toolkit ibm model 4 c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt translation model that reorders output words based on their type and distance from other related words in the output sentence ibm model 4 improves the distortion mode...
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel3 from nltk.translate.ibm_model import Counts, longest_target_sentence_length class IBMModel4(IBMModel): def __init__( self, sentence_aligned_c...
natural language toolkit ibm model 5 c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt translation model that keeps track of vacant positions in the target sentence to decide where to place translated words translation can be viewed as a process where e...
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4 from nltk.translate.ibm_model import Counts, longest_target_sentence_length class IBMModel5(IBMModel): MIN_SCORE_FACTOR = 0.2 def __init__( ...
natural language toolkit ibm model core c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt common methods and classes for all ibm models see ibmmodel1 ibmmodel2 ibmmodel3 ibmmodel4 and ibmmodel5 for specific implementations the ibm models are a series of...
from bisect import insort_left from collections import defaultdict from copy import deepcopy from math import ceil def longest_target_sentence_length(sentence_aligned_corpus): max_m = 0 for aligned_sentence in sentence_aligned_corpus: m = len(aligned_sentence.words) max_m = max(m, max_m) ...
natural language toolkit machine translation c 20012023 nltk project uday krishna udaykrishna5gmail com contributor tom aarsen url https www nltk org for license information see license txt takes in pretokenized inputs for hypothesis and reference and returns enumerated word lists for each of them param hypothesis pret...
from itertools import chain, product from typing import Callable, Iterable, List, Tuple from nltk.corpus import WordNetCorpusReader, wordnet from nltk.stem.api import StemmerI from nltk.stem.porter import PorterStemmer def _generate_enums( hypothesis: Iterable[str], reference: Iterable[str], preprocess: ...
natural language toolkit translation metrics c 20012023 nltk project will zhang wilzzhagmail com guan gui gguistudent unimelb edu au steven bird stevenbird1gmail com url https www nltk org for license information see license txt return the alignment error rate aer of an alignment with respect to a gold standard referen...
def alignment_error_rate(reference, hypothesis, possible=None):
    """Return the alignment error rate (AER) of ``hypothesis`` against a gold standard.

    :param reference: the gold-standard ("sure") alignment links, as a set of
        hashable items (e.g. ``(source_index, target_index)`` pairs) supporting
        ``&`` and ``issubset``
    :param hypothesis: the proposed alignment links, same kind of set
    :param possible: the optional set of "possible" links; must be a superset
        of ``reference``. Defaults to ``reference`` itself when omitted.
    :return: a float in ``[0.0, 1.0]``, where 0.0 means a perfect alignment
    :raises AssertionError: if ``possible`` is given but does not contain
        every link in ``reference``
    """
    if possible is None:
        # With no separate "possible" set, sure and possible links coincide.
        possible = reference
    else:
        assert reference.issubset(possible)

    # AER = 1 - (|A ∩ S| + |A ∩ P|) / (|A| + |S|)
    sure_matches = len(hypothesis & reference)
    possible_matches = len(hypothesis & possible)
    denominator = float(len(hypothesis) + len(reference))
    return 1.0 - (sure_matches + possible_matches) / denominator
natural language toolkit nist score c 20012023 nltk project s contributors url https www nltk org for license information see license txt nist score implementation import fractions import math from collections import counter from nltk util import ngrams def sentencenistreferences hypothesis n5 return corpusnistreferenc...
import fractions import math from collections import Counter from nltk.util import ngrams def sentence_nist(references, hypothesis, n=5): return corpus_nist([references], [hypothesis], n) def corpus_nist(list_of_references, hypotheses, n=5): assert len(list_of_references) == len( hyp...
natural language toolkit phrase extraction algorithm c 20012023 nltk project s liling tan fredrik hedman petra barancikova url https www nltk org for license information see license txt this function checks for alignment point consistency and extracts phrases using the chunk of consistent phrases a phrase pair e f is c...
def extract( f_start, f_end, e_start, e_end, alignment, f_aligned, srctext, trgtext, srclen, trglen, max_phrase_length, ): if f_end < 0: return {} for e, f in alignment: if (f_start <= f <= f_end) and (e < e_start or e > e_end): ...
natural language toolkit ribes score c 20012023 nltk project contributors katsuhito sudoh liling tan kasramvd j f sebastian mark byers ekhumoro p ortiz url https www nltk org for license information see license txt ribes score implementation import math from itertools import islice from nltk util import choose ngrams d...
import math from itertools import islice from nltk.util import choose, ngrams def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10): best_ribes = -1.0 for reference in references: worder = word_rank_alignment(reference, hypothesis) nkt = kendall_tau(worder) ...
natural language toolkit stack decoder c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt a decoder that uses stacks to implement phrasebased translation in phrasebased translation the source sentence is segmented into phrases of one or more words and tr...
import warnings from collections import defaultdict from math import log class StackDecoder: def __init__(self, phrase_table, language_model): self.phrase_table = phrase_table self.language_model = language_model self.word_penalty = 0.0 self.beam_threshold...
natural language toolkit machine translation c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt nltk tree package this package may be used for representing hierarchical languag...
from nltk.tree.immutable import ( ImmutableMultiParentedTree, ImmutableParentedTree, ImmutableProbabilisticTree, ImmutableTree, ) from nltk.tree.parented import MultiParentedTree, ParentedTree from nltk.tree.parsing import bracket_parse, sinica_parse from nltk.tree.prettyprinter import TreePrettyPrinter...
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt precompute our hash value this ensures that we re really immutable it also means we only ...
from nltk.probability import ProbabilisticMixIn from nltk.tree.parented import MultiParentedTree, ParentedTree from nltk.tree.tree import Tree class ImmutableTree(Tree): def __init__(self, node, children=None): super().__init__(node, children) try: self._hash = hash((...
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt parsing use tree reads removeemptytopbracketingtrue instead parse a sinica treebank strin...
import re from nltk.tree.tree import Tree def bracket_parse(s): raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.") def sinica_parse(s): tokens = re.split(r"([()| ])", s) for i in range(len(tokens)): if tokens[i] == "(": tokens[i - 1], tokens...
natural language toolkit ascii visualization of nltk trees c 20012023 nltk project andreas van cranenburgh a w vancranenburghuva nl peter ljunglf peter ljunglofgu se url https www nltk org for license information see license txt prettyprinting of discontinuous trees adapted from the discodop project by andreas van cran...
import re try: from html import escape except ImportError: from cgi import escape from collections import defaultdict from operator import itemgetter from nltk.tree.tree import Tree from nltk.util import OrderedDict ANSICOLOR = { "black": 30, "red": 31, "green": 32, "yellow": 33, "blue":...
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt probabilistic trees we have to patch up these methods to make them work right natural lan...
from nltk.internals import raise_unorderable_types from nltk.probability import ProbabilisticMixIn from nltk.tree.immutable import ImmutableProbabilisticTree from nltk.tree.tree import Tree class ProbabilisticTree(Tree, ProbabilisticMixIn): def __init__(self, node, children=None, **prob_kwargs): Tree....
natural language toolkit tree transformations c 20052007 oregon graduate institute nathan bodenstab bodenstabcslu ogi edu url https www nltk org for license information see license txt from nltk internals import deprecated from nltk tree transforms import chomskynormalform as cnf from nltk tree transforms import collap...
r from nltk.internals import deprecated from nltk.tree.transforms import chomsky_normal_form as cnf from nltk.tree.transforms import collapse_unary as cu from nltk.tree.transforms import un_chomsky_normal_form as ucnf chomsky_normal_form = deprecated( "Import using `from nltk.tree import chomsky_normal_form` inst...
natural language toolkit twitter c 20012023 nltk project ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt nltk twitter package this package contains classes for retrieving tweet documents using the twitter api natural language toolkit twitter c 2001 2023 nltk project ewan klein...
try: import twython except ImportError: import warnings warnings.warn( "The twython library has not been installed. " "Some functionality from the twitter package will not be available." ) else: from nltk.twitter.util import Authenticate, credsfromfile from nltk.twitter.twitterc...
natural language toolkit twitter client c 20012023 nltk project ewan klein ewaninf ed ac uk lorenzo rubio lrnzciggmail com url https www nltk org for license information see license txt nltk twitter client this module offers methods for collecting and processing tweets most of the functionality depends on access to the...
import datetime import gzip import itertools import json import os import time import requests from twython import Twython, TwythonStreamer from twython.exceptions import TwythonError, TwythonRateLimitError from nltk.twitter.api import BasicTweetHandler, TweetHandlerI from nltk.twitter.util import credsfromfile, gues...
natural language toolkit twitter client c 20012023 nltk project ewan klein ewaninf ed ac uk lorenzo rubio lrnzciggmail com url https www nltk org for license information see license txt authentication utilities to accompany twitterclient convenience function for authentication methods for authenticating with twitter re...
import os import pprint from twython import Twython def credsfromfile(creds_file=None, subdir=None, verbose=False): return Authenticate().load_creds( creds_file=creds_file, subdir=subdir, verbose=verbose ) class Authenticate: def __init__(self): self.creds_file = "credentials...
natural language toolkit utility functions c 20012023 nltk project steven bird stevenbird1gmail com eric kafe kafe ericgmail com acyclic closures url https www nltk org for license information see license txt short usage message builtins sometimes don t support introspection idle return true if this function is run wit...
import inspect import locale import os import pydoc import re import textwrap import warnings from collections import defaultdict, deque from itertools import chain, combinations, islice, tee from pprint import pprint from urllib.request import ( HTTPPasswordMgrWithDefaultRealm, ProxyBasicAuthHandler, Proxy...
natural language toolkit word sense disambiguation algorithms s liling tan alvationsgmail com dmitrijs milajevs dimazestgmail com c 20012023 nltk project url https www nltk org for license information see license txt return a synset for an ambiguous word in a context param iter contextsentence the context sentence wher...
from nltk.corpus import wordnet def lesk(context_sentence, ambiguous_word, pos=None, synsets=None, lang="eng"): context = set(context_sentence) if synsets is None: synsets = wordnet.synsets(ambiguous_word, lang=lang) if pos: synsets = [ss for ss in synsets if str(ss.pos()) == pos] ...
usrbinenv python natural language toolkit deprecated function class finder c 20012023 nltk project edward loper edlopergmail com url https www nltk org for license information see license txt this commandline tool takes a list of python files or directories and searches them for calls to deprecated nltk functions or us...
import os import re import sys import textwrap import tokenize from doctest import DocTestParser, register_optionflag from cStringIO import StringIO import nltk.corpus from nltk import defaultdict STRING_PAT = ( r"\s*[ur]{0,2}(?:" r'|' '"[^"\n]+?"|' r"|" "'[^'\n]+?'" r")\s*" ) STRING_RE ...
usrbinenv python natural language toolkit substitute a pattern with a replacement in every file c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt nb should work on all platforms http www python orgdoc2 5 2libosfiledir ht...
import os import stat import sys def update(file, pattern, replacement): try: old_perm = os.stat(file)[0] if not os.access(file, os.W_OK): os.chmod(file, old_perm | stat.S_IWRITE) s = open(file, "rb").read().decode("utf-8") t = s.replace(pattern, rep...
nltk documentation build configuration file sphinxquickstart on wed nov 2 17 02 59 2011 this file is execfiled with the current directory set to its containing dir note that not all possible configuration values are present in this autogenerated file all configuration values have a default values that are commented out...
import os import sys sys.path.insert(0, os.path.abspath("..")) extensions = [ "sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.imgmath", "sphinx.ext.viewcode", "sphinxcontrib.apidoc", ] apidoc_module_dir = "../nltk" apidoc_output_dir = "api" apidoc_separate_modules = True apid...