Dataset columns:
query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: sequence (lengths 4 to 101)
negative_scores: sequence (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 classes)
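A minimal sketch of inspecting one row with the Hugging Face datasets library; the file name "triplet_data.jsonl" is an assumption, since the dataset's actual name and location are not given in this dump.

# Sketch only: load a file with the schema above and look at one row.
# "triplet_data.jsonl" is a placeholder path, not the real dataset location.
from datasets import load_dataset

ds = load_dataset("json", data_files="triplet_data.jsonl", split="train")
row = ds[0]
print(row["query"])                      # natural-language description of the target function
print(row["document"])                   # positive code snippet
print(len(row["negatives"]))             # between 4 and 101 hard negatives
print(row["document_score"], row["document_rank"])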
Read stop words from input file (filename) and insert each word as a key into the stop words hash table.
def load_stop_table(self, filename):
    self.stop_table = HashTable(191)
    try:
        a = open(filename, "r")
        lines = a.readlines()
        a.close()
    except:
        raise FileNotFoundError()
    for n in range(len(lines)):
        self.stop_table.insert(lines[n][:-1], n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
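The metadata marks each row for a triplet objective over (query, document, negatives). Below is a minimal sketch of that objective using PyTorch's triplet margin loss; the encode() function and the margin value are assumptions standing in for whatever encoder is actually trained on this data.

# Sketch of the (query, document, negatives) triplet objective.
# `encode` is a placeholder for any text encoder returning a 1-D embedding.
import torch
import torch.nn.functional as F

def triplet_step(encode, query, document, negatives, margin=0.2):
    q = encode(query)                                   # (d,) query embedding
    pos = encode(document)                              # (d,) positive embedding
    negs = torch.stack([encode(n) for n in negatives])  # (k, d) negative embeddings
    anchor = q.unsqueeze(0).expand_as(negs)
    positive = pos.unsqueeze(0).expand_as(negs)
    return F.triplet_margin_loss(anchor, positive, negs, margin=margin)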
[ "def load_stop_table(self, filename):\n self.stop_table = HashTable(191)\n with open(filename, 'r') as f:\n for word in f.readlines():\n self.stop_table.insert(word.replace('\\n',''),None)", "def load_stop_words(stop_word_file):\n stop_words = []\n for line in open(stop_word_file):\n if line.strip()[0:1] != \"#\":\n for word in line.split(): # in case more than one per line\n stop_words.append(word)\n return stop_words", "def load():\n for line in open(config.filepath, 'r'):\n line = line.strip()\n line_sorted = ''.join(sorted(line))\n\n if line_sorted not in Words.hashed:\n Words.hashed[line_sorted] = []\n\n # Store the real hashed as a list\n # We need line_sorted as the key for fast lookup later\n Words.hashed[line_sorted].append(line)\n\n # Also add the word to a standard list\n # We'll use this to quickly determine wordiness later\n Words.words.append(line)", "def _stopwords():\n global _stopword_set\n if _stopword_set:\n return _stopword_set\n f_name = \"stopword.list\"\n if os.path.isfile(f_name):\n res = set()\n with open(f_name) as f:\n for line in f:\n res.add(line.strip())\n _stopword_set = res\n return res\n else:\n error(\"stop words - not a file: %s\" % f_name)", "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def read_stopwords(fileName='stopwords.txt', lower_case=True):\n stopwords = set()\n with open(fileName) as f:\n for w in f:\n w = w.strip()\n if w:\n if lower_case:\n w = w.lower()\n stopwords.add(w)\n return stopwords", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def __init__(self):\n stopwords_file = open(self.filepath, \"r\")\n for line in stopwords_file.readlines():\n line2 = line.replace(\"\\n\", \"\") \n self.add(line2)", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def rm_stopwords(file_path, word_dict):\n\n # read stop word dict and save in stop_dict\n stop_dict = {}\n with open(word_dict) as d:\n for word in d:\n stop_dict[word.strip(\"\\n\")] = 1\n # remove tmp file if exists\n if os.path.exists(file_path + \".tmp\"):\n os.remove(file_path + \".tmp\")\n\n print(\"now remove stop words in %s.\" % file_path)\n # read source file and rm stop word for each line.\n with open(file_path) as f1, open(file_path + \".tmp\", \"w\") as f2:\n for line in f1:\n tmp_list = [] # save words not in stop dict\n words = line.split()\n for word in words:\n if word not in stop_dict:\n tmp_list.append(word)\n words_without_stop = \" \".join(tmp_list)\n to_write = words_without_stop + \"\\n\"\n f2.write(to_write)\n\n # overwrite origin file with file been removed stop 
words\n shutil.move(file_path + \".tmp\", file_path)\n print(\"stop words in %s has been removed.\" % file_path)", "def load_stop_words() -> list:\r\n with open(f'{ENGINE}/stop_words.txt', 'r') as i:\r\n stop_words = i.read().splitlines()\r\n stop_words = list(map(lambda x: x.upper(), stop_words)) # Force all stop words to UPPER case.\r\n return stop_words", "def load_stop_words():\n with open('../data/stop_words.txt', 'r') as stop_words_file:\n return stop_words_file.read().split()", "def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words", "def readFile(filename):\n listOfWords = []\n currentLine = 1\n f = open(filename, \"r\")\n for line in f:\n line = stripPunctuation(line)\n for word in line.split():\n word = word.lower()\n if len(word) > 1:\n if not word[0].isdigit():\n tempObj = contains(listOfWords, word)\n if tempObj != None:\n tempObj.incOccurrence(currentLine)\n else:\n temp = Word(word, currentLine)\n listOfWords.append(temp)\n currentLine = currentLine + 1\n return listOfWords", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def create_dictionary(filename):\n\tword_set = set()\n\tif os.path.isfile(filename):\n\t\twith open(filename, 'r') as f:\n\t\t\tfor line in iter(f):\n\t\t\t\tword_set.add(line.strip('\\n'))\n\telse:\n\t\tprint \"File not found!\"\n\treturn word_set", "def get_stop_words(stop_file_path):\n \n with open(stop_file_path, 'r', encoding=\"utf-8\") as f:\n stopwords = f.readlines()\n stop_set = set(m.strip() for m in stopwords)\n return frozenset(stop_set)", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. 
Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in ['a', 'i', '']:\n d[letter] = letter\n return d", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def load_dictionary(hash_table, filename):\n\n file = open(filename)\n lines = file.readlines()\n start = timeit.default_timer()\n for line in lines:\n hash_table.insert(line.rstrip(),1)\n if timeit.default_timer() - start > 4:\n break\n file.close()", "def create_index(path):\n words = {}\n\n for l in open(path):\n linewords = l.strip().split(\" \")\n student = linewords[0]\n linewords = linewords[1:]\n\n for word in linewords:\n if word in words:\n if int(student) not in words[word]:\n words[word].append(int(student))\n else:\n words[word] = [int(student)]\n\n return words", "def word_frequency_in_file(filename):\n words = {}\n fin = open(filename)\n punctuation = string.punctuation\n for line in fin:\n line = line.translate( # Replace punctuation with spaces\n str.maketrans(punctuation, ' ' * len(punctuation)))\n line = line.lower()\n line_words = line.split()\n for word in line_words: # Process each word in the line.\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n return words", "def load_file(self, file_path):\n f = open(file_path, \"r\")\n sentences = f.readlines()\n \n word_count = 0\n\n for sentence in sentence: \n for word in sentence.strip().split(\" \"):\n if not (word in self.word_id): #word not in dictionary\n word_id[word] = word_count\n word_count += 1\n\n #self.doc = [[self.word_id[word] for word in sentence.strip().split(\" \")] for sentence in sentences]", "def load_dictionary(filename):\n\n word_list = []\n freq_sum = 0\n\n # nacitanie zo suboru\n with open(filename) as f:\n for line in f:\n freq, val = line.split()\n word_list.append(Word(int(freq), val))\n freq_sum += int(freq)\n\n # lexikograficke usporiadanie slov\n word_list_sorted = sorted(word_list, key=operator.attrgetter('value'))\n\n return 
word_list_sorted, freq_sum", "def load_concordance_table(self, filename):\n self.concordance_table = HashTable(191)\n with open(filename, 'r') as f:\n for linenum,words in enumerate(f.readlines()):\n for i in words.translate(self.ttable).split():\n i = i.casefold()\n if not self.stop_table.in_table(i):\n self.concordance_table.insert(i,linenum + 1)", "def read_dictionary():\n global dic\n with open(FILE, 'r') as f:\n for line in f:\n word_list = line.split()\n word = word_list[0].strip()\n dic.append(word)", "def read_words(f, words):\n with open(f) as file:\n for line in file:\n w = tokenizer.tokenize(line.strip())\n for word in w:\n try:\n words[word] += 1\n except:\n words[word] = 1", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def load_input_word_list(file_path):\n if not os.path.isfile(file_path):\n return False\n\n word_list = list()\n\n with open(file_path, 'r') as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n\n data = line.split(' ')\n text = data[0].lower().strip(Setting.NONWORD_CHARACTERS)\n\n if not text:\n continue\n\n text = text.replace('_', ' ')\n\n score = float(data[1])\n\n if score < 0:\n kind = WordKindEnum.NEG\n else:\n kind = WordKindEnum.POS\n\n word = Word(text, score, kind)\n word_list.append(word)\n\n return word_list", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. 
Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def word_list():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip().lower()\n d[word] = True\n return d", "def make_stopwords(filepath='stopwords.txt'):\n sw = open(filepath, \"r\")\n my_stopwords = sw.read()\n my_stopwords = my_stopwords.split(\", \")\n sw.close()\n\n all_stopwords = stopwords.words('english')\n all_stopwords.extend(my_stopwords)\n return all_stopwords", "def _read_words(self, path):\r\n\r\n word_file = open(path)\r\n for line in word_file.readlines():\r\n pair = line.split('::')\r\n self.insert(pair[0], pair[1].rstrip())\r\n word_file.close()", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result", "def import_words(file_name):\n with open(file_name) as word_list:\n words = []\n for line in word_list:\n number, word = line.strip().split(\"\\t\")\n words.append(word.strip())\n # print(f\"Imported {(len(word_dict))} words\")\n\n return words", "def __init__(self,dir_stopwords):\n \n arc = open(dir_stopwords, \"r\", encoding='utf-8')\n self.stp_wrds = [line.strip() for line in arc]\n arc.close()", "def init(wordlist_filename):\n global WORDS\n if WORDS == None:\n WORDS = []\n bad_line = lambda x: x.strip() == '' or x.startswith('#')\n with codecs.open(wordlist_filename, 'r', 'utf-8') as filehandle:\n lines = filehandle.readlines()\n WORDS = set([x.lower().strip() for x in lines if not bad_line(x)])", "def preprocess(in_file: str, file_out: str) -> tuple:\r\n try:\r\n stop_words = []\r\n if os.path.isfile(\"StopWords.pkl\"):\r\n with open(\"StopWords.pkl\", mode=\"rb\") as pkl:\r\n stop_words = pickle.load(pkl)\r\n else:\r\n with open(\"StopWords.txt\", mode=\"r\") as f1:\r\n for line in f1:\r\n stop_words.append(line.strip(\"\\n\"))\r\n with open(\"StopWords.pkl\", mode=\"wb\") as pkl:\r\n pickle.dump(stop_words, pkl)\r\n\r\n vocab = []\r\n corpus = []\r\n with open(in_file, mode=\"r\") as f:\r\n for line in f:\r\n line = line.rsplit(sep=\"\\t\")\r\n sent = line[1].strip(\" \\n\")\r\n sent = int(sent)\r\n\r\n sentence = process_sentence(line[0])\r\n\r\n [sentence.remove(w) for w in sentence if w in stop_words or w == \"\"]\r\n corpus.append([sentence, sent])\r\n vocab += list(set(sentence) - set(vocab))\r\n\r\n # Sort lists alphabetically\r\n corpus = sorted(corpus)\r\n vocab = sorted(vocab)\r\n vocab.append(\"classlabel\")\r\n to_file(file_out, vocab, corpus)\r\n\r\n return vocab, corpus\r\n except:\r\n catch_err()", "def get_glove_dictionary(self, file_path=\"./glove.twitter.27B.25d.txt\"):\n file = open(file_path, \"r\",encoding='utf-8')\n dictionary = {}\n keys = []\n for word_vector in file:\n dictionary[word_vector.split()[0]] = word_vector.split()[1:]\n keys.append(word_vector.split()[0])\n file.close()\n\n file = open(\"./Glove_dict.txt\", \"a\",encoding='utf-8')\n for word in keys:\n file.write(word + '\\n')\n file.close()", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = 
paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))", "def create_dictionary(file_dir):\r\n\tword_list = []\r\n\tfile_list = read_files(file_dir, \"lab\") # step 7\r\n\tfor file in file_list:\r\n\t\twith open(file, 'r') as f:\r\n\t\t\ttext = f.read()\r\n\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def loadWords():\n inFile = open(wordFile, 'r')\n wordlist = []\n for line in inFile:\n wordlist.append(line)\n return wordlist", "def buildCorpus(self, filename, stopwords_file=None):\n with open(filename, 'r') as infile:\n # use pattern.subs\n # doclines = [line.rstrip().lower().split(' ') for line in infile]\n doclines = [self.help_clean(line) for line in infile]\n n_docs = len(doclines)\n self.vocab = list({v for doc in doclines for v in doc})\n if stopwords_file:\n with open(stopwords_file, 'r') as stopfile:\n stops = stopfile.read().split()\n self.vocab = [x for x in self.vocab if x not in stops]\n self.vocab.sort()\n self.documents = []\n for i in range(n_docs):\n self.documents.append({})\n for j in range(len(doclines[i])):\n if doclines[i][j] in self.vocab:\n self.documents[i][j] = self.vocab.index(doclines[i][j])", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def __init__(self, file_name=None):\n self.word_list = {} # Dict of {word: frequency}\n self.replacement_words = {}\n self.ignored_words = []\n [self.add_word(w) for w in self.ADDITIONAL_VALID_WORDS]\n if file_name:\n self.load(file_name)", "def get_word_freq(filein):\n freq = {}\n\n # Open file handles with context manager\n with open(filein) as f:\n\n # Read a single line at a time so as not to crush memory\n for line in f:\n\n # Tokenize and iterate\n for word in line.split():\n\n # Use try/except instead of if/then for performance\n # Likely after the first 1M tweets that the key will be contained\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\n return freq", "def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table", "def load_words():\n with open(DICTIONARY) as f:\n return [line.strip() for line in f]", "def read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except IOError:\n print 'Input file reading failed,'\n return words", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def 
assignWordList(filename, thisDataEntry):\n oldArr = []\n newArr = []\n try:\n with open(filename, encoding=\"latin-1\") as file:\n lines = [line.rstrip() for line in file]\n idx = 0\n while(lines[idx] != \"***\"):\n oldArr.append(lines[idx].lower())\n idx += 1\n idx += 1 #Skip the delimitter\n for x in range(idx, len(lines)):\n newArr.append(lines[x].lower())\n file.close()\n except IOError:\n print(\"Error opening: \" + str(filename))\n for x in oldArr:\n thisDataEntry.old[x] = 0\n for y in newArr:\n thisDataEntry.new[y] = 0", "def read_file(filename):\n print(\"Reading dictionary: \" +filename)\n word_dict = set()\n\n dictionary = open(filename)\n\n # Read each word from the dictionary\n for word in dictionary:\n # Remove the trailing newline character\n word = word.rstrip('\\n')\n\n # Convert to lowercase\n word = word.lower()\n\n word_dict.add(word)\n\n dictionary.close()\n\n return word_dict", "def generate_dictionary(location):\n f = open('../data/wordlist.txt', 'rb')\n words = Counter(re.findall('[a-z]+', f.read().lower().decode()))\n joblib.dump(words, location)", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def getstopwords():\n file = open('stopWords.txt', 'r')\n stoplist = []\n for word in file.readlines():\n word = word.strip('\\n')\n stoplist.append(word)\n return stoplist", "def word_counts(file):\n words = defaultdict(int)\n regex = re.compile('[' + string.punctuation + ']')\n for line in open(file):\n for word in [regex.sub('', w) for w in line.lower().split()]:\n words[word] += 1\n\n return words", "def learn(filename):\n word_dict = {} # Create empty dictionary\n first = None\n prev = None\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n list_words = line.lower().split()\n text = []\n for word in list_words:\n # take out leading and trailing punctuation characters\n words = word.strip(string.punctuation + string.digits)\n word_len = len(words)\n if word_len >= 1:\n text.append(words)\n\n if first is None:\n # Get the first word in the text file\n first = text[0]\n # iterate over text\n if prev:\n text.insert(0, prev)\n for counter, word in enumerate(text):\n if word not in word_dict:\n word_dict[word] = list()\n if counter < (len(text) - 1):\n following = counter + 1\n word_dict[word].append(text[following])\n prev = text[-1]\n return first, word_dict # return a tuple", "def load_cows(filename):\r\n print(\"Loading words from file...\")\r\n # inFile: file\r\n inFile = open(filename, 'r')\r\n # wordlist: list of strings\r\n wordlist = {}\r\n for line in inFile:\r\n cow = line.split(',')\r\n wordlist[cow[0]] = int(cow[1]) # 0: name, 1: weight\r\n inFile.close()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts 
this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def get_word_list(file_name, to_skip_or_not_to_skip):\n fin = open(file_name) #opening file\n histogram={} \n if to_skip_or_not_to_skip == True: #if I want to skip the header this is set to True\n skip_first_part(fin)\n for line in fin: #runs through lines of book file\n line = line.replace(\"-\",\" \") #takes out dashed, underscroes, numbers, whitespaces, and punctuation\n line = line.replace(\"_\",\" \")\n to_remove = string.punctuation + string.whitespace + '0123456789' \n for word in line.split():\n word = word.strip(to_remove) #running through all words in each line \n if word == 'God' or 'Lord':\n pass\n else:\n word = word.lower()\n histogram[word] = histogram.get(word, 0)+1\n return histogram", "def read_dictionary_from_file(self, stem_flag):\n file_name = \"/dictionary.txt\" if not stem_flag else \"/dictionaryWithStemming.txt\"\n with open(self.posting_and_dictionary_path + file_name, \"r\") as f:\n txt = f.readlines()\n for line in txt:\n l = line.split(\":\")\n pos = l[1].split(\",\")\n e = DictionaryElement(pos[0])\n e.pointer = int(pos[1])\n e.corpus_tf = int(pos[2])\n if not stem_flag:\n self.term_dictionary[l[0]] = e\n else:\n self.term_dictionary_with_stemming[l[0]] = e\n f.close()", "def ReadAndTokenize(filename):\n global CACHE\n global VOCABULARY\n if filename in CACHE:\n return CACHE[filename]\n comment = open(filename).read()\n words = Tokenize(comment)\n\n terms = collections.Counter()\n for w in words:\n VOCABULARY[w] += 1\n terms[w] += 1\n\n CACHE[filename] = terms\n return terms", "def importBrainstormWordsFile(filename):\n #init 
the list with all words in the file\n allWords = []\n \n #open the brainstorming words file and read the lines\n with open(filename, 'r') as fp:\n lines = fp.read().splitlines()\n \n #split the lines for the idiots that didn't read the instructions and add them to the output\n for curLine in lines:\n if curLine.startswith('Please type one'):\n continue\n cutLines = curLine.replace(',',' ').split()\n \n #cycle the word and add them\n for curWord in cutLines:\n allWords.append(curWord.strip().lower())\n \n return allWords", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def load_stopwords():\r\n\tglobal stopwords\r\n\tif os.path.exists(paths.path_data_stopwords_txt):\r\n\t\tprint('\\nloading stopwords')\r\n\t\twith open(paths.path_data_stopwords_txt,'r') as inf:\r\n\t\t\tstopwords = inf.read().split('\\n')\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def load_stop_list():\n stop_list = []\n with open(STOP_LIST, \"r\") as f:\n lines = f.readlines()\n stop_list = [word.strip() for word in lines]\n return stop_list", "def LoadWords(self,FileName) :\r\n\t\ttry :\r\n\t\t\twith open(FileName,'r') as fhan :\r\n\t\t\t\tWords = fhan.read()\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to read file %s: %s\"%(FileName,detail))\r\n\t\ttry :\r\n\t\t\tWordList = Words.rstrip().split('\\n')\r\n\t\t\tWordList = filter(None,WordList)\r\n\t\t\tWordList = [(Word,) for Word in WordList]\r\n\t\t\tDictRef = self.CreateDict(FileName)\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectDictTable'],(DictRef,))\r\n\t\t\tDictName = self.DB_Cursor.fetchone()[0]\r\n\t\t\tself.DB_Cursor.executemany(self.SQLCMDs['InsertAllWordsToDict']%DictName,WordList)\r\n\t\t\tself.DB_Connect.commit()\r\n\t\t\tlist_id = self.CreateWordList(FileName,DictRef)\r\n\t\t\tself.UpdateWordList(list_id,False)\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to add words to the new dictionary: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn DictRef", "def load_words():\r\n## print 
\"Loading word list from file...\"\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r', 0)\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.append(line.strip().lower())\r\n## print \" \", len(wordlist), \"words loaded.\"\r\n return wordlist", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def get_words_from_file(filename):\n words_by_len = {}\n f = open(filename, \"r\", 1, \"utf8\")\n for word in f:\n word = word.strip().lower()\n w_len = len(word)\n if w_len > 1:\n words_by_len[w_len] = words_by_len.get(w_len, []) + [word]\n return words_by_len", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def read_data(file_path):\n words=[]\n dic_word={}\n actual_text=[]\n for line in open(file_path,encoding='utf-8'):\n words_line=line.strip().split(' ')\n for ite in words_line:\n if ite not in dic_word:\n dic_word[ite]=1\n words.extend(words_line)\n actual_text.append(words_line)\n\n\n #with zipfile.ZipFile(file_path) as f:\n #words = tf.compat.as_str(f.read(f.namelist()[0])).split()\n\n return words,len(dic_word),actual_text", "def make_mimic_dict(filename):\r\n with open(filename, 'r') as file:\r\n text = file.read().lower().replace(\"'\",'').split()\r\n mimic_dict = {}\r\n prev = ''\r\n for word in text:\r\n if not prev in mimic_dict:\r\n mimic_dict[prev] = [word]\r\n else:\r\n mimic_dict[prev].append(word)\r\n prev = word\r\n return mimic_dict", "def getDictionary(tsvFile, wordIndex, ngram):\r\n \r\n reader = DescriptionReader(tsvFile, wordIndex, ngram)\r\n dictionary = corpora.Dictionary( d for d in reader )\r\n\r\n # remove stop words and words that appear only once\r\n stoplist = [] # might be specified in the furture\r\n stop_ids = [dictionary.token2id[stopword] for stopword in stoplist\r\n if stopword in dictionary.token2id]\r\n once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]\r\n dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once\r\n dictionary.compactify() # remove gaps in id sequence after words that were removed\r\n \r\n return dictionary", "def removeOwnStopWords(self, sort=True, lc=False):\n\t\tself.textFile = self.removeStopWords(text=self.textFile, sort=sort, lc=lc)", "def read_file(fp):\n scrabble_words_dict = {}\n \n for line in fp:\n line = line.lower()\n line = line.strip()\n if len(line) < 
3:\n continue\n elif \"-\" in line or \"'\" in line:\n continue\n else:\n scrabble_words_dict[line] = 1\n return scrabble_words_dict", "def load_defs():\n # Load word definitions\n fname = 'word-definitions.txt'\n with open(fname) as fh:\n lines = fh.readlines()\n \n # Create dictionary keyed by lowercase word\n def_tbl = dict()\n for line in lines:\n # split the dictionary line at the first space\n word, word_def = line.split(sep=None, maxsplit=1)\n # add this entry to the dictionary\n word = word.lower()\n def_tbl[word] = word_def.rstrip()\n return def_tbl", "def load_words():\n # Load all the words from the scrabble dictionary into a python list, words\n fname = 'words.txt'\n with open(fname) as fh:\n words = fh.readlines()\n \n # Create a python dict keyed by sorted letters, with value equal to a list\n # of all the anagrams of that collection of letters\n anagram_tbl = dict()\n for word in words:\n word_lc = word.rstrip().lower()\n key = word_key(word_lc)\n value = anagram_tbl.get(key, []) + [word_lc]\n anagram_tbl[key] = value\n return anagram_tbl", "def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist", "def dictionary_creation(filename):\n\tfp = open(filename)\n\td = dict()\n\tfor line in fp:\n\t\t# print line\n\t\tfor word in line.split():\n\t\t\tword = word.strip(string.punctuation + string.whitespace)\n\t\t\t# print word\n\t\t\tif len(word) >5:\n\t\t\t\tif word not in d:\n\t\t\t\t\t# print 'in'\n\t\t\t\t\td[word] = 1\n\t\t\t\telse:\n\t\t\t\t\t# print 'not in'\n\t\t\t\t\td[word] += 1\n\treturn d\n\n\tfp.close()", "def word_tag_counts (count_file):\r\n wordtagcounts = defaultdict(list)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split(\" \")\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0].strip())\r\n tag = fields[2].strip()\r\n word = fields[3].strip()\r\n wordtagcounts[word].append((tag, count)) \r\n f.close() \r\n return wordtagcounts", "def setKeys():\n keywords['c++'] = {}\n with open('cppkeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['c++'][key] = list(words)\n for j in words:\n MyDict.insert(j)\n keywords['py'] = {}\n with open('pykeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['py'][key] = list(words)\n for j in words:\n MyDict.insert(j)", "def read_files(self,corpus):\n\n file = open(corpus)\n markov_dictionary = {}\n word_key = ['None', 'None']\n word_list = []\n lastword = \"\"\n #use for loop to make lines in file a list\n for line in file:\n line = line.strip()\n words = line.split(\" \")\n \n # generate keys\n word_key[0] = lastword\n word_key[1] = words[0]\n \n if lastword:\n markov_dictionary[tuple(word_key)] = self.make_values(corpus, word_key)\n\n i = 0\n while i < len(words) - 1:\n word_key[0] = words[i]\n word_key[1] = words[i + 1]\n \n markov_dictionary[tuple(word_key)] = self.make_values(corpus, word_key)\n\n i += 1\n\n lastword = words[len(words) - 1]\n\n # print \"make_chains\", markov_dictionary\n return markov_dictionary", "def read_word_list(file_name):\r\n\twith open(file_name) as word_list_file:\r\n\t\treturn set(word.strip() for word in word_list_file)", "def load_dictionary(filename, 
encoding='utf_8', skip=0, max_words=50000):\n\n d = dict()\n with codecs.open(filename, 'r', encoding=encoding) as f:\n line_counter = 0\n index_counter = 1 # we use 1 for the <EOS> symbol in both languages and 0 for <UNK> words\n\n d['<EOS>'] = index_counter\n index_counter += 1\n\n for line in f.readlines():\n\n line_counter += 1\n\n # check if we have to skip something\n if line_counter > skip:\n # split the line\n s = line.split()\n # get word and its index\n # if index > max. number of words, set it to 0\n if index_counter < max_words:\n word = s[0]\n d[word] = index_counter\n index_counter += 1\n return d", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def countWords(words, filename):\n\ttry:\n\t\tfile = codecs.open(filename, \"r\", \"utf8\")\n\t\ttokens = [ string.strip(string.lower(i)) for i in file.read().split() ]\n\t\tfor i in tokens:\n\t\t\twords[i] = words.get(i, 0) + 1\n\t\tfile.close()\n\texcept IOError:\n\t\tprint \"Cannot read from file:\", filename\n\treturn words", "def process_file(self, filename, order=2):\n fp = open(filename)\n self.skip_gutenberg_header(fp)\n\n for line in fp:\n for word in line.rstrip().split():\n self.process_word(word, order)\n\n #print(\">>>DEBUG the suffix map\")\n #i = 0\n #for k,v in self.suffix_map.items():\n # print(\"key is {}, value is {}\".format(k, v))\n # i += 1\n # if i > 10:\n # break", "def create_english_word_list(filename):\n global global_english_word_list\n\n if not global_english_word_list:\n with open(filename) as f:\n for line in f:\n global_english_word_list.append(re.sub(r'\\s+', '', line))", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. 
hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts" ]
[ "0.7675772", "0.6984068", "0.67051345", "0.65770996", "0.6555971", "0.65203714", "0.6489752", "0.6475122", "0.6467671", "0.6451879", "0.6377066", "0.63083595", "0.63079983", "0.6283576", "0.6268957", "0.6258872", "0.6253571", "0.6214078", "0.61646724", "0.6143819", "0.608474", "0.6073526", "0.6068786", "0.6065902", "0.60613567", "0.6052064", "0.6046372", "0.6042853", "0.60415167", "0.6040906", "0.60295177", "0.59983766", "0.59936625", "0.5988736", "0.5987943", "0.5976255", "0.59729934", "0.5966749", "0.59628665", "0.59528106", "0.5945419", "0.59452385", "0.5942639", "0.59383506", "0.5925877", "0.5908141", "0.5902478", "0.58904344", "0.588735", "0.5849045", "0.58342755", "0.5830125", "0.5798432", "0.57905126", "0.5788999", "0.5785569", "0.5784483", "0.5779273", "0.5775147", "0.5769087", "0.57593477", "0.57565105", "0.5749934", "0.57453054", "0.57395786", "0.5739182", "0.5737352", "0.5734947", "0.57284075", "0.5726179", "0.570873", "0.5707878", "0.57050043", "0.5704106", "0.568509", "0.56668484", "0.566292", "0.56574434", "0.5651551", "0.565081", "0.56492203", "0.56425697", "0.56345403", "0.5618813", "0.56128454", "0.56111825", "0.5608342", "0.56067157", "0.55998313", "0.55942154", "0.55927545", "0.55899787", "0.55746", "0.5572658", "0.5571997", "0.5567334", "0.55641556", "0.55524105", "0.55454993", "0.5521379" ]
0.69579995
2
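The two values above are this row's document_score (0.69579995) and document_rank (2). The rank appears to be the number of negatives whose score exceeds the document's, i.e. the document's 0-based position in the combined ranking: two negatives here (0.7675772 and 0.6984068) outscore the positive. A small sketch of that reading follows; it is an assumption, since the dump does not define the field formally.

# Assumed meaning of document_rank: how many negatives outscore the document,
# equivalently the document's 0-based position in the combined ranking.
def document_rank(document_score: float, negative_scores: list[float]) -> int:
    return sum(score > document_score for score in negative_scores)

# For the row above, two negative scores (0.7675772, 0.6984068) exceed
# 0.69579995, matching the stored document_rank of 2.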
Read words from the input text file (filename) and insert them into the concordance hash table, after stripping punctuation and numbers and filtering out words that are in the stop words hash table. Do not record duplicate line numbers: if a word appears more than once on the same line, store just one entry for that line.
def load_concordance_table(self, filename):
    self.concordance_table = HashTable(191)
    try:
        a = open(filename, "r")
        lines = a.readlines()
        a.close()
    except:
        raise FileNotFoundError()
    for n in range(len(lines)):
        lone = clean(lines[n])
        line = lone.split(" ")
        for i in line:
            if (i != None) and (self.stop_table.in_table(i) == False) and (i != ""):
                self.concordance_table.insert(i, n+1)
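Both reference solutions rely on a HashTable class, and the concordance loader also calls a clean() helper, neither of which appears in this dump. Below is a minimal stub of the interface they seem to assume (insert, in_table, and punctuation/digit stripping); every detail is an assumption for illustration, not the dataset's actual implementation.

# Illustrative stubs for the HashTable and clean() helpers used above.
import string

class HashTable:
    def __init__(self, size):
        self.size = size
        self.slots = [[] for _ in range(size)]   # separate chaining

    def insert(self, key, value):
        self.slots[hash(key) % self.size].append((key, value))

    def in_table(self, key):
        return any(k == key for k, _ in self.slots[hash(key) % self.size])

def clean(line):
    # Lower-case a line and drop punctuation and digits before insertion.
    table = str.maketrans("", "", string.punctuation + string.digits)
    return line.lower().translate(table)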
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def load_concordance_table(self, filename):\n self.concordance_table = HashTable(191)\n with open(filename, 'r') as f:\n for linenum,words in enumerate(f.readlines()):\n for i in words.translate(self.ttable).split():\n i = i.casefold()\n if not self.stop_table.in_table(i):\n self.concordance_table.insert(i,linenum + 1)", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def readFile(filename):\n listOfWords = []\n currentLine = 1\n f = open(filename, \"r\")\n for line in f:\n line = stripPunctuation(line)\n for word in line.split():\n word = word.lower()\n if len(word) > 1:\n if not word[0].isdigit():\n tempObj = contains(listOfWords, word)\n if tempObj != None:\n tempObj.incOccurrence(currentLine)\n else:\n temp = Word(word, currentLine)\n listOfWords.append(temp)\n currentLine = currentLine + 1\n return listOfWords", "def load():\n for line in open(config.filepath, 'r'):\n line = line.strip()\n line_sorted = ''.join(sorted(line))\n\n if line_sorted not in Words.hashed:\n Words.hashed[line_sorted] = []\n\n # Store the real hashed as a list\n # We need line_sorted as the key for fast lookup later\n Words.hashed[line_sorted].append(line)\n\n # Also add the word to a standard list\n # We'll use this to quickly determine wordiness later\n Words.words.append(line)", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # convert word to lowercase to avoid inconsistent hash values\n # due to different cases of the same word.\n w = w.lower()\n\n # check if the current word already exists as a key\n if w in keys:\n current_count = ht.get(w) # fetch the current count for that word\n current_count += 1 # increment count by one\n ht.put(w, current_count) # update value for the key\n else:\n # word does not exist in hash map\n keys.add(w) # add current word to keys set\n ht.put(w, 1) # insert key into hash map with value of 1\n\n # fetch unsorted list of tuples from parsed data\n word_count_list = compile_list(ht, keys)\n\n # sort word count tuple list\n word_count_list = word_count_sort(word_count_list)\n\n # initialize and fill final word list\n final_list = []\n\n for index in range(0, number):\n final_list.append(word_count_list[index])\n\n return final_list", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def load_stop_table(self, filename):\n self.stop_table = HashTable(191)\n with open(filename, 'r') as f:\n for word in f.readlines():\n self.stop_table.insert(word.replace('\\n',''),None)", "def collate(filename):\r\n x=open(filename,\"r\")\r\n total_words=[]\r\n for line in x:\r\n line=line.strip(\"\\n\")\r\n line=line.split(\":\")\r\n if len(total_words)<1:\r\n total_words.append(line)\r\n else:\r\n x= len(total_words)\r\n if line[0] == total_words[x-1][0]:\r\n if int(line[1]) > int(total_words[x-1][len(total_words[x-1])-1]):\r\n total_words[x-1].append(line[1])\r\n else:\r\n total_words.append(line)\r\n y = open(\"collated_ids.txt\", \"w\")\r\n # for i in range(len(total_words)):\r\n # if len(total_words[i])<3:\r\n # total_words[i]=\":\".join(total_words[i])+\"\\n\"\r\n # else:\r\n # id=\" \".join(total_words[i][1:])\r\n # total_words[i]=total_words[i][0]+\":\"+id+\"\\n\"\r\n # y.writelines(total_words)\r\n for i in range(len(total_words)):\r\n id=\"\"\r\n for j in range(1,len(total_words[i])):\r\n id=id +total_words[i][j] +\" \"\r\n y.write(str(total_words[i][0]) + \":\" +str(id) + \"\\n\")", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def word_frequency_in_file(filename):\n words = {}\n fin = open(filename)\n punctuation = string.punctuation\n for line in fin:\n line = line.translate( # Replace punctuation with spaces\n str.maketrans(punctuation, ' ' * len(punctuation)))\n line = line.lower()\n line_words = line.split()\n for word in line_words: # Process each word in the line.\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n return words", "def create_index(path):\n words = {}\n\n for l in open(path):\n linewords = l.strip().split(\" \")\n student = linewords[0]\n linewords = linewords[1:]\n\n for word in linewords:\n if word in words:\n if int(student) not in words[word]:\n words[word].append(int(student))\n else:\n words[word] = [int(student)]\n\n return words", "def load_file(self, file_path):\n f = open(file_path, \"r\")\n sentences = f.readlines()\n \n word_count = 0\n\n for sentence in sentence: \n for 
word in sentence.strip().split(\" \"):\n if not (word in self.word_id): #word not in dictionary\n word_id[word] = word_count\n word_count += 1\n\n #self.doc = [[self.word_id[word] for word in sentence.strip().split(\" \")] for sentence in sentences]", "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts", "def process_file(filename, skip_header):\n hist = {}\n fp = open(filename, encoding='utf8')\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n\n for line in fp:\n if line.startswith('*** END OF THIS PROJECT'):\n break\n line = line.replace('-', ' ')\n strippables = string.punctuation + string.whitespace\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(strippables)\n word = word.lower()\n\n #update the histrogram\n hist[word] = hist.get(word, 0) + 1\n\n\n return hist", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def read_file(fp):\n scrabble_words_dict = {}\n \n for line in fp:\n line = line.lower()\n line = line.strip()\n if len(line) < 3:\n continue\n elif \"-\" in line or \"'\" in line:\n continue\n else:\n scrabble_words_dict[line] = 1\n return scrabble_words_dict", "def process(filename):\r\n x = open(filename, \"r\")\r\n words_from_songs=[]\r\n for line in x:\r\n array =line.split(\":\")\r\n songid= array[0]\r\n lyrics=array[1]\r\n lyrics=lyrics.replace(\"\\n\", \"\")\r\n lyrics=lyrics.split(\" \")\r\n for i in range(len(lyrics)):\r\n words_from_songs.append((lyrics[i],songid))\r\n words_from_songs=radixSortNumbers(words_from_songs)\r\n max1 = longestWord(words_from_songs)\r\n counting = []\r\n for _ in range(max1+1):\r\n counting.append([])\r\n for k in range(len(words_from_songs)-1,0,-1):\r\n counting[len(words_from_songs[k][0])].append(words_from_songs[k])\r\n new_list = []\r\n # for i in range(len(counting)-1,0,-1):\r\n # for k in range(len(counting[i])):\r\n # new_list.insert(0,counting[i][k])\r\n # for i in range(len(counting) - 1, 0, -1):\r\n # new_list = countingSort(new_list, i - 1)\r\n\r\n for i in range(len(counting)-1,0,-1):\r\n for k in range(len(counting[i])):\r\n new_list.insert(0,counting[i][k])\r\n new_list = countingSort(new_list,i-1)\r\n y = open(\"sorted_words.txt\",\"w\")\r\n for i in range(len(new_list)):\r\n y.write(str(new_list[i][0])+\":\"+str(new_list[i][1]+\"\\n\"))", "def dictionary_creation(filename):\n\tfp = open(filename)\n\td = dict()\n\tfor 
line in fp:\n\t\t# print line\n\t\tfor word in line.split():\n\t\t\tword = word.strip(string.punctuation + string.whitespace)\n\t\t\t# print word\n\t\t\tif len(word) >5:\n\t\t\t\tif word not in d:\n\t\t\t\t\t# print 'in'\n\t\t\t\t\td[word] = 1\n\t\t\t\telse:\n\t\t\t\t\t# print 'not in'\n\t\t\t\t\td[word] += 1\n\treturn d\n\n\tfp.close()", "def get_word_list(file_name, to_skip_or_not_to_skip):\n fin = open(file_name) #opening file\n histogram={} \n if to_skip_or_not_to_skip == True: #if I want to skip the header this is set to True\n skip_first_part(fin)\n for line in fin: #runs through lines of book file\n line = line.replace(\"-\",\" \") #takes out dashed, underscroes, numbers, whitespaces, and punctuation\n line = line.replace(\"_\",\" \")\n to_remove = string.punctuation + string.whitespace + '0123456789' \n for word in line.split():\n word = word.strip(to_remove) #running through all words in each line \n if word == 'God' or 'Lord':\n pass\n else:\n word = word.lower()\n histogram[word] = histogram.get(word, 0)+1\n return histogram", "def read_words(f, words):\n with open(f) as file:\n for line in file:\n w = tokenizer.tokenize(line.strip())\n for word in w:\n try:\n words[word] += 1\n except:\n words[word] = 1", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the 
word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def importBrainstormWordsFile(filename):\n #init the list with all words in the file\n allWords = []\n \n #open the brainstorming words file and read the lines\n with open(filename, 'r') as fp:\n lines = fp.read().splitlines()\n \n #split the lines for the idiots that didn't read the instructions and add them to the output\n for curLine in lines:\n if curLine.startswith('Please type one'):\n continue\n cutLines = curLine.replace(',',' ').split()\n \n #cycle the word and add them\n for curWord in cutLines:\n allWords.append(curWord.strip().lower())\n \n return allWords", "def getDictionary(tsvFile, wordIndex, ngram):\r\n \r\n reader = DescriptionReader(tsvFile, wordIndex, ngram)\r\n dictionary = corpora.Dictionary( d for d in reader )\r\n\r\n # remove stop words and words that appear only once\r\n stoplist = [] # might be specified in the furture\r\n stop_ids = [dictionary.token2id[stopword] for stopword in stoplist\r\n if stopword in dictionary.token2id]\r\n once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]\r\n dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once\r\n dictionary.compactify() # remove gaps in id sequence after words that were removed\r\n \r\n return dictionary", "def learn(filename):\n word_dict = {} # Create empty dictionary\n first = None\n prev = None\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n list_words = line.lower().split()\n text = []\n for word in list_words:\n # take out leading and trailing punctuation characters\n words = word.strip(string.punctuation + string.digits)\n word_len = len(words)\n if word_len >= 1:\n text.append(words)\n\n if first is None:\n # Get the first word in the text file\n first = text[0]\n # iterate over text\n if prev:\n text.insert(0, prev)\n for counter, word in enumerate(text):\n if word not in word_dict:\n word_dict[word] = list()\n if counter < (len(text) - 1):\n following = counter + 1\n word_dict[word].append(text[following])\n prev = text[-1]\n return first, word_dict # return a tuple", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def ReadAndTokenize(filename):\n global CACHE\n global VOCABULARY\n if filename in CACHE:\n return CACHE[filename]\n comment = open(filename).read()\n words = Tokenize(comment)\n\n terms = collections.Counter()\n for w in words:\n VOCABULARY[w] += 1\n terms[w] += 1\n\n CACHE[filename] = terms\n return terms", "def file_preprocessing(input_file, output_file):\n # print(\"processing file \" + input_file)z\n # replace the punctuations with space\n replace_punctuation = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n # stemming\n stemmer = PorterStemmer()\n\n with open(input_file, 'r', encoding='utf-8', errors='replace') as inFile, open(output_file,'w') as outFile:\n for line in 
inFile:\n # replace punctuations\n # convert camel case into space separated\n # convert snake case into space separated\n # remove language keywords\n custom_stopwords = [\"ENDCOND\",\"PVSCL\", \"IFCOND\", \"EVAL\", \"ENDCOND\", \"ELSECOND\", \"ELSEIFCOND\", \"WINDOW\", \"FUNCTION\",\n \"CALLBACK\", \"ABWA\", \"ERROR\", \"TODO\", \"RESOLVE\", \"DOCUMENT\", \"CLASS\", \"LINE\", \"ELEMENT\", \"UTILS\",\n \"NEW\", \"IS\", \"EMPTY\",\"ANNOTATIONS\",\"ANNOTATION\",\"UTILS\",\"CURRENT\",\"TEXT\",\"GET\",\"NAME\",\"LISTERNER\",\n \"ADD\", \"EVENT\", \"CREATE\",\"FOR\", \"FIND\", \"LENGTH\", \"USER\", \"VALUE\", \"ALERT\", \"ALERTS\", \"ID\", \"HANDLER\",\n \"MESSAGE\", \"GROUP\", \"RETRIEVE\", \"MANAGER\", \"LANGUAGE\", \"CONTENT\", \"INIT\"]\n line_witout_puncs = ' '.join([snake_to_spaces(camel_to_spaces(word))\n for word in line.translate(replace_punctuation).split()\n if len(word) >=4 and word not in stopwords.words('english') #and #word.upper() not in (name.upper() for name in custom_stopwords)\n and word not in all_keywords])\n\n\n # stemming\n # singles = []\n # for plural in line_witout_puncs.split():\n # try:\n # singles.append(stemmer.stem(plural))\n # except UnicodeDecodeError:\n # print(plural)\n\n # line_stemmed = ' '.join(singles)\n # print(line_stemmed, file=outFile)\n print(line_witout_puncs.encode(\"utf-8\"), file=outFile)", "def twitter_data(filename, dictionary):\r\n new_data = []\r\n with codecs.open(filename, 'r', 'utf8') as f:\r\n for line in f:\r\n new_line = []\r\n stuff = [x for x in line.lower().split() if\r\n ((has_letter(x) or len(x) >= 1) and keep_word(x, num_words, count_dict))]\r\n for word in stuff:\r\n new_line.append(dictionary.get(word, 1))\r\n if len(new_line) > 0:\r\n new_data.append(new_line)\r\n return new_data", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def buildGraph(file):\r\n dict = {}\r\n graph = Graph()\r\n wfile = open(file,'r')\r\n for line in wfile:\r\n word = line[:-1]\r\n for i in range(len(word)):\r\n bucket = word[:i] + '_' + word[i+1:]\r\n if bucket in dict:\r\n dict[bucket].append(word)\r\n else:\r\n dict[bucket] = [word]\r\n for bucket in dict.keys():\r\n for word1 in dict[bucket]:\r\n for word2 in dict[bucket]:\r\n if word1 != word2:\r\n graph.addEdge(word1,word2)\r\n return graph", "def init(wordlist_filename):\n global WORDS\n if WORDS == None:\n WORDS = []\n bad_line = lambda x: x.strip() == '' or x.startswith('#')\n with codecs.open(wordlist_filename, 'r', 'utf-8') as filehandle:\n lines = filehandle.readlines()\n WORDS = set([x.lower().strip() for x in lines if not bad_line(x)])", "def import_words(file_name):\n with open(file_name) as word_list:\n words = []\n for line in word_list:\n number, word = line.strip().split(\"\\t\")\n words.append(word.strip())\n # print(f\"Imported {(len(word_dict))} words\")\n\n return words", "def tokenize_and_split_bis(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n i = -1\n document = 0\n terms = 0\n new_document = True\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n document += 1\n new_document = True\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n list3.append(1)\n list4.append(1)\n new_document = False\n terms += 1\n else : 
\n i = i - 1\n w.append(dic[word])\n list4[dic[word]] += 1\n terms += 1\n if new_document: \n list3[dic[word]] += 1\n new_document = False\n \n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n\n moy = 0\n len_dic = len(dic.keys())\n list5 = [0 for x in range(len_dic)]\n for key in dic.keys():\n if list4[dic[key]] > 0:\n tf = list4[dic[key]] / terms\n idf = math.log(document / list3[dic[key]])\n tfIdf = tf * idf\n list5[dic[key]] = tfIdf\n # print(\"the word \" + str(key) + \" appairs \" + str(list4[dic[key]]) + \" times.\")\n # print(\"his frequency is \" + str(list4[dic[key]] / terms) )\n # print(\"the word \" + str(key) + \" appairs \" + str(list3[dic[key]]) + \" times in each document.\")\n # print(\"his frequency is \" + str(idf))\n # print(\"utility \" + str(tfIdf))\n moy += tfIdf\n \n moy = moy / len_dic \n # print(moy)\n dic_bis = {}\n i = -1\n for key in dic.keys():\n value = list5[dic[key]]\n # print(str(value))\n if (value > oracle * moy):\n i += 1\n dic_bis[key] = i\n # else:\n # print(\"not pass \" + key + \" \" + str(value))\n \n \n # print(dic_bis == dic)\n # print(dic)\n return dic_bis,list1,list2", "def load_dictionary(hash_table, filename):\n\n file = open(filename)\n lines = file.readlines()\n start = timeit.default_timer()\n for line in lines:\n hash_table.insert(line.rstrip(),1)\n if timeit.default_timer() - start > 4:\n break\n file.close()", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def word_counts(file):\n words = defaultdict(int)\n regex = re.compile('[' + string.punctuation + ']')\n for line in open(file):\n for word in [regex.sub('', w) for w in line.lower().split()]:\n words[word] += 1\n\n return words", "def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n 
# print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def spell_file(fn, wordcost, maxword):\n\n def infer_spaces(s):\n \"\"\"Uses dynamic programming to infer the location of spaces in a string\n without spaces.\"\"\"\n global unfolded\n if s in unfolded:\n return unfolded[s]\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n c,k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n c,k = best_match(i)\n assert c == cost[i]\n out.append(s[i-k:i])\n i -= k\n \n unfolded[s] = ' '.join(reversed(out))\n return ' '.join(reversed(out))\n\n\n\n speller = aspell.Speller('lang', 'en')\n for w in slang:\n speller.addtoSession(w)\n \n with open(tweet_tmp1_dir + fn, 'r') as fin:\n with open(tweet_tmp2_dir + fn, 'w') as fout:\n res = []\n for l in fin:\n prefix = ''\n if 'test' in fn:\n comma = l.find(',')\n prefix = l[:comma].strip()\n l = l[comma+1:]\n try:\n assert(prefix.isdigit())\n except:\n print(prefix, l)\n prefix += ','\n \n ll = ''\n \n ws = [w for w in l.strip().split(' ') if len(w) > 0]\n for w in ws:\n if w in correct_word:\n nw = correct_word[w]\n elif (w.startswith('<') and w.endswith('>')) or w in whitelist or speller.check(w):\n nw = w\n else:\n try:\n nw1, nw2 = speller.suggest(w)[:2]\n nwdist1 = jellyfish.levenshtein_distance(w,nw1)\n nwdist2 = jellyfish.levenshtein_distance(w,nw2)\n \n if nw2.count(' ') < nw1.count(' ') or (nwdist1 > MAX_DIST_CORRECTION and nwdist2 < nwdist1) :\n nw1 = nw2\n nwdist1 = nwdist2\n if nwdist1 <= MAX_DIST_CORRECTION:\n nw = nw1.lower()\n else:\n nw = w.lower()\n except:\n nw = infer_spaces(w)\n if nw.count('.') >= nw.count(' ')/3:\n nw = nw.replace('.', '')\n elif nw.count('-') >= nw.count(' ')/3:\n nw = nw.replace('-', '')\n nw = nw.replace(' ', ' ').lower()\n ll += nw + ' '\n correct_word[w] = nw\n res.append(prefix+ll.strip())\n# fout.write(prefix+ll.strip()+'\\n')\n fout.write('\\n'.join(res))", "def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n fullwordlist.extend(holder)\n return fullwordlist", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in 
['a', 'i', '']:\n d[letter] = letter\n return d", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def create_dictionary(filename):\n\tword_set = set()\n\tif os.path.isfile(filename):\n\t\twith open(filename, 'r') as f:\n\t\t\tfor line in iter(f):\n\t\t\t\tword_set.add(line.strip('\\n'))\n\telse:\n\t\tprint \"File not found!\"\n\treturn word_set", "def read_dictionary():\n global dic\n with open(FILE, 'r') as f:\n for line in f:\n word_list = line.split()\n word = word_list[0].strip()\n dic.append(word)", "def read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except IOError:\n print 'Input file reading failed,'\n return words", "def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. 
Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def __init__(self):\n stopwords_file = open(self.filepath, \"r\")\n for line in stopwords_file.readlines():\n line2 = line.replace(\"\\n\", \"\") \n self.add(line2)", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def create_dict(fd):\n # initialize an empty dictionary\n full_dict = {}\n # loop through file\n for line in fd:\n # lowercase everything in line, then split line into a list\n line = line.lower().split()\n # loop through elements in the list of words in the splitted line\n for word in line:\n # strip words from puncuation using string module\n word = word.strip(string.punctuation)\n # if words contains only alphabatic characters and of length > 1\n if word.isalpha() and len(word)!= 1:\n if len(word) in full_dict:\n full_dict[len(word)].add(word)\n else:\n full_dict[len(word)] = set()\n full_dict[len(word)].add(word)\n return full_dict", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def print_word_freq(file):\n# Opening file to be read\n with open(file, \"r\") as f:\n file_contents = f.read()\n\n\n# # Taking away punctuation and lowercase all words\n word_list = file_contents.lower().replace(',',' ').replace('.',' ').replace('!',' ').split()\n # print(word_list)\n\n nice_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n 
nice_list.append(word)\n # print(nice_list)\n\n d = {}\n for word in nice_list:\n if word not in d.keys():\n d[word] = 1\n else:\n d[word] += 1 \n # print(sorted(d, key=d.get, reverse=True)\n # sorted(d, key=d.get, reverse=true)\n # print(d)\n\n # for word in sorted(d):\n # print((word, d[word]), end = \" \")\n\n d_filtered = sorted(d, key=d.get, reverse=True)\n for x in d_filtered:\n print(x, d[x])", "def get_words(txtfile):\n\n global _wordset\n global _postrie\n\n f = open(txtfile,'r')\n _wordset = set([x.lower() for x in set(f.read().split()) \\\n if not re.match('.*[\\W,\\d]|^.$',x)])\n\n #print('building suffix trie')\n _postrie = trienode(pre = False)\n _postrie.grow(_wordset)\n\n # Since this will be recursed through later, take care of it now.\n if len(_wordset) > sys.getrecursionlimit():\n sys.setrecursionlimit(len(_wordset))", "def loadBrainstormingCorrectAnswersFile( filename ):\n #read the file and init the output struct\n with open(filename, 'r') as fp:\n lines = fp.readlines()\n synonymTable = {}\n curCategory = ''\n \n for curLine in lines:\n #skip empty lines and lines that start with # as they are comments\n curLine = curLine.strip().lower()\n if not curLine or curLine.startswith('#'):\n continue\n \n #the > symbol indicates a new category all other lines are synonys for this cateogry\n if curLine.startswith('>'):\n curCategory = curLine[1:].strip()\n synonymTable[curCategory] = [curCategory]\n continue\n \n synonymTable[curCategory].append(curLine)\n \n return synonymTable", "def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None", "def convert_from_text(self, file_name):\n with 
open(file_name, 'r') as reader:\n words_list = []\n for line in reader:\n words_list.extend(line.split())\n\n for word in set(words_list):\n if word.isalpha():\n self.insert_word(word.lower())\n else:\n self.insert_word(''.join([c for c in word if c.isalpha()]).lower())", "def create_B_words(path_to_pairs,\n path_to_librispeech_text,\n path_to_phonemes,\n path_save,\n freq_sim,\n len_sim,\n edit_sim):\n for i in range(len(path_to_pairs)):\n \n pairs = []\n dic_cl_eq = {} # Classe d'equivalence pour le sens des mots\n \n with open(path_to_pairs[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n pairs.append(line)\n if line[0] in dic_cl_eq:\n dic_cl_eq[line[0]].add(line[1])\n else:\n dic_cl_eq[line[0]] = {line[1]}\n if line[1] in dic_cl_eq:\n dic_cl_eq[line[1]].add(line[0])\n else:\n dic_cl_eq[line[1]] = {line[0]}\n \n dic_cl_eq_prev = {}\n while dic_cl_eq_prev != dic_cl_eq:\n dic_cl_eq_prev = copy.deepcopy(dic_cl_eq)\n for word in dic_cl_eq:\n for syn in dic_cl_eq[word]:\n dic_cl_eq[word] = set.union(dic_cl_eq[word], dic_cl_eq[syn])\n \n with open(path_to_librispeech_text) as f:\n text_librispeech = f.read()\n text_librispeech_split = text_librispeech.replace('\\n', ' ').split(' ')\n freq_libri = {}\n for word in text_librispeech_split:\n if word in dic_cl_eq:\n if word in freq_libri:\n freq_libri[word] += 1\n else:\n freq_libri[word] = 1\n \n phonemes = []\n with open(path_to_phonemes[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n phonemes.append(line)\n \n dic_word_phonemes = {}\n for j in range(len(pairs)):\n dic_word_phonemes[pairs[j][0]] = phonemes[j][0]\n dic_word_phonemes[pairs[j][1]] = phonemes[j][1]\n \n file = open(path_save[i], 'w+')\n file.truncate(0)\n \n for j in range(len(pairs)):\n A, X = pairs[j]\n B_0 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > (1-len_sim)*len(A)) and \\\n (len(word) < (1+len_sim)*len(A)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_0.append(word)\n line_0 = ' '.join([A, X] + B_0)\n \n X, A = pairs[j]\n B_1 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > np.around((1-len_sim)*len(A), decimals=2)) and \\\n (len(word) < np.around((1+len_sim)*len(A), decimals=2)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_1.append(word)\n line_1 = ' '.join([A, X] + B_1)\n \n if max(len(B_0), len(B_1)) == 0:\n print(X, A)\n \n line = line_0 if len(line_0) > len(line_1) else line_1\n if j < len(pairs) - 1:\n line += '\\n'\n file.write(line)\n \n file.close()", "def process_dict(text, frequency_threshold):\n\n # Trying to load previous unique_words (pickle file)\n UNIQUE_WORDS_PICKLE = \"unique_words_with_frequency_\" + str(frequency_threshold) + \".pickle\"\n \n unique_words = None\n if os.path.isfile(UNIQUE_WORDS_PICKLE):\n try:\n with open(UNIQUE_WORDS_PICKLE, 'r') as f:\n unique_words = pickle.load(f)\n except:\n os.remove(UNIQUE_WORDS_PICKLE)\n unique_words = None\n\n if (type(unique_words) == list):\n return unique_words\n\n\n WORD_COUNT_PICKLE = \"word_count.pickle\"\n WORD_COUNT = 253855\n\n print(\"Processing dictionary. 
This will take a while.\")\n\n # Trying to load previous word_count (pickle file)\n word_count = None\n if os.path.isfile(WORD_COUNT_PICKLE):\n try:\n with open(WORD_COUNT_PICKLE, 'r') as f:\n word_count = pickle.load(f)\n if len(word_count) != WORD_COUNT:\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n except:\n raise\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n\n # count words\n if word_count == None:\n print(\"Pickle file not found. Counting word occurence...\")\n\n # grab all the words\n words = text.split(\" \")\n\n # counting word occurence\n word_count = dict(Counter(words).most_common())\n \n # saving word count for future reuse\n with open(WORD_COUNT_PICKLE, 'w') as f:\n pickle.dump(word_count, f)\n print(\"Word count saved for future reuse.\")\n \n # making sure we have the correct count loaded\n assert(type(word_count) == dict)\n assert(len(word_count) == WORD_COUNT)\n\n # remove the duplicates and single-character words.\n unique_words = [w for w in word_count.keys() if len(w) > 1]\n vocab_size = len(unique_words)\n print(\"Vocab size:\", vocab_size)\n\n # remove words with frequency lower than 1%\n unique_words = [word for word in unique_words if float(word_count[word]) / vocab_size > frequency_threshold]\n print(\"Vocab size (>%.3f%% frequency): %d\" % ((frequency_threshold * 100), len(unique_words)))\n\n unique_words.sort(key=lambda word: len(word), reverse=True)\n unique_words.append('a')\n unique_words.append('i')\n\n # save unique words for future reuse\n with open(UNIQUE_WORDS_PICKLE, 'w') as f:\n pickle.dump(unique_words, f)\n print(\"unique_words saved for future reuse.\")\n\n return unique_words", "def load_cows(filename):\r\n print(\"Loading words from file...\")\r\n # inFile: file\r\n inFile = open(filename, 'r')\r\n # wordlist: list of strings\r\n wordlist = {}\r\n for line in inFile:\r\n cow = line.split(',')\r\n wordlist[cow[0]] = int(cow[1]) # 0: name, 1: weight\r\n inFile.close()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' 
or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def get_word_freq(filein):\n freq = {}\n\n # Open file handles with context manager\n with open(filein) as f:\n\n # Read a single line at a time so as not to crush memory\n for line in f:\n\n # Tokenize and iterate\n for word in line.split():\n\n # Use try/except instead of if/then for performance\n # Likely after the first 1M tweets that the key will be contained\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\n return freq", "def word_list():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip().lower()\n d[word] = True\n return d", "def set_words(data_path):\n w_df = pd.read_csv(data_path, names=['es','gn','syn1','syn2'], encoding='iso-8859-1') # file -i\n gn_df = w_df[['gn','syn1','syn2']].drop_duplicates()\n gn_lst = gn_df['gn'].tolist()+gn_df['syn1'].tolist()+gn_df['syn2'].tolist()\n cleanedList = [x for x in gn_lst if str(x) != 'nan' and len(x)>=3]\n gn_set = set(cleanedList)\n \n print(len(gn_set))\n \n f = open(data_path[:-4]+\".txt\", 'w')\n for w in gn_set:\n f.write('{}\\n'.format(w))\n f.close()\n \n return list(gn_set)", "def fill_words_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling words table')\n c = conn.cursor()\n val1 = statistics.book_name(path, filemoving, parser).replace(' ', '_')\n sql1 = \"CREATE TABLE \" + val1 + \" (word text, count integer, count_uppercase integer)\"\n c.execute(sql1)\n val2 = statistics.frequency(path, filemoving, parser)\n sql2 = \"INSERT INTO \" + val1 + \" VALUES(?,?,?)\"\n for key, value in val2.items():\n if not key.istitle():\n c.execute(sql2, (key, value, (0 if val2.get(key.capitalize()) == None else val2.get(key.capitalize()))))\n logg.writing_log(conn, 'Words table is filled')\n conn.commit()", "def generate_input_with_unknown_words(file_path):\r\n\tseen_tuples = []\r\n\tlabel_matches = dict()\r\n\tfile_lines = []\r\n\twith open(file_path) as f:\r\n\t\tfor line in f:\r\n\t\t\tfile_lines = file_lines + [line.lower().split()]\r\n\t\tword_tuples = zip(file_lines[0::3], file_lines[1::3], file_lines[2::3])\r\n\t\tfor (words, part_of_speech, word_type) in word_tuples:\r\n\t\t\ttype_tuple = zip(words, word_type)\r\n\t\t\tfor word_and_tag in type_tuple:\r\n\t\t\t\tif word_and_tag in seen_tuples:\r\n\t\t\t\t\tlabel_matches.update({word_and_tag : (label_matches.get(word_and_tag, 0) + 1)})\r\n\t\t\t\telse:\r\n\t\t\t\t\ttag = word_and_tag[1]\r\n\t\t\t\t\tunknown_entry = (\"<UNK>\", tag)\r\n\t\t\t\t\tlabel_matches.update({unknown_entry : (label_matches.get(unknown_entry, 0) + 1)})\r\n\t\t\t\t\tseen_tuples.append(word_and_tag)\r\n\treturn label_matches", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def uniquewords(self):\n vas = set({})\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.add(s_i)\n l_i = list(vas)\n self.print(l_i)\n self.write(l_i)\n logging.debug(\"Starting with to\")\n return l_i", "def read_data(file_path):\n words=[]\n dic_word={}\n actual_text=[]\n for line in open(file_path,encoding='utf-8'):\n words_line=line.strip().split(' ')\n for ite in words_line:\n if ite not in dic_word:\n dic_word[ite]=1\n words.extend(words_line)\n actual_text.append(words_line)\n\n\n #with zipfile.ZipFile(file_path) as 
f:\n #words = tf.compat.as_str(f.read(f.namelist()[0])).split()\n\n return words,len(dic_word),actual_text", "def get_words(f, letters):\n # lettrs = []\n # okay = True\n # words = []\n # nline = ''\n # with open(f, 'r') as vocabulary:\n # for line in vocabulary.readlines():\n # nline = line.replace(\"\\n\", \"\").lower()\n # if 4 <= len(nline) <= 9 and letters[4] in nline:\n # lettrs = list(nline)\n # for lettr in lettrs:\n # if lettr not in letters:\n # okay = False\n # break\n # else:\n # okay = True\n # if okay is True:\n # words.append(nline)\n #\n # lettrs = copy.copy(letters)\n # nwords = []\n # okay = True\n # for word in words[::1]:\n # lettrs = copy.copy(letters)\n # for letter in word:\n # if letter in lettrs:\n # lettrs[lettrs.index(letter)] = '0'\n # else:\n # okay = False\n # break\n # if okay is True:\n # nwords.append(word)\n # okay = True\n #\n # unique = True\n # words = []\n # for word in nwords:\n # if nwords.count(word) > 1:\n # nwords.remove(word)\n # nwords.sort()\n # return nwords\n res = []\n cort_letters = []\n our_letters = []\n res = []\n f = open(f, 'r')\n for line in f:\n line = line.replace(\"\\n\", \"\").strip().lower()\n if 4 <= len(line) <= 9:\n if letters[4] in line:\n count = 0\n for each_letter in line:\n if each_letter in letters:\n count += 1\n if count == len(line):\n our_letters.append(line)\n f.close()\n for each_word in our_letters:\n count_let = 0\n for each_letter in each_word:\n if each_word.count(each_letter) <= letters.count(each_letter):\n count_let += 1\n if count_let == len(each_word):\n res.append(each_word)\n for each in res:\n if res.count(each) > 1:\n res.remove(each)\n return sorted(res)", "def add_encryptors(word):\r\n\r\n assert isinstance(word, str), 'Strings only!'\r\n if word == \"\": return None\r\n file = open(r'words.txt', 'r')\r\n for line in file:\r\n first_word = line.split()[0]\r\n if word == first_word:\r\n print('Error, word is already added to the list!')\r\n return 'Error, word is already added to the list!'\r\n file.close()\r\n\r\n\r\n new_encryption = str(create_encryptors())\r\n blank = True\r\n while blank == True:\r\n file = open(r'words.txt', 'r')\r\n blank = False\r\n for line in file:\r\n if new_encryption == line.split()[2]:\r\n new_encryption = str(create_encryptors())\r\n blank = True\r\n file.close()\r\n break\r\n file.close()\r\n\r\n\r\n if len(word) < 4: tabs = 3\r\n elif len(word) <8: tabs = 2\r\n else: tabs = 1\r\n\r\n file = open(r'words.txt', 'a')\r\n file.write(word + '\\t'*tabs + \"= \" + new_encryption + \"\\n\")", "def words_occur():\n\n # Get the file name from keyboard\n # file_name = input(\"Enter the the file name: \")\n file_name = 'Untitled.txt'\n\n # File open, read and save in the word_list\n f = open(file_name, 'r')\n word_list = f.read().split()\n f.close()\n\n # Get the unic words inclusion's number\n occurs_dict = {}\n for word in word_list[:10]: # test on first ten\n if word.isalpha: # why isalpha isn't working?\n print(word)\n # Increment the counter\n occurs_dict[word] = occurs_dict.get(word, 0) + 1\n\n # Present results\n print(\"File %s has %d words (%d are unique)\" \\\n % (file_name, len(word_list), len(occurs_dict)))\n List = [occurs_dict.values].sort(reverse=True) # how to sort counts?\n print(List)\n print(occurs_dict)", "def _read_words(self, path):\r\n\r\n word_file = open(path)\r\n for line in word_file.readlines():\r\n pair = line.split('::')\r\n self.insert(pair[0], pair[1].rstrip())\r\n word_file.close()", "def parse_file(input_lst):\n word_dct = {}\n for line in 
input_lst:\n raw_output = line.split() # these are lists of strings\n for str_ in raw_output: # strings\n str_ = str_.lower()\n str_ = str_.replace(\"-\", \" \")\n str_ = str_.replace(\"?\", \"\")\n str_ = str_.replace(\"!\", \"\")\n str_ = str_.replace(\",\", \"\")\n str_ = str_.replace(\"\\'\", \"\")\n str_ = str_.replace('\\\"', \"\")\n str_ = str_.replace(\".\", \"\")\n if str_ not in word_dct:\n word_dct[str_] = 1\n else:\n word_dct[str_] += 1\n return word_dct", "def __line_parse(index: int, line: list, dictionary: dict, word_list: list):\n\n if index + 2 >= len(line):\n return\n word_1 = line[index + 2]\n word_2 = line[index + 1]\n word_3 = line[index]\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_1\"): {\n\n },\n str(word_1 + \"_2\"): {\n\n },\n str(word_1 + \"_3\"): {\n\n }\n }\n if word_2 not in dictionary:\n dictionary[word_2] = {\n str(word_2 + \"_1\"): {\n\n },\n str(word_2 + \"_2\"): {\n\n },\n str(word_2 + \"_3\"): {\n\n }\n }\n if word_3 not in dictionary:\n dictionary[word_3] = {\n str(word_3 + \"_1\"): {\n\n },\n str(word_3 + \"_2\"): {\n\n },\n str(word_3 + \"_3\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n if word_2 not in word_list:\n word_list.append(word_2)\n if word_3 not in word_list:\n word_list.append(word_3)\n \"\"\" word_3 word_2 word_1\"\"\"\n if word_2 not in dictionary[word_1][str(word_1 + \"_1\")]:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = 1\n else:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = dictionary[word_1][str(word_1 + \"_1\")][word_2] + 1\n if word_3 not in dictionary[word_1][str(word_1 + \"_2\")]:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = 1\n else:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = dictionary[word_1][str(word_1 + \"_2\")][word_3] + 1\n if word_3 not in dictionary[word_2][str(word_2 + \"_1\")]:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = 1\n else:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = dictionary[word_2][str(word_2 + \"_1\")][word_3] + 1\n if index + 3 >= len(line) or line[index + 3] == \"\":\n return\n word_0 = line[index + 3]\n if word_0 not in dictionary:\n dictionary[word_0] = {\n str(word_0 + \"_1\"): {\n\n },\n str(word_0 + \"_2\"): {\n\n },\n str(word_0 + \"_3\"): {\n\n }\n }\n\n if word_0 not in word_list:\n word_list.append(word_0)\n\n if word_3 not in dictionary[word_0][str(word_0 + \"_3\")]:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = 1\n else:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = dictionary[word_0][str(word_0 + \"_3\")][word_3] + 1", "def make_words(self,lm):\n if \" \" in self.corpus[0] and \" \" in self.corpus[1]: \n print \"assuming BLICK\"\n self.corpus = [convert_to_disc(i) for i in self.corpus]\n else:\n self.disc = 1\n print \"assuming Disc\" \n if not os.path.isfile(self.f): ##check if it already exists\n print \"generating 10 million words\"\n outfile = open(self.f, \"w\")\n outfile.write(\"word,blick,ngram,Real,T,disc\\n\")\n for word in self.corpus:\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Real\", \"1\")\n while len(self.wordlist)<10000000: \n words = lm.generate(100)\n for word in words:\n if word not in self.wordlist and len(word) < 9: #keep only words less than len9\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Simulated\", \"0\")\n self.wordlist[word] = 0\n return", "def refine_tokens( self, tokens ):\n k = 1.75\n b = 0.75\n stop_words_file = \"stop_words.txt\"\n all_stopwords = 
list()\n refined_tokens_sources = dict()\n \n # collect all the stopwords\n with open( stop_words_file ) as file:\n lines = file.read()\n all_stopwords = lines.split( \"\\n\" )\n \n for source in tokens:\n refined_tokens = dict()\n files = dict()\n inverted_frequency = dict()\n file_id = -1\n total_file_length = 0\n for item in tokens[ source ]:\n file_id += 1\n file_tokens = tokens[ source ][ item ].split(\" \")\n if source in \"name_desc_edam_help\":\n file_tokens = utils._clean_tokens( file_tokens, all_stopwords )\n total_file_length += len( file_tokens )\n term_frequency = dict()\n for token in file_tokens:\n if token is not '':\n file_ids = list()\n if token not in inverted_frequency:\n file_ids.append( file_id )\n else:\n file_ids = inverted_frequency[ token ]\n if file_id not in file_ids:\n file_ids.append( file_id )\n inverted_frequency[ token ] = file_ids\n # for term frequency\n if token not in term_frequency:\n term_frequency[ token ] = 1\n else:\n term_frequency[ token ] += 1\n files[ item ] = term_frequency\n N = len( files )\n average_file_length = float( total_file_length ) / N\n # find BM25 score for each token of each tool. It helps to determine\n # how important each word is with respect to the tool and other tools\n for item in files:\n file_item = files[ item ]\n file_length = len( file_item )\n for token in file_item:\n tf = file_item[ token ]\n # normalize the term freq of token for each document\n tf = float( tf ) / file_length\n idf = np.log2( N / len( inverted_frequency[ token ] ) )\n alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )\n tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )\n tf_idf = tf_star * idf\n file_item[ token ] = tf_idf\n # filter tokens based on the BM25 scores and stop words. Not all tokens are important\n for item in files:\n file_tokens = files[ item ]\n tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]\n sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )\n refined_tokens[ item ] = sorted_tokens\n tokens_file_name = 'tokens_' + source + '.txt'\n token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name )\n with open( token_file_path, 'w' ) as file:\n file.write( json.dumps( refined_tokens ) )\n file.close()\n refined_tokens_sources[ source ] = refined_tokens\n return refined_tokens_sources", "def process_raw_phrases(file_path):", "def load_input_word_list(file_path):\n if not os.path.isfile(file_path):\n return False\n\n word_list = list()\n\n with open(file_path, 'r') as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n\n data = line.split(' ')\n text = data[0].lower().strip(Setting.NONWORD_CHARACTERS)\n\n if not text:\n continue\n\n text = text.replace('_', ' ')\n\n score = float(data[1])\n\n if score < 0:\n kind = WordKindEnum.NEG\n else:\n kind = WordKindEnum.POS\n\n word = Word(text, score, kind)\n word_list.append(word)\n\n return word_list", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def word_count(filename):\n \n word_counts = {}\n\n with open(filename) as file_:\n for line in file_:\n # strip white space\n words = line.split()\n # iterate over words and strip excess punctutation then add to dict\n for word in words:\n word = word.strip(\",.\\\";:?_!\").lower()\n word_counts[word] = word_counts.get(word, 0) + 1\n\n # print list of words and count\n for word, count in word_counts.iteritems():\n 
print \"{} {}\".format(word, count)", "def read_file(filename):\n print(\"Reading dictionary: \" +filename)\n word_dict = set()\n\n dictionary = open(filename)\n\n # Read each word from the dictionary\n for word in dictionary:\n # Remove the trailing newline character\n word = word.rstrip('\\n')\n\n # Convert to lowercase\n word = word.lower()\n\n word_dict.add(word)\n\n dictionary.close()\n\n return word_dict", "def process_line(line, hist):\n # replace hyphens with spaces before splitting\n line = line.replace('-', ' ')\n wordlist=[]\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n\n wordlist.append(word)\n # update the histogram\n #hist[word] = hist.get(word, 0) + 1\n return wordlist", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"infile\", help=\"Text file to be analyzed.\")\n args = parser.parse_args()\n with open(args.infile, encoding=\"utf-8\") as f:\n text = f.read()\n words = text.split()\n unique_words(words)", "def analyze_file(file_contents):\r\n load_dict = [];\r\n for a in file_contents:\r\n load_dict.append((char_score(a)[0][0],a))# should \r\n with open(\"./scores.txt\",'w') as scr:\r\n scr.write(str(load_dict)); \r\n return load_dict;", "def buildCorpus(self, filename, stopwords_file=None):\n with open(filename, 'r') as infile:\n # use pattern.subs\n # doclines = [line.rstrip().lower().split(' ') for line in infile]\n doclines = [self.help_clean(line) for line in infile]\n n_docs = len(doclines)\n self.vocab = list({v for doc in doclines for v in doc})\n if stopwords_file:\n with open(stopwords_file, 'r') as stopfile:\n stops = stopfile.read().split()\n self.vocab = [x for x in self.vocab if x not in stops]\n self.vocab.sort()\n self.documents = []\n for i in range(n_docs):\n self.documents.append({})\n for j in range(len(doclines[i])):\n if doclines[i][j] in self.vocab:\n self.documents[i][j] = self.vocab.index(doclines[i][j])", "def tokenize_and_split(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n i = -1\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n else : \n i = i - 1\n w.append(dic[word])\n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n \n return dic,list1,list2", "def load_train_word_dict():\n train_dict = {}\n with open(TRANSCRIPTION_PATH) as file:\n for line in file:\n if int(line[0:3]) < 300:\n word_id, transcript = str.split(line, \" \")\n train_dict[word_id] = transcript.rstrip('\\n')\n return train_dict", "def read_data(filename, n_words):\n with open(filename) as f:\n filter_set = set()\n unsorted_res = []\n words = []\n count = []\n for line in f:\n word = line.strip()\n if len(word) == 0:\n continue\n word_idx_list = [int(idx) for idx in word.split(',')]\n filter_set.add(tuple(word_idx_list))\n words.append(tuple(sorted(word_idx_list)))\n words_counter = collections.Counter(words)\n most_common_words = dict()\n most_common_words_counter = words_counter.most_common(n_words)\n for item in most_common_words_counter:\n most_common_words[item[0]] = item[1]\n\n unsorted_res = dict()\n for w in filter_set:\n sorted_tuple = tuple(sorted(list(w)))\n if sorted_tuple in most_common_words:\n unsorted_res[w] = most_common_words[sorted_tuple]\n \n del words\n 
del count\n del filter_set\n del most_common_words\n\n return unsorted_res", "def add_cpd_synonyms_from_datafile(filename = '/Users/wbryant/work/cogzymes/data/SEED/SEED_met_table.csv'):\n \n \n num_syns_added = 0\n num_syns_tested = 0\n \n source = Source.objects.get(name='seed')\n \n met_db_dict = get_model_dictionary(Metabolite, 'id')\n syn_met_dict = get_synonym_met_dict()\n \n counter = loop_counter(count_lines(filename), 'Adding CPD IDs to database')\n \n f_in = open(filename, 'r')\n for line in f_in:\n counter.step()\n add_cpd_to_synonyms(line, source, syn_met_dict, met_db_dict)\n \n counter.stop()", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def generate_input(file_path):\r\n\tlabel_matches = dict()\r\n\tfile_lines = []\r\n\twith open(file_path) as f:\r\n\t\tfor line in f:\r\n\t\t\tfile_lines = file_lines + [line.lower().split()]\r\n\t\tword_tuples = zip(file_lines[0::3], file_lines[1::3], file_lines[2::3])\r\n\t\tfor (words, part_of_speech, word_type) in word_tuples:\r\n\t\t\ttype_tuples = zip(words, word_type)\r\n\t\t\tfor word_and_tag in type_tuples:\r\n\t\t\t\tlabel_matches.update({word_and_tag : (label_matches.get(word_and_tag, 0) + 1)})\r\n\treturn label_matches", "def common_words_safe(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n try:\n with open(filename, 'r') as f:\n contents = f.read()\n except IOError as e:\n print \"IOError {0}: {1}\".format(e.errno, e.strerror)\n return\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance.items(), key=lambda item:item[1], reverse=True)" ]
[ "0.70385915", "0.6925145", "0.6899971", "0.6768178", "0.6651934", "0.6420912", "0.6312901", "0.6296871", "0.6231026", "0.6214866", "0.62069726", "0.6178269", "0.616501", "0.615068", "0.61372495", "0.6134684", "0.612722", "0.61159056", "0.6110232", "0.6060298", "0.6052872", "0.6024224", "0.60239846", "0.59918666", "0.5982138", "0.5981788", "0.5977596", "0.5972763", "0.59598434", "0.59594", "0.5954592", "0.5954314", "0.5950341", "0.5945586", "0.5939405", "0.592284", "0.59227294", "0.5921953", "0.5901857", "0.58915174", "0.58891827", "0.588435", "0.58632594", "0.5839867", "0.5836892", "0.58325803", "0.5831235", "0.5831054", "0.58259505", "0.58204377", "0.58100015", "0.5801213", "0.5798646", "0.57879025", "0.5783743", "0.5783632", "0.5777174", "0.5773674", "0.576629", "0.5762148", "0.5750121", "0.57435894", "0.5742887", "0.5738712", "0.573655", "0.57355934", "0.573217", "0.57258767", "0.572451", "0.5724229", "0.572188", "0.57089233", "0.5708431", "0.5706453", "0.5705184", "0.5672568", "0.5665531", "0.5659035", "0.56569177", "0.56409246", "0.5640634", "0.563733", "0.5634866", "0.563245", "0.56282216", "0.56178486", "0.5599717", "0.55991083", "0.55925417", "0.5591636", "0.55891705", "0.558677", "0.55867386", "0.5585658", "0.5582212", "0.557763", "0.55756855", "0.5572795", "0.5568265", "0.5555953" ]
0.65844196
5
Write the concordance entries to the output file (filename). See sample output files for format.
def write_concordance(self, filename):
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for i in all_keys:
        a = ""
        a += i + ":"
        f = self.concordance_table.get_value(i)
        if f != None:
            for s in f:
                a += " " + str(s)
            a += "\n"
            lines.append(a)
    a = open(filename, "w+")
    for i in lines:
        a.write(i)
    a.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())", "def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)", "def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def write_conll(conll_file, sents):\n with codecs.open(conll_file, mode = 'w', errors = 'ignore', encoding = 'utf-8') as ofile:\n for sent in sents:\n if sent:\n for element in sent:\n word = element[0]\n tag = element[1]\n ofile.write(str(tag) + '\\t' + str(word) + '\\n')\n ofile.write('\\n')", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def 
write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return", "def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)", "def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def write_crf_input(out_file, sentences, poss, lemmas, concepts):\n\n print '\\n\\tWrite out data in crf compliant format'\n f = open(out_file, 'w+')\n for position_i in range(len(sentences)):\n 
for position_j in range(len(sentences[position_i])):\n f.write(\n sentences[ position_i ][ position_j ] + '\\t' +\n poss[ position_i ][ position_j ] + '\\t' +\n lemmas[ position_i ][ position_j ] + '\\t' +\n concepts[ position_i ][ position_j ]\n + '\\n'\n )\n f.write('\\n')\n f.close()\n print '\\t--done'", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')", "def write_dialogue_to_file(utterances, dialogue_index, filename):\n with open(filename, 'a') as file:\n for sentence_index in range(len(utterances[dialogue_index][0])):\n file.write('{0} {1}\\n'.format(utterances[dialogue_index][0][sentence_index],\n utterances[dialogue_index][1][sentence_index]))", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_to_file(info, mode='w', file=\"output4.txt\"):\n with open(file, mode, encoding='utf-8') as f:\n for line in info:\n f.write(' '.join(map(str, line)) + '\\n')", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def write_conll(cls, filename, writer, document_id, sentences):\n with open(filename, 'w') as fd:\n writer.write(fd, document_id, sentences)", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)", "def writeAlltoFile(self):\n with open(self._fname, 'w') as f:\n for elem in self.getAll():\n line = self._writeGratoLine(elem)\n f.write(line + \"\\n\")\n f.close()", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def writeCentrality(filename, data):\n\n filePath = os.path.join(CONFIG['CENTRALITIES_PATH'], filename)\n f = open(filePath, \"w\")\n\n data = {k: v for k, v in sorted(\n data.items(), key=lambda x: x[1], reverse=True)}\n\n for k, v in data.items():\n text = f\"{k:<4}\\t{v:.6f}\\n\"\n f.write(text)", "def write(self, taxonomy, output_file):\n\n fout = open(output_file, 'w')\n for genome_id, taxa in taxonomy.items():\n fout.write(genome_id + '\\t' + ';'.join(taxa) + '\\n')\n fout.close()", "def write(filename):\n print(uc.write(filename))", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_output(self, 
output_file):\n\t\t# Create csv file header.\n\t\theader = ['Cohort', 'Customers',]\n\t\tfor start, end in self.day_ranges:\n\t\t\tday_range_str = '{}-{} days'.format(start, end)\n\t\t\theader.append(day_range_str)\n\n\t\twith open(output_file, 'wb') as fh:\n\t\t\twriter = csv.writer(fh)\n\t\t\twriter.writerow(header)\n\t\t\tfor cohort, cohort_value in self.output.items():\n\t\t\t\twriter.writerow(\n\t\t\t\t\tself.build_row(cohort, cohort_value)\n\t\t\t\t)", "def write_cif_file(self, file_name):\n cif_writer = CifWriter(self.dna_structure)\n cif_writer.write(file_name, self.infile, self.informat )", "def writeOutputToFile(self, expanded_acronyms, file_path):\n output_file = open(file_path, \"w\")\n if expanded_acronyms:\n for acronym in sorted(expanded_acronyms.keys()):\n output_file.write(\n acronym + \",\" + str(self._getExpansion(expanded_acronyms[acronym])) + \"\\n\")\n else:\n output_file.close(string_error_no_results_to_show)\n output_file.close()", "def write_output(word_dict):\n # create an empty output.txt file\n output = open('output.txt', 'w')\n\n for i in words_dict: \n output.write(i + \" : \" + str(words_dict[i]) + \"\\n\")", "def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()", "def writeIntrons(self, filenameout):\n printed = {}\n with open(filenameout, \"w\") as out:\n for features in self.feature_dictionary:\n intro = self.feature_dictionary[features].getIntrons()\n print(self.feature_dictionary[features].introns)\n for introns in intro:\n if \"-\".join([str(i) for i in introns]) not in printed:\n out.write(self.feature_dictionary[features].chromosome_name + \"\\t\"\n + str(introns[0]) + \"\\t\" + str(introns[1]) + \"\\t\" + self.feature_dictionary[features].strand + \"\\n\")\n printed[\"-\".join([str(i) for i in introns])] = 0", "def write_collected(self, names_file, kb_file, cat_file):\n with open(names_file, 'w') as fp:\n for kb_id, name in self.collected_names.items():\n fp.write('\\t'.join(['name', kb_id, name]) + '\\n')\n with open(kb_file, 'w') as fp:\n for kb_id, tail_set in self.collected_edges.items():\n for (rel, tail_id) in tail_set:\n fp.write('\\t'.join([rel, kb_id, tail_id]) + '\\n')\n with open(cat_file, 'w') as fp:\n for c, ms in self.collected_cat_mems.items():\n fp.write(c + '\\t' + self.kb[c].name + '\\t')\n fp.write('|'.join(ms) + '\\n')", "def write_data_file(output_file: str, companies: list):\n with open(output_file, \"w\") as f:\n # s = \"\\n\".join(companies)\n for i in range(len(companies)):\n for k in range(10):\n for j in range(len(companies[i].data[k])):\n s = f\"{i},{companies[i].data[k][j][0].__str__()},{companies[i].data[k][j][1]}\\n\"\n f.write(s)", "def WriteOutput(self, rows, fileName, access='wb'):\n \n outputFile = open(fileName, access)\n try: \n outputFile.write(self.GetBanner())\n csv.writer(outputFile, dialect='excel-tab').writerows(rows)\n print 'Wrote secondary output to: %s' %(fileName) \n except IOError:\n print 'Error writing output to: %s' %(fileName) \n finally:\n outputFile.close()", "def write_cadnano_file(self, file_name):\n cadnano_writer = CadnanoWriter(self.dna_structure)\n cadnano_writer.write(file_name)", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n 
json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def write_output(self, failed_genes):\r\n file_prefix = self.file_name.strip('sambamba_output.txt')\r\n fieldnames = ['GeneSymbol;Accession', 'percentage30']\r\n with open (f'../results/{file_prefix}.coverage_output.csv', 'w', newline = '') as output:\r\n csvwriter = csv.DictWriter(output, fieldnames=fieldnames)\r\n csvwriter.writeheader()\r\n csvwriter.writerows(failed_genes)", "def writecontactstocsv(self , contact_entries):\n rx = re.compile('\\W+')\n allcontacts = []\n for entry in contact_entries:\n if entry.name is not None and len(entry.phone_number) > 0 and len(entry.group_membership_info) > 0:\n\n # Clean up characters in contact name; replace all non-alphanumerics with spaces\n fullname = entry.name.full_name.text\n fullname = rx.sub(' ', fullname).strip()\n for rawPhoneNumber in entry.phone_number:\n # Remove non-numeric characters from the phone number\n phone_number = re.sub(\"[^0-9]\", \"\", rawPhoneNumber.text)\n # Save contact for later insert\n allcontacts.append((fullname, phone_number))\n\n allcontacts = tuple(set(allcontacts))\n\n csvfilename = \"Downloads/ContactExport\"+time.strftime(\"%Y%m%d-%H%M%S\")+\".csv\"\n csvfile = open(csvfilename, \"w\")\n for csvFullName, csvPhoneNumber in allcontacts:\n line = \"\\\"%s\\\",%s\\n\" % (csvFullName, csvPhoneNumber)\n csvfile.write(line)\n\n csvfile.close()", "def build_catalog(filename):\n\n write_to_file(filename)", "def write_output(self):", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. 
Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def write_to_file(self, filename: str) -> None:", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def to_cheetah_file(self, filename):\n translate.write_cheetah(self, filename)\n return", "def writetofile(invertedindex, filename):\n file = open(filename + '.txt', 'w', encoding='utf-8')\n for word in invertedindex.keys():\n file.write(word)\n file.write(' : ')\n for docid in invertedindex[word][0]:\n file.write(str(docid) + ' ')\n file.write('\\n')", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_gct_file(output_file, class_names, class_counts, expression_matrix):\n total_genes = len(expression_matrix)\n first_key = list(expression_matrix.keys())[0]\n total_samples = len(expression_matrix[first_key])\n\n headers = ['NAME', 'DESCRIPTION']\n\n for c_name, c_count in zip(class_names, class_counts):\n for i in range(c_count):\n headers.append('{}_{}'.format(c_name, i + 1))\n\n with open(output_file, 'w') as f:\n f.write('#1.2\\n')\n f.write('{} {}\\n'.format(total_genes, total_samples))\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for g_name, values in expression_matrix.items():\n f.write(g_name)\n f.write('\\tna\\t')\n f.write('\\t'.join(\n ['{0:.2f}'.format(v) for v in values]\n ))\n f.write('\\n')", "def corpusWriter(self):\n with open('corpus.txt', 'w') as file:\n for quote in self.quotes:\n file.write(quote + '\\n')", "def create_chceckfile(artist_list):\n with open(\"Udemy_Course/Object_Oriented_Programing_and_Classes/OOP_Song_Class/checkfile.txt\", \"w\") as checkfile:\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n 
print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format\n (new_artist, new_album, new_song), file=checkfile)", "def write(read_file):\n princesses = filter_by_status(read(read_file))\n princesses = sort_by_status(princesses)\n princesses = sort_by_place(princesses)\n\n file = open(\"princesses_to_save.txt\", \"w\")\n for word in header:\n for something in word:\n file.write(\"{:20}\".format(something))\n file.write(\"\\n\")\n for i in range(len(princesses)):\n str1 = princesses[i]\n for word in str1:\n file.write(\"{:20}\".format(word))\n if i != len(princesses) - 1:\n file.write(\"\\n\")", "def write_csv(self, outputfile):\n d = csv.writer(outputfile, quoting=csv.QUOTE_ALL)\n for row in self.translations.iteritems():\n d.writerow(row)", "def write_candidates_file(self, min_count, stops, tags, filename):\n filename = os.path.join(filename)\n candidates = self.candidates(min_count, stops, tags)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n for wordi, wordj in candidates:\n file.write(\"{} {}\\n\".format(wordi, wordj))\n print(\"Success: Candidates written to '{}'\".format(filename))", "def handle(self, *args, **options):\n self.stdout.write('exporting corpus to text file')\n basetext = '\\n'.join([x.text_str for x in BaseText.objects.all() if x.check_age()])\n with open(os.path.join(BASE_DIR, 'corpus.txt'), 'w') as f:\n f.write(basetext)", "def write_to(self, filepath):\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def output(self, filename):\n with open(filename, 'w') as f:\n op = {}\n layer_res = []\n alphas_res = []\n for layer in self._layers:\n weights = []\n alphas = []\n for neuron in layer._neurons:\n weights.append(neuron._weights)\n alphas.append(neuron._alpha)\n layer_res.append(weights)\n alphas_res.append(alphas)\n op['layers'] = layer_res\n op['alphas'] = alphas_res\n json.dump(op, f, indent='\\t')", "def _write_ce_collector_attributes_file(self, attributes_file):\n attributes_file_contents = (\n \"# Do not edit - file generated by osg-configure\\n\"\n + self.ce_attributes_str + \"\\n\"\n )\n return utilities.atomic_write(attributes_file, attributes_file_contents)", "def write_domains_to_file_by_category(self, file_name, row):\n with open(\"output/\"+file_name+\".csv\", \"a+\") as f:\n f.write(row[self.domain_label] + \",\" + row[self.category_label] + \"\\n\")", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def write(self, filename):\n f = open(filename, 'w')\n 
f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed", "def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def write_exact_graph_to_file(self, output_file):\n print(\"Writing output file.\")\n with open(output_file, 'w') as f:\n f.write(\"# graph number = 0 name = interval_graph\\n\")\n f.write(str(len(self.vertices)) + \"\\n\")\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n f.write(\"{} {} {}\\n\".format(s, t, w))", "def whriteInOuput(finalOutput):\n\n os.chdir(\"D:/IIHT/Python/Project/NRPT all companies scrapper/caches\")\n #open text file, return an object of type io.TextIOWrapper\n with open(\"Companies Website.txt\", \"w\") as writ:\n #write each line in the object op, return an object of type int\n writ.write('\\n'.join(finalOutput) + \"\\n\")", "def WriteFile(self, filename) :\n\n # open file for writing:\n f = open(filename, 'w')\n\n ## loop over key/value pairs:\n #for k,v in self.iteritems():\n # # add line; at least the specified number of characters \n # # is used for the key:\n # f.write( '%-20s:%s\\n' % (k,v) )\n ##endfor\n\n # write processed input:\n f.writelines(self.outfile)\n \n # close file:\n f.close()", "def output(owners, filename):\n\n out = open(filename, 'wb')\n writer = csv.writer(out)\n writer.writerow([\n 'Property Address',\n 'License Type',\n 'House',\n 'Street',\n 'License / Folio number',\n 'Civic address',\n 'Business name 1',\n 'Business name 2',\n 'Mail address 1',\n 'Mail address 2',\n 'Total Assess',\n 'Included Assess',\n 'Ann Chg',\n 'Unit'\n ])\n\n for owner in owners:\n owner.output_to(writer)", "def write_po(self, outputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")", "def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# 
Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()", "def write_output(output,fasta,CDR1_pos,CDR2_pos):\n # fasta file is the igblast input file\n with open(output, 'w') as f:\n header = \"\\t\".join(['Name', 'CDRL1_kabat_AA', 'CDRL2_kabat_AA'])\n f.write(header + '\\n')\n for record in SeqIO.parse(fasta, \"fasta\"):\n ID = str(record.id)\n seq = str(record.seq)\n CDR1_aa=''\n CDR2_aa = ''\n CDR1_index = CDR1_pos[ID]\n CDR2_index = CDR2_pos[ID]\n if CDR1_index != []:\n CDR1_start, CDR1_end = fix_aa_pos((int(CDR1_index[0]) - 1), int(CDR1_index[1]))\n CDR1_nuc = seq[CDR1_start:CDR1_end]\n CDR1_aa = translation(CDR1_nuc)\n if CDR2_index != []:\n CDR2_start, CDR2_end = fix_aa_pos((int(CDR2_index[0]) - 1), int(CDR2_index[1]))\n CDR2_nuc = seq[CDR2_start:CDR2_end]\n CDR2_aa = translation(CDR2_nuc)\n f.write(\"\\t\".join([ID, CDR1_aa, CDR2_aa]) + '\\n')", "def storePatternsInFile(self, outFile):\n self.oFile = outFile\n writer = open(self.oFile, 'w+')\n for x, y in self.finalPatterns.items():\n patternsAndSupport = str(x) + \":\" + str(y)\n writer.write(\"%s \\n\" % patternsAndSupport)", "def _write_interactions_to_file(self, results, writer):\n for index_pair, interactions in results.items():\n repetition = 0\n for interaction, results in interactions:\n\n if results is not None:\n (\n scores,\n score_diffs,\n turns,\n score_per_turns,\n score_diffs_per_turns,\n initial_cooperation,\n cooperations,\n state_distribution,\n state_to_action_distributions,\n winner_index,\n ) = results\n for index, player_index in enumerate(index_pair):\n opponent_index = index_pair[index - 1]\n row = [\n self.num_interactions,\n player_index,\n opponent_index,\n repetition,\n str(self.players[player_index]),\n str(self.players[opponent_index]),\n ]\n history = actions_to_str([i[index] for i in interaction])\n row.append(history)\n\n if results is not None:\n row.append(scores[index])\n row.append(score_diffs[index])\n row.append(turns)\n row.append(score_per_turns[index])\n row.append(score_diffs_per_turns[index])\n row.append(int(winner_index is index))\n row.append(initial_cooperation[index])\n row.append(cooperations[index])\n\n states = [(C, C), (C, D), (D, C), (D, D)]\n if index == 1:\n states = [s[::-1] for s in states]\n for state in states:\n row.append(state_distribution[state])\n for state in states:\n row.append(\n state_to_action_distributions[index][(state, C)]\n )\n row.append(\n state_to_action_distributions[index][(state, D)]\n )\n\n row.append(\n int(cooperations[index] >= cooperations[index - 1])\n )\n\n writer.writerow(row)\n repetition += 1\n self.num_interactions += 1", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write_index_to_file(output_file, items): \n \n file = open(output_file, 'w')\n for item in items: \n str0 = str(item[0])\n str1 = ' '.join(str(x) for x in item[1])\n file.write( str0 + ' ' + str1 + '\\n') \n # file.write(item)\n print ('An inverted index has been writted in file')\n 
file.close()", "def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))", "def write_to_file(self):\n\t\twith self.driver.session() as session:\n\t\t\ttry:\n\t\t\t\tfile_name = None\n\t\t\t\tdonor_file_name = None\n\t\t\t\tfull_file_name = None\n\t\t\t\tcomplete_file_name = None\n\t\t\tcyph = \"MATCH (d:Donor) RETURN d.email\"\n\t\t\tresult = session.run(cyph) \n\t\t\t\n\t\t\tfor donor in result:\n\t\t\t\tdonor_file_name = donor['email']\n\t\t\t\tfull_file_name = \"{}.txt\".format(donor_file_name)\n\t\t\t\tcomplete_file_name = os.path.join(os.getcwd(), full_file_name)\n\t\t\t\tletter = self.letter_to_file(donor_full_name,total_donations)\n\t\t\t\twith open(complete_file_name, 'w+') as f:\n\t\t\t\t\tf.write(letter)\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Error occurred. See below\")\n\t\t\t\tprint(e)\n\t\t\t\tprint('\\n')\n\n\tdef letter_to_file(self, donor):\n\t\tstr_letter = \"Dear {},\\n\\tThank you for your kind donation(s).\\n\\tIt will be put to very good use.\\n\\t\\tSincerely,\\n\\t\\t\\t-The Team\".format(donor)\n\t\treturn str_letter\n\n\tdef show_donors(self):\n\t\t\"\"\"\n\t\tLists the donor names\n\t\t\"\"\"\n\t\twith self.driver.session() as session:\n\t\t\tstr_build = \"\"\n\t\t\ttry:\n\t\t\t\tcyph = \"\"\"\n\t\t\t\tMATCH (d:Donor)\n\t\t\t\tRETURN d.full_name as full_name, d.email as email\n\t\t\t\t\"\"\"\n\t\t\t\tresult = session.run(cyph)\n\t\t\t\tfor record in result:\n\t\t\t\t\tstr_build += record['full_name'] + ' -- ' + record['email'] + '\\n'\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Error occurred. 
See below.\")\n\t\t\t\tprint(e)\n\t\treturn str_build", "def write(self, fname):\n pass", "def compDataWriter(sentences, predictions, output_file):\n assert len(sentences) == len(predictions), \"Missing predictions for sentences!\"\n lines = list()\n for k in range(len(sentences)):\n assert len(sentences) == len(predictions), \"Missing tag predictions for words!\"\n sentence = sentences[k]\n tags = predictions[k]\n line_list = [sentence[i]+TAGCHAR+tags[i] for i in range(len(sentence))]\n line = WHITESPACE.join(line_list)\n lines.append(line)\n assert len(lines) == len(sentences), \"Missing tagged sentence!\"\n with open(output_file, 'w') as file:\n file.write(\"\\n\".join(lines))", "def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')", "def write_to_file(content, filename):\n if not os.path.isfile(filename): # Checking if file already exists, don't append data if it does.\n for j in range(len(content)): # For each dialog in dialogues array.\n with open(filename, 'a') as file: # Open a text file in append mode and write data into it.\n for k in range(len(content[j][0])):\n file.write('{0} {1}\\n'.format(str(content[j][0][k]).lower().split(\"(\")[0],\n str(content[j][1][k])).lower())", "def write(self, outputfile):\n outfile = open(outputfile, 'w')\n if (outputfile.lower().endswith('.po')):\n self.write_po(outfile)\n elif (outputfile.lower().endswith('.json')):\n self.write_json(outfile)\n elif (outputfile.lower().endswith('.xml')):\n self.write_properties(outfile)\n outfile.close()", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def writeCADFile(self, filename):\n valid_filetypes = [\"brep\", \"bstl\", \"egads\", \"egg\", \"iges\", \"igs\", \"sens\", \"step\", \"stl\", \"stp\", \"tess\", \"grid\"]\n file_extension = filename.split(\".\")[-1]\n if file_extension.lower() not in valid_filetypes:\n raise OSError(\n \"CAD filename \"\n + filename\n + \" must have a valid exension. \"\n + \"Consult the EngineeringSketchPad docs for the DUMP function\"\n )\n if self.comm.rank == 0:\n modelCopy = self.espModel.Copy()\n n_branches, _, _ = modelCopy.Info()\n modelCopy.NewBrch(\n n_branches, modelCopy.GetCode(\"dump\"), \"<none>\", 0, filename, \"0\", \"0\", \"0\", \"\", \"\", \"\", \"\", \"\"\n )\n modelCopy.Build(0, 0)" ]
[ "0.7794726", "0.66742295", "0.64932483", "0.64526165", "0.6379942", "0.63655496", "0.63634735", "0.62910575", "0.6240714", "0.6233921", "0.6233921", "0.6233921", "0.61785156", "0.61412483", "0.61257005", "0.610843", "0.6082861", "0.60720426", "0.6064205", "0.60603034", "0.59847915", "0.5953382", "0.5949586", "0.59256744", "0.59232116", "0.59232116", "0.5918855", "0.5918259", "0.591524", "0.59104925", "0.5906709", "0.59009737", "0.58934045", "0.58857846", "0.58828205", "0.5871059", "0.58642375", "0.58520657", "0.5847941", "0.58440024", "0.5836401", "0.5835469", "0.58210695", "0.580742", "0.57888246", "0.5781007", "0.5777897", "0.5768409", "0.5767276", "0.5766805", "0.57632726", "0.5750538", "0.57470477", "0.5733779", "0.573351", "0.572666", "0.5726504", "0.57206035", "0.5718034", "0.57089895", "0.5704777", "0.5703387", "0.56983274", "0.56963384", "0.56868637", "0.5682352", "0.56822103", "0.56810313", "0.5677822", "0.5676366", "0.5673577", "0.56727034", "0.5670391", "0.5669521", "0.5668411", "0.566751", "0.5666728", "0.5647113", "0.56437045", "0.5642565", "0.5636676", "0.56290835", "0.5627325", "0.5618395", "0.5613953", "0.56119215", "0.5611875", "0.5611426", "0.560923", "0.5607486", "0.5606954", "0.560589", "0.560346", "0.5602905", "0.5598933", "0.55940706", "0.5592159", "0.55870605", "0.557992", "0.557991" ]
0.7876976
0
Builds a k-factor circulant matrix (A matrix with the structure of circulant matrices, but with the (...TRUNCATED)
"def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.tra(...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (...TRUNCATED)
["0.6495986","0.6089255","0.6045119","0.59890914","0.5949488","0.59035623","0.5859298","0.58462423",(...TRUNCATED)
0.78092545
0
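The `factor_circulant_matrix` snippet above is cut off, but the mask it starts building (lower-triangular ones plus k times the strictly upper triangle) matches the usual k-factor circulant convention: take an ordinary circulant matrix and scale the wrapped-around entries, which sit strictly above the main diagonal, by k. A minimal dense sketch of that construction (an illustrative reading of the truncated code, not a verbatim completion):

```python
import numpy as np
from scipy.linalg import circulant

def kfactor_circulant_dense(x, k):
    """Dense k-factor circulant matrix with first column x.

    Entries on or below the diagonal come straight from circulant(x);
    the wrapped-around entries (strictly above the diagonal) are scaled by k.
    """
    n = len(x)
    C = circulant(x).astype(float)
    C[np.triu_indices(n, 1)] *= k      # scale the wrapped-around part
    return C

print(kfactor_circulant_dense(np.arange(1.0, 5.0), 3.0))
```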
Compute the matrix-vector product y = Cu where C is a k-factor circulant matrix. All matrices are real(...TRUNCATED)
"def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arang(...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tenso(...TRUNCATED)
["0.6325033","0.6273725","0.6251581","0.62479377","0.6177961","0.6087597","0.6022537","0.60215706","(...TRUNCATED)
0.693636
0
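Before it is truncated, the `factor_circulant_multiplication` snippet above builds `D_k = (k**(1/n))**np.arange(n)`; that diagonal scaling is the standard reduction of a k-factor circulant product to an ordinary circular convolution, because D C_k D^{-1} is circulant when D = diag(k^(j/n)). A self-contained sketch of that reduction for k > 0, checked against a dense build (an assumed completion of the truncated code, not the dataset's exact text):

```python
import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import circulant

def kfactor_circulant_matvec(u, x, k):
    # D C_k D^{-1} is an ordinary circulant matrix with first column D*x,
    # so C_k @ u = D^{-1} * ifft(fft(D*x) * fft(D*u)).  Assumes k > 0.
    n = len(u)
    D_k = (k ** (1.0 / n)) ** np.arange(n)
    return np.real(ifft(fft(D_k * x) * fft(D_k * u)) / D_k)

rng = np.random.default_rng(0)
n, k = 8, 3.0
x, u = rng.standard_normal(n), rng.standard_normal(n)

# Dense reference: circulant(x) with its strictly upper triangle scaled by k.
C = circulant(x).astype(float)
C[np.triu_indices(n, 1)] *= k

assert np.allclose(kfactor_circulant_matvec(u, x, k), C @ u)
```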
Compute the matrix-vector product y = Cu where C is a circulant matrix. All matrices are real.
def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.sha(...TRUNCATED)
["0.650418","0.650212","0.6441079","0.6313763","0.6310517","0.62949276","0.62782884","0.62631303","0(...TRUNCATED)
0.6389226
3
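The `circulant_multiplication` entry above is the textbook identity that the DFT diagonalizes circulant matrices, so the matrix-vector product is just a circular convolution computed in O(n log n). A quick check against a dense product, using the same NumPy/SciPy helpers the snippet assumes:

```python
import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import circulant

rng = np.random.default_rng(1)
n = 8
a = rng.standard_normal(n)               # first column of the circulant matrix
u = rng.standard_normal(n)

y_fft = np.real(ifft(fft(a) * fft(u)))   # O(n log n) circular convolution
y_dense = circulant(a) @ u               # O(n^2) dense reference

assert np.allclose(y_fft, y_dense)
```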
Compute the matrix-vector product y = Tu where T is a Toeplitz matrix. All matrices are real.
"def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u(...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append((...TRUNCATED)
["0.7003199","0.6513981","0.64759356","0.6454179","0.6377554","0.6326698","0.6245358","0.620894","0.(...TRUNCATED)
0.63380134
5
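The truncated `toeplitz_multiplication` snippet above (note the `r = c` default for the symmetric case) appears to use the standard trick of embedding the n-by-n Toeplitz matrix in a 2n-by-2n circulant matrix and keeping the first n entries of the circular convolution. A self-contained sketch of that embedding, verified against `scipy.linalg.toeplitz` (an illustration of the technique, not the snippet's exact code):

```python
import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import toeplitz

def toeplitz_matvec(u, c, r=None):
    # Embed T (first column c, first row r) in a circulant matrix of size 2n
    # whose first column is [c, 0, r[n-1], ..., r[1]]; the first n entries of
    # that circulant times [u, 0] equal T @ u.
    if r is None:
        r = c
    n = len(u)
    col = np.concatenate((c, [0.0], r[1:][::-1]))
    y = ifft(fft(col) * fft(np.concatenate((u, np.zeros(n)))))
    return np.real(y[:n])

rng = np.random.default_rng(2)
n = 6
c = rng.standard_normal(n)
r = np.concatenate((c[:1], rng.standard_normal(n - 1)))   # r[0] must equal c[0]
u = rng.standard_normal(n)

assert np.allclose(toeplitz_matvec(u, c, r), toeplitz(c, r) @ u)
```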
Solves Tx=b using the Levinson algorithm where T is a positive-definite symmetric Toeplitz matrix, b i(...TRUNCATED)
"def levinson(r, b):\n\n n = len(b)\n y = zeros((n,))\n x = zeros((n,))\n \n # normal(...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def _tridisolve(d, e, b, overwrite_b=True):\n\t\tN = len(b)\n\t\t# work vectors\n\t\tdw = d.copy()(...TRUNCATED)
["0.63466734","0.61827254","0.61033237","0.6093494","0.60769826","0.5885008","0.58844715","0.5877297(...TRUNCATED)
0.7257071
0
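The `levinson` entry above solves Tx = b with the O(n^2) Levinson recursion instead of a generic O(n^3) factorization. SciPy exposes the same recursion as `scipy.linalg.solve_toeplitz`, which gives an easy cross-check on a positive-definite symmetric Toeplitz system (the AR(1)-style matrix below is just a convenient SPD example):

```python
import numpy as np
from scipy.linalg import toeplitz, solve_toeplitz

n = 6
r = 0.5 ** np.arange(n)          # first column (= first row) of an SPD Toeplitz matrix
b = np.arange(1.0, n + 1)

x_levinson = solve_toeplitz(r, b)              # Levinson-Durbin recursion
x_reference = np.linalg.solve(toeplitz(r), b)  # dense O(n^3) reference

assert np.allclose(x_levinson, x_reference)
```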
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is com(...TRUNCATED)
"def toeplitz_slogdet(r):\n n = len(r)\n r_0 = r[0]\n \n r = np.concatenate((r, np.array(...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n (...TRUNCATED)
["0.7205463","0.69225436","0.6803772","0.6577487","0.65662503","0.6258033","0.6235449","0.6192166","(...TRUNCATED)
0.6977162
1
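The `toeplitz_slogdet` entry above computes the log-determinant of a positive-definite symmetric Toeplitz matrix recursively from its first row, without forming the dense matrix (the docstring is cut off, but a Levinson-type recursion is the standard approach). For small n the value can be sanity-checked against a Cholesky-based log-determinant (a generic check, independent of the snippet's internals):

```python
import numpy as np
from scipy.linalg import toeplitz

n = 6
T = toeplitz(0.5 ** np.arange(n))     # SPD symmetric Toeplitz matrix

sign, logdet = np.linalg.slogdet(T)
logdet_chol = 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(T))))

assert sign > 0 and np.isclose(logdet, logdet_chol)
```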
Preprocessing needed for toeplitz_inverse_multiplication()
"def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != (...TRUNCATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
["def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop o(...TRUNCATED)
["0.65743506","0.63173485","0.60780877","0.60345995","0.5920918","0.5710167","0.5684219","0.56176597(...TRUNCATED)
0.65871215
0
