15 Commits

21 changed files with 1098 additions and 87 deletions

4
.gitignore vendored
View File

@@ -7,5 +7,9 @@
.vscode .vscode
__pycache__ __pycache__
run.sh
prev prev
old old
data
venv
issue992/output

View File

@@ -9,3 +9,18 @@ Potrebne datoteke:
Priporocam: pypy3 paket za hitrejse poganjanje. Priporocam: pypy3 paket za hitrejse poganjanje.
Primer uporabe: `python3 wani.py ssj500k.xml Kolokacije_strukture.xml izhod.csv` Primer uporabe: `python3 wani.py ssj500k.xml Kolokacije_strukture.xml izhod.csv`
## Instructions for running on GF
We suggest keeping the saved MySQL database files in tmpfs. Instructions:
```bash
sudo mkdir /mnt/tmp
sudo mount -t tmpfs tmpfs /mnt/tmp
```
If running on big corpora (i.e. Gigafida), enlarge the tmpfs mount so the whole database fits in RAM:
```bash
sudo mount -t tmpfs tmpfs /mnt/tmp
sudo mount -o remount,size=110G,noexec,nosuid,nodev,noatime /mnt/tmp
```

37
issue992/extract.py Normal file
View File

@@ -0,0 +1,37 @@
import sys
import tqdm
# Filter corpus CSV exports down to the rows whose lemma columns contain one
# of the hand-picked "good" lemmas; one output file per lemma under polona/.
good_lemmas = ["absurd", "absurdnost", "akuten", "akutno", "alkohol", "alkoholen", "aluminijast", "ananas", "aplikacija", "aplikativen", "aranžma", "arbiter", "armada", "avtomatičen", "avtomatiziran", "babica", "bajen", "bajka", "bakren", "bambusov", "barvan", "barvanje", "baseballski", "bazar", "bazičen", "belina", "bezgov", "bičati", "bife", "bilka", "biomasa", "biotop", "birma", "bivol", "blago", "blaženost", "bliskavica", "bobnič", "bolha", "bolnišnica", "bor", "borov", "borovničev", "brati", "briljant", "briti", "brusiti", "bučanje", "cikličen", "civilizacija", "dopust", "drama", "drezati", "duda", "dvorezen", "embalaža", "faks", "farsa", "glasno", "informiranje", "interier", "intima", "intimno", "investirati", "ironično", "istovetiti", "izvožen", "jagoda", "jeklar", "jezik", "karbon", "kitara", "kodrast", "molče", "mučiti", "novinarski", "obala", "občevati", "okrasiti", "pajčevina", "panoga", "prevajanje", "prevajati", "previti", "prihraniti", "priloga", "prisluškovati", "sopara"]

N1 = len(good_lemmas)
N2 = len(sys.argv) - 1

# One output handle per lemma (the polona/ directory must already exist).
files_to_write = [open("polona/{}".format(l), 'w') for l in good_lemmas]

for fidx, filename in enumerate(sys.argv[1:]):
    # Read the header to locate the lemma columns, then slurp the data rows.
    with open(filename, 'r') as fp:
        print("loading next...", end="", flush=True)
        header = fp.readline()
        lemma_rows = [idx for idx, cell in enumerate(header.split(",")) if "_Lemma" in cell]
        file_lines = fp.read().split("\n")
    for lidx, good_lemma in enumerate(good_lemmas):
        # Progress indicator: file.lemma / total_files.total_lemmas.
        spaces = " " * 20 if lidx == 0 else ""
        print("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces), end="", flush=True)
        out = files_to_write[lidx]
        for row in file_lines:
            # Cheap substring pre-filter before the exact column comparison.
            if good_lemma not in row:
                continue
            cells = row.split(',')
            for col in lemma_rows:
                if cells[col] == good_lemma:
                    print(row, file=out)
                    break

for handle in files_to_write:
    handle.close()

81
issue992/files Normal file
View File

@@ -0,0 +1,81 @@
../data/gf2filesres/izhod.csv.100
../data/gf2filesres/izhod.csv.101
../data/gf2filesres/izhod.csv.102
../data/gf2filesres/izhod.csv.103
../data/gf2filesres/izhod.csv.104
../data/gf2filesres/izhod.csv.105
../data/gf2filesres/izhod.csv.106
../data/gf2filesres/izhod.csv.107
../data/gf2filesres/izhod.csv.108
../data/gf2filesres/izhod.csv.12
../data/gf2filesres/izhod.csv.13
../data/gf2filesres/izhod.csv.14
../data/gf2filesres/izhod.csv.15
../data/gf2filesres/izhod.csv.16
../data/gf2filesres/izhod.csv.17
../data/gf2filesres/izhod.csv.18
../data/gf2filesres/izhod.csv.19
../data/gf2filesres/izhod.csv.22
../data/gf2filesres/izhod.csv.23
../data/gf2filesres/izhod.csv.24
../data/gf2filesres/izhod.csv.25
../data/gf2filesres/izhod.csv.26
../data/gf2filesres/izhod.csv.27
../data/gf2filesres/izhod.csv.28
../data/gf2filesres/izhod.csv.29
../data/gf2filesres/izhod.csv.30
../data/gf2filesres/izhod.csv.31
../data/gf2filesres/izhod.csv.32
../data/gf2filesres/izhod.csv.34
../data/gf2filesres/izhod.csv.35
../data/gf2filesres/izhod.csv.36
../data/gf2filesres/izhod.csv.37
../data/gf2filesres/izhod.csv.38
../data/gf2filesres/izhod.csv.39
../data/gf2filesres/izhod.csv.40
../data/gf2filesres/izhod.csv.41
../data/gf2filesres/izhod.csv.42
../data/gf2filesres/izhod.csv.43
../data/gf2filesres/izhod.csv.44
../data/gf2filesres/izhod.csv.45
../data/gf2filesres/izhod.csv.46
../data/gf2filesres/izhod.csv.47
../data/gf2filesres/izhod.csv.48
../data/gf2filesres/izhod.csv.49
../data/gf2filesres/izhod.csv.50
../data/gf2filesres/izhod.csv.51
../data/gf2filesres/izhod.csv.52
../data/gf2filesres/izhod.csv.53
../data/gf2filesres/izhod.csv.54
../data/gf2filesres/izhod.csv.55
../data/gf2filesres/izhod.csv.57
../data/gf2filesres/izhod.csv.68
../data/gf2filesres/izhod.csv.69
../data/gf2filesres/izhod.csv.70
../data/gf2filesres/izhod.csv.71
../data/gf2filesres/izhod.csv.72
../data/gf2filesres/izhod.csv.73
../data/gf2filesres/izhod.csv.74
../data/gf2filesres/izhod.csv.75
../data/gf2filesres/izhod.csv.76
../data/gf2filesres/izhod.csv.77
../data/gf2filesres/izhod.csv.78
../data/gf2filesres/izhod.csv.80
../data/gf2filesres/izhod.csv.81
../data/gf2filesres/izhod.csv.82
../data/gf2filesres/izhod.csv.83
../data/gf2filesres/izhod.csv.84
../data/gf2filesres/izhod.csv.85
../data/gf2filesres/izhod.csv.86
../data/gf2filesres/izhod.csv.87
../data/gf2filesres/izhod.csv.88
../data/gf2filesres/izhod.csv.89
../data/gf2filesres/izhod.csv.90
../data/gf2filesres/izhod.csv.91
../data/gf2filesres/izhod.csv.92
../data/gf2filesres/izhod.csv.93
../data/gf2filesres/izhod.csv.94
../data/gf2filesres/izhod.csv.95
../data/gf2filesres/izhod.csv.96
../data/gf2filesres/izhod.csv.97
../data/gf2filesres/izhod.csv.98

1
run.sh.example Executable file
View File

@@ -0,0 +1 @@
pypy3 src/wani.py data/Kolokacije_strukture_JOS-32-representation_3D_08_1.xml data/input --out data/output --sloleks_db '<sloleks db data>' --collocation_sentence_map_dest data/collocation-sentence-mapper --db /mnt/tmp/mysql-wani --multiple-output --load-sloleks

View File

@@ -0,0 +1,199 @@
import argparse
import logging
import os
import sys
import time
from math import log2
CORE_RESTRICTIONS = ['s', 'p', 'r', 'gg']
ALL_RESTRICTIONS = CORE_RESTRICTIONS + ['vp', 'vd', 'd']
LEMMA_COLUMNS = ['C1_Lemma', 'C2_Lemma', 'C3_Lemma', 'C4_Lemma', 'C5_Lemma']


def load_word_order(word_order_file):
    """Load per-structure word-order information from a '|'-separated file.

    Field 2 of each line holds '-'-separated POS tags, field 6 the structure
    id.  Returns a dict mapping structure id -> [core_positions, all_positions],
    where positions are 1-based component numbers (as strings) whose POS tag
    starts with a core/any restriction prefix.  Exactly two core components
    are required per structure.
    """
    with open(word_order_file, 'r') as f:
        word_orders = {}
        for line in f:
            l = line.split('|')
            # BUG FIX: the original tested `l[6] not in [e[0] for e in lines]`
            # where `lines` is a dict -- iterating a dict yields its keys, so
            # this compared against the FIRST CHARACTER of every stored id and
            # the duplicate check never worked.  Test membership directly.
            if l[6] not in word_orders and l[6] != '' and l[6] != 'NSSS':
                pos_tags = l[2].split('-')
                core_rest = sorted([str(pt_i + 1) for cr in CORE_RESTRICTIONS for pt_i, pt in enumerate(pos_tags) if pt[:len(cr)] == cr])
                assert len(core_rest) == 2, 'Core restrictions are incorrect!'
                all_rest = sorted([str(pt_i + 1) for cr in ALL_RESTRICTIONS for pt_i, pt in enumerate(pos_tags) if pt[:len(cr)] == cr])
                word_orders[l[6]] = [core_rest, all_rest]
    return word_orders
def add_word(stats, pos, word, freq):
    """Accumulate `freq` for `word` in the bucket for component `pos`.

    Empty words (empty components) are ignored; `freq` may be a string.
    """
    if word == '':
        return
    bucket = stats['words'][pos]
    bucket[word] = bucket.get(word, 0) + int(freq)
def get_new_stats(f):
    """Read a collocations CSV from open file `f` and collect lemma stats.

    Returns (lines, stats): `lines` is every row split on ',' (header row
    included; the trailing newline stays in the last cell), and `stats` maps
    each component position '1'..'5' to {lemma: summed frequency} plus a
    'total' frequency count over all data rows.
    """
    stats = {'words': {str(p): {} for p in range(1, 6)}}
    stats['words']['total'] = 0
    lines = []
    positions = {}
    header_seen = False
    for raw in f.readlines():
        row = raw.split(',')
        lines.append(row)
        if not header_seen:
            # Resolve frequency and lemma column indices from the header.
            positions['freq'] = row.index('Frequency')
            for lci, lc in enumerate(LEMMA_COLUMNS):
                positions[str(lci + 1)] = row.index(lc)
            header_seen = True
            continue
        freq = row[positions['freq']]
        for p in range(1, 6):
            add_word(stats, str(p), row[positions[str(p)]], freq)
        stats['words']['total'] += int(freq)
    return lines, stats
def logDice_new(stats, positions, line, rest):
    """Structure-level logDice: 14 + log2(2*f(xy) / (f(x) + f(y))).

    `rest` names the component positions whose in-structure frequencies form
    the denominator; the pair frequency is taken from the row itself.
    """
    component_freqs = [int(stats['words'][r][line[positions[r]]]) for r in rest]
    pair_freq = int(line[positions['freq']])
    return 14 + log2(2 * pair_freq / sum(component_freqs))
def deltaP_new(stats, positions, line, rest, delta21=True):
    """Structure-level deltaP: P(y|x) - P(y|not x).

    `rest` holds the two component positions; `delta21` selects which of the
    two acts as the cue (x) and which as the outcome (y).
    """
    freqs = [int(stats['words'][r][line[positions[r]]]) for r in rest]
    if delta21:
        fx, fy = freqs[0], freqs[1]
    else:
        fx, fy = freqs[1], freqs[0]
    freq = int(line[positions['freq']])
    total = int(stats['words']['total'])
    return (freq / fx) - ((fy - freq) / (total - fx))
def write_new_stats(wf, original_text, stats, file_name, word_order):
    """Write `original_text` (a parsed collocations CSV, header first) to the
    open file `wf`, inserting per-component structure frequencies and
    appending/inserting structure-level association scores from `stats`.

    :param wf: writable file object for the output CSV.
    :param original_text: list of rows (lists of cells); row 0 is the header.
    :param stats: statistics dict produced by get_new_stats().
    :param file_name: input file name; its suffix after the last '.' is the
        structure id used to look up `word_order`.
    :param word_order: mapping from load_word_order(): id -> [core, all] positions.
    """
    # Structure id is encoded in the file-name suffix (e.g. izhod.csv.42 -> '42').
    structure_id = file_name.split('.')[-1]
    core_rest, all_rest = word_order[structure_id]
    first_line = True
    positions = {}
    for line in original_text:
        # Drop the trailing newline that get_new_stats left in the last cell.
        line[-1] = line[-1][:-1]
        # handle header file
        if first_line:
            # New statistic columns appended at the end of the header.
            line += ['structure_frequency', 'logDice_core', 'logDice_all',
                     'weighted_logDice_frequency', 'deltaP12_structure',
                     'deltaP21_structure', 'deltaP_structure']
            # Insert one per-component frequency column after each component's
            # block of columns (resulting positions 6, 12, 18, 24, 30).
            for i in range(5):
                new_pos = 6 + i + i * 5
                line = line[:new_pos] + ['C' + str(i + 1) + '_lemma_structure_frequency'] + line[new_pos:]
            # Column indices are resolved AFTER the insertions above, so the
            # same indices stay valid for data rows once those receive the
            # identical insertions (and BEFORE the two mid-row insertions
            # below, mirroring the data-row processing order).
            positions['freq'] = line.index('Frequency')
            for lci, lc in enumerate(LEMMA_COLUMNS):
                positions[str(lci + 1)] = line.index(lc)
            positions['delta12'] = line.index('Delta_p12')
            positions['delta21'] = line.index('Delta_p21')
            positions['logDice_core'] = line.index('LogDice_core')
            positions['logDice_all'] = line.index('LogDice_all')
            # Rename the corpus-level logDice columns to distinguish them from
            # the structure-level ones appended at the end.
            line[positions['logDice_core']] = 'logDice_core_corpus'
            line[positions['logDice_all']] = 'logDice_all_corpus'
            first_line = False
            line = line[:positions['logDice_all'] + 1] + ['weighted_logDice_frequency_corpus'] + line[positions['logDice_all'] + 1:]
            line = line[:positions['delta21'] + 1] + ['deltaP'] + line[positions['delta21'] + 1:]
            # TODO INSERT 'deltaP', and weightedlogDice_frequency and , 'weighted_logDice_frequency_corpus'
            wf.write(','.join(line) + '\n')
            continue
        # Data row: per-component structure frequency ('0' for empty slots).
        lemma_struct_freq = []
        for i in range(5):
            new_pos = 1 + i * 5
            freq = str(stats['words'][str(i + 1)][line[new_pos]]) if line[new_pos] != '' else '0'
            lemma_struct_freq.append(freq)
        for i in range(5):
            new_pos = 6 + i + i * 5
            line = line[:new_pos] + [lemma_struct_freq[i]] + line[new_pos:]
        # add structure_frequency
        # NOTE(review): this is the file-wide total, identical on every row,
        # and an int formatted with 5 decimals -- confirm intent.
        structure_frequency = int(stats['words']['total'])
        line.append("{:.5f}".format(structure_frequency))
        # add logDice_core_new
        logDice_core_new = logDice_new(stats, positions, line, core_rest)
        line.append("{:.5f}".format(logDice_core_new))
        # add logDice_all_new
        logDice_all_new = logDice_new(stats, positions, line, all_rest)
        line.append("{:.5f}".format(logDice_all_new))
        # Weighted mix of frequency and corpus-level logDice; this variant is
        # inserted mid-row further below rather than appended here.
        weighted_logDice_frequency_corpus = 0.3 * int(line[positions['freq']]) + 0.7 * float(
            line[positions['logDice_core']])
        # line.append("{:.5f}".format(weighted_logDice_frequency_corpus))
        weighted_logDice_frequency = 0.3 * int(line[positions['freq']]) + 0.7 * logDice_core_new
        line.append("{:.5f}".format(weighted_logDice_frequency))
        # add deltaP12_structure
        deltaP12_structure = deltaP_new(stats, positions, line, core_rest, delta21=False)
        line.append("{:.5f}".format(deltaP12_structure))
        # add deltaP21_structure
        deltaP21_structure = deltaP_new(stats, positions, line, core_rest, delta21=True)
        line.append("{:.5f}".format(deltaP21_structure))
        # Corpus-level deltaP = |deltaP12 - deltaP21| from the existing columns.
        deltaP12 = float(line[positions['delta12']])
        deltaP21 = float(line[positions['delta21']])
        deltaP = abs(deltaP12 - deltaP21)
        # line.append("{:.5f}".format(deltaP))
        deltaP_structure = abs(deltaP12_structure - deltaP21_structure)
        line.append("{:.5f}".format(deltaP_structure))
        # Mid-row insertions last, so the `positions` lookups above stayed valid.
        line = line[:positions['logDice_all'] + 1] + ["{:.5f}".format(weighted_logDice_frequency_corpus)] + line[positions['logDice_all'] + 1:]
        line = line[:positions['delta21'] + 1] + ["{:.5f}".format(deltaP)] + line[positions['delta21'] + 1:]
        # TODO ADD OTHER COLUMNS AS IN #823 task
        wf.write(','.join(line) + '\n')
def main(args):
    """Augment every CSV in args.input with structure-level statistics.

    Rows with Frequency below 10 are dropped; surviving rows are sorted by
    descending frequency; output goes to a same-named file in args.output.
    """
    word_order = load_word_order(args.word_order_file)
    for file_name in os.listdir(args.input):
        source = os.path.join(args.input, file_name)
        target = os.path.join(args.output, file_name)
        with open(source, 'r') as rf, open(target, 'w') as wf:
            original_text, stats = get_new_stats(rf)
            freq_pos = original_text[0].index('Frequency')
            header = original_text[0]
            body = [row for row in original_text[1:] if int(row[freq_pos]) >= 10]
            body.sort(key=lambda row: -int(row[freq_pos]))
            original_text = [header] + body
            write_new_stats(wf, original_text, stats, file_name, word_order)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Extract structures from a parsed corpus.')
    parser.add_argument('input',
                        help='Path to folder that contains all input files.')
    # Fix: the help text had been copy-pasted from the `input` argument.
    parser.add_argument('output',
                        help='Path to folder where the output files are written.')
    parser.add_argument('--word_order_file', type=str, help='File that contains word order for DeltaP calculations.')
    args = parser.parse_args()
    # Fix: without an explicit level the root logger stays at WARNING and the
    # final timing line below was never emitted.
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    start = time.time()
    main(args)
    logging.info("TIME: {}".format(time.time() - start))

0
src/__init__.py Normal file
View File

View File

@@ -1,3 +1,125 @@
# All word-form feature values that may appear in restriction/representation
# definitions: number, case, verb form / tense-mood, and gender.
POSSIBLE_WORD_FORM_FEATURE_VALUES = {
    "singular",
    "dual",
    "plural",
    "nominative",
    "genitive",
    "dative",
    "accusative",
    "locative",
    "instrumental",
    "infinitive",
    "supine",
    "participle",
    "present",
    "future",
    "conditional",
    "imperative",
    'masculine',
    'feminine',
    'neuter',
}

# Translation of single-letter MSD attribute codes to feature names, per part
# of speech ('N' noun, 'V' verb, 'A' adjective).  The integer key is the
# attribute's position within the MSD string (presumably 0-based after the POS
# letter -- TODO confirm against the MSD handling code); '-' supplies the
# default value used when the attribute is unset.
CODES_TRANSLATION = {
    "N": {
        2: {  # gender
            '-': 'masculine',
            'm': 'masculine',
            'f': 'feminine',
            'n': 'neuter',
        },
        3: {  # number
            "-": "singular",
            "s": "singular",
            "d": "dual",
            "p": "plural",
        },
        4: {  # case
            "-": "nominative",
            "n": "nominative",
            "g": "genitive",
            "d": "dative",
            "a": "accusative",
            "l": "locative",
            "i": "instrumental",
        },
    },
    "V": {
        1: {  # verb type
            "-": "main",
            "m": "main",
            "a": "auxiliary",
        },
        3: {  # verb form / tense-mood
            "-": "infinitive",
            "n": "infinitive",
            "u": "supine",
            "p": "participle",
            "r": "present",
            "f": "future",
            "c": "conditional",
            "m": "imperative",
        },
        4: {  # person
            "-": "first",
            "1": "first",
            "2": "second",
            "3": "third",
        },
        5: {  # number
            "-": "singular",
            "s": "singular",
            "d": "dual",
            "p": "plural",
        },
        6: {  # gender
            '-': 'masculine',
            'm': 'masculine',
            'f': 'feminine',
            'n': 'neuter',
        },
        8: {  # negative
            "-": "no",
            "n": "no",
            "y": "yes",
        },
    },
    "A": {
        1: {  # adjective type
            "-": "general",
            "g": "general",
            "s": "possessive",
        },
        2: {  # degree
            "-": "positive",
            "p": "positive",
            "c": "comparative",
            "s": "superlative",
        },
        3: {  # gender
            '-': 'masculine',
            'm': 'masculine',
            'f': 'feminine',
            'n': 'neuter',
        },
        4: {  # number
            "-": "singular",
            "s": "singular",
            "d": "dual",
            "p": "plural",
        },
        5: {  # case
            "-": "nominative",
            "n": "nominative",
            "g": "genitive",
            "d": "dative",
            "a": "accusative",
            "l": "locative",
            "i": "instrumental",
        },
    }
}
CODES = { CODES = {
"Noun": "N", "Noun": "N",
"Verb": "V", "Verb": "V",

View File

@@ -0,0 +1,11 @@
class CollocationSentenceMapper:
    """Append-only writer for a collocation-id -> sentence-id mapping TSV.

    Opens the destination file immediately and writes a header row; call
    close(), or use the instance as a context manager, to flush and release
    the handle.
    """

    def __init__(self, output_dir):
        # `output_dir` is in fact a file path; the name is kept for backward
        # compatibility with existing callers.
        self.output = open(output_dir, "w")
        # Fix: dropped the pointless f-prefix from a literal with no placeholders.
        self.output.write('Collocation_id\tSentence_id\n')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Close the underlying file even if the caller raised.
        self.close()
        return False

    def close(self):
        self.output.close()

    def add_map(self, collocation_id, sentence_id):
        """Append one collocation-id / sentence-id pair."""
        self.output.write(f'{collocation_id}\t{sentence_id}\n')

View File

@@ -34,31 +34,35 @@ class Formatter:
class OutNoStatFormatter(Formatter): class OutNoStatFormatter(Formatter):
def additional_init(self): def additional_init(self):
self.representation = "" self.representation = {}
def header_repeat(self): def header_repeat(self):
return ["Lemma", "Representative_form", "RF_msd", "RF_scenario"] return ["Lemma", "Representative_form", "RF_msd", "RF_scenario"]
def header_right(self): def header_right(self):
return ["Joint_representative_form", "Frequency"] return ["Joint_representative_form_fixed", "Joint_representative_form_variable", "Frequency"]
def content_repeat(self, words, representations, idx, _sidx): def content_repeat(self, words, representations, idx, _sidx):
word = words[idx] word = words[idx]
if idx not in representations: if idx not in representations:
return [word.lemma, "", ""] return [word.lemma, "", ""]
rep = representations[idx] rep_text, rep_msd = representations[idx]
if rep is None: if rep_text is None:
self.representation += " " + word.lemma self.representation[idx] = word.lemma
return [word.lemma, word.lemma, "", "lemma_fallback"] return [word.lemma, word.lemma, "", "lemma_fallback"]
else: else:
self.representation += " " + rep self.representation[idx] = rep_text
return [word.lemma, rep, word.msd, "ok"] return [word.lemma, rep_text, rep_msd, "ok"]
def content_right(self, freq): def content_right(self, freq, variable_word_order=None):
rep = re.sub(' +', ' ', self.representation.strip()) fixed_word_order = sorted(self.representation.keys())
result = [rep, str(freq)] if variable_word_order is None:
self.representation = "" variable_word_order = fixed_word_order
rep_fixed_word_order = ' '.join([self.representation[o] for o in fixed_word_order if o in self.representation])
rep_variable_word_order = ' '.join([self.representation[o] for o in variable_word_order if o in self.representation])
result = [rep_fixed_word_order, rep_variable_word_order, str(freq)]
self.representation = {}
return result return result
def group(self): def group(self):
@@ -147,7 +151,27 @@ class StatsFormatter(Formatter):
word = words[idx] word = words[idx]
key = (sidx, idx, word.lemma) key = (sidx, idx, word.lemma)
# try to fix missing dispersions
if key not in self.colocation_ids.dispersions:
if word.lemma == 'k':
new_key = (sidx, idx, 'h')
elif word.lemma == 'h':
new_key = (sidx, idx, 'k')
elif word.lemma == 's':
new_key = (sidx, idx, 'z')
elif word.lemma == 'z':
new_key = (sidx, idx, 's')
else:
new_key = (sidx, idx, '')
if new_key in self.colocation_ids.dispersions:
key = new_key
print('Dispersions fixed.')
else:
print('Dispersions not fixed.')
if key in self.colocation_ids.dispersions:
distribution = self.colocation_ids.dispersions[key] distribution = self.colocation_ids.dispersions[key]
else:
distribution = 1
return [self.stat_str(distribution)] return [self.stat_str(distribution)]
def content_right(self, freq): def content_right(self, freq):
@@ -181,13 +205,13 @@ class OutFormatter(Formatter):
def header_right(self): def header_right(self):
return self.f1.header_right() + self.f2.header_right() return self.f1.header_right() + self.f2.header_right()
def content_repeat(self, words, representations, idx, sidx): def content_repeat(self, words, representations, idx, sidx, variable_word_order=None):
cr1 = self.f1.content_repeat(words, representations, idx, sidx) cr1 = self.f1.content_repeat(words, representations, idx, sidx)
cr2 = self.f2.content_repeat(words, representations, idx, sidx) cr2 = self.f2.content_repeat(words, representations, idx, sidx)
return cr1 + cr2 return cr1 + cr2
def content_right(self, freq): def content_right(self, freq, variable_word_order=None):
return self.f1.content_right(freq) + self.f2.content_right(freq) return self.f1.content_right(freq, variable_word_order) + self.f2.content_right(freq)
def group(self): def group(self):
return self.f1.group() and self.f2.group() return self.f1.group() and self.f2.group()

View File

@@ -1,3 +1,4 @@
import os
from xml.etree import ElementTree from xml.etree import ElementTree
import logging import logging
import re import re
@@ -13,11 +14,14 @@ def is_root_id(id_):
return len(id_.split('.')) == 3 return len(id_.split('.')) == 3
def load_files(args, database): def load_files(args, database, w_collection=None, input_corpus=None):
filenames = args.input filenames = input_corpus if input_corpus is not None else args.input
skip_id_check = args.skip_id_check skip_id_check = args.skip_id_check
do_msd_translate = not args.no_msd_translate do_msd_translate = not args.no_msd_translate
if len(filenames) == 1 and os.path.isdir(filenames[0]):
filenames = [os.path.join(filenames[0], file) for file in os.listdir(filenames[0]) if file[-5:] != '.zstd']
database.init("CREATE TABLE Files ( filename varchar(2048) )") database.init("CREATE TABLE Files ( filename varchar(2048) )")
for idx, fname in enumerate(filenames): for idx, fname in enumerate(filenames):
@@ -25,22 +29,35 @@ def load_files(args, database):
extension = pathlib.Path(fname).suffix extension = pathlib.Path(fname).suffix
# check if file with the same name already loaded... # check if file with the same name already loaded...
loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname, )).fetchone() loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname,)).fetchone()
if loaded is not None: if loaded is not None:
print("ALREADY LOADED") print("ALREADY LOADED")
continue continue
if extension == ".xml": if extension == ".xml":
et = load_xml(fname) et = load_xml(fname)
if input_corpus is None:
yield file_sentence_generator(et, skip_id_check, do_msd_translate, args.pc_tag) yield file_sentence_generator(et, skip_id_check, do_msd_translate, args.pc_tag)
else:
sentence_generator = file_sentence_generator_valency(et, skip_id_check, do_msd_translate, args.pc_tag, w_collection)
for sent_id, sentence, othr_attributes in sentence_generator:
yield sent_id, sentence, othr_attributes
elif extension == ".gz": elif extension == ".gz":
if input_corpus is None:
yield load_csv(fname, True) yield load_csv(fname, True)
else: else:
sentences = load_csv_valency(fname, True, w_collection)
for sentence in sentences:
yield sentence
else:
if input_corpus is None:
yield load_csv(fname, False) yield load_csv(fname, False)
# else: else:
# raise NotImplementedError("Unknown file extension: {}".format(extension)) sentences = load_csv_valency(fname, False, w_collection)
for sentence in sentences:
yield sentence
database.execute("INSERT INTO Files (filename) VALUES (?)", (fname, )) database.execute("INSERT INTO Files (filename) VALUES (?)", (fname,))
database.commit() database.commit()
@@ -99,6 +116,62 @@ def load_csv(filename, compressed):
sentence_end(bad_sentence) sentence_end(bad_sentence)
return result return result
def load_csv_valency(filename, compressed, w_collection):
    """Parse a (possibly gzip-compressed) tab-separated corpus dump into Word
    objects, keeping only the sentences present in the collection
    `w_collection` (queried via `find`, Mongo-style).

    Returns a list of (sentence_id, words, extra_attributes) tuples, sorted by
    the three dot-separated components of the sentence id (document, then two
    numeric parts).
    """
    # TODO skip sentences that are not in sentences of interest!!!
    result = {}
    bad_sentence = False
    words = {}
    links = []
    idi = 0

    def sentence_end(bad_sentence, sid):
        # Flush the words collected for sentence `sid` into `result`,
        # resolving dependency links first; sentences flagged bad are dropped.
        if bad_sentence:
            return
        for lfrom, ldest, ana in links:
            if lfrom not in words or ldest not in words:
                logging.warning("Bad link in sentence: " + line_split[0])
                continue
            words[lfrom].add_link(ana, words[ldest])
        result[sid] = list(words.values())

    line_gen = lines_gz if compressed else lines_csv
    for line in line_gen(filename):
        line_str = line.strip()
        # Empty cells arrive as three consecutive tabs; patch in a ',' filler.
        line_fixed = line_str.replace('\t\t\t', '\t,\t')
        line_split = line_fixed.split("\t")
        # Word index "1" marks a new sentence: flush the previous one first.
        if line_split[1] == "1" and len(words) > 0:
            sentence_end(bad_sentence, sid)
            bad_sentence = False
            links = []
            words = {}
            idi = 0
        try:
            sid, wid, text, msd, lemma, link_src, link_type = line_split
        except ValueError:
            # NOTE(review): no `continue` here -- a malformed line falls
            # through and reuses sid/wid/... from the previous line (NameError
            # if the very first line is malformed).  sentence_end() later drops
            # the sentence, but confirm this fall-through is intended.
            bad_sentence = True
        full_id = "{}.{}".format(sid, wid)
        words[wid] = Word(lemma, msd, full_id, text, True)
        # Only tokens whose first character is a word character get a running
        # in-sentence index `idi`; punctuation tokens are skipped.
        if not (len(text[0]) == 1 and re.match('^[\w]+$', text[0]) is None):
            words[wid].idi = str(idi)
            idi += 1
        if link_src != '0':
            links.append((link_src, wid, link_type))
    # Flush the final sentence.
    sentence_end(bad_sentence, sid)
    sentence_ids = list(result.keys())
    # Keep only sentences of interest and attach their extra attributes.
    cur = w_collection.find({'_id': {'$in': sentence_ids}})
    cur = [c for c in cur]
    unsorted_result = [(c['_id'], result[c['_id']], {k: v for k, v in c.items() if k != '_id'}) for c in cur]
    return sorted(unsorted_result, key=lambda x: (x[0].split('.')[0], int(x[0].split('.')[1]), int(x[0].split('.')[2])))
def load_xml(filename): def load_xml(filename):
with open(filename, 'r') as fp: with open(filename, 'r') as fp:
content = fp.read() content = fp.read()
@@ -124,9 +197,9 @@ def file_sentence_generator(et, skip_id_check, do_msd_translate, pc_tag):
dest = l.get('dep') dest = l.get('dep')
else: else:
ana = l.get('ana') ana = l.get('ana')
if ana[:4] != 'syn:': # dont bother... if ana[:8] != 'jos-syn:': # dont bother...
continue continue
ana = ana[4:] ana = ana[8:]
lfrom, dest = l.get('target').replace('#', '').split() lfrom, dest = l.get('target').replace('#', '').split()
if lfrom in words: if lfrom in words:
@@ -146,3 +219,96 @@ def file_sentence_generator(et, skip_id_check, do_msd_translate, pc_tag):
pass pass
return list(words.values()) return list(words.values())
def file_sentence_generator_valency(et, skip_id_check, do_msd_translate, pc_tag, w_collection):
    """Yield (sentence_id, words, extra_attributes) for every sentence of the
    parsed XML tree `et` that is present in the collection `w_collection`
    (the "sentences of interest"); other sentences are skipped entirely.
    """
    words = {}
    sentences = list(et.iter('s'))
    sentence_ids = [s.attrib['id'] for s in sentences]
    # Pre-fetch the metadata of all sentences of interest in a single query.
    cur = w_collection.find({'_id': {'$in': sentence_ids}})
    sentences_of_interest = {c['_id']: {k: v for k, v in c.items() if k != '_id'} for c in cur}
    for sentence in progress(sentences, "load-text"):
        if sentence.attrib['id'] not in sentences_of_interest:
            continue
        idi = 0
        last_word_id = None
        for w in sentence.iter():
            if w.tag == 'w':
                # Regular token: build a Word and assign an in-sentence index.
                last_word_id = w.get('id')
                words[last_word_id] = Word.from_xml(w, do_msd_translate)
                words[last_word_id].idi = str(idi)
                idi += 1
            elif w.tag == pc_tag:
                # Punctuation token: no idi assigned.
                last_word_id = w.get('id')
                words[last_word_id] = Word.pc_word(w, do_msd_translate)
            elif w.tag == 'c':
                # Glue (whitespace) element: attach to the preceding token.
                if last_word_id:
                    words[last_word_id].glue += w.text
        # Resolve syntactic dependency links; two link encodings are handled.
        for l in sentence.iter("link"):
            if 'dep' in l.keys():
                ana = l.get('afun')
                lfrom = l.get('from')
                dest = l.get('dep')
            else:
                ana = l.get('ana')
                if ana[:8] != 'jos-syn:':  # dont bother...
                    continue
                ana = ana[8:]
                lfrom, dest = l.get('target').replace('#', '').split()
            if lfrom in words:
                if not skip_id_check and is_root_id(lfrom):
                    logging.error("NOO: {}".format(lfrom))
                    sys.exit(1)
                if dest in words:
                    next_word = words[dest]
                    words[lfrom].add_link(ana, next_word)
                else:
                    logging.error("Unknown id: {}".format(dest))
                    sys.exit(1)
            else:
                # strange errors, just skip...
                pass
        yield sentence.attrib['id'], list(words.values()), sentences_of_interest[sentence.attrib['id']]
        # Reset the accumulator for the next sentence.
        words = {}
def file_sentence_glue_generator(files, pc_tag, w_collection):
    """For each XML file in `files`, yield (sentence_id, token_rows) for every
    sentence present in the collection `w_collection`, where each token row is
    [text, word_id, glue] and word ids count up from 1 within the sentence.
    """
    for fname in files:
        et = load_xml(fname)
        words = {}
        sentences = list(et.iter('s'))
        sentence_ids = [s.attrib['id'] for s in sentences]
        # Pre-fetch which sentences are of interest in one query.
        cur = w_collection.find({'_id': {'$in': sentence_ids}})
        sentences_of_interest = {c['_id']: {k: v for k, v in c.items() if k != '_id'} for c in cur}
        for sentence in progress(sentences, "load-text"):
            if sentence.attrib['id'] not in sentences_of_interest:
                continue
            w_id = 1
            last_word_id = None
            sentence_id = None
            for w in sentence.iter():
                if w.tag == 'w':
                    last_word_id = w_id
                    words[last_word_id] = [w.text, last_word_id, '']
                    w_id += 1
                elif w.tag == pc_tag:
                    # Punctuation is numbered just like regular tokens here.
                    last_word_id = w_id
                    words[last_word_id] = [w.text, last_word_id, '']
                    w_id += 1
                elif w.tag == 'c':
                    # Glue (whitespace) is appended to the preceding token.
                    if last_word_id:
                        words[last_word_id][2] += w.text
                elif w.tag == 's':
                    # sentence.iter() yields the <s> element itself first, so
                    # this captures the sentence's own id.
                    sentence_id = w.attrib['id']
            yield (sentence_id, list(words.values()))
            words = {}

View File

@@ -28,8 +28,8 @@ class StructureMatch:
result.matches[-1][str(component_id)] = Word(word_lemma, word_msd, word_id, word_text, False) result.matches[-1][str(component_id)] = Word(word_lemma, word_msd, word_id, word_text, False)
for component_id, text in db.execute("SELECT component_id, text FROM Representations WHERE colocation_id=?", (colocation_id,)): for component_id, text, msd in db.execute("SELECT component_id, text, msd FROM Representations WHERE colocation_id=?", (colocation_id,)):
result.representations[str(component_id)] = text result.representations[str(component_id)] = (text, msd)
return result return result

View File

@@ -1,5 +1,7 @@
import gc
from collections import defaultdict from collections import defaultdict
from ast import literal_eval from ast import literal_eval
from time import time
from match import StructureMatch from match import StructureMatch
from representation_assigner import RepresentationAssigner from representation_assigner import RepresentationAssigner
@@ -35,6 +37,7 @@ class MatchStore:
colocation_id INTEGER, colocation_id INTEGER,
component_id INTEGER, component_id INTEGER,
text varchar(32), text varchar(32),
msd varchar(32),
FOREIGN KEY(colocation_id) REFERENCES Colocations(colocation_id)) FOREIGN KEY(colocation_id) REFERENCES Colocations(colocation_id))
""") """)
self.db.init("""CREATE TABLE Dispersions ( self.db.init("""CREATE TABLE Dispersions (
@@ -91,7 +94,14 @@ class MatchStore:
(structure.id,)): (structure.id,)):
yield StructureMatch.from_db(self.db, cid[0], structure) yield StructureMatch.from_db(self.db, cid[0], structure)
def set_representations(self, word_renderer, structures): def add_inserts(self, inserts):
for match in inserts:
for component_id, (text, msd) in match.representations.items():
self.db.execute("""
INSERT INTO Representations (colocation_id, component_id, text, msd)
VALUES (?,?,?,?)""", (match.match_id, component_id, text, msd))
def set_representations(self, word_renderer, structures, sloleks_db=None):
step_name = 'representation' step_name = 'representation'
if self.db.is_step_done(step_name): if self.db.is_step_done(step_name):
print("Representation step already done, skipping") print("Representation step already done, skipping")
@@ -102,20 +112,20 @@ class MatchStore:
structures_dict = {s.id: s for s in structures} structures_dict = {s.id: s for s in structures}
num_representations = int(self.db.execute("SELECT Count(*) FROM Colocations").fetchone()[0]) num_representations = int(self.db.execute("SELECT Count(*) FROM Colocations").fetchone()[0])
start_time = time()
for cid, sid in progress(self.db.execute("SELECT colocation_id, structure_id FROM Colocations"), "representations", total=num_representations): for cid, sid in progress(self.db.execute("SELECT colocation_id, structure_id FROM Colocations"), "representations", total=num_representations):
structure = structures_dict[sid] structure = structures_dict[sid]
match = StructureMatch.from_db(self.db, cid, structure) match = StructureMatch.from_db(self.db, cid, structure)
RepresentationAssigner.set_representations(match, word_renderer) RepresentationAssigner.set_representations(match, word_renderer, sloleks_db=sloleks_db)
inserts.append(match) inserts.append(match)
if len(inserts) > num_inserts: if len(inserts) > num_inserts:
for match in inserts: self.add_inserts(inserts)
for component_id, text in match.representations.items():
self.db.execute("""
INSERT INTO Representations (colocation_id, component_id, text)
VALUES (?,?,?)""", (match.match_id, component_id, text))
inserts = [] inserts = []
if time() - start_time > 5:
start_time = time()
gc.collect()
self.add_inserts(inserts)
self.db.step_is_done(step_name) self.db.step_is_done(step_name)
def has_colocation_id_enough_frequency(self, colocation_id): def has_colocation_id_enough_frequency(self, colocation_id):

30
src/postprocessor.py Normal file
View File

@@ -0,0 +1,30 @@
class Postprocessor:
    """Post-processes matched collocations.

    Currently fixes the Slovene one-letter prepositions s/z and k/h, whose
    correct surface form depends on the first letter of the following word.
    """

    def __init__(self, fix_one_letter_words=True):
        # When False, process() leaves words untouched (it still normalizes
        # the collocation-id entries to tuples).
        self.fix_one_letter_words = fix_one_letter_words

    @staticmethod
    def fix_sz(next_word):
        """Return 's' before an unvoiced consonant, otherwise 'z'."""
        if next_word[0] in ['c', 'č', 'f', 'h', 'k', 'p', 's', 'š', 't']:
            return 's'
        return 'z'

    @staticmethod
    def fix_kh(next_word):
        """Return 'h' before g/k, otherwise 'k'."""
        if next_word[0] in ['g', 'k']:
            return 'h'
        return 'k'

    def process(self, match, collocation_id):
        """Fix s/z and k/h entries of `collocation_id` in place and mirror the
        corrections into `match`.

        :param match: mapping component_id -> word object with a `.text` attr.
        :param collocation_id: list whose first element is a header entry and
            whose remaining elements are [component_id, word] pairs.
        :return: (match, collocation_id) with non-header entries as tuples.
        """
        # Fix: `fix_one_letter_words` was stored by __init__ but never
        # consulted; honor it here.
        if self.fix_one_letter_words and len(collocation_id) > 2:
            # Skip the header and the last word -- the correction depends on
            # the following word, and the last one has no successor.
            for idx, (col_id, word) in enumerate(collocation_id[1:-1]):
                if word in ['s', 'z']:
                    correct_letter = self.fix_sz(collocation_id[idx + 2][1])
                    collocation_id[idx + 1][1] = correct_letter
                    match[col_id].text = correct_letter
                elif word in ['k', 'h']:
                    correct_letter = self.fix_kh(collocation_id[idx + 2][1])
                    collocation_id[idx + 1][1] = correct_letter
                    match[col_id].text = correct_letter
        collocation_id = [collocation_id[0]] + [tuple(line) for line in collocation_id[1:]]
        return match, collocation_id

View File

@@ -4,6 +4,9 @@ from collections import Counter
from codes_tagset import TAGSET, CODES from codes_tagset import TAGSET, CODES
from word import WordMsdOnly from word import WordMsdOnly
from word import WordDummy
class ComponentRepresentation: class ComponentRepresentation:
def __init__(self, data, word_renderer): def __init__(self, data, word_renderer):
self.data = data self.data = data
@@ -11,6 +14,7 @@ class ComponentRepresentation:
self.words = [] self.words = []
self.rendition_text = None self.rendition_text = None
self.rendition_msd = None
self.agreement = [] self.agreement = []
def get_agreement(self): def get_agreement(self):
@@ -19,31 +23,37 @@ class ComponentRepresentation:
def add_word(self, word): def add_word(self, word):
self.words.append(word) self.words.append(word)
def render(self): def render(self, sloleks_db=None):
if self.rendition_text is None: if self.rendition_text is None:
self.rendition_text = self._render() self.rendition_text, self.rendition_msd = self._render(sloleks_db=sloleks_db)
def _render(self): def _render(self, sloleks_db=None):
raise NotImplementedError("Not implemented for class: {}".format(type(self))) raise NotImplementedError("Not implemented for class: {}".format(type(self)))
class LemmaCR(ComponentRepresentation): class LemmaCR(ComponentRepresentation):
def _render(self): def _render(self, sloleks_db=None):
return self.words[0].lemma if len(self.words) > 0 else None # TODO FIX THIS TO LEMMA MSD
if len(self.words) > 0:
return self.words[0].lemma, self.words[0].msd
else:
return None, None
class LexisCR(ComponentRepresentation): class LexisCR(ComponentRepresentation):
def _render(self): def _render(self, sloleks_db=None):
return self.data['lexis'] return self.data['lexis'], 'Q'
class WordFormAllCR(ComponentRepresentation): class WordFormAllCR(ComponentRepresentation):
def _render(self): def _render(self, sloleks_db=None):
if len(self.words) == 0: if len(self.words) == 0:
return None return None, None
else: else:
forms = [w.text.lower() for w in self.words] forms = [w.text.lower() for w in self.words]
return "/".join(set(forms)) msds = [w.msd for w in self.words]
return "/".join(set(forms)), "/".join(set(msds))
class WordFormAnyCR(ComponentRepresentation): class WordFormAnyCR(ComponentRepresentation):
def _render(self): def _render(self, sloleks_db=None):
text_forms = {} text_forms = {}
msd_lemma_txt_triplets = Counter([(w.msd, w.lemma, w.text) for w in self.words]) msd_lemma_txt_triplets = Counter([(w.msd, w.lemma, w.text) for w in self.words])
for (msd, lemma, text), _n in reversed(msd_lemma_txt_triplets.most_common()): for (msd, lemma, text), _n in reversed(msd_lemma_txt_triplets.most_common()):
@@ -60,20 +70,40 @@ class WordFormAnyCR(ComponentRepresentation):
# check if agreements match # check if agreements match
agreements_matched = [agr.match(word_msd) for agr in self.agreement] agreements_matched = [agr.match(word_msd) for agr in self.agreement]
# in case all agreements do not match try to get data from sloleks and change properly
if not all(agreements_matched):
if sloleks_db is None:
raise Exception('sloleks_db not properly setup!')
for i, agr in enumerate(self.agreement):
if not agr.match(word_msd):
msd, lemma, text = sloleks_db.get_word_form(agr.lemma, agr.msd(), agr.data, align_msd=word_msd)
if msd is not None:
agr.msds[0] = msd
agr.words.append(WordDummy(msd, lemma, text))
# when we find element in sloleks automatically add it (no need for second checks, since msd
# is tailored to pass tests by default)
agr.rendition_candidate = text
agr.rendition_msd_candidate = msd
agreements_matched[i] = True
else:
break
# if we are at the last "backup word", then confirm matches # if we are at the last "backup word", then confirm matches
# that worked for this one and return # that worked for this one and return
if word_lemma is None: if word_lemma is None:
for agr, matched in zip(self.agreement, agreements_matched): for agr, matched in zip(self.agreement, agreements_matched):
if matched: if matched:
agr.confirm_match() agr.confirm_match()
return None return None, None
# if all agreements match, we win! # if all agreements match, we win!
if all(agreements_matched): if all(agreements_matched):
for agr in self.agreement: for agr in self.agreement:
agr.confirm_match() agr.confirm_match()
return text_forms[(word_msd, word_lemma)] return text_forms[(word_msd, word_lemma)], word_msd
return None, None
class WordFormMsdCR(WordFormAnyCR): class WordFormMsdCR(WordFormAnyCR):
@@ -93,6 +123,8 @@ class WordFormMsdCR(WordFormAnyCR):
for key, value in selectors.items(): for key, value in selectors.items():
t = word_msd[0] t = word_msd[0]
v = TAGSET[t].index(key.lower()) v = TAGSET[t].index(key.lower())
if v + 1 >= len(word_msd):
return False
f1 = word_msd[v + 1] f1 = word_msd[v + 1]
f2 = CODES[value] f2 = CODES[value]
@@ -109,16 +141,21 @@ class WordFormMsdCR(WordFormAnyCR):
if self.check_msd(word.msd): if self.check_msd(word.msd):
super().add_word(word) super().add_word(word)
def _render(self): def _render(self, sloleks_db=None):
if len(self.words) == 0:
if sloleks_db is None:
raise Exception('sloleks_db not properly setup!')
msd, lemma, text = sloleks_db.get_word_form(self.lemma, self.msd(), self.data)
if msd is not None:
self.words.append(WordDummy(msd, lemma, text))
self.words.append(WordMsdOnly(self._common_msd())) self.words.append(WordMsdOnly(self._common_msd()))
return super()._render() return super()._render(sloleks_db)
def _common_msd(self): def _common_msd(self):
msds = sorted(self.msds, key=len) msds = sorted(self.msds, key=len)
common_msd = ["-" if not all(msds[j][idx] == msds[0][idx] for j in range(1, len(self.msds))) common_msd = ["-" if not all(msds[j][idx] == msds[0][idx] for j in range(1, len(self.msds)))
else msds[0][idx] for idx in range(len(msds[0]))] else msds[0][idx] for idx in range(len(msds[0]))]
common_msd = "".join(common_msd) common_msd = "".join(common_msd)
iommon_msd = "".join(common_msd)
return self.word_renderer.common_lemma_msd(self.lemma, common_msd) return self.word_renderer.common_lemma_msd(self.lemma, common_msd)
@@ -126,6 +163,7 @@ class WordFormAgreementCR(WordFormMsdCR):
def __init__(self, data, word_renderer): def __init__(self, data, word_renderer):
super().__init__(data, word_renderer) super().__init__(data, word_renderer)
self.rendition_candidate = None self.rendition_candidate = None
self.rendition_msd_candidate = None
def get_agreement(self): def get_agreement(self):
return self.data['other'] return self.data['other']
@@ -141,12 +179,14 @@ class WordFormAgreementCR(WordFormMsdCR):
if WordFormAgreementCR.check_agreement(word_msd, candidate_msd, self.data['agreement']): if WordFormAgreementCR.check_agreement(word_msd, candidate_msd, self.data['agreement']):
if self.check_msd(candidate_msd): if self.check_msd(candidate_msd):
self.rendition_candidate = candidate_text self.rendition_candidate = candidate_text
self.rendition_msd_candidate = candidate_msd
return True return True
return False return False
def confirm_match(self): def confirm_match(self):
self.rendition_text = self.rendition_candidate self.rendition_text = self.rendition_candidate
self.rendition_msd = self.rendition_msd_candidate
@staticmethod @staticmethod
def check_agreement(msd1, msd2, agreements): def check_agreement(msd1, msd2, agreements):
@@ -182,5 +222,5 @@ class WordFormAgreementCR(WordFormMsdCR):
return True return True
def render(self): def render(self, sloleks_db=None):
pass pass

View File

@@ -39,7 +39,7 @@ class RepresentationAssigner:
return self.representation_factory(self.more, word_renderer) return self.representation_factory(self.more, word_renderer)
@staticmethod @staticmethod
def set_representations(match, word_renderer): def set_representations(match, word_renderer, sloleks_db=None):
representations = {} representations = {}
for c in match.structure.components: for c in match.structure.components:
representations[c.idx] = [] representations[c.idx] = []
@@ -70,13 +70,14 @@ class RepresentationAssigner:
for cid, reps in representations.items(): for cid, reps in representations.items():
for rep in reps: for rep in reps:
rep.render() rep.render(sloleks_db=sloleks_db)
for cid, reps in representations.items(): for cid, reps in representations.items():
reps = [rep.rendition_text for rep in reps] reps_text = [rep.rendition_text for rep in reps]
if reps == []: reps_msd = [rep.rendition_msd for rep in reps]
if reps_text == []:
pass pass
elif all(r is None for r in reps): elif all(r is None for r in reps_text):
match.representations[cid] = None match.representations[cid] = (None, None)
else: else:
match.representations[cid] = " ".join(("" if r is None else r) for r in reps) match.representations[cid] = (" ".join(("" if r is None else r) for r in reps_text), " ".join(("" if r is None else r) for r in reps_msd))

211
src/sloleks_db.py Normal file
View File

@@ -0,0 +1,211 @@
import gc
from psycopg2cffi import compat
compat.register()
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, aliased
from sqlalchemy import create_engine
from codes_tagset import TAGSET, CODES, CODES_TRANSLATION, POSSIBLE_WORD_FORM_FEATURE_VALUES
class SloleksDatabase:
    """Access layer over a Sloleks (Slovene lexicon) PostgreSQL database.

    Connects through SQLAlchemy with reflected table metadata and exposes
    :meth:`get_word_form`, which resolves a concrete inflected surface form
    for a lemma given an (adjusted) MSD.  With ``load_sloleks=True`` the
    relevant tables are pre-loaded into in-memory dictionaries so lookups
    avoid per-call SQL queries (intended for large corpora; see README notes
    about keeping the database in tmpfs/RAM).
    """

    def __init__(self, db, load_sloleks):
        """Connect and (optionally) preload word-form data.

        :param db: credentials string formatted ``user:password:database:host``
        :param load_sloleks: if True, cache all word forms in RAM up front
        """
        # The ORM classes are built at runtime from the reflected schema and
        # published as module-level globals so the other methods (and the
        # SQL-query branch of get_word_form) can reference them.
        global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation
        [db_user, db_password, db_database, db_host] = db.split(':')
        engine = create_engine('postgresql://' + db_user + ':' + db_password + '@' + db_host + '/' + db_database,
                               pool_recycle=14400)
        Base = declarative_base()
        # Reflect the existing schema instead of declaring columns by hand.
        Base.metadata.reflect(engine)

        class Lexeme(Base):
            __table__ = Base.metadata.tables['jedro_lexeme']

        class LexemeFeature(Base):
            __table__ = Base.metadata.tables['jedro_lexeme_feature']

        class SyntacticStructure(Base):
            __table__ = Base.metadata.tables['jedro_syntacticstructure']

        class StructureComponent(Base):
            __table__ = Base.metadata.tables['jedro_structurecomponent']

        class Feature(Base):
            __table__ = Base.metadata.tables['jedro_feature']

        class LexicalUnitLexeme(Base):
            __table__ = Base.metadata.tables['jedro_lexicalunit_lexeme']

        class LexicalUnit(Base):
            __table__ = Base.metadata.tables['jedro_lexicalunit']

        class LexicalUnitType(Base):
            __table__ = Base.metadata.tables['jedro_lexicalunittype']

        class Category(Base):
            __table__ = Base.metadata.tables['jedro_category']

        class Sense(Base):
            __table__ = Base.metadata.tables['jedro_sense']

        class Measure(Base):
            __table__ = Base.metadata.tables['jedro_measure']

        class LexicalUnitMeasure(Base):
            __table__ = Base.metadata.tables['jedro_lexicalunitmeasure']

        class Corpus(Base):
            __table__ = Base.metadata.tables['jedro_corpus']

        class Definition(Base):
            __table__ = Base.metadata.tables['jedro_definition']

        class WordForm(Base):
            __table__ = Base.metadata.tables['jedro_wordform']

        class WordFormFeature(Base):
            __table__ = Base.metadata.tables['jedro_wordform_feature']

        class FormRepresentation(Base):
            __table__ = Base.metadata.tables['jedro_formrepresentation']

        self.session = Session(engine)
        self.load_sloleks = load_sloleks
        if self.load_sloleks:
            self.init_load_sloleks()

    def init_load_sloleks(self):
        """Load the lexicon tables into memory for query-free lookups.

        Builds ``self.connected_lemmas`` mapping each lemma to a list of
        ``(feature-value set, surface form)`` pairs; all intermediate
        dictionaries are deleted afterwards to reduce peak memory.
        """
        query_word_form_features = self.session.query(WordFormFeature.word_form_id, WordFormFeature.value)
        word_form_features = query_word_form_features.all()
        query_form_representations = self.session.query(FormRepresentation.word_form_id, FormRepresentation.form)
        form_representations = query_form_representations.all()
        query_word_forms = self.session.query(WordForm.id, WordForm.lexeme_id)
        word_forms = query_word_forms.all()
        query_lexemes = self.session.query(Lexeme.id, Lexeme.lemma)
        lexemes = query_lexemes.all()

        # lemma text -> list of lexeme ids (one lemma may have many lexemes)
        self.lemmas = {}
        for lexeme in lexemes:
            if lexeme.lemma not in self.lemmas:
                self.lemmas[lexeme.lemma] = []
            self.lemmas[lexeme.lemma].append(lexeme.id)

        # word_form id -> set of feature values, keeping only features that
        # decypher_msd can produce.
        self.word_form_features = {}
        for word_form_feature in word_form_features:
            if word_form_feature.value not in POSSIBLE_WORD_FORM_FEATURE_VALUES:
                continue
            if word_form_feature.word_form_id not in self.word_form_features:
                self.word_form_features[word_form_feature.word_form_id] = set()
            self.word_form_features[word_form_feature.word_form_id].add(word_form_feature.value)

        # word_form id -> surface form text
        self.form_representations = {form_representation.word_form_id: form_representation.form for form_representation
                                     in form_representations}

        # lexeme id -> list of word_form ids
        self.word_forms = {}
        for word_form in word_forms:
            if word_form.lexeme_id not in self.word_forms:
                self.word_forms[word_form.lexeme_id] = []
            self.word_forms[word_form.lexeme_id].append(word_form.id)

        # lemma -> [(feature-value set, surface form), ...]; the only
        # structure retained after this method returns.
        self.connected_lemmas = {}
        for lemma, lemma_ids in self.lemmas.items():
            for lemma_id in lemma_ids:
                if lemma_id in self.word_forms:
                    for word_form_id in self.word_forms[lemma_id]:
                        if word_form_id in self.word_form_features and word_form_id in self.form_representations:
                            if lemma not in self.connected_lemmas:
                                self.connected_lemmas[lemma] = []
                            self.connected_lemmas[lemma].append((self.word_form_features[word_form_id], self.form_representations[word_form_id]))

        # Drop the intermediates and reclaim memory right away.
        del self.lemmas, self.word_form_features, self.form_representations, self.word_forms
        gc.collect()

    def close(self):
        """Close the SQLAlchemy session."""
        self.session.close()

    def decypher_msd(self, msd):
        """Translate an MSD string into the Sloleks feature values to match.

        Only nouns (N), verbs (V) and adjectives (A) are handled; any other
        category yields an empty list, which callers treat as "no lookup
        possible".
        """
        t = msd[0]
        decypher = []
        # IF ADDING OR CHANGING ATTRIBUTES HERE ALSO FIX POSSIBLE_WORD_FORM_FEATURE_VALUES
        if t == 'N':
            # gender = CODES_TRANSLATION[t][2][msd[2]]
            number = CODES_TRANSLATION[t][3][msd[3]]
            case = CODES_TRANSLATION[t][4][msd[4]]
            decypher = [number, case]
        elif t == 'V':
            # gender = CODES_TRANSLATION[t][6][msd[6]]
            vform = CODES_TRANSLATION[t][3][msd[3]]
            number = CODES_TRANSLATION[t][5][msd[5]]
            # person is hard-coded to third -- TODO confirm this is intended
            # for every verb form looked up here
            person = 'third'
            decypher = [vform, number, person]
        elif t == 'A':
            gender = CODES_TRANSLATION[t][3][msd[3]]
            number = CODES_TRANSLATION[t][4][msd[4]]
            case = CODES_TRANSLATION[t][5][msd[5]]
            decypher = [gender, number, case]
        return decypher

    def get_word_form(self, lemma, msd, data, align_msd=False):
        """Find a surface form of *lemma* matching *msd* (after adjustments).

        The MSD is first modified by ``data['msd']`` overrides and, when
        ``align_msd`` is given together with ``data['agreement']``, aligned to
        the agreement attributes of that other word's MSD.  Lookup then goes
        through the in-memory cache (``load_sloleks``) or an SQL query.

        :returns: ``(msd, lemma, form)`` or ``(None, None, None)`` when no
            form can be determined.
        """
        # modify msd as required
        msd = list(msd)
        if 'msd' in data:
            for key, value in data['msd'].items():
                t = msd[0]
                v = TAGSET[t].index(key.lower())
                # Pad short MSDs with '-' so the attribute slot exists.
                if v + 1 >= len(msd):
                    msd = msd + ['-' for _ in range(v - len(msd) + 2)]
                msd[v + 1] = CODES[value]
        if align_msd and 'agreement' in data:
            align_msd = list(align_msd)
            t_align_msd = align_msd[0]
            t = msd[0]
            # Copy each agreement attribute value from the aligned word's MSD.
            for att in data['agreement']:
                v_align_msd = TAGSET[t_align_msd].index(att.lower())
                v = TAGSET[t].index(att.lower())
                # fix for verbs with short msds
                if v + 1 >= len(msd):
                    msd = msd + ['-' for _ in range(v - len(msd) + 2)]
                msd[v + 1] = align_msd[v_align_msd + 1]

        decypher_msd = self.decypher_msd(msd)
        if not decypher_msd:
            return None, None, None

        if self.load_sloleks and lemma in self.connected_lemmas:
            # Scan cached (features, form) pairs for one containing every
            # decyphered feature value.
            for (word_form_features, form_representations) in self.connected_lemmas[lemma]:
                fits = True
                for d_m in decypher_msd:
                    if d_m not in word_form_features:
                        fits = False
                        break
                if fits:
                    break
            # NOTE(review): if no candidate fits, this still returns the LAST
            # candidate's form instead of (None, None, None) -- looks like a
            # missing `fits` check; confirm whether the fallback is intended.
            return ''.join(msd), lemma, form_representations
        else:
            # One WordFormFeature alias per required feature value, so a
            # single word form must satisfy all of them at once.
            wfs = [aliased(WordFormFeature) for _ in decypher_msd]
            query_preposition = self.session.query(FormRepresentation.form) \
                .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \
                .join(Lexeme, Lexeme.id == WordForm.lexeme_id)
            for wf in wfs:
                query_preposition = query_preposition.join(wf, wf.word_form_id == WordForm.id)

            query_preposition = query_preposition.filter(Lexeme.lemma == lemma)
            for wf, msd_el in zip(wfs, decypher_msd):
                query_preposition = query_preposition.filter(wf.value == msd_el)

            pattern_translation_hws = query_preposition.limit(1).all()
            if len(pattern_translation_hws) > 0:
                return ''.join(msd), lemma, pattern_translation_hws[0][0]
        return None, None, None

View File

@@ -14,7 +14,7 @@ class SyntacticStructure:
@staticmethod @staticmethod
def from_xml(xml): def from_xml(xml):
st = SyntacticStructure() st = SyntacticStructure()
st.id = xml.get('id') st.id = xml.get('id_nsss')
st.lbs = xml.get('LBS') st.lbs = xml.get('LBS')
assert len(list(xml)) == 1 assert len(list(xml)) == 1

View File

@@ -11,6 +11,7 @@ import concurrent.futures
import tempfile import tempfile
from progress_bar import progress from progress_bar import progress
from sloleks_db import SloleksDatabase
from word import Word from word import Word
from syntactic_structure import build_structures from syntactic_structure import build_structures
from match_store import MatchStore from match_store import MatchStore
@@ -20,16 +21,19 @@ from loader import load_files
from database import Database from database import Database
from time_info import TimeInfo from time_info import TimeInfo
from postprocessor import Postprocessor
def match_file(words, structures):
def match_file(words, structures, postprocessor):
matches = {s: [] for s in structures} matches = {s: [] for s in structures}
for s in progress(structures, "matching"): for s in progress(structures, "matching"):
for w in words: for w in words:
mhere = s.match(w) mhere = s.match(w)
for match in mhere: for match in mhere:
colocation_id = [(idx, w.lemma) for idx, w in match.items()] colocation_id = [[idx, w.lemma] for idx, w in match.items()]
colocation_id = [s.id] + list(sorted(colocation_id, key=lambda x: x[0])) colocation_id = [s.id] + list(sorted(colocation_id, key=lambda x: x[0]))
match, collocation_id = postprocessor.process(match, colocation_id)
colocation_id = tuple(colocation_id) colocation_id = tuple(colocation_id)
matches[s].append((match, colocation_id)) matches[s].append((match, colocation_id))
@@ -51,7 +55,9 @@ def main(args):
continue continue
start_time = time.time() start_time = time.time()
matches = match_file(words, structures) postprocessor = Postprocessor()
matches = match_file(words, structures, postprocessor)
match_store.add_matches(matches) match_store.add_matches(matches)
word_stats.add_words(words) word_stats.add_words(words)
database.commit() database.commit()
@@ -74,7 +80,9 @@ def main(args):
# figure out representations! # figure out representations!
if args.out or args.out_no_stat: if args.out or args.out_no_stat:
match_store.set_representations(word_stats, structures) sloleks_db = SloleksDatabase(args.sloleks_db, args.load_sloleks)
match_store.set_representations(word_stats, structures, sloleks_db=sloleks_db)
sloleks_db.close()
Writer.make_output_writer(args, max_num_components, match_store, word_stats).write_out( Writer.make_output_writer(args, max_num_components, match_store, word_stats).write_out(
structures, match_store) structures, match_store)
@@ -85,6 +93,8 @@ def main(args):
Writer.make_stats_writer(args, max_num_components, match_store, word_stats).write_out( Writer.make_stats_writer(args, max_num_components, match_store, word_stats).write_out(
structures, match_store) structures, match_store)
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Extract structures from a parsed corpus.') description='Extract structures from a parsed corpus.')
@@ -92,6 +102,7 @@ if __name__ == '__main__':
help='Structures definitions in xml file') help='Structures definitions in xml file')
parser.add_argument('input', parser.add_argument('input',
help='input file in (gz or xml currently). If none, then just database is loaded', nargs='*') help='input file in (gz or xml currently). If none, then just database is loaded', nargs='*')
parser.add_argument('--sloleks_db', type=str, help='Sloleks database credentials')
parser.add_argument('--out', parser.add_argument('--out',
help='Classic output file') help='Classic output file')
parser.add_argument('--out-no-stat', parser.add_argument('--out-no-stat',
@@ -100,7 +111,7 @@ if __name__ == '__main__':
help='Additional output file, writes more data') help='Additional output file, writes more data')
parser.add_argument('--stats', parser.add_argument('--stats',
help='Output file for statistics') help='Output file for statistics')
#
parser.add_argument('--no-msd-translate', parser.add_argument('--no-msd-translate',
help='MSDs are translated from slovene to english by default', help='MSDs are translated from slovene to english by default',
action='store_true') action='store_true')
@@ -118,6 +129,10 @@ if __name__ == '__main__':
help='Generate one output for each syntactic structure', help='Generate one output for each syntactic structure',
action='store_true') action='store_true')
parser.add_argument('--load-sloleks',
help='Tells weather sloleks is loaded into memory at the beginning of processing or not.',
action='store_true')
parser.add_argument('--sort-by', parser.add_argument('--sort-by',
help="Sort by a this column (index)", type=int, default=-1) help="Sort by a this column (index)", type=int, default=-1)
parser.add_argument('--sort-reversed', parser.add_argument('--sort-reversed',
@@ -125,6 +140,8 @@ if __name__ == '__main__':
parser.add_argument('--db', parser.add_argument('--db',
help="Database file to use (instead of memory)", default=None) help="Database file to use (instead of memory)", default=None)
parser.add_argument('--collocation_sentence_map_dest',
help="Destination to folder where collocation-sentence mapper (mappers in case of multiple-output).", default=None)
parser.add_argument('--new-db', parser.add_argument('--new-db',
help="Writes over database file, if there exists one", action='store_true') help="Writes over database file, if there exists one", action='store_true')

View File

@@ -4,6 +4,13 @@ import logging
from msd_translate import MSD_TRANSLATE from msd_translate import MSD_TRANSLATE
class WordCompressed:
def __init__(self, text, collocation, dependency_tree):
self.text = text
self.collocation = collocation
self.dependency_tree = dependency_tree
class WordMsdOnly: class WordMsdOnly:
def __init__(self, msd): def __init__(self, msd):
self.msd = msd self.msd = msd
@@ -14,6 +21,16 @@ class WordMsdOnly:
return None return None
class WordDummy:
def __init__(self, msd, lemma, text):
self.msd = msd
self.lemma = lemma
self.text = text
def most_frequent_text(self, word_renderer):
return word_renderer.render(self.lemma, self.msd)
class Word: class Word:
def __init__(self, lemma, msd, wid, text, do_msd_translate): def __init__(self, lemma, msd, wid, text, do_msd_translate):
self.lemma = lemma self.lemma = lemma
@@ -41,10 +58,10 @@ class Word:
@staticmethod @staticmethod
def get_msd(comp): def get_msd(comp):
d = dict(comp.items()) d = dict(comp.items())
if 'msd' in d: if 'ana' in d:
return d['msd']
elif 'ana' in d:
return d['ana'][4:] return d['ana'][4:]
elif 'msd' in d:
return d['msd']
else: else:
logging.error(d) logging.error(d)
raise NotImplementedError("MSD?") raise NotImplementedError("MSD?")

View File

@@ -1,8 +1,13 @@
import logging import logging
import os
from progress_bar import progress from progress_bar import progress
from formatter import OutFormatter, OutNoStatFormatter, AllFormatter, StatsFormatter from formatter import OutFormatter, OutNoStatFormatter, AllFormatter, StatsFormatter
from collocation_sentence_mapper import CollocationSentenceMapper
class Writer: class Writer:
@staticmethod @staticmethod
def other_params(args): def other_params(args):
@@ -11,23 +16,25 @@ class Writer:
@staticmethod @staticmethod
def make_output_writer(args, num_components, colocation_ids, word_renderer): def make_output_writer(args, num_components, colocation_ids, word_renderer):
params = Writer.other_params(args) params = Writer.other_params(args)
return Writer(args.out, num_components, OutFormatter(colocation_ids, word_renderer), params) return Writer(args.out, num_components, OutFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params)
@staticmethod @staticmethod
def make_output_no_stat_writer(args, num_components, colocation_ids, word_renderer): def make_output_no_stat_writer(args, num_components, colocation_ids, word_renderer):
params = Writer.other_params(args) params = Writer.other_params(args)
return Writer(args.out_no_stat, num_components, OutNoStatFormatter(colocation_ids, word_renderer), params) return Writer(args.out_no_stat, num_components, OutNoStatFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params)
@staticmethod @staticmethod
def make_all_writer(args, num_components, colocation_ids, word_renderer): def make_all_writer(args, num_components, colocation_ids, word_renderer):
return Writer(args.all, num_components, AllFormatter(colocation_ids, word_renderer), None) return Writer(args.all, num_components, AllFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, None)
@staticmethod @staticmethod
def make_stats_writer(args, num_components, colocation_ids, word_renderer): def make_stats_writer(args, num_components, colocation_ids, word_renderer):
params = Writer.other_params(args) params = Writer.other_params(args)
return Writer(args.stats, num_components, StatsFormatter(colocation_ids, word_renderer), params) return Writer(args.stats, num_components, StatsFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params)
def __init__(self, file_out, num_components, formatter, params): def __init__(self, file_out, num_components, formatter, collocation_sentence_map_dest, params):
# TODO FIX THIS
self.collocation_sentence_map_dest = collocation_sentence_map_dest
if params is None: if params is None:
self.multiple_output = False self.multiple_output = False
self.sort_by = -1 self.sort_by = -1
@@ -71,18 +78,24 @@ class Writer:
return sorted(rows, key=key, reverse=self.sort_order) return sorted(rows, key=key, reverse=self.sort_order)
def write_header(self, file_handler): def write_header(self, file_handler):
file_handler.write(", ".join(self.header()) + "\n") file_handler.write(",".join(self.header()) + "\n")
def write_out_worker(self, file_handler, structure, colocation_ids): def write_out_worker(self, file_handler, structure, colocation_ids, col_sent_map):
rows = [] rows = []
components = structure.components components = structure.components
for match in progress(colocation_ids.get_matches_for(structure), "Writing matches: {}".format(structure.id)): for match in progress(colocation_ids.get_matches_for(structure), "Writing matches: {}".format(structure.id)):
if len(match) < self.min_frequency: if len(match) < self.min_frequency:
continue continue
self.formatter.new_match(match) self.formatter.new_match(match)
variable_word_order = self.find_variable_word_order(match.matches)
if col_sent_map is not None:
# TODO find better way to get sentence_id
for words in match.matches:
col_sent_map.add_map(match.match_id, '.'.join(words['1'].id.split('.')[:-1]))
for words in match.matches: for words in match.matches:
to_write = [] to_write = []
@@ -100,7 +113,7 @@ class Writer:
to_write = [structure.id] + to_write + [match.match_id] to_write = [structure.id] + to_write + [match.match_id]
# header_right # header_right
to_write.extend(self.formatter.content_right(len(match))) to_write.extend(self.formatter.content_right(len(match), variable_word_order))
rows.append(to_write) rows.append(to_write)
if self.formatter.group(): if self.formatter.group():
@@ -108,7 +121,7 @@ class Writer:
if rows != []: if rows != []:
rows = self.sorted_rows(rows) rows = self.sorted_rows(rows)
file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n") file_handler.write("\n".join([",".join(row) for row in rows]) + "\n")
file_handler.flush() file_handler.flush()
def write_out(self, structures, colocation_ids): def write_out(self, structures, colocation_ids):
@@ -127,17 +140,29 @@ class Writer:
if not self.multiple_output: if not self.multiple_output:
fp = fp_open() fp = fp_open()
self.write_header(fp) self.write_header(fp)
col_sent_map = CollocationSentenceMapper(os.path.join(self.collocation_sentence_map_dest, 'mapper.txt')) \
if self.collocation_sentence_map_dest is not None else None
for s in progress(structures, "writing:{}".format(self.formatter)): for s in progress(structures, "writing:{}".format(self.formatter)):
if self.multiple_output: if self.multiple_output:
fp = fp_open(s.id) fp = fp_open(s.id)
self.write_header(fp) self.write_header(fp)
col_sent_map = CollocationSentenceMapper(os.path.join(self.collocation_sentence_map_dest, f'{s.id}_mapper.txt')) \
if self.collocation_sentence_map_dest is not None else None
self.formatter.set_structure(s) self.formatter.set_structure(s)
self.write_out_worker(fp, s, colocation_ids) self.write_out_worker(fp, s, colocation_ids, col_sent_map)
if self.multiple_output: if self.multiple_output:
fp_close(fp) fp_close(fp)
if not self.multiple_output: if not self.multiple_output:
fp_close(fp) fp_close(fp)
@staticmethod
def find_variable_word_order(matches):
orders = {}
for words in matches:
order = tuple([tup[0] for tup in sorted(words.items(), key=lambda x: x[1].int_id)])
orders[order] = orders.get(order, 0) + 1
return max(orders, key=orders.get)