2018-10-29 10:29:51 +00:00
|
|
|
from xml.etree import ElementTree
|
|
|
|
import re
|
|
|
|
from enum import Enum
|
2019-05-29 18:22:22 +00:00
|
|
|
from collections import defaultdict, namedtuple, Counter
|
2019-01-08 20:13:36 +00:00
|
|
|
import sys
|
2019-01-19 21:42:51 +00:00
|
|
|
import logging
|
2019-02-06 14:28:39 +00:00
|
|
|
import argparse
|
2019-02-09 12:40:57 +00:00
|
|
|
import pickle
|
2019-02-06 14:29:37 +00:00
|
|
|
import time
|
2019-02-17 14:55:17 +00:00
|
|
|
import subprocess
|
|
|
|
import concurrent.futures
|
|
|
|
import tempfile
|
2019-06-09 22:25:36 +00:00
|
|
|
from math import log2
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
from msd_translate import MSD_TRANSLATE
|
2019-06-03 07:47:36 +00:00
|
|
|
|
|
|
|
# Optional progress-bar support: fall back to a transparent pass-through when
# tqdm is not installed. The fallback is a real function (PEP 8 discourages
# assigning lambdas) and accepts tqdm-style keyword arguments so call sites
# may pass e.g. total=... without breaking on machines without tqdm.
try:
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, *args, **kwargs):
        """Identity replacement for tqdm: yields the iterable unchanged."""
        return iterable
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
|
2019-06-10 12:05:40 +00:00
|
|
|
# Largest number of components seen in any loaded structure; starts at -1 and
# is updated as a module-level global by build_structures().
MAX_NUM_COMPONENTS = -1
|
2019-01-25 10:58:40 +00:00
|
|
|
|
2018-10-29 10:29:51 +00:00
|
|
|
# Mapping from human-readable (XML) value names to their single-character MSD
# (morphosyntactic description) codes. The first group maps part-of-speech
# names to category letters; the remaining entries map individual attribute
# values (gender, number, case, ...) to their one-character codes.
CODES = {
    # part-of-speech categories -> category letters
    "Noun": "N",
    "Verb": "V",
    "Adjective": "A",
    "Adverb": "R",
    "Pronoun": "P",
    "Numeral": "M",
    "Preposition": "S",
    "Conjunction": "C",
    "Particle": "Q",
    "Interjection": "I",
    "Abbreviation": "Y",
    "Residual": "X",

    # attribute values -> one-character codes (shared across categories)
    'common': 'c',
    'proper': 'p',
    'masculine': 'm',
    'feminine': 'f',
    'neuter': 'n',
    "singular": "s",
    "dual": "d",
    "plural": "p",
    "nominative": "n",
    "genitive": "g",
    "dative": "d",
    "accusative": "a",
    "locative": "l",
    "instrumental": "i",
    "no": "n",
    "yes": "y",
    "main": "m",
    "auxiliary": "a",
    "perfective": "e",
    "progressive": "p",
    "biaspectual": "b",
    "infinitive": "n",
    "supine": "u",
    "participle": "p",
    "present": "r",
    "future": "f",
    "conditional": "c",
    "imperative": "m",
    "first": "1",
    "second": "2",
    "third": "3",
    "general": "g",
    "possessive": "s",
    "positive": "p",
    "comparative": "c",
    "superlative": "s",
    "personal": "p",
    "demonstrative": "d",
    "relative": "r",
    "reflexive": "x",
    "interrogative": "q",
    "indefinite": "i",
    "negative": "z",
    "bound": "b",
    "digit": "d",
    "roman": "r",
    "letter": "l",
    "cardinal": "c",
    "ordinal": "o",
    "pronominal": "p",
    "special": "s",
    "coordinating": "c",
    "subordinating": "s",
    "foreign": "f",
    "typo": "t",
    "program": "p",
}
|
|
|
|
|
|
|
|
# For each category letter, the ordered attribute names occupying the MSD
# string positions after the leading category character (position i in this
# list corresponds to msd[i + 1]).
TAGSET = {
    "N": ['type', 'gender', 'number', 'case', 'animate'],
    "V": ['type', 'aspect', 'vform', 'person', 'number', 'gender', 'negative'],
    "A": ['type', 'degree', 'gender', 'number', 'case', 'definiteness'],
    "P": ['type', 'person', 'gender', 'number', 'case', 'owner_number', 'owned_gender', 'clitic'],
    "M": ['form', 'type', 'gender', 'number', 'case', 'definiteness'],
    "S": ['case'],
    "C": ['type'],
    "Q": [],
    "I": [],
    "Y": [],
    "X": ['type'],
    "R": ['type', 'degree'],
}
|
|
|
|
|
|
|
|
# Default per-position regex fragments ('.' = match any character) for each
# category; used as the starting pattern in MorphologyRegex before attribute
# constraints overwrite individual positions.
# NOTE(review): 'P' has 8 attributes in TAGSET but only 6 base positions
# here — confirm whether the two trailing pronoun attributes are deliberately
# unmatched.
CATEGORY_BASES = {
    "N": ['.'] * 5,
    "V": ['.'] * 7,
    "A": ['.'] * 6,
    "R": ['.'] * 2,
    "P": ['.'] * 6,
    "M": ['.'] * 6,
    "S": ['.'] * 1,
    "C": ['.'] * 1,
    "Q": [],
    "I": [],
    "Y": [],
    "X": ['.'] * 1
}
|
|
|
|
|
2019-06-08 09:27:51 +00:00
|
|
|
class ComponentType(Enum):
    """Role of a component within a syntactic structure."""
    Other = 0   # non-core component
    Core = 2    # core component (as declared in the XML)
    Core2w = 3  # core component promoted by SyntacticStructure.determine_core2w()
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
class RestrictionType(Enum):
    """Kind of restriction a component carries (see Restriction)."""
    Morphology = 0  # matched against a word's MSD string
    Lexis = 1       # matched against a word's lemma
    MatchAll = 2    # no restriction tag: every word matches
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-02-04 10:01:30 +00:00
|
|
|
class Order(Enum):
    """Required linear order between the two ends of a dependency link."""

    FromTo = 0
    ToFrom = 1
    Any = 2

    @staticmethod
    def new(order):
        """Translate an XML order attribute (or None) into an Order member."""
        if order is None:
            return Order.Any

        known = {"to-from": Order.ToFrom, "from-to": Order.FromTo}
        if order in known:
            return known[order]
        raise NotImplementedError("What kind of ordering is: {}".format(order))

    def match(self, from_w, to_w):
        """Check whether the word positions (int_id) satisfy this order."""
        if self is Order.Any:
            return True

        first = from_w.int_id
        second = to_w.int_id

        if self is Order.FromTo:
            return first < second
        if self is Order.ToFrom:
            return second < first
        raise NotImplementedError("Should not be here: Order match")
|
|
|
|
|
2019-05-30 09:34:31 +00:00
|
|
|
|
|
|
|
class ComponentRepresentation:
    """Base class for rendering one component's representation.

    Matched words are collected via add_word(); render() computes the final
    text once via the subclass hook _render() and caches it in
    rendition_text.
    """

    def __init__(self, data, word_renderer):
        self.data = data
        self.word_renderer = word_renderer
        self.words = []             # words gathered from structure matches
        self.agreement = []         # representations that must agree with this one
        self.rendition_text = None  # cached output of _render()

    def get_agreement(self):
        """Component ids this representation agrees with (none by default)."""
        return []

    def add_word(self, word):
        """Record one matched word for later rendering."""
        self.words.append(word)

    def render(self):
        """Compute and cache the rendition text on the first call only."""
        if self.rendition_text is None:
            self.rendition_text = self._render()

    def _render(self):
        # subclass responsibility
        raise NotImplementedError("Not implemented for class: {}".format(type(self)))
|
|
|
|
|
|
|
|
class LemmaCR(ComponentRepresentation):
    """Renders the component as the lemma of its first collected word."""

    def _render(self):
        if not self.words:
            return None
        return self.words[0].lemma
|
|
|
|
|
|
|
|
class LexisCR(ComponentRepresentation):
    """Renders the component as the fixed lexis string from its definition."""

    def _render(self):
        # the 'lexis' entry is filled in by ComponentRendition.add_feature
        return self.data['lexis']
|
2019-06-08 09:42:57 +00:00
|
|
|
|
2019-05-30 09:34:31 +00:00
|
|
|
class WordFormAllCR(ComponentRepresentation):
    """Renders the component as every distinct lower-cased form, '/'-joined."""

    def _render(self):
        if not self.words:
            return None

        distinct_forms = {w.text.lower() for w in self.words}
        return "/".join(distinct_forms)
|
2019-05-30 09:34:31 +00:00
|
|
|
|
|
|
|
class WordFormAnyCR(ComponentRepresentation):
    """Renders the most frequent word form whose MSD satisfies all agreements."""

    def _render(self):
        # For every (msd, lemma) pair remember its most frequent surface text.
        # Walking most_common() in reverse lets frequent texts overwrite rarer
        # ones in the dict.
        text_forms = {}
        msd_lemma_txt_triplets = Counter((w.msd, w.lemma, w.text) for w in self.words)
        for (msd, lemma, text), _n in reversed(msd_lemma_txt_triplets.most_common()):
            text_forms[(msd, lemma)] = text

        # Candidate (msd, lemma) pairs, most frequent first. The original
        # sorted(set(...), key=lambda x: -list.count(x)) was O(n^2); Counter
        # (already imported at module level) does the same ranking in O(n log n).
        sorted_words = Counter((w.msd, w.lemma) for w in self.words).most_common()

        for (word_msd, word_lemma), _count in sorted_words:
            # every agreement constraint must accept this MSD ...
            for agr in self.agreement:
                if not agr.match(word_msd):
                    break
            else:
                # ... and only then are the agreement candidates committed
                for agr in self.agreement:
                    agr.confirm_match()

                if word_lemma is None:
                    return None
                return text_forms[(word_msd, word_lemma)]

        # no candidate satisfied every agreement
        return None
|
2019-06-08 09:42:57 +00:00
|
|
|
|
2019-05-30 09:34:31 +00:00
|
|
|
class WordFormMsdCR(WordFormAnyCR):
    """WordFormAnyCR restricted to words whose MSD matches the 'msd' selectors."""

    def __init__(self, *args):
        super().__init__(*args)
        # lemma/msd of the first word seen; used as the rendering reference
        self.lemma = None
        self.msd = None

    def check_msd(self, word_msd):
        """Return True when word_msd satisfies every selector in self.data['msd']."""
        if 'msd' not in self.data:
            return True

        category = word_msd[0]
        for attribute, expected in self.data['msd'].items():
            position = TAGSET[category].index(attribute.lower())
            actual_code = word_msd[position + 1]
            expected_code = CODES[expected]

            # '-' acts as a wildcard on either side
            if actual_code != expected_code and '-' not in (actual_code, expected_code):
                return False

        return True

    def add_word(self, word):
        """Remember the first word's lemma/msd; keep only selector-matching words."""
        if self.lemma is None:
            self.lemma = word.lemma
            self.msd = word.msd

        if self.check_msd(word.msd):
            super().add_word(word)

    def _render(self):
        # inject a synthetic word carrying the lemma's reference MSD so the
        # parent class can consider it alongside the collected words
        reference_msd = self.word_renderer.get_lemma_msd(self.lemma, self.msd)
        self.words.append(WordMsdOnly(reference_msd))

        return super()._render()
|
|
|
|
|
2019-06-02 10:53:16 +00:00
|
|
|
class WordFormAgreementCR(WordFormMsdCR):
    """Representation whose form must agree with another component's word.

    match() searches the word renderer for a form of self.lemma agreeing with
    the partner's MSD; the found text only becomes final when the partner
    confirms via confirm_match().
    """

    def __init__(self, data, word_renderer):
        super().__init__(data, word_renderer)
        # candidate text, promoted to rendition_text by confirm_match()
        self.rendition_candidate = None

    def get_agreement(self):
        """Id of the component this representation must agree with."""
        return self.data['other']

    def match(self, word_msd):
        """Look for a form of self.lemma agreeing with word_msd.

        Stores the candidate text and returns True on success, else False.
        """
        existing = [(w.msd, w.text) for w in self.words]

        lemma_available_words = self.word_renderer.available_words(self.lemma, existing)
        for candidate_msd, candidate_text in lemma_available_words:
            # the candidate must be of the same part of speech
            if self.msd[0] != candidate_msd[0]:
                continue

            if WordFormAgreementCR.check_agreement(word_msd, candidate_msd, self.data['agreement']):
                if self.check_msd(candidate_msd):
                    self.rendition_candidate = candidate_text
                    return True

        return False

    def confirm_match(self):
        """Commit the previously found candidate as the final rendition."""
        self.rendition_text = self.rendition_candidate

    @staticmethod
    def _agreement_feature(msd, agr_case):
        """Return (supported, value) for agr_case within msd.

        supported is False when the msd's category does not know agr_case at
        all (a warning is logged); value is None when the msd is too short to
        specify the feature (which counts as agreeing).
        """
        category = msd[0]
        # if not in msd, some strange msd was tried, bail out
        if agr_case not in TAGSET[category]:
            logging.warning("Cannot do agreement: {} for msd {} not found!"
                            .format(agr_case, msd))
            return False, None

        position = TAGSET[category].index(agr_case)
        # if none specified: nedolocnik, always agrees
        if position + 1 >= len(msd):
            return True, None

        # first character is the uppercase category, not part of TAGSET
        return True, msd[position + 1]

    @staticmethod
    def check_agreement(msd1, msd2, agreements):
        """Check msd1 and msd2 carry compatible values for every agreement case.

        The per-msd lookup used to be duplicated inline (marked "REPEAT (not
        DRY!)"); it now lives in _agreement_feature.
        """
        for agr_case in agreements:
            ok1, value1 = WordFormAgreementCR._agreement_feature(msd1, agr_case)
            if not ok1:
                return False
            if value1 is None:
                continue

            ok2, value2 = WordFormAgreementCR._agreement_feature(msd2, agr_case)
            if not ok2:
                return False
            if value2 is None:
                continue

            # '-' is a wildcard on either side
            if '-' not in [value1, value2] and value1 != value2:
                return False

        return True

    def render(self):
        # rendering happens lazily through match()/confirm_match()
        pass
|
|
|
|
|
|
|
|
|
2019-01-19 21:42:51 +00:00
|
|
|
class ComponentRendition:
    """Factory description for one component representation.

    add_feature() accumulates XML feature attributes, selecting which
    ComponentRepresentation subclass to instantiate; cr_instance() builds it.
    set_representations() drives the whole rendering pass for one match.
    """

    def __init__(self):
        # extra constructor data handed to the representation (lexis string,
        # msd selectors, agreement info, ...)
        self.more = {}
        self.representation_factory = ComponentRepresentation

    def add_feature(self, feature):
        """Fold one <feature> attribute dict into the factory configuration."""
        if 'rendition' in feature:
            if feature['rendition'] == "lemma":
                self.representation_factory = LemmaCR
            elif feature['rendition'] == "word_form":
                # just by default, changes with selection
                self.representation_factory = WordFormAnyCR
            elif feature['rendition'] == "lexis":
                self.representation_factory = LexisCR
                self.more['lexis'] = feature['string']
            else:
                raise NotImplementedError("Representation rendition: {}".format(feature))

        elif 'selection' in feature:
            if feature['selection'] == "msd":
                # could already be agreement
                if self.representation_factory != WordFormAgreementCR:
                    self.representation_factory = WordFormMsdCR
                self.more['msd'] = {k: v for k, v in feature.items() if k != 'selection'}
            elif feature['selection'] == "all":
                self.representation_factory = WordFormAllCR
            elif feature['selection'] == 'agreement':
                # head is of the form 'cid_<component id>'
                assert feature['head'][:4] == 'cid_'
                assert feature['msd'] is not None
                self.representation_factory = WordFormAgreementCR
                self.more['agreement'] = feature['msd'].split('+')
                self.more['other'] = feature['head'][4:]
            else:
                raise NotImplementedError("Representation selection: {}".format(feature))

    def cr_instance(self, word_renderer):
        """Instantiate the configured ComponentRepresentation subclass."""
        return self.representation_factory(self.more, word_renderer)

    @staticmethod
    def set_representations(match, word_renderer):
        """Render all component representations for one structure match.

        Fills match.representations[cid] with the rendered text (or None when
        nothing rendered).
        """
        # one representation instance per ComponentRendition per component
        representations = {}
        for c in match.structure.components:
            representations[c.idx] = []
            for rep in c.representation:
                representations[c.idx].append(rep.cr_instance(word_renderer))

        # wire agreement links; a target component must have exactly one
        # representation for agreement to be well defined
        for cid, reps in representations.items():
            for rep in reps:
                for agr in rep.get_agreement():
                    if len(representations[agr]) != 1:
                        n = len(representations[agr])
                        raise NotImplementedError(
                            "Structure {}: ".format(match.structure.id) +
                            "component {} has agreement".format(cid) +
                            " with component {}".format(agr) +
                            ", however there are {} (!= 1) representations".format(n) +
                            " of component {}!".format(agr))

                    representations[agr][0].agreement.append(rep)

        for words in match.matches:
            # first pass, check everything but agreements
            for w_id, w in words.items():
                component = match.structure.get_component(w_id)
                component_representations = representations[component.idx]
                for representation in component_representations:
                    representation.add_word(w)

        # second pass: compute (and cache) every rendition text
        for cid, reps in representations.items():
            for rep in reps:
                rep.render()

        # collect the final texts per component
        for cid, reps in representations.items():
            reps = [rep.rendition_text for rep in reps]
            if reps == []:
                # component has no representations at all: leave untouched
                pass
            elif all(r is None for r in reps):
                match.representations[cid] = None
            else:
                match.representations[cid] = " ".join(("" if r is None else r) for r in reps)
|
2019-01-19 21:42:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
class ComponentStatus(Enum):
    """How a component participates in matching (see Component._match_next)."""
    Optional = 0   # may be absent without failing the match
    Required = 1   # must match at least one word
    Forbidden = 2  # a match here invalidates the whole result
|
|
|
|
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
def get_level(restriction):
    """Return the 'level' attribute of the first feature that defines it.

    Raises RuntimeError when no feature carries a level. (The original body
    assigned the found value to a local and then fell through to the raise,
    so the function could never return — the assignment is now a return.)
    """
    for feature in restriction:
        if "level" in feature.keys():
            return feature.get("level")

    raise RuntimeError("Unreachable!")
|
|
|
|
|
|
|
|
|
2019-06-08 09:31:52 +00:00
|
|
|
def determine_ppb(rgx):
    """Rank a restriction regex by "polnopomenska beseda" weight (0-4).

    0 for adjectives/nouns/adverbs; 1-3 for verbs depending on the verb-type
    position; 4 for everything else.
    """
    category = rgx[0]
    if category in ("A", "N", "R"):
        return 0
    if category != "V":
        return 4

    verb_type = rgx[1]
    if 'a' in verb_type:
        # auxiliary verb
        return 3
    if 'm' in verb_type:
        # main verb
        return 1
    return 2
|
|
|
|
|
2019-06-11 08:02:24 +00:00
|
|
|
class MorphologyRegex:
    """Per-character matcher for MSD strings built from a morphology restriction.

    One small regex is compiled per MSD position: position 0 is the category
    letter, later positions come from CATEGORY_BASES overwritten by the
    restriction's attribute constraints.
    """

    def __init__(self, restriction):
        self.min_msd_length = 1

        # collect {attribute: (value, positive_match?)} from the feature list
        constraints = {}
        for feature in restriction:
            attributes = dict(feature.items())

            positive = True
            if "filter" in attributes:
                assert attributes['filter'] == "negative"
                positive = False
                del attributes['filter']

            assert len(attributes) == 1
            name, value = next(iter(attributes.items()))
            constraints[name] = (value, positive)

        assert 'POS' in constraints
        category = constraints['POS'][0].capitalize()
        cat_code = CODES[category]
        rgx = [cat_code] + CATEGORY_BASES[cat_code]

        del constraints['POS']

        for attribute, (value, positive) in constraints.items():
            index = TAGSET[cat_code].index(attribute.lower())
            assert index >= 0

            # '|'-separated alternatives all land in one character class
            # (single values degenerate to a one-character class)
            codes = "".join(CODES[alternative] for alternative in value.split('|'))

            rgx[index + 1] = "[{}{}]".format("" if positive else "^", codes)

            if positive:
                # a positive constraint forces the msd to be long enough
                self.min_msd_length = max(index + 1, self.min_msd_length)

        self.re_objects = [re.compile(r) for r in rgx]
        self.rgx = rgx

    def __call__(self, text):
        """Return True when text is long enough and matches every position."""
        if len(text) <= self.min_msd_length:
            return False

        return all(r.match(c) for c, r in zip(text, self.re_objects))
|
|
|
|
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-06-11 08:02:24 +00:00
|
|
|
class LexisRegex:
    """Matcher testing whether a lemma is one of the '|'-separated alternatives."""

    def __init__(self, restriction):
        merged = {}
        for feature in restriction:
            merged.update(feature.items())

        assert "lemma" in merged
        self.match_list = merged['lemma'].split('|')

    def __call__(self, text):
        """Return True when text is one of the allowed lemmas."""
        return text in self.match_list
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
|
2018-10-29 11:16:42 +00:00
|
|
|
class Restriction:
    """One restriction on a component: morphology, lexis, or match-everything."""

    def __init__(self, restriction_tag):
        # "polnopomenska beseda" weight (0-4); refined for morphology below
        self.ppb = 4

        if restriction_tag is None:
            # no restriction tag at all: everything matches
            self.type = RestrictionType.MatchAll
            self.matcher = None
            self.present = None
            return

        kind = restriction_tag.get('type')
        if kind == "morphology":
            self.type = RestrictionType.Morphology
            self.matcher = MorphologyRegex(list(restriction_tag))
            self.ppb = determine_ppb(self.matcher.rgx)
        elif kind == "lexis":
            self.type = RestrictionType.Lexis
            self.matcher = LexisRegex(list(restriction_tag))
        else:
            raise NotImplementedError()

    def match(self, word):
        """Test word against this restriction (msd for morphology, lemma for lexis)."""
        if self.type == RestrictionType.MatchAll:
            return True
        if self.type == RestrictionType.Morphology:
            return self.matcher(word.msd)
        if self.type == RestrictionType.Lexis:
            return self.matcher(word.lemma)
        raise RuntimeError("Unreachable!")
|
2018-10-29 11:16:42 +00:00
|
|
|
|
|
|
|
|
2018-10-29 10:29:51 +00:00
|
|
|
class Component:
    """One node of a syntactic structure.

    Holds restrictions, representations and links (with order constraints) to
    the components depending on it; match() walks the dependency subtree.
    """

    def __init__(self, info):
        # info: attribute dict of the <component> XML element
        idx = info['cid']
        name = info['name'] if 'name' in info else None
        typ = ComponentType.Core if info['type'] == "core" else ComponentType.Other

        # map the XML status attribute onto ComponentStatus (default: required)
        if 'status' not in info:
            status = ComponentStatus.Required
        elif info['status'] == 'forbidden':
            status = ComponentStatus.Forbidden
        elif info['status'] == 'obligatory':
            status = ComponentStatus.Required
        elif info['status'] == 'optional':
            status = ComponentStatus.Optional
        else:
            raise NotImplementedError("strange status: {}".format(info['status']))

        self.status = status
        self.name = name
        self.idx = idx
        self.restrictions = []    # list of Restriction, OR-ed in _match_self
        self.next_element = []    # (Component, link_label, Order) triples
        self.representation = []  # list of ComponentRendition
        self.selection = {}
        self.type = typ

        self.iter_ctr = 0

    def add_next(self, next_component, link_label, order):
        """Attach a dependent component reachable via link_label with the given order."""
        self.next_element.append((next_component, link_label, Order.new(order)))

    def set_restriction(self, restrictions_tag):
        """Install restrictions from a <restriction>/<restriction_or> element (or None)."""
        if restrictions_tag is None:
            # no restriction element: match everything
            self.restrictions = [Restriction(None)]

        elif restrictions_tag.tag == "restriction":
            self.restrictions = [Restriction(restrictions_tag)]

        elif restrictions_tag.tag == "restriction_or":
            # any one of the child restrictions may match
            self.restrictions = [Restriction(el) for el in restrictions_tag]

        else:
            raise RuntimeError("Unreachable")

    def set_representation(self, representation):
        """Build one ComponentRendition per representation feature group."""
        for rep in representation:
            crend = ComponentRendition()
            for feature in rep:
                crend.add_feature(feature.attrib)
            self.representation.append(crend)

    def find_next(self, deps, comps, restrs, reprs):
        """Recursively instantiate all components reachable from this one.

        Returns the flat list of created components in depth-first order.
        """
        to_ret = []
        for d in deps:
            if d[0] == self.idx:
                _, idx, dep_label, order = d

                next_component = Component(comps[idx])
                next_component.set_restriction(restrs[idx])
                next_component.set_representation(reprs[idx])
                to_ret.append(next_component)

                self.add_next(next_component, dep_label, order)
                others = next_component.find_next(deps, comps, restrs, reprs)
                to_ret.extend(others)

        return to_ret

    def name_str(self):
        """Printable component name ('_' when unnamed)."""
        return "_" if self.name is None else self.name

    def match(self, word):
        """Match word against this component and, recursively, its dependents.

        Returns a list of {component_idx: word} dicts, or None on no match.
        """
        m1 = self._match_self(word)
        if m1 is None:
            return None

        mn = self._match_next(word)
        if mn is None:
            return None

        to_ret = [m1]
        for cmatch in mn:
            # if good match but nothing to add, just continue
            if len(cmatch) == 0:
                continue

            # if more than one match found for particular component
            elif len(cmatch) > 1:
                # if more than one match in multiple components, NOPE!
                if len(to_ret) > 1:
                    logging.warning("Strange multiple match: {}".format(
                        str([w.id for w in cmatch[0].values()])))

                    # fall back to merging only the first candidate everywhere
                    for tr in to_ret:
                        tr.update(cmatch[0])
                    continue

                # yeah, so we have found more than one match, =>
                # more than one element in to_ret
                to_ret = [{**dict(to_ret[0]), **m} for m in cmatch]

            else:
                for tr in to_ret:
                    tr.update(cmatch[0])

        return to_ret

    def _match_self(self, word):
        """Return {idx: word} when any restriction accepts word; implicit None otherwise."""
        # matching
        for restr in self.restrictions:
            if restr.match(word):  # match either
                return {self.idx: word}

    def _match_next(self, word):
        """Match every linked component against word's dependents.

        Returns one match-dict list per link, or None when a required
        component found nothing / a forbidden one matched.
        """
        # matches for every component in links from this component
        to_ret = []

        # need to get all links that match
        for next, link, order in self.next_element:  # NOTE(review): 'next' shadows the builtin
            next_links = word.get_links(link)
            to_ret.append([])

            # good flag: optional/forbidden components start out satisfied
            good = next.status != ComponentStatus.Required
            for next_word in next_links:
                if not order.match(word, next_word):
                    continue

                match = next.match(next_word)

                if match is not None:
                    # special treatement for forbidden
                    if next.status == ComponentStatus.Forbidden:
                        good = False
                        break

                    else:
                        assert type(match) is list
                        to_ret[-1].extend(match)
                        good = True

            # if none matched, nothing found!
            if not good:
                return None

        return to_ret
|
2018-10-29 10:29:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
class SyntacticStructure:
    """A parsed syntactic structure: id, LBS label, and its component tree."""

    def __init__(self):
        self.id = None
        self.lbs = None
        self.components = []

    @staticmethod
    def from_xml(xml):
        """Build a SyntacticStructure from a <syntactic_structure> element."""
        st = SyntacticStructure()
        st.id = xml.get('id')
        st.lbs = xml.get('LBS')

        # exactly one child <system> element is expected
        assert len(list(xml)) == 1
        system = next(iter(xml))

        assert system.get('type') == 'JOS'
        # the system element holds exactly these three sections, in order
        components, dependencies, definitions = list(system)

        deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order'))
                for dep in dependencies]
        comps = {comp.get('cid'): dict(comp.items()) for comp in components}

        # per-component restriction element and representation groups
        restrs, forms = {}, {}

        for comp in definitions:
            n = comp.get('cid')
            restrs[n] = None
            forms[n] = []

            for el in comp:
                if el.tag.startswith("restriction"):
                    # at most one restriction element per component
                    assert restrs[n] is None
                    restrs[n] = el
                elif el.tag.startswith("representation"):
                    st.add_representation(n, el, forms)
                else:
                    raise NotImplementedError("Unknown definition: {} in structure {}"
                                              .format(el.tag, st.id))

        # the artificial root ('#') anchors the recursive component build
        fake_root_component = Component({'cid': '#', 'type': 'other'})
        st.components = fake_root_component.find_next(deps, comps, restrs, forms)

        st.determine_core2w()
        return st

    def determine_core2w(self):
        """Promote (up to) two lowest-ppb core components to Core2w."""
        ppb_components = []
        for c in self.components:
            if c.type != ComponentType.Core:
                continue

            # the component's ppb is the minimum over its restrictions
            ppb = 4
            for r in c.restrictions:
                ppb = min(r.ppb, ppb)

            ppb_components.append((c, ppb))

        ppb_components = sorted(ppb_components, key=lambda c: c[1])
        # a tie at the second/third position makes the choice ambiguous
        if len(ppb_components) > 2 and ppb_components[1][1] == ppb_components[2][1]:
            raise RuntimeError("Cannot determine 2 'jedrna polnopomenska beseda' for", self.id)

        for c, _ in ppb_components[:2]:
            c.type = ComponentType.Core2w

    def add_representation(self, n, rep_el, forms):
        """Collect usable <feature> children of a <representation> into forms[n]."""
        assert rep_el.tag == "representation"
        to_add = []
        for el in rep_el:
            assert el.tag == "feature"
            if 'rendition' in el.attrib or 'selection' in el.attrib:
                to_add.append(el)
            else:
                logging.warning("Strange representation feature in structure {}. Skipping"
                                .format(self.id))
                continue
        forms[n].append(to_add)

    def get_component(self, idx):
        """Return the component with the given id; raise when unknown."""
        for c in self.components:
            if c.idx == idx:
                return c
        raise RuntimeError("Unknown component id: {}".format(idx))

    def match(self, word):
        """Match word against the structure's first component; [] when no match."""
        matches = self.components[0].match(word)
        return [] if matches is None else matches
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-05-24 16:15:21 +00:00
|
|
|
def load_structures(filename):
    """Parse the structure-definition XML file.

    Returns a (structures, lemma_features) pair; see build_structures and
    get_lemma_features for the element shapes.
    """
    with open(filename, 'r') as fp:
        xml_root = ElementTree.XML(fp.read())

    return build_structures(xml_root), get_lemma_features(xml_root)
|
|
|
|
|
|
|
|
def build_structures(et):
    """Build a SyntacticStructure for each <syntactic_structure> element in *et*.

    Side effect: raises the module-level MAX_NUM_COMPONENTS to the largest
    component count seen, which the output writers use to size their headers.
    """
    global MAX_NUM_COMPONENTS

    structures = []
    for structure_el in et.iter('syntactic_structure'):
        parsed = SyntacticStructure.from_xml(structure_el)
        if parsed is None:
            continue

        structures.append(parsed)
        if len(parsed.components) > MAX_NUM_COMPONENTS:
            MAX_NUM_COMPONENTS = len(parsed.components)

    return structures
|
|
|
|
|
2019-05-24 16:15:21 +00:00
|
|
|
def get_lemma_features(et):
    """Map each POS letter to its lemma MSD template, e.g. 'N' -> 'Nc---'.

    Each <POS> element's MorphologyRegex is flattened position by position:
    '.' becomes a blank (later '-'), a single letter stays, and a one-letter
    character class '[x]' contributes 'x'.  Anything else is rejected.
    Returns an empty dict when *et* has no <lemma_features> element.
    """
    lf = et.find('lemma_features')
    if lf is None:
        return {}

    result = {}
    for pos in lf.iter('POS'):
        rgx_list = MorphologyRegex(pos).rgx

        pieces = []
        for position in rgx_list:
            if position == ".":
                pieces.append(" ")
            elif len(position) == 1:
                pieces.append(position)
            elif len(position) == 3 and position[0] == "[" and position[2] == "]":
                pieces.append(position[1])
            else:
                raise RuntimeError("Strange rgx for lemma_feature...")
        rgx_str = "".join(pieces)

        # templates are keyed by the (uppercase) POS letter in position 0
        assert rgx_str[0].isupper()
        result[rgx_str[0]] = rgx_str.strip().replace(' ', '-')

    return result
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-01-19 21:42:51 +00:00
|
|
|
def get_msd(comp):
    """Extract the MSD tag from an element's attributes.

    Accepts either an 'msd' attribute (used verbatim) or an 'ana' attribute
    of the form 'msd:<tag>' (the 4-character prefix is stripped).

    Raises:
        NotImplementedError: when neither attribute is present.
    """
    d = dict(comp.items())
    if 'msd' in d:
        return d['msd']
    elif 'ana' in d:
        return d['ana'][4:]
    else:
        # BUGFIX: logging.error() has no 'file' kwarg (that is print()'s);
        # the old call raised TypeError instead of logging the attributes
        logging.error("Cannot extract msd from attributes: %s", d)
        raise NotImplementedError("MSD?")
|
|
|
|
|
2019-06-11 08:26:10 +00:00
|
|
|
|
|
|
|
class WordMsdOnly:
    """Stand-in for a Word carrying only an MSD tag; lemma and text are absent."""

    def __init__(self, msd):
        self.lemma = None
        self.text = None
        self.msd = msd

    def most_frequent_text(self, _):
        """There is no surface form to render; always None."""
        return None
|
2019-06-11 08:26:10 +00:00
|
|
|
|
2019-05-24 16:15:21 +00:00
|
|
|
|
2018-10-29 10:29:51 +00:00
|
|
|
class Word:
    """A corpus token parsed from a TEI <w> (or <pc>) element, with its syntactic links."""

    def __init__(self, xml, do_msd_translate):
        """Extract lemma, msd, id and text from *xml*.

        xml: ElementTree element with 'lemma' and 'id' attributes and an MSD
            (via get_msd).
        do_msd_translate: when true, map the MSD through MSD_TRANSLATE.
        """
        self.lemma = xml.get('lemma')
        self.msd = MSD_TRANSLATE[get_msd(xml)] if do_msd_translate else get_msd(xml)
        self.id = xml.get('id')
        self.text = xml.text
        # link label -> list of target Words, filled via add_link()
        self.links = defaultdict(list)

        # numeric tail of the id: last dot-separated part, with a possible
        # single leading letter (e.g. "t12") stripped
        last_num = self.id.split('.')[-1]
        if last_num[0] not in '0123456789':
            last_num = last_num[1:]
        self.int_id = int(last_num)

        assert None not in (self.id, self.lemma, self.msd)

    @staticmethod
    def pc_word(pc, do_msd_translate):
        """Alternate constructor for punctuation <pc> elements.

        The punctuation text doubles as its lemma; the MSD is the fixed
        punctuation tag ("N" translated, "U" untranslated).
        """
        pc.set('lemma', pc.text)
        pc.set('msd', "N" if do_msd_translate else "U")
        return Word(pc, do_msd_translate)

    def add_link(self, link, to):
        """Record a dependency link of type *link* from this word to Word *to*."""
        self.links[link].append(to)

    def get_links(self, link):
        """Return words linked by *link*.

        A compound label like "a|b" is resolved (once) as the union of its
        parts and cached under the compound key.
        """
        if link not in self.links and "|" in link:
            for l in link.split('|'):
                self.links[link].extend(self.links[l])

        return self.links[link]

    def most_frequent_text(self, word_renderer):
        """Most frequent surface form for this word's (lemma, msd) pair."""
        return word_renderer.render(self.lemma, self.msd)
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-05-22 09:22:07 +00:00
|
|
|
class WordMsdRenderer:
    """Accumulates every Word in the corpus, then precomputes per-lemma lookup
    tables: most frequent surface form per MSD, frequency lists, and a merged
    "common" MSD per lemma."""

    def __init__(self, lemma_features):
        # lemma_features: POS letter -> lemma MSD template (get_lemma_features)
        self.all_words = []
        # lemma -> {msd -> (most frequent text, occurrence count)}
        self.rendered_words = {}
        # lemma -> [(msd, text, count)] in descending count order
        self.frequent_words = {}
        # (lemma, first MSD letter) -> corpus frequency
        self.num_words = {}
        # lemma -> positionwise-merged MSD over all its occurrences
        self.lemma_msd = {}
        self.lemma_features = lemma_features
        # cache for merge_msd: the same (common, new) pairs recur constantly
        self.memoized_msd_merges = {}

    def add_words(self, words):
        """Append one batch (typically one input file) of Word objects."""
        self.all_words.extend(words)

    def num_all_words(self):
        """Total number of collected words — the corpus size N."""
        return len(self.all_words)

    def generate_renders(self):
        """Build all lookup tables; call once, after every file was added."""
        num_words = defaultdict(int)
        # lemma -> msd -> list of surface texts
        data = defaultdict(lambda: defaultdict(list))
        for w in self.all_words:
            data[w.lemma][w.msd].append(w.text)

        for lemma, ld in data.items():
            self.rendered_words[lemma] = {}
            freq_words = defaultdict(int)
            # "*" = "no constraint yet"; each merge narrows per position
            common_msd = "*" * 10

            for msd, texts in ld.items():
                # TODO: this should be out of generate_renders...
                num_words[(lemma, msd[0])] += len(texts)

                # most frequent surface form for this lemma+msd
                rep = max(set(texts), key=texts.count)
                self.rendered_words[lemma][msd] = (rep, len(texts))

                for txt in texts:
                    freq_words[(msd, txt)] += 1

                common_msd = self.merge_msd(common_msd, msd)

            self.lemma_msd[lemma] = common_msd

            self.frequent_words[lemma] = []
            for (msd, txt), n in sorted(freq_words.items(), key=lambda x: -x[1]):
                self.frequent_words[lemma].append((msd, txt, n))

        # overlay the per-POS lemma template: template letters win, '-' in the
        # template falls back to the merged letter
        lf = self.lemma_features
        for lemma in self.lemma_msd:
            cmsd = self.lemma_msd[lemma]
            if cmsd[0] in lf:
                self.lemma_msd[lemma] = "".join(
                    l1 if l1 != "-" else l2 for l1, l2 in zip(lf[cmsd[0]], cmsd)
                )

        self.num_words = dict(num_words)

    def merge_msd(self, common_msd, new_msd):
        """Positionwise merge of two MSDs: '*' adopts the new letter; a clash
        becomes '-'; agreement keeps the letter.  Memoized."""
        key = (common_msd, new_msd)
        if key in self.memoized_msd_merges:
            return self.memoized_msd_merges[key]

        def merge_letter(l1, l2):
            if l1 == "*":
                return l2
            elif l1 != l2:
                return "-"
            else:
                return l1

        value = "".join(merge_letter(l1, l2) for l1, l2 in zip(common_msd, new_msd))
        self.memoized_msd_merges[key] = value
        return value

    def render(self, lemma, msd):
        """Most frequent text for (lemma, msd); implicitly None when unknown."""
        if lemma in self.rendered_words:
            if msd in self.rendered_words[lemma]:
                return self.rendered_words[lemma][msd][0]

    def available_words(self, lemma, existing_texts):
        """Yield (msd, text) candidates: first the forms already seen in
        *existing_texts* by frequency, then the lemma's other known forms."""
        counted_texts = Counter(existing_texts)
        for (msd, text), _n in counted_texts.most_common():
            yield (msd, text)

        if lemma in self.frequent_words:
            for msd, text, _ in self.frequent_words[lemma]:
                if (msd, text) not in counted_texts:
                    yield (msd, text)

    def get_lemma_msd(self, lemma, word_msd):
        """Return the merged lemma MSD; if its POS letter clashed ('-'), fall
        back to the per-POS template for *word_msd*'s POS, or '-' if none."""
        # should be here, since we collect every lemmas
        lemma_msd = self.lemma_msd[lemma]

        if lemma_msd[0] == '-':
            if word_msd[0] in self.lemma_features:
                return self.lemma_features[word_msd[0]]
            else:
                return '-'
        else:
            return lemma_msd
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-01-19 21:42:51 +00:00
|
|
|
def is_root_id(id_):
    """True when *id_* has exactly three dot-separated parts (a sentence-root id)."""
    return id_.count('.') == 2
|
|
|
|
|
|
|
|
|
2019-02-12 10:41:35 +00:00
|
|
|
def load_files(args):
    """Lazily yield the Word list of each input file listed in *args.input*.

    When args.count_files is set, a " :: n / total" progress suffix is passed
    through to the per-file log line.
    """
    filenames = args.input
    skip_id_check = args.skip_id_check
    do_msd_translate = not args.no_msd_translate
    total = len(filenames)

    for n, fname in enumerate(filenames):
        status = " :: {} / {}".format(n, total) if args.count_files else ""
        yield load_tei_file(fname, skip_id_check, do_msd_translate, args.pc_tag, status)
|
2019-02-09 12:25:26 +00:00
|
|
|
|
|
|
|
|
2019-02-12 11:19:21 +00:00
|
|
|
def load_tei_file(filename, skip_id_check, do_msd_translate, pc_tag, status):
    """Parse one TEI XML file into a list of Word objects with links attached.

    filename: path of the TEI file.
    skip_id_check: when false, a link source with a root-shaped id aborts the run.
    do_msd_translate: forwarded to Word (MSD translation).
    pc_tag: element tag used for punctuation tokens (e.g. "pc").
    status: progress suffix for the log line only.
    """
    logging.info("LOADING FILE: {}{}".format(filename, status))

    with open(filename, 'r') as fp:
        # strip the default namespace and 'xml:' prefixes so ElementTree
        # queries can use plain tag/attribute names
        xmlstring = re.sub(' xmlns="[^"]+"', '', fp.read(), count=1)
        xmlstring = xmlstring.replace(' xml:', ' ')
        et = ElementTree.XML(xmlstring)

    # token id -> Word, for both words and punctuation
    words = {}
    for w in et.iter("w"):
        words[w.get('id')] = Word(w, do_msd_translate)
    for pc in et.iter(pc_tag):
        words[pc.get('id')] = Word.pc_word(pc, do_msd_translate)

    for l in et.iter("link"):
        # two link encodings are supported: explicit dep/afun/from attributes,
        # or ana="syn:<label>" with a two-id 'target' attribute
        if 'dep' in l.keys():
            ana = l.get('afun')
            lfrom = l.get('from')
            dest = l.get('dep')
        else:
            ana = l.get('ana')
            if ana[:4] != 'syn:':  # dont bother...
                continue
            ana = ana[4:]
            lfrom, dest = l.get('target').replace('#', '').split()

        if lfrom in words:
            # a link *from* a root id indicates malformed input
            if not skip_id_check and is_root_id(lfrom):
                logging.error("NOO: {}".format(lfrom))
                sys.exit(1)

            if dest in words:
                next_word = words[dest]
                words[lfrom].add_link(ana, next_word)
            else:
                logging.error("Unknown id: {}".format(dest))
                sys.exit(1)

        else:
            # strange errors, just skip...
            pass

    return list(words.values())
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-06-08 09:42:57 +00:00
|
|
|
|
2019-06-09 11:27:04 +00:00
|
|
|
class Formatter:
    """Base class for output formatters.

    Subclasses provide the repeated (per-component) and right-hand
    (per-colocation) header/content columns; the optional hooks let them
    react to the current structure and match.
    """

    def __init__(self, colocation_ids, word_renderer):
        self.colocation_ids = colocation_ids
        self.word_renderer = word_renderer
        self.additional_init()

    # --- abstract interface -------------------------------------------------

    def header_repeat(self):
        """Column names repeated once per component."""
        raise NotImplementedError("Header repeat formatter not implemented")

    def header_right(self):
        """Column names appearing once at the right of each row."""
        raise NotImplementedError("Header right formatter not implemented")

    def content_repeat(self, words, representations, idx, sidx):
        """Cell values for component *idx* of one match."""
        raise NotImplementedError("Content repeat formatter not implemented")

    def content_right(self, freq):
        """Cell values for the right-hand columns of one row."""
        raise NotImplementedError("Content right formatter not implemented")

    def group(self):
        """Whether rows sharing a colocation id collapse into a single row."""
        raise NotImplementedError("Group for formatter not implemented")

    # --- optional hooks -----------------------------------------------------

    def additional_init(self):
        """Subclass initialization hook, run at the end of __init__."""
        pass

    def length(self):
        """Number of repeated columns per component."""
        return len(self.header_repeat())

    def set_structure(self, structure):
        """Called before a structure is written; default is a no-op."""
        pass

    def new_match(self, match):
        """Called before a match is written; default is a no-op."""
        pass
|
|
|
|
|
2019-06-09 11:27:24 +00:00
|
|
|
|
2019-06-10 08:49:53 +00:00
|
|
|
class OutNoStatFormatter(Formatter):
    """Formatter for the main output file, without statistics columns."""

    def additional_init(self):
        # accumulates the representative forms of the current row's components
        self.representation = ""

    def header_repeat(self):
        return ["Lemma", "Representative_form", "RF_scenario"]

    def header_right(self):
        return ["Joint_representative_form", "Frequency"]

    def content_repeat(self, words, representations, idx, _sidx):
        word = words[idx]
        if idx not in representations:
            return [word.lemma, "", ""]

        rep = representations[idx]
        if rep is None:
            # no usable rendition: fall back to the bare lemma
            self.representation = "{} {}".format(self.representation, word.lemma)
            return [word.lemma, word.lemma, "lemma_fallback"]

        self.representation = "{} {}".format(self.representation, rep)
        return [word.lemma, rep, "ok"]

    def content_right(self, freq):
        """Emit the joint form accumulated so far and reset the accumulator."""
        joined = re.sub(' +', ' ', self.representation.strip())
        self.representation = ""
        return [joined, str(freq)]

    def group(self):
        return True
|
|
|
|
|
2019-06-09 11:27:24 +00:00
|
|
|
class AllFormatter(Formatter):
    """Formatter that dumps every matched token verbatim; no grouping, no stats."""

    def header_repeat(self):
        return ["Token_ID", "Word_form", "Lemma", "Msd"]

    def header_right(self):
        return []

    def content_repeat(self, words, _representations, idx, _sidx):
        token = words[idx]
        return [token.id, token.text, token.lemma, token.msd]

    def content_right(self, _freq):
        return []

    def group(self):
        return False
|
2019-02-19 12:56:32 +00:00
|
|
|
|
2019-06-09 20:25:58 +00:00
|
|
|
class StatsFormatter(Formatter):
    """Formatter emitting association statistics (delta-P, logDice, dispersion)."""

    def additional_init(self):
        # per-match stats dict; jppb = component ids of the two Core2w words;
        # corew = component ids of every non-Other ("core") component
        self.stats = None
        self.jppb = None
        self.corew = None

    @staticmethod
    def stat_str(num):
        """Render floats with 5 decimals; everything else via str()."""
        return "{:.5f}".format(num) if isinstance(num, float) else str(num)

    def set_structure(self, structure):
        """Cache which component ids are Core2w and which are core for *structure*."""
        jppb = []
        corew = []

        for component in structure.components:
            if component.type == ComponentType.Core2w:
                jppb.append(component.idx)
            if component.type != ComponentType.Other:
                corew.append(component.idx)

        # the pairwise statistics below require exactly two core2w words
        assert(len(jppb) == 2)
        self.jppb = tuple(jppb)
        self.corew = tuple(corew)

    def new_match(self, match):
        """Precompute per-component corpus frequencies and delta-P for *match*."""
        self.stats = {"freq": {}}

        for cid in self.corew:
            if cid not in match.matches[0]:
                freq = 0
            else:
                word = match.matches[0][cid]
                # corpus frequency of (lemma, POS letter)
                freq = self.word_renderer.num_words[(word.lemma, word.msd[0])]

            self.stats["freq"][cid] = freq

        fx = self.stats["freq"][self.jppb[0]]
        fy = self.stats["freq"][self.jppb[1]]
        freq = len(match)
        N = self.word_renderer.num_all_words()

        # delta-P in both directions
        # NOTE(review): divides by fx/fy — assumes both core2w words are
        # always present in the first match (freq > 0); confirm upstream
        self.stats['d12'] = freq / fx - (fy - freq) / (N - fx)
        self.stats['d21'] = freq / fy - (fx - freq) / (N - fy)

        self.stats['df'] = match.distinct_forms()
        self.stats['freq_all'] = freq

    def header_repeat(self):
        return ["Distribution"]

    def header_right(self):
        return ["Delta_p12", "Delta_p21", "LogDice_core", "LogDice_all", "Distinct_forms"]

    def content_repeat(self, words, representations, idx, sidx):
        # not a core word
        if idx not in self.corew:
            return [""] * self.length()

        word = words[idx]
        # dispersion: in how many distinct colocations this lemma appears
        # at this component of this structure
        key = (sidx, idx, word.lemma)
        distribution = self.colocation_ids.dispersions[key]
        return [self.stat_str(distribution)]

    def content_right(self, freq):
        fx = self.stats["freq"][self.jppb[0]]
        fy = self.stats["freq"][self.jppb[1]]
        freq = self.stats['freq_all']
        # logDice over the two core2w words
        logdice_core = 14 + log2(2 * freq / (fx + fy))

        # generalized logDice over all core components with nonzero frequency
        fi = [self.stats["freq"][idx] for idx in self.corew]
        fi = [f for f in fi if f > 0]
        logdice_all = 14 + log2(len(fi) * freq / sum(fi))

        return [self.stat_str(x) for x in (
            self.stats["d12"], self.stats["d21"], logdice_core, logdice_all, self.stats['df']
        )]

    def group(self):
        return True
|
|
|
|
|
2019-06-10 08:49:53 +00:00
|
|
|
class OutFormatter(Formatter):
    """Composite formatter: OutNoStatFormatter columns followed by StatsFormatter ones."""

    def additional_init(self):
        self.f1 = OutNoStatFormatter(self.colocation_ids, self.word_renderer)
        self.f2 = StatsFormatter(self.colocation_ids, self.word_renderer)

    def header_repeat(self):
        return self.f1.header_repeat() + self.f2.header_repeat()

    def header_right(self):
        return self.f1.header_right() + self.f2.header_right()

    def content_repeat(self, words, representations, idx, sidx):
        left = self.f1.content_repeat(words, representations, idx, sidx)
        right = self.f2.content_repeat(words, representations, idx, sidx)
        return left + right

    def content_right(self, freq):
        return self.f1.content_right(freq) + self.f2.content_right(freq)

    def group(self):
        return self.f1.group() and self.f2.group()

    def set_structure(self, structure):
        # only the stats half tracks the current structure and match
        self.f2.set_structure(structure)

    def new_match(self, match):
        self.f2.new_match(match)
|
|
|
|
|
2019-05-12 21:00:38 +00:00
|
|
|
|
2019-06-09 11:35:19 +00:00
|
|
|
class Writer:
    """Writes matched structures to CSV-ish output files using a Formatter."""

    @staticmethod
    def other_params(args):
        """Pack the (multiple_output, sort_by, sort_reversed) options from *args*."""
        return (args.multiple_output, int(args.sort_by), args.sort_reversed)

    @staticmethod
    def make_output_writer(args, colocation_ids, word_renderer):
        """Writer for the main output (forms + statistics)."""
        params = Writer.other_params(args)
        return Writer(args.out, OutFormatter(colocation_ids, word_renderer), params)

    @staticmethod
    def make_output_no_stat_writer(args, colocation_ids, word_renderer):
        """Writer for the output without statistics columns."""
        params = Writer.other_params(args)
        return Writer(args.out_no_stat, OutNoStatFormatter(colocation_ids, word_renderer), params)

    @staticmethod
    def make_all_writer(args, colocation_ids, word_renderer):
        """Writer dumping every token; never sorted or split (params=None)."""
        return Writer(args.all, AllFormatter(colocation_ids, word_renderer), None)

    @staticmethod
    def make_stats_writer(args, colocation_ids, word_renderer):
        """Writer for the statistics-only output."""
        params = Writer.other_params(args)
        return Writer(args.stats, StatsFormatter(colocation_ids, word_renderer), params)

    def __init__(self, file_out, formatter, params):
        """file_out may be None (writer becomes a no-op) or a path; params is
        (multiple_output, sort_by, sort_reversed) or None for defaults."""
        if params is None:
            self.multiple_output = False
            self.sort_by = -1          # -1 = no sorting
            self.sort_order = None
        else:
            self.multiple_output = params[0]
            self.sort_by = params[1]
            self.sort_order = params[2]

        self.output_file = file_out
        self.formatter = formatter

    def header(self):
        """Full column list: structure id, per-component repeats, colocation id,
        right-hand columns.  Repeats are sized by the global MAX_NUM_COMPONENTS."""
        repeating_cols = self.formatter.header_repeat()
        cols = ["C{}_{}".format(i + 1, thd) for i in range(MAX_NUM_COMPONENTS)
                for thd in repeating_cols]

        cols = ["Structure_ID"] + cols + ["Colocation_ID"]
        cols += self.formatter.header_right()
        return cols

    def sorted_rows(self, rows):
        """Sort *rows* by the configured column; numeric when the first row's
        cell parses as int, else case-insensitive string order."""
        if self.sort_by < 0 or len(rows) < 2:
            return rows

        if len(rows[0]) <= self.sort_by:
            logging.warning("Cannot sort by column #{}: Not enough columns!".format(len(rows[0])))
            return rows

        # probe only the first row to pick the key type
        # NOTE(review): a later row failing int() would raise during sorted()
        try:
            int(rows[0][self.sort_by])
            def key(row):
                return int(row[self.sort_by])
        except ValueError:
            def key(row):
                return row[self.sort_by].lower()

        return sorted(rows, key=key, reverse=self.sort_order)

    def write_header(self, file_handler):
        file_handler.write(", ".join(self.header()) + "\n")

    def write_out_worker(self, file_handler, structure, colocation_ids):
        """Render and write all rows of one structure to *file_handler*."""
        rows = []
        components = structure.components

        for match in colocation_ids.get_matches_for(structure):
            self.formatter.new_match(match)

            for words in match.matches:
                to_write = []

                for idx, _comp in enumerate(components):
                    # match keys are 1-based string component indices
                    idx = str(idx + 1)
                    if idx not in words:
                        to_write.extend([""] * self.formatter.length())
                    else:
                        to_write.extend(self.formatter.content_repeat(words, match.representations, idx, structure.id))

                # make them equal size
                to_write.extend([""] * (MAX_NUM_COMPONENTS * self.formatter.length() - len(to_write)))

                # structure_id and colocation_id
                to_write = [structure.id] + to_write + [match.match_id]

                # header_right
                to_write.extend(self.formatter.content_right(len(match)))
                rows.append(to_write)

                # grouping formatters emit only one row per colocation
                if self.formatter.group():
                    break

        if rows != []:
            rows = self.sorted_rows(rows)
            file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n")
            file_handler.flush()

    def write_out(self, structures, colocation_ids):
        """Write everything: one file, or one file per structure when
        multiple_output is set.  No-op when no output file was configured."""
        if self.output_file is None:
            return

        def fp_close(fp_):
            # never close stdout if it was passed through as the target
            if fp_ != sys.stdout:
                fp_.close()

        def fp_open(snum=None):
            if snum is None:
                return open(self.output_file, "w")
            else:
                return open("{}.{}".format(self.output_file, snum), "w")

        if not self.multiple_output:
            fp = fp_open()
            self.write_header(fp)

        for s in structures:
            if self.multiple_output:
                fp = fp_open(s.id)
                self.write_header(fp)

            self.formatter.set_structure(s)
            self.write_out_worker(fp, s, colocation_ids)

            if self.multiple_output:
                fp_close(fp)

        if not self.multiple_output:
            fp_close(fp)
|
2018-10-29 10:29:51 +00:00
|
|
|
|
2019-05-22 09:22:07 +00:00
|
|
|
class StructureMatch:
    """All concrete matches sharing one colocation id within one structure."""

    def __init__(self, match_id, structure):
        self.match_id = match_id
        self.structure = structure
        # each entry maps component idx -> matched Word
        self.matches = []
        self.representations = {}

    def distinct_forms(self):
        """Number of distinct surface-form sequences across the matches."""
        keys = list(self.matches[0].keys())
        seen = {" ".join(words[k].text for k in keys) for words in self.matches}
        return len(seen)

    def append(self, match):
        """Add one concrete component->Word mapping."""
        self.matches.append(match)

    def __len__(self):
        return len(self.matches)
|
2019-02-06 14:29:37 +00:00
|
|
|
|
2019-02-06 14:29:03 +00:00
|
|
|
class ColocationIds:
    """Registry of all colocations found, keyed by colocation id tuple."""

    def __init__(self):
        # colocation id tuple -> StructureMatch
        self.data = {}
        # NOTE(review): reads a module-level `args` (set in the entry point),
        # not a parameter — fragile; confirm `args` exists before construction
        self.min_frequency = args.min_freq
        # (structure_id, component_id, lemma) -> dispersion count
        self.dispersions = {}

    def _add_match(self, key, structure, match):
        """Append *match* under colocation *key*, creating its StructureMatch
        (with a sequential 1-based id) on first sight."""
        if key not in self.data:
            self.data[key] = StructureMatch(str(len(self.data) + 1), structure)
        self.data[key].append(match)

    def get(self, key, n):
        return self.data[key][n]

    def add_matches(self, matches):
        """Ingest the {structure: [(match, colocation_id)]} dict from match_file."""
        for structure, nms in matches.items():
            for nm in nms:
                self._add_match(nm[1], structure, nm[0])

    def get_matches_for(self, structure):
        """Yield every StructureMatch belonging to *structure*."""
        for _cid_tup, sm in self.data.items():
            if sm.structure != structure:
                continue

            yield sm

    def set_representations(self, word_renderer):
        """Compute representative forms for every colocation (delegated)."""
        for _1, sm in tqdm(self.data.items()):
            ComponentRendition.set_representations(sm, word_renderer)

    def determine_colocation_dispersions(self):
        """Count, per (structure, component, lemma), in how many distinct
        colocations that lemma occurs; stored in self.dispersions."""
        dispersions = defaultdict(int)
        for (structure_id, *word_tups) in self.data.keys():
            for component_id, lemma in word_tups:
                dispersions[(structure_id, component_id, lemma)] += 1
        self.dispersions = dict(dispersions)
|
2019-05-13 08:48:21 +00:00
|
|
|
|
|
|
|
|
2019-02-14 13:33:15 +00:00
|
|
|
def match_file(words, structures):
    """Match every word against every structure.

    Returns {structure: [(match, colocation_id)]}, where colocation_id is a
    hashable tuple of the structure id followed by the matched
    (component idx, lemma) pairs sorted by component idx.
    """
    matches = {s: [] for s in structures}

    for structure in tqdm(structures):
        for word in words:
            for match in structure.match(word):
                pairs = sorted(((idx, w.lemma) for idx, w in match.items()),
                               key=lambda x: x[0])
                colocation_id = tuple([structure.id] + pairs)

                matches[structure].append((match, colocation_id))

    return matches
|
|
|
|
|
2019-02-14 13:33:15 +00:00
|
|
|
|
2019-06-08 13:43:53 +00:00
|
|
|
def main(structures_file, args):
    """Entry point: load structures, match all input files, write outputs.

    When ``args.parallel`` is set, the script re-invokes itself once per
    input file in worker subprocesses (each writing its result via
    ``--match-to-file``) and merges the pickled results; otherwise the
    inputs are processed sequentially in-process.
    """
    structures, lemma_msds = load_structures(structures_file)
    colocation_ids = ColocationIds()
    word_renderer = WordMsdRenderer(lemma_msds)

    if args.parallel:
        num_parallel = int(args.parallel)

        # make temporary directory to hold temporary files
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Build the child command line from our own argv.  Copy it first:
            # aliasing sys.argv here would make the removals below mutate the
            # global argv in place.
            cmd = list(sys.argv)
            for inpt in args.input:
                if inpt in cmd:
                    cmd.remove(inpt)

            # remove "--parallel X" so children run sequentially
            pidx = cmd.index('--parallel')
            del cmd[pidx]
            del cmd[pidx]

            def func(n):
                # Child n processes only args.input[n] and pickles its result.
                cmdn = [sys.executable] + cmd + [args.input[n],
                    "--match-to-file", "{}/{}.p".format(tmpdirname, n)]
                subprocess.check_call(cmdn)
                return n

            # use ThreadPoolExecuter to run subprocesses in parallel using py threads
            with concurrent.futures.ThreadPoolExecutor(max_workers=num_parallel) as executor:
                # fancy interface to wait for threads to finish
                for id_input in executor.map(func, [i for i, _ in enumerate(args.input)]):
                    with open("{}/{}.p".format(tmpdirname, id_input), "rb") as fp:
                        words, matches = pickle.load(fp)

                    colocation_ids.add_matches(matches)
                    word_renderer.add_words(words)

    else:
        for words in load_files(args):
            matches = match_file(words, structures)

            # just save to temporary file, used for children of a parallel process
            # MUST NOT have more than one file
            if args.match_to_file is not None:
                with open(args.match_to_file, "wb") as fp:
                    pickle.dump((words, matches), fp)
                    return
            else:
                colocation_ids.add_matches(matches)
                word_renderer.add_words(words)

    # get word renders for lemma/msd
    word_renderer.generate_renders()
    colocation_ids.determine_colocation_dispersions()

    # figure out representations!
    if args.out or args.out_no_stat:
        colocation_ids.set_representations(word_renderer)

    Writer.make_output_writer(args, colocation_ids, word_renderer).write_out(
        structures, colocation_ids)
    Writer.make_output_no_stat_writer(args, colocation_ids, word_renderer).write_out(
        structures, colocation_ids)
    Writer.make_all_writer(args, colocation_ids, word_renderer).write_out(
        structures, colocation_ids)
    Writer.make_stats_writer(args, colocation_ids, word_renderer).write_out(
        structures, colocation_ids)
|
2019-01-25 10:58:40 +00:00
|
|
|
|
2019-01-19 21:42:51 +00:00
|
|
|
if __name__ == '__main__':
    # Command-line interface; the two positional arguments are the structure
    # definitions and one or more corpus files.
    parser = argparse.ArgumentParser(
        description='Extract structures from a parsed corpus.')
    parser.add_argument('structures',
                        help='Structures definitions in xml file')
    parser.add_argument('input',
                        help='input xml file in `ssj500k form`, can list more than one', nargs='+')
    parser.add_argument('--out',
                        help='Classic output file')
    parser.add_argument('--out-no-stat',
                        help='Output file, but without statistical columns')
    parser.add_argument('--all',
                        help='Additional output file, writes more data')
    parser.add_argument('--stats',
                        help='Output file for statistics')

    parser.add_argument('--no-msd-translate',
                        help='MSDs are translated from slovene to english by default',
                        action='store_true')
    parser.add_argument('--skip-id-check',
                        help='Skips checks for ids of <w> and <pc>, if they are in correct format',
                        action='store_true')
    parser.add_argument('--min_freq', help='Minimal frequency in output',
                        type=int, default=0, const=1, nargs='?')
    parser.add_argument('--verbose', help='Enable verbose output to stderr',
                        choices=["warning", "info", "debug"], default="info",
                        const="info", nargs='?')
    parser.add_argument('--count-files',
                        help="Count files: more verbose output", action='store_true')
    parser.add_argument('--multiple-output',
                        help='Generate one output for each syntactic structure',
                        action='store_true')

    parser.add_argument('--sort-by',
                        help="Sort by this column (index)", type=int, default=-1)
    parser.add_argument('--sort-reversed',
                        help="Sort in reversed order", action='store_true')

    parser.add_argument('--pc-tag',
                        help='Tag for separators, usually pc or c', default="pc")
    parser.add_argument('--parallel',
                        help='Run in multiple processes, should speed things up')
    # internal flag used by parallel worker subprocesses; see main()
    parser.add_argument('--match-to-file', help='Do not use!')

    args = parser.parse_args()
    logging.basicConfig(stream=sys.stderr, level=args.verbose.upper())

    start = time.time()
    main(args.structures, args)
    logging.info("TIME: {}".format(time.time() - start))
|