# luscenje_struktur/wani.py

from xml.etree import ElementTree
import re
from enum import Enum
from collections import defaultdict
import sys
import logging
import argparse
import pickle
import time
import subprocess
import concurrent.futures
import tempfile
from msd_translate import MSD_TRANSLATE
MAX_NUM_COMPONENTS = 5
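# Lookup tables for the MSD (morphosyntactic description) tags used throughout
# this script: CODES maps category and attribute-value names to the single
# character used at the corresponding MSD position, TAGSET lists the attribute
# order for each category code, and CATEGORY_BASES holds the per-position
# wildcard patterns used when building morphology regexes.
# Orientation example (illustrative only): an MSD such as "Ncfsg" reads as
# Noun, type=common, gender=feminine, number=singular, case=genitive.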
CODES = {
"Noun": "N",
"Verb": "V",
"Adjective": "A",
"Adverb": "R",
"Pronoun": "P",
"Numeral": "M",
"Preposition": "S",
"Conjunction": "C",
"Particle": "Q",
"Interjection": "I",
"Abbreviation": "Y",
"Residual": "X",
'common': 'c',
'proper': 'p',
'masculine': 'm',
'feminine': 'f',
'neuter': 'n',
"singular": "s",
"dual": "d",
"plural": "p",
"nominative": "n",
"genitive": "g",
"dative": "d",
"accusative": "a",
"locative": "l",
"instrumental": "i",
"no": "n",
"yes": "y",
"main": "m",
"auxiliary": "a",
"perfective": "e",
"progressive": "p",
"biaspectual": "b",
"infinitive": "n",
"supine": "u",
"participle": "p",
"present": "r",
"future": "f",
"conditional": "c",
"imperative": "m",
"first": "1",
"second": "2",
"third": "3",
"general": "g",
"possessive": "s",
"positive": "p",
"comparative": "c",
"superlative": "s",
"personal": "p",
"demonstrative": "d",
"relative": "r",
"reflexive": "x",
"interrogative": "q",
"indefinite": "i",
"negative": "z",
"bound": "b",
"digit": "d",
"roman": "r",
"letter": "l",
"cardinal": "c",
"ordinal": "o",
"pronominal": "p",
"special": "s",
"coordinating": "c",
"subordinating": "s",
"foreign": "f",
"typo": "t",
"program": "p",
}
TAGSET = {
"N": ['type', 'gender', 'number', 'case', 'animate'],
"V": ['type', 'aspect', 'vform', 'person', 'number', 'gender', 'negative'],
"A": ['type', 'degree', 'gender', 'number', 'case', 'definiteness'],
"R": ['type', 'degree'],
"P": ['type', 'person', 'gender', 'number', 'case', 'owner_number', 'owned_gender', 'clitic'],
"M": ['form', 'type', 'gender', 'number', 'case', 'definiteness'],
"S": ['case'],
"C": ['type'],
"Q": [],
"I": [],
"Y": [],
"X": ['type']
}
CATEGORY_BASES = {
"N": ['.'] * 5,
"V": ['.'] * 7,
"A": ['.'] * 6,
"R": ['.'] * 2,
"P": ['.'] * 6,
"M": ['.'] * 6,
"S": ['.'] * 1,
"C": ['.'] * 1,
"Q": [],
"I": [],
"Y": [],
"X": ['.'] * 1
}
class RestrictionType(Enum):
Morphology = 0
Lexis = 1
MatchAll = 2
class Rendition(Enum):
2018-10-29 10:29:51 +00:00
Lemma = 0
WordForm = 1
Lexis = 2
Unknown = 3
class WordFormSelection(Enum):
All = 0
Msd = 1
Agreement = 2
Any = 3
class Order(Enum):
FromTo = 0
ToFrom = 1
Any = 2
@staticmethod
def new(order):
if order is not None:
if order == "to-from":
return Order.ToFrom
elif order == "from-to":
return Order.FromTo
else:
raise NotImplementedError("What kind of ordering is: {}".format(order))
else:
return Order.Any
def match(self, from_w, to_w):
if self is Order.Any:
return True
fi = from_w.int_id
ti = to_w.int_id
if self is Order.FromTo:
return fi < ti
elif self is Order.ToFrom:
return ti < fi
else:
raise NotImplementedError("Should not be here: Order match")
class ComponentRendition:
def __init__(self):
self.more = None
self.rendition = Rendition.Unknown
def _set_rendition(self, r):
assert(self.rendition is Rendition.Unknown)
self.rendition = r
def _set_more(self, m):
self.more = m
def add_feature(self, feature):
if 'rendition' in feature:
if feature['rendition'] == "lemma":
self._set_rendition(Rendition.Lemma)
elif feature['rendition'] == "word_form":
self._set_rendition(Rendition.WordForm)
self._set_more((WordFormSelection.Any, None))
elif feature['rendition'] == "lexis":
self._set_rendition(Rendition.Lexis)
self._set_more(feature['string'])
else:
raise NotImplementedError("Representation rendition: {}".format(feature))
elif 'selection' in feature:
if feature['selection'] == "msd":
selectors = {k: v for k, v in feature.items() if k != 'selection'}
self._set_more((WordFormSelection.Msd, selectors))
elif feature['selection'] == "all":
self._set_more((WordFormSelection.All, None))
elif feature['selection'] == 'agreement':
assert(feature['head'][:4] == 'cid_')
assert(feature['msd'] is not None)
self._set_more((WordFormSelection.Agreement,
(feature['head'][4:], feature['msd'].split('+'))))
else:
raise NotImplementedError("Representation selection: {}".format(feature))
else:
return None
def isit(self, rendition):
return self.rendition is rendition
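    # set_representations (below) fills in match.representations for one
    # StructureMatch: components rendered as a lemma or a fixed lexis string
    # are written out directly, while word_form components collect candidate
    # words over all matches and choose a form via render_all/render_form,
    # optionally filtered by an MSD selection or by agreement with a head
    # component (a rough summary of the code below, not a specification).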
@staticmethod
def set_representations(matches, structure, word_renderer):
representations = {
c.idx: [[], None] if c.representation.isit(Rendition.WordForm) else [True, ""]
for c in structure.components
}
representations_to_check = []
word_component_id = {}
# doprint = structure.id == '1' and matches[0]['1'].text.startswith('evrop') and matches[0]['2'].text.startswith('prv')
doprint = False
def render_all(lst):
return "/".join([w.text for w in set(lst)])
def render_form(lst):
sorted_lst = sorted(set(lst), key=lst.count)
for word in sorted_lst:
othw = are_agreements_ok(word, representations_to_check)
if othw is not None:
matches.representations[word_component_id[othw.id]] = othw.most_frequent_text(word_renderer)
matches.representations[word_component_id[word.id]] = word.most_frequent_text(word_renderer)
return
def are_agreements_ok(word, words_to_try):
for w_id, other_word, agreements in words_to_try:
if check_agreement(word, other_word, agreements):
if doprint:
print("GOOD :)")
return other_word
def check_msd(word, selectors):
for key, value in selectors.items():
t = word.msd[0]
v = TAGSET[t].index(key.lower())
f1 = word.msd[v + 1]
f2 = CODES[value]
if '-' not in [f1, f2] and f1 != f2:
return False
return True
def check_agreement(w1, w2, agreements):
if doprint:
print("CHECK", w1.text, w1, w2.text, w2)
for agr_case in agreements:
t1 = w1.msd[0]
v1 = TAGSET[t1].index(agr_case)
assert(v1 >= 0)
                # if nothing is specified (e.g. the infinitive), it always agrees
if v1 + 1 >= len(w1.msd):
continue
# first is uppercase, not in TAGSET
m1 = w1.msd[v1 + 1]
# REPEAT (not DRY!)
t2 = w2.msd[0]
v2 = TAGSET[t2].index(agr_case)
assert(v2 >= 0)
if v2 + 1 >= len(w2.msd):
continue
m2 = w2.msd[v2 + 1]
# match!
if '-' not in [m1, m2] and m1 != m2:
return False
return True
for words in matches.matches:
# first pass, check everything but agreements
for w_id, w in words.items():
component = structure.get_component(w_id)
rep = component.representation
word_component_id[w.id] = w_id
if rep.isit(Rendition.Lemma):
representations[w_id][0] = False
representations[w_id][1] = w.lemma
elif rep.isit(Rendition.Lexis):
representations[w_id][0] = False
representations[w_id][1] = rep.more
elif rep.isit(Rendition.Unknown):
representations[w_id][0] = False
representations[w_id][1] = ""
# it HAS to be word_form now
else:
assert(rep.isit(Rendition.WordForm))
wf_type, more = rep.more
if wf_type is WordFormSelection.All:
add = True
func = render_all
elif wf_type is WordFormSelection.Msd:
add = check_msd(w, more)
func = render_form
elif wf_type is WordFormSelection.Any:
add = True
func = render_form
else:
assert(wf_type is WordFormSelection.Agreement)
other_w, agreements = more
representations_to_check.append((other_w, w, agreements))
add = True
func = lambda x: None
if add:
representations[w_id][0].append(w)
representations[w_id][1] = func
if doprint:
print(len(matches), len(representations_to_check))
# for w1i, w2i, agreements in representations_to_check:
# w1, w2 = words[w1i], words[w2i]
# if doprint:
# print("? ", w1.msd, w2.msd, end="")
# if w2i not in bad_words:
#
# if check_agreement(w1, w2, agreements):
# representations[w1i][0].append(w1.text)
# if doprint:
# print(" :)")
# elif doprint:
# print(" :(")
# elif doprint:
# print(" :((")
# just need to set representation to first group,
# but in correct order, agreements last!
representation_sorted_words = []
for w_id, w in matches.matches[0].items():
            rep = structure.get_component(w_id).representation
if rep.isit(Rendition.WordForm) and rep.more[0] is WordFormSelection.Agreement:
representation_sorted_words.append((w_id, w))
else:
representation_sorted_words.insert(0, (w_id, w))
for w_id, w in representation_sorted_words:
data = representations[w_id]
if doprint:
print([(r.text, r.lemma, r.msd) for r in data[0]])
if type(data[1]) is str:
matches.representations[w_id] = None if data[0] else data[1]
elif len(data[0]) == 0:
matches.representations[w_id] = None
else:
data[1](data[0])
if doprint:
print(matches.representations)
print('--')
def __str__(self):
return str(self.rendition)
class ComponentStatus(Enum):
Optional = 0
Required = 1
Forbidden = 2
def __str__(self):
if self == ComponentStatus.Optional:
return "?"
elif self == ComponentStatus.Required:
return "!"
else: #Forbidden
return "X"
def get_level(restriction):
    for feature in restriction:
        if "level" in feature.keys():
            return feature.get("level")
    raise RuntimeError("Unreachable!")
def build_morphology_regex(restriction):
restr_dict = {}
for feature in restriction:
feature_dict = dict(feature.items())
match_type = True
if "filter" in feature_dict:
assert(feature_dict['filter'] == "negative")
match_type = False
del feature_dict['filter']
assert(len(feature_dict) == 1)
key, value = next(iter(feature_dict.items()))
restr_dict[key] = (value, match_type)
assert('POS' in restr_dict)
category = restr_dict['POS'][0].capitalize()
cat_code = CODES[category]
rgx = [cat_code] + CATEGORY_BASES[cat_code]
del restr_dict['POS']
min_msd_length = 1
for attribute, (value, typ) in restr_dict.items():
2018-10-29 10:29:51 +00:00
index = TAGSET[cat_code].index(attribute.lower())
assert(index >= 0)
if '|' in value:
match = "".join(CODES[val] for val in value.split('|'))
else:
match = CODES[value]
match = "[{}{}]".format("" if typ else "^", match)
rgx[index + 1] = match
if typ:
min_msd_length = max(index + 1, min_msd_length)
def matcher(text):
if len(text) <= min_msd_length:
return False
for c, r in zip(text, rgx):
if not re.match(r, c):
return False
return True
return " ".join(rgx), matcher
def build_lexis_regex(restriction):
restr_dict = {}
for feature in restriction:
restr_dict.update(feature.items())
assert("lemma" in restr_dict)
match_list = restr_dict['lemma'].split('|')
return match_list, lambda text: text in match_list
class Restriction:
def __init__(self, restriction_tag):
if restriction_tag is None:
self.type = RestrictionType.MatchAll
self.matcher = None
self.present = None
return
restriction_type = restriction_tag.get('type')
if restriction_type == "morphology":
self.type = RestrictionType.Morphology
self.present, self.matcher = build_morphology_regex(list(restriction_tag))
elif restriction_type == "lexis":
self.type = RestrictionType.Lexis
self.present, self.matcher = build_lexis_regex(list(restriction_tag))
else:
raise NotImplementedError()
def match(self, word):
if self.type == RestrictionType.Morphology:
match_to = word.msd
elif self.type == RestrictionType.Lexis:
match_to = word.lemma
elif self.type == RestrictionType.MatchAll:
return True
else:
raise RuntimeError("Unreachable!")
return self.matcher(match_to)
def __str__(self):
return "({:s} {})".format(str(self.type).split('.')[1], self.present)
def __repr__(self):
return str(self)
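# A Component is one node of a syntactic structure: it carries its cid, an
# optional name, a status (required/optional/forbidden), a Restriction (or a
# list of alternatives from <restriction_or>), a ComponentRendition, and its
# outgoing links (next component, dependency label, word-order constraint).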
class Component:
def __init__(self, info):
idx = info['cid']
name = info['name'] if 'name' in info else None
if 'status' not in info:
status = ComponentStatus.Required
elif info['status'] == 'forbidden':
status = ComponentStatus.Forbidden
elif info['status'] == 'obligatory':
status = ComponentStatus.Required
elif info['status'] == 'optional':
status = ComponentStatus.Optional
else:
raise NotImplementedError("strange status: {}".format(info['status']))
self.status = status
self.name = name
self.idx = idx
self.restriction = None
self.next_element = []
self.representation = ComponentRendition()
self.selection = {}
self.iter_ctr = 0
def add_next(self, next_component, link_label, order):
self.next_element.append((next_component, link_label, Order.new(order)))
def set_restriction(self, restrictions_tag):
if restrictions_tag is None:
self.restriction = Restriction(None)
elif restrictions_tag.tag == "restriction":
self.restriction = Restriction(restrictions_tag)
elif restrictions_tag.tag == "restriction_or":
self.restriction = [Restriction(el) for el in restrictions_tag]
else:
raise RuntimeError("Unreachable")
def set_representation(self, representation):
for feature in representation:
self.representation.add_feature(feature.attrib)
def find_next(self, deps, comps, restrs, reprs):
to_ret = []
for d in deps:
if d[0] == self.idx:
_, idx, dep_label, order = d
next_component = Component(comps[idx])
next_component.set_restriction(restrs[idx])
next_component.set_representation(reprs[idx])
to_ret.append(next_component)
self.add_next(next_component, dep_label, order)
others = next_component.find_next(deps, comps, restrs, reprs)
to_ret.extend(others)
return to_ret
def name_str(self):
return "_" if self.name is None else self.name
def __str__(self):
n = self.name_str()
return "{:s}) {:7s}:{} [{}] :{}".format(
self.idx, n, self.status, self.restriction, self.representation)
def tree(self):
el = []
for next, link, order in self.next_element:
s = "{:3} -- {:5} --> {:3}".format(self.idx, link, next.idx)
if order != Order.Any:
s += " " + str(order)[6:]
el.append(s)
el.extend(next.tree())
return el
def __repr__(self):
return str(self)
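    # match() returns a list of {component_id: Word} dicts, one per way this
    # component and everything reachable from it can be matched starting at
    # `word`, or None when the word itself or a required/forbidden link rules
    # the match out.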
def match(self, word):
m1 = self._match_self(word)
if m1 is None:
return None
mn = self._match_next(word)
if mn is None:
return None
to_ret = [m1]
for cmatch in mn:
# if good match but nothing to add, just continue
if len(cmatch) == 0:
continue
# if more than one match found for particular component
elif len(cmatch) > 1:
logging.debug("MULTIPLE: {}, {}".format(self.idx, cmatch))
# if more than one match in multiple components, NOPE!
if len(to_ret) > 1:
logging.warning("Strange multiple match: {}".format(
str([w.id for w in cmatch[0].values()])))
for tr in to_ret:
tr.update(cmatch[0])
continue
# yeah, so we have found more than one match, =>
# more than one element in to_ret
to_ret = [{**dict(to_ret[0]), **m} for m in cmatch]
else:
for tr in to_ret:
tr.update(cmatch[0])
logging.debug("MA: {}".format(str(to_ret)))
return to_ret
def _match_self(self, word):
matched = None
# matching
if type(self.restriction) is list:
for restr in self.restriction:
matched = restr.match(word)
if matched: # match either
break
else:
matched = self.restriction.match(word)
logging.debug("SELF MATCH({}: {} -> {}".format(self.idx, word.text, matched))
# recurse to next
if not matched:
return None
else:
return {self.idx: word}
def _match_next(self, word):
# matches for every component in links from this component
to_ret = []
# need to get all links that match
for next, link, order in self.next_element:
next_links = word.get_links(link)
logging.debug("FIND LINKS FOR: {} -> {}: #{}".format(self.idx, next.idx, len(next_links)))
to_ret.append([])
# good flag
good = next.status != ComponentStatus.Required
for next_word in next_links:
logging.debug("link: {}: {} -> {}".format(link, word.id, next_word.id))
if not order.match(word, next_word):
continue
match = next.match(next_word)
if match is not None:
# special treatement for forbidden
if next.status == ComponentStatus.Forbidden:
good = False
break
else:
assert(type(match) is list)
to_ret[-1].extend(match)
good = True
# if none matched, nothing found!
if not good:
logging.debug("BAD")
return None
return to_ret
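# A SyntacticStructure is built from one <syntactic_structure> element; the
# code below expects a single child system of type "JOS" containing, in order,
# the components, dependencies and definitions (restrictions/representations),
# from which the component tree is assembled under a fake root component.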
class SyntacticStructure:
def __init__(self):
self.id = None
self.lbs = None
self.components = []
@staticmethod
def from_xml(xml):
st = SyntacticStructure()
st.id = xml.get('id')
st.lbs = xml.get('LBS')
assert(len(list(xml)) == 1)
system = next(iter(xml))
assert(system.get('type') == 'JOS')
components, dependencies, definitions = list(system)
deps = [ (dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order')) for dep in dependencies ]
comps = { comp.get('cid'): dict(comp.items()) for comp in components }
restrs, forms = {}, {}
for comp in definitions:
n = comp.get('cid')
restrs[n] = None
forms[n] = []
for el in comp:
if el.tag.startswith("restriction"):
assert(restrs[n] is None)
restrs[n] = el
elif el.tag.startswith("representation"):
st.add_representation(n, el, forms)
else:
raise NotImplementedError("Unknown definition: {} in structure {}".format(el.tag, st.id))
fake_root_component = Component({'cid': '#', 'type': 'other'})
st.components = fake_root_component.find_next(deps, comps, restrs, forms)
return st
def add_representation(self, n, rep_el, forms):
if rep_el.tag == "representation_and":
rep_el = rep_el[0]
logging.warning("Only using first reprentation in representation_and in structure {}".format(self.id))
assert(rep_el.tag == "representation")
for el in rep_el:
assert(el.tag == "feature")
if 'rendition' in el.attrib:
forms[n].append(el)
elif 'selection' in el.attrib:
forms[n].append(el)
else:
logging.warning("Strange representation feature in structure {}. Skipping"
.format(self.id))
continue
def __str__(self):
comp_str = "\n".join(str(comp) for comp in self.components)
links_str = "\n".join(self.components[0].tree())
return "{} LBS {}\nCOMPONENTS\n{}\n\nLINKS\n{}\n{}".format(
self.id, self.lbs, comp_str, links_str, "-" * 40)
def get_component(self, idx):
for c in self.components:
if c.idx == idx:
return c
raise RuntimeError("Unknown component id: {}".format(idx))
def check_agreements(self, match):
for agr in self.agreements:
w1 = match[agr['n1']]
w2 = match[agr['n2']]
for agr_case in agr['match']:
t1 = w1.msd[0]
v1 = TAGSET[t1].index(agr_case)
assert(v1 >= 0)
                # if nothing is specified (e.g. the infinitive), it always agrees
if v1 + 1 >= len(w1.msd):
continue
# first is uppercase, not in TAGSET
m1 = w1.msd[v1 + 1]
# REPEAT (not DRY!)
t2 = w2.msd[0]
v2 = TAGSET[t2].index(agr_case)
assert(v2 >= 0)
if v2 + 1 >= len(w2.msd):
continue
m2 = w2.msd[v2 + 1]
# match!
if '-' not in [m1, m2] and m1 != m2:
return False
return True
def check_form(self, match):
for midx, w in match.items():
c = self.get_component(midx)
for key, value in c.selection.items():
t = w.msd[0]
v = TAGSET[t].index(key.lower())
f1 = w.msd[v + 1]
f2 = CODES[value]
if '-' not in [f1, f2] and f1 != f2:
return False
return True
def match(self, word):
matches = self.components[0].match(word)
return [] if matches is None else matches
# for m in matches:
# to_ret.append((m, self.check_agreements(m)))
def build_structures(filename):
structures = []
with open(filename, 'r') as fp:
et = ElementTree.XML(fp.read())
for structure in et.iter('syntactic_structure'):
to_append = SyntacticStructure.from_xml(structure)
if to_append is None:
continue
structures.append(to_append)
return structures
def get_msd(comp):
d = dict(comp.items())
if 'msd' in d:
return d['msd']
elif 'ana' in d:
return d['ana'][4:]
else:
        logging.error(str(d))
raise NotImplementedError("MSD?")
class Word:
def __init__(self, xml, do_msd_translate):
self.lemma = xml.get('lemma')
self.msd = MSD_TRANSLATE[get_msd(xml)] if do_msd_translate else get_msd(xml)
self.id = xml.get('id')
self.text = xml.text
self.links = defaultdict(list)
last_num = self.id.split('.')[-1]
if last_num[0] not in '0123456789':
last_num = last_num[1:]
self.int_id = int(last_num)
assert(None not in (self.id, self.lemma, self.msd))
@staticmethod
def pcWord(pc, do_msd_translate):
pc.set('lemma', pc.text)
pc.set('msd', "N" if do_msd_translate else "U")
return Word(pc, do_msd_translate)
def add_link(self, link, to):
self.links[link].append(to)
def get_links(self, link):
if link not in self.links and "|" in link:
for l in link.split('|'):
self.links[link].extend(self.links[l])
return self.links[link]
def most_frequent_text(self, word_renderer):
return word_renderer.render(self.lemma, self.msd)
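# WordMsdRenderer picks, for every (lemma, msd) pair seen in the input, the
# most frequent surface form; most_frequent_text() above uses it to obtain a
# representative word form.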
class WordMsdRenderer:
def __init__(self):
self.all_words = []
self.rendered_words = {}
def add_words(self, words):
self.all_words.extend(words)
def generate_renders(self):
data = defaultdict(lambda: defaultdict(list))
for w in self.all_words:
data[w.lemma][w.msd].append(w.text)
for lemma, ld in data.items():
self.rendered_words[lemma] = {}
for msd, texts in ld.items():
rep = max(set(texts), key=texts.count)
self.rendered_words[lemma][msd] = rep
def render(self, lemma, msd):
if lemma in self.rendered_words:
if msd in self.rendered_words[lemma]:
return self.rendered_words[lemma][msd]
def is_root_id(id_):
return len(id_.split('.')) == 3
def load_files(args):
filenames = args.input
skip_id_check = args.skip_id_check
do_msd_translate = not args.no_msd_translate
for n, fname in enumerate(filenames):
if args.count_files:
status = " :: {} / {}".format(n, len(filenames))
else:
status = ""
yield load_tei_file(fname, skip_id_check, do_msd_translate, args.pc_tag, status)
def load_tei_file(filename, skip_id_check, do_msd_translate, pc_tag, status):
logging.info("LOADING FILE: {}{}".format(filename, status))
with open(filename, 'r') as fp:
xmlstring = re.sub(' xmlns="[^"]+"', '', fp.read(), count=1)
xmlstring = xmlstring.replace(' xml:', ' ')
et = ElementTree.XML(xmlstring)
words = {}
for w in et.iter("w"):
words[w.get('id')] = Word(w, do_msd_translate)
for pc in et.iter(pc_tag):
words[pc.get('id')] = Word.pcWord(pc, do_msd_translate)
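    # Two link encodings are handled below, mirroring what the parser checks:
    # <link dep="..." afun="..." from="..."/> or
    # <link ana="syn:label" target="#from_id #to_id"/>; "ana" values without
    # the "syn:" prefix are skipped.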
for l in et.iter("link"):
if 'dep' in l.keys():
ana = l.get('afun')
lfrom = l.get('from')
dest = l.get('dep')
else:
ana = l.get('ana')
            if ana[:4] != 'syn:':  # don't bother with non-syntactic links
continue
ana = ana[4:]
lfrom, dest = l.get('target').replace('#', '').split()
if lfrom in words:
if not skip_id_check and is_root_id(lfrom):
logging.error("NOO: ", lfrom)
2019-01-19 21:42:51 +00:00
sys.exit(1)
if dest in words:
next_word = words[dest]
words[lfrom].add_link(ana, next_word)
else:
logging.error("Unknown id: {}".format(dest))
sys.exit(1)
else:
# strange errors, just skip...
pass
return list(words.values())
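# Writer produces CSV-like output: a header plus one row per collocation, with
# MAX_NUM_COMPONENTS column groups. The "all" writer dumps token-level data
# (Token_ID, Word_form, Lemma, Msd), while the regular writer emits the lemma,
# the representative form and how it was obtained, followed by the joint
# representative form and the frequency.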
class Writer:
@staticmethod
def make_output_writer(args):
return Writer(False, args.output, args.multiple_output, int(args.sort_by), args.sort_reversed)
@staticmethod
def make_all_writer(args):
return Writer(True, args.all, False, -1, False)
def __init__(self, all, filename, multiple_output, sort_by, sort_reversed):
self.all = all
self.output_file = filename
self.multiple_output = multiple_output
self.sort_by = sort_by
self.sort_order = sort_reversed
def header(self):
cols = ["Lemma"]
if self.all:
cols = ["Token_ID", "Word_form"] + cols + ["Msd"]
else:
cols.extend(["Representative_form", "RF_scenario"])
assert(len(cols) == self.length())
cols = ["C{}_{}".format(i + 1, thd) for i in range(MAX_NUM_COMPONENTS) for thd in cols]
cols = ["Structure_ID"] + cols + ["Colocation_ID"]
if not self.all:
cols += ["Joint_representative_form", "Frequency"]
return cols
def length(self):
return 4 if self.all else 3
def from_word(self, word, representation):
if word is None:
2019-05-12 22:26:00 +00:00
return [""] * self.length()
elif self.all:
return [word.id, word.text, word.lemma, word.msd]
else:
print("1", word)
if representation is None:
return [word.lemma, word.lemma, "lemma_fallback"]
else:
return [word.lemma, representation, "ok"]
def sorted_rows(self, rows):
if self.sort_by < 0 or len(rows) < 2:
return rows
if len(rows[0]) <= self.sort_by:
logging.warning("Cannot sort by column #{}: Not enough columns!".format(len(rows[0])))
return rows
try:
int(rows[0][self.sort_by])
key=lambda row: int(row[self.sort_by])
except ValueError:
key=lambda row: row[self.sort_by].lower()
return sorted(rows, key=key, reverse=self.sort_order)
def write_header(self, file_handler):
file_handler.write(", ".join(self.header()) + "\n")
def write_out_worker(self, file_handler, structure_id, components, colocation_ids):
rows = []
for cid, m, freq, rprsnt in colocation_ids.get_matches_for(structure_id, not self.all):
to_write = []
representation = ""
for idx, _comp in enumerate(components):
idx = str(idx + 1)
word = m[idx] if idx in m else None
                logging.debug(rprsnt)
rep = rprsnt[idx] if idx in rprsnt else None
to_write.extend(self.from_word(word, rep))
representation += " " + to_write[-2]
# make them equal size
to_write.extend([""] * (MAX_NUM_COMPONENTS * self.length() - len(to_write)))
to_write = [structure_id] + to_write + [cid]
if not self.all:
representation = re.sub(' +', ' ', representation)
to_write.append(representation.strip())
to_write.append(str(freq))
rows.append(to_write)
if len(rows) > 0:
rows = self.sorted_rows(rows)
file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n")
file_handler.flush()
def write_out(self, structures, colocation_ids):
def fp_close(fp_):
if fp_ != sys.stdout:
fp_.close()
def fp_open(snum=None):
if self.output_file is None:
return sys.stdout
elif snum is None:
return open(self.output_file, "w")
else:
return open("{}.{}".format(self.output_file, snum), "w")
if not self.multiple_output:
fp = fp_open()
self.write_header(fp)
for s in structures:
if self.multiple_output:
                fp = fp_open(s.id)
self.write_header(fp)
self.write_out_worker(fp, s.id, s.components, colocation_ids)
if self.multiple_output:
fp_close(fp)
if not self.multiple_output:
fp_close(fp)
class StructureMatch:
def __init__(self, match_id, structure_id):
self.match_id = match_id
self.structure_id = structure_id
self.matches = []
self.representations = {}
def append(self, match):
self.matches.append(match)
def __len__(self):
return len(self.matches)
class ColocationIds:
    def __init__(self, min_frequency=0):
        self.data = {}
        self.min_frequency = min_frequency
def _add_match(self, key, sid, match):
if key not in self.data:
self.data[key] = StructureMatch(str(len(self.data) + 1), sid)
self.data[key].append(match)
def get(self, key, n):
return self.data[key][n]
def add_matches(self, matches):
for sid, nms in matches.items():
for nm in nms:
self._add_match(nm[1], sid, nm[0])
def get_matches_for(self, structure_id, group):
for _cid_tup, sm in self.data.items():
if sm.structure_id != structure_id:
continue
for words in sm.matches:
yield (sm.match_id, words, len(sm), sm.representations)
if group:
break
def set_representations(self, structures, word_renderer):
components_dict = {structure.id: structure for structure in structures}
for _1, sm in self.data.items():
ComponentRendition.set_representations(sm, components_dict[sm.structure_id], word_renderer)
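# A collocation is keyed by the structure id plus the sorted (component id,
# lemma) pairs of one match, so every occurrence of the same lemma combination
# within the same structure ends up under a single ColocationIds entry.
# Hypothetical key, for illustration: ('1', ('1', 'evropski'), ('2', 'prvak')).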
def match_file(words, structures):
matches = {s.id: [] for s in structures}
for idx, s in enumerate(structures):
logging.info("{}/{}: {:7s}".format(idx, len(structures), s.id))
for w in words:
mhere = s.match(w)
logging.debug(" GOT: {}".format(len(mhere)))
for match in mhere:
colocation_id = [(idx, w.lemma) for idx, w in match.items()]
colocation_id = [s.id] + list(sorted(colocation_id, key=lambda x:x[0]))
colocation_id = tuple(colocation_id)
matches[s.id].append((match, colocation_id))
return matches
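# With --parallel, main() re-invokes this same script once per input file in a
# subprocess, tells each run to pickle its (words, matches) via --match-to-file
# into a temporary directory, and then merges those pickles back into a single
# ColocationIds / WordMsdRenderer pair before writing the output.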
def main(input_file, structures_file, args):
structures = build_structures(structures_file)
for s in structures:
logging.debug(str(s))
    colocation_ids = ColocationIds(min_frequency=args.min_freq)
word_renderer = WordMsdRenderer()
if args.parallel:
num_parallel = int(args.parallel)
# make temporary directory to hold temporary files
with tempfile.TemporaryDirectory() as tmpdirname:
cmd = sys.argv
for inpt in args.input:
if inpt in cmd:
cmd.remove(inpt)
# remove "--parallel X"
pidx = cmd.index('--parallel')
del cmd[pidx]
del cmd[pidx]
def func(n):
cmdn = [sys.executable] + cmd + [args.input[n], "--match-to-file", "{}/{}.p".format(tmpdirname, n)]
subprocess.check_call(cmdn)
return n
            # use ThreadPoolExecutor to run subprocesses in parallel using Python threads
with concurrent.futures.ThreadPoolExecutor(max_workers=num_parallel) as executor:
# fancy interface to wait for threads to finish
for id_input in executor.map(func, [i for i, _ in enumerate(args.input)]):
with open("{}/{}.p".format(tmpdirname, id_input), "rb") as fp:
words, matches = pickle.load(fp)
colocation_ids.add_matches(matches)
word_renderer.add_words(words)
else:
for words in load_files(args):
matches = match_file(words, structures)
# just save to temporary file, used for children of a parallel process
# MUST NOT have more than one file
if args.match_to_file is not None:
with open(args.match_to_file, "wb") as fp:
pickle.dump((words, matches), fp)
return
else:
colocation_ids.add_matches(matches)
word_renderer.add_words(words)
# get word renders for lemma/msd
word_renderer.generate_renders()
# figure out representations!
colocation_ids.set_representations(structures, word_renderer)
if args.all:
Writer.make_all_writer(args).write_out(structures, colocation_ids)
Writer.make_output_writer(args).write_out(structures, colocation_ids)
logging.debug([(k, len(v)) for k, v in matches.items()])
logging.debug(sum(len(v) for _, v in matches.items()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract structures from a parsed corpus.')
parser.add_argument('structures', help='Structures definitions in xml file')
    parser.add_argument('input', help='Input XML file(s) in ssj500k form; more than one can be listed', nargs='+')
parser.add_argument('--output', help='Output file (if none given, then output to stdout)')
parser.add_argument('--all', help='Additional output file, writes more data')
parser.add_argument('--no-msd-translate', help='MSDs are translated from slovene to english by default', action='store_true')
    parser.add_argument('--skip-id-check', help='Skip checking whether <w> and <pc> ids are in the correct format', action='store_true')
parser.add_argument('--min_freq', help='Minimal frequency in output', type=int, default=0, const=1, nargs='?')
parser.add_argument('--verbose', help='Enable verbose output to stderr', choices=["warning", "info", "debug"], default="info", const="info", nargs='?')
parser.add_argument('--count-files', help="Count files: more verbose output", action='store_true')
parser.add_argument('--multiple-output', help='Generate one output for each syntactic structure', action='store_true')
    parser.add_argument('--sort-by', help="Sort by this column (index)", type=int, default=-1)
    parser.add_argument('--sort-reversed', help="Sort in reversed order", action='store_true')
parser.add_argument('--pc-tag', help='Tag for separators, usually pc or c', default="pc")
parser.add_argument('--parallel', help='Run in multiple processes, should speed things up')
parser.add_argument('--match-to-file', help='Do not use!')
args = parser.parse_args()
logging.basicConfig(stream=sys.stderr, level=args.verbose.upper())
start = time.time()
main(args.input, args.structures, args)
logging.info("TIME: {}".format(time.time() - start))