Adding uncommitted changes
parent d67976c3d9
commit 598ab102b3
@@ -120,6 +120,26 @@ CODES_TRANSLATION = {
     }
 }
 
+CODES_UD = {
+    "ADJ",
+    "ADP",
+    "PUNCT",
+    "ADV",
+    "AUX",
+    "SYM",
+    "INTJ",
+    "CCONJ",
+    "X",
+    "NOUN",
+    "DET",
+    "PROPN",
+    "NUM",
+    "VERB",
+    "PART",
+    "PRON",
+    "SCONJ"
+}
+
 CODES = {
     "Noun": "N",
     "Verb": "V",
@@ -211,3 +231,18 @@ TAGSET = {
     "Y": [],
     "X": ['type']
 }
+
+PPB_DEPRELS = [
+    "advmod",
+    "amod",
+    "compound",
+    "conj",
+    "fixed",
+    "flat",
+    "iobj",
+    "nmod",
+    "nsubj",
+    "nummod",
+    "obj",
+    "obl"
+]
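Note: the new CODES_UD set lists the Universal Dependencies POS tags accepted by the UD code path, and PPB_DEPRELS lists the dependency relations treated as links to content words ("polnopomenska beseda"). A minimal sketch of how these constants can be consulted; the check_token helper and the example values are illustrative, not part of this commit:

from luscenje_struktur.codes_tagset import CODES_UD, PPB_DEPRELS

def check_token(upos, deprel):
    # hypothetical helper: the token must carry a known UD POS tag;
    # its deprel decides whether it counts as a content-word link
    if upos not in CODES_UD:
        raise ValueError("unknown UD POS tag: " + upos)
    return deprel in PPB_DEPRELS

check_token("NOUN", "nsubj")   # True  - "nsubj" is listed in PPB_DEPRELS
check_token("ADP", "case")     # False - "case" is not a PPB deprel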
@@ -20,7 +20,7 @@ class ComponentType(Enum):
 
 
 class Component:
-    def __init__(self, info):
+    def __init__(self, info, system_type):
         idx = info['cid']
         name = info['label'] if 'label' in info else None
         typ = ComponentType.Core if info['type'] == "core" else ComponentType.Other
@@ -39,7 +39,7 @@ class Component:
         self.status = status
         self.name = name
         self.idx = idx
-        self.restrictions = RestrictionGroup([None]) if 'restriction' in info else []
+        self.restrictions = RestrictionGroup([None], system_type) if 'restriction' in info else []
         self.next_element = []
         self.representation = []
         self.selection = {}
@@ -50,17 +50,17 @@ class Component:
     def add_next(self, next_component, link_label, order):
         self.next_element.append((next_component, link_label, Order.new(order)))
 
-    def set_restriction(self, restrictions_tags):
+    def set_restriction(self, restrictions_tags, system_type):
         if not restrictions_tags:
-            self.restrictions = RestrictionGroup([None])
+            self.restrictions = RestrictionGroup([None], system_type)
 
         # if first element is of type restriction all following are as well
         elif restrictions_tags[0].tag == "restriction":
-            self.restrictions = RestrictionGroup(restrictions_tags)
+            self.restrictions = RestrictionGroup(restrictions_tags, system_type)
 
         # combinations of 'and' and 'or' restrictions are currently not implemented
         elif restrictions_tags[0].tag == "restriction_or":
-            self.restrictions = RestrictionGroup(restrictions_tags[0], group_type='or')
+            self.restrictions = RestrictionGroup(restrictions_tags[0], system_type, group_type='or')
 
         else:
             raise RuntimeError("Unreachable")
@@ -72,19 +72,19 @@ class Component:
             crend.add_feature(feature.attrib)
         self.representation.append(crend)
 
-    def find_next(self, deps, comps, restrs, reprs):
+    def find_next(self, deps, comps, restrs, reprs, system_type):
         to_ret = []
         for d in deps:
             if d[0] == self.idx:
                 _, idx, dep_label, order = d
 
-                next_component = Component(comps[idx])
-                next_component.set_restriction(restrs[idx])
+                next_component = Component(comps[idx], system_type)
+                next_component.set_restriction(restrs[idx], system_type)
                 next_component.set_representation(reprs[idx])
                 to_ret.append(next_component)
 
                 self.add_next(next_component, dep_label, order)
-                others = next_component.find_next(deps, comps, restrs, reprs)
+                others = next_component.find_next(deps, comps, restrs, reprs, system_type)
                 to_ret.extend(others)
 
         return to_ret
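Note: Component.__init__, set_restriction and find_next now thread system_type down to every RestrictionGroup they build, so the whole component tree of a structure is parsed under one tagging system. The call site added later in this commit (SyntacticStructure.from_xml) shows the new shape:

# from the SyntacticStructure hunk below; system_type is either 'JOS' or 'UD'
fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type)
fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms, system_type)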
@@ -5,6 +5,7 @@ import re
 import sys
 import gzip
 import pathlib
+from io import StringIO
 
 from luscenje_struktur.progress_bar import progress
 from luscenje_struktur.word import Word
@@ -53,6 +54,11 @@ def load_files(args, database, w_collection=None, input_corpus=None):
             sentences = load_csv_valency(fname, True, w_collection)
             for sentence in sentences:
                 yield sentence
+        elif extension == ".conllu":
+            if input_corpus is None:
+                yield load_conllu(fname)
+            else:
+                raise Exception('conllu with input_corpus is not supported!')
         else:
             if input_corpus is None:
                 yield load_csv(fname, False)
@@ -77,6 +83,59 @@ def lines_csv(filename):
             yield line
 
 
+def load_conllu(filename):
+    import conllu
+    result = []
+    bad_sentence = False
+
+    words = {}
+    links = []
+
+    def sentence_end(bad_sentence, sent_id):
+        if bad_sentence:
+            return
+
+        for lfrom, ldest, ana in links:
+            if lfrom not in words or ldest not in words:
+                logging.warning("Bad link in sentence: " + sent_id)
+                continue
+            words[lfrom].add_link(ana, words[ldest])
+        result.extend(words.values())
+
+    with open(filename, 'r') as f:
+        data = f.read()
+        # conlls = conllu.parse_incr(StringIO(data))
+        # for sent in conlls:
+        #     try:
+        #         for word in sent:
+        #             full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
+        #             words[str(word['id'])] = Word(word['id'], word['xpos'], full_id, word['form'], False)
+        #     except:
+        #         logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
+
+        conlls = conllu.parse_incr(StringIO(data))
+        # build dep parse
+        for sent in conlls:
+            try:
+                # adding fake word
+                words['0'] = Word('', '', '0', '', False, True)
+                for word in sent:
+                    if type(word['id']) == tuple:
+                        continue
+                    full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
+                    words[str(word['id'])] = Word(word['lemma'], word['upos'], full_id, word['form'], False)
+                    links.append((str(word['head']), str(word['id']), word['deprel']))
+                sentence_end(False, sent.metadata['sent_id'])
+                links = []
+                words = {}
+            except:
+                links = []
+                words = {}
+                logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
+
+    return result
+
+
 def load_csv(filename, compressed):
     result = []
     bad_sentence = False
@@ -265,6 +324,7 @@ def file_sentence_generator(et, args):
             # strange errors, just skip...
             pass
 
+    a = list(words.values())
     return list(words.values())
 
 
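Note: load_files now recognises the .conllu extension and hands such files to the new load_conllu reader, which parses them with the conllu package, wraps each token in a Word built from its lemma and UPOS (instead of a JOS MSD), and wires up head/deprel links per sentence. A minimal usage sketch, assuming the conllu package is installed and that Word exposes its lemma and tag as .lemma and .msd, as the restriction matcher's use of word.msd suggests; the file path is illustrative:

words = load_conllu("corpus/sample.conllu")   # illustrative path
for w in words:
    # under UD, the slot that normally holds the MSD carries the UPOS tag
    print(w.lemma, w.msd)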
@@ -1,7 +1,7 @@
 import re
 from enum import Enum
 
-from luscenje_struktur.codes_tagset import CODES, TAGSET
+from luscenje_struktur.codes_tagset import CODES, TAGSET, CODES_UD
 
 
 class RestrictionType(Enum):
@@ -9,6 +9,21 @@ class RestrictionType(Enum):
     Lexis = 1
     MatchAll = 2
     Space = 3
+    MorphologyUD = 4
 
 
+def determine_ppb_ud(rgxs):
+    if len(rgxs) != 1:
+        return 0
+    rgx = rgxs[0]
+    if rgx in ("ADJ", "NOUN", "ADV"):
+        return 0
+    elif rgx == "AUX":
+        return 3
+    elif rgx == "VERB":
+        return 2
+    else:
+        return 4
+
+
 def determine_ppb(rgxs):
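Note: determine_ppb_ud is the UD counterpart of determine_ppb; it ranks a single-tag POS restriction on the "polnopomenska beseda" scale, where 0 marks clear content words and 4 everything else. Worked examples following the branches above:

determine_ppb_ud(["NOUN"])          # 0 - ADJ, NOUN and ADV are content words
determine_ppb_ud(["VERB"])          # 2
determine_ppb_ud(["AUX"])           # 3
determine_ppb_ud(["ADP"])           # 4 - any other single tag
determine_ppb_ud(["NOUN", "VERB"])  # 0 - more than one tag falls back to 0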
@@ -112,6 +127,78 @@ class MorphologyRegex:
         return False
 
 
+class MorphologyUDRegex:
+    def __init__(self, restriction):
+        # self.min_msd_length = 1
+
+        restr_dict = {}
+        for feature in restriction:
+            feature_dict = dict(feature.items())
+
+            match_type = True
+            # if "filter" in feature_dict:
+            #     assert feature_dict['filter'] == "negative"
+            #     match_type = False
+            #     del feature_dict['filter']
+
+            assert len(feature_dict) == 1
+            key, value = next(iter(feature_dict.items()))
+            restr_dict[key] = (value, match_type)
+
+        assert 'POS' in restr_dict
+
+        # handle multiple word types
+        if '|' in restr_dict['POS'][0]:
+            categories = restr_dict['POS'][0].split('|')
+        else:
+            categories = [restr_dict['POS'][0]]
+
+        self.rgxs = []
+        self.re_objects = []
+        self.min_msd_lengths = []
+
+        del restr_dict['POS']
+
+        for category in categories:
+            min_msd_length = 1
+            category = category.upper()
+            assert category in CODES_UD
+            cat_code = category
+            rgx = category
+
+            # for attribute, (value, typ) in restr_dict.items():
+            #     if attribute.lower() not in TAGSET[cat_code]:
+            #         continue
+            #     index = TAGSET[cat_code].index(attribute.lower())
+            #     assert index >= 0
+            #
+            #     if '|' in value:
+            #         match = "".join(CODES[val] for val in value.split('|'))
+            #     else:
+            #         match = CODES[value]
+            #
+            #     match = "[{}{}]".format("" if typ else "^", match)
+            #     rgx[index + 1] = match
+            #
+            #     if typ:
+            #         min_msd_length = max(index + 1, min_msd_length)
+
+            # strip rgx
+            # for i in reversed(range(len(rgx))):
+            #     if rgx[i] == '.':
+            #         rgx = rgx[:-1]
+            #     else:
+            #         break
+
+            # self.re_objects.append([re.compile(r) for r in rgx])
+            self.rgxs.append(rgx)
+            self.min_msd_lengths.append(min_msd_length)
+
+    def __call__(self, text):
+        assert len(self.rgxs) == 1
+        return self.rgxs[0] == text
+
+
 class LexisRegex:
     def __init__(self, restriction):
         restr_dict = {}
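Note: where MorphologyRegex expands JOS MSD restrictions into position-by-position regexes, MorphologyUDRegex currently reduces a UD restriction to an exact match on the upper-cased POS value; the commented-out blocks keep the JOS-style feature handling around for a later extension. A small sketch of its behaviour, assuming restriction features arrive as XML elements with a POS attribute (the 'feature' tag name here is illustrative):

from xml.etree.ElementTree import Element

feature = Element('feature', {'POS': 'noun'})
matcher = MorphologyUDRegex([feature])

matcher('NOUN')   # True  - exact comparison against the upper-cased category
matcher('VERB')   # False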
@@ -150,8 +237,11 @@ class SpaceRegex:
 
         return match
 
+
+
+
 class Restriction:
-    def __init__(self, restriction_tag):
+    def __init__(self, restriction_tag, system_type='JOS'):
         self.ppb = 4 # polnopomenska beseda (0-4)
 
         if restriction_tag is None:
@@ -162,9 +252,15 @@ class Restriction:
 
         restriction_type = restriction_tag.get('type')
         if restriction_type == "morphology":
-            self.type = RestrictionType.Morphology
-            self.matcher = MorphologyRegex(list(restriction_tag))
-            self.ppb = determine_ppb(self.matcher.rgxs)
+            if system_type == 'JOS':
+                self.type = RestrictionType.Morphology
+                self.matcher = MorphologyRegex(list(restriction_tag))
+                self.ppb = determine_ppb(self.matcher.rgxs)
+            # UD system is handled based on deprel
+            elif system_type == 'UD':
+                self.type = RestrictionType.MorphologyUD
+                self.matcher = MorphologyUDRegex(list(restriction_tag))
+                # self.ppb = determine_ppb_ud(self.matcher.rgxs)
 
         elif restriction_type == "lexis":
             self.type = RestrictionType.Lexis
@@ -177,7 +273,7 @@ class Restriction:
             raise NotImplementedError()
 
     def match(self, word):
-        if self.type == RestrictionType.Morphology:
+        if self.type == RestrictionType.Morphology or self.type == RestrictionType.MorphologyUD:
             match_to = word.msd
         elif self.type == RestrictionType.Lexis:
             match_to = word.lemma
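Note: Restriction now accepts system_type (defaulting to 'JOS') and, for morphology restrictions under 'UD', builds a MorphologyUDRegex that is matched against word.msd, which the conllu loader fills with the UPOS tag; the UD ppb ranking stays commented out because, per the comment above, the UD system is handled based on deprel. A minimal sketch, with the same assumed XML shape as in the previous note:

from xml.etree.ElementTree import Element, SubElement

restriction_tag = Element('restriction', {'type': 'morphology'})
SubElement(restriction_tag, 'feature', {'POS': 'verb'})

r = Restriction(restriction_tag, system_type='UD')
# r.type is RestrictionType.MorphologyUD; r.match(word) compares word.msd to "VERB"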
@@ -1,8 +1,8 @@
 from luscenje_struktur.restriction import Restriction
 
 class RestrictionGroup:
-    def __init__(self, restrictions_tag, group_type='and'):
-        self.restrictions = [Restriction(el) for el in restrictions_tag]
+    def __init__(self, restrictions_tag, system_type, group_type='and'):
+        self.restrictions = [Restriction(el, system_type) for el in restrictions_tag]
         self.group_type = group_type
 
     def __iter__(self):
@@ -2,6 +2,7 @@ from xml.etree import ElementTree
 import logging
 import pickle
 
+from luscenje_struktur.codes_tagset import PPB_DEPRELS
 from luscenje_struktur.component import Component, ComponentType
 from luscenje_struktur.lemma_features import get_lemma_features
 
@@ -23,7 +24,9 @@ class SyntacticStructure:
         assert len(list(xml)) == 1
         system = next(iter(xml))
 
-        assert system.get('type') == 'JOS'
+        assert system.get('type') == 'JOS' or system.get('type') == 'UD'
+        system_type = system.get('type')
+
         components, dependencies, definitions = list(system)
 
         deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order'))
@@ -46,8 +49,8 @@ class SyntacticStructure:
             raise NotImplementedError("Unknown definition: {} in structure {}"
                                       .format(el.tag, st.id))
 
-        fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None})
-        fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms)
+        fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type)
+        fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms, system_type)
         # all dep with value modra point to artificial root - fake_root_component
         if any([dep[2] == 'modra' for dep in deps]):
             st.fake_root_included = True
@@ -56,9 +59,28 @@ class SyntacticStructure:
         st.components = fake_root_component_children
 
         if not no_stats:
-            st.determine_core2w()
+            if system_type == 'JOS':
+                st.determine_core2w()
+            elif system_type == 'UD':
+                st.determine_core2w_ud()
         return st
 
+    def determine_core2w_ud(self):
+        deprels = {}
+        for c in self.components:
+            for next_el in c.next_element:
+                deprels[next_el[0]] = next_el[1]
+        ppb_components_num = 0
+        for c in self.components:
+            if c.type != ComponentType.Core:
+                continue
+            if c in deprels and deprels[c] not in PPB_DEPRELS:
+                continue
+            ppb_components_num += 1
+            c.type = ComponentType.Core2w
+
+        assert ppb_components_num == 2, RuntimeError("Cannot determine 2 'jedrna polnopomenska beseda' for", self.id)
+
     def determine_core2w(self):
         ppb_components = []
         for c in self.components:
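Note: SyntacticStructure.from_xml now reads the system type declared in the structure file ('JOS' or 'UD') and dispatches on it: JOS structures keep the MSD-based determine_core2w, while UD structures use determine_core2w_ud, which promotes a core component to Core2w when it has no recorded incoming link or when that link's deprel is one of PPB_DEPRELS, and asserts that exactly two such components exist. A rough restatement of the bookkeeping, assuming next_element triples as built by Component.add_next:

def incoming_deprels(components):
    # maps each child component to the label of the dependency pointing at it
    rels = {}
    for parent in components:
        for child, label, _order in parent.next_element:
            rels[child] = label
    return rels

# a core component c is promoted to Core2w when
#   c not in rels              (no incoming link recorded), or
#   rels[c] in PPB_DEPRELS     (content-bearing relation such as nsubj, obj, amod)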