Compare commits


1 commit

14 changed files with 179 additions and 281 deletions

View File

@@ -2,7 +2,6 @@ import argparse
 import os
 import sys
 import tqdm
-import logging
 good_lemmas = ["absurd", "absurdnost", "akuten", "akutno", "alkohol", "alkoholen", "aluminijast", "ananas", "aplikacija", "aplikativen", "aranžma", "arbiter", "armada", "avtomatičen", "avtomatiziran", "babica", "bajen", "bajka", "bakren", "bambusov", "barvan", "barvanje", "baseballski", "bazar", "bazičen", "belina", "bezgov", "bičati", "bife", "bilka", "biomasa", "biotop", "birma", "bivol", "blago", "blaženost", "bliskavica", "bobnič", "bolha", "bolnišnica", "bor", "borov", "borovničev", "brati", "briljant", "briti", "brusiti", "bučanje", "cikličen", "civilizacija", "dopust", "drama", "drezati", "duda", "dvorezen", "embalaža", "faks", "farsa", "glasno", "informiranje", "interier", "intima", "intimno", "investirati", "ironično", "istovetiti", "izvožen", "jagoda", "jeklar", "jezik", "karbon", "kitara", "kodrast", "molče", "mučiti", "novinarski", "obala", "občevati", "okrasiti", "pajčevina", "panoga", "prevajanje", "prevajati", "previti", "prihraniti", "priloga", "prisluškovati", "sopara"]
@@ -16,14 +15,14 @@ def main(args):
     for fidx, filename in enumerate(filepaths):
         with open(filename, 'r') as fp:
-            logging.info("loading next...")
+            print("loading next...", end="", flush=True)
             line = fp.readline()
             lemma_rows = [idx for idx, cell in enumerate(line.split(",")) if "_Lemma" in cell]
             file_lines = fp.read().split("\n")
             for lidx, good_lemma in enumerate(good_lemmas):
                 spaces = " " * 20 if lidx == 0 else ""
-                logging.info("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces))
+                print("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces), end="", flush=True)
                 for line in file_lines:
                     if good_lemma not in line:
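Aside: the carriage-return progress display restored here only works with bare print. logging.info terminates every record with its own newline and accepts no end= or flush= keywords, so the "\r"-overwrite trick never renders as a single updating line through the logging module. A minimal, self-contained sketch of the pattern:

    import time

    # rewrite one terminal line in place instead of emitting a line per step
    for i in range(5):
        print("\rprocessing {}/5".format(i + 1), end="", flush=True)
        time.sleep(0.1)  # stand-in for the real per-file work
    print()  # move off the progress line so later output starts cleanly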

View File

@@ -120,26 +120,6 @@ CODES_TRANSLATION = {
     }
 }
-CODES_UD = {
-    "ADJ",
-    "ADP",
-    "PUNCT",
-    "ADV",
-    "AUX",
-    "SYM",
-    "INTJ",
-    "CCONJ",
-    "X",
-    "NOUN",
-    "DET",
-    "PROPN",
-    "NUM",
-    "VERB",
-    "PART",
-    "PRON",
-    "SCONJ"
-}
 CODES = {
     "Noun": "N",
     "Verb": "V",
@@ -231,18 +211,3 @@ TAGSET = {
     "Y": [],
     "X": ['type']
 }
-PPB_DEPRELS = [
-    "advmod",
-    "amod",
-    "compound",
-    "conj",
-    "fixed",
-    "flat",
-    "iobj",
-    "nmod",
-    "nsubj",
-    "nummod",
-    "obj",
-    "obl"
-]
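Both deleted blocks served the UD code path this commit removes: CODES_UD enumerated the Universal Dependencies POS tags, and PPB_DEPRELS listed the dependency relations under which a core component still counts as a content word ("polnopomenska beseda"). An illustrative sketch of how such a deprel whitelist is typically applied (toy edges, not from this repo):

    PPB_DEPRELS = ["advmod", "amod", "compound", "conj", "fixed", "flat",
                   "iobj", "nmod", "nsubj", "nummod", "obj", "obl"]

    # hypothetical (head, child, deprel) edges from one parsed sentence
    edges = [("2", "1", "amod"), ("0", "2", "root"), ("2", "3", "punct")]

    # keep only edges whose relation can link content words
    content_edges = [e for e in edges if e[2] in PPB_DEPRELS]
    print(content_edges)  # [('2', '1', 'amod')]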

View File

@@ -20,7 +20,7 @@ class ComponentType(Enum):
 class Component:
-    def __init__(self, info, system_type):
+    def __init__(self, info):
         idx = info['cid']
         name = info['label'] if 'label' in info else None
         typ = ComponentType.Core if info['type'] == "core" else ComponentType.Other
@@ -39,7 +39,7 @@ class Component:
         self.status = status
         self.name = name
         self.idx = idx
-        self.restrictions = RestrictionGroup([None], system_type) if 'restriction' in info else []
+        self.restrictions = RestrictionGroup([None]) if 'restriction' in info else []
         self.next_element = []
         self.representation = []
         self.selection = {}
@@ -50,17 +50,17 @@ class Component:
     def add_next(self, next_component, link_label, order):
         self.next_element.append((next_component, link_label, Order.new(order)))
-    def set_restriction(self, restrictions_tags, system_type):
+    def set_restriction(self, restrictions_tags):
         if not restrictions_tags:
-            self.restrictions = RestrictionGroup([None], system_type)
+            self.restrictions = RestrictionGroup([None])
         # if first element is of type restriction all following are as well
         elif restrictions_tags[0].tag == "restriction":
-            self.restrictions = RestrictionGroup(restrictions_tags, system_type)
+            self.restrictions = RestrictionGroup(restrictions_tags)
         # combinations of 'and' and 'or' restrictions are currently not implemented
         elif restrictions_tags[0].tag == "restriction_or":
-            self.restrictions = RestrictionGroup(restrictions_tags[0], system_type, group_type='or')
+            self.restrictions = RestrictionGroup(restrictions_tags[0], group_type='or')
         else:
             raise RuntimeError("Unreachable")
@@ -72,19 +72,19 @@ class Component:
             crend.add_feature(feature.attrib)
         self.representation.append(crend)
-    def find_next(self, deps, comps, restrs, reprs, system_type):
+    def find_next(self, deps, comps, restrs, reprs):
         to_ret = []
         for d in deps:
             if d[0] == self.idx:
                 _, idx, dep_label, order = d
-                next_component = Component(comps[idx], system_type)
-                next_component.set_restriction(restrs[idx], system_type)
+                next_component = Component(comps[idx])
+                next_component.set_restriction(restrs[idx])
                 next_component.set_representation(reprs[idx])
                 to_ret.append(next_component)
                 self.add_next(next_component, dep_label, order)
-                others = next_component.find_next(deps, comps, restrs, reprs, system_type)
+                others = next_component.find_next(deps, comps, restrs, reprs)
                 to_ret.extend(others)
         return to_ret
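The net effect in this file is that the JOS/UD switch no longer threads through the component tree: building a structure is now construct-and-recurse. A sketch of the call sequence, mirroring the fake-root construction that appears later in this diff (toy inputs; assumes the luscenje_struktur package is importable and that Order.new tolerates a missing order, as with the fake-root dependency elsewhere in this diff):

    # toy inputs in the shapes find_next expects:
    # deps rows are (from_cid, to_cid, label, order); comps/restrs/reprs keyed by cid
    comps = {'1': {'cid': '1', 'type': 'core', 'label': 'jedro'}}
    deps = [('#', '1', 'modra', None)]
    restrs = {'1': None}   # no restriction tags -> RestrictionGroup([None])
    reprs = {'1': []}      # no representation tags

    root = Component({'cid': '#', 'type': 'other', 'restriction': None})
    components = [root] + root.find_next(deps, comps, restrs, reprs)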

View File

@@ -1,6 +1,5 @@
 from math import log2
 import re
-import logging
 from luscenje_struktur.component import ComponentType
@@ -166,9 +165,9 @@ class StatsFormatter(Formatter):
         new_key = (sidx, idx, '')
         if new_key in self.colocation_ids.dispersions:
             key = new_key
-            logging.info('Dispersions fixed.')
+            print('Dispersions fixed.')
         else:
-            logging.info('Dispersions not fixed.')
+            print('Dispersions not fixed.')
         if key in self.colocation_ids.dispersions:
             distribution = self.colocation_ids.dispersions[key]
         else:

View File

@@ -5,7 +5,6 @@ import re
 import sys
 import gzip
 import pathlib
-from io import StringIO
 from luscenje_struktur.progress_bar import progress
 from luscenje_struktur.word import Word
@@ -30,13 +29,13 @@ def load_files(args, database, w_collection=None, input_corpus=None):
     database.init("CREATE TABLE Files ( filename varchar(2048) )")
     for idx, fname in enumerate(filenames):
-        logging.info("FILE " + fname + "{}/{}".format(idx, len(filenames)))
+        print("FILE ", fname, "{}/{}".format(idx, len(filenames)))
         extension = pathlib.Path(fname).suffix
         # check if file with the same name already loaded...
         loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname,)).fetchone()
         if loaded is not None:
-            logging.info("ALREADY LOADED")
+            print("ALREADY LOADED")
             continue
         if extension == ".xml":
@@ -54,11 +53,6 @@ def load_files(args, database, w_collection=None, input_corpus=None):
             sentences = load_csv_valency(fname, True, w_collection)
             for sentence in sentences:
                 yield sentence
-        elif extension == ".conllu":
-            if input_corpus is None:
-                yield load_conllu(fname)
-            else:
-                raise Exception('conllu with input_corpus is not supported!')
         else:
             if input_corpus is None:
                 yield load_csv(fname, False)
@@ -83,59 +77,6 @@ def lines_csv(filename):
             yield line
-def load_conllu(filename):
-    import conllu
-    result = []
-    bad_sentence = False
-    words = {}
-    links = []
-    def sentence_end(bad_sentence, sent_id):
-        if bad_sentence:
-            return
-        for lfrom, ldest, ana in links:
-            if lfrom not in words or ldest not in words:
-                logging.warning("Bad link in sentence: " + sent_id)
-                continue
-            words[lfrom].add_link(ana, words[ldest])
-        result.extend(words.values())
-    with open(filename, 'r') as f:
-        data = f.read()
-        # conlls = conllu.parse_incr(StringIO(data))
-        # for sent in conlls:
-        #     try:
-        #         for word in sent:
-        #             full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
-        #             words[str(word['id'])] = Word(word['id'], word['xpos'], full_id, word['form'], False)
-        #     except:
-        #         logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
-        conlls = conllu.parse_incr(StringIO(data))
-        # build dep parse
-        for sent in conlls:
-            try:
-                # adding fake word
-                words['0'] = Word('', '', '0', '', False, True)
-                for word in sent:
-                    if type(word['id']) == tuple:
-                        continue
-                    full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
-                    words[str(word['id'])] = Word(word['lemma'], word['upos'], full_id, word['form'], False)
-                    links.append((str(word['head']), str(word['id']), word['deprel']))
-                sentence_end(False, sent.metadata['sent_id'])
-                links = []
-                words = {}
-            except:
-                links = []
-                words = {}
-                logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
-    return result
 def load_csv(filename, compressed):
     result = []
     bad_sentence = False
@@ -324,7 +265,6 @@ def file_sentence_generator(et, args):
         # strange errors, just skip...
         pass
-    a = list(words.values())
     return list(words.values())
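For reference, the deleted load_conllu read each file fully into memory and then parsed it with conllu.parse_incr over a StringIO; parse_incr accepts any file-like object, so the same traversal can stream straight from the open file. A minimal sketch using the conllu package (hypothetical input path):

    import conllu

    # stream sentences and collect (head, child, deprel) edges per sentence
    with open("corpus.conllu", "r", encoding="utf-8") as f:
        for sent in conllu.parse_incr(f):
            edges = []
            for word in sent:
                if isinstance(word["id"], tuple):  # skip multi-word token ranges like 3-4
                    continue
                edges.append((str(word["head"]), str(word["id"]), word["deprel"]))
            print(sent.metadata.get("sent_id"), len(edges))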

View File

@@ -2,7 +2,6 @@ import gc
 from collections import defaultdict
 from ast import literal_eval
 from time import time
-import logging
 from luscenje_struktur.match import StructureMatch
 from luscenje_struktur.representation_assigner import RepresentationAssigner
@@ -105,7 +104,7 @@ class MatchStore:
     def set_representations(self, word_renderer, structures, sloleks_db=None):
         step_name = 'representation'
         if self.db.is_step_done(step_name):
-            logging.info("Representation step already done, skipping")
+            print("Representation step already done, skipping")
             return
         num_inserts = 1000
@@ -149,7 +148,7 @@ class MatchStore:
             dispersions[(str(structure_id), component_id, lemma)] += 1
         self.dispersions = dict(dispersions)
-        logging.info("Storing dispersions...")
+        print("Storing dispersions...")
         self.store_dispersions()
         self.db.step_is_done(step_name)
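MatchStore guards the stage shown here with an is_step_done / step_is_done pair, so an interrupted run can resume without redoing finished stages. A generic sketch of that checkpoint idiom (sqlite3; table and file names hypothetical):

    import sqlite3

    conn = sqlite3.connect("progress.db")
    conn.execute("CREATE TABLE IF NOT EXISTS StepsDone (step varchar(32))")

    def is_step_done(step):
        # a row in StepsDone marks the stage as finished
        return conn.execute(
            "SELECT 1 FROM StepsDone WHERE step=?", (step,)).fetchone() is not None

    def step_is_done(step):
        conn.execute("INSERT INTO StepsDone VALUES (?)", (step,))
        conn.commit()

    if not is_step_done("representation"):
        ...  # expensive representation work
        step_is_done("representation")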

View File

@@ -1,5 +1,4 @@
 import time
-import logging
 try:
     from tqdm import tqdm
@@ -22,10 +21,10 @@ class Progress:
             for n, el in enumerate(iterable):
                 now = time.time()
                 if now - last_report > REPORT_ON:
-                    logging.info("\r{}: {}/{}".format(description, n, total), end="")
+                    print("\r{}: {}/{}".format(description, n, total), end="")
                     last_report = now
                 yield el
-            logging.info(" -> {}".format(time.time() - start_time))
+            print(" -> {}".format(time.time() - start_time))
         else:
             yield from tqdm(iterable, desc=description, total=total)
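The manual reporter above is only the fallback branch for when the tqdm import at the top of the file fails; note also that the old logging.info(..., end="") calls were invalid, since logging.info accepts no end keyword, which this revert fixes. A condensed sketch of the import-fallback idiom:

    import time

    try:
        from tqdm import tqdm
    except ImportError:
        tqdm = None  # fall back to plain progress prints below

    def progress(iterable, description, total):
        if tqdm is None:
            start = time.time()
            for n, el in enumerate(iterable):
                print("\r{}: {}/{}".format(description, n, total), end="", flush=True)
                yield el
            print(" -> {}".format(time.time() - start))
        else:
            yield from tqdm(iterable, desc=description, total=total)

    list(progress(range(3), "demo", 3))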

View File

@@ -1,7 +1,7 @@
 import re
 from enum import Enum
-from luscenje_struktur.codes_tagset import CODES, TAGSET, CODES_UD
+from luscenje_struktur.codes_tagset import CODES, TAGSET
 class RestrictionType(Enum):
@@ -9,21 +9,6 @@ class RestrictionType(Enum):
     Lexis = 1
     MatchAll = 2
     Space = 3
-    MorphologyUD = 4
-def determine_ppb_ud(rgxs):
-    if len(rgxs) != 1:
-        return 0
-    rgx = rgxs[0]
-    if rgx in ("ADJ", "NOUN", "ADV"):
-        return 0
-    elif rgx == "AUX":
-        return 3
-    elif rgx == "VERB":
-        return 2
-    else:
-        return 4
 def determine_ppb(rgxs):
@@ -127,78 +112,6 @@ class MorphologyRegex:
         return False
-class MorphologyUDRegex:
-    def __init__(self, restriction):
-        # self.min_msd_length = 1
-        restr_dict = {}
-        for feature in restriction:
-            feature_dict = dict(feature.items())
-            match_type = True
-            # if "filter" in feature_dict:
-            #     assert feature_dict['filter'] == "negative"
-            #     match_type = False
-            #     del feature_dict['filter']
-            assert len(feature_dict) == 1
-            key, value = next(iter(feature_dict.items()))
-            restr_dict[key] = (value, match_type)
-        assert 'POS' in restr_dict
-        # handle multiple word types
-        if '|' in restr_dict['POS'][0]:
-            categories = restr_dict['POS'][0].split('|')
-        else:
-            categories = [restr_dict['POS'][0]]
-        self.rgxs = []
-        self.re_objects = []
-        self.min_msd_lengths = []
-        del restr_dict['POS']
-        for category in categories:
-            min_msd_length = 1
-            category = category.upper()
-            assert category in CODES_UD
-            cat_code = category
-            rgx = category
-            # for attribute, (value, typ) in restr_dict.items():
-            #     if attribute.lower() not in TAGSET[cat_code]:
-            #         continue
-            #     index = TAGSET[cat_code].index(attribute.lower())
-            #     assert index >= 0
-            #
-            #     if '|' in value:
-            #         match = "".join(CODES[val] for val in value.split('|'))
-            #     else:
-            #         match = CODES[value]
-            #
-            #     match = "[{}{}]".format("" if typ else "^", match)
-            #     rgx[index + 1] = match
-            #
-            #     if typ:
-            #         min_msd_length = max(index + 1, min_msd_length)
-            # strip rgx
-            # for i in reversed(range(len(rgx))):
-            #     if rgx[i] == '.':
-            #         rgx = rgx[:-1]
-            #     else:
-            #         break
-            # self.re_objects.append([re.compile(r) for r in rgx])
-            self.rgxs.append(rgx)
-            self.min_msd_lengths.append(min_msd_length)
-    def __call__(self, text):
-        assert len(self.rgxs) == 1
-        return self.rgxs[0] == text
 class LexisRegex:
     def __init__(self, restriction):
         restr_dict = {}
@@ -237,11 +150,8 @@ class SpaceRegex:
         return match
 class Restriction:
-    def __init__(self, restriction_tag, system_type='JOS'):
+    def __init__(self, restriction_tag):
         self.ppb = 4 # polnopomenska beseda (0-4)
         if restriction_tag is None:
@@ -252,15 +162,9 @@ class Restriction:
         restriction_type = restriction_tag.get('type')
         if restriction_type == "morphology":
-            if system_type == 'JOS':
-                self.type = RestrictionType.Morphology
-                self.matcher = MorphologyRegex(list(restriction_tag))
-                self.ppb = determine_ppb(self.matcher.rgxs)
-            # UD system is handled based on deprel
-            elif system_type == 'UD':
-                self.type = RestrictionType.MorphologyUD
-                self.matcher = MorphologyUDRegex(list(restriction_tag))
-                # self.ppb = determine_ppb_ud(self.matcher.rgxs)
+            self.type = RestrictionType.Morphology
+            self.matcher = MorphologyRegex(list(restriction_tag))
+            self.ppb = determine_ppb(self.matcher.rgxs)
         elif restriction_type == "lexis":
             self.type = RestrictionType.Lexis
@@ -273,7 +177,7 @@ class Restriction:
             raise NotImplementedError()
     def match(self, word):
-        if self.type == RestrictionType.Morphology or self.type == RestrictionType.MorphologyUD:
+        if self.type == RestrictionType.Morphology:
             match_to = word.msd
         elif self.type == RestrictionType.Lexis:
             match_to = word.lemma
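With MorphologyUD gone, Restriction.match is back to a two-way dispatch: morphology restrictions are tested against the word's MSD tag, lexis restrictions against its lemma (ppb abbreviates "polnopomenska beseda", content word, graded 0-4). A reduced sketch of that dispatch with a toy matcher, not the repo's own classes:

    from enum import Enum

    class RestrictionType(Enum):
        Morphology = 0
        Lexis = 1

    class Word:
        def __init__(self, lemma, msd):
            self.lemma, self.msd = lemma, msd

    def match(rtype, matcher, word):
        # morphology looks at the MSD tag, lexis at the lemma
        match_to = word.msd if rtype == RestrictionType.Morphology else word.lemma
        return matcher(match_to)

    w = Word("jezik", "Ncmsn")
    print(match(RestrictionType.Morphology, lambda msd: msd.startswith("N"), w))  # True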

View File

@@ -1,8 +1,8 @@
 from luscenje_struktur.restriction import Restriction
 class RestrictionGroup:
-    def __init__(self, restrictions_tag, system_type, group_type='and'):
-        self.restrictions = [Restriction(el, system_type) for el in restrictions_tag]
+    def __init__(self, restrictions_tag, group_type='and'):
+        self.restrictions = [Restriction(el) for el in restrictions_tag]
         self.group_type = group_type
     def __iter__(self):
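The group's matching method falls outside this diff, so this is an assumption, but group_type presumably selects between requiring every member restriction to hold ('and') or any one ('or'). In sketch form:

    def group_matches(restrictions, word, group_type="and"):
        # 'and' groups need all members to match, 'or' groups need any one
        results = (r.match(word) for r in restrictions)
        return all(results) if group_type == "and" else any(results)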

View File

@@ -1,18 +1,18 @@
 import gc
+from psycopg2cffi import compat
+compat.register()
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import Session, aliased
+from sqlalchemy import create_engine
 from luscenje_struktur.codes_tagset import TAGSET, CODES, CODES_TRANSLATION, POSSIBLE_WORD_FORM_FEATURE_VALUES
 class SloleksDatabase:
     def __init__(self, db, load_sloleks):
-        from psycopg2cffi import compat
-        compat.register()
-        from sqlalchemy.ext.declarative import declarative_base
-        from sqlalchemy.orm import Session
-        from sqlalchemy import create_engine
-        global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation, FormEncoding
+        global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation
         [db_user, db_password, db_database, db_host] = db.split(':')
         engine = create_engine('postgresql://' + db_user + ':' + db_password + '@' + db_host + '/' + db_database,
@@ -71,25 +71,17 @@ class SloleksDatabase:
         class FormRepresentation(Base):
             __table__ = Base.metadata.tables['jedro_formrepresentation']
-        class FormEncoding(Base):
-            __table__ = Base.metadata.tables['jedro_formencoding']
         self.session = Session(engine)
         self.load_sloleks = load_sloleks
         if self.load_sloleks:
             self.init_load_sloleks()
-    # def init_load_sloleks2(self):
     def init_load_sloleks(self):
         query_word_form_features = self.session.query(WordFormFeature.word_form_id, WordFormFeature.value)
         word_form_features = query_word_form_features.all()
-        query_form_representations = self.session.query(FormRepresentation.word_form_id)
+        query_form_representations = self.session.query(FormRepresentation.word_form_id, FormRepresentation.form)
         form_representations = query_form_representations.all()
-        query_form_encoding = self.session.query(FormEncoding.form_representation_id, FormEncoding.text)
-        form_encodings = query_form_encoding.all()
         query_word_forms = self.session.query(WordForm.id, WordForm.lexeme_id)
         word_forms = query_word_forms.all()
         query_lexemes = self.session.query(Lexeme.id, Lexeme.lemma)
@@ -109,10 +101,7 @@ class SloleksDatabase:
                 self.word_form_features[word_form_feature.word_form_id] = set()
             self.word_form_features[word_form_feature.word_form_id].add(word_form_feature.value)
-        form_encodings_dict = {form_encoding.form_representation_id: form_encoding.text for form_encoding
-                               in form_encodings}
-        self.form_representations = {form_representation.word_form_id: form_encodings_dict[form_representation.word_form_id] for form_representation
-                                     in form_representations}
+        self.form_representations = {form_representation.word_form_id: form_representation.form for form_representation
+                                     in form_representations}
         self.word_forms = {}
@@ -164,7 +153,6 @@ class SloleksDatabase:
     def get_word_form(self, lemma, msd, data, align_msd=False):
         # modify msd as required
-        from sqlalchemy.orm import aliased
         msd = list(msd)
         if 'msd' in data:
             for key, value in data['msd'].items():
@@ -205,14 +193,9 @@ class SloleksDatabase:
             return ''.join(msd), lemma, form_representations
         else:
             wfs = [aliased(WordFormFeature) for _ in decypher_msd]
-            # self.session.query(FormEncoding.form_representation_id, FormEncoding.text)
-            query_preposition = self.session.query(FormEncoding.text) \
-                .join(FormRepresentation, FormRepresentation.id == FormEncoding.form_representation_id) \
+            query_preposition = self.session.query(FormRepresentation.form) \
                 .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \
                 .join(Lexeme, Lexeme.id == WordForm.lexeme_id)
-            # query_preposition = self.session.query(FormRepresentation.form) \
-            #     .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \
-            #     .join(Lexeme, Lexeme.id == WordForm.lexeme_id)
             for wf in wfs:
                 query_preposition = query_preposition.join(wf, wf.word_form_id == WordForm.id)
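Hoisting compat.register() to module scope keeps the registration ahead of anything that resolves the database driver: psycopg2cffi's compat.register() installs the package under the name psycopg2, which is how the plain postgresql:// URL keeps working with SQLAlchemy. A minimal sketch of the idiom (hypothetical credentials; the colon-separated db string convention mirrors the split(':') above):

    # make psycopg2cffi importable as "psycopg2" before SQLAlchemy needs it
    from psycopg2cffi import compat
    compat.register()

    from sqlalchemy import create_engine

    db_user, db_password, db_database, db_host = "user:secret:sloleks:localhost".split(':')
    engine = create_engine(
        "postgresql://" + db_user + ":" + db_password + "@" + db_host + "/" + db_database)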

View File

@@ -2,7 +2,6 @@ from xml.etree import ElementTree
 import logging
 import pickle
-from luscenje_struktur.codes_tagset import PPB_DEPRELS
 from luscenje_struktur.component import Component, ComponentType
 from luscenje_struktur.lemma_features import get_lemma_features
@@ -24,9 +23,7 @@ class SyntacticStructure:
         assert len(list(xml)) == 1
         system = next(iter(xml))
-        assert system.get('type') == 'JOS' or system.get('type') == 'UD'
-        system_type = system.get('type')
+        assert system.get('type') == 'JOS'
         components, dependencies, definitions = list(system)
         deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order'))
@@ -49,8 +46,8 @@ class SyntacticStructure:
                 raise NotImplementedError("Unknown definition: {} in structure {}"
                                           .format(el.tag, st.id))
-        fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type)
-        fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms, system_type)
+        fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None})
+        fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms)
         # all dep with value modra point to artificial root - fake_root_component
         if any([dep[2] == 'modra' for dep in deps]):
             st.fake_root_included = True
@@ -59,28 +56,9 @@ class SyntacticStructure:
         st.components = fake_root_component_children
         if not no_stats:
-            if system_type == 'JOS':
-                st.determine_core2w()
-            elif system_type == 'UD':
-                st.determine_core2w_ud()
+            st.determine_core2w()
         return st
-    def determine_core2w_ud(self):
-        deprels = {}
-        for c in self.components:
-            for next_el in c.next_element:
-                deprels[next_el[0]] = next_el[1]
-        ppb_components_num = 0
-        for c in self.components:
-            if c.type != ComponentType.Core:
-                continue
-            if c in deprels and deprels[c] not in PPB_DEPRELS:
-                continue
-            ppb_components_num += 1
-            c.type = ComponentType.Core2w
-        assert ppb_components_num == 2, RuntimeError("Cannot determine 2 'jedrna polnopomenska beseda' for", self.id)
     def determine_core2w(self):
         ppb_components = []
         for c in self.components:
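The constructor now accepts only JOS systems. In outline, the XML it expects has a single system child holding components, dependencies and definitions, with each dependency carrying from/to/label/order attributes; a sketch inferred from the asserts and get() calls in this hunk (the element names inside dependencies are hypothetical, as they are not shown in this diff):

    from xml.etree import ElementTree

    xml = ElementTree.fromstring("""
    <structure id="1">
      <system type="JOS">
        <components/>
        <dependencies>
          <dep from="#" to="1" label="modra" order="fixed"/>
        </dependencies>
        <definitions/>
      </system>
    </structure>
    """)
    system = next(iter(xml))
    assert len(list(xml)) == 1 and system.get('type') == 'JOS'
    components, dependencies, definitions = list(system)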

View File

@@ -1,5 +1,4 @@
 from datetime import timedelta, datetime
-import logging
 class TimeInfo:
     def __init__(self, to_go):
@@ -15,5 +14,5 @@ class TimeInfo:
         seconds = sum(self.times) / len(self.times)
         td = timedelta(seconds = int(seconds * self.to_go))
         ft = datetime.now() + td
-        logging.info("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M")))
+        print("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M")))
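The ETA here is a plain moving average: mean per-step duration projected over the remaining steps. Self-contained, with hypothetical numbers:

    from datetime import timedelta, datetime

    times = [1.2, 0.9, 1.1]   # hypothetical recorded per-step durations, seconds
    to_go = 40                # steps remaining

    seconds = sum(times) / len(times)
    eta = datetime.now() + timedelta(seconds=int(seconds * to_go))
    print("Going to finish in {}".format(eta.strftime("%d/%m @ %H:%M")))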

View File

@@ -1,7 +1,7 @@
 from collections import defaultdict, Counter
 from luscenje_struktur.progress_bar import progress
-import logging
 class WordStats:
     def __init__(self, lemma_features, db):
@@ -46,7 +46,7 @@ class WordStats:
     def generate_renders(self):
        step_name = 'generate_renders'
        if self.db.is_step_done(step_name):
-           logging.info("Skipping GenerateRenders, already complete")
+           print("Skipping GenerateRenders, already complete")
           return
        lemmas = [lemma for (lemma, ) in self.db.execute("SELECT DISTINCT lemma FROM UniqWords")]

luscenje_struktur/writer.py Normal file (133 lines added)
View File

@@ -0,0 +1,133 @@
+class Writer:
+    @staticmethod
+    def other_params(args):
+        return (args.multiple_output, int(args.sort_by), args.sort_reversed)
+    @staticmethod
+    def make_output_writer(args, colocation_ids, word_renderer):
+        params = Writer.other_params(args)
+        return Writer(args.out, OutFormatter(colocation_ids, word_renderer), params)
+    @staticmethod
+    def make_output_no_stat_writer(args, colocation_ids, word_renderer):
+        params = Writer.other_params(args)
+        return Writer(args.out_no_stat, OutNoStatFormatter(colocation_ids, word_renderer), params)
+    @staticmethod
+    def make_all_writer(args, colocation_ids, word_renderer):
+        return Writer(args.all, AllFormatter(colocation_ids, word_renderer), None)
+    @staticmethod
+    def make_stats_writer(args, colocation_ids, word_renderer):
+        params = Writer.other_params(args)
+        return Writer(args.stats, StatsFormatter(colocation_ids, word_renderer), params)
+    def __init__(self, file_out, formatter, params):
+        if params is None:
+            self.multiple_output = False
+            self.sort_by = -1
+            self.sort_order = None
+        else:
+            self.multiple_output = params[0]
+            self.sort_by = params[1]
+            self.sort_order = params[2]
+        self.output_file = file_out
+        self.formatter = formatter
+    def header(self):
+        repeating_cols = self.formatter.header_repeat()
+        cols = ["C{}_{}".format(i + 1, thd) for i in range(MAX_NUM_COMPONENTS)
+                for thd in repeating_cols]
+        cols = ["Structure_ID"] + cols + ["Colocation_ID"]
+        cols += self.formatter.header_right()
+        return cols
+    def sorted_rows(self, rows):
+        if self.sort_by < 0 or len(rows) < 2:
+            return rows
+        if len(rows[0]) <= self.sort_by:
+            logging.warning("Cannot sort by column #{}: Not enough columns!".format(len(rows[0])))
+            return rows
+        try:
+            int(rows[0][self.sort_by])
+            def key(row):
+                return int(row[self.sort_by])
+        except ValueError:
+            def key(row):
+                return row[self.sort_by].lower()
+        return sorted(rows, key=key, reverse=self.sort_order)
+    def write_header(self, file_handler):
+        file_handler.write(", ".join(self.header()) + "\n")
+    def write_out_worker(self, file_handler, structure, colocation_ids):
+        rows = []
+        components = structure.components
+        for match in colocation_ids.get_matches_for(structure):
+            self.formatter.new_match(match)
+            for words in match.matches:
+                to_write = []
+                for idx, _comp in enumerate(components):
+                    idx = str(idx + 1)
+                    if idx not in words:
+                        to_write.extend([""] * self.formatter.length())
+                    else:
+                        to_write.extend(self.formatter.content_repeat(words, match.representations, idx, structure.id))
+                # make them equal size
+                to_write.extend([""] * (MAX_NUM_COMPONENTS * self.formatter.length() - len(to_write)))
+                # structure_id and colocation_id
+                to_write = [structure.id] + to_write + [match.match_id]
+                # header_right
+                to_write.extend(self.formatter.content_right(len(match)))
+                rows.append(to_write)
+                if self.formatter.group():
+                    break
+        if rows != []:
+            rows = self.sorted_rows(rows)
+            file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n")
+            file_handler.flush()
+    def write_out(self, structures, colocation_ids):
+        if self.output_file is None:
+            return
+        def fp_close(fp_):
+            if fp_ != sys.stdout:
+                fp_.close()
+        def fp_open(snum=None):
+            if snum is None:
+                return open(self.output_file, "w")
+            else:
+                return open("{}.{}".format(self.output_file, snum), "w")
+        if not self.multiple_output:
+            fp = fp_open()
+            self.write_header(fp)
+        for s in structures:
+            if self.multiple_output:
+                fp = fp_open(s.id)
+                self.write_header(fp)
+            self.formatter.set_structure(s)
+            self.write_out_worker(fp, s, colocation_ids)
+            if self.multiple_output:
+                fp_close(fp)
+        if not self.multiple_output:
+            fp_close(fp)
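As listed, writer.py references sys, logging, MAX_NUM_COMPONENTS and the four formatter classes without importing or defining them, so the module cannot run exactly as shown. Presumably the real file carries an import block along these lines (the module path and constant value are assumptions, not shown in this diff; StatsFormatter at least exists, since its diff appears earlier in this comparison):

    import sys
    import logging

    # assumed location of the formatter classes used by the factory methods above
    from luscenje_struktur.formatter import (
        AllFormatter, OutFormatter, OutNoStatFormatter, StatsFormatter)

    MAX_NUM_COMPONENTS = 5  # assumed value; sets how many C{i}_* column groups are emitted

Typical use, judging from the factory methods, would then be Writer.make_output_writer(args, colocation_ids, word_renderer).write_out(structures, colocation_ids), with args supplying out, multiple_output, sort_by and sort_reversed.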