Compare commits

...

3 Commits

@@ -2,6 +2,7 @@ import argparse
import os
import sys
import tqdm
import logging
good_lemmas = ["absurd", "absurdnost", "akuten", "akutno", "alkohol", "alkoholen", "aluminijast", "ananas", "aplikacija", "aplikativen", "aranžma", "arbiter", "armada", "avtomatičen", "avtomatiziran", "babica", "bajen", "bajka", "bakren", "bambusov", "barvan", "barvanje", "baseballski", "bazar", "bazičen", "belina", "bezgov", "bičati", "bife", "bilka", "biomasa", "biotop", "birma", "bivol", "blago", "blaženost", "bliskavica", "bobnič", "bolha", "bolnišnica", "bor", "borov", "borovničev", "brati", "briljant", "briti", "brusiti", "bučanje", "cikličen", "civilizacija", "dopust", "drama", "drezati", "duda", "dvorezen", "embalaža", "faks", "farsa", "glasno", "informiranje", "interier", "intima", "intimno", "investirati", "ironično", "istovetiti", "izvožen", "jagoda", "jeklar", "jezik", "karbon", "kitara", "kodrast", "molče", "mučiti", "novinarski", "obala", "občevati", "okrasiti", "pajčevina", "panoga", "prevajanje", "prevajati", "previti", "prihraniti", "priloga", "prisluškovati", "sopara"]
@@ -15,14 +16,14 @@ def main(args):
for fidx, filename in enumerate(filepaths):
with open(filename, 'r') as fp:
print("loading next...", end="", flush=True)
logging.info("loading next...")
line = fp.readline()
lemma_rows = [idx for idx, cell in enumerate(line.split(",")) if "_Lemma" in cell]
file_lines = fp.read().split("\n")
for lidx, good_lemma in enumerate(good_lemmas):
spaces = " " * 20 if lidx == 0 else ""
print("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces), end="", flush=True)
logging.info("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces))
for line in file_lines:
if good_lemma not in line:

@@ -120,6 +120,26 @@ CODES_TRANSLATION = {
}
}
CODES_UD = {
"ADJ",
"ADP",
"PUNCT",
"ADV",
"AUX",
"SYM",
"INTJ",
"CCONJ",
"X",
"NOUN",
"DET",
"PROPN",
"NUM",
"VERB",
"PART",
"PRON",
"SCONJ"
}
CODES = {
"Noun": "N",
"Verb": "V",
@@ -211,3 +231,18 @@ TAGSET = {
"Y": [],
"X": ['type']
}
PPB_DEPRELS = [
"advmod",
"amod",
"compound",
"conj",
"fixed",
"flat",
"iobj",
"nmod",
"nsubj",
"nummod",
"obj",
"obl"
]
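The two new constants work together: CODES_UD enumerates the valid UD POS tags, and PPB_DEPRELS lists the dependency relations under which a component can still count as a content word. A minimal sketch of the intended checks (the helper name is hypothetical, not part of this change):

def is_content_candidate(upos, deprel):
    # the POS must be a known UD tag, e.g. "NOUN" or "VERB"
    assert upos in CODES_UD
    # only these relations keep a core component eligible as a content word
    return deprel in PPB_DEPRELS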

@@ -20,7 +20,7 @@ class ComponentType(Enum):
class Component:
def __init__(self, info):
def __init__(self, info, system_type):
idx = info['cid']
name = info['label'] if 'label' in info else None
typ = ComponentType.Core if info['type'] == "core" else ComponentType.Other
@@ -39,7 +39,7 @@ class Component:
self.status = status
self.name = name
self.idx = idx
self.restrictions = RestrictionGroup([None]) if 'restriction' in info else []
self.restrictions = RestrictionGroup([None], system_type) if 'restriction' in info else []
self.next_element = []
self.representation = []
self.selection = {}
@@ -50,17 +50,17 @@ class Component:
def add_next(self, next_component, link_label, order):
self.next_element.append((next_component, link_label, Order.new(order)))
def set_restriction(self, restrictions_tags):
def set_restriction(self, restrictions_tags, system_type):
if not restrictions_tags:
self.restrictions = RestrictionGroup([None])
self.restrictions = RestrictionGroup([None], system_type)
# if the first element is of type 'restriction', all following ones are as well
elif restrictions_tags[0].tag == "restriction":
self.restrictions = RestrictionGroup(restrictions_tags)
self.restrictions = RestrictionGroup(restrictions_tags, system_type)
# combinations of 'and' and 'or' restrictions are currently not implemented
elif restrictions_tags[0].tag == "restriction_or":
self.restrictions = RestrictionGroup(restrictions_tags[0], group_type='or')
self.restrictions = RestrictionGroup(restrictions_tags[0], system_type, group_type='or')
else:
raise RuntimeError("Unreachable")
@@ -72,19 +72,19 @@ class Component:
crend.add_feature(feature.attrib)
self.representation.append(crend)
def find_next(self, deps, comps, restrs, reprs):
def find_next(self, deps, comps, restrs, reprs, system_type):
to_ret = []
for d in deps:
if d[0] == self.idx:
_, idx, dep_label, order = d
next_component = Component(comps[idx])
next_component.set_restriction(restrs[idx])
next_component = Component(comps[idx], system_type)
next_component.set_restriction(restrs[idx], system_type)
next_component.set_representation(reprs[idx])
to_ret.append(next_component)
self.add_next(next_component, dep_label, order)
others = next_component.find_next(deps, comps, restrs, reprs)
others = next_component.find_next(deps, comps, restrs, reprs, system_type)
to_ret.extend(others)
return to_ret
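With these changes, every Component call site threads system_type through construction, restriction setup, and the recursive tree walk. A hedged sketch mirroring the fake-root usage in structures.py further down:

system_type = 'UD'  # or 'JOS'
root = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type)
children = root.find_next(deps, comps, restrs, reprs, system_type)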

@@ -1,5 +1,6 @@
from math import log2
import re
import logging
from luscenje_struktur.component import ComponentType
@@ -165,9 +166,9 @@ class StatsFormatter(Formatter):
new_key = (sidx, idx, '')
if new_key in self.colocation_ids.dispersions:
key = new_key
print('Dispersions fixed.')
logging.info('Dispersions fixed.')
else:
print('Dispersions not fixed.')
logging.info('Dispersions not fixed.')
if key in self.colocation_ids.dispersions:
distribution = self.colocation_ids.dispersions[key]
else:

@@ -5,6 +5,7 @@ import re
import sys
import gzip
import pathlib
from io import StringIO
from luscenje_struktur.progress_bar import progress
from luscenje_struktur.word import Word
@@ -29,13 +30,13 @@ def load_files(args, database, w_collection=None, input_corpus=None):
database.init("CREATE TABLE Files ( filename varchar(2048) )")
for idx, fname in enumerate(filenames):
print("FILE ", fname, "{}/{}".format(idx, len(filenames)))
logging.info("FILE " + fname + "{}/{}".format(idx, len(filenames)))
extension = pathlib.Path(fname).suffix
# check if a file with the same name has already been loaded...
loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname,)).fetchone()
if loaded is not None:
print("ALREADY LOADED")
logging.info("ALREADY LOADED")
continue
if extension == ".xml":
@@ -53,6 +54,11 @@ def load_files(args, database, w_collection=None, input_corpus=None):
sentences = load_csv_valency(fname, True, w_collection)
for sentence in sentences:
yield sentence
elif extension == ".conllu":
if input_corpus is None:
yield load_conllu(fname)
else:
raise Exception('conllu with input_corpus is not supported!')
else:
if input_corpus is None:
yield load_csv(fname, False)
@@ -77,6 +83,59 @@ def lines_csv(filename):
yield line
def load_conllu(filename):
import conllu
result = []
bad_sentence = False
words = {}
links = []
def sentence_end(bad_sentence, sent_id):
if bad_sentence:
return
for lfrom, ldest, ana in links:
if lfrom not in words or ldest not in words:
logging.warning("Bad link in sentence: " + sent_id)
continue
words[lfrom].add_link(ana, words[ldest])
result.extend(words.values())
with open(filename, 'r') as f:
data = f.read()
# conlls = conllu.parse_incr(StringIO(data))
# for sent in conlls:
# try:
# for word in sent:
# full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
# words[str(word['id'])] = Word(word['id'], word['xpos'], full_id, word['form'], False)
# except:
# logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
conlls = conllu.parse_incr(StringIO(data))
# build dep parse
for sent in conlls:
try:
# add a fake word that stands in for the sentence root (head id '0')
words['0'] = Word('', '', '0', '', False, True)
for word in sent:
if type(word['id']) == tuple:
continue
full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id']))
words[str(word['id'])] = Word(word['lemma'], word['upos'], full_id, word['form'], False)
links.append((str(word['head']), str(word['id']), word['deprel']))
sentence_end(False, sent.metadata['sent_id'])
links = []
words = {}
except:
links = []
words = {}
logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!")
return result
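The loader builds on the conllu package: parse_incr yields one TokenList per sentence, with sent_id in .metadata and each token as a dict keyed by CoNLL-U column names. A minimal sketch of that API surface, with an illustrative sentence:

from io import StringIO
import conllu

sample = (
    "# sent_id = doc1.1\n"
    "1\tHiša\thiša\tNOUN\tNcfsn\t_\t2\tnsubj\t_\t_\n"
    "2\tstoji\tstati\tVERB\tVmpr3s-n\t_\t0\troot\t_\t_\n"
    "\n"
)
for sent in conllu.parse_incr(StringIO(sample)):
    print(sent.metadata['sent_id'])  # doc1.1
    for token in sent:
        # id/head are ints for plain tokens; multiword ranges have tuple ids
        print(token['id'], token['lemma'], token['upos'], token['head'], token['deprel'])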
def load_csv(filename, compressed):
result = []
bad_sentence = False
@@ -265,6 +324,7 @@ def file_sentence_generator(et, args):
# strange errors, just skip...
pass
a = list(words.values())
return list(words.values())

@@ -2,6 +2,7 @@ import gc
from collections import defaultdict
from ast import literal_eval
from time import time
import logging
from luscenje_struktur.match import StructureMatch
from luscenje_struktur.representation_assigner import RepresentationAssigner
@@ -104,7 +105,7 @@ class MatchStore:
def set_representations(self, word_renderer, structures, sloleks_db=None):
step_name = 'representation'
if self.db.is_step_done(step_name):
print("Representation step already done, skipping")
logging.info("Representation step already done, skipping")
return
num_inserts = 1000
@@ -148,7 +149,7 @@ class MatchStore:
dispersions[(str(structure_id), component_id, lemma)] += 1
self.dispersions = dict(dispersions)
print("Storing dispersions...")
logging.info("Storing dispersions...")
self.store_dispersions()
self.db.step_is_done(step_name)

@@ -1,4 +1,5 @@
import time
import logging
try:
from tqdm import tqdm
@@ -21,10 +22,10 @@ class Progress:
for n, el in enumerate(iterable):
now = time.time()
if now - last_report > REPORT_ON:
print("\r{}: {}/{}".format(description, n, total), end="")
logging.info("\r{}: {}/{}".format(description, n, total), end="")
last_report = now
yield el
print(" -> {}".format(time.time() - start_time))
logging.info(" -> {}".format(time.time() - start_time))
else:
yield from tqdm(iterable, desc=description, total=total)

@@ -1,7 +1,7 @@
import re
from enum import Enum
from luscenje_struktur.codes_tagset import CODES, TAGSET
from luscenje_struktur.codes_tagset import CODES, TAGSET, CODES_UD
class RestrictionType(Enum):
@@ -9,6 +9,21 @@ class RestrictionType(Enum):
Lexis = 1
MatchAll = 2
Space = 3
MorphologyUD = 4
def determine_ppb_ud(rgxs):
if len(rgxs) != 1:
return 0
rgx = rgxs[0]
if rgx in ("ADJ", "NOUN", "ADV"):
return 0
elif rgx == "AUX":
return 3
elif rgx == "VERB":
return 2
else:
return 4
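A few illustrative calls, showing the ranking the function assigns (0 = most content-bearing, 4 = least):

determine_ppb_ud(["NOUN"])         # 0 - noun/adjective/adverb
determine_ppb_ud(["VERB"])         # 2
determine_ppb_ud(["AUX"])          # 3
determine_ppb_ud(["PART"])         # 4 - any other single tag
determine_ppb_ud(["NOUN", "ADJ"])  # 0 - more than one regex short-circuits to 0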
def determine_ppb(rgxs):
@@ -112,6 +127,78 @@ class MorphologyRegex:
return False
class MorphologyUDRegex:
def __init__(self, restriction):
# self.min_msd_length = 1
restr_dict = {}
for feature in restriction:
feature_dict = dict(feature.items())
match_type = True
# if "filter" in feature_dict:
# assert feature_dict['filter'] == "negative"
# match_type = False
# del feature_dict['filter']
assert len(feature_dict) == 1
key, value = next(iter(feature_dict.items()))
restr_dict[key] = (value, match_type)
assert 'POS' in restr_dict
# handle multiple word types
if '|' in restr_dict['POS'][0]:
categories = restr_dict['POS'][0].split('|')
else:
categories = [restr_dict['POS'][0]]
self.rgxs = []
self.re_objects = []
self.min_msd_lengths = []
del restr_dict['POS']
for category in categories:
min_msd_length = 1
category = category.upper()
assert category in CODES_UD
cat_code = category
rgx = category
# for attribute, (value, typ) in restr_dict.items():
# if attribute.lower() not in TAGSET[cat_code]:
# continue
# index = TAGSET[cat_code].index(attribute.lower())
# assert index >= 0
#
# if '|' in value:
# match = "".join(CODES[val] for val in value.split('|'))
# else:
# match = CODES[value]
#
# match = "[{}{}]".format("" if typ else "^", match)
# rgx[index + 1] = match
#
# if typ:
# min_msd_length = max(index + 1, min_msd_length)
# strip rgx
# for i in reversed(range(len(rgx))):
# if rgx[i] == '.':
# rgx = rgx[:-1]
# else:
# break
# self.re_objects.append([re.compile(r) for r in rgx])
self.rgxs.append(rgx)
self.min_msd_lengths.append(min_msd_length)
def __call__(self, text):
assert len(self.rgxs) == 1
return self.rgxs[0] == text
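Unlike the JOS matcher, the UD variant currently reduces to exact string comparison against the word's UD POS tag. A usage sketch with a hypothetical structure-file restriction:

import xml.etree.ElementTree as ET

restriction = ET.fromstring(
    '<restriction type="morphology"><feature POS="NOUN"/></restriction>')
matcher = MorphologyUDRegex(list(restriction))
matcher("NOUN")  # True
matcher("VERB")  # False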
class LexisRegex:
def __init__(self, restriction):
restr_dict = {}
@@ -150,8 +237,11 @@ class SpaceRegex:
return match
class Restriction:
def __init__(self, restriction_tag):
def __init__(self, restriction_tag, system_type='JOS'):
self.ppb = 4 # content-word rank ("polnopomenska beseda", 0-4)
if restriction_tag is None:
@@ -162,9 +252,15 @@ class Restriction:
restriction_type = restriction_tag.get('type')
if restriction_type == "morphology":
self.type = RestrictionType.Morphology
self.matcher = MorphologyRegex(list(restriction_tag))
self.ppb = determine_ppb(self.matcher.rgxs)
if system_type == 'JOS':
self.type = RestrictionType.Morphology
self.matcher = MorphologyRegex(list(restriction_tag))
self.ppb = determine_ppb(self.matcher.rgxs)
# UD system is handled based on deprel
elif system_type == 'UD':
self.type = RestrictionType.MorphologyUD
self.matcher = MorphologyUDRegex(list(restriction_tag))
# self.ppb = determine_ppb_ud(self.matcher.rgxs)
elif restriction_type == "lexis":
self.type = RestrictionType.Lexis
@@ -177,7 +273,7 @@ class Restriction:
raise NotImplementedError()
def match(self, word):
if self.type == RestrictionType.Morphology:
if self.type == RestrictionType.Morphology or self.type == RestrictionType.MorphologyUD:
match_to = word.msd
elif self.type == RestrictionType.Lexis:
match_to = word.lemma

@@ -1,8 +1,8 @@
from luscenje_struktur.restriction import Restriction
class RestrictionGroup:
def __init__(self, restrictions_tag, group_type='and'):
self.restrictions = [Restriction(el) for el in restrictions_tag]
def __init__(self, restrictions_tag, system_type, group_type='and'):
self.restrictions = [Restriction(el, system_type) for el in restrictions_tag]
self.group_type = group_type
def __iter__(self):
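The updated construction shapes, as seen at the call sites in component.py above (restriction_tags is a placeholder for the parsed XML elements):

group = RestrictionGroup([None], 'UD')  # default group with a single empty restriction
group = RestrictionGroup(restriction_tags, 'JOS', group_type='or')  # 'or'-combined group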

@@ -1,18 +1,18 @@
import gc
from psycopg2cffi import compat
compat.register()
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, aliased
from sqlalchemy import create_engine
from luscenje_struktur.codes_tagset import TAGSET, CODES, CODES_TRANSLATION, POSSIBLE_WORD_FORM_FEATURE_VALUES
class SloleksDatabase:
def __init__(self, db, load_sloleks):
global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation
from psycopg2cffi import compat
compat.register()
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation, FormEncoding
[db_user, db_password, db_database, db_host] = db.split(':')
engine = create_engine('postgresql://' + db_user + ':' + db_password + '@' + db_host + '/' + db_database,
@@ -71,17 +71,25 @@ class SloleksDatabase:
class FormRepresentation(Base):
__table__ = Base.metadata.tables['jedro_formrepresentation']
class FormEncoding(Base):
__table__ = Base.metadata.tables['jedro_formencoding']
self.session = Session(engine)
self.load_sloleks = load_sloleks
if self.load_sloleks:
self.init_load_sloleks()
# def init_load_sloleks2(self):
def init_load_sloleks(self):
query_word_form_features = self.session.query(WordFormFeature.word_form_id, WordFormFeature.value)
word_form_features = query_word_form_features.all()
query_form_representations = self.session.query(FormRepresentation.word_form_id, FormRepresentation.form)
query_form_representations = self.session.query(FormRepresentation.word_form_id)
form_representations = query_form_representations.all()
query_form_encoding = self.session.query(FormEncoding.form_representation_id, FormEncoding.text)
form_encodings = query_form_encoding.all()
query_word_forms = self.session.query(WordForm.id, WordForm.lexeme_id)
word_forms = query_word_forms.all()
query_lexemes = self.session.query(Lexeme.id, Lexeme.lemma)
@@ -101,7 +109,10 @@ class SloleksDatabase:
self.word_form_features[word_form_feature.word_form_id] = set()
self.word_form_features[word_form_feature.word_form_id].add(word_form_feature.value)
self.form_representations = {form_representation.word_form_id: form_representation.form for form_representation
form_encodings_dict = {form_encoding.form_representation_id: form_encoding.text for form_encoding
in form_encodings}
self.form_representations = {form_representation.word_form_id: form_encodings_dict[form_representation.word_form_id] for form_representation
in form_representations}
self.word_forms = {}
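The form text moved one table out: WordForm links to FormRepresentation via word_form_id, and FormEncoding holds the text keyed by form_representation_id. A hedged sketch of that id chain, with made-up ids:

# (FormRepresentation.id, FormRepresentation.word_form_id) pairs
form_reps = [(10, 1), (11, 2)]
# FormEncoding.text keyed by form_representation_id
form_encodings = {10: "hiša", 11: "hiši"}
# resolve each word form to its encoded text
text_for_word_form = {wf_id: form_encodings[fr_id] for fr_id, wf_id in form_reps}
# {1: 'hiša', 2: 'hiši'}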
@@ -153,6 +164,7 @@ class SloleksDatabase:
def get_word_form(self, lemma, msd, data, align_msd=False):
# modify msd as required
from sqlalchemy.orm import aliased
msd = list(msd)
if 'msd' in data:
for key, value in data['msd'].items():
@@ -193,9 +205,14 @@ class SloleksDatabase:
return ''.join(msd), lemma, form_representations
else:
wfs = [aliased(WordFormFeature) for _ in decypher_msd]
query_preposition = self.session.query(FormRepresentation.form) \
# self.session.query(FormEncoding.form_representation_id, FormEncoding.text)
query_preposition = self.session.query(FormEncoding.text) \
.join(FormRepresentation, FormRepresentation.id == FormEncoding.form_representation_id) \
.join(WordForm, WordForm.id == FormRepresentation.word_form_id) \
.join(Lexeme, Lexeme.id == WordForm.lexeme_id)
# query_preposition = self.session.query(FormRepresentation.form) \
# .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \
# .join(Lexeme, Lexeme.id == WordForm.lexeme_id)
for wf in wfs:
query_preposition = query_preposition.join(wf, wf.word_form_id == WordForm.id)

@@ -2,6 +2,7 @@ from xml.etree import ElementTree
import logging
import pickle
from luscenje_struktur.codes_tagset import PPB_DEPRELS
from luscenje_struktur.component import Component, ComponentType
from luscenje_struktur.lemma_features import get_lemma_features
@@ -23,7 +24,9 @@ class SyntacticStructure:
assert len(list(xml)) == 1
system = next(iter(xml))
assert system.get('type') == 'JOS'
assert system.get('type') == 'JOS' or system.get('type') == 'UD'
system_type = system.get('type')
components, dependencies, definitions = list(system)
deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order'))
@@ -46,8 +49,8 @@ class SyntacticStructure:
raise NotImplementedError("Unknown definition: {} in structure {}"
.format(el.tag, st.id))
fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None})
fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms)
fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type)
fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms, system_type)
# all deps labeled 'modra' point to the artificial root - fake_root_component
if any([dep[2] == 'modra' for dep in deps]):
st.fake_root_included = True
@@ -56,9 +59,28 @@ class SyntacticStructure:
st.components = fake_root_component_children
if not no_stats:
st.determine_core2w()
if system_type == 'JOS':
st.determine_core2w()
elif system_type == 'UD':
st.determine_core2w_ud()
return st
def determine_core2w_ud(self):
deprels = {}
for c in self.components:
for next_el in c.next_element:
deprels[next_el[0]] = next_el[1]
ppb_components_num = 0
for c in self.components:
if c.type != ComponentType.Core:
continue
if c in deprels and deprels[c] not in PPB_DEPRELS:
continue
ppb_components_num += 1
c.type = ComponentType.Core2w
assert ppb_components_num == 2, "Cannot determine 2 core content words ('jedrna polnopomenska beseda') for {}".format(self.id)
def determine_core2w(self):
ppb_components = []
for c in self.components:
@@ -115,7 +137,7 @@ def build_structures(args):
structures = []
for structure in et.iter('syntactic_structure'):
if structure.attrib['type'] == 'single':
if structure.attrib['type'] != 'collocation':
continue
to_append = SyntacticStructure.from_xml(structure, no_stats)
if to_append is None:

@@ -1,4 +1,5 @@
from datetime import timedelta, datetime
import logging
class TimeInfo:
def __init__(self, to_go):
@@ -14,5 +15,5 @@ class TimeInfo:
seconds = sum(self.times) / len(self.times)
td = timedelta(seconds = int(seconds * self.to_go))
ft = datetime.now() + td
print("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M")))
logging.info("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M")))

@@ -1,7 +1,7 @@
from collections import defaultdict, Counter
from luscenje_struktur.progress_bar import progress
import logging
class WordStats:
def __init__(self, lemma_features, db):
@@ -46,7 +46,7 @@ class WordStats:
def generate_renders(self):
step_name = 'generate_renders'
if self.db.is_step_done(step_name):
print("Skipping GenerateRenders, already complete")
logging.info("Skipping GenerateRenders, already complete")
return
lemmas = [lemma for (lemma, ) in self.db.execute("SELECT DISTINCT lemma FROM UniqWords")]

@@ -1,133 +0,0 @@
class Writer:
@staticmethod
def other_params(args):
return (args.multiple_output, int(args.sort_by), args.sort_reversed)
@staticmethod
def make_output_writer(args, colocation_ids, word_renderer):
params = Writer.other_params(args)
return Writer(args.out, OutFormatter(colocation_ids, word_renderer), params)
@staticmethod
def make_output_no_stat_writer(args, colocation_ids, word_renderer):
params = Writer.other_params(args)
return Writer(args.out_no_stat, OutNoStatFormatter(colocation_ids, word_renderer), params)
@staticmethod
def make_all_writer(args, colocation_ids, word_renderer):
return Writer(args.all, AllFormatter(colocation_ids, word_renderer), None)
@staticmethod
def make_stats_writer(args, colocation_ids, word_renderer):
params = Writer.other_params(args)
return Writer(args.stats, StatsFormatter(colocation_ids, word_renderer), params)
def __init__(self, file_out, formatter, params):
if params is None:
self.multiple_output = False
self.sort_by = -1
self.sort_order = None
else:
self.multiple_output = params[0]
self.sort_by = params[1]
self.sort_order = params[2]
self.output_file = file_out
self.formatter = formatter
def header(self):
repeating_cols = self.formatter.header_repeat()
cols = ["C{}_{}".format(i + 1, thd) for i in range(MAX_NUM_COMPONENTS)
for thd in repeating_cols]
cols = ["Structure_ID"] + cols + ["Colocation_ID"]
cols += self.formatter.header_right()
return cols
def sorted_rows(self, rows):
if self.sort_by < 0 or len(rows) < 2:
return rows
if len(rows[0]) <= self.sort_by:
logging.warning("Cannot sort by column #{}: Not enough columns!".format(len(rows[0])))
return rows
try:
int(rows[0][self.sort_by])
def key(row):
return int(row[self.sort_by])
except ValueError:
def key(row):
return row[self.sort_by].lower()
return sorted(rows, key=key, reverse=self.sort_order)
def write_header(self, file_handler):
file_handler.write(", ".join(self.header()) + "\n")
def write_out_worker(self, file_handler, structure, colocation_ids):
rows = []
components = structure.components
for match in colocation_ids.get_matches_for(structure):
self.formatter.new_match(match)
for words in match.matches:
to_write = []
for idx, _comp in enumerate(components):
idx = str(idx + 1)
if idx not in words:
to_write.extend([""] * self.formatter.length())
else:
to_write.extend(self.formatter.content_repeat(words, match.representations, idx, structure.id))
# make them equal size
to_write.extend([""] * (MAX_NUM_COMPONENTS * self.formatter.length() - len(to_write)))
# structure_id and colocation_id
to_write = [structure.id] + to_write + [match.match_id]
# header_right
to_write.extend(self.formatter.content_right(len(match)))
rows.append(to_write)
if self.formatter.group():
break
if rows != []:
rows = self.sorted_rows(rows)
file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n")
file_handler.flush()
def write_out(self, structures, colocation_ids):
if self.output_file is None:
return
def fp_close(fp_):
if fp_ != sys.stdout:
fp_.close()
def fp_open(snum=None):
if snum is None:
return open(self.output_file, "w")
else:
return open("{}.{}".format(self.output_file, snum), "w")
if not self.multiple_output:
fp = fp_open()
self.write_header(fp)
for s in structures:
if self.multiple_output:
fp = fp_open(s.id)
self.write_header(fp)
self.formatter.set_structure(s)
self.write_out_worker(fp, s, colocation_ids)
if self.multiple_output:
fp_close(fp)
if not self.multiple_output:
fp_close(fp)