Created links, source, target file output

Branch: master
Luka, 2 years ago
parent de50db0c30
commit 95b71cd6a3

.gitignore (vendored)
@@ -2,3 +2,4 @@
 data/
 __pycache__/
 venv/
+src/__pycache__/

src/create_tei.py

@@ -12,8 +12,8 @@ class Sentence:
         self.links = []
         self.no_ud = no_ud

-    def add_item(self, token, lemma, upos, upos_other, xpos, misc):
-        self.items.append([token, lemma, upos, upos_other, xpos, "SpaceAfter=No" in misc.split('|')])
+    def add_item(self, word_id, token, lemma, upos, upos_other, xpos, misc):
+        self.items.append([word_id, token, lemma, upos, upos_other, xpos, "SpaceAfter=No" in misc.split('|')])

     def add_link(self, link_ref, link_type):
         self.links.append([link_ref, link_type])
@@ -25,10 +25,9 @@ class Sentence:
         xml_id = self._id
         base = etree.Element('s')
         set_xml_attr(base, 'id', xml_id)
-        id_counter = 1

         for item in self.items:
-            token, lemma, upos, upos_other, xpos, no_space_after = item
+            word_id, token, lemma, upos, upos_other, xpos, no_space_after = item

             if xpos in {'U', 'Z'}:  # hmm, safe only as long as U is unused in English tagset and Z in Slovenian one
                 to_add = etree.Element('pc')
@@ -43,11 +42,9 @@ class Sentence:
             else:
                 to_add.set('msd', f'UposTag={upos}')

-            set_xml_attr(to_add, 'id', "{}.{}".format(xml_id, id_counter))
+            set_xml_attr(to_add, 'id', word_id)
             to_add.text = token
-            id_counter += 1

             if no_space_after:
                 to_add.set('join', 'right')
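
Taken together, the two Sentence hunks change how token ids are minted: instead of numbering tokens with a local id_counter, as_xml now writes through the word_id supplied by the caller. A minimal sketch of the new flow (the id and token values are hypothetical, not part of the commit):

    # Illustration only (hypothetical values): add_item now takes the token's
    # xml:id as its first argument and as_xml() writes it through unchanged.
    sentence.add_item('doc1.1.1.s1', 'Hello', 'hello', 'INTJ', '_', 'N', '_')
    # -> <w xml:id="doc1.1.1.s1" ...>Hello</w> in the serialized TEI,
    # rather than an id like doc1.1.1.1 derived from a running counter.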
@@ -57,8 +54,9 @@
 class Paragraph:
-    def __init__(self, _id):
+    def __init__(self, _id, _doc_id):
         self._id = _id if _id is not None else 'no-id'
+        self._doc_id = _doc_id if _doc_id is not None else ''
         self.sentences = []

     def add_sentence(self, sentence):
@@ -68,7 +66,10 @@ class Paragraph:
         if id_prefix:
             xml_id = id_prefix + '.' + self._id
         else:
-            xml_id = self._id
+            if self._doc_id:
+                xml_id = self._doc_id + '.' + self._id
+            else:
+                xml_id = self._id

         p = etree.Element('p')
         set_xml_attr(p, 'id', xml_id)
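
With the new _doc_id field, a paragraph can qualify its own xml:id even when the caller passes no id_prefix. Illustration (values hypothetical):

    # Illustration only: Paragraph('1', 'doc1').as_xml() now yields
    # <p xml:id="doc1.1">; before this commit it would have been <p xml:id="1">.

This is what lets TeiDocument.as_xml drop the id_prefix argument in the next hunk.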
@@ -97,7 +98,7 @@ class TeiDocument:
         text = etree.SubElement(root, 'text')
         body = etree.SubElement(text, 'body')
         for para in self.paragraphs:
-            body.append(para.as_xml(id_prefix=xml_id))
+            body.append(para.as_xml())

         encoding_desc = etree.SubElement(tei_header, 'encodingDesc')
         tags_decl = etree.SubElement(encoding_desc, 'tagsDecl')
@@ -121,6 +122,36 @@ def build_tei_etrees(documents):
     return elements


+def build_links(all_edges):
+    root = etree.Element('TEI')
+    root.set('xmlns', 'http://www.tei-c.org/ns/1.0')
+    set_xml_attr(root, 'lang', 'sl')
+
+    # elements = []
+    for document_edges in all_edges:
+        d = etree.Element('linkGrp')
+        for paragraph_edges in document_edges:
+            p = etree.Element('linkGrp')
+            for sentence_edges in paragraph_edges:
+                s = etree.Element('linkGrp')
+                random_id = ''
+                for token_edges in sentence_edges:
+                    link = etree.Element('link')
+                    link.set('labels', ' '.join(token_edges['labels']))
+                    link.set('sources', ' '.join(['#' + source for source in token_edges['source_ids']]))
+                    link.set('targets', ' '.join(['#' + source for source in token_edges['target_ids']]))
+                    if not random_id:
+                        random_id = token_edges['source_ids'][0] if len(token_edges['source_ids']) > 0 else token_edges['target_ids'][0]
+                    s.append(link)
+                set_xml_attr(s, 'sentence_id', '.'.join(random_id.split('.')[:3]))
+                p.append(s)
+            set_xml_attr(p, 'paragraph_id', '.'.join(random_id.split('.')[:2]))
+            d.append(p)
+        set_xml_attr(d, 'document_id', random_id.split('.')[0])
+        root.append(d)
+
+    return root
+
+
 def set_xml_attr(node, attribute, value):
     node.attrib['{http://www.w3.org/XML/1998/namespace}' + attribute] = value
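
For orientation, build_links nests one linkGrp per document, paragraph, and sentence, with one link per edge; the *_id attributes are recovered by slicing the first token id seen (random_id). A hypothetical fragment of the output (ids invented for illustration):

    <TEI xmlns="http://www.tei-c.org/ns/1.0" xml:lang="sl">
      <linkGrp xml:document_id="doc1">
        <linkGrp xml:paragraph_id="doc1.1">
          <linkGrp xml:sentence_id="doc1.1.1">
            <link labels="R" sources="#doc1.1.1.s2" targets="#doc1.1.1.t2"/>
          </linkGrp>
        </linkGrp>
      </linkGrp>
    </TEI>

Note that random_id stays empty for a sentence with no edges, so its sentence_id (and, if it is the last sentence seen, the paragraph and document ids) would come out empty in that case.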
@@ -173,7 +204,7 @@ def construct_tei_documents_from_list(object_list):
             # para_buffer.append(line)

     if len(object_list) > 0:
-        document_paragraphs.append(construct_paragraph(para_id, object_list))
+        document_paragraphs.append(construct_paragraph(doc_id, para_id, object_list))

     if len(document_paragraphs) > 0:
         documents.append(
@@ -196,7 +227,7 @@ def construct_tei_documents(conllu_lines):
         key, val = parse_metaline(line)
         if key == 'newdoc id':
             if len(para_buffer) > 0:
-                document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+                document_paragraphs.append(construct_paragraph(doc_id, para_id, para_buffer))
             if len(document_paragraphs) > 0:
                 documents.append(
                     TeiDocument(doc_id, document_paragraphs))
@@ -204,7 +235,7 @@ def construct_tei_documents(conllu_lines):
             doc_id = val
         elif key == 'newpar id':
             if len(para_buffer) > 0:
-                document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+                document_paragraphs.append(construct_paragraph(doc_id, para_id, para_buffer))
             para_buffer = []
             para_id = val
         elif key == 'sent_id':
@@ -214,7 +245,7 @@ def construct_tei_documents(conllu_lines):
             para_buffer.append(line)

     if len(para_buffer) > 0:
-        document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+        document_paragraphs.append(construct_paragraph(doc_id, para_id, para_buffer))

     if len(document_paragraphs) > 0:
         documents.append(
@@ -223,8 +254,8 @@ def construct_tei_documents(conllu_lines):
     return documents


-def construct_paragraph_from_list(para_id, etree_source_sentences):
-    para = Paragraph(para_id)
+def construct_paragraph_from_list(doc_id, para_id, etree_source_sentences):
+    para = Paragraph(para_id, doc_id)

     for sentence in etree_source_sentences:
         para.add_sentence(sentence)
@@ -232,8 +263,8 @@ def construct_paragraph_from_list(para_id, etree_source_sentences):
     return para


-def construct_paragraph(para_id, conllu_lines):
-    para = Paragraph(para_id)
+def construct_paragraph(doc_id, para_id, conllu_lines):
+    para = Paragraph(para_id, doc_id)

     sent_id = None
     sent_buffer = []
@@ -267,6 +298,7 @@ def construct_sentence_from_list(sent_id, object_list):
         misc = '_' if tokens['space_after'] else 'SpaceAfter=No'

         sentence.add_item(
+            word_id,
             token,
             lemma,
             upos,

@@ -9,17 +9,17 @@ from xml.etree import ElementTree
 from lxml import etree

 from src.create_tei import construct_tei_etrees, construct_tei_documents_from_list, construct_sentence_from_list, \
-    construct_paragraph_from_list, TeiDocument, build_tei_etrees
+    construct_paragraph_from_list, TeiDocument, build_tei_etrees, build_links

 logging.basicConfig(level=logging.INFO)


-def add_token(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id):
+def add_token(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_id):
     source_id = "s" + svala_i
     target_id = "t" + svala_i
     edge_id = "e-" + source_id + "-" + target_id
-    source_token_id = sentence_string_source_id + f'.{source_i}'
-    target_token_id = sentence_string_target_id + f'.{target_i}'
+    source_token_id = sentence_string_id + f'.s{source_i}'
+    target_token_id = sentence_string_id + f'.t{target_i}'
     token_tag = 'w' if el.tag.startswith('w') else 'pc'
     lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
     source.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
@@ -27,15 +27,15 @@ def add_token(svala_i, source_i, target_i, el, source, target, edges, svala_data
     edges.append({'source_ids': [source_token_id], 'target_ids': [target_token_id], 'labels': svala_data['edges'][edge_id]['labels']})


-def add_error_token(el, out_list, sentence_string_id, out_list_i, out_list_ids):
-    source_token_id = sentence_string_id + f'.{out_list_i}'
+def add_error_token(el, out_list, sentence_string_id, out_list_i, out_list_ids, is_source):
+    source_token_id = sentence_string_id + f'.s{out_list_i}' if is_source else sentence_string_id + f'.t{out_list_i}'
     token_tag = 'w' if el.tag.startswith('w') else 'pc'
     lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
     out_list.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
     out_list_ids.append(source_token_id)


-def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id):
+def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_data, sentence_string_id):
     source_edge_ids = []
     target_edge_ids = []
     source_ids = []
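
The signature changes above are the heart of this file's diff: the separate source- and target-side sentence id strings collapse into one sentence_string_id, and the source/target distinction moves into the token suffix. Illustration (values hypothetical):

    # Illustration only: token ids under the new scheme.
    sentence_string_id = 'doc1.1.1'
    source_token_id = sentence_string_id + f'.s{2}'  # 'doc1.1.1.s2'
    target_token_id = sentence_string_id + f'.t{2}'  # 'doc1.1.1.t2'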
@@ -49,7 +49,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 source_id = "s" + ind
                 source_edge_ids.append(source_id)

-                add_error_token(el, source, sentence_string_source_id, source_i, source_ids)
+                add_error_token(el, source, sentence_string_id, source_i, source_ids, True)

                 source_i += 1
                 svala_i += 1

@@ -65,7 +65,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 target_id = "t" + ind
                 target_edge_ids.append(target_id)

-                add_error_token(p_el, target, sentence_string_target_id, target_i, target_ids)
+                add_error_token(p_el, target, sentence_string_id, target_i, target_ids, False)

                 target_i += 1
                 svala_i += 1

@@ -81,7 +81,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 source_id = "s" + ind
                 source_edge_ids.append(source_id)

-                add_error_token(el_l2, source, sentence_string_source_id, source_i, source_ids)
+                add_error_token(el_l2, source, sentence_string_id, source_i, source_ids, True)

                 source_i += 1
                 svala_i += 1

@@ -97,7 +97,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 source_id = "s" + ind
                 source_edge_ids.append(source_id)

-                add_error_token(el_l3, source, sentence_string_source_id, source_i, source_ids)
+                add_error_token(el_l3, source, sentence_string_id, source_i, source_ids, True)

                 source_i += 1
                 svala_i += 1

@@ -113,7 +113,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 source_id = "s" + ind
                 source_edge_ids.append(source_id)

-                add_error_token(el_l4, source, sentence_string_source_id, source_i, source_ids)
+                add_error_token(el_l4, source, sentence_string_id, source_i, source_ids, True)

                 source_i += 1
                 svala_i += 1

@@ -128,7 +128,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
                 source_id = "s" + ind
                 source_edge_ids.append(source_id)

-                add_error_token(el_l5, source, sentence_string_source_id, source_i, source_ids)
+                add_error_token(el_l5, source, sentence_string_id, source_i, source_ids, True)

                 source_i += 1
                 svala_i += 1

@@ -142,14 +142,13 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
             target_id = "t" + ind
             target_edge_ids.append(target_id)

-            add_error_token(p_el, target, sentence_string_target_id, target_i, target_ids)
+            add_error_token(p_el, target, sentence_string_id, target_i, target_ids, False)

             target_i += 1
             svala_i += 1

         elif p_el.tag.startswith('c'):
             target[-1]['space_after'] = True

     edge_ids = sorted(source_edge_ids) + sorted(target_edge_ids)
     edge_id = "e-" + "-".join(edge_ids)
     edges.append({'source_ids': source_ids, 'target_ids': target_ids, 'labels': svala_data['edges'][edge_id]['labels']})
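
Every edge record appended here has the same shape, which is what build_links later serializes to <link> elements. A hypothetical record:

    # Illustration only (hypothetical ids and label).
    {'source_ids': ['doc1.1.1.s2'], 'target_ids': ['doc1.1.1.t2'], 'labels': ['R']}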
@ -161,6 +160,12 @@ def process_file(et, args):
if os.path.exists(args.results_folder): if os.path.exists(args.results_folder):
shutil.rmtree(args.results_folder) shutil.rmtree(args.results_folder)
os.mkdir(args.results_folder) os.mkdir(args.results_folder)
etree_source_documents = []
etree_target_documents = []
etree_source_paragraphs = []
etree_target_paragraphs = []
document_edges = []
for div in et.iter('div'): for div in et.iter('div'):
bibl = div.find('bibl') bibl = div.find('bibl')
file_name = bibl.get('n') file_name = bibl.get('n')
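
These accumulators mirror the corpus hierarchy; document_edges in particular must end up as documents, each a list of paragraphs, each a list of sentences, each a list of edge dicts, since that is the nesting build_links iterates. A sketch of the expected shape (contents hypothetical):

    # Illustration only: the nesting build_links expects.
    document_edges = [          # one entry per document
        [                       # one entry per paragraph
            [                   # one entry per sentence
                {'source_ids': ['doc1.1.1.s1'],
                 'target_ids': ['doc1.1.1.t1'],
                 'labels': ['ID']},
            ],
        ],
    ]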
@@ -174,13 +179,13 @@ def process_file(et, args):
         svala_list = [[fname[:-13], fname] if 'problem' in fname else [fname[:-5], fname] for fname in os.listdir(svala_path)]
         svala_dict = {e[0]: e[1] for e in svala_list}

+        paragraph_edges = []
         paragraphs = div.findall('p')
         for paragraph in paragraphs:
             sentences = paragraph.findall('s')
             svala_i = 1

             # read json
             svala_file = os.path.join(svala_path, svala_dict[paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id']])
             jf = open(svala_file)
@@ -189,54 +194,68 @@ def process_file(et, args):
             etree_source_sentences = []
             etree_target_sentences = []

-            edges = []
+            sentence_edges = []
             for sentence_id, sentence in enumerate(sentences):
                 source = []
                 target = []
+                edges = []

                 sentence_id += 1
                 source_i = 1
                 target_i = 1
-                sentence_string_source_id = paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'] + f's.{sentence_id}'
-                sentence_string_target_id = paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'] + f't.{sentence_id}'
+                sentence_string_id = paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'] + f'.{sentence_id}'

                 for el in sentence:
                     if el.tag.startswith('w'):
-                        add_token(str(svala_i), source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id)
+                        add_token(str(svala_i), source_i, target_i, el, source, target, edges, svala_data, sentence_string_id)
                         svala_i += 1
                         source_i += 1
                         target_i += 1
                     elif el.tag.startswith('pc'):
-                        add_token(str(svala_i), source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id)
+                        add_token(str(svala_i), source_i, target_i, el, source, target, edges, svala_data, sentence_string_id)
                         svala_i += 1
                         source_i += 1
                         target_i += 1
                     elif el.tag.startswith('u'):
-                        svala_i, source_i, target_i = add_errors(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id)
+                        svala_i, source_i, target_i = add_errors(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_id)
                     elif el.tag.startswith('c'):
-                        source[-1]['space_after'] = True
-                        target[-1]['space_after'] = True
+                        if len(source) > 0:
+                            source[-1]['space_after'] = True
+                        if len(target) > 0:
+                            target[-1]['space_after'] = True

+                sentence_edges.append(edges)
                 etree_source_sentences.append(construct_sentence_from_list(str(sentence_id), source))
                 etree_target_sentences.append(construct_sentence_from_list(str(sentence_id), target))

-            etree_source_paragraph = construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1] + 's', etree_source_sentences)
-            etree_source_document = TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], [etree_source_paragraph])
-            etree_source = build_tei_etrees([etree_source_document])
-
-            etree_target_paragraph = construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1] + 't', etree_target_sentences)
-            etree_target_document = TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], [etree_target_paragraph])
-            etree_target = build_tei_etrees([etree_target_document])
-
-            with open(os.path.join(args.results_folder, f"{paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id']}_source"), 'w') as sf:
-                sf.write(etree.tostring(etree_source[0], pretty_print=True, encoding='utf-8').decode())
-
-            with open(os.path.join(args.results_folder, f"{paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id']}_target"), 'w') as tf:
-                tf.write(etree.tostring(etree_target[0], pretty_print=True, encoding='utf-8').decode())
-
-            with open(os.path.join(args.results_folder, f"{paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id']}_errors"), 'w') as jf:
-                json.dump(edges, jf, ensure_ascii=False, indent=" ")
-
-            break
+            etree_source_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_source_sentences))
+            etree_target_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_target_sentences))
+            paragraph_edges.append(sentence_edges)
+
+        document_edges.append(paragraph_edges)
+
+    etree_source_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], etree_source_paragraphs))
+    etree_target_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], etree_target_paragraphs))
+
+    etree_source = build_tei_etrees(etree_source_documents)
+    etree_target = build_tei_etrees(etree_target_documents)
+
+    # TODO FIX THIS
+    etree_links = build_links(document_edges)
+
+    with open(os.path.join(args.results_folder, f"source.xml"), 'w') as sf:
+        sf.write(etree.tostring(etree_source[0], pretty_print=True, encoding='utf-8').decode())
+
+    with open(os.path.join(args.results_folder, f"target.xml"), 'w') as tf:
+        tf.write(etree.tostring(etree_target[0], pretty_print=True, encoding='utf-8').decode())
+
+    with open(os.path.join(args.results_folder, f"links.xml"), 'w') as tf:
+        tf.write(etree.tostring(etree_links, pretty_print=True, encoding='utf-8').decode())
+
+    with open(os.path.join(args.results_folder, f"links.json"), 'w') as jf:
+        json.dump(document_edges, jf, ensure_ascii=False, indent=" ")


 def main(args):
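
Net effect of the hunk above: the per-paragraph _source/_target/_errors files and the early break are gone; one run now accumulates the whole corpus and writes four fixed outputs (layout as implied by the diff):

    results_folder/
        source.xml   # TEI tree for the source side
        target.xml   # TEI tree for the target side
        links.xml    # linkGrp tree from build_links
        links.json   # nested document_edges dump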
