IssueID #1723: adjusted pipeline to also handle single-component structures
parent 177b352181
commit ba5c2d155d
@@ -2,6 +2,7 @@
 TMP_DIRECTORY = '../tmp/structure_assignment'
 
 # scripts
+TEI_SPLIT_SCRIPT_NAME = 'split_tei.py'
 CONLLU_TWEAK_SCRIPT_NAME = 'tweak_conllu.py'
 CONLLU_TEI_SCRIPT_NAME = 'conllu_to_xml.py'
 MWE_EXTRACTION_SCRIPT_NAME = 'wani.py'
@@ -9,6 +10,7 @@ STRUCTURE_ASSIGNMENT_SCRIPT_NAME = 'assign_structures.py'
 STRUCTURE_CREATION_SCRIPT_NAME = 'create_structures.py'
 STRUCTURE_CREATION_SCRIPT_NAME = 'create_structures.py'
 TEI_DICTIONARY_SCRIPT_NAME = 'tei_to_dictionary.py'
+DICTIONARY_MERGE_SCRIPT_NAME = 'merge_dictionaries.py'
 
 # resources
 OBELIKS_JAR_FILE_NAME = '../resources/obeliks.jar'
@@ -24,9 +26,13 @@ OBELIKS_RAW_FILE_NAME = TMP_DIRECTORY + '/obeliks_raw.conllu'
 OBELIKS_TWEAKED_FILE_NAME = TMP_DIRECTORY + '/obeliks_tweaked.conllu'
 CLASSLA_FILE_NAME = TMP_DIRECTORY + '/classla.conllu'
 TEI_INIT_FILE_NAME = TMP_DIRECTORY + '/tei_initial.xml'
+TEI_SINGLE_FILE_NAME = TMP_DIRECTORY + '/tei_single.xml'
+TEI_MULTIPLE_FILE_NAME = TMP_DIRECTORY + '/tei_multiple.xml'
 TEI_STRUCTURE_1_FILE_NAME = TMP_DIRECTORY + '/tei_with_structure_ids1.xml'
 TEI_STRUCTURE_2_FILE_NAME = TMP_DIRECTORY + '/tei_with_structure_ids2.xml'
 MWE_CSV_1_FILE_NAME = TMP_DIRECTORY + '/mwes1.csv'
 MWE_CSV_2_FILE_NAME = TMP_DIRECTORY + '/mwes2.csv'
 STRUCTURE_NEW_FILE_NAME = TMP_DIRECTORY + '/structures_new.xml'
-DICTIONARY_XML_FILE_NAME = TMP_DIRECTORY + '/dictionary.xml'
+DICTIONARY_SINGLE_FILE_NAME = TMP_DIRECTORY + '/dictionary_single.xml'
+DICTIONARY_MULTIPLE_FILE_NAME = TMP_DIRECTORY + '/dictionary_multiple.xml'
+DICTIONARY_FILE_NAME = TMP_DIRECTORY + '/dictionary.xml'
scripts/merge_dictionaries.py (new file)
@@ -0,0 +1,30 @@
+import argparse
+import re
+
+import lxml.etree as lxml
+
+# Clark notation for the xml:id attribute, which is how lxml addresses namespaced attributes.
+XML_ID_KEY = '{http://www.w3.org/XML/1998/namespace}id'
+
+arg_parser = argparse.ArgumentParser(description='Merge single and multiple token dictionaries.')
+arg_parser.add_argument('-single', type=str, required=True, help='Input single token dictionary')
+arg_parser.add_argument('-multiple', type=str, required=True, help='Input multiple token dictionary')
+arg_parser.add_argument('-outfile', type=str, required=True, help='Output merged dictionary')
+arguments = arg_parser.parse_args()
+single_file_name = arguments.single
+multiple_file_name = arguments.multiple
+output_file_name = arguments.outfile
+
+def get_entries(input_file_name):
+    return list(lxml.parse(input_file_name).getroot())
+
+# Restore document order: sort numerically by the sentence number in xml:id values of the form 's<sentence>.<index>'.
+entries = get_entries(single_file_name) + get_entries(multiple_file_name)
+entries.sort(key=lambda entry: int(re.search(r'^s(\d+)\.\d+$', entry.get(XML_ID_KEY)).group(1)))
+
+root = lxml.Element('dictionary')
+for entry in entries:
+    del entry.attrib[XML_ID_KEY]
+    root.append(entry)
+tree = lxml.ElementTree(root)
+tree.write(output_file_name, encoding='UTF-8', pretty_print=True)
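For reference, a minimal sketch of the sort key used above, run on hypothetical xml:id values (the 's<sentence>.<index>' pattern is assumed from the regular expression; the values themselves are made up):

import re

def sentence_number(value):
    # Extract the sentence number from ids shaped like 's<sentence>.<index>'.
    return int(re.search(r'^s(\d+)\.\d+$', value).group(1))

ids = ['s12.3', 's2.1', 's100.7']
print(sorted(ids, key=sentence_number))  # ['s2.1', 's12.3', 's100.7'] -- numeric, not lexical, order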
@@ -16,20 +16,36 @@ output_lexicon_file_name = arguments.outlexicon
 output_structure_file_name = arguments.outstructures
 
 def run_pipeline(input_tei_file_name, output_lexicon_file_name, output_structure_file_name):
+
+    # setup and split
     shutil.rmtree(TMP_DIRECTORY, True)
     os.makedirs(TMP_DIRECTORY, exist_ok=True)
     shutil.copyfile(input_tei_file_name, TEI_INIT_FILE_NAME)
-    run_mwe_extraction(STRUCTURE_CURRENT_FILE_NAME, TEI_INIT_FILE_NAME, MWE_CSV_1_FILE_NAME)
-    run_structure_assignment(TEI_INIT_FILE_NAME, MWE_CSV_1_FILE_NAME, TEI_STRUCTURE_1_FILE_NAME)
+    split_tei_input(TEI_INIT_FILE_NAME, TEI_SINGLE_FILE_NAME, TEI_MULTIPLE_FILE_NAME)
+
+    # single-token units
+    run_dictionary_conversion(TEI_SINGLE_FILE_NAME, DICTIONARY_SINGLE_FILE_NAME)
+
+    # multiple-token units
+    run_mwe_extraction(STRUCTURE_CURRENT_FILE_NAME, TEI_MULTIPLE_FILE_NAME, MWE_CSV_1_FILE_NAME)
+    run_structure_assignment(TEI_MULTIPLE_FILE_NAME, MWE_CSV_1_FILE_NAME, TEI_STRUCTURE_1_FILE_NAME)
     run_structure_creation(STRUCTURE_CURRENT_FILE_NAME, TEI_STRUCTURE_1_FILE_NAME, STRUCTURE_NEW_FILE_NAME)
     validate_structures(STRUCTURE_NEW_FILE_NAME)
-    run_mwe_extraction(STRUCTURE_NEW_FILE_NAME, TEI_INIT_FILE_NAME, MWE_CSV_2_FILE_NAME)
-    run_structure_assignment(TEI_INIT_FILE_NAME, MWE_CSV_2_FILE_NAME, TEI_STRUCTURE_2_FILE_NAME)
-    run_dictionary_conversion(TEI_STRUCTURE_2_FILE_NAME, DICTIONARY_XML_FILE_NAME)
-    validate_dictionary(DICTIONARY_XML_FILE_NAME)
-    shutil.copyfile(DICTIONARY_XML_FILE_NAME, output_lexicon_file_name)
+    run_mwe_extraction(STRUCTURE_NEW_FILE_NAME, TEI_MULTIPLE_FILE_NAME, MWE_CSV_2_FILE_NAME)
+    run_structure_assignment(TEI_MULTIPLE_FILE_NAME, MWE_CSV_2_FILE_NAME, TEI_STRUCTURE_2_FILE_NAME)
+    run_dictionary_conversion(TEI_STRUCTURE_2_FILE_NAME, DICTIONARY_MULTIPLE_FILE_NAME)
+
+    # merge and finish
+    merge_dictionaries(DICTIONARY_SINGLE_FILE_NAME, DICTIONARY_MULTIPLE_FILE_NAME, DICTIONARY_FILE_NAME)
+    validate_dictionary(DICTIONARY_FILE_NAME)
+    shutil.copyfile(DICTIONARY_FILE_NAME, output_lexicon_file_name)
     shutil.copyfile(STRUCTURE_NEW_FILE_NAME, output_structure_file_name)
 
+def split_tei_input(input_file_name, single_file_name, multiple_file_name):
+    print('Splitting TEI input file ...')
+    split_command = ' '.join(['python', TEI_SPLIT_SCRIPT_NAME, '-infile', input_file_name, '-single', single_file_name, '-multiple', multiple_file_name])
+    os.system(split_command)
+
 def run_mwe_extraction(structure_file_name, tei_file_name, mwe_csv_file_name):
     print('Extracting MWEs from tei ...')
     extraction_command = ' '.join(['python', MWE_EXTRACTION_SCRIPT_NAME, structure_file_name, tei_file_name, '--all', mwe_csv_file_name, '--skip-id-check', '--fixed-restriction-order'])
@@ -54,9 +70,14 @@ def run_structure_creation(input_file_name, tei_file_name, output_file_name):
 
 def run_dictionary_conversion(tei_file_name, xml_file_name):
     print('Converting to dictionary xml format ...')
-    convert_command = ' '.join(['python', TEI_DICTIONARY_SCRIPT_NAME, '-infile', tei_file_name, '-outfile', xml_file_name])
+    convert_command = ' '.join(['python', TEI_DICTIONARY_SCRIPT_NAME, '-infile', tei_file_name, '-outfile', xml_file_name, '--keepids', 'true'])
     os.system(convert_command)
 
+def merge_dictionaries(single_file_name, multiple_file_name, joint_file_name):
+    print('Merging dictionary files ...')
+    merge_command = ' '.join(['python', DICTIONARY_MERGE_SCRIPT_NAME, '-single', single_file_name, '-multiple', multiple_file_name, '-outfile', joint_file_name])
+    os.system(merge_command)
+
 def validate_dictionary(dictionary_file_name):
     print('Validating output dictionary file ...')
     xml_schema = lxml.XMLSchema(lxml.parse(DICTIONARY_SCHEMA_FILE_NAME))
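The hunk above is cut off right after the schema object is built. Purely as a generic illustration of how lxml schema validation proceeds from that point (not the file's actual continuation; file names here are placeholders):

import lxml.etree as lxml

xml_schema = lxml.XMLSchema(lxml.parse('dictionary_schema.xsd'))  # placeholder schema path
dictionary_tree = lxml.parse('dictionary.xml')                    # placeholder document path
if not xml_schema.validate(dictionary_tree):
    print(xml_schema.error_log)  # report why validation failed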
scripts/split_tei.py (new file)
@@ -0,0 +1,36 @@
+import argparse
+import lxml.etree as lxml
+
+arg_parser = argparse.ArgumentParser(description='Split input TEI into single and multiple token units.')
+arg_parser.add_argument('-infile', type=str, required=True, help='Input TEI file')
+arg_parser.add_argument('-single', type=str, required=True, help='Output single token TEI file')
+arg_parser.add_argument('-multiple', type=str, required=True, help='Output multiple token TEI file')
+arguments = arg_parser.parse_args()
+input_file_name = arguments.infile
+single_file_name = arguments.single
+multiple_file_name = arguments.multiple
+
+TEI_NAMESPACE = 'http://www.tei-c.org/ns/1.0'
+def xpath_find(element, expression):
+    return element.xpath(expression, namespaces={'tei': TEI_NAMESPACE})
+
+def count_tokens(paragraph):
+    return len(xpath_find(paragraph, './/tei:w|.//tei:pc'))
+
+# Single-token output: drop every paragraph with more than one token.
+tree = lxml.parse(input_file_name)
+root = tree.getroot()
+paragraphs = xpath_find(root, './/tei:p')
+for paragraph in paragraphs:
+    if count_tokens(paragraph) > 1:
+        paragraph.getparent().remove(paragraph)
+tree.write(single_file_name, encoding='UTF-8', pretty_print=True)
+
+# Multiple-token output: drop every paragraph with exactly one token.
+tree = lxml.parse(input_file_name)
+root = tree.getroot()
+paragraphs = xpath_find(root, './/tei:p')
+for paragraph in paragraphs:
+    if count_tokens(paragraph) == 1:
+        paragraph.getparent().remove(paragraph)
+tree.write(multiple_file_name, encoding='UTF-8', pretty_print=True)
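A possible standalone invocation of this script, mirroring how the pipeline builds its shell commands; the file paths are illustrative only:

import os

# Illustrative paths; the pipeline itself passes the TEI_* constants defined above.
split_command = ' '.join(['python', 'split_tei.py',
                          '-infile', 'tei_initial.xml',
                          '-single', 'tei_single.xml',
                          '-multiple', 'tei_multiple.xml'])
os.system(split_command)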