Redmine #1835: created pipeline class and adapted pipeline1

master
Cyprian Laskowski 3 years ago
parent e3aa8a3aee
commit 5395d8def0

@@ -5,23 +5,25 @@ import lxml.etree as lxml
from flask import Flask, Response from flask import Flask, Response
from flask_httpauth import HTTPBasicAuth from flask_httpauth import HTTPBasicAuth
import structure_assignment.pipeline as pipeline from structure_assignment.pipeline import Pipeline, create_nlp
app = Flask(__name__)

# Deployment configuration comes from the environment.
api_prefix = os.environ['API_PREFIX']
resource_directory = os.environ['API_RESOURCE_DIR']

# Load the classla NLP models once at startup; every request reuses this
# pipeline via a fresh Pipeline(nlp) instance.
nlp = create_nlp(resource_directory)
@app.route(api_prefix + '/test/<string:string>', methods=['GET']) @app.route(api_prefix + '/test/<string:string>', methods=['GET'])
def test(string): def test(string):
string_file_name = '/tmp/string.txt' string_file_name = '/tmp/string.txt'
parse_file_name = '/tmp/parse.xml' parse_file_name = '/tmp/parse.xml'
with open(string_file_name, 'w') as string_file: with open(string_file_name, 'w') as string_file:
string_file.write(string + '\n') string_file.write(string + '\n')
try: try:
pipeline = Pipeline(nlp)
pipeline.import_file(string_file_name, 'strings-list') pipeline.import_file(string_file_name, 'strings-list')
pipeline.do_tokenise() pipeline.do_tokenise()
pipeline.do_tweak_conllu() pipeline.do_tweak_conllu()
@@ -29,7 +31,7 @@ def test(string):
pipeline.do_translate_jos() pipeline.do_translate_jos()
pipeline.do_conllu_to_tei() pipeline.do_conllu_to_tei()
pipeline.export_file(parse_file_name, 'tei-initial') pipeline.export_file(parse_file_name, 'tei-initial')
# pipeline.cleanup() pipeline.cleanup()
tei = lxml.parse(parse_file_name).getroot() tei = lxml.parse(parse_file_name).getroot()
message = lxml.tostring(tei, encoding='UTF-8', pretty_print=True).decode() message = lxml.tostring(tei, encoding='UTF-8', pretty_print=True).decode()
ok = True ok = True

@@ -14,22 +14,29 @@ STRUCTURE_SCHEMA_FILE_NAME = '../resources/structures.xsd'
DICTIONARY_SCHEMA_FILE_NAME = '../resources/monolingual_dictionaries.xsd'

# Temporary outputs: logical file keys -> file names used inside the
# pipeline's temporary working directory.
FILE_MAP = {
    'strings-list': 'strings.txt',
    'obeliks-tokenised': 'obeliks_raw.conllu',
    'obeliks-tweaked': 'obeliks_tweaked.conllu',
    'classla-parsed': 'classla_raw.conllu',
    'classla-translated': 'classla_translated.conllu',
    'tei-initial': 'tei_initial.xml',
    'tei-single': 'tei_single.xml',
    'tei-single-ids': 'tei_single_with_ids.xml',
    'tei-multiple': 'tei_multiple.xml',
    'tei-multiple-ids-1': 'tei_multiple_with_ids1.xml',
    'tei-multiple-ids-2': 'tei_multiple_with_ids2.xml',
    'mwes-1': 'mwes1.csv',
    'mwes-2': 'mwes2.csv',
    'structures-old': 'structures_old.xml',
    'structures-new': 'structures_new.xml',
    'dictionary-single': 'dictionary_single.xml',
    'dictionary-multiple': 'dictionary_multiple.xml',
    'dictionary': 'dictionary.xml',
}

# Base configuration for the classla pipeline; create_nlp() adds the
# model directory ('dir') at construction time.
NLP_CONFIG_MAP = {
    'type': 'standard_jos',
    'processors': 'tokenize,pos,lemma,depparse',
    'tokenize_pretokenized': True,
    'pos_use_lexicon': True,
}

@@ -2,6 +2,7 @@ import codecs
import shutil import shutil
import os import os
import tempfile import tempfile
from copy import deepcopy
import obeliks import obeliks
@@ -14,67 +15,52 @@ from structure_assignment.tweak_conllu import tweak as tweak_conllu
from nova_slovnica.translate_jos import translate as translate_jos from nova_slovnica.translate_jos import translate as translate_jos
from nova_slovnica.conllu_to_xml import convert_file as conllu_to_tei from nova_slovnica.conllu_to_xml import convert_file as conllu_to_tei
def create_nlp(resource_directory):
    """Create a classla NLP pipeline using the models under
    ``<resource_directory>/classla``.

    :param resource_directory: root directory holding the pipeline resources
    :return: a configured ``classla.Pipeline`` instance
    """
    # Configure a copy so the shared NLP_CONFIG_MAP constant is not mutated
    # as a side effect of creating a pipeline (the original assigned
    # NLP_CONFIG_MAP['dir'] in place, leaking state between calls).
    config = dict(NLP_CONFIG_MAP)
    config['dir'] = resource_directory + '/classla'
    return classla.Pipeline('sl', **config)
def import_file(file_name, file_key):
shutil.copyfile(file_name, __get_tmp_file_name(file_key)) class Pipeline:
def do_tokenise(): def __init__(self, nlp):
input_file_name = __get_tmp_file_name('strings-list') self.nlp = nlp
output_file_name = __get_tmp_file_name('obeliks-tokenised') self.tmp_directory = tempfile.mkdtemp()
obeliks.run(in_file=input_file_name, out_file=output_file_name, conllu=True) self.file_map = {key: self.tmp_directory + '/' + FILE_MAP[key] for key in FILE_MAP.keys()}
def do_tweak_conllu(): def import_file(self, file_name, file_key):
input_file_name = __get_tmp_file_name('obeliks-tokenised') shutil.copyfile(file_name, self.file_map[file_key])
output_file_name = __get_tmp_file_name('obeliks-tweaked')
tweak_conllu(input_file_name, output_file_name) def do_tokenise(self):
input_file_name = self.file_map['strings-list']
def load_classla_models(): output_file_name = self.file_map['obeliks-tokenised']
global nlp obeliks.run(in_file=input_file_name, out_file=output_file_name, conllu=True)
nlp = classla.Pipeline('sl', **NLP_CONFIG_MAP)
def do_tweak_conllu(self):
def do_parse(): input_file_name = self.file_map['obeliks-tokenised']
input_file_name = __get_tmp_file_name('obeliks-tweaked') output_file_name = self.file_map['obeliks-tweaked']
output_file_name = __get_tmp_file_name('classla-parsed') tweak_conllu(input_file_name, output_file_name)
doc = Document(text=None)
conll_file = CoNLLFile(filename=input_file_name) def do_parse(self):
doc.conll_file = conll_file input_file_name = self.file_map['obeliks-tweaked']
result = nlp(doc) output_file_name = self.file_map['classla-parsed']
result.conll_file.write_conll(output_file_name) doc = Document(text=None)
conll_file = CoNLLFile(filename=input_file_name)
def do_translate_jos(): doc.conll_file = conll_file
input_file_name = __get_tmp_file_name('classla-parsed') result = nlp(doc)
dictionary_file_name = resource_directory + '/dict.xml' result.conll_file.write_conll(output_file_name)
output_file_name = __get_tmp_file_name('classla-translated')
translate_jos(input_file_name, dictionary_file_name, output_file_name) def do_translate_jos(self):
input_file_name = self.file_map['classla-parsed']
def do_conllu_to_tei(): dictionary_file_name = resource_directory + '/dict.xml'
input_file_name = __get_tmp_file_name('classla-translated') output_file_name = self.file_map['classla-translated']
output_file_name = __get_tmp_file_name('tei-initial') translate_jos(input_file_name, dictionary_file_name, output_file_name)
conllu_to_tei(input_file_name, output_file_name)
def do_conllu_to_tei(self):
def export_file(file_name, file_key): input_file_name = self.file_map['classla-translated']
shutil.copyfile(__get_tmp_file_name(file_key), file_name) output_file_name = self.file_map['tei-initial']
conllu_to_tei(input_file_name, output_file_name)
def cleanup():
shutil.rmtree(tmp_directory, True) def export_file(self, file_name, file_key):
shutil.copyfile(self.file_map[file_key], file_name)
def cleanup(self):
shutil.rmtree(self.tmp_directory, True)

@@ -1,28 +1,26 @@
import argparse import argparse
import classla from structure_assignment.pipeline import Pipeline, create_nlp
from classla import Document
from classla.models.common.conll import CoNLLFile
def run_pipeline(nlp, input_file_name, output_file_name):
    """Run the full string-to-TEI pipeline over one input list file.

    :param nlp: a classla pipeline, as returned by create_nlp()
    :param input_file_name: path to the input strings-list file
    :param output_file_name: path to write the initial TEI output
    """
    runner = Pipeline(nlp)
    runner.import_file(input_file_name, 'strings-list')
    # The processing stages run in this fixed order; each reads the
    # previous stage's output from the pipeline's temp directory.
    for step in ('do_tokenise', 'do_tweak_conllu', 'do_parse',
                 'do_translate_jos', 'do_conllu_to_tei'):
        getattr(runner, step)()
    runner.export_file(output_file_name, 'tei-initial')
    runner.cleanup()
if (__name__ == '__main__'):
    arg_parser = argparse.ArgumentParser(description='Parse Slovene strings and convert to TEI.')
    arg_parser.add_argument('-inlist', type=str, help='Input list file')
    arg_parser.add_argument('-outtei', type=str, help='Output TEI file')
    arguments = arg_parser.parse_args()
    input_file_name = arguments.inlist
    output_file_name = arguments.outtei
    nlp = create_nlp('../resources')
    # Bug fix: run_pipeline takes the nlp pipeline as its first argument;
    # the original call omitted it and would raise TypeError.
    run_pipeline(nlp, input_file_name, output_file_name)

Loading…
Cancel
Save