Redmine #1835: created pipeline class and adapted pipeline1
commit 5395d8def0 (parent e3aa8a3aee)

This commit turns the module-level pipeline functions into a Pipeline class that carries its own per-run state (temp directory and stage-to-file map), and moves classla model loading into a standalone create_nlp() factory so the loaded models can be shared across runs.
@@ -5,23 +5,25 @@ import lxml.etree as lxml
 from flask import Flask, Response
 from flask_httpauth import HTTPBasicAuth
 
-import structure_assignment.pipeline as pipeline
+from structure_assignment.pipeline import Pipeline, create_nlp
 
 app = Flask(__name__)
 api_prefix = os.environ['API_PREFIX']
 resource_directory = os.environ['API_RESOURCE_DIR']
-pipeline.initialise(resource_dir=resource_directory)
-pipeline.load_classla_models()
+nlp = create_nlp(resource_directory)
 
 @app.route(api_prefix + '/test/<string:string>', methods=['GET'])
 def test(string):
 
     string_file_name = '/tmp/string.txt'
     parse_file_name = '/tmp/parse.xml'
 
     with open(string_file_name, 'w') as string_file:
         string_file.write(string + '\n')
 
     try:
+        pipeline = Pipeline(nlp)
         pipeline.import_file(string_file_name, 'strings-list')
         pipeline.do_tokenise()
         pipeline.do_tweak_conllu()
@@ -29,7 +31,7 @@ def test(string):
         pipeline.do_translate_jos()
         pipeline.do_conllu_to_tei()
         pipeline.export_file(parse_file_name, 'tei-initial')
-        # pipeline.cleanup()
+        pipeline.cleanup()
         tei = lxml.parse(parse_file_name).getroot()
         message = lxml.tostring(tei, encoding='UTF-8', pretty_print=True).decode()
         ok = True
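With the two hunks above, the web service now loads the classla models once at startup and builds a throwaway Pipeline per request. A minimal sketch of exercising the endpoint, assuming the app listens on localhost:5000 and API_PREFIX is '/api' (both values are illustrative, not taken from this commit):

    import requests  # generic HTTP client, not part of this repository

    # '/api' stands in for whatever the deployment sets as API_PREFIX
    response = requests.get('http://localhost:5000/api/test/pes')
    print(response.text)  # pretty-printed TEI XML for the input string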
@@ -14,7 +14,7 @@ STRUCTURE_SCHEMA_FILE_NAME = '../resources/structures.xsd'
 DICTIONARY_SCHEMA_FILE_NAME = '../resources/monolingual_dictionaries.xsd'
 
 # temporary outputs
-FILE_NAME_MAP = {'strings-list': 'strings.txt',
+FILE_MAP = {'strings-list': 'strings.txt',
             'obeliks-tokenised': 'obeliks_raw.conllu',
             'obeliks-tweaked': 'obeliks_tweaked.conllu',
             'classla-parsed': 'classla_raw.conllu',
@@ -33,3 +33,10 @@ FILE_NAME_MAP = {'strings-list': 'strings.txt',
             'dictionary-multiple': 'dictionary_multiple.xml',
             'dictionary': 'dictionary.xml'
             }
+
+NLP_CONFIG_MAP = {
+    'type': 'standard_jos',
+    'processors': 'tokenize,pos,lemma,depparse',
+    'tokenize_pretokenized': True,
+    'pos_use_lexicon': True,
+}
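The renamed FILE_MAP now holds only bare file names keyed by pipeline stage; each Pipeline instance (next file) prefixes them with its own temporary directory. A minimal sketch of that lookup, using a hypothetical temp path:

    import tempfile

    FILE_MAP = {'strings-list': 'strings.txt'}  # one-entry excerpt of the map above
    tmp_directory = tempfile.mkdtemp()          # e.g. '/tmp/tmpab12cd34'; varies per run
    file_map = {key: tmp_directory + '/' + FILE_MAP[key] for key in FILE_MAP.keys()}
    print(file_map['strings-list'])             # -> '/tmp/tmpab12cd34/strings.txt'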
@@ -2,6 +2,7 @@ import codecs
 import shutil
 import os
 import tempfile
+from copy import deepcopy
 
 import obeliks
@@ -14,67 +15,52 @@ from structure_assignment.tweak_conllu import tweak as tweak_conllu
 from nova_slovnica.translate_jos import translate as translate_jos
 from nova_slovnica.conllu_to_xml import convert_file as conllu_to_tei
 
-NLP_CONFIG_MAP = {
-    'type': 'standard_jos',
-    'processors': 'tokenize,pos,lemma,depparse',
-    'tokenize_pretokenized': True,
-    'pos_use_lexicon': True,
-    'models_dir': None
-}
-
-XML_ID_PREFIX = 's'
-
-tmp_directory = tempfile.mkdtemp()
-resource_directory = None
-nlp = None
-
-def __get_tmp_file_name(file_key):
-    return tmp_directory + '/' + FILE_NAME_MAP[file_key]
-
-def initialise(**argument_map):
-    global resource_directory
-    resource_directory = argument_map['resource_dir']
+def create_nlp(resource_directory):
     NLP_CONFIG_MAP['dir'] = resource_directory + '/classla'
+    return classla.Pipeline('sl', **NLP_CONFIG_MAP)
 
-def import_file(file_name, file_key):
-    shutil.copyfile(file_name, __get_tmp_file_name(file_key))
+class Pipeline:
 
-def do_tokenise():
-    input_file_name = __get_tmp_file_name('strings-list')
-    output_file_name = __get_tmp_file_name('obeliks-tokenised')
+    def __init__(self, nlp):
+        self.nlp = nlp
+        self.tmp_directory = tempfile.mkdtemp()
+        self.file_map = {key: self.tmp_directory + '/' + FILE_MAP[key] for key in FILE_MAP.keys()}
+
+    def import_file(self, file_name, file_key):
+        shutil.copyfile(file_name, self.file_map[file_key])
+
+    def do_tokenise(self):
+        input_file_name = self.file_map['strings-list']
+        output_file_name = self.file_map['obeliks-tokenised']
         obeliks.run(in_file=input_file_name, out_file=output_file_name, conllu=True)
 
-def do_tweak_conllu():
-    input_file_name = __get_tmp_file_name('obeliks-tokenised')
-    output_file_name = __get_tmp_file_name('obeliks-tweaked')
+    def do_tweak_conllu(self):
+        input_file_name = self.file_map['obeliks-tokenised']
+        output_file_name = self.file_map['obeliks-tweaked']
         tweak_conllu(input_file_name, output_file_name)
 
-def load_classla_models():
-    global nlp
-    nlp = classla.Pipeline('sl', **NLP_CONFIG_MAP)
-
-def do_parse():
-    input_file_name = __get_tmp_file_name('obeliks-tweaked')
-    output_file_name = __get_tmp_file_name('classla-parsed')
+    def do_parse(self):
+        input_file_name = self.file_map['obeliks-tweaked']
+        output_file_name = self.file_map['classla-parsed']
         doc = Document(text=None)
         conll_file = CoNLLFile(filename=input_file_name)
         doc.conll_file = conll_file
         result = nlp(doc)
         result.conll_file.write_conll(output_file_name)
 
-def do_translate_jos():
-    input_file_name = __get_tmp_file_name('classla-parsed')
+    def do_translate_jos(self):
+        input_file_name = self.file_map['classla-parsed']
         dictionary_file_name = resource_directory + '/dict.xml'
-    output_file_name = __get_tmp_file_name('classla-translated')
+        output_file_name = self.file_map['classla-translated']
         translate_jos(input_file_name, dictionary_file_name, output_file_name)
 
-def do_conllu_to_tei():
-    input_file_name = __get_tmp_file_name('classla-translated')
-    output_file_name = __get_tmp_file_name('tei-initial')
+    def do_conllu_to_tei(self):
+        input_file_name = self.file_map['classla-translated']
+        output_file_name = self.file_map['tei-initial']
         conllu_to_tei(input_file_name, output_file_name)
 
-def export_file(file_name, file_key):
-    shutil.copyfile(__get_tmp_file_name(file_key), file_name)
+    def export_file(self, file_name, file_key):
+        shutil.copyfile(self.file_map[file_key], file_name)
 
-def cleanup():
-    shutil.rmtree(tmp_directory, True)
+    def cleanup(self):
+        shutil.rmtree(self.tmp_directory, True)
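The refactor makes per-run state explicit: create_nlp() does the expensive classla model loading once, and each Pipeline instance owns a fresh temp directory and file map. A usage sketch mirroring the updated callers, assuming classla models are installed under ../resources/classla:

    from structure_assignment.pipeline import Pipeline, create_nlp

    nlp = create_nlp('../resources')  # expensive: loads classla models once

    pipeline = Pipeline(nlp)          # cheap: fresh temp directory per run
    pipeline.import_file('strings.txt', 'strings-list')
    pipeline.do_tokenise()
    pipeline.do_tweak_conllu()
    pipeline.do_parse()
    pipeline.do_translate_jos()
    pipeline.do_conllu_to_tei()
    pipeline.export_file('parse.xml', 'tei-initial')
    pipeline.cleanup()                # removes the instance's temp directory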
@@ -1,28 +1,26 @@
 import argparse
 
-import classla
-from classla import Document
-from classla.models.common.conll import CoNLLFile
-
-import structure_assignment.pipeline as pipeline
-
-arg_parser = argparse.ArgumentParser(description='Parse Slovene strings and convert to TEI.')
-arg_parser.add_argument('-inlist', type=str, help='Input list file')
-arg_parser.add_argument('-outtei', type=str, help='Output TEI file')
-arguments = arg_parser.parse_args()
-input_file_name = arguments.inlist
-output_file_name = arguments.outtei
-
-def run_pipeline(input_file_name, output_file_name):
-    pipeline.initialise(temp_dir='/tmp/structure_assignment_pipeline1', resource_dir='../resources')
+from structure_assignment.pipeline import Pipeline, create_nlp
+
+def run_pipeline(nlp, input_file_name, output_file_name):
+    pipeline = Pipeline(nlp)
     pipeline.import_file(input_file_name, 'strings-list')
     pipeline.do_tokenise()
     pipeline.do_tweak_conllu()
-    pipeline.load_classla_models()
     pipeline.do_parse()
     pipeline.do_translate_jos()
     pipeline.do_conllu_to_tei()
     pipeline.export_file(output_file_name, 'tei-initial')
+    pipeline.cleanup()
 
 if (__name__ == '__main__'):
+
+    arg_parser = argparse.ArgumentParser(description='Parse Slovene strings and convert to TEI.')
+    arg_parser.add_argument('-inlist', type=str, help='Input list file')
+    arg_parser.add_argument('-outtei', type=str, help='Output TEI file')
+    arguments = arg_parser.parse_args()
+    input_file_name = arguments.inlist
+    output_file_name = arguments.outtei
+
+    nlp = create_nlp('../resources')
     run_pipeline(input_file_name, output_file_name)
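With argument parsing moved under the __main__ guard, the script can now be imported without side effects and still runs standalone, e.g. python pipeline1.py -inlist strings.txt -outtei parse.xml (the file name pipeline1.py is inferred from the commit message, not shown in the diff).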