From ab292d74f0eacb44297180c1cf16db211b1228c9 Mon Sep 17 00:00:00 2001
From: Luka
Date: Tue, 22 Feb 2022 10:35:01 +0100
Subject: [PATCH] Added svala2tei code.

---
 src/__init__.py                           |   0
 src/__pycache__/__init__.cpython-37.pyc   | Bin 0 -> 144 bytes
 src/__pycache__/create_tei.cpython-37.pyc | Bin 0 -> 7910 bytes
 src/create_tei.py                         | 355 ++++++++++++++++++++++
 svala2tei.py                              | 264 ++++++++++++++++
 5 files changed, 619 insertions(+)
 create mode 100644 src/__init__.py
 create mode 100644 src/__pycache__/__init__.cpython-37.pyc
 create mode 100644 src/__pycache__/create_tei.cpython-37.pyc
 create mode 100644 src/create_tei.py
 create mode 100644 svala2tei.py

diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/__pycache__/__init__.cpython-37.pyc b/src/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e666e6735a788958133381e13c45396522cc211d
diff --git a/src/__pycache__/create_tei.cpython-37.pyc b/src/__pycache__/create_tei.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2906192135e5f5a5515cbf6ca2a2e057514d49d6
diff --git a/src/create_tei.py b/src/create_tei.py
new file mode 100644
--- /dev/null
+++ b/src/create_tei.py
@@ -0,0 +1,355 @@
+import re
+import sys
+
+from lxml import etree
+
[... TeiDocument, Paragraph and Sentence class definitions and build_tei_etrees ...]
+
+
+def parse_metaline(line):
+    tokens = line.split('=', 1)
+    key = tokens[0].replace('#', '').strip()
+    if len(tokens) > 1 and not tokens[1].isspace():
+        val = tokens[1].strip()
+    else:
+        val = None
+    return (key, val)
+
+
+def is_metaline(line):
+    if re.match('# .+ =.*', line):
+        return True
+    return False
+
+
+def construct_tei_documents_from_list(object_list):
+    documents = []
+
+    doc_id = None
+    document_paragraphs = []
+
+    para_id = None
+
+    if len(object_list) > 0:
+        document_paragraphs.append(construct_paragraph(para_id, object_list))
+
+    if len(document_paragraphs) > 0:
+        documents.append(
+            TeiDocument(doc_id, document_paragraphs))
+
+    return documents
+
+
+def construct_tei_documents(conllu_lines):
+    documents = []
+
+    doc_id = None
+    document_paragraphs = []
+
+    para_id = None
+    para_buffer = []
+
+    for line in conllu_lines:
+        if is_metaline(line):
+            key, val = parse_metaline(line)
+            if key == 'newdoc id':
+                if len(para_buffer) > 0:
+                    document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+                if len(document_paragraphs) > 0:
+                    documents.append(
+                        TeiDocument(doc_id, document_paragraphs))
+                document_paragraphs = []
+                doc_id = val
+            elif key == 'newpar id':
+                if len(para_buffer) > 0:
+                    document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+                para_buffer = []
+                para_id = val
+            elif key == 'sent_id':
+                para_buffer.append(line)
+        else:
+            if not line.isspace():
+                para_buffer.append(line)
+
+    if len(para_buffer) > 0:
+        document_paragraphs.append(construct_paragraph(para_id, para_buffer))
+
+    if len(document_paragraphs) > 0:
+        documents.append(
+            TeiDocument(doc_id, document_paragraphs))
+
+    return documents
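+
+
+# A sketch of the expected input: standard CoNLL-U metalines followed by
+# tab-separated 10-column token lines (the tokens below are only illustrative):
+#
+#   # newdoc id = doc1
+#   # newpar id = doc1.1
+#   # sent_id = doc1.1.1
+#   1   Sonce   sonce    NOUN   Ncnsn    _   2   nsubj   _   _
+#   2   sije    sijati   VERB   Vmpr3s   _   0   root    _   SpaceAfter=No
+#
+# construct_tei_documents() starts a new TeiDocument at each 'newdoc id', a new
+# Paragraph at each 'newpar id', and buffers token lines per 'sent_id'.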
+
+
+def construct_paragraph_from_list(para_id, etree_source_sentences):
+    para = Paragraph(para_id)
+
+    for sentence in etree_source_sentences:
+        para.add_sentence(sentence)
+
+    return para
+
+
+def construct_paragraph(para_id, conllu_lines):
+    para = Paragraph(para_id)
+
+    sent_id = None
+    sent_buffer = []
+
+    for line in conllu_lines:
+        if is_metaline(line):
+            key, val = parse_metaline(line)
+            if key == 'sent_id':
+                if len(sent_buffer) > 0:
+                    para.add_sentence(construct_sentence(sent_id, sent_buffer))
+                    sent_buffer = []
+                sent_id = val
+        elif not line.isspace():
+            sent_buffer.append(line)
+
+    if len(sent_buffer) > 0:
+        para.add_sentence(construct_sentence(sent_id, sent_buffer))
+
+    return para
+
+
+def construct_sentence_from_list(sent_id, object_list):
+    sentence = Sentence(sent_id, no_ud=True)
+    for tokens in object_list:
+        word_id = tokens['id']
+        token = tokens['token']
+        lemma = tokens['lemma']
+        upos = '_'
+        xpos = tokens['ana'][4:]  # @ana looks like 'mte:Ncmsn'; keep only the MSD part
+        upos_other = '_'
+        misc = '_' if tokens['space_after'] else 'SpaceAfter=No'
+
+        sentence.add_item(
+            token,
+            lemma,
+            upos,
+            upos_other,
+            xpos,
+            misc)
+
+    return sentence
+
+
+def construct_sentence(sent_id, lines):
+    sentence = Sentence(sent_id)
+    for line in lines:
+        if line.startswith('#') or line.isspace():
+            continue
+        line = line.replace('\n', '')
+        tokens = line.split('\t')
+        word_id = tokens[0]
+        token = tokens[1]
+        lemma = tokens[2]
+        upos = tokens[3]
+        xpos = tokens[4]
+        upos_other = tokens[5]
+        depparse_link = tokens[6]
+        depparse_link_name = tokens[7]
+        misc = tokens[9]
+
+        sentence.add_item(
+            token,
+            lemma,
+            upos,
+            upos_other,
+            xpos,
+            misc)
+
+        sentence.add_link(
+            depparse_link,
+            depparse_link_name)
+    return sentence
+
+
+def construct_tei_etrees(conllu_lines):
+    documents = construct_tei_documents(conllu_lines)
+    return build_tei_etrees(documents)
+
+
+def convert_file(input_file_name, output_file_name):
+    with open(input_file_name, 'r') as input_file:
+        root = construct_tei_etrees(input_file)[0]
+    tree = etree.ElementTree(root)
+    tree.write(output_file_name, encoding='UTF-8', pretty_print=True)
+
+
+system = 'jos'  # default (TODO: make this cleaner)
+
+if __name__ == '__main__':
+    import argparse
+    from glob import glob
+
+    parser = argparse.ArgumentParser(description='Convert CoNLL-U to TEI.')
+    parser.add_argument('files', nargs='+', help='CoNLL-U file (glob patterns are accepted)')
+    parser.add_argument('-o', '--out-file', dest='out', default=None,
+                        help='Write output to file instead of stdout.')
+    parser.add_argument('-s', '--system', dest='system', default='jos', choices=['jos', 'ud'],
+                        help='Annotation scheme of the output.')
+
+    args = parser.parse_args()
+
+    if args.out:
+        f_out = open(args.out, 'w')
+    else:
+        f_out = sys.stdout
+
+    system = args.system
+
+    for arg in args.files:
+        filelist = glob(arg)
+        for f in filelist:
+            with open(f, 'r') as conllu_f:
+                tei_etrees = construct_tei_etrees(conllu_f)
+                for tei_etree in tei_etrees:
+                    f_out.write(etree.tostring(tei_etree, pretty_print=True, encoding='utf-8').decode())
+                    f_out.write('\n')  # separate consecutive documents
+
+    if args.out:
+        f_out.close()
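+
+
+# Example invocation (a sketch; the file names are placeholders):
+#   python src/create_tei.py -s jos -o out.xml data/*.conllu
+# Without -o the TEI is written to stdout.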
diff --git a/svala2tei.py b/svala2tei.py
new file mode 100644
index 0000000..7af0eb9
--- /dev/null
+++ b/svala2tei.py
@@ -0,0 +1,264 @@
+import argparse
+import json
+import logging
+import os
+import shutil
+import time
+from xml.etree import ElementTree
+
+from lxml import etree
+
+from src.create_tei import construct_tei_etrees, construct_tei_documents_from_list, construct_sentence_from_list, \
+    construct_paragraph_from_list, TeiDocument, build_tei_etrees
+
+logging.basicConfig(level=logging.INFO)
+
+
+def add_token(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id):
+    # an unchanged token: the same text goes to both sides, joined by one 1:1 edge
+    source_id = "s" + svala_i
+    target_id = "t" + svala_i
+    edge_id = "e-" + source_id + "-" + target_id
+    source_token_id = sentence_string_source_id + f'.{source_i}'
+    target_token_id = sentence_string_target_id + f'.{target_i}'
+    token_tag = 'w' if el.tag.startswith('w') else 'pc'
+    lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
+    source.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
+    target.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': target_token_id, 'space_after': False})
+    edges.append({'source_ids': [source_token_id], 'target_ids': [target_token_id], 'labels': svala_data['edges'][edge_id]['labels']})
+
+
+def add_error_token(el, out_list, sentence_string_id, out_list_i, out_list_ids):
+    source_token_id = sentence_string_id + f'.{out_list_i}'
+    token_tag = 'w' if el.tag.startswith('w') else 'pc'
+    lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
+    out_list.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
+    out_list_ids.append(source_token_id)
+
+
+def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id):
+    source_edge_ids = []
+    target_edge_ids = []
+    source_ids = []
+    target_ids = []
+
+    # solar5.7
+    # errors may nest up to <u5>; every nested token belongs to the source side,
+    # while correction tokens come from <p> elements and belong to the target side
+    for el in error:
+        if el.tag.startswith('w') or el.tag.startswith('pc'):
+            ind = str(svala_i)
+
+            source_id = "s" + ind
+            source_edge_ids.append(source_id)
+
+            add_error_token(el, source, sentence_string_source_id, source_i, source_ids)
+
+            source_i += 1
+            svala_i += 1
+
+        elif el.tag.startswith('c'):
+            source[-1]['space_after'] = True
+
+        elif el.tag.startswith('p'):
+            for p_el in el:
+                if p_el.tag.startswith('w') or p_el.tag.startswith('pc'):
+                    ind = str(svala_i)
+
+                    target_id = "t" + ind
+                    target_edge_ids.append(target_id)
+
+                    add_error_token(p_el, target, sentence_string_target_id, target_i, target_ids)
+
+                    target_i += 1
+                    svala_i += 1
+
+                elif p_el.tag.startswith('c'):
+                    target[-1]['space_after'] = True
+
+        elif el.tag.startswith('u2'):
+            for el_l2 in el:
+                if el_l2.tag.startswith('w') or el_l2.tag.startswith('pc'):
+                    ind = str(svala_i)
+
+                    source_id = "s" + ind
+                    source_edge_ids.append(source_id)
+
+                    add_error_token(el_l2, source, sentence_string_source_id, source_i, source_ids)
+
+                    source_i += 1
+                    svala_i += 1
+
+                elif el_l2.tag.startswith('c'):
+                    source[-1]['space_after'] = True
+
+                elif el_l2.tag.startswith('u3'):
+                    for el_l3 in el_l2:
+                        if el_l3.tag.startswith('w') or el_l3.tag.startswith('pc'):
+                            ind = str(svala_i)
+
+                            source_id = "s" + ind
+                            source_edge_ids.append(source_id)
+
+                            add_error_token(el_l3, source, sentence_string_source_id, source_i, source_ids)
+
+                            source_i += 1
+                            svala_i += 1
+
+                        elif el_l3.tag.startswith('c'):
+                            source[-1]['space_after'] = True
+
+                        elif el_l3.tag.startswith('u4'):
+                            for el_l4 in el_l3:
+                                if el_l4.tag.startswith('w') or el_l4.tag.startswith('pc'):
+                                    ind = str(svala_i)
+
+                                    source_id = "s" + ind
+                                    source_edge_ids.append(source_id)
+
+                                    add_error_token(el_l4, source, sentence_string_source_id, source_i, source_ids)
+
+                                    source_i += 1
+                                    svala_i += 1
+
+                                elif el_l4.tag.startswith('c'):
+                                    source[-1]['space_after'] = True
+
+                                elif el_l4.tag.startswith('u5'):
+                                    for el_l5 in el_l4:
+                                        if el_l5.tag.startswith('w') or el_l5.tag.startswith('pc'):
+                                            ind = str(svala_i)
+
+                                            source_id = "s" + ind
+                                            source_edge_ids.append(source_id)
+
+                                            add_error_token(el_l5, source, sentence_string_source_id, source_i, source_ids)
+
+                                            source_i += 1
+                                            svala_i += 1
+
+                                        elif el_l5.tag.startswith('c'):
+                                            source[-1]['space_after'] = True
+
+            for p_el in el:
+                if p_el.tag.startswith('w') or p_el.tag.startswith('pc'):
+                    ind = str(svala_i)
+
+                    target_id = "t" + ind
+                    target_edge_ids.append(target_id)
+
+                    add_error_token(p_el, target, sentence_string_target_id, target_i, target_ids)
+
+                    target_i += 1
+                    svala_i += 1
+
+                elif p_el.tag.startswith('c'):
+                    target[-1]['space_after'] = True
+
+    edge_ids = sorted(source_edge_ids) + sorted(target_edge_ids)
+    edge_id = "e-" + "-".join(edge_ids)
+    edges.append({'source_ids': source_ids, 'target_ids': target_ids, 'labels': svala_data['edges'][edge_id]['labels']})
+
+    return svala_i, source_i, target_i
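+
+
+# The id scheme assumed above mirrors svala's: token i is 's<i>' on the source
+# side and 't<i>' on the target side. An unchanged token is looked up under the
+# edge key 'e-s<i>-t<i>' (add_token); an error group joins all of its sorted
+# source and target ids, so tokens s3 and s4 corrected into t3 are looked up
+# under 'e-s3-s4-t3' (add_errors).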
+
+
+def process_file(et, args):
+    if os.path.exists(args.results_folder):
+        shutil.rmtree(args.results_folder)
+    os.mkdir(args.results_folder)
+    for div in et.iter('div'):
+        bibl = div.find('bibl')
+        file_name = bibl.get('n')
+        file_name = file_name.replace('/', '_')
+
+        svala_path = os.path.join(args.svala_folder, file_name)
+        # skip files that are not svala annotated (to enable short examples)
+        if not os.path.isdir(svala_path):
+            continue
+
+        svala_list = [[fname[:-13], fname] if 'problem' in fname else [fname[:-5], fname] for fname in os.listdir(svala_path)]
+        svala_dict = {e[0]: e[1] for e in svala_list}
+
+        paragraphs = div.findall('p')
+        for paragraph in paragraphs:
+            sentences = paragraph.findall('s')
+            svala_i = 1
+
+            paragraph_id = paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id']
+
+            # read the svala annotation JSON for this paragraph
+            svala_file = os.path.join(svala_path, svala_dict[paragraph_id])
+            with open(svala_file) as jf:
+                svala_data = json.load(jf)
+
+            etree_source_sentences = []
+            etree_target_sentences = []
+            edges = []
+            for sentence_id, sentence in enumerate(sentences, start=1):
+                source = []
+                target = []
+
+                source_i = 1
+                target_i = 1
+                sentence_string_source_id = paragraph_id + f's.{sentence_id}'
+                sentence_string_target_id = paragraph_id + f't.{sentence_id}'
+                for el in sentence:
+                    if el.tag.startswith('w') or el.tag.startswith('pc'):
+                        add_token(str(svala_i), source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id)
+                        svala_i += 1
+                        source_i += 1
+                        target_i += 1
+                    elif el.tag.startswith('u'):
+                        svala_i, source_i, target_i = add_errors(svala_i, source_i, target_i, el, source, target, edges, svala_data, sentence_string_source_id, sentence_string_target_id)
+                    elif el.tag.startswith('c'):
+                        source[-1]['space_after'] = True
+                        target[-1]['space_after'] = True
+
+                etree_source_sentences.append(construct_sentence_from_list(str(sentence_id), source))
+                etree_target_sentences.append(construct_sentence_from_list(str(sentence_id), target))
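+
+            # Paragraph xml:ids are assumed to look like '<doc>.<par>', e.g.
+            # 'solar17.4': the document id is 'solar17' and the paragraph
+            # index '4' becomes '4s' on the source side, '4t' on the target.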
+            etree_source_paragraph = construct_paragraph_from_list(paragraph_id.split('.')[1] + 's', etree_source_sentences)
+            etree_source_document = TeiDocument(paragraph_id.split('.')[0], [etree_source_paragraph])
+            etree_source = build_tei_etrees([etree_source_document])
+
+            etree_target_paragraph = construct_paragraph_from_list(paragraph_id.split('.')[1] + 't', etree_target_sentences)
+            etree_target_document = TeiDocument(paragraph_id.split('.')[0], [etree_target_paragraph])
+            etree_target = build_tei_etrees([etree_target_document])
+
+            with open(os.path.join(args.results_folder, f"{paragraph_id}_source"), 'w') as sf:
+                sf.write(etree.tostring(etree_source[0], pretty_print=True, encoding='utf-8').decode())
+
+            with open(os.path.join(args.results_folder, f"{paragraph_id}_target"), 'w') as tf:
+                tf.write(etree.tostring(etree_target[0], pretty_print=True, encoding='utf-8').decode())
+
+            with open(os.path.join(args.results_folder, f"{paragraph_id}_errors"), 'w') as jf:
+                json.dump(edges, jf, ensure_ascii=False, indent=" ")
+
+        # stop after the first svala-annotated div
+        break
+
+
+def main(args):
+    with open(args.solar_file, 'r') as fp:
+        logging.info(args.solar_file)
+        et = ElementTree.XML(fp.read())
+    process_file(et, args)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Convert svala-annotated Solar paragraphs into source/target TEI files plus an error-edge JSON.')
+    parser.add_argument('--solar_file', default='data/Solar2.0/solar2.xml',
+                        help='path to the Solar corpus TEI XML file')
+    parser.add_argument('--txt_file', default='data/txt/input',
+                        help='plain-text input folder (currently unused by this script)')
+    parser.add_argument('--svala_folder', default='data/solar.svala.error.small',
+                        help='folder with per-paragraph svala annotation JSON files')
+    parser.add_argument('--results_folder', default='data/results/solar3.0',
+                        help='output folder for the generated TEI and error files')
+    args = parser.parse_args()
+
+    start = time.time()
+    main(args)
+    logging.info("TIME: {}".format(time.time() - start))
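+
+# Example invocation (a sketch; the paths are simply the argparse defaults):
+#   python svala2tei.py --solar_file data/Solar2.0/solar2.xml \
+#       --svala_folder data/solar.svala.error.small \
+#       --results_folder data/results/solar3.0
+# For every annotated paragraph this writes <xml:id>_source and <xml:id>_target
+# (TEI) plus <xml:id>_errors (JSON) into --results_folder.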