|
|
@ -5,11 +5,15 @@ import os
|
|
|
|
import shutil
|
|
|
|
import shutil
|
|
|
|
import time
|
|
|
|
import time
|
|
|
|
from xml.etree import ElementTree
|
|
|
|
from xml.etree import ElementTree
|
|
|
|
|
|
|
|
from conllu import TokenList
|
|
|
|
|
|
|
|
import conllu
|
|
|
|
|
|
|
|
import classla
|
|
|
|
|
|
|
|
import copy
|
|
|
|
|
|
|
|
|
|
|
|
from lxml import etree
|
|
|
|
from lxml import etree
|
|
|
|
|
|
|
|
|
|
|
|
from src.create_tei import construct_tei_etrees, construct_tei_documents_from_list, construct_sentence_from_list, \
|
|
|
|
from src.create_tei import construct_sentence_from_list, \
|
|
|
|
construct_paragraph_from_list, TeiDocument, build_tei_etrees, build_links, build_complete_tei
|
|
|
|
construct_paragraph_from_list, TeiDocument, build_tei_etrees, build_links, build_complete_tei, convert_bibl
|
|
|
|
|
|
|
|
|
|
|
|
logging.basicConfig(level=logging.INFO)
|
|
|
|
logging.basicConfig(level=logging.INFO)
|
|
|
|
|
|
|
|
|
|
|
@ -18,17 +22,22 @@ def add_token(svala_i, source_i, target_i, el, source, target, edges, svala_data
|
|
|
|
source_id = "s" + svala_i
|
|
|
|
source_id = "s" + svala_i
|
|
|
|
target_id = "t" + svala_i
|
|
|
|
target_id = "t" + svala_i
|
|
|
|
edge_id = "e-" + source_id + "-" + target_id
|
|
|
|
edge_id = "e-" + source_id + "-" + target_id
|
|
|
|
source_token_id = sentence_string_id + f'.s{source_i}'
|
|
|
|
labels = svala_data['edges'][edge_id]['labels']
|
|
|
|
target_token_id = sentence_string_id + f'.t{target_i}'
|
|
|
|
sentence_string_id_split = sentence_string_id.split('.')
|
|
|
|
|
|
|
|
source_token_id = f'{sentence_string_id_split[0]}s.{".".join(sentence_string_id_split[1:])}.{source_i}'
|
|
|
|
|
|
|
|
target_token_id = f'{sentence_string_id_split[0]}t.{".".join(sentence_string_id_split[1:])}.{target_i}'
|
|
|
|
token_tag = 'w' if el.tag.startswith('w') else 'pc'
|
|
|
|
token_tag = 'w' if el.tag.startswith('w') else 'pc'
|
|
|
|
lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
|
|
|
|
lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
|
|
|
|
source.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
|
|
|
|
source.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
|
|
|
|
target.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': target_token_id, 'space_after': False})
|
|
|
|
target.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': target_token_id, 'space_after': False})
|
|
|
|
edges.append({'source_ids': [source_token_id], 'target_ids': [target_token_id], 'labels': svala_data['edges'][edge_id]['labels']})
|
|
|
|
edges.append({'source_ids': [source_token_id], 'target_ids': [target_token_id], 'labels': labels})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def add_error_token(el, out_list, sentence_string_id, out_list_i, out_list_ids, is_source):
|
|
|
|
def add_error_token(el, out_list, sentence_string_id, out_list_i, out_list_ids, is_source):
|
|
|
|
source_token_id = sentence_string_id + f'.s{out_list_i}' if is_source else sentence_string_id + f'.t{out_list_i}'
|
|
|
|
sentence_string_id_split = sentence_string_id.split('.')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
source_token_id = f'{sentence_string_id_split[0]}s.{".".join(sentence_string_id_split[1:])}.{out_list_i}' if is_source \
|
|
|
|
|
|
|
|
else f'{sentence_string_id_split[0]}t.{".".join(sentence_string_id_split[1:])}.{out_list_i}'
|
|
|
|
token_tag = 'w' if el.tag.startswith('w') else 'pc'
|
|
|
|
token_tag = 'w' if el.tag.startswith('w') else 'pc'
|
|
|
|
lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
|
|
|
|
lemma = el.attrib['lemma'] if token_tag == 'w' else el.text
|
|
|
|
out_list.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
|
|
|
|
out_list.append({'token': el.text, 'tag': token_tag, 'ana': el.attrib['ana'], 'lemma': lemma, 'id': source_token_id, 'space_after': False})
|
|
|
@ -54,7 +63,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
source_i += 1
|
|
|
|
source_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
|
|
|
|
|
|
|
|
elif el.tag.startswith('c'):
|
|
|
|
elif el.tag.startswith('c') and len(source) > 0:
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
elif el.tag.startswith('p'):
|
|
|
|
elif el.tag.startswith('p'):
|
|
|
@ -70,7 +79,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
target_i += 1
|
|
|
|
target_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
|
|
|
|
|
|
|
|
elif p_el.tag.startswith('c'):
|
|
|
|
elif p_el.tag.startswith('c') and len(target) > 0:
|
|
|
|
target[-1]['space_after'] = True
|
|
|
|
target[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
elif el.tag.startswith('u2'):
|
|
|
|
elif el.tag.startswith('u2'):
|
|
|
@ -86,7 +95,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
source_i += 1
|
|
|
|
source_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
|
|
|
|
|
|
|
|
elif el_l2.tag.startswith('c'):
|
|
|
|
elif el_l2.tag.startswith('c') and len(source) > 0:
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
elif el_l2.tag.startswith('u3'):
|
|
|
|
elif el_l2.tag.startswith('u3'):
|
|
|
@ -102,7 +111,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
source_i += 1
|
|
|
|
source_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
|
|
|
|
|
|
|
|
elif el_l3.tag.startswith('c'):
|
|
|
|
elif el_l3.tag.startswith('c') and len(source) > 0:
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
elif el_l3.tag.startswith('u4'):
|
|
|
|
elif el_l3.tag.startswith('u4'):
|
|
|
@ -117,7 +126,7 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
|
|
|
|
|
|
|
|
source_i += 1
|
|
|
|
source_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
elif el_l4.tag.startswith('c'):
|
|
|
|
elif el_l4.tag.startswith('c') and len(source) > 0:
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
elif el_l4.tag.startswith('u5'):
|
|
|
|
elif el_l4.tag.startswith('u5'):
|
|
|
@ -132,22 +141,23 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
|
|
|
|
|
|
|
|
source_i += 1
|
|
|
|
source_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
svala_i += 1
|
|
|
|
elif el_l5.tag.startswith('c'):
|
|
|
|
elif el_l5.tag.startswith('c') and len(source) > 0:
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
source[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
for p_el in el:
|
|
|
|
# TODO NOT SURE IF THIS SHOULD BE COMMENTED! IF IT IS NOT THERE ARE ERRORS ON 2ND lvl of errors, where some words are duplicated
|
|
|
|
if p_el.tag.startswith('w') or p_el.tag.startswith('pc'):
|
|
|
|
# for p_el in el:
|
|
|
|
ind = str(svala_i)
|
|
|
|
# if p_el.tag.startswith('w') or p_el.tag.startswith('pc'):
|
|
|
|
|
|
|
|
# ind = str(svala_i)
|
|
|
|
target_id = "t" + ind
|
|
|
|
#
|
|
|
|
target_edge_ids.append(target_id)
|
|
|
|
# target_id = "t" + ind
|
|
|
|
|
|
|
|
# target_edge_ids.append(target_id)
|
|
|
|
add_error_token(p_el, target, sentence_string_id, target_i, target_ids, False)
|
|
|
|
#
|
|
|
|
|
|
|
|
# add_error_token(p_el, target, sentence_string_id, target_i, target_ids, False)
|
|
|
|
target_i += 1
|
|
|
|
#
|
|
|
|
svala_i += 1
|
|
|
|
# target_i += 1
|
|
|
|
elif p_el.tag.startswith('c'):
|
|
|
|
# svala_i += 1
|
|
|
|
target[-1]['space_after'] = True
|
|
|
|
# elif p_el.tag.startswith('c') and len(target) > 0:
|
|
|
|
|
|
|
|
# target[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
edge_ids = sorted(source_edge_ids) + sorted(target_edge_ids)
|
|
|
|
edge_ids = sorted(source_edge_ids) + sorted(target_edge_ids)
|
|
|
|
edge_id = "e-" + "-".join(edge_ids)
|
|
|
|
edge_id = "e-" + "-".join(edge_ids)
|
|
|
@ -156,14 +166,36 @@ def add_errors(svala_i, source_i, target_i, error, source, target, edges, svala_
|
|
|
|
return svala_i, source_i, target_i
|
|
|
|
return svala_i, source_i, target_i
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def process_file(et, args):
|
|
|
|
def create_conllu(interest_list, sentence_string_id):
|
|
|
|
|
|
|
|
conllu_result = TokenList([{"id": token_i + 1, "form": token['token'], "lemma": None, "upos": None, "xpos": None, "feats": None,
|
|
|
|
|
|
|
|
"head": None, "deprel": None, "deps": None, "misc": "SpaceAfter=No"} if not token['space_after']
|
|
|
|
|
|
|
|
else {"id": token_i + 1, "form": token['token'], "lemma": None, "upos": None, "xpos": None,
|
|
|
|
|
|
|
|
"feats": None, "head": None, "deprel": None, "deps": None, "misc": None} for token_i, token in
|
|
|
|
|
|
|
|
enumerate(interest_list)])
|
|
|
|
|
|
|
|
# Delete last SpaceAfter
|
|
|
|
|
|
|
|
misc = conllu_result[len(conllu_result) - 1]['misc'] if len(conllu_result) > 0 else None
|
|
|
|
|
|
|
|
if misc is not None:
|
|
|
|
|
|
|
|
misc_split = misc.split('|')
|
|
|
|
|
|
|
|
if misc is not None and misc == 'SpaceAfter=No':
|
|
|
|
|
|
|
|
conllu_result[len(conllu_result) - 1]['misc'] = None
|
|
|
|
|
|
|
|
elif misc is not None and 'SpaceAfter=No' in misc_split:
|
|
|
|
|
|
|
|
conllu_result[len(conllu_result) - 1]['misc'] = '|'.join([el for el in misc_split if el != 'SpaceAfter=No'])
|
|
|
|
|
|
|
|
conllu_result.metadata = {"sent_id": sentence_string_id}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return conllu_result.serialize()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def process_file(et, args, nlp):
|
|
|
|
if os.path.exists(args.results_folder):
|
|
|
|
if os.path.exists(args.results_folder):
|
|
|
|
shutil.rmtree(args.results_folder)
|
|
|
|
shutil.rmtree(args.results_folder)
|
|
|
|
os.mkdir(args.results_folder)
|
|
|
|
os.mkdir(args.results_folder)
|
|
|
|
etree_source_documents = []
|
|
|
|
etree_source_documents = []
|
|
|
|
etree_target_documents = []
|
|
|
|
etree_target_documents = []
|
|
|
|
etree_source_paragraphs = []
|
|
|
|
etree_source_divs = []
|
|
|
|
etree_target_paragraphs = []
|
|
|
|
etree_target_divs = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
complete_source_conllu = ''
|
|
|
|
|
|
|
|
complete_target_conllu = ''
|
|
|
|
|
|
|
|
|
|
|
|
document_edges = []
|
|
|
|
document_edges = []
|
|
|
|
for div in et.iter('div'):
|
|
|
|
for div in et.iter('div'):
|
|
|
@ -179,6 +211,8 @@ def process_file(et, args):
|
|
|
|
svala_list = [[fname[:-13], fname] if 'problem' in fname else [fname[:-5], fname] for fname in os.listdir(svala_path)]
|
|
|
|
svala_list = [[fname[:-13], fname] if 'problem' in fname else [fname[:-5], fname] for fname in os.listdir(svala_path)]
|
|
|
|
svala_dict = {e[0]: e[1] for e in svala_list}
|
|
|
|
svala_dict = {e[0]: e[1] for e in svala_list}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
etree_source_paragraphs = []
|
|
|
|
|
|
|
|
etree_target_paragraphs = []
|
|
|
|
paragraph_edges = []
|
|
|
|
paragraph_edges = []
|
|
|
|
|
|
|
|
|
|
|
|
paragraphs = div.findall('p')
|
|
|
|
paragraphs = div.findall('p')
|
|
|
@ -226,26 +260,55 @@ def process_file(et, args):
|
|
|
|
target[-1]['space_after'] = True
|
|
|
|
target[-1]['space_after'] = True
|
|
|
|
|
|
|
|
|
|
|
|
sentence_edges.append(edges)
|
|
|
|
sentence_edges.append(edges)
|
|
|
|
|
|
|
|
if len(source) > 0:
|
|
|
|
etree_source_sentences.append(construct_sentence_from_list(str(sentence_id), source))
|
|
|
|
source_conllu = create_conllu(source, sentence_string_id)
|
|
|
|
etree_target_sentences.append(construct_sentence_from_list(str(sentence_id), target))
|
|
|
|
if len(target) > 0:
|
|
|
|
|
|
|
|
target_conllu = create_conllu(target, sentence_string_id)
|
|
|
|
etree_source_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_source_sentences))
|
|
|
|
|
|
|
|
etree_target_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_target_sentences))
|
|
|
|
if len(source) > 0:
|
|
|
|
|
|
|
|
source_conllu_annotated = nlp(source_conllu).to_conll()
|
|
|
|
|
|
|
|
if len(target) > 0:
|
|
|
|
|
|
|
|
target_conllu_annotated = nlp(target_conllu).to_conll()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if len(source) > 0:
|
|
|
|
|
|
|
|
complete_source_conllu += source_conllu_annotated
|
|
|
|
|
|
|
|
complete_target_conllu += target_conllu_annotated
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if len(source) > 0:
|
|
|
|
|
|
|
|
source_conllu_parsed = conllu.parse(source_conllu_annotated)[0]
|
|
|
|
|
|
|
|
if len(target) > 0:
|
|
|
|
|
|
|
|
target_conllu_parsed = conllu.parse(target_conllu_annotated)[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if len(source) > 0:
|
|
|
|
|
|
|
|
etree_source_sentences.append(construct_sentence_from_list(str(sentence_id), source_conllu_parsed, True))
|
|
|
|
|
|
|
|
if len(target) > 0:
|
|
|
|
|
|
|
|
etree_target_sentences.append(construct_sentence_from_list(str(sentence_id), target_conllu_parsed, False))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
etree_source_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_source_sentences, True))
|
|
|
|
|
|
|
|
etree_target_paragraphs.append(construct_paragraph_from_list(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[1], etree_target_sentences, False))
|
|
|
|
paragraph_edges.append(sentence_edges)
|
|
|
|
paragraph_edges.append(sentence_edges)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
etree_bibl = convert_bibl(bibl)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
etree_source_divs.append((etree_source_paragraphs, copy.deepcopy(etree_bibl)))
|
|
|
|
|
|
|
|
etree_target_divs.append((etree_target_paragraphs, copy.deepcopy(etree_bibl)))
|
|
|
|
document_edges.append(paragraph_edges)
|
|
|
|
document_edges.append(paragraph_edges)
|
|
|
|
|
|
|
|
|
|
|
|
etree_source_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], etree_source_paragraphs))
|
|
|
|
etree_source_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0] + 's', etree_source_divs))
|
|
|
|
etree_target_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0], etree_target_paragraphs))
|
|
|
|
etree_target_documents.append(TeiDocument(paragraph.attrib['{http://www.w3.org/XML/1998/namespace}id'].split('.')[0] + 't', etree_target_divs))
|
|
|
|
|
|
|
|
|
|
|
|
etree_source = build_tei_etrees(etree_source_documents)
|
|
|
|
etree_source = build_tei_etrees(etree_source_documents)
|
|
|
|
etree_target = build_tei_etrees(etree_target_documents)
|
|
|
|
etree_target = build_tei_etrees(etree_target_documents)
|
|
|
|
|
|
|
|
|
|
|
|
# TODO FIX THIS
|
|
|
|
|
|
|
|
etree_links = build_links(document_edges)
|
|
|
|
etree_links = build_links(document_edges)
|
|
|
|
|
|
|
|
|
|
|
|
complete_etree = build_complete_tei(etree_source, etree_target, etree_links)
|
|
|
|
complete_etree = build_complete_tei(copy.deepcopy(etree_source), copy.deepcopy(etree_target), etree_links)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with open(os.path.join(args.results_folder, f"source.conllu"), 'w') as sf:
|
|
|
|
|
|
|
|
sf.write(complete_source_conllu)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with open(os.path.join(args.results_folder, f"target.conllu"), 'w') as sf:
|
|
|
|
|
|
|
|
sf.write(complete_target_conllu)
|
|
|
|
|
|
|
|
|
|
|
|
with open(os.path.join(args.results_folder, f"source.xml"), 'w') as sf:
|
|
|
|
with open(os.path.join(args.results_folder, f"source.xml"), 'w') as sf:
|
|
|
|
sf.write(etree.tostring(etree_source[0], pretty_print=True, encoding='utf-8').decode())
|
|
|
|
sf.write(etree.tostring(etree_source[0], pretty_print=True, encoding='utf-8').decode())
|
|
|
@ -266,8 +329,9 @@ def process_file(et, args):
|
|
|
|
def main(args):
|
|
|
|
def main(args):
|
|
|
|
with open(args.solar_file, 'r') as fp:
|
|
|
|
with open(args.solar_file, 'r') as fp:
|
|
|
|
logging.info(args.solar_file)
|
|
|
|
logging.info(args.solar_file)
|
|
|
|
|
|
|
|
nlp = classla.Pipeline('sl', pos_use_lexicon=True, pos_lemma_pretag=False, tokenize_pretokenized="conllu", type='standard_jos')
|
|
|
|
et = ElementTree.XML(fp.read())
|
|
|
|
et = ElementTree.XML(fp.read())
|
|
|
|
process_file(et, args)
|
|
|
|
process_file(et, args, nlp)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
|
|
|
|
if __name__ == '__main__':
|
|
|
@ -275,8 +339,6 @@ if __name__ == '__main__':
|
|
|
|
description='Read already processed xmls, erase entries without examples and limit gigafida examples to 1 per entry.')
|
|
|
|
description='Read already processed xmls, erase entries without examples and limit gigafida examples to 1 per entry.')
|
|
|
|
parser.add_argument('--solar_file', default='data/Solar2.0/solar2.xml',
|
|
|
|
parser.add_argument('--solar_file', default='data/Solar2.0/solar2.xml',
|
|
|
|
help='input file in (gz or xml currently). If none, then just database is loaded')
|
|
|
|
help='input file in (gz or xml currently). If none, then just database is loaded')
|
|
|
|
parser.add_argument('--txt_file', default='data/txt/input',
|
|
|
|
|
|
|
|
help='input file in (gz or xml currently). If none, then just database is loaded')
|
|
|
|
|
|
|
|
parser.add_argument('--svala_folder', default='data/solar.svala.error.small',
|
|
|
|
parser.add_argument('--svala_folder', default='data/solar.svala.error.small',
|
|
|
|
help='input file in (gz or xml currently). If none, then just database is loaded')
|
|
|
|
help='input file in (gz or xml currently). If none, then just database is loaded')
|
|
|
|
parser.add_argument('--results_folder', default='data/results/solar3.0',
|
|
|
|
parser.add_argument('--results_folder', default='data/results/solar3.0',
|
|
|
|