Compare commits
No commits in common. "master" and "mt-homonymy-support" have entirely different histories.
master...mt-homonym

.gitignore (vendored)

@@ -10,5 +10,5 @@ build/*
 # using kdev4, works fairly nicely!
-.kdev4
+**/*.kdev4
 remote

@@ -156,7 +156,16 @@
     vertical-align: super;
     font-size: 0.7em;
   }
+  .translation-explanation:not(:empty) {
+    font-style: italic;
+
+    &:before {
+      content: '[';
+    }
+    &:after {
+      content: ']';
+    }
+  }
 }
 
 .translation-add {
@@ -166,20 +175,6 @@
   }
 }
 
-.explanations:not(:empty) {
-  font-style: italic;
-
-  &:not(.solo) {
-    &:before {
-      content: '[';
-    }
-
-    &:after {
-      content: ']';
-    }
-  }
-}
-
 .example {
   clear: left;
   margin-left: 1em;

@@ -5,7 +5,7 @@ from model.tags import export_tag
 def export_to_xml(model):
     xml_document = export_entry(model.entry)
     serializer = __new__(XMLSerializer())
-    return serializer.serializeToString(xml_document)
+    return serializer.serializeToString(xml_document);
 
 
 def export_entry(entry):
@@ -23,14 +23,9 @@ def export_entry(entry):
 
     headword = doc.createElement("headword")
     headword_lemma = doc.createElement("lemma")
-
-    # headword_lemma = entry.original_xml.querySelector("head headword lemma")
-
     headword_lemma.textContent = entry.headword
     if entry.headword_type is not None:
         headword_lemma.setAttribute("type", entry.headword_type)
-    if entry.headword_audio is not None:
-        headword_lemma.setAttribute("audio", entry.headword_audio)
     headword.appendChild(headword_lemma)
     head.appendChild(headword)
 
@@ -57,14 +52,12 @@ def export_entry(entry):
     lexunit.appendChild(lexeme)
     head.appendChild(lexunit)
-
 
     grammar = doc.createElement("grammar")
     grammar_category = doc.createElement("category")
     grammar_category.textContent = entry.grammar
     grammar.appendChild(grammar_category)
     head.appendChild(grammar)
-
 
     if len(entry.measure) > 0:
         measure_list = doc.createElement("measureList")
         measure = doc.createElement("measure")
@@ -132,9 +125,7 @@ def export_sense(doc, sense):
 
     for example in sense.examples:
         example_container = example.export(doc)
-        translation_container_list = doc.createElement("translationContainerList")
-        export_translation_list(doc, example, translation_container_list)
-        example_container.appendChild(translation_container_list)
+        export_translation_list(doc, example, example_container)
         example_container_list.appendChild(example_container)
 
     return sense_xml
@@ -155,27 +146,16 @@ def export_translation(doc, translation):
     actual_t.textContent = translation.translation
     actual_t.setAttribute("targetLang", translation.targetLang)
 
     if translation.audio:
         actual_t.setAttribute("audio", translation.audio)
 
     if translation.source:
         actual_t.setAttribute("source", translation.source)
     translation_xml.appendChild(actual_t)
 
-    if len(translation.explanationList) > 0 :
-        explanationList = _export_explanation_list(doc, translation.explanationList)
-        translation_xml.appendChild(explanationList)
-
-
+    explanation = doc.createElement("explanation")
+    explanation.textContent = translation.explanation
+    translation_xml.appendChild(explanation)
 
     return translation_xml
 
-def _export_explanation_list(doc, lst):
-    result = doc.createElement('explanationList')
-    for explanation in lst:
-        result.appendChild(explanation.export(doc))
-
-    return result
-
 def _export_label_list(doc, lst):
     result = doc.createElement("labelList")

@@ -24,41 +24,29 @@ def build_structure_conversions():
         if line[1] == "struktura":
             continue
 
-        vto_structure = line[1].strip().split(">")[1].split("<")[0]
         vto_name = line[2].strip()
-        vto_id = line[6].strip()
+        vto_id = line[4].strip()
 
         if 0 in (len(vto_name), len(vto_id)):
            continue
 
        vfrom = "^" + line[0].replace("?", "\?").replace("%s", "([a-zA-ZčšžČŠŽ-]+)") + "$"
-        structure_conversions.append((__new__(RegExp(vfrom, 'u')), vto_name, vto_structure, vto_id))
+        structure_conversions.append((__new__(RegExp(vfrom, 'u')), vto_name, vto_id))
 
 
-def convert_structure(structure, type):
+def convert_structure(structure):
     if structure_conversions is None:
         build_structure_conversions()
 
-    for vfrom, vto_name, vto_structure, vto_id in structure_conversions:
+    for vfrom, vto_name, vto_id in structure_conversions:
         match = structure.match(vfrom)
-        # fix for ids 106, 107, 44 which instead matched with 30
-        if match and vto_id == '30' and '-s' in type:
-            vto_name = 's0-vp-s0'
-            vto_id = '106'
-        elif match and vto_id == '30' and '-g' in type:
-            vto_name = 'gg-vp-gg'
-            vto_id = '107'
-        elif match and vto_id == '30' and '-r' in type:
-            vto_name = 'r-vp-r'
-            vto_id = '44'
-
         if match:
             # we need to remove replace alias here as we want to use javascript's one
             __pragma__('noalias', 'replace')
             result = structure.replace(vfrom, vto_name).strip()
             __pragma__('alias', 'replace', "py_replace")
 
-            return result, vto_structure, vto_id
+            return result, vto_id
 
     window.console.log("Unknown structure: ", structure)
-    return 'N/A', '/'
+    return None

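Note for orientation, not part of the diff above: the left side of this hunk has convert_structure() return (name, structure, id) with a 'N/A', '/' fallback, while the right side returns (name, id) or None. The sketch below imitates the right-hand contract in plain Python; re stands in for the JavaScript RegExp, the single table entry is invented for the example, and the final guard mirrors what a later hunk adds to SkeCollocation.__init__:

import re

# invented table entry, shaped like the (pattern, name, id) tuples built above
structure_conversions = [(re.compile(r"^x ([\w-]+)$"), "s0-x-s0", "12")]

def convert_structure(structure):
    for vfrom, vto_name, vto_id in structure_conversions:
        if vfrom.match(structure):
            # same idea as the JS String.replace(regex, name) call in the diff
            return vfrom.sub(vto_name, structure).strip(), vto_id
    return None  # unknown structure: no 'N/A', '/' placeholder any more

print(convert_structure("x zelo-lepo"))    # ('s0-x-s0', '12')
info = convert_structure("unknown thing")  # no match -> None
structure_name, structure_id = (None, None) if info is None else info
print(structure_name, structure_id)        # None None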
@@ -10,19 +10,16 @@ def generic_list_getter():
     return result
 
 # Formats data from inputs to name-value objects
-def double_list_getter(firstParameter, secondParameter, allowEmptyField = False):
+def homonymy_list_getter():
     result = []
-    for row in document.getElementsByClassName("double-list-row"):
-        firstValue = row.querySelector("." + firstParameter + "-input").value
-        secondValue = row.querySelector("." + secondParameter + "-input").value
+    for row in document.getElementsByClassName("label-list-row"):
+        value = row.querySelector(".value-input").value
+        name = row.querySelector(".name-input").value
 
-        if (allowEmptyField is False and '' in [firstValue, secondValue]):
+        if ("" in [name, value]):
             continue
 
-        if (allowEmptyField is True and all('' == value or value.isspace() for value in [firstValue, secondValue])):
-            continue
-
-        result.append({firstParameter: firstValue, secondParameter: secondValue})
+        result.append({"name": name, "value": value})
 
     return result

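Note for orientation, not part of the diff above: on the right-hand side the getter collects one name/value dict per label-list-row, and EditHomonymy (next hunk) stores that list directly in model.entry.homonymy. A minimal plain-Python imitation of the row-filtering logic, where plain dicts stand in for the DOM rows and collect_homonymy is a name used only here:

def collect_homonymy(rows):
    # mirrors homonymy_list_getter(): skip rows where either field is empty
    result = []
    for row in rows:
        name = row.get("name", "")
        value = row.get("value", "")
        if "" in [name, value]:
            continue
        result.append({"name": name, "value": value})
    return result

rows = [{"name": "1", "value": "first homonym"}, {"name": "", "value": "ignored"}]
print(collect_homonymy(rows))   # [{'name': '1', 'value': 'first homonym'}]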
@@ -90,7 +90,7 @@ class EditVariants(Message):
 
 class EditHomonymy(Message):
     def update_model(self, model):
-        homonymy = common_accessors.double_list_getter("value", "name")
+        homonymy = common_accessors.homonymy_list_getter()
         model.entry.homonymy = homonymy

@@ -26,7 +26,6 @@ class SkeExample:
         self.mid = ""
         self.s_id = ""
         self.gf2_good = None
-        self.gf2_check = False
 
     @staticmethod
     def fromLine(line):
@@ -59,8 +58,15 @@ class SkeCollocation:
     def __init__(self, data):
         self.word = data.word
         self.frequency = data.count
-        self.gramrel = data.gramrel
-        self.structure_name, self.structure, self.structure_id = convert_structure(data.gramrel, data.lempos)
+
+        info = convert_structure(data.gramrel)
+        if info is None:
+            self.structure_name = None
+            self.structure_id = None
+        else:
+            self.structure_name = info[0]
+            self.structure_id = info[1]
 
         self.other = {"score": data.score, "cm": data.cm}
@@ -103,28 +109,13 @@ def match_gf2_examples(data, *args):
     xhr.send(to_send)
 
 
-def make_cql_query(ske_index, search_term, pos):
-    cql_pos= {
-        "samostalnik": ("S.*", "-s"),
-        "glagol": ("G.*", "-g"),
-        "pridevnik": ("P.*", "-p"),
-        "prislov": ("R.*", "-r"),
-        "zaimek": ("Z.*", "-z")
-    }
-
-    if ske_index == 0:
-        return "[ lemma=\"{0}\" & tag=\"{1}\" ]".format(search_term, cql_pos[pos][0])
-    else:
-        return search_term + cql_pos[pos][1]
-
-
 class SkeModal(ClickMessage):
     def on_event(self, event):
         # event could be data if this is the return from external library
         if type(event) in [list, int]:
             self.add_arg(event)
         else:
-            if len(self._args) < 5:
+            if len(self._args) < 4:
                 self.add_arg(None)
         super().on_event(event)
@@ -132,20 +123,18 @@ class SkeModal(ClickMessage):
         page_num = self.get_arg(0, int)
         search_term = self.get_arg(1, str)
         ske_index = self.get_arg(2, int)
-        ske_pos_query = self.get_arg(3, str)
         ske_lookup = model.ske.url_for_kind_index(ske_index)
 
-        next_message = msg(SkeModal, page_num, search_term, ske_index, ske_pos_query)
+        next_message = msg(SkeModal, page_num, search_term, ske_index)
 
         # could be none if empty
-        data = self.get_arg(4)
+        data = self.get_arg(3)
 
         if data is None:
             params = {"additional_refs": "s.id,p.id",
                       "page_num": page_num,
                       "error_callback": next_message,
-                      "data_parser": get_parser(ske_index),
-                      "querytype": ske_pos_query}
+                      "data_parser": get_parser(ske_index)}
 
             gdex = get_preference("ske_gdex")
             if gdex:
@@ -154,36 +143,15 @@ class SkeModal(ClickMessage):
                     return
                 params["gdex"] = gdex
 
-            # enable CQL query
-            if ske_pos_query is not "simple":
-                search_term_old = search_term
-                search_term = make_cql_query(ske_index, search_term, ske_pos_query)
-
             model.ske.request(search_term, next_message, ske_lookup, params)
-            search_term = search_term_old
 
         elif type(data) is list:
+            window.console.log(data)
             # check if gf2 examples are loaded or not
-            if not data[0].gf2_check and type(data[0]) is SkeExample:
+            if data[0].gf2_good is None:
                 # we get the data, we have to match it with available data on our gf2 examples API
-                match_gf2_examples(data, page_num, search_term, ske_index, ske_pos_query)
-            elif type(data[0]) is SkeCollocation:
-                # filtering, grouping and sorting data
-                data.sort(key= lambda x: float(x.other["score"]), reverse=True)
-                _data = []
-                while len(data) > 0:
-                    max_item = data.pop(0) # max(data, key= lambda x: x.other["score"])
-                    _data.append(max_item)
-                    for item in data:
-                        if "N/A" in item.structure_name:
-                            data.remove(item)
-                        elif item.structure_name.strip() == max_item.structure_name.strip():
-                            _data.append(item)
-                    for delete_item in _data:
-                        if delete_item in data:
-                            data.remove(delete_item)
-                    data = _data
+                match_gf2_examples(data, page_num, search_term, ske_index)
 
             model.modal_set(lambda: modals.ske_list(
                 search_term, data, page_num, model.entry.senses, model.ske.request_kinds))
@@ -194,25 +162,19 @@ class SkeModal(ClickMessage):
 class SkeModalGf2Update(SkeModal):
     def on_event(self, event):
         response_data = window.JSON.parse(event.target.response)
-        data = self.get_arg(4)
+        data = self.get_arg(3)
 
         data_dict = {}
         for example in data:
             example.gf_good = False
             data_dict[example.s_id] = example
 
-        bad_response = dict(response_data["bad"])
-        for gf_sid, gf_data in bad_response.items():
-            data_dict[gf_sid].gf2_good = None
-            data_dict[gf_sid].gf2_check = True
-
         good_response = dict(response_data["good"])
         for gf_sid, gf_data in good_response.items():
             data_dict[gf_sid].left = gf_data.left
             data_dict[gf_sid].mid = gf_data.mid
             data_dict[gf_sid].right = gf_data.right
             data_dict[gf_sid].gf2_good = True
-            data_dict[gf_sid].gf2_check = True
 
         # changed data_dict, now we can redraw!
         # just let it do its thing in update_model
@@ -230,7 +192,6 @@ class SearchInSkeModal(SkeModal):
         self.add_arg(int(document.getElementById("ske-page-num").value))
         self.add_arg(document.getElementById("ske-search").value)
         self.add_arg(document.getElementById("ske-select").selectedIndex)
-        self.add_arg(document.getElementById("ske-pos-query").value)
         super().on_event(event)
@@ -264,8 +225,6 @@ class SkeInsert(DataChgClickMessage):
                 console.log("You really should not be here, my lady")
                 continue
 
-        model.reset()
-
     def _as_corpus_example(self, example):
         new_example = Example()
         new_example.inner = CorpusExample()
@@ -278,7 +237,7 @@ class SkeInsert(DataChgClickMessage):
 
         lex_mid = ComponentLexeme()
         lex_mid.text = example["mid"]
-        lex_mid.role = "headword"
+        lex_mid.role = "collocation"
 
         lex_right = ComponentLexeme()
         lex_right.text = example["right"]
@@ -297,45 +256,17 @@ class SkeInsert(DataChgClickMessage):
         new_collocation.inner.other_attributes["frequency"] = example.frequency
         new_collocation.inner.type = "collocation"
 
-        headword = document.getElementById("ske-search").value
-        lexemes = []
-        structure_name = example.structure_name.split("-")
-        gramrel = example.gramrel.split("_")
-        structure = example.structure.split(" ")
-        structure.append("") # Bad fix: we have to add something for structure l-gg-ggn
+        lex_left = ComponentLexeme()
+        lex_left.text = ""
+        lex_left.role = None
 
-        for i in range(len(structure_name)):
-            lex = ComponentLexeme()
-            structure[i] = structure[i].replace("Inf-", "")
+        lex_mid = ComponentLexeme()
+        lex_mid.text = example.word
+        lex_mid.role = "collocation"
 
-            # take care of negations "ne"
-            if "Neg-" in structure[i]:
-                structure[i] = structure[i].replace("Neg-", "")
-                negation_flag = True
-                n_lex = ComponentLexeme()
-                n_lex.text = "ne"
-                n_lex.role = "other"
-                lexemes.append(n_lex)
+        lex_right = ComponentLexeme()
+        lex_right.text = ""
+        lex_right.role = None
 
-            if structure[i] is "":
-                continue # skipping bcs of fix
-            elif "Vez-gbz" in structure[i]:
-                lex.text = "je"
-                lex.role = "other"
-            elif structure_name[i] in ["d", "vd", "zp"]:
-                lex.text = gramrel[i]
-                lex.text = lex.text.replace("-d", "").replace("%", "")
-                lex.role = "other"
-            elif structure_name[i] is "vp":
-                lex.text = structure[i]
-                lex.role = "other"
-            elif structure[i][0] in ["S", "G", "P", "R"]:
-                lex.text = headword
-                lex.role = "headword"
-            else:
-                lex.text = example.word
-                lex.role = "collocate"
-            lexemes.append(lex)
-
-        new_collocation.components.extend(lexemes)
+        new_collocation.components.extend([lex_left, lex_mid, lex_right])
         return new_collocation

@@ -4,7 +4,6 @@ import message.common_accessors as common_accessors
 from browser import document, window
 from model.translation import Translation
 from model.sense import Sense
-from model.explanation import Explanation
 
 
@@ -14,14 +13,8 @@ class EditTranslation(DataChgClickMessage):
         self.old_cluster_idx = self.get_arg(1, int)
 
         self.translation.translation = document.getElementById("etv").value
-        # This could be dangerous if double_list_getter is getting data from any other list as well.
-        explanations = common_accessors.double_list_getter('value', 'language', True)
-        self.translation.explanationList = []
-        for entry in explanations:
-            explanation = Explanation()
-            explanation.value = entry.value
-            explanation.language = entry.language
-            self.translation.explanationList.append(explanation)
+        self.translation.explanation = document.getElementById("ete").value
 
         # common_accessors.label_list_getter()
         self.translation.tags = common_accessors.label_list_getter()

@@ -2,4 +2,3 @@ from model.model import Model
 from model.sense import Sense
 from model.translation import Translation
 from model.example import Example
-from model.explanation import Explanation

@@ -14,7 +14,6 @@ class Entry(Data):
         self.headword = ""
         self.homonymy = []
         self.headword_type = None
-        self.headword_audio = None
         self.grammar = ""
         self.comment = ""
         self.variants = []
@@ -23,20 +22,16 @@ class Entry(Data):
         self.measure = {}
         self.labels = []
         self.senses = []
-        self.original_xml = None
 
     def import_xml(self, entry_xml):
-        self.original_xml = entry_xml.cloneNode(True)
         status = entry_xml.querySelector("head status")
 
         headword = entry_xml.querySelector("head headword lemma")
 
         grammar = entry_xml.querySelector("head grammar category")
         comment = entry_xml.querySelector("head comment")
         self.status = status.textContent if status else ""
         self.headword = headword.textContent if headword else ""
-        self.headword_type = headword.getAttribute("type") if headword and headword.hasAttribute("type") else None
-        self.headword_audio = headword.getAttribute("audio") if headword and headword.hasAttribute("audio") else None
+        self.headword_type = headword.getAttribute("type") if headword else None
         self.grammar = grammar.textContent if grammar else ""
         self.comment = comment.textContent if comment else ""
         self.variants = [v.textContent for v in entry_xml.querySelectorAll("head variantList variant")]

@@ -19,10 +19,11 @@ class ComponentLexeme(Data):
         else:
             self.text = xml.textContent
             self.role = xml.getAttribute("role")
 
             if xml.hasAttribute("space"):
                 self.no_space = xml.getAttribute("space") == "false"
 
-            for oth_attr in ["lexeme_id", "lexical_unit_lexeme_id", "slolex", "kol", "sloleks"]:
+            for oth_attr in ["lexical_unit_lexeme_id", "slolex", "kol"]:
                 if xml.hasAttribute(oth_attr):
                     self.other_attributes[oth_attr] = xml.getAttribute(oth_attr)
@@ -32,6 +33,7 @@ class ComponentLexeme(Data):
     def export(self, doc):
+        if self.role is None:
+            return doc.createTextNode(self.text)
 
         result = doc.createElement("comp")
         result.setAttribute("role", self.role)
         result.textContent = self.text

@@ -40,7 +40,7 @@ class Example(Data):
         return example
 
     def import_xml(self, example_xml):
-        self.translations = from_container_list(example_xml.querySelectorAll("translationContainerList translationContainer"))
+        self.translations = from_container_list(example_xml.querySelectorAll("translationContainer"))
 
         if example_xml.hasAttribute("modified"):
             self.edited = example_xml.getAttribute("modified") == "true"

@@ -1,21 +0,0 @@
-from model.data import Data
-
-from lib.snabbdom import h
-
-
-class Explanation(Data):
-    def __init__(self):
-        self.value = ""
-        self.language = ""
-
-    def import_dom(self, explanation_dom):
-
-        self.value = explanation_dom.textContent if explanation_dom else ""
-        self.language = explanation_dom.getAttribute("language") if explanation_dom.hasAttribute("language") else ""
-
-    def export(self, doc):
-        result = doc.createElement("explanation")
-        result.textContent = self.value
-        if self.language != "": result.setAttribute('language', self.language)
-
-        return result

@@ -18,14 +18,14 @@ class Sense(Data):
 
     def import_xml(self, sense_xml, idx):
         self.original_idx = idx
 
         for definition in sense_xml.querySelectorAll("definitionList definition"):
             key = definition.getAttribute("type")
             self.definition[key] = definition.textContent
 
         self.labels = import_label_list("sense > labelList label", sense_xml)
         self.translations = from_container_list(
-            sense_xml.querySelectorAll('sense > translationContainerList translationContainer'))
+            sense_xml.querySelectorAll("translationContainerList translationContainer"))
 
         for example_xml in sense_xml.querySelectorAll("exampleContainerList exampleContainer"):
             example = Example()
             example.import_xml(example_xml)

@@ -1,5 +1,4 @@
 from model.tags import import_label_list
-from model.explanation import Explanation
 from model.data import Data
 
 from lib.snabbdom import h
@@ -33,8 +32,7 @@ class Translation(Data):
         self.translation = ""
         self.source = ""
         self.targetLang = ""
         self.audio = ""
-        self.explanationList = set()
+        self.explanation = ""
         self.tags = []
 
     def import_xml(self, translation_xml):
@@ -44,16 +42,12 @@ class Translation(Data):
         self.translation = translation.textContent
         self.source = translation.getAttribute("source") if translation.hasAttribute("source") else ""
         self.targetLang = translation.getAttribute("targetLang") if translation.hasAttribute("targetLang") else ""
         self.audio = translation.getAttribute("audio") if translation.hasAttribute("audio") else ""
 
-        explanationList = translation_xml.querySelectorAll("explanationList explanation")
-        for explanation_dom in explanationList:
-            explanation = Explanation()
-            explanation.import_dom(explanation_dom)
-            self.explanationList.append(explanation)
-
+        explanation = translation_xml.querySelector("explanation")
+        self.explanation = explanation.textContent if explanation else ""
         self.tags = import_label_list("labelList label", translation_xml)
 
 
     def view(self, model):
         elements = []
@@ -67,10 +61,8 @@ class Translation(Data):
         if self.source:
             elements.append(h("span.translation-source", {}, self.source))
 
-        if (self.explanationList):
-            explanation_class = ".explanations" if self.translation else ".explanations.solo"
-            explanations = [explanation.value for explanation in self.explanationList]
-            elements.append(h("span{}".format(explanation_class), {}, ", ".join(explanations)))
+        explanation_class = ".translation-explanation" if self.translation else ""
+        elements.append(h("span{}".format(explanation_class), {}, self.explanation))
 
         return h("div.translation-div", {"on": {"click": M.msg(M.ShowTranslationMenu, self) }}, elements)
@@ -81,6 +73,6 @@ class Translation(Data):
         # next two are not checked as the also can not be deleted via gui
         # result = result and self.source == ""
         # result = result and self.targetLang == ""
-        result = result and len(self.explanationList) == 0
+        result = result and self.explanation == ""
         result = result and len(self.tags) == 0
         return result

@@ -49,7 +49,7 @@ def generic_list_editor(title, element_list_getter):
 
 def homonymy_editor(title, current_labels):
     def split_line2(left, right):
-        cls = "flex.two{}".format(".double-list-row")
+        cls = "flex.two{}".format(".label-list-row")
         return h("div.{}".format(cls), {}, [
             h("div.half", {}, left), h("div.half", {}, right)])
@@ -67,28 +67,6 @@ def homonymy_editor(title, current_labels):
 
     return content
 
-def explanation_editor(title, current_labels):
-    def split_line2(left, right):
-        cls = "flex.two{}".format(".double-list-row")
-        return h("div.{}".format(cls), {}, [
-            h("div.four-fifth", {}, left), h("div.fifth", {}, right)])
-
-    content = [h("p", {}, title)]
-    for i, explanation in enumerate(current_labels()):
-        language = []
-        value = []
-        language.append(h("label", {"attrs": {"for": i}}, "Language:"))
-        language.append(h("input.language-input", {"props": {"type": "text", "value": explanation["language"], "id": i}}, ""))
-        value.append(h("label", {"attrs": {"for": i + "-value"}}, "Value:"))
-        value.append(h("input.value-input", {"props": {"type": "text", "value": explanation["value"], "id": i + "-value"}}, ""))
-
-        content.append(split_line2(value, language))
-    content.append(h("button", {"on": {"click": message.msg(message.AddToGenericList, current_labels)}}, "+"))
-
-    return content
-
-
 def label_list_editor(current_labels, add_label_message_class):
     def split_line3(left, center, right, is_llr=True):
         cls = "flex.three{}".format(".label-list-row" if is_llr else "")

@@ -19,9 +19,10 @@ def edit_translation(translation, parent, cluster_idx, num_clusters, cls):
 
     # first line: transalation itself
     content.extend([
-        split_line2("Prevedek:", h("textarea#etv", {"props": {"value": translation.translation}}, ""))])
-
-    content.extend(explanation_editor("Razlage:", lambda: translation.explanationList))
+        split_line2("Prevedek:",
+                    h("textarea#etv", {"props": {"value": translation.translation}}, "")),
+        split_line2("Razlaga:",
+                    h("textarea#ete", {"props": {"value": translation.explanation}}, ""))])
 
     # cluster number
     options = [h("option", {"props": {"selected": idx == cluster_idx}}, str(idx + 1)) for idx in range(num_clusters + 1)]
@@ -64,18 +65,14 @@ def edit_example(example, sense):
             result.append(h("span.example-component-button.example-component-none",
                             {"on": {"click": role_msg(idx, "none")}}, "N"))
 
-            if "-" not in example.inner.other_attributes["structureName"]:
-                result.extend([
-                    h("span.example-component-button",
-                      {"on": {"click": message.msg(message.ExampleComponentAdd, example_original, idx)}}, "+"),
-                    h("span.example-component-button",
-                      {"on": {"click": message.msg(message.ExampleComponentRemove, example_original, idx)}}, "-")])
+            result.extend([
+                h("span.example-component-button",
+                  {"on": {"click": message.msg(message.ExampleComponentAdd, example_original, idx)}}, "+"),
+                h("span.example-component-button",
+                  {"on": {"click": message.msg(message.ExampleComponentRemove, example_original, idx)}}, "-")])
 
         return result
 
-    divs.append(h("div.flex.five.example-component", {}, [h("div.one-fifth", {}, "Struktura:"),
-                  h("div.three-fifth", {}, example.inner.other_attributes["structureName"])]))
-
     for idx, component in enumerate(example.components):
         role_txt = component.role if component.role is not None else "none"
         color_class = ".example-component-" + role_txt
@@ -181,10 +178,6 @@ def ske_list(search_term, data, page_num, senses, ske_kinds):
                     "type": "number",
                     "min": 1,
                     "step": 1}}, "")]),
-            h("label.fourth.ske-mid-input", {}, [
-                h("select#ske-pos-query", {}, [h("option", {}, "{}".format(pos)) for pos in ["simple", "samostalnik", "glagol", "pridevnik", "prislov", "zaimek"]])
-            ]),
-
             h("span.fourth.button.ske-right-button",
               {"on": {"click": message.msg(message.SearchInSkeModal)}}, "Isci")]),
         h("div.ske-list",

@@ -58,7 +58,7 @@ class View:
 def view_ske_button(model):
     return h(
         "span#ske-button.button.toggle",
-        { "on": {"click": msg(ShowSkeModal, 1, model.entry.headword, 0, "simple")} },
+        { "on": {"click": msg(ShowSkeModal, 1, model.entry.headword, 0)} },
         h("svg#ske-img", {
             "attrs": {
                 "xmlns": "http://www.w3.org/2000/svg",