backend: added sentence_count to limit the number of sentences displayed in the frontend

voje 2019-04-28 19:02:08 +02:00
parent bf0970a90a
commit 910955abb8
3 changed files with 23 additions and 101 deletions
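
In short: reduced frames now carry the total number of merged frames/sentences in sentence_count, while the attached tids and sentences lists stop growing once SENTENCE_LIMIT (10) tids have been collected, so the frontend can show a full count without receiving every sentence. A minimal consumption sketch (hypothetical code, not part of this commit; reduced_frame stands for any Frame built by reduce_1 or frames_from_sense_ids in the diffs below):

    data = reduced_frame.to_json()
    shown = len(data["sentences"])   # truncated during reduction (SENTENCE_LIMIT = 10)
    total = data["sentence_count"]   # full count, kept for paging
    print("showing {} of {} example sentences".format(shown, total))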


@@ -37,7 +37,8 @@ def frames_from_db_entry(dbent):
     return frames

 class Frame():
-    def __init__(self, tids, deep_links=None, slots=None, hw_lemma=None, sentences=None):
+    def __init__(self, tids, deep_links=None, slots=None,
+                 hw_lemma=None, sentences=None, sentence_count=None):
         self.hw = hw_lemma
         self.tids = tids  # list of tokens with the same hw_lemma
         # Each tid = "S123.t123";
@@ -50,6 +51,8 @@ class Frame():
         self.sense_info = {}
         self.sentences = sentences
         self.aggr_sent = None  # Dictionary { hw: self.sentences idx }
+        self.sentence_count = sentence_count  # paging, optimization

     def get_functors(self):
         return [slot.functor for slot in self.slots]
@@ -62,7 +65,8 @@ class Frame():
             "slots": [slot.to_json() for slot in self.slots],
             "sentences": self.sentences,
             "aggr_sent": self.aggr_sent,
-            "sense_info": self.sense_info
+            "sense_info": self.sense_info,
+            "sentence_count": self.sentence_count
         }
         return ret
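
With this change, Frame.to_json() gains one key. Roughly, a reduced frame now serializes like this (illustrative values only; the structure of the other fields is untouched by this commit):

    {
        "hw": "hoditi",                   # example headword lemma
        "tids": ["S123.t1", "S124.t5"],
        "slots": [{"functor": "ACT", "tids": ["S123.t2"], "count": 2}],
        "sentences": ["..."],             # truncated list of example sentences
        "aggr_sent": None,
        "sense_info": {},
        "sentence_count": 37              # new: total number of merged frames, for paging
    }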


@@ -1,96 +0,0 @@
-import logging
-
-log = logging.getLogger(__name__)
-
-
-class Frame():
-    def __init__(self, tids, deep_links=None, slots=None, hw=None):
-        self.hw = hw
-        self.tids = tids  # list of tokens with the same hw_lemma
-        # Each tid = "S123.t123";
-        # you can get sentence with vallex.get_sentence(S123)
-        self.slots = []
-        if slots is None:
-            self.slots = self.init_slots(deep_links)
-        else:
-            self.slots = slots
-        self.sense_info = {}
-        self.sentences = None  # Used for passing to view in app.py, get_frames
-        self.aggr_sent = None  # Dictionary { hw: self.sentences idx }
-
-    def to_json(self):
-        ret = {
-            "hw": self.hw,
-            "tids": self.tids,
-            "slots": [slot.to_json() for slot in self.slots],
-            "sentences": self.sentences,
-            "aggr_sent": self.aggr_sent,
-            "sense_info": self.sense_info
-        }
-        return ret
-
-    def init_slots(self, deep):
-        slots = []
-        for link in deep:
-            slots.append(Slot(
-                functor=link["functor"],
-                tids=[link["to"]]
-            ))
-        return slots
-
-    def sort_slots(self):
-        # ACT, PAT, alphabetically
-        srt1 = [
-            x for x in self.slots
-            if (x.functor == "ACT" or
-                x.functor == "PAT")
-        ]
-        srt1 = sorted(srt1, key=lambda x: x.functor)
-        srt2 = [
-            x for x in self.slots
-            if (x.functor != "ACT" and
-                x.functor != "PAT")
-        ]
-        srt2 = sorted(srt2, key=lambda x: x.functor)
-        self.slots = (srt1 + srt2)
-
-    def to_string(self):
-        ret = "Frame:\n"
-        ret += "sense_info: {}\n".format(str(self.sense_info))
-        ret += "tids: ["
-        for t in self.tids:
-            ret += (str(t) + ", ")
-        ret += "]\n"
-        if self.slots is not None:
-            ret += "slots:\n"
-            for sl in self.slots:
-                ret += (sl.to_string() + "\n")
-        return ret
-
-
-class Slot():
-    # Each slot is identified by its functor (ACT, PAT, ...)
-    # It consists of different tokens.
-    def __init__(self, functor, tids=None, count=None):
-        self.functor = functor
-        self.tids = tids or []  # combining multiple sentences vertically
-        self.count = count or 1
-
-    def to_string(self):
-        ret = "---- Slot:\n"
-        ret += "functor: {}\n".format(self.functor)
-        ret += "tids: ["
-        for t in self.tids:
-            ret += (str(t) + ", ")
-        ret += "]\n"
-        ret += "]\n"
-        ret += "----\n"
-        return ret
-
-    def to_json(self):
-        ret = {
-            "functor": self.functor,
-            "tids": self.tids,
-            "count": self.count
-        }
-        return ret


@@ -9,6 +9,7 @@ import logging
 log = logging.getLogger(__name__)

 SENSE_UNDEFINED = "nedefinirano"
+SENTENCE_LIMIT = 10

 ## TIDI: use frame.py
 ## TODO: build a list of [Frame] with lists of [Slot]
@@ -70,7 +71,10 @@ def reduce_1(frames, valdb_sensemap=None):
         for functor in fs[0]:
             slots[functor] = Slot(functor=functor)
         # Reduce slots from all frames. (Merge ACT from all frames, ...)
+        sentence_count = len(fs[1])
         for frame in fs[1]:
+            if len(tids) >= SENTENCE_LIMIT:
+                break
             tids += frame.tids
             sentences += frame.sentences
             for sl in frame.slots:
@@ -78,8 +82,13 @@ def reduce_1(frames, valdb_sensemap=None):
         slots_list = []
         for k, e in slots.items():
             slots_list.append(e)
-        # TODO does appending hw_lemma of first frame work for functor frames too?
-        rf = Frame(hw_lemma=fs[1][0].hw, tids=tids, slots=slots_list, sentences=sentences)
+        rf = Frame(
+            hw_lemma=fs[1][0].hw,
+            tids=tids,
+            slots=slots_list,
+            sentences=sentences,
+            sentence_count=sentence_count
+        )
         rf.sort_slots()
         ret_frames.append(rf)
     return sorted_by_len_tids(ret_frames)
@@ -182,7 +191,11 @@ def frames_from_sense_ids(raw_frames, id_map):
         tids = []
         reduced_slots = []
         sentences = []
+        sentence_count = len(frames)
         for frame in frames:
+            if len(tids) >= SENTENCE_LIMIT:
+                break
             tids += frame.tids
             sentences += frame.sentences
             for slot in frame.slots:
@@ -204,7 +217,8 @@ def frames_from_sense_ids(raw_frames, id_map):
             hw_lemma="derp",
             tids=tids,
             slots=reduced_slots,
-            sentences=sentences
+            sentences=sentences,
+            sentence_count=sentence_count,
         )
         id_map_entry = (
             id_map.get(tids[0]) or
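
Both reduce_1 and frames_from_sense_ids now follow the same pattern: record the size of the frame group up front, then stop merging tids and sentences once SENTENCE_LIMIT tids have been collected. Isolated as a sketch (merge_with_cap is a hypothetical helper name; the frame objects only need tids and sentences lists, as in the real code above):

    SENTENCE_LIMIT = 10

    def merge_with_cap(frames):
        tids, sentences = [], []
        sentence_count = len(frames)          # full group size, reported for paging
        for frame in frames:
            if len(tids) >= SENTENCE_LIMIT:   # enough data attached; skip the rest
                break
            tids += frame.tids
            sentences += frame.sentences
        return tids, sentences, sentence_count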