|
|
|
@@ -235,28 +235,6 @@ def api_token():
|
|
|
|
|
|
|
|
|
|
# FRAMES ----------------------------.
|
|
|
|
|
|
|
|
|
|
def prepare_frames(ret_frames):
    """Attach a per-headword aggregation of sentence indexes to each frame,
    then serialize all frames to a JSON string.

    ret_frames: iterable of frame objects; each is expected to expose
        .tids, .hw, .sentences and .to_json() — TODO confirm against the
        frame class, which is not visible in this chunk.
    Returns a JSON string of the form {"frames": [...]}.
    """

    # append sentences
    for frame in ret_frames:
        # Map sentence id -> one token id from that sentence; tids look like
        # "<sid>.<token>", so dropping the last dot-separated part yields sid.
        unique_sids = {".".join(x.split(".")[:-1]): x for x in frame.tids}
        # frame.sentences = []
        frame.aggr_sent = {}
        # sid, tid==hw
        for sid, tid in unique_sids.items():
            # hwl = vallex.get_token(tid)["lemma"]
            hwl = frame.hw
            # NOTE(review): len(frame.sentences) never changes inside this
            # loop, so every iteration appends the same index under the same
            # key (frame.hw is constant here, and sid/tid are unused) — this
            # looks like transitional code superseded by _aggregate_by_hw,
            # which aggregates per sentence index; confirm before relying on
            # aggr_sent produced by this function.
            tmp_idx = len(frame.sentences)
            if hwl not in frame.aggr_sent:
                frame.aggr_sent[hwl] = []
            frame.aggr_sent[hwl].append(tmp_idx)
    # return (n-frames, rendered template)

    # json frames
    json_ret = {"frames": []}
    for frame in ret_frames:
        # DC is presumably a deepcopy alias defined elsewhere in the file —
        # TODO confirm; copying shields json_ret from later frame mutation.
        json_ret["frames"].append(DC(frame.to_json()))
    return json.dumps(json_ret)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# input: hw, reduct_function
|
|
|
|
|
@app.route("/api/frames")
|
|
|
|
|
def api_get_frames():
|
|
|
|
@@ -280,8 +258,34 @@ def api_get_frames():
|
|
|
|
|
frames = [x for x in frames if x.hw == hw]
|
|
|
|
|
|
|
|
|
|
ret_frames = RF(frames, valdb[SENSEMAP_COLL])
|
|
|
|
|
return prepare_frames(ret_frames)
|
|
|
|
|
|
|
|
|
|
json_ret = {"frames": []}
|
|
|
|
|
for frame in ret_frames:
|
|
|
|
|
json_ret["frames"].append(frame.to_json())
|
|
|
|
|
return json.dumps(json_ret)
|
|
|
|
|
# return prepare_frames(ret_frames)
|
|
|
|
|
|
|
|
|
|
def _aggregate_by_hw(ret_frames):
    """Group each frame's sentence indexes by the lemma of its head token.

    Mutates every frame in-place: frame.aggr_sent becomes a mapping of
    lemma -> list of indexes into frame.sentences (key is None when the
    token id cannot be found in its sentence). Returns the same iterable.
    """

    def _tid_to_lemma(token_id, sentence):
        # Linear scan over (tid, token-dict) pairs — slow and hackish, but
        # individual sentences are short.
        return next(
            (tok[1]["lemma"] for tok in sentence if tok[0] == token_id),
            None,
        )

    for frame in ret_frames:
        frame.aggr_sent = {}  # map of headword: [sentence indexes]
        for idx, token_id in enumerate(frame.tids):
            lemma = _tid_to_lemma(token_id, frame.sentences[idx])
            frame.aggr_sent.setdefault(lemma, []).append(idx)
    return ret_frames
|
|
|
|
|
|
|
|
|
|
# input: functor, reduce_function
|
|
|
|
|
@app.route("/api/functor-frames")
|
|
|
|
@@ -302,12 +306,17 @@ def api_get_functor_frames():
|
|
|
|
|
for ent in cur:
|
|
|
|
|
frames += frames_from_db_entry(ent) # pre-process this step for prod TODO
|
|
|
|
|
|
|
|
|
|
for f in frames:
|
|
|
|
|
print(f.to_json())
|
|
|
|
|
# filter by relevant functor
|
|
|
|
|
frames = [x for x in frames if functor in x.get_functors()]
|
|
|
|
|
|
|
|
|
|
# raw_frames = vallex.functors_index[functor] # TODO
|
|
|
|
|
ret_frames = RF(frames, valdb[SENSEMAP_COLL])
|
|
|
|
|
return prepare_frames(ret_frames)
|
|
|
|
|
ret_frames = _aggregate_by_hw(ret_frames)
|
|
|
|
|
|
|
|
|
|
json_ret = {"frames": []}
|
|
|
|
|
for frame in ret_frames:
|
|
|
|
|
json_ret["frames"].append(DC(frame.to_json()))
|
|
|
|
|
return json.dumps(json_ret)
|
|
|
|
|
|
|
|
|
|
# FRAMES ----------------------------^
|
|
|
|
|
|
|
|
|
|