forked from kristjan/cjvt-valency
Compare commits
20 Commits
Author | SHA1 | Date |
---|---|---|
voje | b4db4e5255 | 5 years ago |
voje | ef02583d72 | 5 years ago |
voje | 155c0b2c3d | 5 years ago |
voje | c96b199932 | 5 years ago |
voje | 02c0e74798 | 5 years ago |
voje | 2ff339e24c | 5 years ago |
voje | 707034153c | 5 years ago |
voje | ba72802f1f | 5 years ago |
voje | 14a6e2423b | 5 years ago |
voje | c5fc78dca1 | 5 years ago |
voje | 439446b1d0 | 5 years ago |
voje | eca236bc7e | 5 years ago |
voje | 11d3dfc0e6 | 5 years ago |
voje | 1aff111cb9 | 5 years ago |
voje | 910955abb8 | 5 years ago |
voje | bf0970a90a | 5 years ago |
voje | 81395890ab | 5 years ago |
voje | fd94627fdb | 5 years ago |
voje | 2c5fef29df | 5 years ago |
voje | 8f747ef75c | 5 years ago |
@ -0,0 +1,16 @@
FROM cjvt-python-env

RUN mkdir -p /project/src/backend_flask
RUN mkdir -p /project/src/pkg
RUN mkdir -p /project/data

COPY src/backend_flask /project/src/backend_flask
COPY src/pkg /project/src/pkg

COPY data/appindex.json /project/data

COPY src/backend_flask/entrypoint.sh /.

COPY src/backend_flask/conf_files/prod_conf.yaml /project

ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
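For reference, the backend Makefile added later in this diff drives this Dockerfile; a minimal sketch of the equivalent manual build and run, assuming it is invoked from the repository root:

    docker build . -f Dockerfile-backend-flask -t backend-flask
    docker run -d --net host --name backend_flask backend-flask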
@ -1 +0,0 @@
/home/kristjan/kres_data/payload/kres_json/
@ -1 +0,0 @@
/home/kristjan/kres_mount/kres_parsed/tei/
Binary file not shown.
File diff suppressed because one or more lines are too long
@ -0,0 +1,5 @@
#!/bin/bash

echo "testing entrypoint."
$(exit 1)  # exit 1 runs in a subshell; the script still exits 0 below
exit 0
@ -0,0 +1,26 @@
# frontend
server {
    listen 80;
    server_name _;

    location / {
        root /srv/dist;
        index index.html index.htm;
    }

    location /home {
        return 301 /;
    }
}

# backend
server {
    listen 8084;
    server_name _;

    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_pass http://backend_flask:8084;
    }
}
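A quick smoke test of this proxy configuration might look like the following sketch, assuming the compose stack below is up and published on localhost:

    curl -i http://localhost/           # static frontend from /srv/dist
    curl -i http://localhost/home       # expect a 301 redirect to /
    curl -i http://localhost:8084/      # proxied to backend_flask:8084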
@ -0,0 +1,43 @@
version: '3.1'

services:

  my_mongo:
    image: my-mongo
    restart: always
    # ports:
    #   - 27017:27017
    expose:
      - 27017
    environment:
      MONGO_INITDB_ROOT_USERNAME: valuser
      MONGO_INITDB_ROOT_PASSWORD: valuserpass
    volumes:
      - ${HOME}/mongo_container/data/:/data/db

  mongo_express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_BASICAUTH_USERNAME: test
      ME_CONFIG_BASICAUTH_PASSWORD: test
      ME_CONFIG_MONGODB_ADMINUSERNAME: valadmin
      ME_CONFIG_MONGODB_ADMINPASSWORD: rolercoaster
      ME_CONFIG_MONGODB_SERVER: my_mongo

  backend_flask:
    image: backend-flask
    expose:
      - 8084

  proxy:
    image: nginx
    ports:
      - 80:80
      - 8084:8084
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf
      - ./src/frontend_vue/dist:/srv/dist
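A minimal sketch of bringing this stack up, assuming the my-mongo and backend-flask images have already been built and ./src/frontend_vue/dist exists:

    docker-compose up -d
    # mongo-express admin UI (basic auth test/test):
    curl -i http://localhost:8081/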
@ -0,0 +1,16 @@
IMG="backend-flask"
CNT="backend_flask"

clean:
	- docker rm -f $(CNT)

run: clean build
	docker run -d --net host --name $(CNT) $(IMG)
	docker logs -f $(CNT)

build: build-cjvt-python-env
	# docker build . -f ../../Dockerfile-backend-flask -t $(IMG)
	cd ../..; docker build . -f Dockerfile-backend-flask -t $(IMG)

build-cjvt-python-env:
	cd ../../dockerfiles/python-env; $(MAKE) build
@ -1,6 +1,5 @@
---
debug: True
port: 8084
host: 0.0.0.0
logfile: "/var/log/valency_backend.log"
---
appindex: /project/data/appindex.json
@ -0,0 +1,2 @@
MONGO_URI = "mongodb://sizif:p5e3r4u8t7@my_mongo:27017/valdb"
MONGO_AUTH_SOURCE = 'admin'
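A hedged sketch of verifying this connection string with the legacy mongo shell; it assumes the sizif user exists and, per MONGO_AUTH_SOURCE above, authenticates against the admin database:

    mongo "mongodb://sizif:p5e3r4u8t7@my_mongo:27017/valdb?authSource=admin" \
        --eval 'db.runCommand({ping: 1})'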
@ -0,0 +1,8 @@
#!/bin/bash

pip3 install -e /project/src/pkg/cjvt-corpusparser/.
pip3 install -e /project/src/pkg/valency/.
pip3 install -e /project/src/pkg/seqparser/.

cd /project/src/backend_flask
# serve the Flask app on all interfaces at 8084 (-t sets the worker timeout)
gunicorn -t 4 -b 0.0.0.0:8084 app:app
@ -1,73 +0,0 @@
# Deprecated: headword creation moved to be part of corpusparser,
# index creation moved to app.py as a preprocessing (with exit) step

import pymongo

CORPORA = ["kres", "ssj"]

if __name__ == "__main__":

    valdb = None  # was a pymongo database handle when this script was in use

    def helper_tid_to_token(tid, tokens):
        for t in tokens:
            if t["tid"] == tid:
                return t
        return None

    # update entries (add headwords and functors for indexing)
    for corpus in CORPORA:
        for e in valdb[corpus].find({}):
            if e["srl_links"] is None:
                e["headwords"] = []
                e["functors"] = []
            else:
                hw_tids = list(set([x["from"] for x in e["srl_links"]]))
                hw_tokens = [helper_tid_to_token(tid, e["tokens"]) for tid in hw_tids]
                headwords = [(t["lemma"] if t["msd"][0] == "G" else t["lemma"] + "_") for t in hw_tokens]
                e["headwords"] = headwords

                functors = list(set([x["afun"] for x in e["srl_links"]]))
                e["functors"] = functors

            valdb[corpus].save(e)

        valdb[corpus].ensure_index([("headwords", pymongo.ASCENDING)])
        valdb[corpus].ensure_index([("functors", pymongo.ASCENDING)])

    # create app_index (used in frontend, left side word index)
    tmp_app_index = {c: {} for c in CORPORA}
    for corpus in CORPORA:
        res_hws = {}
        res_fns = {}
        for e in valdb[corpus].find({}):
            if "headwords" not in e:
                continue
            for hw in e["headwords"]:
                if hw in res_hws:
                    res_hws[hw] += 1
                else:
                    res_hws[hw] = 1
            if "functors" not in e:
                continue
            for fn in e["functors"]:
                if fn in res_fns:
                    res_fns[fn] += 1
                else:
                    res_fns[fn] = 1

        alphabetical = {}
        for k, e in res_hws.items():
            fst = k[0].lower()
            if fst in alphabetical:
                alphabetical[fst].append((k, e))
            else:
                alphabetical[fst] = [(k, e)]

        for k, e in alphabetical.items():
            alphabetical[k] = sorted(e, key=lambda x: x[0])
        tmp_app_index[corpus]["words"] = alphabetical

        functors = [(k, e) for (k, e) in res_fns.items()]
        functors = sorted(functors, key=lambda x: x[0])
        tmp_app_index[corpus]["functors"] = functors

    valdb.appindex.update({"dockey": "appindex"}, {"dockey": "appindex", "data": tmp_app_index}, upsert=True)
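The appindex document written by this (now removed) script can be inspected directly; a sketch using the mongo shell, with the root credentials from the compose file above (an assumption) and the dockey used in the script:

    mongo "mongodb://valuser:valuserpass@my_mongo:27017/valdb?authSource=admin" \
        --eval 'db.appindex.findOne({dockey: "appindex"})'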
@ -1,3 +1,3 @@
{
-    "api_addr": "http://193.2.76.103"
+    "api_addr": "http://193.2.76.103:8084"
}
@ -1,3 +1,3 @@
{
-    "api_addr": "http://193.2.76.103:8084"
+    "api_addr": "http://0.0.0.0:8084"
}
@ -0,0 +1 @@
<!DOCTYPE html><html><head><meta charset=utf-8><meta name=viewport content="width=device-width,initial-scale=1"><title>vue_frontend</title><link href=/static/css/app.05a420a551b5bded5dfec6b370d3edca.css rel=stylesheet></head><body><div id=app></div><script type=text/javascript src=/static/js/manifest.2ae2e69a05c33dfc65f8.js></script><script type=text/javascript src=/static/js/vendor.5d3d2fd333c62579d227.js></script><script type=text/javascript src=/static/js/app.8538f7133303d3e391b2.js></script></body></html>
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -0,0 +1,2 @@
!function(r){var n=window.webpackJsonp;window.webpackJsonp=function(e,u,c){for(var f,i,p,a=0,l=[];a<e.length;a++)i=e[a],o[i]&&l.push(o[i][0]),o[i]=0;for(f in u)Object.prototype.hasOwnProperty.call(u,f)&&(r[f]=u[f]);for(n&&n(e,u,c);l.length;)l.shift()();if(c)for(a=0;a<c.length;a++)p=t(t.s=c[a]);return p};var e={},o={2:0};function t(n){if(e[n])return e[n].exports;var o=e[n]={i:n,l:!1,exports:{}};return r[n].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=r,t.c=e,t.d=function(r,n,e){t.o(r,n)||Object.defineProperty(r,n,{configurable:!1,enumerable:!0,get:e})},t.n=function(r){var n=r&&r.__esModule?function(){return r.default}:function(){return r};return t.d(n,"a",n),n},t.o=function(r,n){return Object.prototype.hasOwnProperty.call(r,n)},t.p="/",t.oe=function(r){throw console.error(r),r}}([]);
//# sourceMappingURL=manifest.2ae2e69a05c33dfc65f8.js.map
@ -0,0 +1 @@
{"version":3,"sources":["webpack:///webpack/bootstrap d176f5affa434246605f"],"names":["parentJsonpFunction","window","chunkIds","moreModules","executeModules","moduleId","chunkId","result","i","resolves","length","installedChunks","push","Object","prototype","hasOwnProperty","call","modules","shift","__webpack_require__","s","installedModules","2","exports","module","l","m","c","d","name","getter","o","defineProperty","configurable","enumerable","get","n","__esModule","object","property","p","oe","err","console","error"],"mappings":"aACA,IAAAA,EAAAC,OAAA,aACAA,OAAA,sBAAAC,EAAAC,EAAAC,GAIA,IADA,IAAAC,EAAAC,EAAAC,EAAAC,EAAA,EAAAC,KACQD,EAAAN,EAAAQ,OAAoBF,IAC5BF,EAAAJ,EAAAM,GACAG,EAAAL,IACAG,EAAAG,KAAAD,EAAAL,GAAA,IAEAK,EAAAL,GAAA,EAEA,IAAAD,KAAAF,EACAU,OAAAC,UAAAC,eAAAC,KAAAb,EAAAE,KACAY,EAAAZ,GAAAF,EAAAE,IAIA,IADAL,KAAAE,EAAAC,EAAAC,GACAK,EAAAC,QACAD,EAAAS,OAAAT,GAEA,GAAAL,EACA,IAAAI,EAAA,EAAYA,EAAAJ,EAAAM,OAA2BF,IACvCD,EAAAY,IAAAC,EAAAhB,EAAAI,IAGA,OAAAD,GAIA,IAAAc,KAGAV,GACAW,EAAA,GAIA,SAAAH,EAAAd,GAGA,GAAAgB,EAAAhB,GACA,OAAAgB,EAAAhB,GAAAkB,QAGA,IAAAC,EAAAH,EAAAhB,IACAG,EAAAH,EACAoB,GAAA,EACAF,YAUA,OANAN,EAAAZ,GAAAW,KAAAQ,EAAAD,QAAAC,IAAAD,QAAAJ,GAGAK,EAAAC,GAAA,EAGAD,EAAAD,QAKAJ,EAAAO,EAAAT,EAGAE,EAAAQ,EAAAN,EAGAF,EAAAS,EAAA,SAAAL,EAAAM,EAAAC,GACAX,EAAAY,EAAAR,EAAAM,IACAhB,OAAAmB,eAAAT,EAAAM,GACAI,cAAA,EACAC,YAAA,EACAC,IAAAL,KAMAX,EAAAiB,EAAA,SAAAZ,GACA,IAAAM,EAAAN,KAAAa,WACA,WAA2B,OAAAb,EAAA,SAC3B,WAAiC,OAAAA,GAEjC,OADAL,EAAAS,EAAAE,EAAA,IAAAA,GACAA,GAIAX,EAAAY,EAAA,SAAAO,EAAAC,GAAsD,OAAA1B,OAAAC,UAAAC,eAAAC,KAAAsB,EAAAC,IAGtDpB,EAAAqB,EAAA,IAGArB,EAAAsB,GAAA,SAAAC,GAA8D,MAApBC,QAAAC,MAAAF,GAAoBA","file":"static/js/manifest.2ae2e69a05c33dfc65f8.js","sourcesContent":[" \t// install a JSONP callback for chunk loading\n \tvar parentJsonpFunction = window[\"webpackJsonp\"];\n \twindow[\"webpackJsonp\"] = function webpackJsonpCallback(chunkIds, moreModules, executeModules) {\n \t\t// add \"moreModules\" to the modules object,\n \t\t// then flag all \"chunkIds\" as loaded and fire callback\n \t\tvar moduleId, chunkId, i = 0, resolves = [], result;\n \t\tfor(;i < chunkIds.length; i++) {\n \t\t\tchunkId = chunkIds[i];\n \t\t\tif(installedChunks[chunkId]) {\n \t\t\t\tresolves.push(installedChunks[chunkId][0]);\n \t\t\t}\n \t\t\tinstalledChunks[chunkId] = 0;\n \t\t}\n \t\tfor(moduleId in moreModules) {\n \t\t\tif(Object.prototype.hasOwnProperty.call(moreModules, moduleId)) {\n \t\t\t\tmodules[moduleId] = moreModules[moduleId];\n \t\t\t}\n \t\t}\n \t\tif(parentJsonpFunction) parentJsonpFunction(chunkIds, moreModules, executeModules);\n \t\twhile(resolves.length) {\n \t\t\tresolves.shift()();\n \t\t}\n \t\tif(executeModules) {\n \t\t\tfor(i=0; i < executeModules.length; i++) {\n \t\t\t\tresult = __webpack_require__(__webpack_require__.s = executeModules[i]);\n \t\t\t}\n \t\t}\n \t\treturn result;\n \t};\n\n \t// The module cache\n \tvar installedModules = {};\n\n \t// objects to store loaded and loading chunks\n \tvar installedChunks = {\n \t\t2: 0\n \t};\n\n \t// The require function\n \tfunction __webpack_require__(moduleId) {\n\n \t\t// Check if module is in cache\n \t\tif(installedModules[moduleId]) {\n \t\t\treturn installedModules[moduleId].exports;\n \t\t}\n \t\t// Create a new module (and put it into the cache)\n \t\tvar module = installedModules[moduleId] = {\n \t\t\ti: moduleId,\n \t\t\tl: false,\n \t\t\texports: {}\n \t\t};\n\n \t\t// Execute the module function\n \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n \t\t// Flag the module as 
loaded\n \t\tmodule.l = true;\n\n \t\t// Return the exports of the module\n \t\treturn module.exports;\n \t}\n\n\n \t// expose the modules object (__webpack_modules__)\n \t__webpack_require__.m = modules;\n\n \t// expose the module cache\n \t__webpack_require__.c = installedModules;\n\n \t// define getter function for harmony exports\n \t__webpack_require__.d = function(exports, name, getter) {\n \t\tif(!__webpack_require__.o(exports, name)) {\n \t\t\tObject.defineProperty(exports, name, {\n \t\t\t\tconfigurable: false,\n \t\t\t\tenumerable: true,\n \t\t\t\tget: getter\n \t\t\t});\n \t\t}\n \t};\n\n \t// getDefaultExport function for compatibility with non-harmony modules\n \t__webpack_require__.n = function(module) {\n \t\tvar getter = module && module.__esModule ?\n \t\t\tfunction getDefault() { return module['default']; } :\n \t\t\tfunction getModuleExports() { return module; };\n \t\t__webpack_require__.d(getter, 'a', getter);\n \t\treturn getter;\n \t};\n\n \t// Object.prototype.hasOwnProperty.call\n \t__webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };\n\n \t// __webpack_public_path__\n \t__webpack_require__.p = \"/\";\n\n \t// on error function for async loading\n \t__webpack_require__.oe = function(err) { console.error(err); throw err; };\n\n\n\n// WEBPACK FOOTER //\n// webpack/bootstrap d176f5affa434246605f"],"sourceRoot":""}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@ -0,0 +1,9 @@
SSKJ_HTML = /home/kristjan/git/diploma/data/sskj/sskj2_v1.html
SSKJ_JSON = "./sskj_senses.json"
WORDLIST = "./wordlist.json"

gen_json_files:
	cd seqparser; python3 main.py \
		--sskj-html=$(SSKJ_HTML) \
		--sskj-json=$(SSKJ_JSON) \
		--wordlist=$(WORDLIST)
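Note that main.py (added below) dispatches on an --operation flag which this recipe does not pass; a sketch of the explicit invocations for the two JSON-generating steps, with the paths defined above:

    cd seqparser
    python3 main.py --operation=gen_sskj_json \
        --sskj-html=/home/kristjan/git/diploma/data/sskj/sskj2_v1.html \
        --sskj-json=./sskj_senses.json
    python3 main.py --operation=gen_wordlist \
        --sskj-json=./sskj_senses.json --wordlist=./wordlist.json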
@ -0,0 +1 @@
bs4
@ -0,0 +1,313 @@
from bs4 import BeautifulSoup as BS
import re
from collections import defaultdict
from time import time
import pickle
import json
from copy import deepcopy as DC
from pathlib import Path

# Match sense ordinals (1., 2., ...)
rord = re.compile(r"^ *[0-9]+\. *$")

# Get rid of accented characters.
intab = "ÁÉÍÓÚàáäçèéêìíîñòóôöùúüčŔŕ"
outtb = "AEIOUaaaceeeiiinoooouuučRr"
transtab = str.maketrans(intab, outtb)


def d_time(fun):
    def wrapper(*args, **kwargs):
        tstart = time()
        fun(*args, **kwargs)
        duration = time() - tstart
        print("Function {} ran for {:.2f} s.".format(
            fun.__name__, duration))
    return wrapper


class Seqparser:
    def __init__(self):
        pass

    @d_time
    def html_to_verb_adj_json(self, infile, outfile):
        out_dict = defaultdict(list)
        with Path(infile).open("rb") as fp:
            for line in fp:
                data = self.parse_line(line)
                if data is None:
                    continue
                out_dict[data["izt_clean"]].append(data)
        with Path(outfile).open("w") as fp:
            json.dump(dict(out_dict), fp)

    @d_time
    def generate_sskj_wordlist(self, in_json_file, out_wordlist):
        wordlist = None
        with Path(in_json_file).open("r") as fp:
            jdata = json.load(fp)
            wordlist = list(jdata.keys())
        with Path(out_wordlist).open("w") as fp:
            json.dump({"wordlist": wordlist}, fp)

    # main functions
    def html_to_raw_pickle(self, sskj_html_filepath, raw_pickle_filepath):
        entries = dict(self.parse_file(sskj_html_filepath, self.parse_line))
        print("entries len: " + str(len(entries)))
        with open(raw_pickle_filepath, "wb") as f:
            tmpstr = json.dumps(dict(entries))
            pickle.dump(tmpstr, f)
        # debugging

    def raw_pickle_to_parsed_pickle(
        self, raw_pickle_filepath, parsed_pickle_filepath,
        se_list_filepath
    ):
        data = self.load_raw_pickle(raw_pickle_filepath)
        print("raw_pickle data len: " + str(len(data)))
        se_list = self.gen_se_list(data)
        print("se_list len: " + str(len(se_list)))
        with open(se_list_filepath, "wb") as f:
            pickle.dump(se_list, f)
        data1 = self.remove_se(data)
        data2 = self.reorganize(data1, se_list)
        print("data2 len: " + str(len(data2.keys())))
        with open(parsed_pickle_filepath, "wb") as f:
            pickle.dump(data2, f)

    # helper html reading functions
    def parse_file(self, path, f_parse_line):
        tstart = time()
        entries = defaultdict(list)
        with open(path, "r") as f:
            for line in f:
                data = f_parse_line(line)
                if data is not None:
                    entries[data["izt_clean"]].append(data)
        print("parse_file({}) in {:.2f}s".format(path, time() - tstart))
        return entries

    def parse_line(self, line):
        def helper_bv_set(g_or_p):
            if g_or_p not in ["G", "P"]:
                print("Err g_or_p.")
                exit(1)
            if data.get("bv") is not None:
                if data["bv"] != g_or_p:
                    print(str(line))
                    # exit(1)
            data["bv"] = g_or_p

        data = {
            "izt": "",
            "izt_clean": "",
            "senses": defaultdict(list)
        }
        soup = BS(line, "html.parser")

        current_sense_id = "0"
        for span in soup.find_all("span"):

            # sense id
            if span.string is not None:
                rmatch = rord.match(span.string)
                if rmatch is not None:
                    current_sense_id = rmatch.group().strip()

            title = span.attrs.get("title")
            if title is not None:
                title = title.lower()

                # only verbs and adjectives
                if "glagol" in title:
                    helper_bv_set("G")
                    data["bv_full"] = title
                elif "pridevn" in title:
                    helper_bv_set("P")
                    data["bv_full"] = title

                # žšč
                if title == "iztočnica":
                    data["izt"] = span.string
                    data["izt_clean"] = span.string.translate(transtab).lower()

                # sense description
                if title == "razlaga" and span.string is not None:
                    data["senses"][current_sense_id].append(
                        ("razl", span.string))
                    if "pridevnik od" in span.string:
                        helper_bv_set("P")

                if title == "sopomenka":
                    subspan = span.find_all("a")[0]
                    if subspan.string is not None:
                        data["senses"][current_sense_id].append(
                            ("sopo", subspan.string))

        # save verbs and adjectives
        if (
            ("bv" not in data) or
            (data["bv"] != "P" and data["bv"] != "G")
        ):
            return None

        # sanity check
        if data["bv"] == "P" and " se" in data["izt_clean"]:
            print(data)
            exit(1)

        # append _ to adjective keywords
        if data["bv"] == "P":
            data["izt_clean"] = data["izt_clean"] + "_"

        # cleanup
        if "bv" not in data:
            print("Should not be here (no bv).")
            exit(1)
        del(data["bv"])
        if "bv_full" in data:
            del(data["bv_full"])

        return data

    # helper functions
    def load_raw_pickle(self, raw_pickle_filepath):
        with open(raw_pickle_filepath, "rb") as f:
            tmpstr = pickle.load(f)
            return json.loads(tmpstr)

    def helper_loop(self, data, fnc):
        for k, lst in data.items():
            for el in lst:
                fnc(el)

    def gen_se_list(self, data):

        def fnc1(el):
            ic = el["izt_clean"]
            if " se" in ic:
                se_list.append(ic)

        def fnc2(el):
            ic = el["izt_clean"]
            if ic in se_pruned:
                se_pruned.remove(ic)

        # hw entries that only exist with " se"
        se_list = []
        self.helper_loop(data, fnc1)
        se_pruned = set([hw.split(" se")[0] for hw in se_list])
        self.helper_loop(data, fnc2)
        return sorted(list(se_pruned))

    def remove_se(self, data):

        def fnc1(el):
            nel = DC(el)
            ic = nel["izt_clean"]
            if " se" in ic:
                nic = ic.split(" se")[0]
                nel["izt_clean"] = nic
            data_new[nel["izt_clean"]].append(nel)

        data_new = defaultdict(list)
        self.helper_loop(data, fnc1)
        return dict(data_new)

    def reorganize(self, data, se_list):
        # some hw entries have several headwords,
        # some senses have subsenses;
        # index everything, make 1 object per hw

        def helper_prune(sense_str):
            # remove space padding
            sense_str = sense_str.strip()

            if len(sense_str) == 1:
                return sense_str

            # remove banned characters from string ending
            banned = ": ; . , - ! ?".split(" ")
            if sense_str[-1] in banned:
                return sense_str[:-1]

            return sense_str

        data_new = {}
        for k, lst in data.items():
            new_el = {
                "hw": k,
                "has_se": k in se_list,
                "senses": []
            }

            # if there is a single hw entry, hw_id is 0
            if len(lst) == 1:
                homonym_id = -1
            else:
                homonym_id = 0

            # loop homonyms
            for el in lst:
                homonym_id += 1
                # loop top lvl sense ids
                for sense_id, sens_lst in el["senses"].items():
                    # loop subsenses
                    for i, sens in enumerate(sens_lst):
                        nsid = sense_id.split(".")[0]
                        if len(sens_lst) == 1:
                            nsid += "-0"
                        else:
                            nsid += ("-" + str(i + 1))
                        new_sense = {
                            "homonym_id": homonym_id,
                            # sense_id: sense_id-subsense_id
                            "sense_id": nsid,
                            "sense_type": sens[0],
                            "sense_desc": helper_prune(sens[1]),
                        }
                        new_el["senses"].append(new_sense)
            hw = new_el["hw"]
            if hw in data_new:
                print("Shouldn't be here.")
                print(new_el)
                exit(1)
            data_new[hw] = DC(new_el)
        # return data_new

        # check
        for hw, el in data_new.items():
            for sens in el["senses"]:
                if sens["sense_desc"] is None:
                    print(sens)

        return data_new


def plst(lst):
    for el in lst:
        print(el)


if __name__ == "__main__":
    datapath = "../../../data"
    html_filepath = datapath + "/sskj/sskj2_v1.html"
    raw_pickle_filepath = datapath + "/tmp_pickles/raw_sskj.pickle"
    parsed_pickle_filepath = datapath + "/no_del_pickles/sskj_senses.pickle"
    se_list_filepath = datapath + "/no_del_pickles/se_list.pickle"

    p = Seqparser()

    if True:
        print("html_to_raw_pickle({}, {})".format(
            html_filepath, raw_pickle_filepath))
        print("Big file, this might take a while (2 min).")
        tstart = time()
        p.html_to_raw_pickle(html_filepath, raw_pickle_filepath)
        print("Finished in {:.2f}.".format(time() - tstart))

    if False:
        print("raw_pickle_to_parsed_pickle({}, {}, {})".format(
            raw_pickle_filepath, parsed_pickle_filepath, se_list_filepath))
        tstart = time()
        p.raw_pickle_to_parsed_pickle(
            raw_pickle_filepath, parsed_pickle_filepath, se_list_filepath)
        print("Finished in {:.2f}.".format(time() - tstart))
    print("Done.")
@ -0,0 +1,68 @@
from Seqparser import Seqparser
import argparse
import sys
from pathlib import Path
import json
import datetime
import hashlib
from pymongo import MongoClient

SSKJ_USER = "sskj2"

if __name__ == "__main__":
    aparser = argparse.ArgumentParser()
    aparser.add_argument("--sskj-html", type=str)
    aparser.add_argument("--sskj-json", type=str)
    aparser.add_argument("--wordlist", type=str)
    aparser.add_argument("--operation", type=str)
    aparser.add_argument("--dbaddr", type=str)
    aparser.add_argument("--dbuser", type=str)
    aparser.add_argument("--dbpass", type=str)
    args = aparser.parse_args()

    if args.operation == "gen_sskj_json":
        sqp = Seqparser()
        sqp.html_to_verb_adj_json(args.sskj_html, args.sskj_json)
        sys.exit()

    if args.operation == "gen_wordlist":
        sqp = Seqparser()
        sqp.generate_sskj_wordlist(args.sskj_json, args.wordlist)
        sys.exit()

    if args.operation == "senses_to_db":
        db_entries = []
        tmp_dt = datetime.datetime.utcnow()
        with Path(args.sskj_json).open("r") as fp:
            jdata = json.load(fp)
            # print(jdata[list(jdata.keys())[201]])
            for hw, entry in jdata.items():
                for key, sense in entry[0]["senses"].items():
                    desc = sense[0][1]
                    if sense[0][0] == "razl":
                        desc = desc[:-1]  # for some reason, descriptions contain a ':'
                    else:
                        desc = sense[0][0] + ": " + desc
                    tmp_entry = {
                        "desc": desc,
                        "hw": hw,
                        "author": SSKJ_USER
                    }
                    tmp_entry["sense_id"] = "{}-{}".format(
                        SSKJ_USER,
                        hashlib.sha256(str(tmp_entry).encode("utf-8")).hexdigest()[:10]
                    )
                    tmp_entry["date"] = tmp_dt
                    db_entries.append(tmp_entry)
        print(len(db_entries))

        # db login
        client = MongoClient(
            "mongodb://{}".format(args.dbaddr),
            username=args.dbuser,
            password=args.dbpass,
            authSource="valdb",
            authMechanism='SCRAM-SHA-1'
        )
        valdb = client.valdb
        valdb.senses.insert_many(db_entries)
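A sketch of the remaining senses_to_db operation against the compose stack above; the user and password are illustrative placeholders:

    python3 main.py --operation=senses_to_db \
        --sskj-json=./sskj_senses.json \
        --dbaddr=my_mongo:27017 --dbuser=sizif --dbpass=<password>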
@ -0,0 +1,11 @@
from setuptools import setup

setup(
    name='seqparser',
    version='0.0.1',
    description='Parser for sskj2 html dump.',
    author='Kristjan Voje',
    author_email='kristjan.voje@gmail.com',
    license='MIT',
    packages=['seqparser'],
)
@ -1,96 +0,0 @@
import logging

log = logging.getLogger(__name__)


class Frame():
    def __init__(self, tids, deep_links=None, slots=None, hw=None):
        self.hw = hw
        self.tids = tids  # list of tokens with the same hw_lemma
        # Each tid = "S123.t123";
        # you can get sentence with vallex.get_sentence(S123)
        self.slots = []
        if slots is None:
            self.slots = self.init_slots(deep_links)
        else:
            self.slots = slots
        self.sense_info = {}
        self.sentences = None  # Used for passing to view in app.py, get_frames
        self.aggr_sent = None  # Dictionary { hw: self.sentences idx }

    def to_json(self):
        ret = {
            "hw": self.hw,
            "tids": self.tids,
            "slots": [slot.to_json() for slot in self.slots],
            "sentences": self.sentences,
            "aggr_sent": self.aggr_sent,
            "sense_info": self.sense_info
        }
        return ret

    def init_slots(self, deep):
        slots = []
        for link in deep:
            slots.append(Slot(
                functor=link["functor"],
                tids=[link["to"]]
            ))
        return slots

    def sort_slots(self):
        # ACT, PAT, alphabetically
        srt1 = [
            x for x in self.slots
            if (x.functor == "ACT" or
                x.functor == "PAT")
        ]
        srt1 = sorted(srt1, key=lambda x: x.functor)
        srt2 = [
            x for x in self.slots
            if (x.functor != "ACT" and
                x.functor != "PAT")
        ]
        srt2 = sorted(srt2, key=lambda x: x.functor)
        self.slots = (srt1 + srt2)

    def to_string(self):
        ret = "Frame:\n"
        ret += "sense_info: {}\n".format(str(self.sense_info))
        ret += "tids: ["
        for t in self.tids:
            ret += (str(t) + ", ")
        ret += "]\n"
        if self.slots is not None:
            ret += "slots:\n"
            for sl in self.slots:
                ret += (sl.to_string() + "\n")
        return ret


class Slot():
    # Each slot is identified by its functor (ACT, PAT, ...)
    # It consists of different tokens.
    def __init__(self, functor, tids=None, count=None):
        self.functor = functor
        self.tids = tids or []  # combining multiple sentences vertically
        self.count = count or 1

    def to_string(self):
        ret = "---- Slot:\n"
        ret += "functor: {}\n".format(self.functor)
        ret += "tids: ["
        for t in self.tids:
            ret += (str(t) + ", ")
        ret += "]\n"
        ret += "----\n"
        return ret

    def to_json(self):
        ret = {
            "functor": self.functor,
            "tids": self.tids,
            "count": self.count
        }
        return ret