diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..d3c4f22
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "src/pkg/cjvt-corpusparser"]
+ path = src/pkg/cjvt-corpusparser
+ url = git@gitea.cjvt.si:kristjan/cjvt-corpusparser.git
diff --git a/Makefile b/Makefile
index 8e7660b..4ccdfc6 100644
--- a/Makefile
+++ b/Makefile
@@ -11,9 +11,9 @@ MAKE_ROOT = $(shell pwd)
SSJ_FILE = "$(MAKE_ROOT)/data/samples/ssj_example/ssj500k-sl.body.sample.xml"
KRES_FOLDER = "$(MAKE_ROOT)/data/samples/kres_example"
KRES_SRL_FOLDER = "$(MAKE_ROOT)/data/kres_srl"
-OUTPUT = "file"
+OUTPUT = "db"
OUTDIR = "/home/voje/workdir/test_out"
-DBADDR = ""
+DBADDR = "0.0.0.0:27017" # don't use localhost
DB_ADM_USER = testadmin
DB_ADM_PASS = testadminpass
@@ -21,25 +21,42 @@ DB_USR_USER = testuser
DB_USR_PASS = testuserpass
export
-.PHONY: dev-env preflight
+.PHONY: python-env fill_database
all:
echo "Select an argument"
-# prereq (environment)
-dev-env:
- cd dockerfiles/dev-env; $(MAKE)
+# create database (run from host):
+# !!! may need to be run several times, until the containers come online
+# Successful if you see the lines:
+# Successfully added user: { "user" : "testuser", "roles" : [ "readWrite" ] }
+# bye
+database-service:
+ cd dockerfiles/database; $(MAKE) build_run
-# run these inside dev-env container
+database-users:
+ cd dockerfiles/database; $(MAKE) create_users
+
+# also useful, if we want to restart the db
+database-clean:
+ cd dockerfiles/database; $(MAKE) clean_stack
+
+# create python-env container
+python-env:
+ cd dockerfiles/python-env; $(MAKE)
+
+# inside the container, install our packages
+python-env-install:
+ pip3 install -e src/pkg/cjvt-corpusparser/.
+
+# from inside python-env container:
data/samples:
cd data; tar xzvf samples.tar.gz
-# installs our python code as packages
-# when debugging, run this once, then run python3 ... by hand
-preflight: data/samples
- pip3 install -e src/pkg/corpusparser/.
- python3 src/pkg/corpusparser/corpusparser/main.py --kres-folder $(KRES_FOLDER) \
- --ssj-file $(SSJ_FILE) --kres-srl-folder $(KRES_SRL_FOLDER) --output $(OUTPUT) --outdir $(OUTDIR) --dbaddr $(DBADDR)
+# from inside python-env container:
+fill_database: data/samples
+ python3 src/pkg/cjvt-corpusparser/corpusparser/main.py --kres-folder $(KRES_FOLDER) \
+ --ssj-file $(SSJ_FILE) --kres-srl-folder $(KRES_SRL_FOLDER) \
+ --output $(OUTPUT) --outdir $(OUTDIR) --dbaddr $(DBADDR) \
+ --dbuser $(DB_USR_USER) --dbpass $(DB_USR_PASS)
-database:
- cd dockerfiles/database; $(MAKE)
\ No newline at end of file
diff --git a/README.md b/README.md
index c35bf79..c9c868f 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,42 @@
# cjvt-valency
+Required submodules:
+
+* `https://gitea.cjvt.si/kristjan/cjvt-corpusparser.git`
+
+```bash
+$ git submodule init
+```
## Components
### Database (2 containers)
-Input:
+Spin up the database service and create users:
+```bash
+# $ make database-clean # opt
+$ make database-service
+$ make database-users
+```
+
+Populate the database with data from files:
* ssj500k.xml
* kres.xml
* kres_SRL.json
-Intermediate:
+Set path to files in `Makefile`.
+```bash
+# spin up a container with python env
+$ make python-env
-* ssj500k.json **HERE HERE**
-* kres.json
+# install our packages
+$ make python-env-install
-Output:
+# run the code
+$ make fill_database
+```
-* DB containing both corpora (1:1 map of the intermediate json formats)
+If all goes well, we should be able to inspect the database on `0.0.0.0:8087`.
### Flask backend (1 container)
diff --git a/dockerfiles/database/Makefile b/dockerfiles/database/Makefile
index 3eaaf2a..336fd59 100644
--- a/dockerfiles/database/Makefile
+++ b/dockerfiles/database/Makefile
@@ -8,10 +8,9 @@ STACKNAME = dbstack
.PHONY: start_db FORCE
-all: build_mongo run_stack create_users
+all: build_run create_users
-info:
- echo "[*] RUN THIS A FEW TIMES (THE STACK NEEDS TIME TO CLEAN UP AND TO SPIN UP THE NODES)"
+build_run: build_mongo run_stack
create.js: FORCE
diff --git a/dockerfiles/database/mongodb-stack.yml b/dockerfiles/database/mongodb-stack.yml
index c28db6a..0e74083 100644
--- a/dockerfiles/database/mongodb-stack.yml
+++ b/dockerfiles/database/mongodb-stack.yml
@@ -5,6 +5,8 @@ services:
my-mongo:
image: my-mongo
restart: always
+ ports:
+ - 27017:27017
environment:
MONGO_INITDB_ROOT_USERNAME: ${DB_ADM_USER}
MONGO_INITDB_ROOT_PASSWORD: ${DB_ADM_PASS}
diff --git a/dockerfiles/dev-env/Dockerfile b/dockerfiles/python-env/Dockerfile
similarity index 79%
rename from dockerfiles/dev-env/Dockerfile
rename to dockerfiles/python-env/Dockerfile
index 3a494d2..6b620bf 100644
--- a/dockerfiles/dev-env/Dockerfile
+++ b/dockerfiles/python-env/Dockerfile
@@ -12,6 +12,10 @@ RUN pip3 install \
pandas \
sklearn \
argparse \
- pathlib
+ pathlib \
+ pymongo
+
+RUN apt-get install -y \
+ curl
ENV PYTHONIOENCODING UTF-8
diff --git a/dockerfiles/dev-env/Makefile b/dockerfiles/python-env/Makefile
similarity index 87%
rename from dockerfiles/dev-env/Makefile
rename to dockerfiles/python-env/Makefile
index 7f5dd50..1074b4a 100644
--- a/dockerfiles/dev-env/Makefile
+++ b/dockerfiles/python-env/Makefile
@@ -1,4 +1,4 @@
-IMAGE_NAME="cjvt-dev-env"
+IMAGE_NAME="cjvt-python-env"
all: build run
@@ -14,5 +14,6 @@ run:
-v /etc/group:/etc/group \
-v $(MAKE_ROOT):/project \
-w /project \
+ --net host \
$(IMAGE_NAME) \
/bin/bash
diff --git a/src/pkg/cjvt-corpusparser b/src/pkg/cjvt-corpusparser
new file mode 160000
index 0000000..e599740
--- /dev/null
+++ b/src/pkg/cjvt-corpusparser
@@ -0,0 +1 @@
+Subproject commit e599740ac940be1b150d653dfbf66d0551ece3a3
diff --git a/src/pkg/corpusparser/corpusparser/Parser.py b/src/pkg/corpusparser/corpusparser/Parser.py
deleted file mode 100644
index d908d04..0000000
--- a/src/pkg/corpusparser/corpusparser/Parser.py
+++ /dev/null
@@ -1,196 +0,0 @@
-from pathlib import Path
-import re
-import json
-from lxml import etree
-import logging
-
-logging.basicConfig(level=logging.INFO)
-
-# Read input file(.xml, .json; kres or ssj500k).
-# Create an iterator that outputs resulting sentences (python dict format).
-class Parser():
-
- def __init__(self, corpus, infiles, logger=None):
-
- if corpus == "kres":
- self.kres_folder = Path(infiles[0])
- self.kres_srl_folder = Path(infiles[1])
- elif corpus == "ssj":
- self.ssj_file = Path(infiles[0])
- else:
- raise ValueError("Argument corpus should be 'ssj' or 'kres'.")
-
- self.corpus = corpus
- self.W_TAGS = ['w']
- self.C_TAGS = ['c']
- self.S_TAGS = ['S', 'pc']
- self.logger = logger or logging.getLogger(__name__)
- self.stats = {
- "parsed_count": 0,
- "missing_srl": []
- }
-
- def parse_jos_links(self, sent_el):
- if self.corpus == "kres":
- return self.parse_jos_links_kres(sent_el)
- else:
- # 'syntax' is the linkgroup we're looking for
- return self.parse_any_links_ssj(sent_el, "syntax")
-
- def parse_jos_links_kres(self, sent_el):
- lgrps = sent_el.findall(".//links")
- if len(lgrps) < 1:
- raise IOError("Can't find links.")
- res_links = []
- for link in lgrps[0]:
- res_links += [{
- "from": int(link.get("from").split(".")[-1]),
- "afun": link.get("afun"),
- "to": int(link.get("dep").split(".")[-1]),
- }]
- return res_links
-
- def parse_ssj_target_arg(self, text):
- # from: 0, to: 6
- #
- # from: 6, to: 7
- #
- lst = [x.split(".")[-1] for x in text.split(" ")]
- return [int(x[1:] if x[0] == "t" else 0) for x in lst]
-
- def parse_any_links_ssj(self, sent_el, links_type):
- lgrps = sent_el.findall(".//linkGrp")
- links = [x for x in lgrps if x.get("type") == links_type][0]
- res_links = []
- for link in links:
- tar = self.parse_ssj_target_arg(link.get("target"))
- res_links += [{
- "from": tar[0],
- "afun": link.get("ana").split(":")[1],
- "to": tar[1],
- }]
- return res_links
-
- def parse_srl_links(self, sent_el, sent_srl_links=None):
- if self.corpus == "kres":
- return self.parse_srl_links_kres(sent_el, sent_srl_links)
- else:
- return self.parse_any_links_ssj(sent_el, "SRL")
-
- def parse_srl_links_kres(self, sent_el, sent_srl_links):
- res_links = []
- for link in sent_srl_links:
- res_links += [{
- "from": int(link["from"]),
- "afun": link["arg"],
- "to": int(link["dep"]),
- }]
- # find the correspointing json file with srl links
- return res_links
-
- def sentence_generator(self):
- # Using generators so we don't copy a whole corpu around in memory.
- if self.corpus == "kres":
- for xml_file in self.kres_folder.iterdir():
- # self.parse_xml_file(xml_file)
- yield from self.parse_xml_file(xml_file)
- else:
- yield from self.parse_xml_file(self.ssj_file)
-
- def parse_xml_file(self, xml_file):
- srl_from_json = {}
- if self.corpus == "kres":
- # in case of kres, read the SRL links form a separate json file
- file_id = xml_file.name.split(".")[0]
- json_file = self.kres_srl_folder / Path(file_id).with_suffix(".srl.json")
- with json_file.open("r") as fp:
- srl_from_json = json.loads(fp.read())
-
- with xml_file.open("rb") as fp:
- # remove namespaces
- bstr = fp.read()
-
- utf8str = bstr.decode("utf-8")
- utf8str = re.sub('\\sxmlns="[^"]+"', '', utf8str, count=1)
- utf8str = re.sub(' xml:', ' ', utf8str)
-
- root = etree.XML(utf8str.encode("utf-8"))
-
- divs = [] # in ssj, there are divs, in Kres, there are separate files
- if self.corpus == "kres":
- divs = [root]
- else:
- divs = root.findall(".//div")
-
- res_dict = {}
-
- # parse divs
- for div in divs:
- f_id = div.get("id")
-
- # parse paragraphs
- for p in div.findall(".//p"):
- p_id = p.get("id").split(".")[-1]
-
- # parse sentences
- for s in p.findall(".//s"):
- s_id = s.get("id").split(".")[-1]
- sentence_text = ""
- sentence_tokens = []
-
- # parse tokens
- for el in s.iter():
- if el.tag in self.W_TAGS:
- el_id = el.get("id").split(".")[-1]
- if el_id[0] == 't':
- el_id = el_id[1:] # ssj W_TAG ids start with t
- sentence_text += el.text
- sentence_tokens += [{
- "word": True,
- "tid": int(el_id),
- "text": el.text,
- "lemma": el.get("lemma"),
- "msd": (el.get("msd") if self.corpus == "kres"
- else el.get("ana").split(":")[-1]),
- }]
- elif el.tag in self.C_TAGS:
- # only Kres' C_TAGS have ids
- el_id = el.get("id") or "none"
- el_id = el_id.split(".")[-1]
- sentence_text += el.text
- sentence_tokens += [{
- "word": False,
- "tid": (int(el_id) if self.corpus == "kres" else -1),
- "text": el.text,
- }]
- elif el.tag in self.S_TAGS:
- # Kres' doesn't contain .text
- sentence_text += " "
- else:
- # pass links and linkGroups
- pass
- sentence_id = "{}.{}.{}".format(f_id, p_id, s_id)
-
- jos_links = self.parse_jos_links(s)
-
- if self.corpus == "kres":
- srl_links_raw = srl_from_json.get(sentence_id)
- if srl_links_raw is None:
- srl_links_parsed = None
- self.stats["missing_srl"] += [(sentence_id, sentence_text)]
- else:
- srl_links_parsed = self.parse_srl_links(s, srl_links_raw)
- else:
- srl_links_parsed = self.parse_srl_links(s)
- if len(srl_links_parsed) == 0:
- self.stats["missing_srl"] += [(sentence_id, sentence_text)]
-
- sentence_entry = {
- "sid": sentence_id,
- "text": sentence_text,
- "tokens": sentence_tokens,
- "jos_links": jos_links,
- "srl_links": srl_links_parsed
- }
- self.stats["parsed_count"] += 1
- yield (xml_file, sentence_entry)
diff --git a/src/pkg/corpusparser/corpusparser/README.md b/src/pkg/corpusparser/corpusparser/README.md
deleted file mode 100644
index 0d41d60..0000000
--- a/src/pkg/corpusparser/corpusparser/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# corpusparser
-A tool for parsing ssj500k and Kres into a unified .json format.
-
-## Input:
-### ssj500k
-To parse ssj500k, point to the monolythic `ssj500k-sl.body.xml` file (tested on ssj500k 2.1).
-
-### Kres
-To parse Kres, point to folders:
-* Kres folder, containig several (around 20K) .xml files (`F00XXXXX.xml.parsed.xml`).
-* Kres SRL folder, containing SRL links for the corresponding F00...xml files (`F00XXXXX.srl.json`).
-
-## Internal data format
-This is the internal python dict data format. It can be stored to file as `.json` or stored into a database for application usage.
-```python
-{
- 'sid': 'F0034713.5.0',
- 'text': 'Mednarodni denarni sklad je odobril 30 milijard evrov vredno posojilo GrĨiji. ',
- 'tokens': [
- {'text': 'Mednarodni', 'lemma': 'mednaroden', 'msd': 'Ppnmeid', 'word': True, 'tid': 1},
- {'text': 'denarni', 'lemma': 'denaren', 'msd': 'Ppnmeid', 'word': True, 'tid': 2},
- {'text': 'sklad', 'lemma': 'sklad', 'msd': 'Somei', 'word': True, 'tid': 3},
- {'text': 'je', 'lemma': 'biti', 'msd': 'Gp-ste-n', 'word': True, 'tid': 4},
- {'text': 'odobril', 'lemma': 'odobriti', 'msd': 'Ggdd-em', 'word': True, 'tid': 5},
- {'text': '30', 'lemma': '30', 'msd': 'Kag', 'word': True, 'tid': 6},
- {'text': 'milijard', 'lemma': 'milijarda', 'msd': 'Sozmr', 'word': True, 'tid': 7}, # ...
- ]
- 'jos_links': [
- {'to': 1, 'from': 3, 'afun': 'dol'},
- {'to': 2, 'from': 3, 'afun': 'dol'},
- {'to': 3, 'from': 5, 'afun': 'ena'}, # ...
- ]
- 'srl_links': [
- {'to': 3, 'from': 5, 'afun': 'ACT'},
- {'to': 7, 'from': 5, 'afun': 'PAT'}
- ]
-}
-```
\ No newline at end of file
diff --git a/src/pkg/corpusparser/corpusparser/__init__.py b/src/pkg/corpusparser/corpusparser/__init__.py
deleted file mode 100644
index d993b8c..0000000
--- a/src/pkg/corpusparser/corpusparser/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from corpusparser.Parser import Parser
\ No newline at end of file
diff --git a/src/pkg/corpusparser/corpusparser/main.py b/src/pkg/corpusparser/corpusparser/main.py
deleted file mode 100644
index e0ba065..0000000
--- a/src/pkg/corpusparser/corpusparser/main.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from pathlib import Path
-from corpusparser import Parser
-import argparse
-import logging
-import json
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-## Main handles command line arguments and writing to files / DB.
-
-def ssj_to_json_file(sentence_generator, outfolder):
- # this funciton is based on the fact that files are parsed sequentially
- outfolder = Path(outfolder)
- outfolder.mkdir(parents=True, exist_ok=True)
- outfile = outfolder / "ssj500k.json"
-
- data_buffer = []
- for s in sentence_generator:
- sdata = s[1]
- data_buffer += [sdata]
-
- # outfile = Path(outfile)
- with outfile.open("w") as fp:
- logger.info("Writing to {}".format(outfile))
- json.dump(data_buffer, fp)
-
-def kres_to_json_files(sentence_generator, outfolder):
- outfolder = Path(outfolder) / "kres_json"
- outfolder.mkdir(parents=True, exist_ok=True)
-
- def write_buffer_to_file(outfile, outfile_buffer):
- logger.info("Writing file: {}".format(outfile))
- with outfile.open("w") as fp:
- json.dump(outfile_buffer, fp)
-
- outfile_buffer = None
- current_outfile = None
- for s in sentence_generator:
- infile = s[0]
- outfile = outfolder / Path(infile.name.split(".")[0]).with_suffix(".json")
-
- # parser sequentially parses files; when we're done with a file, write it out
- if current_outfile is None:
- current_outfile = outfile
- outfile_buffer = []
- elif outfile != current_outfile:
- write_buffer_to_file(current_outfile, outfile_buffer)
- current_outfile = outfile
- outfile_buffer = []
-
- # update buffer
- sdata = s[1]
- outfile_buffer += [sdata]
- write_buffer_to_file(current_outfile, outfile_buffer)
-
-
-
-def to_db():
- return "TODO"
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Parsing corpora kres and ssj500k.")
- parser.add_argument('--kres-folder', required=True)
- parser.add_argument('--kres-srl-folder', required=True)
- parser.add_argument('--ssj-file', required=True)
- parser.add_argument('--output', required=False, default=None)
- parser.add_argument('--outdir', required=False, default=None)
- parser.add_argument('--dbaddr', required=False, default=None)
- args = parser.parse_args()
-
- # parse ssj
- logger.info("Parsing ssj500k: {}".format(args.ssj_file))
- ssj_parser = Parser(
- corpus="ssj",
- infiles=[args.ssj_file],
- )
- # res = [x[1]["sid"] for x in ssj_parser.sentence_generator()]
- # logger.info("Parsed {} sentences (ssj500k)".format(len(res)))
-
- # ssj to json
- ssj_to_json_file(ssj_parser.sentence_generator(), args.outdir)
-
- # parse kres
- logger.info("Parsing Kres: {}".format(args.ssj_file))
- kres_parser = Parser(
- corpus="kres",
- infiles=[args.kres_folder, args.kres_srl_folder],
- )
- # res = [x[1]["sid"] for x in kres_parser.sentence_generator()]
- # logger.info("Parsed {} sentences (kres)".format(len(res)))
-
- # kres to json
- kres_to_json_files(kres_parser.sentence_generator(), args.outdir)
-
-
-## Handling output is situational --- implement it outside of Parser.
-## Parser returns tuples (orig_file, element)
-# 1. parse per-file and output to file (JSON)
-# 2. parse and save to DB
-
-# TODO
diff --git a/src/pkg/corpusparser/setup.py b/src/pkg/corpusparser/setup.py
deleted file mode 100644
index da967a7..0000000
--- a/src/pkg/corpusparser/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from setuptools import setup
-
-setup(name='corpusparser',
- version='0.0.1',
- description=u"Parser for kres and ssj500k",
- author=u"Kristjan Voje",
- author_email='kristjan.voje@gmail.com',
- license='MIT',
- zip_safe=False,
-)
\ No newline at end of file