make fill_database

voje 2019-03-17 17:25:59 +01:00
parent d8fd3ad30b
commit 2d4a6a152a
13 changed files with 72 additions and 373 deletions

.gitmodules (new file)

@@ -0,0 +1,3 @@
+[submodule "src/pkg/cjvt-corpusparser"]
+	path = src/pkg/cjvt-corpusparser
+	url = git@gitea.cjvt.si:kristjan/cjvt-corpusparser.git

Makefile

@@ -11,9 +11,9 @@ MAKE_ROOT = $(shell pwd)
 SSJ_FILE = "$(MAKE_ROOT)/data/samples/ssj_example/ssj500k-sl.body.sample.xml"
 KRES_FOLDER = "$(MAKE_ROOT)/data/samples/kres_example"
 KRES_SRL_FOLDER = "$(MAKE_ROOT)/data/kres_srl"
-OUTPUT = "file"
+OUTPUT = "db"
 OUTDIR = "/home/voje/workdir/test_out"
-DBADDR = ""
+DBADDR = "0.0.0.0:27017" # don't use localhost
 DB_ADM_USER = testadmin
 DB_ADM_PASS = testadminpass
@@ -21,25 +21,42 @@ DB_USR_USER = testuser
 DB_USR_PASS = testuserpass
 export
-.PHONY: dev-env preflight
+.PHONY: python-env fill_database
 all:
 	echo "Select an argument"
-# prereq (environment)
-dev-env:
-	cd dockerfiles/dev-env; $(MAKE)
-# run these inside dev-env container
+# create database (run from host):
+# !!! might need to run several times, so the containers come online
+# Successful if you see the lines:
+# Successfully added user: { "user" : "testuser", "roles" : [ "readWrite" ] }
+# bye
+database-service:
+	cd dockerfiles/database; $(MAKE) build_run
+database-users:
+	cd dockerfiles/database; $(MAKE) create_users
+# also useful, if we want to restart the db
+database-clean:
+	cd dockerfiles/database; $(MAKE) clean_stack
+# create python-env container
+python-env:
+	cd dockerfiles/python-env; $(MAKE)
+# inside the container, install our packages
+python-env-install:
+	pip3 install -e src/pkg/cjvt-corpusparser/.
+# from inside python-env container:
 data/samples:
 	cd data; tar xzvf samples.tar.gz
-# installs our python code as packages
-# when debugging, run this once, then run python3 ... by hand
-preflight: data/samples
-	pip3 install -e src/pkg/corpusparser/.
-	python3 src/pkg/corpusparser/corpusparser/main.py --kres-folder $(KRES_FOLDER) \
-		--ssj-file $(SSJ_FILE) --kres-srl-folder $(KRES_SRL_FOLDER) --output $(OUTPUT) --outdir $(OUTDIR) --dbaddr $(DBADDR)
-database:
-	cd dockerfiles/database; $(MAKE)
+# from inside python-env container:
+fill_database: data/samples
+	python3 src/pkg/cjvt-corpusparser/corpusparser/main.py --kres-folder $(KRES_FOLDER) \
+		--ssj-file $(SSJ_FILE) --kres-srl-folder $(KRES_SRL_FOLDER) \
+		--output $(OUTPUT) --outdir $(OUTDIR) --dbaddr $(DBADDR) \
+		--dbuser $(DB_USR_USER) --dbpass $(DB_USR_PASS)

README.md

@@ -1,23 +1,42 @@
 # cjvt-valency
+Required submodules:
+* `https://gitea.cjvt.si/kristjan/cjvt-corpusparser.git`
+```bash
+$ git submodule init
+```
 ## Components
 ### Database (2 containers)
-Input:
+Spin up the database service and create users:
+```bash
+# $ make database-clean # opt
+$ make database-service
+$ make database-users
+```
+Populate the database with data from files:
 * ssj500k.xml
 * kres.xml
 * kres_SRL.json
-Intermediate:
-* ssj500k.json **HERE HERE**
-* kres.json
-Output:
-* DB containing both corpora (1:1 map of the intermediate json formats)
+Set path to files in `Makefile`.
+```bash
+# spin up a container with python env
+$ make python-env
+# install our packages
+$ make python-env-install
+# run the code
+$ make fill_database
+```
+If all goes well, we should be able to inspect the database on `0.0.0.0:8087`.
 ### Flask backend (1 container)
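The README's final claim can be checked directly: with the stack up, the database should accept the `testuser` credentials from the `Makefile`. A minimal connectivity check (a sketch, not part of this commit; it assumes the default `DB_USR_USER`/`DB_USR_PASS` values, and the authSource may need adjusting depending on what `create_users` sets up):

```python
# Sketch: verify the mongo service is reachable with the Makefile's defaults
# (testuser/testuserpass at 0.0.0.0:27017).
from pymongo import MongoClient

client = MongoClient("0.0.0.0", 27017, username="testuser", password="testuserpass")
print(client.server_info()["version"])  # raises on connection or auth failure
```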

dockerfiles/database/Makefile

@@ -8,10 +8,9 @@ STACKNAME = dbstack
 .PHONY: start_db FORCE
-all: build_mongo run_stack create_users
-info:
-	echo "[*] RUN THIS A FEW TIMES (THE STACK NEEDS TIME TO CLEAN UP AND TO SPIN UP THE NODES)"
+all: build_run create_users
+build_run: build_mongo run_stack
 create.js: FORCE

dockerfiles/database/docker-compose.yml

@@ -5,6 +5,8 @@ services:
   my-mongo:
     image: my-mongo
     restart: always
+    ports:
+      - 27017:27017
     environment:
       MONGO_INITDB_ROOT_USERNAME: ${DB_ADM_USER}
       MONGO_INITDB_ROOT_PASSWORD: ${DB_ADM_PASS}

dockerfiles/python-env/Dockerfile

@@ -12,6 +12,10 @@ RUN pip3 install \
 	pandas \
 	sklearn \
 	argparse \
-	pathlib
+	pathlib \
+	pymongo
+
+RUN apt-get install -y \
+	curl
 ENV PYTHONIOENCODING UTF-8

dockerfiles/python-env/Makefile

@@ -1,4 +1,4 @@
-IMAGE_NAME="cjvt-dev-env"
+IMAGE_NAME="cjvt-python-env"
 all: build run
@@ -14,5 +14,6 @@ run:
 	-v /etc/group:/etc/group \
 	-v $(MAKE_ROOT):/project \
 	-w /project \
+	--net host \
 	$(IMAGE_NAME) \
 	/bin/bash
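`--net host` puts the python-env container in the host's network namespace, which is what lets `DBADDR = "0.0.0.0:27017"` from the top-level `Makefile` reach the published mongo port. A quick reachability check from inside the container (a sketch using only the standard library; the address mirrors the `DBADDR` value):

```python
# Sketch: confirm the mongo port is reachable from the python-env container.
# "0.0.0.0" mirrors the Makefile's DBADDR value; on Linux it connects locally.
import socket

with socket.create_connection(("0.0.0.0", 27017), timeout=2):
    print("mongo port reachable")
```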

src/pkg/cjvt-corpusparser (new submodule)
@@ -0,0 +1 @@
+Subproject commit e599740ac940be1b150d653dfbf66d0551ece3a3

src/pkg/corpusparser/corpusparser/Parser.py (deleted)

@@ -1,196 +0,0 @@
from pathlib import Path
import re
import json
from lxml import etree
import logging

logging.basicConfig(level=logging.INFO)


# Read input file (.xml, .json; kres or ssj500k).
# Create an iterator that outputs resulting sentences (python dict format).
class Parser():

    def __init__(self, corpus, infiles, logger=None):
        if corpus == "kres":
            self.kres_folder = Path(infiles[0])
            self.kres_srl_folder = Path(infiles[1])
        elif corpus == "ssj":
            self.ssj_file = Path(infiles[0])
        else:
            raise ValueError("Argument corpus should be 'ssj' or 'kres'.")
        self.corpus = corpus
        self.W_TAGS = ['w']
        self.C_TAGS = ['c']
        self.S_TAGS = ['S', 'pc']
        self.logger = logger or logging.getLogger(__name__)
        self.stats = {
            "parsed_count": 0,
            "missing_srl": []
        }

    def parse_jos_links(self, sent_el):
        if self.corpus == "kres":
            return self.parse_jos_links_kres(sent_el)
        else:
            # 'syntax' is the linkgroup we're looking for
            return self.parse_any_links_ssj(sent_el, "syntax")

    def parse_jos_links_kres(self, sent_el):
        lgrps = sent_el.findall(".//links")
        if len(lgrps) < 1:
            raise IOError("Can't find links.")
        res_links = []
        for link in lgrps[0]:
            res_links += [{
                "from": int(link.get("from").split(".")[-1]),
                "afun": link.get("afun"),
                "to": int(link.get("dep").split(".")[-1]),
            }]
        return res_links

    def parse_ssj_target_arg(self, text):
        # from: 0, to: 6
        # <link ana="syn:modra" target="#ssj1.1.3 #ssj1.1.3.t6"/>
        # from: 6, to: 7
        # <link ana="syn:dol" target="#ssj1.1.3.t6 #ssj1.1.3.t7"/>
        lst = [x.split(".")[-1] for x in text.split(" ")]
        return [int(x[1:] if x[0] == "t" else 0) for x in lst]

    def parse_any_links_ssj(self, sent_el, links_type):
        lgrps = sent_el.findall(".//linkGrp")
        links = [x for x in lgrps if x.get("type") == links_type][0]
        res_links = []
        for link in links:
            tar = self.parse_ssj_target_arg(link.get("target"))
            res_links += [{
                "from": tar[0],
                "afun": link.get("ana").split(":")[1],
                "to": tar[1],
            }]
        return res_links

    def parse_srl_links(self, sent_el, sent_srl_links=None):
        if self.corpus == "kres":
            return self.parse_srl_links_kres(sent_el, sent_srl_links)
        else:
            return self.parse_any_links_ssj(sent_el, "SRL")

    def parse_srl_links_kres(self, sent_el, sent_srl_links):
        res_links = []
        for link in sent_srl_links:
            res_links += [{
                "from": int(link["from"]),
                "afun": link["arg"],
                "to": int(link["dep"]),
            }]
        # find the corresponding json file with srl links
        return res_links

    def sentence_generator(self):
        # Using generators so we don't copy a whole corpus around in memory.
        if self.corpus == "kres":
            for xml_file in self.kres_folder.iterdir():
                # self.parse_xml_file(xml_file)
                yield from self.parse_xml_file(xml_file)
        else:
            yield from self.parse_xml_file(self.ssj_file)

    def parse_xml_file(self, xml_file):
        srl_from_json = {}
        if self.corpus == "kres":
            # in case of kres, read the SRL links from a separate json file
            file_id = xml_file.name.split(".")[0]
            json_file = self.kres_srl_folder / Path(file_id).with_suffix(".srl.json")
            with json_file.open("r") as fp:
                srl_from_json = json.loads(fp.read())

        with xml_file.open("rb") as fp:
            # remove namespaces
            bstr = fp.read()
        utf8str = bstr.decode("utf-8")
        utf8str = re.sub('\\sxmlns="[^"]+"', '', utf8str, count=1)
        utf8str = re.sub(' xml:', ' ', utf8str)
        root = etree.XML(utf8str.encode("utf-8"))

        divs = []  # in ssj, there are divs, in Kres, there are separate files
        if self.corpus == "kres":
            divs = [root]
        else:
            divs = root.findall(".//div")

        res_dict = {}

        # parse divs
        for div in divs:
            f_id = div.get("id")

            # parse paragraphs
            for p in div.findall(".//p"):
                p_id = p.get("id").split(".")[-1]

                # parse sentences
                for s in p.findall(".//s"):
                    s_id = s.get("id").split(".")[-1]
                    sentence_text = ""
                    sentence_tokens = []

                    # parse tokens
                    for el in s.iter():
                        if el.tag in self.W_TAGS:
                            el_id = el.get("id").split(".")[-1]
                            if el_id[0] == 't':
                                el_id = el_id[1:]  # ssj W_TAG ids start with t
                            sentence_text += el.text
                            sentence_tokens += [{
                                "word": True,
                                "tid": int(el_id),
                                "text": el.text,
                                "lemma": el.get("lemma"),
                                "msd": (el.get("msd") if self.corpus == "kres"
                                        else el.get("ana").split(":")[-1]),
                            }]
                        elif el.tag in self.C_TAGS:
                            # only Kres' C_TAGS have ids
                            el_id = el.get("id") or "none"
                            el_id = el_id.split(".")[-1]
                            sentence_text += el.text
                            sentence_tokens += [{
                                "word": False,
                                "tid": (int(el_id) if self.corpus == "kres" else -1),
                                "text": el.text,
                            }]
                        elif el.tag in self.S_TAGS:
                            # Kres' <S /> doesn't contain .text
                            sentence_text += " "
                        else:
                            # pass links and linkGroups
                            pass

                    sentence_id = "{}.{}.{}".format(f_id, p_id, s_id)
                    jos_links = self.parse_jos_links(s)
                    if self.corpus == "kres":
                        srl_links_raw = srl_from_json.get(sentence_id)
                        if srl_links_raw is None:
                            srl_links_parsed = None
                            self.stats["missing_srl"] += [(sentence_id, sentence_text)]
                        else:
                            srl_links_parsed = self.parse_srl_links(s, srl_links_raw)
                    else:
                        srl_links_parsed = self.parse_srl_links(s)
                        if len(srl_links_parsed) == 0:
                            self.stats["missing_srl"] += [(sentence_id, sentence_text)]

                    sentence_entry = {
                        "sid": sentence_id,
                        "text": sentence_text,
                        "tokens": sentence_tokens,
                        "jos_links": jos_links,
                        "srl_links": srl_links_parsed
                    }
                    self.stats["parsed_count"] += 1
                    yield (xml_file, sentence_entry)
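For orientation, the deleted `Parser` was driven like this (a minimal sketch based on the class above; the path is the `SSJ_FILE` sample from the `Makefile`):

```python
# Sketch: iterate sentences from the ssj500k sample with the Parser above.
from corpusparser import Parser

ssj_parser = Parser(
    corpus="ssj",
    infiles=["data/samples/ssj_example/ssj500k-sl.body.sample.xml"],
)
for xml_file, sentence in ssj_parser.sentence_generator():
    print(sentence["sid"], sentence["text"])
print(ssj_parser.stats["parsed_count"], "sentences parsed")
```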

src/pkg/corpusparser/README.md (deleted)

@@ -1,38 +0,0 @@
# corpusparser
A tool for parsing ssj500k and Kres into a unified .json format.
## Input:
### ssj500k
To parse ssj500k, point to the monolithic `ssj500k-sl.body.xml` file (tested on ssj500k 2.1).
### Kres
To parse Kres, point to folders:
* Kres folder, containing several (around 20K) .xml files (`F00XXXXX.xml.parsed.xml`).
* Kres SRL folder, containing SRL links for the corresponding F00...xml files (`F00XXXXX.srl.json`).
## Internal data format
This is the internal python dict data format. It can be stored to file as `.json` or stored into a database for application usage.
```python
{
    'sid': 'F0034713.5.0',
    'text': 'Mednarodni denarni sklad je odobril 30 milijard evrov vredno posojilo Grčiji. ',
    'tokens': [
        {'text': 'Mednarodni', 'lemma': 'mednaroden', 'msd': 'Ppnmeid', 'word': True, 'tid': 1},
        {'text': 'denarni', 'lemma': 'denaren', 'msd': 'Ppnmeid', 'word': True, 'tid': 2},
        {'text': 'sklad', 'lemma': 'sklad', 'msd': 'Somei', 'word': True, 'tid': 3},
        {'text': 'je', 'lemma': 'biti', 'msd': 'Gp-ste-n', 'word': True, 'tid': 4},
        {'text': 'odobril', 'lemma': 'odobriti', 'msd': 'Ggdd-em', 'word': True, 'tid': 5},
        {'text': '30', 'lemma': '30', 'msd': 'Kag', 'word': True, 'tid': 6},
        {'text': 'milijard', 'lemma': 'milijarda', 'msd': 'Sozmr', 'word': True, 'tid': 7},  # ...
    ],
    'jos_links': [
        {'to': 1, 'from': 3, 'afun': 'dol'},
        {'to': 2, 'from': 3, 'afun': 'dol'},
        {'to': 3, 'from': 5, 'afun': 'ena'},  # ...
    ],
    'srl_links': [
        {'to': 3, 'from': 5, 'afun': 'ACT'},
        {'to': 7, 'from': 5, 'afun': 'PAT'}
    ]
}
```
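Records in this format can be consumed straight from the JSON output (a sketch; the file name is hypothetical, matching the per-file outputs that `kres_to_json_files` in `main.py` below writes under `kres_json/`):

```python
import json

# Hypothetical output path: kres_to_json_files writes one .json per F00XXXXX
# input into the kres_json/ subfolder of --outdir.
with open("kres_json/F0034713.json") as fp:
    sentences = json.load(fp)

for s in sentences:
    n_srl = len(s["srl_links"] or [])  # srl_links may be None for kres sentences
    print(s["sid"], s["text"][:40], "srl links:", n_srl)
```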

src/pkg/corpusparser/corpusparser/__init__.py (deleted)

@@ -1 +0,0 @@
from corpusparser.Parser import Parser

src/pkg/corpusparser/corpusparser/main.py (deleted)

@@ -1,102 +0,0 @@
from pathlib import Path
from corpusparser import Parser
import argparse
import logging
import json

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

## Main handles command line arguments and writing to files / DB.

def ssj_to_json_file(sentence_generator, outfolder):
    # this function is based on the fact that files are parsed sequentially
    outfolder = Path(outfolder)
    outfolder.mkdir(parents=True, exist_ok=True)
    outfile = outfolder / "ssj500k.json"

    data_buffer = []
    for s in sentence_generator:
        sdata = s[1]
        data_buffer += [sdata]

    # outfile = Path(outfile)
    with outfile.open("w") as fp:
        logger.info("Writing to {}".format(outfile))
        json.dump(data_buffer, fp)

def kres_to_json_files(sentence_generator, outfolder):
    outfolder = Path(outfolder) / "kres_json"
    outfolder.mkdir(parents=True, exist_ok=True)

    def write_buffer_to_file(outfile, outfile_buffer):
        logger.info("Writing file: {}".format(outfile))
        with outfile.open("w") as fp:
            json.dump(outfile_buffer, fp)

    outfile_buffer = None
    current_outfile = None
    for s in sentence_generator:
        infile = s[0]
        outfile = outfolder / Path(infile.name.split(".")[0]).with_suffix(".json")

        # parser sequentially parses files; when we're done with a file, write it out
        if current_outfile is None:
            current_outfile = outfile
            outfile_buffer = []
        elif outfile != current_outfile:
            write_buffer_to_file(current_outfile, outfile_buffer)
            current_outfile = outfile
            outfile_buffer = []

        # update buffer
        sdata = s[1]
        outfile_buffer += [sdata]
    write_buffer_to_file(current_outfile, outfile_buffer)

def to_db():
    return "TODO"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Parsing corpora kres and ssj500k.")
    parser.add_argument('--kres-folder', required=True)
    parser.add_argument('--kres-srl-folder', required=True)
    parser.add_argument('--ssj-file', required=True)
    parser.add_argument('--output', required=False, default=None)
    parser.add_argument('--outdir', required=False, default=None)
    parser.add_argument('--dbaddr', required=False, default=None)
    args = parser.parse_args()

    # parse ssj
    logger.info("Parsing ssj500k: {}".format(args.ssj_file))
    ssj_parser = Parser(
        corpus="ssj",
        infiles=[args.ssj_file],
    )
    # res = [x[1]["sid"] for x in ssj_parser.sentence_generator()]
    # logger.info("Parsed {} sentences (ssj500k)".format(len(res)))

    # ssj to json
    ssj_to_json_file(ssj_parser.sentence_generator(), args.outdir)

    # parse kres
    logger.info("Parsing Kres: {}".format(args.kres_folder))
    kres_parser = Parser(
        corpus="kres",
        infiles=[args.kres_folder, args.kres_srl_folder],
    )
    # res = [x[1]["sid"] for x in kres_parser.sentence_generator()]
    # logger.info("Parsed {} sentences (kres)".format(len(res)))

    # kres to json
    kres_to_json_files(kres_parser.sentence_generator(), args.outdir)

    ## Handling output is situational --- implement it outside of Parser.
    ## Parser returns tuples (orig_file, element)
    # 1. parse per-file and output to file (JSON)
    # 2. parse and save to DB
    # TODO
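`to_db()` is still a stub here; the working database writer lives in the `cjvt-corpusparser` submodule after this commit. A hedged sketch of what that path could look like, assuming pymongo and a hypothetical `valdb.sentences` collection:

```python
# Sketch only: the "valdb" database and "sentences" collection names are
# hypothetical; the real to_db lives in the cjvt-corpusparser submodule.
from pymongo import MongoClient

def to_db(sentence_generator, dbaddr, dbuser, dbpass):
    host, port = dbaddr.split(":")
    client = MongoClient(host, int(port), username=dbuser, password=dbpass)
    collection = client["valdb"]["sentences"]
    for _, sentence_entry in sentence_generator:
        collection.insert_one(sentence_entry)
```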

src/pkg/corpusparser/setup.py (deleted)

@@ -1,10 +0,0 @@
from setuptools import setup

setup(name='corpusparser',
      version='0.0.1',
      description=u"Parser for kres and ssj500k",
      author=u"Kristjan Voje",
      author_email='kristjan.voje@gmail.com',
      license='MIT',
      zip_safe=False,
      )
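Since the package is installed editable (`pip3 install -e`, as in the `python-env-install` target), a quick import check confirms the install worked; the `corpusparser` package name is kept by the `cjvt-corpusparser` submodule:

```python
# Run inside the python-env container after `make python-env-install`.
from corpusparser import Parser  # exported by corpusparser/__init__.py
print(Parser)
```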