Compare commits

No commits in common. "master" and "valency" have entirely different histories.

.gitignore (1 change, vendored)

@@ -1,5 +1,4 @@
 *.xml
-!collocation-structures.xml
 *.tbl
 *.csv
 *.pdf

README.md (62 changes)

@@ -10,66 +10,7 @@ Recommended: the pypy3 package for faster execution.
 
 Example usage: `python3 wani.py ssj500k.xml Kolokacije_strukture.xml  izhod.csv`
 
-# About
-
-This script was developed to extract collocations from text in TEI format. Collocations are extracted and presented according to the rules in a structure file (an example is `collocation-structures.xml`).
-
-# Setup
-
-The script may be run with python3 or pypy3. We suggest using a virtual environment.
-
-```bash
-pip install -r requirements.txt
-```
-
-
-# Running
-
-```bash
-python3 wani.py <LOCATION TO STRUCTURES> <EXTRACTION TEXT> --out <RESULTS FILE>
-```
-
-## Most important optional parameters
-
-### --sloleks_db
-This parameter may be used if you have access to sloleks_db. It is useful when lemma_fallback would otherwise appear in the results file: with sloleks_db available, the script looks up the correct replacement in the database.
-
-To use it, sqlalchemy has to be installed as well.
-
-This parameter has to contain the database connection information in the following order:
-
-<DB_USERNAME>:<DB_PASSWORD>:<DB_NAME>:<DB_URL>
-
-### --collocation_sentence_map_dest
-If a value is given for this parameter (it should be a string path to a directory), files will be generated that map collocation ids to sentence ids.
-
-### --db
-Path to the file that will contain an sqlite database with internal states. Used to save internal states in case the code gets modified.
-
-We suggest putting this sqlite file in RAM for faster execution. To do this, follow these instructions:
-
-```bash
-sudo mkdir /mnt/tmp
-sudo mount -t tmpfs tmpfs /mnt/tmp
-```
-
-If running on big corpora (e.g. Gigafida), keep the database in RAM:
-```bash
-sudo mkdir /mnt/tmp
-sudo mount -t tmpfs tmpfs /mnt/tmp
-sudo mount -o remount,size=110G,noexec,nosuid,nodev,noatime /mnt/tmp
-```
-
-Pass the path to a specific file when running `wani.py`. For example:
-```bash
-python3 wani.py ... --db /mnt/tmp/mysql-wani-ssj500k ...
-```
-
-### --multiple-output
-Used when we want multiple output files (one file per structure_id).
-
-
-## Instructions for running on big files (e.g. Gigafida)
+## Instructions for running on GF
 
 We suggest running with the mysql file saved in tmpfs. Instructions:
 
@@ -80,7 +21,6 @@ sudo mount -t tmpfs tmpfs /mnt/tmp
 
 If running on big corpora (e.g. Gigafida), keep the database in RAM:
 ```bash
-sudo mkdir /mnt/tmp
 sudo mount -t tmpfs tmpfs /mnt/tmp
 sudo mount -o remount,size=110G,noexec,nosuid,nodev,noatime /mnt/tmp
 ```
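
As an aside, the options documented in the removed README section compose into a single call. A sketch (the structure/corpus/output file names are hypothetical; the `--sloleks_db` placeholder is kept exactly as the README gives it):

```bash
python3 wani.py collocation-structures.xml ssj500k.xml \
    --out izhod.csv \
    --sloleks_db '<DB_USERNAME>:<DB_PASSWORD>:<DB_NAME>:<DB_URL>' \
    --collocation_sentence_map_dest collocation-sentence-mapper \
    --db /mnt/tmp/mysql-wani-ssj500k \
    --multiple-output
```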

[File diff suppressed because it is too large]

@@ -1,29 +1,23 @@
-import argparse
-import os
 import sys
 import tqdm
-import logging
 
 good_lemmas = ["absurd", "absurdnost", "akuten", "akutno", "alkohol", "alkoholen", "aluminijast", "ananas", "aplikacija", "aplikativen", "aranžma", "arbiter", "armada", "avtomatičen", "avtomatiziran", "babica", "bajen", "bajka", "bakren", "bambusov", "barvan", "barvanje", "baseballski", "bazar", "bazičen", "belina", "bezgov", "bičati", "bife", "bilka", "biomasa", "biotop", "birma", "bivol", "blago", "blaženost", "bliskavica", "bobnič", "bolha", "bolnišnica", "bor", "borov", "borovničev", "brati", "briljant", "briti", "brusiti", "bučanje", "cikličen", "civilizacija", "dopust", "drama", "drezati", "duda", "dvorezen", "embalaža", "faks", "farsa", "glasno", "informiranje", "interier", "intima", "intimno", "investirati", "ironično", "istovetiti", "izvožen", "jagoda", "jeklar", "jezik", "karbon", "kitara", "kodrast", "molče", "mučiti", "novinarski", "obala", "občevati", "okrasiti", "pajčevina", "panoga", "prevajanje", "prevajati", "previti", "prihraniti", "priloga", "prisluškovati", "sopara"]
 
-def main(args):
-    filepaths = [os.path.join(args.input, fn) for fn in os.listdir(args.input)]
-    filepaths = sorted(filepaths, key=lambda x: int(x.split('.')[-1]))
 N1 = len(good_lemmas)
-    N2 = len(filepaths) - 1
+N2 = len(sys.argv) - 1
 
-    files_to_write = [open("output/{}".format(l), 'w') for l in good_lemmas]
+files_to_write = [open("polona/{}".format(l), 'w') for l in good_lemmas]
 
-    for fidx, filename in enumerate(filepaths):
+for fidx, filename in enumerate(sys.argv[1:]):
     with open(filename, 'r') as fp:
-            logging.info("loading next...")
+        print("loading next...", end="", flush=True)
         line = fp.readline()
         lemma_rows = [idx for idx, cell in enumerate(line.split(",")) if "_Lemma" in cell]
         file_lines = fp.read().split("\n")
 
     for lidx, good_lemma in enumerate(good_lemmas):
         spaces = " " * 20 if lidx == 0 else ""
-            logging.info("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces))
+        print("\r{}.{} / {}.{}{}".format(fidx, lidx, N2, N1, spaces), end="", flush=True)
 
         for line in file_lines:
             if good_lemma not in line:
@@ -39,11 +33,5 @@ def main(args):
 for fp in files_to_write:
     fp.close()
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(
-        description='Extract structures from a parsed corpus.')
-    parser.add_argument('input',
-                        help='Path to folder with files')
-    args = parser.parse_args()
-    main(args)
+
 
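Note that the rewritten helper above takes its input CSVs from the command line and writes one file per lemma into a `polona/` directory, which it does not create itself. A hypothetical invocation (the script's own file name is not shown in this diff, so `split_lemmas.py` is a placeholder):

```bash
# the output directory must exist, or the open() calls fail
mkdir -p polona
python3 split_lemmas.py ../data/gf2filesres/izhod.csv.*
```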

issue992/files (81 changes, new file)

@@ -0,0 +1,81 @@
+../data/gf2filesres/izhod.csv.100
+../data/gf2filesres/izhod.csv.101
+../data/gf2filesres/izhod.csv.102
+../data/gf2filesres/izhod.csv.103
+../data/gf2filesres/izhod.csv.104
+../data/gf2filesres/izhod.csv.105
+../data/gf2filesres/izhod.csv.106
+../data/gf2filesres/izhod.csv.107
+../data/gf2filesres/izhod.csv.108
+../data/gf2filesres/izhod.csv.12
+../data/gf2filesres/izhod.csv.13
+../data/gf2filesres/izhod.csv.14
+../data/gf2filesres/izhod.csv.15
+../data/gf2filesres/izhod.csv.16
+../data/gf2filesres/izhod.csv.17
+../data/gf2filesres/izhod.csv.18
+../data/gf2filesres/izhod.csv.19
+../data/gf2filesres/izhod.csv.22
+../data/gf2filesres/izhod.csv.23
+../data/gf2filesres/izhod.csv.24
+../data/gf2filesres/izhod.csv.25
+../data/gf2filesres/izhod.csv.26
+../data/gf2filesres/izhod.csv.27
+../data/gf2filesres/izhod.csv.28
+../data/gf2filesres/izhod.csv.29
+../data/gf2filesres/izhod.csv.30
+../data/gf2filesres/izhod.csv.31
+../data/gf2filesres/izhod.csv.32
+../data/gf2filesres/izhod.csv.34
+../data/gf2filesres/izhod.csv.35
+../data/gf2filesres/izhod.csv.36
+../data/gf2filesres/izhod.csv.37
+../data/gf2filesres/izhod.csv.38
+../data/gf2filesres/izhod.csv.39
+../data/gf2filesres/izhod.csv.40
+../data/gf2filesres/izhod.csv.41
+../data/gf2filesres/izhod.csv.42
+../data/gf2filesres/izhod.csv.43
+../data/gf2filesres/izhod.csv.44
+../data/gf2filesres/izhod.csv.45
+../data/gf2filesres/izhod.csv.46
+../data/gf2filesres/izhod.csv.47
+../data/gf2filesres/izhod.csv.48
+../data/gf2filesres/izhod.csv.49
+../data/gf2filesres/izhod.csv.50
+../data/gf2filesres/izhod.csv.51
+../data/gf2filesres/izhod.csv.52
+../data/gf2filesres/izhod.csv.53
+../data/gf2filesres/izhod.csv.54
+../data/gf2filesres/izhod.csv.55
+../data/gf2filesres/izhod.csv.57
+../data/gf2filesres/izhod.csv.68
+../data/gf2filesres/izhod.csv.69
+../data/gf2filesres/izhod.csv.70
+../data/gf2filesres/izhod.csv.71
+../data/gf2filesres/izhod.csv.72
+../data/gf2filesres/izhod.csv.73
+../data/gf2filesres/izhod.csv.74
+../data/gf2filesres/izhod.csv.75
+../data/gf2filesres/izhod.csv.76
+../data/gf2filesres/izhod.csv.77
+../data/gf2filesres/izhod.csv.78
+../data/gf2filesres/izhod.csv.80
+../data/gf2filesres/izhod.csv.81
+../data/gf2filesres/izhod.csv.82
+../data/gf2filesres/izhod.csv.83
+../data/gf2filesres/izhod.csv.84
+../data/gf2filesres/izhod.csv.85
+../data/gf2filesres/izhod.csv.86
+../data/gf2filesres/izhod.csv.87
+../data/gf2filesres/izhod.csv.88
+../data/gf2filesres/izhod.csv.89
+../data/gf2filesres/izhod.csv.90
+../data/gf2filesres/izhod.csv.91
+../data/gf2filesres/izhod.csv.92
+../data/gf2filesres/izhod.csv.93
+../data/gf2filesres/izhod.csv.94
+../data/gf2filesres/izhod.csv.95
+../data/gf2filesres/izhod.csv.96
+../data/gf2filesres/izhod.csv.97
+../data/gf2filesres/izhod.csv.98
@@ -1,288 +0,0 @@
-import re
-from enum import Enum
-
-from luscenje_struktur.codes_tagset import CODES, TAGSET, CODES_UD
-
-
-class RestrictionType(Enum):
-    Morphology = 0
-    Lexis = 1
-    MatchAll = 2
-    Space = 3
-    MorphologyUD = 4
-
-
-def determine_ppb_ud(rgxs):
-    if len(rgxs) != 1:
-        return 0
-    rgx = rgxs[0]
-    if rgx in ("ADJ", "NOUN", "ADV"):
-        return 0
-    elif rgx == "AUX":
-        return 3
-    elif rgx == "VERB":
-        return 2
-    else:
-        return 4
-
-
-def determine_ppb(rgxs):
-    if len(rgxs) != 1:
-        return 0
-    rgx = rgxs[0]
-    if rgx[0] in ("A", "N", "R"):
-        return 0
-    elif rgx[0] == "V":
-        if len(rgx) == 1:
-            return 2
-        elif 'a' in rgx[1]:
-            return 3
-        elif 'm' in rgx[1]:
-            return 1
-        else:
-            return 2
-    else:
-        return 4
-
-class MorphologyRegex:
-    def __init__(self, restriction):
-        # self.min_msd_length = 1
-
-        restr_dict = {}
-        for feature in restriction:
-            feature_dict = dict(feature.items())
-
-            match_type = True
-            if "filter" in feature_dict:
-                assert feature_dict['filter'] == "negative"
-                match_type = False
-                del feature_dict['filter']
-
-            assert len(feature_dict) == 1
-            key, value = next(iter(feature_dict.items()))
-            restr_dict[key] = (value, match_type)
-
-        assert 'POS' in restr_dict
-
-        # handle multiple word types
-        if '|' in restr_dict['POS'][0]:
-            categories = restr_dict['POS'][0].split('|')
-        else:
-            categories = [restr_dict['POS'][0]]
-
-        self.rgxs = []
-        self.re_objects = []
-        self.min_msd_lengths = []
-
-        del restr_dict['POS']
-
-        for category in categories:
-            min_msd_length = 1
-            category = category.capitalize()
-            cat_code = CODES[category]
-            rgx = [cat_code] + ['.'] * 10
-
-
-
-            for attribute, (value, typ) in restr_dict.items():
-                if attribute.lower() not in TAGSET[cat_code]:
-                    continue
-                index = TAGSET[cat_code].index(attribute.lower())
-                assert index >= 0
-
-                if '|' in value:
-                    match = "".join(CODES[val] for val in value.split('|'))
-                else:
-                    match = CODES[value]
-
-                match = "[{}{}]".format("" if typ else "^", match)
-                rgx[index + 1] = match
-
-                if typ:
-                    min_msd_length = max(index + 1, min_msd_length)
-
-            # strip rgx
-            for i in reversed(range(len(rgx))):
-                if rgx[i] == '.':
-                    rgx = rgx[:-1]
-                else:
-                    break
-
-            self.re_objects.append([re.compile(r) for r in rgx])
-            self.rgxs.append(rgx)
-            self.min_msd_lengths.append(min_msd_length)
-
-    def __call__(self, text):
-        for i, re_object in enumerate(self.re_objects):
-            if len(text) < self.min_msd_lengths[i]:
-                continue
-            match = True
-
-            for c, r in zip(text, re_object):
-                if not r.match(c):
-                    match = False
-                    break
-            if match:
-                return True
-        return False
-
-
-class MorphologyUDRegex:
-    def __init__(self, restriction):
-        # self.min_msd_length = 1
-
-        restr_dict = {}
-        for feature in restriction:
-            feature_dict = dict(feature.items())
-
-            match_type = True
-            # if "filter" in feature_dict:
-            #     assert feature_dict['filter'] == "negative"
-            #     match_type = False
-            #     del feature_dict['filter']
-
-            assert len(feature_dict) == 1
-            key, value = next(iter(feature_dict.items()))
-            restr_dict[key] = (value, match_type)
-
-        assert 'POS' in restr_dict
-
-        # handle multiple word types
-        if '|' in restr_dict['POS'][0]:
-            categories = restr_dict['POS'][0].split('|')
-        else:
-            categories = [restr_dict['POS'][0]]
-
-        self.rgxs = []
-        self.re_objects = []
-        self.min_msd_lengths = []
-
-        del restr_dict['POS']
-
-        for category in categories:
-            min_msd_length = 1
-            category = category.upper()
-            assert category in CODES_UD
-            cat_code = category
-            rgx = category
-
-            # for attribute, (value, typ) in restr_dict.items():
-            #     if attribute.lower() not in TAGSET[cat_code]:
-            #         continue
-            #     index = TAGSET[cat_code].index(attribute.lower())
-            #     assert index >= 0
-            #
-            #     if '|' in value:
-            #         match = "".join(CODES[val] for val in value.split('|'))
-            #     else:
-            #         match = CODES[value]
-            #
-            #     match = "[{}{}]".format("" if typ else "^", match)
-            #     rgx[index + 1] = match
-            #
-            #     if typ:
-            #         min_msd_length = max(index + 1, min_msd_length)
-
-            # strip rgx
-            # for i in reversed(range(len(rgx))):
-            #     if rgx[i] == '.':
-            #         rgx = rgx[:-1]
-            #     else:
-            #         break
-
-            # self.re_objects.append([re.compile(r) for r in rgx])
-            self.rgxs.append(rgx)
-            self.min_msd_lengths.append(min_msd_length)
-
-    def __call__(self, text):
-        assert len(self.rgxs) == 1
-        return self.rgxs[0] == text
-
-
-class LexisRegex:
-    def __init__(self, restriction):
-        restr_dict = {}
-        for feature in restriction:
-            restr_dict.update(feature.items())
-
-        assert "lemma" in restr_dict
-        self.match_list = restr_dict['lemma'].split('|')
-
-    def __call__(self, text):
-        return text in self.match_list
-
-
-class SpaceRegex:
-    def __init__(self, restriction):
-        restr_dict = {}
-        for feature in restriction:
-            restr_dict.update(feature.items())
-
-        assert "contact" in restr_dict
-        self.space = restr_dict['contact'].split('|')
-        for el in self.space:
-            if el not in ['both', 'right', 'left', 'neither']:
-                raise Exception('Value of space restriction is not supported (it may be both, left, right or neither).')
-
-    def __call__(self, word):
-        match = False
-        if 'neither' in self.space:
-            match = match or (word.previous_glue != '' and word.glue != '')
-        if 'left' in self.space:
-            match = match or (word.previous_glue == '' and word.glue != '')
-        if 'right' in self.space:
-            match = match or (word.previous_glue != '' and word.glue == '')
-        if 'both' in self.space:
-            match = match or (word.previous_glue == '' and word.glue == '')
-
-        return match
-
-
-
-
-class Restriction:
-    def __init__(self, restriction_tag, system_type='JOS'):
-        self.ppb = 4 # polnopomenska beseda (0-4)
-
-        if restriction_tag is None:
-            self.type = RestrictionType.MatchAll
-            self.matcher = None
-            self.present = None
-            return
-
-        restriction_type = restriction_tag.get('type')
-        if restriction_type == "morphology":
-            if system_type == 'JOS':
-                self.type = RestrictionType.Morphology
-                self.matcher = MorphologyRegex(list(restriction_tag))
-                self.ppb = determine_ppb(self.matcher.rgxs)
-            # UD system is handled based on deprel
-            elif system_type == 'UD':
-                self.type = RestrictionType.MorphologyUD
-                self.matcher = MorphologyUDRegex(list(restriction_tag))
-            #     self.ppb = determine_ppb_ud(self.matcher.rgxs)
-
-        elif restriction_type == "lexis":
-            self.type = RestrictionType.Lexis
-            self.matcher = LexisRegex(list(restriction_tag))
-
-        elif restriction_type == "space":
-            self.type = RestrictionType.Space
-            self.matcher = SpaceRegex(list(restriction_tag))
-        else:
-            raise NotImplementedError()
-
-    def match(self, word):
-        if self.type == RestrictionType.Morphology or self.type == RestrictionType.MorphologyUD:
-            match_to = word.msd
-        elif self.type == RestrictionType.Lexis:
-            match_to = word.lemma
-        elif self.type == RestrictionType.MatchAll:
-            return True
-        elif self.type == RestrictionType.Space:
-            match_to = word
-        else:
-            raise RuntimeError("Unreachable!")
-
-        return self.matcher(match_to)
-
@@ -1,24 +0,0 @@
-from luscenje_struktur.restriction import Restriction
-
-class RestrictionGroup:
-    def __init__(self, restrictions_tag, system_type, group_type='and'):
-        self.restrictions = [Restriction(el, system_type) for el in restrictions_tag]
-        self.group_type = group_type
-
-    def __iter__(self):
-        for restriction in self.restrictions:
-            yield restriction
-
-    def match(self, word):
-        if self.group_type == 'or':
-            for restr in self.restrictions:
-                if restr.match(word): # match either
-                    return True
-            return False
-        elif self.group_type == 'and':
-            for restr in self.restrictions:
-                if not restr.match(word): # match and
-                    return False
-            return True
-        else:
-            raise Exception("Unsupported group_type - it may only be 'and' or 'or'")
@@ -1 +1 @@
-pypy3 wani.py data/Kolokacije_strukture_JOS-32-representation_3D_08_1.xml data/input --out data/output --sloleks_db '<sloleks db data>' --collocation_sentence_map_dest data/collocation-sentence-mapper --db /mnt/tmp/mysql-wani --multiple-output  --load-sloleks
+pypy3 src/wani.py data/Kolokacije_strukture_JOS-32-representation_3D_08_1.xml data/input --out data/output --sloleks_db '<sloleks db data>' --collocation_sentence_map_dest data/collocation-sentence-mapper --db /mnt/tmp/mysql-wani --multiple-output  --load-sloleks
@@ -1,7 +1,4 @@
-
-
 import argparse
-import csv
 import logging
 import os
 import sys
@@ -169,8 +166,8 @@ def write_new_stats(wf, original_text, stats, file_name, word_order):
         wf.write(','.join(line) + '\n')
 
 
+
 def main(args):
-    if not args.ignore_recalculation:
     word_order = load_word_order(args.word_order_file)
     for file_name in os.listdir(args.input):
         read_file_path = os.path.join(args.input, file_name)
@@ -178,46 +175,13 @@ def main(args):
         with open(read_file_path, 'r') as rf, open(write_file_path, 'w') as wf:
             original_text, stats = get_new_stats(rf)
             freq_pos = original_text[0].index('Frequency')
-                if args.frequency_limit > 1:
             original_text = [original_text[0]] + [l for l in original_text[1:] if int(l[freq_pos]) >= 10]
-                if args.sorted:
             if len(original_text) > 1:
                 original_text = [original_text[0]] + sorted(original_text[1:], key=lambda x: -1 * int(x[freq_pos]))
             else:
                 original_text = [original_text[0]]
             write_new_stats(wf, original_text, stats, file_name, word_order)
 
-    if args.format_output:
-        for file_name in os.listdir(args.output):
-            read_file_path = os.path.join(args.output, file_name)
-            write_file_path = os.path.join(args.formatted_output, file_name)
-            with open(read_file_path, 'r', encoding="utf-8") as rf, open(write_file_path, 'w') as wf:
-                first_line = True
-                lines = []
-                formatted_output = []
-                for line in rf:
-                    line = line[:-1].split(',')
-                    if first_line:
-                        # sorting
-                        a = line[-17]
-                        b = line[-15]
-                        # post frequency
-                        c = line[-6]
-                        d = line[-8]
-                        formatted_output.append(line[:-14] + [line[-6], line[-8]])
-
-                        first_line = False
-                        continue
-                    lines.append(line[:-14] + [line[-6], line[-8]])
-
-                lines = [line for line in lines if int(line[-3]) >= 10]
-                lines = sorted(lines, key=lambda x: (-int(x[-3]), x[-5]))
-                formatted_output += lines
-                for line in formatted_output:
-                    wf.write(','.join(line) + '\n')
-            break
-
-
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         description='Extract structures from a parsed corpus.')
@@ -226,11 +190,6 @@ if __name__ == '__main__':
     parser.add_argument('output',
                         help='Path to folder that contains all input files.')
     parser.add_argument('--word_order_file', type=str, help='File that contains word order for DeltaP calculations.')
-    parser.add_argument('--frequency_limit', type=int, default=1, help='File that contains word order for DeltaP calculations.')
-    parser.add_argument('--sorted', action='store_true', help='File that contains word order for DeltaP calculations.')
-    parser.add_argument('--format_output', action='store_true', help='Format and cut data as specified in #1808 on redmine.')
-    parser.add_argument('--ignore_recalculation', action='store_true', help='Ignore recalculation.')
-    parser.add_argument('--formatted_output', default=None, help='Destination of final results.')
 
     args = parser.parse_args()
     logging.basicConfig(stream=sys.stderr)

setup.py (10 changes)

@@ -1,10 +0,0 @@
-from setuptools import setup, find_packages
-
-setup(name='luscenje_struktur_loc',
-  version='0.0.1',
-  description=u"Parser for collocability",
-  author=u"CJVT",
-  author_email='fake@mail.com',
-  license='MIT',
-  packages=find_packages(),
-)
@@ -120,26 +120,6 @@ CODES_TRANSLATION = {
     }
 }
 
-CODES_UD = {
-    "ADJ",
-    "ADP",
-    "PUNCT",
-    "ADV",
-    "AUX",
-    "SYM",
-    "INTJ",
-    "CCONJ",
-    "X",
-    "NOUN",
-    "DET",
-    "PROPN",
-    "NUM",
-    "VERB",
-    "PART",
-    "PRON",
-    "SCONJ"
-}
-
 CODES = {
     "Noun": "N",
     "Verb": "V",
@@ -153,7 +133,6 @@ CODES = {
     "Interjection": "I",
     "Abbreviation": "Y",
     "Residual": "X",
-    "Punctuation": "Z",
 
     'common': 'c',
     'proper': 'p',
@@ -231,18 +210,3 @@ TAGSET = {
     "Y": [],
     "X": ['type']
 }
-
-PPB_DEPRELS = [
-    "advmod",
-    "amod",
-    "compound",
-    "conj",
-    "fixed",
-    "flat",
-    "iobj",
-    "nmod",
-    "nsubj",
-    "nummod",
-    "obj",
-    "obl"
-]
@@ -1,10 +1,9 @@
 from enum import Enum
 import logging
 
-# from luscenje_struktur.restriction import Restriction
-from luscenje_struktur.order import Order
-from luscenje_struktur.representation_assigner import RepresentationAssigner
-from luscenje_struktur.restriction_group import RestrictionGroup
+from restriction import Restriction
+from order import Order
+from representation_assigner import RepresentationAssigner
 
 
 class ComponentStatus(Enum):
@@ -20,9 +19,9 @@ class ComponentType(Enum):
 
 
 class Component:
-    def __init__(self, info, system_type):
+    def __init__(self, info):
         idx = info['cid']
-        name = info['label'] if 'label' in info else None
+        name = info['name'] if 'name' in info else None
         typ = ComponentType.Core if info['type'] == "core" else ComponentType.Other
 
         if 'status' not in info:
@@ -39,7 +38,7 @@ class Component:
         self.status = status
         self.name = name
         self.idx = idx
-        self.restrictions = RestrictionGroup([None], system_type) if 'restriction' in info else []
+        self.restrictions = []
         self.next_element = []
         self.representation = []
         self.selection = {}
@@ -50,17 +49,15 @@
     def add_next(self, next_component, link_label, order):
         self.next_element.append((next_component, link_label, Order.new(order)))
 
-    def set_restriction(self, restrictions_tags, system_type):
-        if not restrictions_tags:
-            self.restrictions = RestrictionGroup([None], system_type)
+    def set_restriction(self, restrictions_tag):
+        if restrictions_tag is None:
+            self.restrictions = [Restriction(None)]
 
-        # if first element is of type restriction all following are as well
-        elif restrictions_tags[0].tag == "restriction":
-            self.restrictions = RestrictionGroup(restrictions_tags, system_type)
+        elif restrictions_tag.tag == "restriction":
+            self.restrictions = [Restriction(restrictions_tag)]
 
-        # combinations of 'and' and 'or' restrictions are currently not implemented
-        elif restrictions_tags[0].tag == "restriction_or":
-            self.restrictions = RestrictionGroup(restrictions_tags[0], system_type, group_type='or')
+        elif restrictions_tag.tag == "restriction_or":
+            self.restrictions = [Restriction(el) for el in restrictions_tag]
 
         else:
             raise RuntimeError("Unreachable")
@@ -72,19 +69,19 @@
                 crend.add_feature(feature.attrib)
             self.representation.append(crend)
 
-    def find_next(self, deps, comps, restrs, reprs, system_type):
+    def find_next(self, deps, comps, restrs, reprs):
         to_ret = []
         for d in deps:
             if d[0] == self.idx:
                 _, idx, dep_label, order = d
 
-                next_component = Component(comps[idx], system_type)
-                next_component.set_restriction(restrs[idx], system_type)
+                next_component = Component(comps[idx])
+                next_component.set_restriction(restrs[idx])
                 next_component.set_representation(reprs[idx])
                 to_ret.append(next_component)
 
                 self.add_next(next_component, dep_label, order)
-                others = next_component.find_next(deps, comps, restrs, reprs, system_type)
+                others = next_component.find_next(deps, comps, restrs, reprs)
                 to_ret.extend(others)
 
         return to_ret
@@ -107,28 +104,37 @@
             if len(cmatch) == 0:
                 continue
 
-            # create new to_ret, to which extend all results
-            new_to_ret = []
-            for tr in to_ret:
-                # make sure that one word is not used twice in same to_ret
-                new_to_ret.extend([{**dict(tr), **m} for m in cmatch if all([m_v not in dict(tr).values() for m_v in m.values()])])
-            if len(new_to_ret) == 0:
-                return None
-            to_ret = new_to_ret
-            del new_to_ret
+            # if more than one match found for particular component
+            elif len(cmatch) > 1:
+                # if more than one match in multiple components, NOPE!
+                if len(to_ret) > 1:
+                    logging.warning("Strange multiple match: {}".format(
+                        str([w.id for w in cmatch[0].values()])))
+
+                    for tr in to_ret:
+                        tr.update(cmatch[0])
+                    continue
+
+                # yeah, so we have found more than one match, =>
+                # more than one element in to_ret
+                to_ret = [{**dict(to_ret[0]), **m} for m in cmatch]
+
+            else:
+                for tr in to_ret:
+                    tr.update(cmatch[0])
 
         return to_ret
 
 
     def _match_self(self, word):
         # matching
-        if self.restrictions.match(word):
+        for restr in self.restrictions:
+            if restr.match(word): # match either
                 return {self.idx: word}
 
     def _match_next(self, word):
         # matches for every component in links from this component
         to_ret = []
 
-
         # need to get all links that match
         for next, link, order in self.next_element:
             next_links = word.get_links(link)
@@ -1,8 +1,7 @@
 from math import log2
 import re
-import logging
 
-from luscenje_struktur.component import ComponentType
+from component import ComponentType
 
 
 class Formatter:
@@ -83,7 +82,7 @@ class AllFormatter(Formatter):
         word = words[idx]
         return [word.id, word.text, word.lemma, word.msd]
 
-    def content_right(self, _freq, variable_word_order=None):
+    def content_right(self, _freq):
         return []
 
     def group(self):
@@ -166,9 +165,9 @@ class StatsFormatter(Formatter):
                 new_key = (sidx, idx, '')
             if new_key in self.colocation_ids.dispersions:
                 key = new_key
-                logging.info('Dispersions fixed.')
+                print('Dispersions fixed.')
             else:
-                logging.info('Dispersions not fixed.')
+                print('Dispersions not fixed.')
         if key in self.colocation_ids.dispersions:
             distribution = self.colocation_ids.dispersions[key]
         else:
@@ -1,4 +1,4 @@
-from luscenje_struktur.restriction import MorphologyRegex
+from restriction import MorphologyRegex
 
 
 def get_lemma_features(et):
@@ -8,7 +8,7 @@ def get_lemma_features(et):
 
     result = {}
     for pos in lf.iter('POS'):
-        rgx_list = MorphologyRegex(pos).rgxs[0]
+        rgx_list = MorphologyRegex(pos).rgx
         rgx_str = ""
         for position in rgx_list:
             if position == ".":
| @ -5,10 +5,9 @@ import re | |||||||
| import sys | import sys | ||||||
| import gzip | import gzip | ||||||
| import pathlib | import pathlib | ||||||
| from io import StringIO |  | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.progress_bar import progress | from progress_bar import progress | ||||||
| from luscenje_struktur.word import Word | from word import Word | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def is_root_id(id_): | def is_root_id(id_): | ||||||
| @ -23,26 +22,22 @@ def load_files(args, database, w_collection=None, input_corpus=None): | |||||||
|     if len(filenames) == 1 and os.path.isdir(filenames[0]): |     if len(filenames) == 1 and os.path.isdir(filenames[0]): | ||||||
|         filenames = [os.path.join(filenames[0], file) for file in os.listdir(filenames[0]) if file[-5:] != '.zstd'] |         filenames = [os.path.join(filenames[0], file) for file in os.listdir(filenames[0]) if file[-5:] != '.zstd'] | ||||||
| 
 | 
 | ||||||
|     if len(filenames) > 1: |  | ||||||
|         filenames = [filename for filename in filenames if filename[-5:] != '.zstd'] |  | ||||||
|         filenames = sorted(filenames, key=lambda x: int(x.split('.')[-1])) |  | ||||||
| 
 |  | ||||||
|     database.init("CREATE TABLE Files ( filename varchar(2048) )") |     database.init("CREATE TABLE Files ( filename varchar(2048) )") | ||||||
| 
 | 
 | ||||||
|     for idx, fname in enumerate(filenames): |     for idx, fname in enumerate(filenames): | ||||||
|         logging.info("FILE " + fname + "{}/{}".format(idx, len(filenames))) |         print("FILE ", fname, "{}/{}".format(idx, len(filenames))) | ||||||
|         extension = pathlib.Path(fname).suffix |         extension = pathlib.Path(fname).suffix | ||||||
| 
 | 
 | ||||||
|         # check if file with the same name already loaded... |         # check if file with the same name already loaded... | ||||||
|         loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname,)).fetchone() |         loaded = database.execute("SELECT * FROM Files WHERE filename=?", (fname,)).fetchone() | ||||||
|         if loaded is not None: |         if loaded is not None: | ||||||
|             logging.info("ALREADY LOADED") |             print("ALREADY LOADED") | ||||||
|             continue |             continue | ||||||
| 
 | 
 | ||||||
|         if extension == ".xml": |         if extension == ".xml": | ||||||
|             et = load_xml(fname) |             et = load_xml(fname) | ||||||
|             if input_corpus is None: |             if input_corpus is None: | ||||||
|                 yield file_sentence_generator(et, args) |                 yield file_sentence_generator(et, skip_id_check, do_msd_translate, args.pc_tag) | ||||||
|             else: |             else: | ||||||
|                 sentence_generator = file_sentence_generator_valency(et, skip_id_check, do_msd_translate, args.pc_tag, w_collection) |                 sentence_generator = file_sentence_generator_valency(et, skip_id_check, do_msd_translate, args.pc_tag, w_collection) | ||||||
|                 for sent_id, sentence, othr_attributes in sentence_generator: |                 for sent_id, sentence, othr_attributes in sentence_generator: | ||||||
| @ -54,11 +49,6 @@ def load_files(args, database, w_collection=None, input_corpus=None): | |||||||
|                 sentences = load_csv_valency(fname, True, w_collection) |                 sentences = load_csv_valency(fname, True, w_collection) | ||||||
|                 for sentence in sentences: |                 for sentence in sentences: | ||||||
|                     yield sentence |                     yield sentence | ||||||
|         elif extension == ".conllu": |  | ||||||
|             if input_corpus is None: |  | ||||||
|                 yield load_conllu(fname) |  | ||||||
|             else: |  | ||||||
|                 raise Exception('conllu with input_corpus is not supported!') |  | ||||||
|         else: |         else: | ||||||
|             if input_corpus is None: |             if input_corpus is None: | ||||||
|                 yield load_csv(fname, False) |                 yield load_csv(fname, False) | ||||||
| @ -83,59 +73,6 @@ def lines_csv(filename): | |||||||
|             yield line |             yield line | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def load_conllu(filename): |  | ||||||
|     import conllu |  | ||||||
|     result = [] |  | ||||||
|     bad_sentence = False |  | ||||||
| 
 |  | ||||||
|     words = {} |  | ||||||
|     links = [] |  | ||||||
| 
 |  | ||||||
|     def sentence_end(bad_sentence, sent_id): |  | ||||||
|         if bad_sentence: |  | ||||||
|             return |  | ||||||
| 
 |  | ||||||
|         for lfrom, ldest, ana in links: |  | ||||||
|             if lfrom not in words or ldest not in words: |  | ||||||
|                 logging.warning("Bad link in sentence: " + sent_id) |  | ||||||
|                 continue |  | ||||||
|             words[lfrom].add_link(ana, words[ldest]) |  | ||||||
|         result.extend(words.values()) |  | ||||||
| 
 |  | ||||||
|     with open(filename, 'r') as f: |  | ||||||
|         data = f.read() |  | ||||||
|         # conlls = conllu.parse_incr(StringIO(data)) |  | ||||||
|         # for sent in conlls: |  | ||||||
|         #     try: |  | ||||||
|         #         for word in sent: |  | ||||||
|         #             full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id'])) |  | ||||||
|         #             words[str(word['id'])] = Word(word['id'], word['xpos'], full_id, word['form'], False) |  | ||||||
|         #     except: |  | ||||||
|         #         logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!") |  | ||||||
| 
 |  | ||||||
|         conlls = conllu.parse_incr(StringIO(data)) |  | ||||||
|         # build dep parse |  | ||||||
|         for sent in conlls: |  | ||||||
|             try: |  | ||||||
|                 # adding fake word |  | ||||||
|                 words['0'] = Word('', '', '0', '', False, True) |  | ||||||
|                 for word in sent: |  | ||||||
|                     if type(word['id']) == tuple: |  | ||||||
|                         continue |  | ||||||
|                     full_id = "{}.{}".format(sent.metadata['sent_id'], str(word['id'])) |  | ||||||
|                     words[str(word['id'])] = Word(word['lemma'], word['upos'], full_id, word['form'], False) |  | ||||||
|                     links.append((str(word['head']), str(word['id']), word['deprel'])) |  | ||||||
|                 sentence_end(False, sent.metadata['sent_id']) |  | ||||||
|                 links = [] |  | ||||||
|                 words = {} |  | ||||||
|             except: |  | ||||||
|                 links = [] |  | ||||||
|                 words = {} |  | ||||||
|                 logging.error(f"Error while reading file {filename} in sentence {sent.metadata['sent_id']}. Check if required data is available!") |  | ||||||
| 
 |  | ||||||
|     return result |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def load_csv(filename, compressed): | def load_csv(filename, compressed): | ||||||
|     result = [] |     result = [] | ||||||
|     bad_sentence = False |     bad_sentence = False | ||||||
| @ -161,8 +98,6 @@ def load_csv(filename, compressed): | |||||||
|         line_split = line_fixed.split("\t") |         line_split = line_fixed.split("\t") | ||||||
| 
 | 
 | ||||||
|         if line_split[1] == "1" and len(words) > 0: |         if line_split[1] == "1" and len(words) > 0: | ||||||
|             # adding fake word |  | ||||||
|             words['0'] = Word('', '', '0', '', False, True) |  | ||||||
|             sentence_end(bad_sentence) |             sentence_end(bad_sentence) | ||||||
|             bad_sentence = False |             bad_sentence = False | ||||||
|             links = [] |             links = [] | ||||||
| @ -175,11 +110,9 @@ def load_csv(filename, compressed): | |||||||
|         full_id = "{}.{}".format(sid, wid) |         full_id = "{}.{}".format(sid, wid) | ||||||
| 
 | 
 | ||||||
|         words[wid] = Word(lemma, msd, full_id, text, True) |         words[wid] = Word(lemma, msd, full_id, text, True) | ||||||
|         # if link_src != '0': |         if link_src != '0': | ||||||
|             links.append((link_src, wid, link_type)) |             links.append((link_src, wid, link_type)) | ||||||
| 
 | 
 | ||||||
|     # adding fake word |  | ||||||
|     words['0'] = Word('', '', '0', '', False, True) |  | ||||||
|     sentence_end(bad_sentence) |     sentence_end(bad_sentence) | ||||||
|     return result |     return result | ||||||
| 
 | 
 | ||||||
| @ -248,53 +181,14 @@ def load_xml(filename): | |||||||
|     return ElementTree.XML(xmlstring) |     return ElementTree.XML(xmlstring) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def file_sentence_generator(et, args): | def file_sentence_generator(et, skip_id_check, do_msd_translate, pc_tag): | ||||||
|     skip_id_check = args.skip_id_check |  | ||||||
|     do_msd_translate = not args.no_msd_translate |  | ||||||
|     pc_tag = args.pc_tag |  | ||||||
|     use_punctuations = not args.ignore_punctuations |  | ||||||
|     previous_pc = False |  | ||||||
| 
 |  | ||||||
|     words = {} |     words = {} | ||||||
|     paragraphs = list(et.iter('p')) |     sentences = list(et.iter('s')) | ||||||
|     for paragraph in progress(paragraphs, "load-text"): |     for sentence in progress(sentences, "load-text"): | ||||||
|         previous_glue = '' |         for w in sentence.iter("w"): | ||||||
|         sentences = list(paragraph.iter('s')) |  | ||||||
|         for sentence in sentences: |  | ||||||
|             # create fake root word |  | ||||||
|             words[sentence.get('id')] = Word.fake_root_word(sentence.get('id')) |  | ||||||
|             last_word_id = None |  | ||||||
| 
 |  | ||||||
|             if args.new_tei: |  | ||||||
|                 for w in sentence.iter(): |  | ||||||
|                     if w.tag == 'w': |  | ||||||
|                         words[w.get('id')] = Word.from_xml(w, do_msd_translate) |             words[w.get('id')] = Word.from_xml(w, do_msd_translate) | ||||||
|                         if use_punctuations: |         for pc in sentence.iter(pc_tag): | ||||||
|                             previous_glue = '' if 'join' in w.attrib and w.get('join') == 'right' else ' ' |             words[pc.get('id')] = Word.pc_word(pc, do_msd_translate) | ||||||
|                     elif w.tag == pc_tag: |  | ||||||
|                         words[w.get('id')] = Word.pc_word(w, do_msd_translate) |  | ||||||
|                         if use_punctuations: |  | ||||||
|                             words[w.get('id')].previous_glue = previous_glue |  | ||||||
|                             words[w.get('id')].glue = '' if 'join' in w.attrib and w.get('join') == 'right' else ' ' |  | ||||||
|                             previous_glue = '' if 'join' in w.attrib and w.get('join') == 'right' else ' ' |  | ||||||
|             else: |  | ||||||
|                 for w in sentence.iter(): |  | ||||||
|                     if w.tag == 'w': |  | ||||||
|                         words[w.get('id')] = Word.from_xml(w, do_msd_translate) |  | ||||||
|                         if use_punctuations: |  | ||||||
|                             previous_glue = '' |  | ||||||
|                             last_word_id = None |  | ||||||
|                     elif w.tag == pc_tag: |  | ||||||
|                         words[w.get('id')] = Word.pc_word(w, do_msd_translate) |  | ||||||
|                         if use_punctuations: |  | ||||||
|                             last_word_id = w.get('id') |  | ||||||
|                             words[w.get('id')].previous_glue = previous_glue |  | ||||||
|                             previous_glue = '' |  | ||||||
|                     elif use_punctuations and w.tag == 'c': |  | ||||||
|                         # always save previous glue |  | ||||||
|                         previous_glue = w.text |  | ||||||
|                         if last_word_id: |  | ||||||
|                             words[last_word_id].glue += w.text |  | ||||||
| 
 | 
 | ||||||
|         for l in sentence.iter("link"): |         for l in sentence.iter("link"): | ||||||
|             if 'dep' in l.keys(): |             if 'dep' in l.keys(): | ||||||
| @ -310,7 +204,7 @@ def file_sentence_generator(et, args): | |||||||
| 
 | 
 | ||||||
|             if lfrom in words: |             if lfrom in words: | ||||||
|                 if not skip_id_check and is_root_id(lfrom): |                 if not skip_id_check and is_root_id(lfrom): | ||||||
|                         logging.error("Id {} is not fine, you might want to try with tag --skip-id-check".format(lfrom)) |                     logging.error("NOO: {}".format(lfrom)) | ||||||
|                     sys.exit(1) |                     sys.exit(1) | ||||||
| 
 | 
 | ||||||
|                 if dest in words: |                 if dest in words: | ||||||
| @ -324,7 +218,6 @@ def file_sentence_generator(et, args): | |||||||
|                 # strange errors, just skip... |                 # strange errors, just skip... | ||||||
|                 pass |                 pass | ||||||
| 
 | 
 | ||||||
|     return list(words.values()) |     return list(words.values()) | ||||||
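In master the generator takes the whole argparse namespace instead of individual flags. A minimal sketch of the attributes it actually reads; the namespace is a stand-in for real CLI args and `et` stands for the parsed TEI root:

```python
from types import SimpleNamespace

# Only these attributes are consumed by file_sentence_generator (a sketch,
# not the full CLI surface).
args = SimpleNamespace(skip_id_check=False, no_msd_translate=False,
                       pc_tag='pc', ignore_punctuations=False, new_tei=True)
words = file_sentence_generator(et, args)  # et: parsed TEI ElementTree root
```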
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @ -1,4 +1,4 @@ | |||||||
| from luscenje_struktur.word import Word | from word import Word | ||||||
| 
 | 
 | ||||||
| class StructureMatch: | class StructureMatch: | ||||||
|     def __init__(self, match_id, structure): |     def __init__(self, match_id, structure): | ||||||
| @ -2,11 +2,10 @@ import gc | |||||||
| from collections import defaultdict | from collections import defaultdict | ||||||
| from ast import literal_eval | from ast import literal_eval | ||||||
| from time import time | from time import time | ||||||
| import logging |  | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.match import StructureMatch | from match import StructureMatch | ||||||
| from luscenje_struktur.representation_assigner import RepresentationAssigner | from representation_assigner import RepresentationAssigner | ||||||
| from luscenje_struktur.progress_bar import progress | from progress_bar import progress | ||||||
| 
 | 
 | ||||||
| class MatchStore: | class MatchStore: | ||||||
|     def __init__(self, args, db): |     def __init__(self, args, db): | ||||||
| @ -105,7 +104,7 @@ class MatchStore: | |||||||
|     def set_representations(self, word_renderer, structures, sloleks_db=None): |     def set_representations(self, word_renderer, structures, sloleks_db=None): | ||||||
|         step_name = 'representation' |         step_name = 'representation' | ||||||
|         if self.db.is_step_done(step_name): |         if self.db.is_step_done(step_name): | ||||||
|             logging.info("Representation step already done, skipping") |             print("Representation step already done, skipping") | ||||||
|             return |             return | ||||||
| 
 | 
 | ||||||
|         num_inserts = 1000 |         num_inserts = 1000 | ||||||
| @ -149,7 +148,7 @@ class MatchStore: | |||||||
|                 dispersions[(str(structure_id), component_id, lemma)] += 1 |                 dispersions[(str(structure_id), component_id, lemma)] += 1 | ||||||
|              |              | ||||||
|         self.dispersions = dict(dispersions) |         self.dispersions = dict(dispersions) | ||||||
|         logging.info("Storing dispersions...") |         print("Storing dispersions...") | ||||||
|         self.store_dispersions() |         self.store_dispersions() | ||||||
| 
 | 
 | ||||||
|         self.db.step_is_done(step_name) |         self.db.step_is_done(step_name) | ||||||
| @ -1911,4 +1911,4 @@ MSD_TRANSLATE = { | |||||||
|     "Ne": "Ne", |     "Ne": "Ne", | ||||||
|     "Nh": "Nh", |     "Nh": "Nh", | ||||||
|     "Na": "Na", |     "Na": "Na", | ||||||
|     "U": "Z"} |     "U": "N"} | ||||||
| @ -1,8 +1,7 @@ | |||||||
| 
 | 
 | ||||||
| class Postprocessor: | class Postprocessor: | ||||||
|     def __init__(self, fix_one_letter_words=True, fixed_restriction_order=False): |     def __init__(self, fix_one_letter_words=True): | ||||||
|         self.fix_one_letter_words = fix_one_letter_words |         self.fix_one_letter_words = fix_one_letter_words | ||||||
|         self.fixed_restriction_order = fixed_restriction_order |  | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def fix_sz(next_word): |     def fix_sz(next_word): | ||||||
| @ -29,19 +28,3 @@ class Postprocessor: | |||||||
|                     match[col_id].text = correct_letter |                     match[col_id].text = correct_letter | ||||||
|         collocation_id = [collocation_id[0]] + [tuple(line) for line in collocation_id[1:]] |         collocation_id = [collocation_id[0]] + [tuple(line) for line in collocation_id[1:]] | ||||||
|         return match, collocation_id |         return match, collocation_id | ||||||
| 
 |  | ||||||
|     def is_fixed_restriction_order(self, match): |  | ||||||
|         if not self.fixed_restriction_order: |  | ||||||
|             return True |  | ||||||
| 
 |  | ||||||
|         sorted_dict = {k: v for k, v in sorted(match.items(), key=lambda item: item[1].int_id)} |  | ||||||
|         prev_id = -1 |  | ||||||
|         for key in sorted_dict.keys(): |  | ||||||
|             if key == '#': |  | ||||||
|                 continue |  | ||||||
|             int_key = int(key) |  | ||||||
|             if prev_id > int_key: |  | ||||||
|                 return False |  | ||||||
|             prev_id = int_key |  | ||||||
| 
 |  | ||||||
|         return True |  | ||||||
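A small sketch of the master-branch `--fixed-restriction-order` check; `W` is a hypothetical stand-in for the loader's `Word` objects, of which only `int_id` is consulted:

```python
from collections import namedtuple

W = namedtuple('W', 'int_id')  # hypothetical stand-in for Word
pp = Postprocessor(fixed_restriction_order=True)

in_order = {'1': W(10), '2': W(12)}  # component order follows word order
swapped  = {'2': W(10), '1': W(12)}  # words occur in the opposite order
assert pp.is_fixed_restriction_order(in_order)
assert not pp.is_fixed_restriction_order(swapped)
```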
| @ -1,5 +1,4 @@ | |||||||
| import time | import time | ||||||
| import logging |  | ||||||
| 
 | 
 | ||||||
| try: | try: | ||||||
|     from tqdm import tqdm |     from tqdm import tqdm | ||||||
| @ -22,10 +21,10 @@ class Progress: | |||||||
|             for n, el in enumerate(iterable): |             for n, el in enumerate(iterable): | ||||||
|                 now = time.time() |                 now = time.time() | ||||||
|                 if now - last_report > REPORT_ON: |                 if now - last_report > REPORT_ON: | ||||||
|                     logging.info("\r{}: {}/{}".format(description, n, total), end="") |                     print("\r{}: {}/{}".format(description, n, total), end="") | ||||||
|                     last_report = now |                     last_report = now | ||||||
|                 yield el |                 yield el | ||||||
|             logging.info(" -> {}".format(time.time() - start_time)) |             print(" -> {}".format(time.time() - start_time)) | ||||||
|         else: |         else: | ||||||
|             yield from tqdm(iterable, desc=description, total=total) |             yield from tqdm(iterable, desc=description, total=total) | ||||||
| 
 | 
 | ||||||
| @ -1,10 +1,10 @@ | |||||||
| import logging | import logging | ||||||
| 
 | 
 | ||||||
| from collections import Counter | from collections import Counter | ||||||
| from luscenje_struktur.codes_tagset import TAGSET, CODES | from codes_tagset import TAGSET, CODES | ||||||
| from luscenje_struktur.word import WordMsdOnly | from word import WordMsdOnly | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.word import WordDummy | from word import WordDummy | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class ComponentRepresentation: | class ComponentRepresentation: | ||||||
| @ -71,7 +71,9 @@ class WordFormAnyCR(ComponentRepresentation): | |||||||
|             agreements_matched = [agr.match(word_msd) for agr in self.agreement] |             agreements_matched = [agr.match(word_msd) for agr in self.agreement] | ||||||
| 
 | 
 | ||||||
|             # if not all agreements match, try to fetch the correct form from sloleks |             # if not all agreements match, try to fetch the correct form from sloleks | ||||||
|             if sloleks_db is not None and not all(agreements_matched): |             if not all(agreements_matched): | ||||||
|  |                 if sloleks_db is None: | ||||||
|  |                     raise Exception('sloleks_db not properly setup!') | ||||||
|                 for i, agr in enumerate(self.agreement): |                 for i, agr in enumerate(self.agreement): | ||||||
|                     if not agr.match(word_msd): |                     if not agr.match(word_msd): | ||||||
|                         msd, lemma, text = sloleks_db.get_word_form(agr.lemma, agr.msd(), agr.data, align_msd=word_msd) |                         msd, lemma, text = sloleks_db.get_word_form(agr.lemma, agr.msd(), agr.data, align_msd=word_msd) | ||||||
| @ -140,7 +142,9 @@ class WordFormMsdCR(WordFormAnyCR): | |||||||
|             super().add_word(word) |             super().add_word(word) | ||||||
| 
 | 
 | ||||||
|     def _render(self, sloleks_db=None): |     def _render(self, sloleks_db=None): | ||||||
|         if len(self.words) == 0 and sloleks_db is not None: |         if len(self.words) == 0: | ||||||
|  |             if sloleks_db is None: | ||||||
|  |                 raise Exception('sloleks_db not properly setup!') | ||||||
|             msd, lemma, text = sloleks_db.get_word_form(self.lemma, self.msd(), self.data) |             msd, lemma, text = sloleks_db.get_word_form(self.lemma, self.msd(), self.data) | ||||||
|             if msd is not None: |             if msd is not None: | ||||||
|                 self.words.append(WordDummy(msd, lemma, text)) |                 self.words.append(WordDummy(msd, lemma, text)) | ||||||
| @ -1,4 +1,4 @@ | |||||||
| from luscenje_struktur.representation import ComponentRepresentation, LemmaCR, LexisCR, WordFormAgreementCR, WordFormAnyCR, WordFormMsdCR, WordFormAllCR | from representation import ComponentRepresentation, LemmaCR, LexisCR, WordFormAgreementCR, WordFormAnyCR, WordFormMsdCR, WordFormAllCR | ||||||
| 
 | 
 | ||||||
| class RepresentationAssigner: | class RepresentationAssigner: | ||||||
|     def __init__(self): |     def __init__(self): | ||||||
| @ -27,10 +27,11 @@ class RepresentationAssigner: | |||||||
|             elif feature['selection'] == "all": |             elif feature['selection'] == "all": | ||||||
|                 self.representation_factory = WordFormAllCR |                 self.representation_factory = WordFormAllCR | ||||||
|             elif feature['selection'] == 'agreement': |             elif feature['selection'] == 'agreement': | ||||||
|  |                 assert feature['head'][:4] == 'cid_' | ||||||
|                 assert feature['msd'] is not None |                 assert feature['msd'] is not None | ||||||
|                 self.representation_factory = WordFormAgreementCR |                 self.representation_factory = WordFormAgreementCR | ||||||
|                 self.more['agreement'] = feature['msd'].split('+') |                 self.more['agreement'] = feature['msd'].split('+') | ||||||
|                 self.more['other'] = feature['head_cid'] |                 self.more['other'] = feature['head'][4:] | ||||||
|             else: |             else: | ||||||
|                 raise NotImplementedError("Representation selection: {}".format(feature)) |                 raise NotImplementedError("Representation selection: {}".format(feature)) | ||||||
| 
 | 
 | ||||||
							
								
								
									
src/restriction.py (new file, 133 lines)
							| @ -0,0 +1,133 @@ | |||||||
|  | import re | ||||||
|  | from enum import Enum | ||||||
|  | 
 | ||||||
|  | from codes_tagset import CODES, TAGSET | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class RestrictionType(Enum): | ||||||
|  |     Morphology = 0 | ||||||
|  |     Lexis = 1 | ||||||
|  |     MatchAll = 2 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def determine_ppb(rgx): | ||||||
|  |     if rgx[0] in ("A", "N", "R"): | ||||||
|  |         return 0 | ||||||
|  |     elif rgx[0] == "V": | ||||||
|  |         if len(rgx) == 1: | ||||||
|  |             return 2 | ||||||
|  |         elif 'a' in rgx[1]: | ||||||
|  |             return 3 | ||||||
|  |         elif 'm' in rgx[1]: | ||||||
|  |             return 1 | ||||||
|  |         else: | ||||||
|  |             return 2 | ||||||
|  |     else: | ||||||
|  |         return 4 | ||||||
|  | 
 | ||||||
|  | class MorphologyRegex: | ||||||
|  |     def __init__(self, restriction): | ||||||
|  |         self.min_msd_length = 1 | ||||||
|  | 
 | ||||||
|  |         restr_dict = {} | ||||||
|  |         for feature in restriction: | ||||||
|  |             feature_dict = dict(feature.items()) | ||||||
|  | 
 | ||||||
|  |             match_type = True | ||||||
|  |             if "filter" in feature_dict: | ||||||
|  |                 assert feature_dict['filter'] == "negative" | ||||||
|  |                 match_type = False | ||||||
|  |                 del feature_dict['filter'] | ||||||
|  | 
 | ||||||
|  |             assert len(feature_dict) == 1 | ||||||
|  |             key, value = next(iter(feature_dict.items())) | ||||||
|  |             restr_dict[key] = (value, match_type) | ||||||
|  | 
 | ||||||
|  |         assert 'POS' in restr_dict | ||||||
|  |         category = restr_dict['POS'][0].capitalize() | ||||||
|  |         cat_code = CODES[category] | ||||||
|  |         rgx = [cat_code] + ['.'] * 10 | ||||||
|  | 
 | ||||||
|  |         del restr_dict['POS'] | ||||||
|  | 
 | ||||||
|  |         for attribute, (value, typ) in restr_dict.items(): | ||||||
|  |             index = TAGSET[cat_code].index(attribute.lower()) | ||||||
|  |             assert index >= 0 | ||||||
|  | 
 | ||||||
|  |             if '|' in value: | ||||||
|  |                 match = "".join(CODES[val] for val in value.split('|')) | ||||||
|  |             else: | ||||||
|  |                 match = CODES[value] | ||||||
|  | 
 | ||||||
|  |             match = "[{}{}]".format("" if typ else "^", match) | ||||||
|  |             rgx[index + 1] = match | ||||||
|  | 
 | ||||||
|  |             if typ: | ||||||
|  |                 self.min_msd_length = max(index + 1, self.min_msd_length) | ||||||
|  | 
 | ||||||
|  |         # strip rgx | ||||||
|  |         for i in reversed(range(len(rgx))): | ||||||
|  |             if rgx[i] == '.': | ||||||
|  |                 rgx = rgx[:-1] | ||||||
|  |             else: | ||||||
|  |                 break | ||||||
|  | 
 | ||||||
|  |         self.re_objects = [re.compile(r) for r in rgx] | ||||||
|  |         self.rgx = rgx | ||||||
|  |      | ||||||
|  |     def __call__(self, text): | ||||||
|  |         if len(text) <= self.min_msd_length: | ||||||
|  |             return False | ||||||
|  | 
 | ||||||
|  |         for c, r in zip(text, self.re_objects): | ||||||
|  |             if not r.match(c): | ||||||
|  |                 return False | ||||||
|  |         return True | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class LexisRegex: | ||||||
|  |     def __init__(self, restriction): | ||||||
|  |         restr_dict = {} | ||||||
|  |         for feature in restriction: | ||||||
|  |             restr_dict.update(feature.items()) | ||||||
|  | 
 | ||||||
|  |         assert "lemma" in restr_dict | ||||||
|  |         self.match_list = restr_dict['lemma'].split('|') | ||||||
|  |      | ||||||
|  |     def __call__(self, text): | ||||||
|  |         return text in self.match_list | ||||||
|  | 
 | ||||||
|  | class Restriction: | ||||||
|  |     def __init__(self, restriction_tag): | ||||||
|  |         self.ppb = 4  # polnopomenska beseda, i.e. content word (0-4) | ||||||
|  | 
 | ||||||
|  |         if restriction_tag is None: | ||||||
|  |             self.type = RestrictionType.MatchAll | ||||||
|  |             self.matcher = None | ||||||
|  |             self.present = None | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         restriction_type = restriction_tag.get('type') | ||||||
|  |         if restriction_type == "morphology": | ||||||
|  |             self.type = RestrictionType.Morphology | ||||||
|  |             self.matcher = MorphologyRegex(list(restriction_tag)) | ||||||
|  |             self.ppb = determine_ppb(self.matcher.rgx) | ||||||
|  | 
 | ||||||
|  |         elif restriction_type == "lexis": | ||||||
|  |             self.type = RestrictionType.Lexis | ||||||
|  |             self.matcher = LexisRegex(list(restriction_tag)) | ||||||
|  |         else: | ||||||
|  |             raise NotImplementedError() | ||||||
|  | 
 | ||||||
|  |     def match(self, word): | ||||||
|  |         if self.type == RestrictionType.Morphology: | ||||||
|  |             match_to = word.msd | ||||||
|  |         elif self.type == RestrictionType.Lexis: | ||||||
|  |             match_to = word.lemma | ||||||
|  |         elif self.type == RestrictionType.MatchAll: | ||||||
|  |             return True | ||||||
|  |         else: | ||||||
|  |             raise RuntimeError("Unreachable!") | ||||||
|  | 
 | ||||||
|  |         return self.matcher(match_to) | ||||||
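For orientation, a hypothetical use of `MorphologyRegex` from this new module; the feature names and values below are assumptions and must exist in `codes_tagset` (`CODES`/`TAGSET`) for the lookup to work:

```python
from xml.etree import ElementTree

# Hypothetical morphology restriction, shaped like the structure file's.
restr = ElementTree.fromstring(
    '<restriction type="morphology">'
    '<feature POS="noun"/>'
    '<feature case="accusative"/>'
    '</restriction>')

matcher = MorphologyRegex(list(restr))
matcher('Ncfsa')  # True if the case slot of the MSD holds the accusative code
```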
|  | 
 | ||||||
| @ -1,18 +1,18 @@ | |||||||
| import gc | import gc | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.codes_tagset import TAGSET, CODES, CODES_TRANSLATION, POSSIBLE_WORD_FORM_FEATURE_VALUES |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class SloleksDatabase: |  | ||||||
|     def __init__(self, db, load_sloleks): |  | ||||||
| from psycopg2cffi import compat | from psycopg2cffi import compat | ||||||
| compat.register() | compat.register() | ||||||
| 
 | 
 | ||||||
| from sqlalchemy.ext.declarative import declarative_base | from sqlalchemy.ext.declarative import declarative_base | ||||||
|         from sqlalchemy.orm import Session | from sqlalchemy.orm import Session, aliased | ||||||
| from sqlalchemy import create_engine | from sqlalchemy import create_engine | ||||||
| 
 | 
 | ||||||
|         global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation, FormEncoding | from codes_tagset import TAGSET, CODES, CODES_TRANSLATION, POSSIBLE_WORD_FORM_FEATURE_VALUES | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class SloleksDatabase: | ||||||
|  |     def __init__(self, db, load_sloleks): | ||||||
|  |         global Lexeme, LexemeFeature, SyntacticStructure, StructureComponent, Feature, LexicalUnitLexeme, LexicalUnit, LexicalUnitType, Category, Sense, Measure, LexicalUnitMeasure, Corpus, Definition, WordForm, WordFormFeature, FormRepresentation | ||||||
|         [db_user, db_password, db_database, db_host] = db.split(':') |         [db_user, db_password, db_database, db_host] = db.split(':') | ||||||
| 
 | 
 | ||||||
|         engine = create_engine('postgresql://' + db_user + ':' + db_password + '@' + db_host + '/' + db_database, |         engine = create_engine('postgresql://' + db_user + ':' + db_password + '@' + db_host + '/' + db_database, | ||||||
| @ -71,25 +71,17 @@ class SloleksDatabase: | |||||||
|         class FormRepresentation(Base): |         class FormRepresentation(Base): | ||||||
|             __table__ = Base.metadata.tables['jedro_formrepresentation'] |             __table__ = Base.metadata.tables['jedro_formrepresentation'] | ||||||
| 
 | 
 | ||||||
|         class FormEncoding(Base): |  | ||||||
|             __table__ = Base.metadata.tables['jedro_formencoding'] |  | ||||||
| 
 |  | ||||||
|         self.session = Session(engine) |         self.session = Session(engine) | ||||||
| 
 | 
 | ||||||
|         self.load_sloleks = load_sloleks |         self.load_sloleks = load_sloleks | ||||||
|         if self.load_sloleks: |         if self.load_sloleks: | ||||||
|             self.init_load_sloleks() |             self.init_load_sloleks() | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     def init_load_sloleks(self): |     def init_load_sloleks(self): | ||||||
|         query_word_form_features = self.session.query(WordFormFeature.word_form_id, WordFormFeature.value) |         query_word_form_features = self.session.query(WordFormFeature.word_form_id, WordFormFeature.value) | ||||||
|         word_form_features = query_word_form_features.all() |         word_form_features = query_word_form_features.all() | ||||||
|         query_form_representations = self.session.query(FormRepresentation.word_form_id) |         query_form_representations = self.session.query(FormRepresentation.word_form_id, FormRepresentation.form) | ||||||
|         form_representations = query_form_representations.all() |         form_representations = query_form_representations.all() | ||||||
|         query_form_encoding = self.session.query(FormEncoding.form_representation_id, FormEncoding.text) |  | ||||||
|         form_encodings = query_form_encoding.all() |  | ||||||
|         query_word_forms = self.session.query(WordForm.id, WordForm.lexeme_id) |         query_word_forms = self.session.query(WordForm.id, WordForm.lexeme_id) | ||||||
|         word_forms = query_word_forms.all() |         word_forms = query_word_forms.all() | ||||||
|         query_lexemes = self.session.query(Lexeme.id, Lexeme.lemma) |         query_lexemes = self.session.query(Lexeme.id, Lexeme.lemma) | ||||||
| @ -109,10 +101,7 @@ class SloleksDatabase: | |||||||
|                 self.word_form_features[word_form_feature.word_form_id] = set() |                 self.word_form_features[word_form_feature.word_form_id] = set() | ||||||
|             self.word_form_features[word_form_feature.word_form_id].add(word_form_feature.value) |             self.word_form_features[word_form_feature.word_form_id].add(word_form_feature.value) | ||||||
| 
 | 
 | ||||||
|         form_encodings_dict = {form_encoding.form_representation_id: form_encoding.text for form_encoding |         self.form_representations = {form_representation.word_form_id: form_representation.form for form_representation | ||||||
|                                      in form_encodings} |  | ||||||
| 
 |  | ||||||
|         self.form_representations = {form_representation.word_form_id: form_encodings_dict[form_representation.word_form_id] for form_representation |  | ||||||
|                                      in form_representations} |                                      in form_representations} | ||||||
| 
 | 
 | ||||||
|         self.word_forms = {} |         self.word_forms = {} | ||||||
| @ -164,7 +153,6 @@ class SloleksDatabase: | |||||||
| 
 | 
 | ||||||
|     def get_word_form(self, lemma, msd, data, align_msd=False): |     def get_word_form(self, lemma, msd, data, align_msd=False): | ||||||
|         # modify msd as required |         # modify msd as required | ||||||
|         from sqlalchemy.orm import aliased |  | ||||||
|         msd = list(msd) |         msd = list(msd) | ||||||
|         if 'msd' in data: |         if 'msd' in data: | ||||||
|             for key, value in data['msd'].items(): |             for key, value in data['msd'].items(): | ||||||
| @ -205,14 +193,9 @@ class SloleksDatabase: | |||||||
|             return ''.join(msd), lemma, form_representations |             return ''.join(msd), lemma, form_representations | ||||||
|         else: |         else: | ||||||
|             wfs = [aliased(WordFormFeature) for _ in decypher_msd] |             wfs = [aliased(WordFormFeature) for _ in decypher_msd] | ||||||
|             # self.session.query(FormEncoding.form_representation_id, FormEncoding.text) |             query_preposition = self.session.query(FormRepresentation.form) \ | ||||||
|             query_preposition = self.session.query(FormEncoding.text) \ |  | ||||||
|                 .join(FormRepresentation, FormRepresentation.id == FormEncoding.form_representation_id) \ |  | ||||||
|                 .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \ |                 .join(WordForm, WordForm.id == FormRepresentation.word_form_id) \ | ||||||
|                 .join(Lexeme, Lexeme.id == WordForm.lexeme_id) |                 .join(Lexeme, Lexeme.id == WordForm.lexeme_id) | ||||||
| 
 | 
 | ||||||
|             for wf in wfs: |             for wf in wfs: | ||||||
|                 query_preposition = query_preposition.join(wf, wf.word_form_id == WordForm.id) |                 query_preposition = query_preposition.join(wf, wf.word_form_id == WordForm.id) | ||||||
| @ -2,31 +2,25 @@ from xml.etree import ElementTree | |||||||
| import logging | import logging | ||||||
| import pickle | import pickle | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.codes_tagset import PPB_DEPRELS | from component import Component, ComponentType | ||||||
| from luscenje_struktur.component import Component, ComponentType | from lemma_features import get_lemma_features | ||||||
| from luscenje_struktur.lemma_features import get_lemma_features |  | ||||||
| 
 | 
 | ||||||
| class SyntacticStructure: | class SyntacticStructure: | ||||||
|     def __init__(self): |     def __init__(self): | ||||||
|         self.id = None |         self.id = None | ||||||
|         # self.lbs = None |         self.lbs = None | ||||||
|         self.components = [] |         self.components = [] | ||||||
|         self.fake_root_included = False |  | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def from_xml(xml, no_stats): |     def from_xml(xml): | ||||||
|         st = SyntacticStructure() |         st = SyntacticStructure() | ||||||
|         st.id = xml.get('id') |         st.id = xml.get('id_nsss') | ||||||
|         if st.id is None: |         st.lbs = xml.get('LBS') | ||||||
|             st.id = xml.get('tempId') |  | ||||||
|         # st.lbs = xml.get('LBS') |  | ||||||
| 
 | 
 | ||||||
|         assert len(list(xml)) == 1 |         assert len(list(xml)) == 1 | ||||||
|         system = next(iter(xml)) |         system = next(iter(xml)) | ||||||
| 
 | 
 | ||||||
|         assert system.get('type') in ('JOS', 'UD') |         assert system.get('type') == 'JOS' | ||||||
|         system_type = system.get('type') |  | ||||||
| 
 |  | ||||||
|         components, dependencies, definitions = list(system) |         components, dependencies, definitions = list(system) | ||||||
| 
 | 
 | ||||||
|         deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order')) |         deps = [(dep.get('from'), dep.get('to'), dep.get('label'), dep.get('order')) | ||||||
| @ -37,50 +31,25 @@ class SyntacticStructure: | |||||||
| 
 | 
 | ||||||
|         for comp in definitions: |         for comp in definitions: | ||||||
|             n = comp.get('cid') |             n = comp.get('cid') | ||||||
|             restrs[n] = [] |             restrs[n] = None | ||||||
|             forms[n] = [] |             forms[n] = [] | ||||||
| 
 | 
 | ||||||
|             for el in comp: |             for el in comp: | ||||||
|                 if el.tag.startswith("restriction"): |                 if el.tag.startswith("restriction"): | ||||||
|                     restrs[n].append(el) |                     assert restrs[n] is None | ||||||
|  |                     restrs[n] = el | ||||||
|                 elif el.tag.startswith("representation"): |                 elif el.tag.startswith("representation"): | ||||||
|                     st.add_representation(n, el, forms) |                     st.add_representation(n, el, forms) | ||||||
|                 else: |                 else: | ||||||
|                     raise NotImplementedError("Unknown definition: {} in structure {}" |                     raise NotImplementedError("Unknown definition: {} in structure {}" | ||||||
|                                               .format(el.tag, st.id)) |                                               .format(el.tag, st.id)) | ||||||
| 
 | 
 | ||||||
|         fake_root_component = Component({'cid': '#', 'type': 'other', 'restriction': None}, system_type) |         fake_root_component = Component({'cid': '#', 'type': 'other'}) | ||||||
|         fake_root_component_children = fake_root_component.find_next(deps, comps, restrs, forms, system_type) |         st.components = fake_root_component.find_next(deps, comps, restrs, forms) | ||||||
|         # all deps with label 'modra' point to the artificial root - fake_root_component |  | ||||||
|         if any([dep[2] == 'modra' for dep in deps]): |  | ||||||
|             st.fake_root_included = True |  | ||||||
|             st.components = [fake_root_component] + fake_root_component_children |  | ||||||
|         else: |  | ||||||
|             st.components = fake_root_component_children |  | ||||||
| 
 | 
 | ||||||
|         if not no_stats: |  | ||||||
|             if system_type == 'JOS': |  | ||||||
|                 st.determine_core2w() |         st.determine_core2w() | ||||||
|             elif system_type == 'UD': |  | ||||||
|                 st.determine_core2w_ud() |  | ||||||
|         return st |         return st | ||||||
| 
 | 
 | ||||||
|     def determine_core2w_ud(self): |  | ||||||
|         deprels = {} |  | ||||||
|         for c in self.components: |  | ||||||
|             for next_el in c.next_element: |  | ||||||
|                 deprels[next_el[0]] = next_el[1] |  | ||||||
|         ppb_components_num = 0 |  | ||||||
|         for c in self.components: |  | ||||||
|             if c.type != ComponentType.Core: |  | ||||||
|                 continue |  | ||||||
|             if c in deprels and deprels[c] not in PPB_DEPRELS: |  | ||||||
|                 continue |  | ||||||
|             ppb_components_num += 1 |  | ||||||
|             c.type = ComponentType.Core2w |  | ||||||
| 
 |  | ||||||
|         assert ppb_components_num == 2, RuntimeError("Cannot determine 2 'jedrna polnopomenska beseda' for", self.id) |  | ||||||
| 
 |  | ||||||
|     def determine_core2w(self): |     def determine_core2w(self): | ||||||
|         ppb_components = [] |         ppb_components = [] | ||||||
|         for c in self.components: |         for c in self.components: | ||||||
| @ -129,7 +98,6 @@ class SyntacticStructure: | |||||||
| 
 | 
 | ||||||
| def build_structures(args): | def build_structures(args): | ||||||
|     filename = args.structures |     filename = args.structures | ||||||
|     no_stats = args.out is None and args.stats is None |  | ||||||
| 
 | 
 | ||||||
|     max_num_components = -1 |     max_num_components = -1 | ||||||
|     with open(filename, 'r') as fp: |     with open(filename, 'r') as fp: | ||||||
| @ -137,15 +105,12 @@ def build_structures(args): | |||||||
| 
 | 
 | ||||||
|     structures = [] |     structures = [] | ||||||
|     for structure in et.iter('syntactic_structure'): |     for structure in et.iter('syntactic_structure'): | ||||||
|         if structure.attrib['type'] != 'collocation': |         to_append = SyntacticStructure.from_xml(structure) | ||||||
|             continue |  | ||||||
|         to_append = SyntacticStructure.from_xml(structure, no_stats) |  | ||||||
|         if to_append is None: |         if to_append is None: | ||||||
|             continue |             continue | ||||||
| 
 | 
 | ||||||
|         structures.append(to_append) |         structures.append(to_append) | ||||||
|         to_append_len = len(to_append.components) if not to_append.fake_root_included else len(to_append.components) - 1 |         max_num_components = max(max_num_components, len(to_append.components)) | ||||||
|         max_num_components = max(max_num_components, to_append_len) |  | ||||||
|      |      | ||||||
|     lemma_features = get_lemma_features(et) |     lemma_features = get_lemma_features(et) | ||||||
|     return structures, lemma_features, max_num_components |     return structures, lemma_features, max_num_components | ||||||
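`build_structures` reads only `args.structures`, `args.out` and `args.stats` (the latter two just to derive `no_stats`), so it can be driven without the full CLI; a minimal sketch with a hypothetical structures file:

```python
from types import SimpleNamespace

# Hypothetical invocation; only these three attributes are read.
args = SimpleNamespace(structures='collocation-structures.xml',
                       out='izhod.csv', stats=None)
structures, lemma_features, max_num_components = build_structures(args)
```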
| @ -1,5 +1,4 @@ | |||||||
| from datetime import timedelta, datetime | from datetime import timedelta, datetime | ||||||
| import logging |  | ||||||
| 
 | 
 | ||||||
| class TimeInfo: | class TimeInfo: | ||||||
|     def __init__(self, to_go): |     def __init__(self, to_go): | ||||||
| @ -15,5 +14,5 @@ class TimeInfo: | |||||||
|         seconds = sum(self.times) / len(self.times) |         seconds = sum(self.times) / len(self.times) | ||||||
|         td = timedelta(seconds = int(seconds * self.to_go)) |         td = timedelta(seconds = int(seconds * self.to_go)) | ||||||
|         ft = datetime.now() + td |         ft = datetime.now() + td | ||||||
|         logging.info("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M"))) |         print("Going to finish in {}".format(ft.strftime("%d/%m @ %H:%M"))) | ||||||
| 
 | 
 | ||||||
| @ -10,18 +10,18 @@ import subprocess | |||||||
| import concurrent.futures | import concurrent.futures | ||||||
| import tempfile | import tempfile | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.progress_bar import progress | from progress_bar import progress | ||||||
| from luscenje_struktur.sloleks_db import SloleksDatabase | from sloleks_db import SloleksDatabase | ||||||
| from luscenje_struktur.word import Word | from word import Word | ||||||
| from luscenje_struktur.syntactic_structure import build_structures | from syntactic_structure import build_structures | ||||||
| from luscenje_struktur.match_store import MatchStore | from match_store import MatchStore | ||||||
| from luscenje_struktur.word_stats import WordStats | from word_stats import WordStats | ||||||
| from luscenje_struktur.writer import Writer | from writer import Writer | ||||||
| from luscenje_struktur.loader import load_files | from loader import load_files | ||||||
| from luscenje_struktur.database import Database | from database import Database | ||||||
| from luscenje_struktur.time_info import TimeInfo | from time_info import TimeInfo | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.postprocessor import Postprocessor | from postprocessor import Postprocessor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def match_file(words, structures, postprocessor): | def match_file(words, structures, postprocessor): | ||||||
| @ -31,8 +31,6 @@ def match_file(words, structures, postprocessor): | |||||||
|         for w in words: |         for w in words: | ||||||
|             mhere = s.match(w) |             mhere = s.match(w) | ||||||
|             for match in mhere: |             for match in mhere: | ||||||
|                 if not postprocessor.is_fixed_restriction_order(match): |  | ||||||
|                     continue |  | ||||||
|                 colocation_id = [[idx, w.lemma] for idx, w in match.items()] |                 colocation_id = [[idx, w.lemma] for idx, w in match.items()] | ||||||
|                 colocation_id = [s.id] + list(sorted(colocation_id, key=lambda x: x[0])) |                 colocation_id = [s.id] + list(sorted(colocation_id, key=lambda x: x[0])) | ||||||
|                 match, collocation_id = postprocessor.process(match, colocation_id) |                 match, collocation_id = postprocessor.process(match, colocation_id) | ||||||
| @ -50,7 +48,6 @@ def main(args): | |||||||
|     database = Database(args) |     database = Database(args) | ||||||
|     match_store = MatchStore(args, database) |     match_store = MatchStore(args, database) | ||||||
|     word_stats = WordStats(lemma_msds, database) |     word_stats = WordStats(lemma_msds, database) | ||||||
|     postprocessor = Postprocessor(fixed_restriction_order=args.fixed_restriction_order) |  | ||||||
| 
 | 
 | ||||||
|     for words in load_files(args, database): |     for words in load_files(args, database): | ||||||
|         if words is None: |         if words is None: | ||||||
| @ -58,6 +55,7 @@ def main(args): | |||||||
|             continue |             continue | ||||||
| 
 | 
 | ||||||
|         start_time = time.time() |         start_time = time.time() | ||||||
|  |         postprocessor = Postprocessor() | ||||||
|         matches = match_file(words, structures, postprocessor) |         matches = match_file(words, structures, postprocessor) | ||||||
| 
 | 
 | ||||||
|         match_store.add_matches(matches) |         match_store.add_matches(matches) | ||||||
| @ -82,12 +80,8 @@ def main(args): | |||||||
| 
 | 
 | ||||||
|     # figure out representations! |     # figure out representations! | ||||||
|     if args.out or args.out_no_stat: |     if args.out or args.out_no_stat: | ||||||
|         if args.sloleks_db is not None: |  | ||||||
|             sloleks_db = SloleksDatabase(args.sloleks_db, args.load_sloleks) |         sloleks_db = SloleksDatabase(args.sloleks_db, args.load_sloleks) | ||||||
|         else: |  | ||||||
|             sloleks_db = None |  | ||||||
|         match_store.set_representations(word_stats, structures, sloleks_db=sloleks_db) |         match_store.set_representations(word_stats, structures, sloleks_db=sloleks_db) | ||||||
|         if args.sloleks_db is not None: |  | ||||||
|             sloleks_db.close() |         sloleks_db.close() | ||||||
| 
 | 
 | ||||||
|     Writer.make_output_writer(args, max_num_components, match_store, word_stats).write_out( |     Writer.make_output_writer(args, max_num_components, match_store, word_stats).write_out( | ||||||
| @ -108,7 +102,7 @@ if __name__ == '__main__': | |||||||
|                         help='Structures definitions in xml file') |                         help='Structures definitions in xml file') | ||||||
|     parser.add_argument('input', |     parser.add_argument('input', | ||||||
|                         help='Input file (currently gz or xml). If none, only the database is loaded.', nargs='*') |                         help='Input file (currently gz or xml). If none, only the database is loaded.', nargs='*') | ||||||
|     parser.add_argument('--sloleks_db', type=str, default=None, help='Sloleks database credentials') |     parser.add_argument('--sloleks_db', type=str, help='Sloleks database credentials') | ||||||
|     parser.add_argument('--out', |     parser.add_argument('--out', | ||||||
|                         help='Classic output file') |                         help='Classic output file') | ||||||
|     parser.add_argument('--out-no-stat', |     parser.add_argument('--out-no-stat', | ||||||
| @ -136,7 +130,7 @@ if __name__ == '__main__': | |||||||
|                         action='store_true') |                         action='store_true') | ||||||
| 
 | 
 | ||||||
|     parser.add_argument('--load-sloleks', |     parser.add_argument('--load-sloleks', | ||||||
|                         help='Tells whether sloleks is loaded into memory at the beginning of processing or not.', |                         help='Tells whether sloleks is loaded into memory at the beginning of processing or not.', | ||||||
|                         action='store_true') |                         action='store_true') | ||||||
| 
 | 
 | ||||||
|     parser.add_argument('--sort-by', |     parser.add_argument('--sort-by', | ||||||
| @ -153,16 +147,7 @@ if __name__ == '__main__': | |||||||
| 
 | 
 | ||||||
|     parser.add_argument('--pc-tag', |     parser.add_argument('--pc-tag', | ||||||
|                         help='Tag for separators, usually pc or c', default="pc") |                         help='Tag for separators, usually pc or c', default="pc") | ||||||
|     parser.add_argument('--separator', | 
 | ||||||
|                         help='Separator in output file', default="\t") |  | ||||||
|     parser.add_argument('--ignore-punctuations', |  | ||||||
|                         help="Sort in reversed ored", action='store_true') |  | ||||||
|     parser.add_argument('--fixed-restriction-order', |  | ||||||
|                         help='If used, words have to be in the same order as components.', |  | ||||||
|                         action='store_true') |  | ||||||
|     parser.add_argument('--new-tei', |  | ||||||
|                         help='Parse input using the new version of TEI. (default=False)', |  | ||||||
|                         action='store_true') |  | ||||||
|     args = parser.parse_args() |     args = parser.parse_args() | ||||||
|     logging.basicConfig(stream=sys.stderr, level=args.verbose.upper()) |     logging.basicConfig(stream=sys.stderr, level=args.verbose.upper()) | ||||||
| 
 | 
 | ||||||
| @ -1,7 +1,7 @@ | |||||||
| from collections import defaultdict | from collections import defaultdict | ||||||
| import logging | import logging | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.msd_translate import MSD_TRANSLATE | from msd_translate import MSD_TRANSLATE | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class WordCompressed: | class WordCompressed: | ||||||
| @ -32,15 +32,11 @@ class WordDummy: | |||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class Word: | class Word: | ||||||
|     def __init__(self, lemma, msd, wid, text, do_msd_translate, fake_word=False, previous_punctuation=None): |     def __init__(self, lemma, msd, wid, text, do_msd_translate): | ||||||
|         self.lemma = lemma |         self.lemma = lemma | ||||||
|         self.msd = MSD_TRANSLATE[msd] if do_msd_translate else msd |         self.msd = MSD_TRANSLATE[msd] if do_msd_translate else msd | ||||||
|         self.id = wid |         self.id = wid | ||||||
|         self.idi = None |  | ||||||
|         self.text = text |         self.text = text | ||||||
|         self.glue = '' |  | ||||||
|         self.previous_glue = '' if previous_punctuation is None else previous_punctuation |  | ||||||
|         self.fake_word = fake_word |  | ||||||
| 
 | 
 | ||||||
|         self.links = defaultdict(list) |         self.links = defaultdict(list) | ||||||
| 
 | 
 | ||||||
| @ -76,11 +72,6 @@ class Word: | |||||||
|         pc.set('msd', "N" if do_msd_translate else "U") |         pc.set('msd', "N" if do_msd_translate else "U") | ||||||
|         return Word.from_xml(pc, do_msd_translate) |         return Word.from_xml(pc, do_msd_translate) | ||||||
| 
 | 
 | ||||||
|     @staticmethod |  | ||||||
|     def fake_root_word(sentence_id): |  | ||||||
|         wid = sentence_id |  | ||||||
|         return Word('', '', wid, '', False, True) |  | ||||||
| 
 |  | ||||||
|     def add_link(self, link, to): |     def add_link(self, link, to): | ||||||
|         self.links[link].append(to) |         self.links[link].append(to) | ||||||
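The widened constructor keeps old call sites working because the new parameters default to harmless values; a small sketch with a hypothetical token:

```python
# Hypothetical token; glue fields default to '' and fake_word to False.
w = Word('hiša', 'Ncfsn', 's1.4', 'hiša', False)
print(w.glue, w.previous_glue, w.fake_word)  # -> '' '' False
```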
| 
 | 
 | ||||||
| @ -1,7 +1,7 @@ | |||||||
| from collections import defaultdict, Counter | from collections import defaultdict, Counter | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.progress_bar import progress | from progress_bar import progress | ||||||
| import logging | 
 | ||||||
| 
 | 
 | ||||||
| class WordStats: | class WordStats: | ||||||
|     def __init__(self, lemma_features, db): |     def __init__(self, lemma_features, db): | ||||||
| @ -25,8 +25,6 @@ class WordStats: | |||||||
| 
 | 
 | ||||||
|     def add_words(self, words): |     def add_words(self, words): | ||||||
|         for w in progress(words, "adding-words"): |         for w in progress(words, "adding-words"): | ||||||
|             if w.fake_word: |  | ||||||
|                 continue |  | ||||||
|             params = {'lemma': w.lemma, 'msd': w.msd, 'text': w.text} |             params = {'lemma': w.lemma, 'msd': w.msd, 'text': w.text} | ||||||
|             res = self.db.execute("""UPDATE UniqWords SET frequency=frequency + 1 |             res = self.db.execute("""UPDATE UniqWords SET frequency=frequency + 1 | ||||||
|                 WHERE lemma=:lemma AND msd=:msd AND text=:text""", params) |                 WHERE lemma=:lemma AND msd=:msd AND text=:text""", params) | ||||||
| @ -46,7 +44,7 @@ class WordStats: | |||||||
|     def generate_renders(self): |     def generate_renders(self): | ||||||
|         step_name = 'generate_renders' |         step_name = 'generate_renders' | ||||||
|         if self.db.is_step_done(step_name): |         if self.db.is_step_done(step_name): | ||||||
|             logging.info("Skipping GenerateRenders, already complete") |             print("Skipping GenerateRenders, already complete") | ||||||
|             return |             return | ||||||
| 
 | 
 | ||||||
|         lemmas = [lemma for (lemma, ) in self.db.execute("SELECT DISTINCT lemma FROM UniqWords")] |         lemmas = [lemma for (lemma, ) in self.db.execute("SELECT DISTINCT lemma FROM UniqWords")] | ||||||
| @ -1,11 +1,11 @@ | |||||||
| import logging | import logging | ||||||
| import os | import os | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.progress_bar import progress | from progress_bar import progress | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.formatter import OutFormatter, OutNoStatFormatter, AllFormatter, StatsFormatter | from formatter import OutFormatter, OutNoStatFormatter, AllFormatter, StatsFormatter | ||||||
| 
 | 
 | ||||||
| from luscenje_struktur.collocation_sentence_mapper import CollocationSentenceMapper | from collocation_sentence_mapper import CollocationSentenceMapper | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class Writer: | class Writer: | ||||||
| @ -16,23 +16,23 @@ class Writer: | |||||||
|     @staticmethod |     @staticmethod | ||||||
|     def make_output_writer(args, num_components, colocation_ids, word_renderer): |     def make_output_writer(args, num_components, colocation_ids, word_renderer): | ||||||
|         params = Writer.other_params(args) |         params = Writer.other_params(args) | ||||||
|         return Writer(args.out, num_components, OutFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params, args.separator) |         return Writer(args.out, num_components, OutFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params) | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def make_output_no_stat_writer(args, num_components, colocation_ids, word_renderer): |     def make_output_no_stat_writer(args, num_components, colocation_ids, word_renderer): | ||||||
|         params = Writer.other_params(args) |         params = Writer.other_params(args) | ||||||
|         return Writer(args.out_no_stat, num_components, OutNoStatFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params, args.separator) |         return Writer(args.out_no_stat, num_components, OutNoStatFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params) | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def make_all_writer(args, num_components, colocation_ids, word_renderer): |     def make_all_writer(args, num_components, colocation_ids, word_renderer): | ||||||
|         return Writer(args.all, num_components, AllFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, None, args.separator) |         return Writer(args.all, num_components, AllFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, None) | ||||||
| 
 | 
 | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def make_stats_writer(args, num_components, colocation_ids, word_renderer): |     def make_stats_writer(args, num_components, colocation_ids, word_renderer): | ||||||
|         params = Writer.other_params(args) |         params = Writer.other_params(args) | ||||||
|         return Writer(args.stats, num_components, StatsFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params, args.separator) |         return Writer(args.stats, num_components, StatsFormatter(colocation_ids, word_renderer), args.collocation_sentence_map_dest, params) | ||||||
| 
 | 
 | ||||||
|     def __init__(self, file_out, num_components, formatter, collocation_sentence_map_dest, params, separator): |     def __init__(self, file_out, num_components, formatter, collocation_sentence_map_dest, params): | ||||||
|         # TODO FIX THIS |         # TODO FIX THIS | ||||||
|         self.collocation_sentence_map_dest = collocation_sentence_map_dest |         self.collocation_sentence_map_dest = collocation_sentence_map_dest | ||||||
|         if params is None: |         if params is None: | ||||||
| @ -49,7 +49,6 @@ class Writer: | |||||||
|         self.num_components = num_components |         self.num_components = num_components | ||||||
|         self.output_file = file_out |         self.output_file = file_out | ||||||
|         self.formatter = formatter |         self.formatter = formatter | ||||||
|         self.separator = separator |  | ||||||
| 
 | 
 | ||||||
|     def header(self): |     def header(self): | ||||||
|         repeating_cols = self.formatter.header_repeat() |         repeating_cols = self.formatter.header_repeat() | ||||||
| @ -79,7 +78,7 @@ class Writer: | |||||||
|         return sorted(rows, key=key, reverse=self.sort_order) |         return sorted(rows, key=key, reverse=self.sort_order) | ||||||
| 
 | 
 | ||||||
|     def write_header(self, file_handler): |     def write_header(self, file_handler): | ||||||
|         file_handler.write(self.separator.join(self.header()) + "\n") |         file_handler.write(",".join(self.header()) + "\n") | ||||||
| 
 | 
 | ||||||
|     def write_out_worker(self, file_handler, structure, colocation_ids, col_sent_map): |     def write_out_worker(self, file_handler, structure, colocation_ids, col_sent_map): | ||||||
|         rows = [] |         rows = [] | ||||||
| @ -100,16 +99,12 @@ class Writer: | |||||||
|             for words in match.matches: |             for words in match.matches: | ||||||
|                 to_write = [] |                 to_write = [] | ||||||
| 
 | 
 | ||||||
|                 idx = 1 |                 for idx, _comp in enumerate(components): | ||||||
|                 for _comp in components: |                     idx = str(idx + 1) | ||||||
|                     if _comp.idx == '#': |                     if idx not in words: | ||||||
|                         continue |  | ||||||
|                     idx_s = str(idx) |  | ||||||
|                     idx += 1 |  | ||||||
|                     if idx_s not in words: |  | ||||||
|                         to_write.extend([""] * self.formatter.length()) |                         to_write.extend([""] * self.formatter.length()) | ||||||
|                     else: |                     else: | ||||||
|                         to_write.extend(self.formatter.content_repeat(words, match.representations, idx_s, structure.id)) |                         to_write.extend(self.formatter.content_repeat(words, match.representations, idx, structure.id)) | ||||||
| 
 | 
 | ||||||
|                 # make them equal size |                 # make them equal size | ||||||
|                 to_write.extend([""] * (self.num_components * self.formatter.length() - len(to_write))) |                 to_write.extend([""] * (self.num_components * self.formatter.length() - len(to_write))) | ||||||
| @ -126,7 +121,7 @@ class Writer: | |||||||
| 
 | 
 | ||||||
|         if rows != []: |         if rows != []: | ||||||
|             rows = self.sorted_rows(rows) |             rows = self.sorted_rows(rows) | ||||||
|             file_handler.write("\n".join([self.separator.join(row) for row in rows]) + "\n") |             file_handler.write("\n".join([",".join(row) for row in rows]) + "\n") | ||||||
|             file_handler.flush() |             file_handler.flush() | ||||||
| 
 | 
 | ||||||
|     def write_out(self, structures, colocation_ids): |     def write_out(self, structures, colocation_ids): | ||||||
							
								
								
									
src/writerpy (new file, 133 lines)
							| @ -0,0 +1,133 @@ | |||||||
|  | class Writer: | ||||||
|  |     @staticmethod | ||||||
|  |     def other_params(args): | ||||||
|  |         return (args.multiple_output, int(args.sort_by), args.sort_reversed) | ||||||
|  | 
 | ||||||
|  |     @staticmethod | ||||||
|  |     def make_output_writer(args, colocation_ids, word_renderer): | ||||||
|  |         params = Writer.other_params(args) | ||||||
|  |         return Writer(args.out, OutFormatter(colocation_ids, word_renderer), params) | ||||||
|  | 
 | ||||||
|  |     @staticmethod | ||||||
|  |     def make_output_no_stat_writer(args, colocation_ids, word_renderer): | ||||||
|  |         params = Writer.other_params(args) | ||||||
|  |         return Writer(args.out_no_stat, OutNoStatFormatter(colocation_ids, word_renderer), params) | ||||||
|  | 
 | ||||||
|  |     @staticmethod | ||||||
|  |     def make_all_writer(args, colocation_ids, word_renderer): | ||||||
|  |         return Writer(args.all, AllFormatter(colocation_ids, word_renderer), None) | ||||||
|  | 
 | ||||||
|  |     @staticmethod | ||||||
|  |     def make_stats_writer(args, colocation_ids, word_renderer): | ||||||
|  |         params = Writer.other_params(args) | ||||||
|  |         return Writer(args.stats, StatsFormatter(colocation_ids, word_renderer), params) | ||||||
|  | 
 | ||||||
|  |     def __init__(self, file_out, formatter, params): | ||||||
|  |         if params is None: | ||||||
|  |             self.multiple_output = False | ||||||
|  |             self.sort_by = -1 | ||||||
|  |             self.sort_order = None | ||||||
|  |         else: | ||||||
|  |             self.multiple_output = params[0] | ||||||
|  |             self.sort_by = params[1] | ||||||
|  |             self.sort_order = params[2] | ||||||
|  | 
 | ||||||
|  |         self.output_file = file_out | ||||||
|  |         self.formatter = formatter | ||||||
|  | 
 | ||||||
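|  |     # Build the header row: the per-component column block repeated | ||||||
|  |     # MAX_NUM_COMPONENTS times, framed by the ID columns and the | ||||||
|  |     # formatter-specific right-hand columns. | ||||||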
|  |     def header(self): | ||||||
|  |         repeating_cols = self.formatter.header_repeat() | ||||||
|  |         cols = ["C{}_{}".format(i + 1, thd) for i in range(MAX_NUM_COMPONENTS)  | ||||||
|  |                 for thd in repeating_cols] | ||||||
|  | 
 | ||||||
|  |         cols = ["Structure_ID"] + cols + ["Colocation_ID"] | ||||||
|  |         cols += self.formatter.header_right() | ||||||
|  |         return cols | ||||||
|  | 
 | ||||||
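|  |     # Sort rows by the configured column; a negative sort_by (or a | ||||||
|  |     # single row) leaves the order untouched. | ||||||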
|  |     def sorted_rows(self, rows): | ||||||
|  |         if self.sort_by < 0 or len(rows) < 2: | ||||||
|  |             return rows | ||||||
|  | 
 | ||||||
|  |         if len(rows[0]) <= self.sort_by: | ||||||
|  |             logging.warning("Cannot sort by column #{}: rows have only {} columns!".format(self.sort_by, len(rows[0]))) | ||||||
|  |             return rows | ||||||
|  | 
 | ||||||
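|  |         # Probe the first row to pick the sort key: numeric if the | ||||||
|  |         # column parses as an int, case-insensitive string otherwise. | ||||||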
|  |         try: | ||||||
|  |             int(rows[0][self.sort_by]) | ||||||
|  |             def key(row):  | ||||||
|  |                 return int(row[self.sort_by]) | ||||||
|  |         except ValueError: | ||||||
|  |             def key(row):  | ||||||
|  |                 return row[self.sort_by].lower() | ||||||
|  | 
 | ||||||
|  |         return sorted(rows, key=key, reverse=self.sort_order) | ||||||
|  | 
 | ||||||
|  |     def write_header(self, file_handler): | ||||||
|  |         file_handler.write(", ".join(self.header()) + "\n") | ||||||
|  | 
 | ||||||
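|  |     # Emit one row per matched candidate of the given structure, | ||||||
|  |     # padding missing components with empty cells. | ||||||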
|  |     def write_out_worker(self, file_handler, structure, colocation_ids): | ||||||
|  |         rows = [] | ||||||
|  |         components = structure.components | ||||||
|  | 
 | ||||||
|  |         for match in colocation_ids.get_matches_for(structure): | ||||||
|  |             self.formatter.new_match(match) | ||||||
|  | 
 | ||||||
|  |             for words in match.matches: | ||||||
|  |                 to_write = [] | ||||||
|  | 
 | ||||||
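|  |                 # component keys in `words` are 1-based strings | ||||||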
|  |                 for i, _comp in enumerate(components): | ||||||
|  |                     idx = str(i + 1) | ||||||
|  |                     if idx not in words: | ||||||
|  |                         to_write.extend([""] * self.formatter.length()) | ||||||
|  |                     else: | ||||||
|  |                         to_write.extend(self.formatter.content_repeat(words, match.representations, idx, structure.id)) | ||||||
|  | 
 | ||||||
|  |                 # make them equal size | ||||||
|  |                 to_write.extend([""] * (MAX_NUM_COMPONENTS * self.formatter.length() - len(to_write))) | ||||||
|  | 
 | ||||||
|  |                 # structure_id and colocation_id | ||||||
|  |                 to_write = [structure.id] + to_write + [match.match_id] | ||||||
|  | 
 | ||||||
|  |                 # header_right | ||||||
|  |                 to_write.extend(self.formatter.content_right(len(match))) | ||||||
|  |                 rows.append(to_write) | ||||||
|  | 
 | ||||||
|  |                 if self.formatter.group(): | ||||||
|  |                     break | ||||||
|  | 
 | ||||||
|  |         if rows: | ||||||
|  |             rows = self.sorted_rows(rows) | ||||||
|  |             file_handler.write("\n".join([", ".join(row) for row in rows]) + "\n") | ||||||
|  |             file_handler.flush() | ||||||
|  | 
 | ||||||
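|  |     # Top-level entry point: one shared output file, or one file per | ||||||
|  |     # structure (suffixed with its id) when multiple_output is set. | ||||||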
|  |     def write_out(self, structures, colocation_ids): | ||||||
|  |         if self.output_file is None: | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         def fp_close(fp_): | ||||||
|  |             if fp_ != sys.stdout: | ||||||
|  |                 fp_.close() | ||||||
|  | 
 | ||||||
|  |         def fp_open(snum=None): | ||||||
|  |             if snum is None: | ||||||
|  |                 return open(self.output_file, "w") | ||||||
|  |             else: | ||||||
|  |                 return open("{}.{}".format(self.output_file, snum), "w") | ||||||
|  | 
 | ||||||
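|  |         # single-file mode: open once and write one shared header | ||||||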
|  |         if not self.multiple_output: | ||||||
|  |             fp = fp_open() | ||||||
|  |             self.write_header(fp) | ||||||
|  | 
 | ||||||
|  |         for s in structures: | ||||||
|  |             if self.multiple_output: | ||||||
|  |                 fp = fp_open(s.id) | ||||||
|  |                 self.write_header(fp) | ||||||
|  | 
 | ||||||
|  |             self.formatter.set_structure(s) | ||||||
|  |             self.write_out_worker(fp, s, colocation_ids) | ||||||
|  | 
 | ||||||
|  |             if self.multiple_output: | ||||||
|  |                 fp_close(fp) | ||||||
|  | 
 | ||||||
|  |         if not self.multiple_output: | ||||||
|  |             fp_close(fp) | ||||||