Formatted computer + added correct forms of accentuation

Luka 2018-08-17 10:20:55 +02:00
parent db19dade4f
commit 4ca872dc63
28 changed files with 12 additions and 4 deletions

.gitignore vendored (1 change) Normal file → Executable file

@@ -98,3 +98,4 @@ grid_results/
.idea/
cnn/word_accetuation/svm/data/
data_merge.ipynb
+data_merge.py

LICENSE (0 changes) Normal file → Executable file
README.md (0 changes) Normal file → Executable file
__init__.py (0 changes) Normal file → Executable file
accentuate.py (0 changes) Normal file → Executable file
accentuate_connected_text.py (0 changes) Normal file → Executable file
hyphenation (0 changes) Normal file → Executable file
learn_location_weights.py (0 changes) Normal file → Executable file
notes (0 changes) Normal file → Executable file
prepare_data.py (0 changes) Normal file → Executable file
preprocessed_data/environment.pkl (0 changes) Normal file → Executable file
requirements.txt (0 changes) Normal file → Executable file
run_multiple_files.py (0 changes) Normal file → Executable file
sloleks_accentuation.py (0 changes) Normal file → Executable file

sloleks_accentuation2.py (2 changes) Normal file → Executable file

@@ -16,7 +16,7 @@ content = data._read_content('data/SlovarIJS_BESEDE_utf8.lex')
dictionary, max_word, max_num_vowels, vowels, accented_vowels = data._create_dict(content)
feature_dictionary = data._create_slovene_feature_dictionary()
syllable_dictionary = data._create_syllables_dictionary(content, vowels)
-accented_vowels = ['ŕ', 'á', 'ä', 'é', 'ë', 'ě', 'í', 'î', 'ó', 'ô', 'ö', 'ú', 'ü']
+accented_vowels = ['ŕ', 'á', 'à', 'é', 'è', 'ê', 'í', 'ì', 'ó', 'ô', 'ò', 'ú', 'ù']
data = Data('l', shuffle_all_inputs=False)
letter_location_model, syllable_location_model, syllabled_letters_location_model = data.load_location_models(
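The replaced line is the substance of the commit: characters such as ä, ë, ě, ö and ü do not occur in Slovene stress marking, while the new list is the usual inventory of Slovene stress-marked vowels (acute and grave forms, circumflex ê and ô, plus accented r). A minimal sketch of how such an inventory is typically used; is_accented is a hypothetical helper, not code from this repository:

ACCENTED_VOWELS = ['ŕ', 'á', 'à', 'é', 'è', 'ê', 'í', 'ì', 'ó', 'ô', 'ò', 'ú', 'ù']

def is_accented(word):
    # True if any character of the word is a Slovene stress-marked vowel
    return any(ch in ACCENTED_VOWELS for ch in word.lower())

print(is_accented('letálo'))  # True: á marks the stressed syllable
print(is_accented('letalo'))  # False: unaccented form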

sloleks_accentuation2_tab2xml.py (3 changes) Normal file → Executable file

@@ -59,7 +59,8 @@ start_timer = time.time()
print('Copy initialization complete')
with open("data/new_sloleks/final_sloleks.xml", "ab") as myfile:
# myfile2 = open('data/new_sloleks/p' + str(iter_index) + '.xml', 'ab')
-for event, element in etree.iterparse('data/Sloleks_v1.2.xml', tag="LexicalEntry", encoding="UTF-8", remove_blank_text=True):
+for event, element in etree.iterparse('data/new_sloleks/final_sloleks_read.xml', tag="LexicalEntry", encoding="UTF-8", remove_blank_text=True):
+# for event, element in etree.iterparse('data/Sloleks_v1.2.xml', tag="LexicalEntry", encoding="UTF-8", remove_blank_text=True):
# if word_glob_num >= word_limit:
# myfile2.close()
# myfile2 = open('data/new_sloleks/p' + str(iter_index) + '.xml', 'ab')
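The change points the loop at the pre-copied data/new_sloleks/final_sloleks_read.xml instead of the original data/Sloleks_v1.2.xml. With an input this large, iterparse only stays memory-flat if each processed element is released; a sketch of the standard lxml pattern this loop depends on (the processing body is elided, not the repository's exact code):

from lxml import etree

for event, element in etree.iterparse('data/new_sloleks/final_sloleks_read.xml',
                                      tag='LexicalEntry', encoding='UTF-8',
                                      remove_blank_text=True):
    # ... accentuate and re-serialize this <LexicalEntry> here ...
    element.clear()  # free the processed entry's subtree
    while element.getprevious() is not None:
        del element.getparent()[0]  # drop already-handled siblings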

sloleks_accetuation.ipynb (0 changes) Normal file → Executable file

sloleks_accetuation2.ipynb (4 changes) Normal file → Executable file

@@ -219,7 +219,6 @@
{
"ename": "IndexError",
"evalue": "index 10 is out of bounds for axis 0 with size 10",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
@@ -228,7 +227,8 @@
"\u001b[0;32m~/Developement/accetuation/prepare_data.py\u001b[0m in \u001b[0;36mget_ensemble_location_predictions\u001b[0;34m(input_words, letter_location_model, syllable_location_model, syllabled_letters_location_model, letter_location_co_model, syllable_location_co_model, syllabled_letters_location_co_model, dictionary, max_word, max_num_vowels, vowels, accented_vowels, feature_dictionary, syllable_dictionary)\u001b[0m\n\u001b[1;32m 1465\u001b[0m \u001b[0mletter_location_co_predictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mletter_location_co_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict_generator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgenerator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1466\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1467\u001b[0;31m \u001b[0mletter_location_co_predictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreverse_predictions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mletter_location_co_predictions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_words\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvowels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1468\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1469\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mData\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m's'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshuffle_all_inputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconvert_multext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreverse_inputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Developement/accetuation/prepare_data.py\u001b[0m in \u001b[0;36mreverse_predictions\u001b[0;34m(self, predictions, words, vowels)\u001b[0m\n\u001b[1;32m 1503\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1504\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mword_len\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1505\u001b[0;31m \u001b[0mnew_predictions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mword_len\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0;36m1\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1506\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1507\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnew_predictions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mIndexError\u001b[0m: index 10 is out of bounds for axis 0 with size 10"
-]
+],
+"output_type": "error"
}
],
"source": [

sloleks_xml_checker.py (0 changes) Normal file → Executable file

test_data/accented_connected_text (0 changes) Normal file → Executable file
test_data/accented_data (0 changes) Normal file → Executable file
test_data/original_connected_text (0 changes) Normal file → Executable file
test_data/unaccented_dictionary (0 changes) Normal file → Executable file
tex_hyphenation.py (0 changes) Normal file → Executable file

text2SAMPA.py (6 changes) Normal file → Executable file

@@ -86,6 +86,7 @@ def create_syllables(word, vowels):
def convert_to_SAMPA(word):
word = word.lower()
syllables = create_syllables(word, vowels)
+letters_in_stressed_syllable = [False] * len(word)
# print(syllables)
@@ -152,6 +153,11 @@ def convert_to_SAMPA(word):
+word = list(''.join(word))
+test_word = ''.join(word)
+test_word = test_word.replace('"', '').replace(':', '')
+if len(test_word) <= 1:
+    return ''.join(word)
previous_letter_i = -1
letter_i = 0
next_letter_i = 1
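The added early return strips SAMPA punctuation before measuring length: '"' marks stress and ':' marks vowel length, so a transcription can be several characters long while holding a single real letter, and the previous/next letter indices set up below would step out of bounds on such input. A small illustration with a hypothetical value:

stripped = '"a:'.replace('"', '').replace(':', '')
print(len('"a:'))     # 3 raw characters
print(len(stripped))  # 1 letter -> convert_to_SAMPA returns early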

workbench.py (0 changes) Normal file → Executable file
workbench.sh (0 changes) Normal file → Executable file
workbench.xrsl (0 changes) Normal file → Executable file