-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathjstringstxttranslate.py
More file actions
executable file
·282 lines (226 loc) · 9.71 KB
/
jstringstxttranslate.py
File metadata and controls
executable file
·282 lines (226 loc) · 9.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
#!/bin/sh
'''exec' "$HOME/.local/share/pipx/venvs/ctranslate2/bin/python" "$0" "$@"
'''
# prereq.: pipx install ctranslate2
import sys
import os
#def translate_with_argos(line):
# # https://argos-translate.readthedocs.io/en/latest/source/examples.html
# import argostranslate
# import argostranslate.package
# import argostranslate.translate
# from_code = "ja"
# to_code = "en"
# r = argostranslate.translate.translate(line, from_code, to_code)
# if len(r)<=1:
# # use gtranslate via https://github.com/soimort/translate-shell
# shell_cmd = "trans -b -from ja -to en"
# import subprocess
# r = subprocess.check_output("trans -b -from ja -to en \"" + line + "\"", shell=True, text=True).strip()
# return r
def translate_with_sugoi(line):
    """Translate a single Japanese line to English with the Sugoi v4 model.

    Thin wrapper around translate_with_sugoi_bulk() so the model-loading,
    tokenizing and decoding logic lives in one place instead of being
    duplicated line-for-line (the previous body was a copy of the bulk
    function specialized to a one-element batch).

    Args:
        line: Japanese source text (str).

    Returns:
        The English translation (str), with '<unk>' markers stripped.
    """
    # translate_with_sugoi_bulk asserts len(input) == len(output),
    # so indexing [0] is always safe for a one-element batch.
    return translate_with_sugoi_bulk([line])[0]
def translate_with_sugoi_bulk(raw_list):
    """Translate a batch of Japanese strings to English.

    Uses the Sugoi v4 JA->EN model via CTranslate2:
    https://huggingface.co/entai2965/sugoi-v4-ja-en-ctranslate2

    Args:
        raw_list: list of Japanese source strings.

    Returns:
        list of English translations, one per input, with '<unk>'
        markers stripped.
    """
    import ctranslate2
    import sentencepiece

    # Model locations and compute device ('cuda' also works with a GPU).
    model_path = 'sugoi-v4-ja-en-ctranslate2'
    #model_path = os.path.expanduser('~/.cache/huggingface/hub/models--entai2965--sugoi-v4-ja-en-ctranslate2/snapshots/71d67eb8e73ec2f5aaefc0689e03a4eb843d3a2b')
    sentencepiece_model_path = model_path + '/spm'
    device = 'cpu'
    #device = 'cuda'

    translator = ctranslate2.Translator(model_path, device=device)
    source_tokenizer = sentencepiece.SentencePieceProcessor(sentencepiece_model_path + '/spm.ja.nopretok.model')
    target_tokenizer = sentencepiece.SentencePieceProcessor(sentencepiece_model_path + '/spm.en.nopretok.model')

    # Tokenize every input line into SentencePiece string pieces.
    tokenized_batch = [source_tokenizer.encode(text, out_type=str) for text in raw_list]

    # https://opennmt.net/CTranslate2/python/ctranslate2.Translator.html?#ctranslate2.Translator.translate_batch
    # beam_size=1 would be faster (greedy search,
    # https://github.com/OpenNMT/CTranslate2/blob/master/docs/decoding.md#greedy-search)
    # but beam_size=5 gives better quality.
    translated_batch = translator.translate_batch(source=tokenized_batch, beam_size=5)  #disable_unk=True
    assert(len(raw_list) == len(translated_batch))

    # Decode the best hypothesis of each result back to plain text.
    return [
        target_tokenizer.decode(result.hypotheses[0]).replace('<unk>', '')
        for result in translated_batch
    ]
def translate_with_sugoi_bulk_shortest(raw_list):
    """Translate a batch and, for each line, keep the shortest candidate.

    Generates several beam-search hypotheses per sentence and returns the
    shortest decoded candidate for each — useful when translations must fit
    tight length budgets (the caller patches text back at fixed addresses).

    Args:
        raw_list: list of Japanese source strings.

    Returns:
        list of English translations (shortest candidate per input), with
        '<unk>' markers and the '⁇' replacement glyph removed.
    """
    import ctranslate2
    import sentencepiece
    model_path = os.path.expanduser('~/.cache/huggingface/hub/models--entai2965--sugoi-v4-ja-en-ctranslate2/snapshots/71d67eb8e73ec2f5aaefc0689e03a4eb843d3a2b')
    sentencepiece_model_path = model_path + '/spm'
    device = 'cpu'
    translator = ctranslate2.Translator(model_path, device=device)
    tokenizer_for_source_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path + '/spm.ja.nopretok.model')
    tokenizer_for_target_language = sentencepiece.SentencePieceProcessor(sentencepiece_model_path + '/spm.en.nopretok.model')
    # Tokenize
    tokenized_batch = [tokenizer_for_source_language.encode(text, out_type=str) for text in raw_list]
    # Translate
    # num_hypotheses determines how many candidates are returned per source sentence;
    # beam_size must be >= num_hypotheses
    results = translator.translate_batch(
        source=tokenized_batch,
        beam_size=5,
        num_hypotheses=5
    )
    final_translations = []
    # Decode and select shortest
    for result in results:
        # 1. Decode all candidates for this sentence, stripping both the
        #    '<unk>' token and the '⁇' glyph that can appear for characters
        #    outside the target vocabulary (resolves the old TODO).
        candidates = [
            tokenizer_for_target_language.decode(h).replace('<unk>', '').replace('⁇', '')
            for h in result.hypotheses
        ]
        # 2. Pick the candidate with the minimum length (total characters)
        final_translations.append(min(candidates, key=len))
    return final_translations
def has_no_kanas(text):
    """Return True if *text* contains no Hiragana, Katakana, or ideographic space.

    Ranges treated as kana:
      Hiragana: U+3040 - U+309F
      Katakana: U+30A0 - U+30FF
    U+3000 (ideographic space) also counts as Japanese text here.
    """
    return not any(
        0x3040 <= ord(ch) <= 0x30FF or ord(ch) == 0x3000
        for ch in text
    )
def is_sjis_single_byte(char):
    """Return True if *char* encodes to exactly 1 byte in Shift-JIS (cp932).

    ASCII and half-width katakana/punctuation — e.g. '｡' (U+FF61, 65377) —
    are single-byte in cp932, while full-width characters take 2 bytes.
    Characters cp932 cannot encode at all are reported as not single-byte.
    """
    try:
        encoded = char.encode('cp932')
    except UnicodeEncodeError:
        return False
    return len(encoded) == 1
def output_translated_file(input_file_str, output_file):
    """Translate a jstrings-style dump and write an annotated result.

    Input lines look like "<hex-address> <japanese text>".  Blank lines and
    lines starting with '#' or ';' are copied through unchanged.  Each
    translatable line is emitted as three output lines:
        ; <address> <original text>     (original, commented out)
        <address> <translation>
        <blank separator line>
    Lines are buffered and translated in batches of BULK_SIZE so the model
    is invoked once per batch instead of once per line.

    NOTE(review): the previous version also computed per-character SJIS
    chunk splits into `temp_chunks`, but never used the result; that dead
    work has been removed.

    Args:
        input_file_str: full contents of the input dump (str).
        output_file: writable text file object; closed on completion.
    """
    BULK_SIZE = 100  # lines per translation batch (1000 takes too much RAM)
    curr_lines_list = []
    curr_addr_list = []

    def _flush():
        # Translate the buffered batch and write the annotated output.
        translated_lines = translate_with_sugoi_bulk_shortest(curr_lines_list)
        for j, translated_line in enumerate(translated_lines):
            output_file.write("; " + curr_addr_list[j] + " " + curr_lines_list[j] + "\n")  # original line commented
            output_file.write(curr_addr_list[j] + " " + translated_line + "\n")  # translated line
            output_file.write("\n")  # empty line separator
        curr_lines_list.clear()
        curr_addr_list.clear()

    for line in input_file_str.splitlines():
        if line.strip() == "" or line.startswith("#") or line.startswith(";"):
            # Keep empty lines and comments.  Bug fix: re-add the newline
            # that splitlines() stripped, so pass-through lines don't run
            # together in the output.
            output_file.write(line + "\n")
            continue
        try:
            address, text = line.split(" ", 1)
            int(address, 16)  # validate the hex address field
        except ValueError:  # narrowed from a bare except
            # invalid address / no "<hex> <text>" shape: report and skip
            print("bad line:" + line)
            continue
        # Bug fix: the old code dropped the current line whenever the buffer
        # was flushed (the flush branch never appended it); now every valid
        # line is buffered first, then the buffer is flushed when full.
        curr_lines_list.append(text)
        curr_addr_list.append(address)
        if len(curr_lines_list) >= BULK_SIZE:
            _flush()
    # Bug fix: flush whatever remains in the buffer at end of input
    # (previously the tail of the file was silently discarded).
    if curr_lines_list:
        _flush()
    output_file.close()
# end of output_translated_file()
if __name__ == "__main__":
    import argparse
    import io
    import sys
    parser = argparse.ArgumentParser(description='translate jstrings txt dumps')
    parser.add_argument('infile', nargs='?', default="-", help="input file, defaults to stdin if unspecified. Supports passing urls.")
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help="output file, defaults to stdout if unspecified")
    args = parser.parse_args()
    if args.infile == "-":
        infile = sys.stdin
        sys.stderr.write("reading from stdin...\n")
    elif args.infile.startswith(("http://", "ftp://", "https://")):  # TODO: proper URL validation
        from urllib.request import urlopen
        # Bug fix: the old code fetched the URL and then immediately tried
        # to open() the URL string as a local path, which cannot succeed.
        # Wrap the binary response in a text decoder instead (the intent of
        # the commented-out codecs.getreader line).
        infile = io.TextIOWrapper(urlopen(args.infile), encoding="utf-8", errors="ignore")
    else:
        # Read local files as UTF-8 too, consistent with the URL branch,
        # instead of relying on the locale's default encoding.
        infile = open(args.infile, encoding="utf-8", errors="ignore")
    input_file_str = infile.read()
    output_translated_file(input_file_str, args.outfile)