Cleanup and standardize string delimiters
Removed unused imports from the dictionary module and standardized quote usage for string literals in the transformer module. These changes promote consistency and reduce clutter, making the codebase cleaner and more maintainable. Also includes minor formatting improvements for readability.
This commit is contained in:
parent 6add03fe4e
commit f61f72cb1b
2 changed files with 6 additions and 6 deletions
@@ -1,6 +1,3 @@
-from pathlib import Path
-import importlib
-
 dictionary = {
     # Conditionals
     "wenn": "if",
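For context, the mapping this file builds is what the transformer below consults token by token. A minimal sketch of its shape after the cleanup; only the "wenn": "if" entry is visible in the diff, so every other entry here is an illustrative assumption:

    # Sketch of the German-to-Python keyword mapping (assumed shape).
    # Only "wenn": "if" appears in the diff; the remaining entries
    # are hypothetical examples of the same pattern.
    dictionary = {
        # Conditionals
        "wenn": "if",
        "sonst": "else",
        # Loops (assumed)
        "solange": "while",
    }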
@@ -5,6 +5,7 @@ from io import BytesIO
 from .dictionary import dictionary
 
+
 def translate_german_keywords(tokens):
     for token in tokens:
         # Translate German keywords to English

@@ -13,9 +14,10 @@ def translate_german_keywords(tokens):
         else:
             yield token
 
+
 def parse_german_code(german_code):
     # Convert the German code into bytes for tokenization
-    bytes_code = bytes(german_code, 'utf-8')
+    bytes_code = bytes(german_code, "utf-8")
 
     # Tokenize the German code
     tokens = tokenize.tokenize(BytesIO(bytes_code).readline)

@@ -24,14 +26,15 @@ def parse_german_code(german_code):
     english_tokens = translate_german_keywords(tokens)
 
     # Detokenize back to a code string in English/Python
-    python_code_str = tokenize.untokenize(english_tokens).decode('utf-8')
+    python_code_str = tokenize.untokenize(english_tokens).decode("utf-8")
 
     # Return the compiled Python code object
     return python_code_str
 
+
 def prepare_builtin_overrides():
     import sys
 
     original_json = sys.modules["json"]
     sys.modules["sysjson"] = original_json
     del sys.modules["json"]
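The quote changes above sit inside a tokenize round-trip: the source is read as bytes, tokenized, German NAME tokens are swapped for their English counterparts, and the token stream is serialized back to source text. Below is a self-contained sketch of that pipeline using only the stdlib tokenize API; translate_keywords and the two-entry KEYWORDS mapping are illustrative stand-ins for the project's translate_german_keywords and dictionary, not its actual code:

    import tokenize
    from io import BytesIO

    KEYWORDS = {"wenn": "if", "sonst": "else"}  # stand-in mapping

    def translate_keywords(tokens):
        # Rewrite NAME tokens that match a German keyword; every
        # other token passes through unchanged.
        for tok in tokens:
            if tok.type == tokenize.NAME and tok.string in KEYWORDS:
                yield tok._replace(string=KEYWORDS[tok.string])
            else:
                yield tok

    def translate_source(german_code):
        # bytes -> token stream -> translated stream -> source string,
        # the same round-trip parse_german_code performs.
        readline = BytesIO(german_code.encode("utf-8")).readline
        tokens = translate_keywords(tokenize.tokenize(readline))
        return tokenize.untokenize(tokens).decode("utf-8")

    print(translate_source("wenn x > 0:\n    pass\nsonst:\n    pass\n"))

tokenize.tokenize emits an ENCODING token first, which is why untokenize hands back bytes and the final decode("utf-8") mirrors the line changed in this commit.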
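prepare_builtin_overrides in the last hunk leans on the fact that import consults the sys.modules cache before the filesystem: re-registering the already-loaded stdlib json under the sysjson alias and deleting the original key forces the next import json to resolve afresh (presumably to an override module; the diff does not show that target). A minimal standalone sketch of the same aliasing pattern:

    import sys
    import json  # ensure the stdlib module is loaded and cached

    # Alias the cached stdlib module, then evict the original key
    # so the next `import json` performs a fresh lookup.
    sys.modules["sysjson"] = sys.modules["json"]
    del sys.modules["json"]

    import sysjson                      # served straight from the cache
    print(sysjson.dumps({"ok": True}))  # stdlib json, under its alias

    import json                         # re-imported from disk
    print(json is sysjson)              # False: a fresh module object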