Clean up and standardize string delimiters

Removed unused imports from the dictionary initialization and standardized string literals on double quotes in the transformer module. These changes promote consistency and reduce clutter, keeping the codebase cleaner and more maintainable. The commit also includes minor formatting improvements for readability.
Kumi 2024-01-14 21:43:26 +01:00
parent 6add03fe4e
commit f61f72cb1b
Signed by: kumi
GPG key ID: ECBCC9082395383F
2 changed files with 6 additions and 6 deletions

Dictionary module:

@@ -1,6 +1,3 @@
-from pathlib import Path
-import importlib
 dictionary = {
     # Conditionals
     "wenn": "if",

Transformer module:

@@ -5,6 +5,7 @@ from io import BytesIO
 from .dictionary import dictionary
 def translate_german_keywords(tokens):
     for token in tokens:
         # Translate German keywords to English
@@ -13,9 +14,10 @@ def translate_german_keywords(tokens):
         else:
             yield token
 def parse_german_code(german_code):
     # Convert the German code into bytes for tokenization
-    bytes_code = bytes(german_code, 'utf-8')
+    bytes_code = bytes(german_code, "utf-8")
     # Tokenize the German code
     tokens = tokenize.tokenize(BytesIO(bytes_code).readline)
@@ -24,11 +26,12 @@ def parse_german_code(german_code):
     english_tokens = translate_german_keywords(tokens)
     # Detokenize back to a code string in English/Python
-    python_code_str = tokenize.untokenize(english_tokens).decode('utf-8')
+    python_code_str = tokenize.untokenize(english_tokens).decode("utf-8")
     # Return the compiled Python code object
     return python_code_str
 def prepare_builtin_overrides():
     import sys
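
For context, the two functions touched in this file form a small translation pipeline: parse_german_code tokenizes the German-keyword source, translate_german_keywords swaps recognized keywords using the dictionary, and tokenize.untokenize rebuilds ordinary Python source. Below is a minimal, self-contained sketch of that flow, not the module's actual implementation: the inline dictionary (only the "wenn": "if" entry appears in this diff) and the NAME-token matching rule are assumptions, since those lines fall outside the hunks shown.

import tokenize
from io import BytesIO

# Illustrative stand-in for the dictionary module; only "wenn" -> "if" is shown in the diff above.
dictionary = {"wenn": "if"}

def translate_german_keywords(tokens):
    for token in tokens:
        # Assumed rule: rewrite NAME tokens that appear in the keyword dictionary
        if token.type == tokenize.NAME and token.string in dictionary:
            yield token._replace(string=dictionary[token.string])
        else:
            yield token

def parse_german_code(german_code):
    # Convert the German code into bytes for tokenization
    bytes_code = bytes(german_code, "utf-8")
    # Tokenize, translate the keywords, and detokenize back to English/Python source
    tokens = tokenize.tokenize(BytesIO(bytes_code).readline)
    english_tokens = translate_german_keywords(tokens)
    return tokenize.untokenize(english_tokens).decode("utf-8")

exec(parse_german_code("wenn True:\n    print('hallo')\n"))  # prints: hallo

Working on tokens rather than raw text keeps string literals and comments containing German words untouched; in this sketch only standalone NAME tokens that match a dictionary key are rewritten.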