From 96c9e6db534df6580e92a7179031279c51da0cd7 Mon Sep 17 00:00:00 2001 From: Arity-T Date: Wed, 14 May 2025 11:57:52 +0300 Subject: [PATCH] =?UTF-8?q?=D0=9F=D1=80=D0=B5=D0=B4=D0=BE=D0=B1=D1=80?= =?UTF-8?q?=D0=B0=D0=B1=D0=BE=D1=82=D0=BA=D0=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- lab3/programm/grammar.py | 5 +++-- lab3/programm/main.py | 21 +++++++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/lab3/programm/grammar.py b/lab3/programm/grammar.py index 70d7bf0..05fd113 100644 --- a/lab3/programm/grammar.py +++ b/lab3/programm/grammar.py @@ -286,8 +286,9 @@ class Grammar: return "\n".join(result) - def analyze(self, string: str) -> list[int]: - input_tokens = string.split() + ["$"] + def analyze(self, input_tokens: list[str]) -> list[int]: + input_tokens = input_tokens.copy() + input_tokens += ["$"] input_pos = 0 # Инициализируем стек с терминальным символом и начальным символом diff --git a/lab3/programm/main.py b/lab3/programm/main.py index 5d8b1f4..973732e 100644 --- a/lab3/programm/main.py +++ b/lab3/programm/main.py @@ -39,6 +39,11 @@ def load_grammar(filename: str = "grammar.txt") -> Grammar | None: return None +def tokenize_string(input_string: str) -> list[str]: + input_string = input_string.replace(",", " , ").replace(".", " . ")
return input_string.split() + + + def check_string(grammar: Grammar | None, input_string: str) -> None: if not grammar: print("Ошибка: Грамматика не загружена") @@ -46,7 +51,18 @@ def check_string(grammar: Grammar | None, input_string: str) -> None: print(f"Проверка строки: '{input_string}'") try: - parse_result = grammar.analyze(input_string) + input_tokens = tokenize_string(input_string) + + if not input_tokens: + parse_result = grammar.analyze(input_tokens) + else: + try: + input_tokens[0] = input_tokens[0][0].lower() + input_tokens[0][1:] + parse_result = grammar.analyze(input_tokens) + except ValueError as e: + input_tokens[0] = input_tokens[0][0].upper() + input_tokens[0][1:] + parse_result = grammar.analyze(input_tokens) + print(f"Результат: Строка соответствует грамматике") print(f"Применённые правила: {parse_result}") @@ -71,7 +87,8 @@ def check_string(grammar: Grammar | None, input_string: str) -> None: def post_process_string(string: str) -> str: - string = string.capitalize() + if string: + string = string[0].upper() + string[1:] string = string.replace(" ,", ",") string = string.replace(" .", ".") string = string.replace(",.", ".")