From dfbc1df9a95408687918ecc2bc6774d4646dd4f9 Mon Sep 17 00:00:00 2001
From: =?utf8?q?J=C3=A9r=C3=B4me=20Benoit?=
Date: Mon, 26 Feb 2018 14:46:26 +0100
Subject: [PATCH] Avoid filling token values twice.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The scanner now writes characters only into tokenValue while scanning and
copies the finished value once into token[tokenFound].value in the MOT and
MOTCLE accepting states, instead of filling both buffers on every transition.

Signed-off-by: Jérôme Benoit
---
 lexer/lexical_analyzer.c   | 11 ++++-------
 lexer/main.c               |  1 +
 lexer/syntactic_analyzer.c |  3 +++
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/lexer/lexical_analyzer.c b/lexer/lexical_analyzer.c
index 8805a54..32efd0b 100644
--- a/lexer/lexical_analyzer.c
+++ b/lexer/lexical_analyzer.c
@@ -69,7 +69,6 @@ init:
 		goto S1SS1;
 	}
 	if (isAlphaNum()) {
-		token[tokenFound].value[i] = c;
 		tokenValue[i] = c;
 		i++;
 		c = fgetwc(source);
@@ -82,13 +81,11 @@ init:
 
 MC1:
 	if (c == L'A' && !wcscmp(fgetws(m, 6, source), L"uteur")) {
-		wcscpy((wchar_t*)token[tokenFound].value, L">Auteur");
 		wcscpy((wchar_t*)tokenValue, L">Auteur");
 		c = fgetwc(source);
 		goto MC2;
 	}
 	if (c == L'T' && !wcscmp(fgetws(m, 5, source), L"itre")) {
-		wcscpy((wchar_t*)token[tokenFound].value, L">Titre");
 		wcscpy((wchar_t*)tokenValue, L">Titre");
 		c = fgetwc(source);
 		goto MC2;
@@ -121,7 +118,6 @@ SSECTION:
 
 M1:
 	if (isAlphaNum()) {
-		token[tokenFound].value[i] = c;
 		tokenValue[i] = c;
 		i++;
 		c = fgetwc(source);
@@ -142,7 +138,6 @@ initLV1:
 		goto initLV1LV2;
 	}
 	if (isAlphaNum()) {
-		token[tokenFound].value[i] = c;
 		tokenValue[i] = c;
 		i++;
 		c = fgetwc(source);
@@ -187,8 +182,9 @@ NPARA:
 	return EXIT_SUCCESS;
 
 MOT:
-	tokenValue[i] = 0;
 	tokenType = MOT;
+	tokenValue[i] = 0;
+	wcscpy((wchar_t*)token[tokenFound].value, (wchar_t*)tokenValue);
 	return EXIT_SUCCESS;
 
 MC2:
@@ -199,6 +195,7 @@ MC2:
 
 MOTCLE:
 	tokenType = MOTCLE;
+	wcscpy((wchar_t*)token[tokenFound].value, (wchar_t*)tokenValue);
 	return EXIT_SUCCESS;
 
 FIN:
@@ -210,7 +207,7 @@ error:
 		fwprintf(stderr, L"%s error with token type: %s and value: %ls\n",
 				__func__,
 				tokenTypestr[tokenType],
-				token[tokenFound].value);
+				tokenValue);
 	} else {
 		fwprintf(stderr, L"%s error with token type: %s\n",
 				__func__,
diff --git a/lexer/main.c b/lexer/main.c
index 40550f5..24f0a51 100644
--- a/lexer/main.c
+++ b/lexer/main.c
@@ -15,6 +15,7 @@ void do_lexical_analysis() {
 		scanner();
 		wprint_token(target);
 		token[tokenFound].type = tokenTypestr[tokenType];
+		wcscpy((wchar_t*)tokenValue, (wchar_t*)token[tokenFound].value);
 		tokenFound++;
 	} while (tokenType != FIN); // tant que la fin du fichier n'est pas atteinte
 }
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index ae3251a..b9787df 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -216,6 +216,9 @@ void analyze_AXIOME() {
 	if (target != NULL) {
 		fputws(L"\n\n", target);
 	}
+	#if PRINT_TOKEN
+	wprint_ctoken(stdout);
+	#endif /* PRINT_TOKEN */
 	if (tokenType != FIN) {
 		fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
 		fflush(stderr);
-- 
2.34.1
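
Note (not part of the patch above): below is a minimal, self-contained sketch
of the "fill tokenValue once, copy it into the token table at the accepting
state" pattern this change adopts. The declarations here (struct token_entry,
TOKEN_MAX, VALUE_MAX, scan_word) are simplified illustrative stand-ins, not
the project's real definitions.

#include <locale.h>
#include <stdio.h>
#include <wchar.h>
#include <wctype.h>

/* Simplified stand-ins for the lexer's globals (token[], tokenFound,
 * tokenValue); names and sizes are illustrative only. */
#define TOKEN_MAX 500
#define VALUE_MAX 50

struct token_entry {
	const char *type;
	wchar_t value[VALUE_MAX];
};

static struct token_entry token[TOKEN_MAX];
static unsigned int tokenFound;
static wchar_t tokenValue[VALUE_MAX];

/* Scan one alphanumeric word: fill only tokenValue while reading, then copy
 * the finished string into the token table once, at the accepting state,
 * instead of writing both buffers on every character. */
static int scan_word(FILE *in)
{
	size_t i = 0;
	wint_t c = fgetwc(in);

	while (c != WEOF && !iswalnum(c))	/* skip separators */
		c = fgetwc(in);
	while (c != WEOF && iswalnum(c) && i < VALUE_MAX - 1) {
		tokenValue[i++] = (wchar_t)c;	/* single write per character */
		c = fgetwc(in);
	}
	if (i == 0)
		return 0;			/* end of input */

	tokenValue[i] = L'\0';			/* accepting state: terminate... */
	token[tokenFound].type = "MOT";
	wcscpy(token[tokenFound].value, tokenValue);	/* ...and copy once */
	tokenFound++;
	return 1;
}

int main(void)
{
	setlocale(LC_ALL, "");
	while (tokenFound < TOKEN_MAX && scan_word(stdin))
		wprintf(L"%s: %ls\n", token[tokenFound - 1].type,
				token[tokenFound - 1].value);
	return 0;
}

With this shape, the value is available in one place until the token is
accepted, which is also why the error path in the last lexical_analyzer.c
hunk can report tokenValue directly.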