From: Jérôme Benoit
Date: Sun, 25 Feb 2018 14:58:55 +0000 (+0100)
Subject: Fix the syntactic analyzer for real.
X-Git-Url: https://git.piment-noir.org/?a=commitdiff_plain;h=15ad4b5ab7b62ce2e966730ccd1932877d658b66;p=TP_AL_C.git
Fix the syntactic analyzer for real.
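
The parser called scanner() before printing or emitting the token it
was looking at, so every rule matched on one token and wrote out the
next one. Each rule now emits the current token first, then calls
scanner() to fetch the lookahead, and main.c primes the first token
before analyze_AXIOME(). The scanner also keeps a NUL-terminated copy
of the current token text in the new tokenValue buffer, which the
parser and the new wprint_ctoken() helper read instead of the token[]
history array.

The corrected pattern, simplified from the analyze_TEXT() hunk below
(error handling elided):

    if (tokenType == MOT) {
        wprint_ctoken(stdout);          /* print the current token */
        if (target != NULL) {
            fputws((const wchar_t*)tokenValue, target); /* emit its value */
            fputws(L" ", target);
        }
        scanner();                      /* then advance to the lookahead */
        analyze_TEXT();                 /* and recurse on the rest */
    }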
Signed-off-by: Jérôme Benoit
---
diff --git a/lexer/global_vars.c b/lexer/global_vars.c
index bec8942..3128c01 100644
--- a/lexer/global_vars.c
+++ b/lexer/global_vars.c
@@ -2,5 +2,6 @@
FILE *source = NULL, *target = NULL;
struct token_s token[TOKEN_MAX] = {{NULL, {0}}};
+wint_t tokenValue[50] = {0};
unsigned int tokenFound = 0;
const char* tokenTypestr[] = { "MOTCLE", "SECTION", "SSECTION", "NPARA", "MOT", "FIN" };
diff --git a/lexer/global_vars.h b/lexer/global_vars.h
index 6e7ddea..40eddfb 100644
--- a/lexer/global_vars.h
+++ b/lexer/global_vars.h
@@ -15,6 +15,7 @@ struct token_s {
extern struct token_s token[TOKEN_MAX];
extern wint_t c;
+extern wint_t tokenValue[50];
extern unsigned int tokenFound;
extern enum TokenType {
diff --git a/lexer/lexical_analyzer.c b/lexer/lexical_analyzer.c
index 56415a0..0d6a728 100644
--- a/lexer/lexical_analyzer.c
+++ b/lexer/lexical_analyzer.c
@@ -47,6 +47,7 @@ static bool isEOF() {
}
int scanner() {
+ tokenValue[0] = 0;
unsigned int i = 0;
wchar_t m[6];
@@ -69,6 +70,7 @@ init:
}
if (isAlphaNum()) {
token[tokenFound].value[i] = c;
+ tokenValue[i] = c;
i++;
c = fgetwc(source);
goto M1;
@@ -81,11 +83,13 @@ init:
MC1:
if (c == L'A' && !wcscmp(fgetws(m, 6, source), L"uteur")) {
wcscpy((wchar_t*)token[tokenFound].value, L">Auteur");
+ wcscpy((wchar_t*)tokenValue, L">Auteur");
c = fgetwc(source);
goto MC2;
}
if (c == L'T' && !wcscmp(fgetws(m, 5, source), L"itre")) {
wcscpy((wchar_t*)token[tokenFound].value, L">Titre");
+ wcscpy((wchar_t*)tokenValue, L">Titre");
c = fgetwc(source);
goto MC2;
}
@@ -118,6 +122,7 @@ SSECTION:
M1:
if (isAlphaNum()) {
token[tokenFound].value[i] = c;
+ tokenValue[i] = c;
i++;
c = fgetwc(source);
goto M1;
@@ -138,6 +143,7 @@ initLV1:
}
if (isAlphaNum()) {
token[tokenFound].value[i] = c;
+ tokenValue[i] = c;
i++;
c = fgetwc(source);
goto M1;
@@ -181,6 +187,8 @@ NPARA:
return EXIT_SUCCESS;
MOT:
+ token[tokenFound].value[i] = 0;
+ tokenValue[i] = 0;
tokenType = MOT;
return EXIT_SUCCESS;
diff --git a/lexer/main.c b/lexer/main.c
index 7c31ee3..40550f5 100644
--- a/lexer/main.c
+++ b/lexer/main.c
@@ -23,6 +23,7 @@ void do_syntactic_analysis() {
fputws(L"\n\n",
target);
c = fgetwc(source); // lecture du premier caractere
+ scanner();
analyze_AXIOME();
fputws(L"\n", target);
}
diff --git a/lexer/print_helper.c b/lexer/print_helper.c
index 630985b..40d0523 100644
--- a/lexer/print_helper.c
+++ b/lexer/print_helper.c
@@ -1,5 +1,6 @@
#include <stdio.h>
#include <stdarg.h>
+#include <wchar.h>
#include "print_helper.h"
#include "global_vars.h"
@@ -61,3 +62,12 @@ void wprint_token(FILE* out_file) {
}
fflush(out_file);
}
+
+void wprint_ctoken(FILE* out_file) {
+ if (tokenType == MOT || tokenType == MOTCLE) {
+ fwprintf(out_file, L"%20s: %ls\n", tokenTypestr[tokenType], tokenValue);
+ } else {
+ fwprintf(out_file, L"%20s\n", tokenTypestr[tokenType]);
+ }
+ fflush(out_file);
+}
diff --git a/lexer/print_helper.h b/lexer/print_helper.h
index 080ea9d..d5cc42a 100644
--- a/lexer/print_helper.h
+++ b/lexer/print_helper.h
@@ -12,5 +12,6 @@ void wpr_warning(const wchar_t *format, ...);
void wpr_error(const wchar_t *format, ...);
void wprint_token(FILE* out_file);
+void wprint_ctoken(FILE* out_file);
#endif /* PRINT_HELPER_H_ */
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index ddef90b..65997dd 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -8,7 +8,7 @@
#include "lexical_analyzer.h"
#include "print_helper.h"
-#define PRINT_TOKEN 0
+#define PRINT_TOKEN 1
/* Syntactic analyzer functions implementation */
@@ -19,16 +19,14 @@ static void analyze_TEXT() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOT) {
- scanner();
+ #if PRINT_TOKEN
+ wprint_ctoken(stdout);
+ #endif
if (target != NULL) {
- fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws((const wchar_t*)tokenValue, target);
fputws(L" ", target);
}
- #if PRINT_TOKEN
- wprint_token(stdout);
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
analyze_TEXT();
} else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
tokenType != SSECTION && tokenType != FIN) {
@@ -46,39 +44,23 @@ static void analyze_P() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == NPARA) {
- scanner();
#if PRINT_TOKEN
- wprint_token(stdout);
+ wprint_ctoken(stdout);
#endif
+ scanner();
if (target != NULL) {
fputws(L"\n", target);
- fputws((const wchar_t*)token[tokenFound].value, target);
- fputws(L" ", target);
}
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- if (tokenType == MOT) {
- scanner();
- #if PRINT_TOKEN
- wprint_token(stdout);
- #endif
- if (target != NULL) {
- fputws((const wchar_t*)token[tokenFound].value, target);
- fputws(L" ", target);
- }
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- analyze_TEXT();
- if (target != NULL) {
- fputws(L"\n
\n", target);
- }
- analyze_P();
+ analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
+ analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
- }
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
@@ -89,31 +71,25 @@ static void analyze_HEAD() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOTCLE) {
- scanner();
- #if PRINT_TOKEN
- wprint_token(stdout);
- #endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
//FIXME: Check if the MOTCLE token value is set to >Titre
if (target != NULL) {
fputws(L"\n", target);
- fputws((const wchar_t*)token[tokenFound].value, target);
fputws(L" ", target);
}
+ scanner();
analyze_TEXT();
if (target != NULL) {
fputws(L"\n\n", target);
}
if (tokenType == MOTCLE) {
- scanner();
#if PRINT_TOKEN
- wprint_token(stdout);
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
//The text contains the author
+ fputws(L"", target);
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
@@ -130,21 +106,21 @@ static void analyze_H1() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token(stdout);
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
if (target != NULL) {
fputws(L"\n", target);
- fputws((const wchar_t*)token[tokenFound].value, target);
- fputws(L" ", target);
}
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
analyze_TEXT();
if (target != NULL) {
fputws(L"\n
\n", target);
}
+ } else {
+ fprintf(stderr, "%s error\n", __func__);
+ fflush(stderr);
+ exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -156,17 +132,13 @@ static void analyze_H2() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SSECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token(stdout);
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
if (target != NULL) {
fputws(L"\n", target);
- fputws((const wchar_t*)token[tokenFound].value, target);
- fputws(L" ", target);
}
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
analyze_TEXT();
if (target != NULL) {
fputws(L"\n
\n", target);
@@ -181,7 +153,6 @@ static void analyze_S2() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- //FIXME: This test is probably useless
if (tokenType == SSECTION) {
analyze_H2();
analyze_P();
@@ -200,7 +171,6 @@ static void analyze_S1() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- //FIXME: This test is probably useless
if (tokenType == SECTION) {
analyze_H1();
analyze_P();
@@ -231,13 +201,10 @@ void analyze_AXIOME() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token(stdout);
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
if (target != NULL) {
fputws(L"\n", target);
}