diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 2d3880f..5f14600 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -8,7 +8,7 @@
#include "lexical_analyzer.h"
#include "print_helper.h"
-#define PRINT_TOKEN 0
+#define PRINT_TOKEN 1
/* Syntactic analyzer functions implementation */
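
Flipping PRINT_TOKEN from 0 to 1 compiles in the wprint_ctoken(stdout) calls
that replace wprint_token() throughout the hunks below. The helper itself is
declared in print_helper.h and is not part of this diff; a minimal sketch of
what such a routine could look like, assuming the tokenType, tokenValue and
tokenTypestr globals already used in this file (hypothetical, the real
implementation may differ):

    #include <stdio.h>
    #include <wchar.h>

    /* Print the current token's class name and wide-character lexeme. */
    void wprint_ctoken(FILE *stream) {
        fwprintf(stream, L"%s: %ls\n",
                 tokenTypestr[tokenType],        /* token class, narrow string */
                 (const wchar_t *)tokenValue);   /* token lexeme, wide string  */
    }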
@@ -19,16 +19,19 @@ static void analyze_TEXT() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOT) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws((const wchar_t*)tokenValue, target);
+ fputws(L" ", target);
+ }
+ scanner();
analyze_TEXT();
} else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
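
Read off the token tests, the production analyze_TEXT implements and its
follow set are plausibly:

    /* TEXT ::= MOT TEXT | ε
     * Each MOT lexeme is copied to target followed by a single space; any
     * token in { MOTCLE, NPARA, SECTION, SSECTION, FIN } may legitimately
     * follow TEXT, anything else aborts with a follows error. */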
@@ -41,29 +44,23 @@ static void analyze_P() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == NPARA) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- if (tokenType == MOT) {
- scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- analyze_TEXT();
- analyze_P();
+ wprint_ctoken(stdout);
+ #endif /* PRINT_TOKEN */
+ scanner();
+ if (target != NULL) {
+ fputws(L"
\n", target);
+ }
+ analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
+ analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
- }
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
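
A paragraph introduced by an NPARA token now comes out wrapped in <p> tags,
one word per MOT, for example:

    <p>
    some paragraph words 
    </p>

Note the space after the last word: analyze_TEXT writes L" " after every MOT,
including the final one.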
@@ -73,29 +70,29 @@ static void analyze_HEAD() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- if (tokenType == MOTCLE) {
+ if (tokenType == MOTCLE && wcscmp(L">Titre", (const wchar_t*)tokenValue) == 0) {
+ if (target != NULL) {
+ fputws(L"\n", target);
+ fputws(L" ", target);
+ }
scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
analyze_TEXT();
- if (tokenType == MOTCLE) {
- scanner();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
+ if (tokenType == MOTCLE && wcscmp(L">Auteur", (const wchar_t*)tokenValue) == 0) {
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
+ fputws(L"", target);
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
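
At this commit the >Auteur branch consumes the keyword but only writes an
empty author meta tag, without the target != NULL guard used elsewhere. A
hedged sketch of a fuller version (hypothetical, not part of this patch),
reusing analyze_TEXT to stream the author words into the attribute value:

    if (target != NULL) {
        fputws(L"<meta name=\"author\" content=\"", target);
    }
    analyze_TEXT();   /* analyze_TEXT writes each MOT lexeme to target */
    if (target != NULL) {
        fputws(L"\">\n", target);
    }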
@@ -107,13 +104,21 @@ static void analyze_H1() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
+ } else {
+ fprintf(stderr, "%s error\n", __func__);
+ fflush(stderr);
+ exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -125,13 +130,17 @@ static void analyze_H2() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SSECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
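
analyze_H1 and analyze_H2 now bracket section and subsection titles the same
way, so a SECTION followed by a paragraph renders along these lines:

    <h1>
    section title words 
    </h1>
    <p>
    paragraph words 
    </p>

The one structural difference: analyze_H1 treats a missing SECTION as a hard
error, while analyze_H2 silently accepts the absence of an SSECTION.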
@@ -148,6 +157,7 @@ static void analyze_S2() {
analyze_S2();
} else if (tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
@@ -166,6 +176,7 @@ static void analyze_S1() {
analyze_S1();
} else if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
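
The same three-line error sequence (fprintf, fflush, exit) is now repeated in
every rule. It could be factored into a helper along these lines (purely
illustrative, not part of this patch; tokenTypestr and tokenType are the
globals used above):

    static void follow_error(const char *rule) {
        fprintf(stderr, "%s follows error on %s\n", rule, tokenTypestr[tokenType]);
        fflush(stderr);
        exit(EXIT_FAILURE);
    }

Each rule would then reduce its error branch to follow_error(__func__).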
@@ -188,21 +199,32 @@ void analyze_AXIOME() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
analyze_HEAD();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
analyze_BODY();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
fprintf(stdout, "successful syntactic analysis\n");
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
+ fflush(stdout);
}
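
Taken together, the writes in analyze_AXIOME and in the rules above produce
this HTML skeleton (title and body content elided):

    <html>
    <head>
    <title> document title 
    </title>
    <meta name="author" content="">
    </head>
    <body>
    <h1>
    ...
    </h1>
    <p>
    ...
    </p>
    </body>
    </html>

target is presumably the output FILE * opened by the caller; the
target != NULL guards let the analyzer run as a pure syntax checker when no
output file is set.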