X-Git-Url: https://git.piment-noir.org/?a=blobdiff_plain;f=lexer%2Fsyntactic_analyzer.c;h=65997ddfed47b40dad0ef4008599cbd8cb2a5828;hb=15ad4b5ab7b62ce2e966730ccd1932877d658b66;hp=8d2d2e93837565b1fe4f60085ee01623c377180f;hpb=6c47be3237ab266b1cba4e3051afaaaa55e07e52;p=TP_AL_C.git
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 8d2d2e9..65997dd 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -8,7 +8,7 @@
#include "lexical_analyzer.h"
#include "print_helper.h"
-#define PRINT_TOKEN 0
+#define PRINT_TOKEN 1
/* Syntactic analyzer functions implementation */
@@ -19,12 +19,14 @@ static void analyze_TEXT() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOT) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws((const wchar_t*)tokenValue, target);
+ fputws(L" ", target);
+ }
+ scanner();
analyze_TEXT();
} else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
tokenType != SSECTION && tokenType != FIN) {
@@ -42,31 +44,23 @@ static void analyze_P() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == NPARA) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- if (tokenType == MOT) {
- scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- analyze_TEXT();
- analyze_P();
+ scanner();
+ if (target != NULL) {
+ fputws(L"
\n", target);
+ }
+ analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
+ analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- fflush(stderr);
- exit(EXIT_FAILURE);
- }
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
@@ -77,30 +71,30 @@ static void analyze_HEAD() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOTCLE) {
+ //FIXME: Check if the MOTCLE token value is set to >Titre
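+ // (sketch of such a check, assuming tokenValue holds the raw keyword text:
+ //  if (wcscmp((const wchar_t *)tokenValue, L">Titre") != 0) { report the error and exit })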
+ if (target != NULL) {
+ fputws(L"\n", target);
+ fputws(L" ", target);
+ }
scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
if (tokenType == MOTCLE) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ scanner();
+ // The text contains the author
+ fputws(L"", target);
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- fflush(stderr);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -112,13 +106,21 @@ static void analyze_H1() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
+ } else {
+ fprintf(stderr, "%s error\n", __func__);
+ fflush(stderr);
+ exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -130,13 +132,17 @@ static void analyze_H2() {
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SSECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n
\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -195,15 +201,24 @@ void analyze_AXIOME() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
analyze_HEAD();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
+ if (target != NULL) {
+ fputws(L"\n", target);
+ }
analyze_BODY();
+ if (target != NULL) {
+ fputws(L"\n\n", target);
+ }
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);