X-Git-Url: https://git.piment-noir.org/?a=blobdiff_plain;f=lexer%2Fsyntactic_analyzer.c;h=29524f01f7b39d82d2328f3c773be5565941d30e;hb=f8c1adc17ee4b1e87981568009a92d84bf0f5506;hp=2d3880ff963fa1cccac02f2c3371d9d989b751cd;hpb=e70feb8ccefed05878cb3f714ea34ad8f8d8ddfa;p=TP_AL_C.git
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 2d3880f..29524f0 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -20,8 +20,12 @@ static void analyze_TEXT() {
#endif
if (tokenType == MOT) {
scanner();
+ if (target != NULL) {
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
@@ -29,6 +33,7 @@ static void analyze_TEXT() {
} else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
@@ -43,26 +48,36 @@ static void analyze_P() {
if (tokenType == NPARA) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif
+ if (target != NULL) {
+ fputws(L"<p>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
if (tokenType == MOT) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif
+ if (target != NULL) {
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</p>\n", target);
+ }
analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -76,26 +91,34 @@ static void analyze_HEAD() {
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
- wprint_token();
- #endif
+ wprint_token_stdout();
+ #endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ //FIXME: Check if the MOTCLE token value is set to >Titre
+ if (target != NULL) {
+ fputws(L"<title>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</title>\n", target);
+ }
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ //The text contains the author
analyze_TEXT();
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -109,11 +132,19 @@ static void analyze_H1() {
if (tokenType == SECTION) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif /* PRINT_TOKEN */
+ if (target != NULL) {
+ fputws(L"<h1>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h1>\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -127,11 +158,19 @@ static void analyze_H2() {
if (tokenType == SSECTION) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif /* PRINT_TOKEN */
+ if (target != NULL) {
+ fputws(L"<h2>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h2>\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@ -142,12 +181,14 @@ static void analyze_S2() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
+ //FIXME: This test is probably useless
if (tokenType == SSECTION) {
analyze_H2();
analyze_P();
analyze_S2();
} else if (tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
@@ -159,6 +200,7 @@ static void analyze_S1() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
+ //FIXME: This test is probably useless
if (tokenType == SECTION) {
analyze_H1();
analyze_P();
@@ -166,6 +208,7 @@ static void analyze_S1() {
analyze_S1();
} else if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
@@ -191,18 +234,32 @@ void analyze_AXIOME() {
scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token();
+ wprint_token_stdout();
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ if (target != NULL) {
+ fputws(L"<head>\n", target);
+ }
analyze_HEAD();
+ if (target != NULL) {
+ fputws(L"\n</head>\n", target);
+ }
+ if (target != NULL) {
+ fputws(L"<body>\n", target);
+ }
analyze_BODY();
+ if (target != NULL) {
+ fputws(L"\n</body>\n", target);
+ }
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
fprintf(stdout, "successful syntactic analysis\n");
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
+ fflush(stdout);
}