X-Git-Url: https://git.piment-noir.org/?p=TP_AL_C.git;a=blobdiff_plain;f=lexer%2Fsyntactic_analyzer.c;h=0f1b1818b3a6c2ced9a65c8cc912fe3bd61385e2;hp=29524f01f7b39d82d2328f3c773be5565941d30e;hb=2d2a5978e84c354bf8167a3ab0b1088d264049bf;hpb=48e4b4adcc62229b0006ca4c35e54d63a9f6cc9e

diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 29524f0..0f1b181 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -1,14 +1,14 @@
 /* Syntactic analyzer */
 
 #include <stdlib.h>
-#include <wchar.h>
 #include <stdio.h>
 
 #include "global_vars.h"
-#include "lexical_analyzer.h"
 #include "print_helper.h"
+#include "syntactic_analyzer.h"
+#include "lexical_analyzer.h"
 
-#define PRINT_TOKEN 0
+#define PRINT_TOKEN 1
 
 /* Syntactic analyzer functions implementation */
 
@@ -19,16 +19,14 @@ static void analyze_TEXT() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == MOT) {
-        scanner();
+        #if PRINT_TOKEN
+        wprint_ctoken(stdout);
+        #endif
         if (target != NULL) {
-            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws((const wchar_t*)tokenValue, target);
             fputws(L" ", target);
         }
-        #if PRINT_TOKEN
-        wprint_token_stdout();
-        #endif
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        scanner();
         analyze_TEXT();
     } else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
                tokenType != SSECTION && tokenType != FIN) {
@@ -46,39 +44,23 @@ static void analyze_P() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == NPARA) {
-        scanner();
         #if PRINT_TOKEN
-        wprint_token_stdout();
-        #endif
+        wprint_ctoken(stdout);
+        #endif /* PRINT_TOKEN */
         if (target != NULL) {
             fputws(L"<p>\n", target);
-            fputws((const wchar_t*)token[tokenFound].value, target);
-            fputws(L" ", target);
         }
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
-        if (tokenType == MOT) {
-            scanner();
-            #if PRINT_TOKEN
-            wprint_token_stdout();
-            #endif
-            if (target != NULL) {
-                fputws((const wchar_t*)token[tokenFound].value, target);
-                fputws(L" ", target);
-            }
-            token[tokenFound].type = tokenTypestr[tokenType];
-            tokenFound++;
-            analyze_TEXT();
-            if (target != NULL) {
-                fputws(L"\n</p>\n", target);
-            }
-            analyze_P();
+        scanner();
+        analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</p>\n", target);
+        }
+        analyze_P();
     } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
         fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
         fflush(stderr);
         exit(EXIT_FAILURE);
     }
-    }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
 #endif
@@ -88,32 +70,24 @@ static void analyze_HEAD() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
-    if (tokenType == MOTCLE) {
-        scanner();
-        #if PRINT_TOKEN
-        wprint_token_stdout();
-        #endif /* PRINT_TOKEN */
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
-        //FIXME: Check if the MOTCLE token value is set to >Titre
+    if (tokenType == MOTCLE && wcscmp(L">Titre", (const wchar_t*)tokenValue) == 0) {
         if (target != NULL) {
             fputws(L"<title>\n", target);
-            fputws((const wchar_t*)token[tokenFound].value, target);
             fputws(L" ", target);
         }
+        scanner();
         analyze_TEXT();
         if (target != NULL) {
             fputws(L"\n</title>\n", target);
         }
-        if (tokenType == MOTCLE) {
-            scanner();
+        if (tokenType == MOTCLE && wcscmp(L">Auteur", (const wchar_t*)tokenValue) == 0) {
             #if PRINT_TOKEN
-            wprint_token_stdout();
+            wprint_ctoken(stdout);
             #endif /* PRINT_TOKEN */
-            token[tokenFound].type = tokenTypestr[tokenType];
-            tokenFound++;
-            //The text contain the author
+            fputws(L"<meta name=\"author\" content=\"", target);
+            scanner();
             analyze_TEXT();
+            fputws(L"\">", target);
         } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
             fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
             fflush(stderr);
@@ -130,21 +104,21 @@ static void analyze_H1() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == SECTION) {
-        scanner();
         #if PRINT_TOKEN
-        wprint_token_stdout();
+        wprint_ctoken(stdout);
         #endif /* PRINT_TOKEN */
         if (target != NULL) {
             fputws(L"<h1>\n", target);
-            fputws((const wchar_t*)token[tokenFound].value, target);
-            fputws(L" ", target);
         }
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        scanner();
         analyze_TEXT();
         if (target != NULL) {
             fputws(L"\n</h1>\n", target);
         }
+    } else {
+        fprintf(stderr, "%s error\n", __func__);
+        fflush(stderr);
+        exit(EXIT_FAILURE);
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
 #endif
@@ -156,17 +130,13 @@ static void analyze_H2() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == SSECTION) {
-        scanner();
         #if PRINT_TOKEN
-        wprint_token_stdout();
+        wprint_ctoken(stdout);
         #endif /* PRINT_TOKEN */
         if (target != NULL) {
             fputws(L"<h2>\n", target);
-            fputws((const wchar_t*)token[tokenFound].value, target);
-            fputws(L" ", target);
         }
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        scanner();
         analyze_TEXT();
         if (target != NULL) {
             fputws(L"\n</h2>\n", target);
@@ -181,7 +151,6 @@ static void analyze_S2() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
-    //FIXME: This test is probably useless
     if (tokenType == SSECTION) {
         analyze_H2();
         analyze_P();
@@ -200,7 +169,6 @@ static void analyze_S1() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
-    //FIXME: This test is probably useless
     if (tokenType == SECTION) {
         analyze_H1();
         analyze_P();
@@ -227,17 +195,14 @@ static void analyze_BODY() {
 #endif
 }
 
-void analyze_AXIOME() {
+void analyze_AXIOME(void) {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
-    scanner();
     /* print the lexical analysis result */
     #if PRINT_TOKEN
-    wprint_token_stdout();
+    wprint_ctoken(stdout);
     #endif /* PRINT_TOKEN */
-    token[tokenFound].type = tokenTypestr[tokenType];
-    tokenFound++;
     if (target != NULL) {
         fputws(L"<html>\n", target);
     }
@@ -252,6 +217,9 @@ void analyze_AXIOME() {
     if (target != NULL) {
         fputws(L"</body>\n</html>\n", target);
     }
+    #if PRINT_TOKEN
+    wprint_ctoken(stdout);
+    #endif /* PRINT_TOKEN */
    if (tokenType != FIN) {
         fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
         fflush(stderr);