From: Jérôme Benoit
Date: Mon, 20 Nov 2017 11:23:28 +0000 (+0100)
Subject: Syntactic analyser implementation with HTML conversion code
X-Git-Url: https://git.piment-noir.org/?a=commitdiff_plain;h=48e4b4adcc62229b0006ca4c35e54d63a9f6cc9e;p=TP_AL_C.git

Syntactic analyser implementation with HTML conversion code
from MD syntax.

Yes, there's an off-by-one in the syntactic analyser, but for now it is
working ...

Signed-off-by: Jérôme Benoit
---

diff --git a/lexer/main.c b/lexer/main.c
index c67370d..f862217 100644
--- a/lexer/main.c
+++ b/lexer/main.c
@@ -13,15 +13,18 @@ void do_lexical_analysis() {
     c = fgetwc(source); // lecture du premier caractere
     do {
         scanner();
-        wprint_token();
+        wprint_token_target();
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
     } while (tokenType != FIN); // tant que la fin du fichier n'est pas atteinte
 }
 
 void do_syntactic_analysis() {
+    fputws(L"<!DOCTYPE html>\n<html>\n",
+        target);
     c = fgetwc(source); // lecture du premier caractere
     analyze_AXIOME();
+    fputws(L"</html>\n", target);
 }
 
 void print_usage(const char* name) {
@@ -32,6 +35,7 @@ void print_usage(const char* name) {
         " -i, --input: use <file> as input file instead of standard input\n"
         " -o, --output: use <file> as output file instead of standard output\n",
         name);
+    fflush(stdout);
 }
 
 int main(int argc, char **argv) {
diff --git a/lexer/print_helper.c b/lexer/print_helper.c
index 9f97a8f..f7ab04f 100644
--- a/lexer/print_helper.c
+++ b/lexer/print_helper.c
@@ -53,11 +53,19 @@ void wpr_error(const wchar_t *format, ...) {
     fflush(stderr);
 }
 
-void wprint_token() {
+void wprint_token_stdout() {
+    if (tokenType == MOT || tokenType == MOTCLE) {
+        fwprintf(stdout, L"%20s: %ls\n", tokenTypestr[tokenType], token[tokenFound].value);
+    } else {
+        fwprintf(stdout, L"%20s\n", tokenTypestr[tokenType]);
+    }
+    fflush(stdout);
+}
+
+void wprint_token_target() {
     if (tokenType == MOT || tokenType == MOTCLE) {
         fwprintf(target, L"%20s: %ls\n", tokenTypestr[tokenType], token[tokenFound].value);
     } else {
         fwprintf(target, L"%20s\n", tokenTypestr[tokenType]);
     }
-    fflush(target);
 }
diff --git a/lexer/print_helper.h b/lexer/print_helper.h
index 5daaa35..396afd4 100644
--- a/lexer/print_helper.h
+++ b/lexer/print_helper.h
@@ -9,6 +9,7 @@ void pr_debug(const char *format, ...);
 void wpr_warning(const wchar_t *format, ...);
 void wpr_error(const wchar_t *format, ...);
 
-void wprint_token();
+void wprint_token_stdout();
+void wprint_token_target();
 
 #endif /* PRINT_HELPER_H_ */
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 8d2d2e9..29524f0 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -20,8 +20,12 @@ static void analyze_TEXT() {
 #endif
     if (tokenType == MOT) {
         scanner();
+        if (target != NULL) {
+            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws(L" ", target);
+        }
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_token_stdout();
 #endif
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
@@ -44,28 +48,36 @@ static void analyze_P() {
     if (tokenType == NPARA) {
         scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_token_stdout();
 #endif
+        if (target != NULL) {
+            fputws(L"<p>\n", target);
+            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws(L" ", target);
+        }
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
         if (tokenType == MOT) {
             scanner();
 #if PRINT_TOKEN
-            wprint_token();
+            wprint_token_stdout();
 #endif
+            if (target != NULL) {
+                fputws((const wchar_t*)token[tokenFound].value, target);
+                fputws(L" ", target);
+            }
             token[tokenFound].type = tokenTypestr[tokenType];
             tokenFound++;
             analyze_TEXT();
+            if (target != NULL) {
+                fputws(L"\n</p>\n", target);
+            }
             analyze_P();
         } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
             fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
             fflush(stderr);
             exit(EXIT_FAILURE);
         }
-    } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
-        fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
-        fflush(stderr);
-        exit(EXIT_FAILURE);
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -79,28 +91,34 @@ static void analyze_HEAD() {
     if (tokenType == MOTCLE) {
         scanner();
 #if PRINT_TOKEN
-        wprint_token();
-#endif
+        wprint_token_stdout();
+#endif /* PRINT_TOKEN */
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
+        //FIXME: Check if the MOTCLE token value is set to >Titre
+        if (target != NULL) {
+            fputws(L"<title>\n", target);
+            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws(L" ", target);
+        }
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</title>\n", target);
+        }
         if (tokenType == MOTCLE) {
             scanner();
 #if PRINT_TOKEN
-            wprint_token();
+            wprint_token_stdout();
 #endif /* PRINT_TOKEN */
             token[tokenFound].type = tokenTypestr[tokenType];
             tokenFound++;
+            // The text contains the author
             analyze_TEXT();
         } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
             fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
             fflush(stderr);
             exit(EXIT_FAILURE);
         }
-    } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
-        fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
-        fflush(stderr);
-        exit(EXIT_FAILURE);
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -114,11 +132,19 @@ static void analyze_H1() {
     if (tokenType == SECTION) {
         scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_token_stdout();
 #endif /* PRINT_TOKEN */
+        if (target != NULL) {
+            fputws(L"<h1>\n", target);
+            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws(L" ", target);
+        }
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</h1>\n", target);
+        }
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -132,11 +158,19 @@ static void analyze_H2() {
     if (tokenType == SSECTION) {
         scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_token_stdout();
 #endif /* PRINT_TOKEN */
+        if (target != NULL) {
+            fputws(L"<h2>\n", target);
+            fputws((const wchar_t*)token[tokenFound].value, target);
+            fputws(L" ", target);
+        }
         token[tokenFound].type = tokenTypestr[tokenType];
         tokenFound++;
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</h2>\n", target);
+        }
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -147,6 +181,7 @@ static void analyze_S2() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
+    //FIXME: This test is probably useless
     if (tokenType == SSECTION) {
         analyze_H2();
         analyze_P();
@@ -165,6 +200,7 @@ static void analyze_S1() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
+    //FIXME: This test is probably useless
     if (tokenType == SECTION) {
         analyze_H1();
         analyze_P();
@@ -198,12 +234,24 @@ void analyze_AXIOME() {
     scanner();
     /* print the lexical analysis result */
 #if PRINT_TOKEN
-    wprint_token();
+    wprint_token_stdout();
 #endif /* PRINT_TOKEN */
     token[tokenFound].type = tokenTypestr[tokenType];
     tokenFound++;
+    if (target != NULL) {
+        fputws(L"<head>\n", target);
+    }
     analyze_HEAD();
+    if (target != NULL) {
+        fputws(L"\n</head>\n", target);
+    }
+    if (target != NULL) {
+        fputws(L"<body>\n", target);
+    }
     analyze_BODY();
+    if (target != NULL) {
+        fputws(L"\n</body>\n", target);
+    }
     if (tokenType != FIN) {
         fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
         fflush(stderr);
diff --git a/lexer/test.txt b/lexer/test.txt
index f07f979..895b968 100644
--- a/lexer/test.txt
+++ b/lexer/test.txt
@@ -20,7 +20,7 @@ Ici vous pouvez ecrire vos paragraphes de section.
 Au besoin vous pouvez definir des sous sections, en les introduisant
 par deux symboles plus suivi du titre de sous section, comme ci-dessous.
 
-== Ma premiere sous Section
+== Ma premiere sous section
 
 Ici vous pouvez ecrire vos paragraphes de section toujours separes par
 une ou plusieurs lignes blanches.
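
Note on the intended mapping (a sketch, not code from the commit; the input
conventions are inferred from lexer/test.txt and the >Titre FIXME above, so
the exact keyword spelling is an assumption): an input document such as

    >Titre Mon document

    = Ma premiere section

    Un petit paragraphe.

should, modulo the admitted off-by-one in the emitted words and the exact
whitespace, come out as:

    <!DOCTYPE html>
    <html>
    <head>
    <title>
    Mon document
    </title>
    </head>
    <body>
    <h1>
    Ma premiere section
    </h1>
    <p>
    Un petit paragraphe.
    </p>
    </body>
    </html>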
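About the off-by-one conceded in the commit message: the lexical loop in
do_lexical_analysis() suggests that scanner() stores the token it just read
at token[tokenFound], and that tokenFound is only incremented afterwards.
Each parser match therefore calls scanner() first, so the subsequent
fputws(token[tokenFound].value, target) writes the lookahead's value rather
than the matched word's. Assuming that storage scheme, a possible fix is to
write the slot one behind, sketched here on analyze_TEXT() (error handling
and the PRINT_TOKEN branch omitted):

    /* Sketch of a possible fix, not part of this commit: emit the word that
     * was actually matched, which sits at index tokenFound - 1 once the
     * previous match has incremented tokenFound. */
    static void analyze_TEXT() {
        if (tokenType == MOT) {
            scanner(); /* token[tokenFound] now holds the lookahead */
            if (target != NULL) {
                fputws((const wchar_t *)token[tokenFound - 1].value, target);
                fputws(L" ", target);
            }
            token[tokenFound].type = tokenTypestr[tokenType];
            tokenFound++;
            analyze_TEXT();
        }
    }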
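On the FIXME about the MOTCLE value in analyze_HEAD(): a minimal check could
look like the following (a sketch; it assumes the keyword literal is spelled
exactly L">Titre" as in the FIXME text, that token values are wide strings,
and it leaves aside the indexing question raised by the off-by-one above):

    /* Hypothetical check, not part of this commit: reject a document whose
     * first keyword is not the title keyword. Requires <wchar.h> for
     * wcscmp(); token and tokenFound are the project's existing globals. */
    if (wcscmp((const wchar_t *)token[tokenFound].value, L">Titre") != 0) {
        fwprintf(stderr, L"unexpected keyword: %ls\n",
                 (const wchar_t *)token[tokenFound].value);
        fflush(stderr);
        exit(EXIT_FAILURE);
    }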