/* Syntactic analyzer */
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>
#include "global_vars.h"
#include "print_helper.h"
#include "syntactic_analyzer.h"
#include "lexical_analyzer.h"

#define PRINT_TOKEN 1
/* Syntactic analyzer functions implementation */
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == MOT) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws((const wchar_t*)tokenValue, target);
+ fputws(L" ", target);
+ }
+ scanner();
analyze_TEXT();
} else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == NPARA) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- if (tokenType == MOT) {
- scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
- analyze_TEXT();
- analyze_P();
+ wprint_ctoken(stdout);
+ #endif /* PRINT_TOKEN */
+ if (target != NULL) {
+ fputws(L"<p>\n", target);
+ }
+ scanner();
+ analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</p>\n", target);
+ }
+ analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
- }
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- if (tokenType == MOTCLE) {
+ if (tokenType == MOTCLE && wcscmp(L">Titre", (const wchar_t*)tokenValue) == 0) {
+ if (target != NULL) {
+ fputws(L"<title>\n", target);
+ fputws(L" ", target);
+ }
scanner();
- #if PRINT_TOKEN
- wprint_token();
- #endif
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
analyze_TEXT();
- if (tokenType == MOTCLE) {
- scanner();
+ if (target != NULL) {
+ fputws(L"\n</title>\n", target);
+ }
+ if (tokenType == MOTCLE && wcscmp(L">Auteur", (const wchar_t*)tokenValue) == 0) {
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ fputws(L"<meta name=\"author\" content=\"", target);
+ scanner();
analyze_TEXT();
+ fputws(L"\">", target);
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"<h1>\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h1>\n", target);
+ }
+ } else {
+ fprintf(stderr, "%s error\n", __func__);
+ fflush(stderr);
+ exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
fprintf(stdout, "entering %s\n", __func__);
#endif
if (tokenType == SSECTION) {
- scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"<h2>\n", target);
+ }
+ scanner();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h2>\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
analyze_S2();
} else if (tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
analyze_S1();
} else if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
#if DEBUG
#endif
}
-void analyze_AXIOME() {
+void analyze_AXIOME(void) {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
- scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token();
+ wprint_ctoken(stdout);
#endif /* PRINT_TOKEN */
- token[tokenFound].type = tokenTypestr[tokenType];
- tokenFound++;
+ if (target != NULL) {
+ fputws(L"<head>\n", target);
+ }
analyze_HEAD();
+ if (target != NULL) {
+ fputws(L"\n</head>\n", target);
+ }
+ if (target != NULL) {
+ fputws(L"<body>\n", target);
+ }
analyze_BODY();
+ if (target != NULL) {
+ fputws(L"\n<body>\n", target);
+ }
+ #if PRINT_TOKEN
+ wprint_ctoken(stdout);
+ #endif /* PRINT_TOKEN */
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
+ fflush(stderr);
exit(EXIT_FAILURE);
}
fprintf(stdout, "successful syntactic analysis\n");
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
#endif
+ fflush(stdout);
}