Fix the syntactic analyzer for real.
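
This commit replaces the token-recording bookkeeping (token[tokenFound].type = tokenTypestr[tokenType]; tokenFound++;) with direct HTML generation: each grammar rule now writes its markup to the optional wide-character stream named target via fputws, debug printing goes through wprint_ctoken(stdout) instead of wprint_token(), and scanner() is advanced only after the current token's value has been consumed.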
diff --git a/lexer/syntactic_analyzer.c b/lexer/syntactic_analyzer.c
index 8d2d2e93837565b1fe4f60085ee01623c377180f..65997ddfed47b40dad0ef4008599cbd8cb2a5828 100644
--- a/lexer/syntactic_analyzer.c
+++ b/lexer/syntactic_analyzer.c
@@ -8,7 +8,7 @@
 #include "lexical_analyzer.h"
 #include "print_helper.h"
 
-#define PRINT_TOKEN 0
+#define PRINT_TOKEN 1
 
 /* Syntactic analyzer functions implementation */
@@ -19,12 +19,14 @@ static void analyze_TEXT() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == MOT) {
-        scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_ctoken(stdout);
 #endif
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        if (target != NULL) {
+            fputws((const wchar_t*)tokenValue, target);
+            fputws(L" ", target);
+        }
+        scanner();
         analyze_TEXT();
     } else if (tokenType != MOTCLE && tokenType != NPARA && tokenType != SECTION && \
             tokenType != SSECTION && tokenType != FIN) {
@@ -42,31 +44,23 @@ static void analyze_P() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == NPARA) {
-        scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_ctoken(stdout);
 #endif
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
-        if (tokenType == MOT) {
-            scanner();
-            #if PRINT_TOKEN
-            wprint_token();
-            #endif
-            token[tokenFound].type = tokenTypestr[tokenType];
-            tokenFound++;
-            analyze_TEXT();
-            analyze_P();
+        scanner();
+        if (target != NULL) {
+            fputws(L"<p>\n", target);
+        }
+        analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</p>\n", target);
+        }
+        analyze_P();
         } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
             fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
             fflush(stderr);
             exit(EXIT_FAILURE);
         }
-    } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
-        fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
-        fflush(stderr);
-        exit(EXIT_FAILURE);
-    }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
 #endif
@@ -77,30 +71,30 @@ static void analyze_HEAD() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == MOTCLE) {
+        //FIXME: Check if the MOTCLE token value is set to >Titre
+        if (target != NULL) {
+            fputws(L"<title>\n", target);
+            fputws(L" ", target);
+        }
         scanner();
-        #if PRINT_TOKEN
-        wprint_token();
-        #endif
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</title>\n", target);
+        }
         if (tokenType == MOTCLE) {
-            scanner();
 #if PRINT_TOKEN
-            wprint_token();
+            wprint_ctoken(stdout);
 #endif /* PRINT_TOKEN */
-            token[tokenFound].type = tokenTypestr[tokenType];
-            tokenFound++;
+            scanner();
+            //The text contain the author
+            fputws(L"<meta name=\"author\" content=\"", target);
             analyze_TEXT();
+            fputws(L"\">", target);
         } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
             fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
             fflush(stderr);
             exit(EXIT_FAILURE);
         }
-    } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
-        fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
-        fflush(stderr);
-        exit(EXIT_FAILURE);
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -112,13 +106,21 @@ static void analyze_H1() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == SECTION) {
-        scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_ctoken(stdout);
 #endif /* PRINT_TOKEN */
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        if (target != NULL) {
+            fputws(L"<h1>\n", target);
+        }
+        scanner();
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</h1>\n", target);
+        }
+    } else {
+        fprintf(stderr, "%s error\n", __func__);
+        fflush(stderr);
+        exit(EXIT_FAILURE);
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -130,13 +132,17 @@ static void analyze_H2() {
     fprintf(stdout, "entering %s\n", __func__);
 #endif
     if (tokenType == SSECTION) {
-        scanner();
 #if PRINT_TOKEN
-        wprint_token();
+        wprint_ctoken(stdout);
 #endif /* PRINT_TOKEN */
-        token[tokenFound].type = tokenTypestr[tokenType];
-        tokenFound++;
+        if (target != NULL) {
+            fputws(L"<h2>\n", target);
+        }
+        scanner();
         analyze_TEXT();
+        if (target != NULL) {
+            fputws(L"\n</h2>\n", target);
+        }
     }
 #if DEBUG
     fprintf(stdout, "leaving %s\n", __func__);
@@ -195,15 +201,24 @@ void analyze_AXIOME() {
 #if DEBUG
     fprintf(stdout, "entering %s\n", __func__);
 #endif
-    scanner();
     /* print the lexical analysis result */
 #if PRINT_TOKEN
-    wprint_token();
+    wprint_ctoken(stdout);
 #endif /* PRINT_TOKEN */
-    token[tokenFound].type = tokenTypestr[tokenType];
-    tokenFound++;
+    if (target != NULL) {
+        fputws(L"<head>\n", target);
+    }
     analyze_HEAD();
+    if (target != NULL) {
+        fputws(L"\n</head>\n", target);
+    }
+    if (target != NULL) {
+        fputws(L"<body>\n", target);
+    }
     analyze_BODY();
+    if (target != NULL) {
+        fputws(L"\n<body>\n", target);
+    }
     if (tokenType != FIN) {
         fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
         fflush(stderr);
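
Below is a minimal, self-contained sketch of the output pattern this diff adopts: recursive-descent rules writing HTML wide strings to an optional FILE *target. The rule names and the target/tokenValue globals mirror the diff; the scanner() stand-in and its fixed token feed are hypothetical, included only so the sketch compiles and runs on its own.

#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>

enum TokenType { MOT, MOTCLE, NPARA, SECTION, SSECTION, FIN };

static enum TokenType tokenType;
static wchar_t tokenValue[64];
static FILE *target;                     /* NULL disables HTML generation */

/* Hypothetical stand-in for the real scanner(): serves a fixed stream. */
static void scanner(void) {
    static const struct { enum TokenType t; const wchar_t *v; } feed[] = {
        { NPARA, L"==" }, { MOT, L"hello" }, { MOT, L"world" }, { FIN, L"" },
    };
    static size_t i = 0;
    tokenType = feed[i].t;
    wcsncpy(tokenValue, feed[i].v, 63);
    tokenValue[63] = L'\0';
    if (i + 1 < sizeof feed / sizeof feed[0]) i++;
}

/* TEXT -> MOT TEXT | epsilon: emit the word, then advance, as the diff does. */
static void analyze_TEXT(void) {
    if (tokenType == MOT) {
        if (target != NULL) {
            fputws(tokenValue, target);
            fputws(L" ", target);
        }
        scanner();
        analyze_TEXT();
    }
}

/* P -> NPARA TEXT P | epsilon: wrap each paragraph in <p>...</p>. */
static void analyze_P(void) {
    if (tokenType == NPARA) {
        scanner();
        if (target != NULL) fputws(L"<p>\n", target);
        analyze_TEXT();
        if (target != NULL) fputws(L"\n</p>\n", target);
        analyze_P();
    }
}

int main(void) {
    target = stdout;                      /* real parser writes to a caller-opened file */
    scanner();                            /* prime the first token */
    analyze_P();
    return tokenType == FIN ? EXIT_SUCCESS : EXIT_FAILURE;
}

Run as-is it prints a single paragraph ("<p>", the two words, "</p>"); the committed code follows the same shape but keeps the token-type checks and error paths shown in the hunks above.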