repositories
/
TP_AL_C.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Syntactic analyser implementation with HTML conversion code
[TP_AL_C.git]
/
lexer
/
syntactic_analyzer.c
diff --git
a/lexer/syntactic_analyzer.c
b/lexer/syntactic_analyzer.c
index 8d2d2e93837565b1fe4f60085ee01623c377180f..29524f01f7b39d82d2328f3c773be5565941d30e 100644
(file)
--- a/
lexer/syntactic_analyzer.c
+++ b/
lexer/syntactic_analyzer.c
@@
-20,8
+20,12
@@
static void analyze_TEXT() {
#endif
if (tokenType == MOT) {
scanner();
#endif
if (tokenType == MOT) {
scanner();
+ if (target != NULL) {
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
#if PRINT_TOKEN
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
#endif
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
@@
-44,28
+48,36
@@
static void analyze_P() {
if (tokenType == NPARA) {
scanner();
#if PRINT_TOKEN
if (tokenType == NPARA) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif
#endif
+ if (target != NULL) {
+ fputws(L"<p>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
if (tokenType == MOT) {
scanner();
#if PRINT_TOKEN
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
if (tokenType == MOT) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif
#endif
+ if (target != NULL) {
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</p>\n", target);
+ }
analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
analyze_P();
} else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- fflush(stderr);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@
-79,28
+91,34
@@
static void analyze_HEAD() {
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
- wprint_token();
- #endif
+ wprint_token
_stdout
();
+ #endif
/* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ //FIXME: Check if the MOTCLE token value is set to >Titre
+ if (target != NULL) {
+ fputws(L"<title>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
analyze_TEXT();
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</title>\n", target);
+ }
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
if (tokenType == MOTCLE) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ //The text contains the author
analyze_TEXT();
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
analyze_TEXT();
} else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
exit(EXIT_FAILURE);
}
- } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
- fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
- fflush(stderr);
- exit(EXIT_FAILURE);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@
-114,11
+132,19
@@
static void analyze_H1() {
if (tokenType == SECTION) {
scanner();
#if PRINT_TOKEN
if (tokenType == SECTION) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif /* PRINT_TOKEN */
#endif /* PRINT_TOKEN */
+ if (target != NULL) {
+ fputws(L"<h1>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h1>\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@
-132,11
+158,19
@@
static void analyze_H2() {
if (tokenType == SSECTION) {
scanner();
#if PRINT_TOKEN
if (tokenType == SSECTION) {
scanner();
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif /* PRINT_TOKEN */
#endif /* PRINT_TOKEN */
+ if (target != NULL) {
+ fputws(L"<h2>\n", target);
+ fputws((const wchar_t*)token[tokenFound].value, target);
+ fputws(L" ", target);
+ }
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
analyze_TEXT();
+ if (target != NULL) {
+ fputws(L"\n</h2>\n", target);
+ }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
@@
-147,6
+181,7
@@
static void analyze_S2() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
+ //FIXME: This test is probably useless
if (tokenType == SSECTION) {
analyze_H2();
analyze_P();
if (tokenType == SSECTION) {
analyze_H2();
analyze_P();
@@
-165,6
+200,7
@@
static void analyze_S1() {
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
+ //FIXME: This test is probably useless
if (tokenType == SECTION) {
analyze_H1();
analyze_P();
if (tokenType == SECTION) {
analyze_H1();
analyze_P();
@@
-198,12
+234,24
@@
void analyze_AXIOME() {
scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
- wprint_token();
+ wprint_token
_stdout
();
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
+ if (target != NULL) {
+ fputws(L"<head>\n", target);
+ }
analyze_HEAD();
analyze_HEAD();
+ if (target != NULL) {
+ fputws(L"\n</head>\n", target);
+ }
+ if (target != NULL) {
+ fputws(L"<body>\n", target);
+ }
analyze_BODY();
analyze_BODY();
+ if (target != NULL) {
+ fputws(L"\n</body>\n", target);
+ }
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);
if (tokenType != FIN) {
fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
fflush(stderr);