c = fgetwc(source); /* read the first character */
do {
    scanner();
    /* resolved diff residue: the merged version writes tokens to the
     * target stream, not to stdout */
    wprint_token_target();
    token[tokenFound].type = tokenTypestr[tokenType];
    tokenFound++;
} while (tokenType != FIN); /* loop until end of input is reached */
}
/*
 * Run the syntactic analysis and emit a complete XHTML document:
 * writes the DOCTYPE / <html> preamble to `target`, reads the first
 * character of `source`, parses the whole input via analyze_AXIOME()
 * (which emits <head>/<body>), then closes the document.
 */
void do_syntactic_analysis() {
    fputws(L"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"fr_FR\" lang=\"fr_FR\">\n",
           target);
    c = fgetwc(source); /* read the first character */
    analyze_AXIOME();
    fputws(L"</html>\n", target);
}
/*
 * Print command-line usage for the program to stdout.
 * NOTE(review): the opening `fprintf(stdout, ...)` line and the usage
 * banner string were missing from this fragment (unapplied diff residue);
 * reconstructed below — confirm the original banner text against version
 * control.
 */
void print_usage(const char* name) {
    fprintf(stdout,
            "usage: %s [options]\n"
            " -i, --input<filename>: use <filename> as input file instead of standard input\n"
            " -o, --output<filename>: use <filename> as output file instead of standard output\n",
            name);
    fflush(stdout);
}
/* Program entry point.
 * NOTE(review): the body of main() is missing here and the `#endif` below
 * has no matching `#if` in this fragment — the file contains unapplied
 * diff residue; restore the full function from version control. */
int main(int argc, char **argv) {
#endif
/* analyze_P fragment: on MOT, echo the word to the XHTML target; on NPARA,
 * open a <p> paragraph, emit its words and TEXT, close </p>, then recurse.
 * (Closing brace of the outer `if` lies beyond this fragment.) */
if (tokenType == MOT) {
    scanner();
    if (target != NULL) {
        fputws((const wchar_t*)token[tokenFound].value, target);
        fputws(L" ", target);
    }
#if PRINT_TOKEN
    wprint_token_stdout();
#endif
    token[tokenFound].type = tokenTypestr[tokenType];
    tokenFound++;
    if (tokenType == NPARA) {
        scanner();
#if PRINT_TOKEN
        wprint_token_stdout();
#endif
        if (target != NULL) {
            fputws(L"<p>\n", target);
            fputws((const wchar_t*)token[tokenFound].value, target);
            fputws(L" ", target);
        }
        token[tokenFound].type = tokenTypestr[tokenType];
        tokenFound++;
        if (tokenType == MOT) {
            scanner();
#if PRINT_TOKEN
            wprint_token_stdout();
#endif
            if (target != NULL) {
                fputws((const wchar_t*)token[tokenFound].value, target);
                fputws(L" ", target);
            }
            token[tokenFound].type = tokenTypestr[tokenType];
            tokenFound++;
            analyze_TEXT();
            if (target != NULL) {
                fputws(L"\n</p>\n", target);
            }
            analyze_P();
        } else if (tokenType != SECTION && tokenType != SSECTION && tokenType != FIN) {
            fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
            fflush(stderr);
            exit(EXIT_FAILURE);
        }
    }
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
/* NOTE(review): the matching #endif and the function boundary between the
 * previous function and this one are missing from this fragment — restore
 * from version control. */
if (tokenType == MOTCLE) {
    scanner();
#if PRINT_TOKEN
    wprint_token_stdout();
#endif /* PRINT_TOKEN */
    token[tokenFound].type = tokenTypestr[tokenType];
    tokenFound++;
    /* FIXME: check that the MOTCLE token value is ">Titre" */
    if (target != NULL) {
        fputws(L"<title>\n", target);
        fputws((const wchar_t*)token[tokenFound].value, target);
        fputws(L" ", target);
    }
    analyze_TEXT();
    if (target != NULL) {
        fputws(L"\n</title>\n", target);
    }
    if (tokenType == MOTCLE) {
        scanner();
#if PRINT_TOKEN
        wprint_token_stdout();
#endif /* PRINT_TOKEN */
        token[tokenFound].type = tokenTypestr[tokenType];
        tokenFound++;
        /* the TEXT that follows contains the author */
        analyze_TEXT();
    } else if (tokenType != NPARA && tokenType != SECTION && tokenType != FIN) {
        fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
        fflush(stderr);
        exit(EXIT_FAILURE);
    }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
/* NOTE(review): unmatched #if DEBUG above — the #endif and the enclosing
 * function boundary are missing from this fragment. */
if (tokenType == SECTION) {
    scanner();
#if PRINT_TOKEN
    wprint_token_stdout();
#endif /* PRINT_TOKEN */
    if (target != NULL) {
        fputws(L"<h1>\n", target);
        fputws((const wchar_t*)token[tokenFound].value, target);
        fputws(L" ", target);
    }
    token[tokenFound].type = tokenTypestr[tokenType];
    tokenFound++;
    analyze_TEXT();
    if (target != NULL) {
        fputws(L"\n</h1>\n", target);
    }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
/* NOTE(review): unmatched #if DEBUG above — the #endif and the enclosing
 * function boundary are missing from this fragment. */
if (tokenType == SSECTION) {
    scanner();
#if PRINT_TOKEN
    wprint_token_stdout();
#endif /* PRINT_TOKEN */
    if (target != NULL) {
        fputws(L"<h2>\n", target);
        fputws((const wchar_t*)token[tokenFound].value, target);
        fputws(L" ", target);
    }
    token[tokenFound].type = tokenTypestr[tokenType];
    tokenFound++;
    analyze_TEXT();
    if (target != NULL) {
        fputws(L"\n</h2>\n", target);
    }
}
#if DEBUG
fprintf(stdout, "leaving %s\n", __func__);
/* NOTE(review): unmatched #if DEBUG above — #endif and the function
 * boundary are missing from this fragment. */
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
/* FIXME: this test is probably useless */
if (tokenType == SSECTION) {
    analyze_H2();
    analyze_P();
#if DEBUG
fprintf(stdout, "entering %s\n", __func__);
#endif
+ //FIXME: This test is probably useless
if (tokenType == SECTION) {
analyze_H1();
analyze_P();
scanner();
/* print the lexical analysis result */
#if PRINT_TOKEN
wprint_token_stdout();
#endif /* PRINT_TOKEN */
token[tokenFound].type = tokenTypestr[tokenType];
tokenFound++;
/* emit the document skeleton around the parsed head and body */
if (target != NULL) {
    fputws(L"<head>\n", target);
}
analyze_HEAD();
if (target != NULL) {
    fputws(L"\n</head>\n", target);
    fputws(L"<body>\n", target);
}
analyze_BODY();
/* BUG FIX: this previously emitted a second opening <body> tag
 * (L"\n<body>\n"); the body element must be closed here. */
if (target != NULL) {
    fputws(L"\n</body>\n", target);
}
if (tokenType != FIN) {
    fprintf(stderr, "%s follows error on %s\n", __func__, tokenTypestr[tokenType]);
    fflush(stderr);