Very new to lex. This is for a project for my Programming Languages class.
Consider a language built over the following grammar:
<program> ::= <statement> | <program> <statement>
<statement> ::= <assignStmt> | <ifStmt> | <whileStmt> | <printStmt>
<assignStmt> ::= <id> = <expr> ;
<ifStmt> ::= if ( <expr> ) then <statement>
<whileStmt> ::= while ( <expr> ) do <statement>
<printStmt> ::= print <expr> ;
<expr> ::= <term> | <expr> <addOp> <term>
<term> ::= <factor> | <term> <multOp> <factor>
<factor> ::= <id> | <number> | - <factor> | ( <expr> )
<id> ::= <letter> | <id> <letter>
<letter> ::= a | b | c | d | e | f | g | h | i | j
| k | l | m | n | o | p | q | r | s | t
| u | v | w | x | y | z
<number> ::= <digit> | <number> <digit>
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
<addOp> ::= + | -
<multOp> ::= * | / | %
Implement a lex-based C program that scans for all the tokens of the language (keywords, identifiers, numbers, operators, and so on).
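For example, as I understand the grammar, a tiny program in this language (just an illustration I made up, not from the assignment) would look something like

    x = 3 ;
    while ( x ) do print x - 2 ;

and the scanner should simply report one token per lexeme: keywords (while, do, print), identifiers (x), numbers (3, 2), operators (=, -), parentheses, and semicolons.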
My problem is that I get an "l7t2.l:32: unrecognized rule" error when I run lex on it. I believe it stems from the definition of "word" in the definitions section, but I'm not sure how to fix it; my best guess at a fix is sketched after the file.
Here's my lex file, l7t2.l:
%option noyywrap
%{
#include <stdlib.h>   /* strtod */
#include <string.h>   /* strcpy */
#include "l7t2.h"     /* token codes: IF, THEN, PLUSOP, ... */
int totDol = 0;
char outword[256];    /* holds the most recently scanned identifier */
%}
digit [0-9]
number {digit}+
letter [a-zA-Z]
word ({letter}{[a-zA-Z0-9]}+)
%%
"if" {return IF;}
"then" {return THEN;}
"while" {return WHILE;}
"do" {return DO;}
"+" {return PLUSOP;}
"-" {return MINUSOP;}
"*" {return MULTOP;}
"/" {return DIVOP;}
"%" {return MODOP;}
";" {return SEMICOLON;}
"=" {return EQUAL;}
"print" {return PRINT;}
[ \t\n]+ ;
{word} {strcpy(outword, yytext);}
\${number} {totDol = 0; totDol += strtod(yytext+1, NULL); return totDol;}
%%
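From reading the flex manual, my best guess is that the braces in the "word" definition are the problem: inside a pattern, braces are only for expanding another named definition (like {letter}) or for a repetition count like {1,3}, so {[a-zA-Z0-9]} isn't valid, and the rule that uses {word} ends up unrecognizable once it is expanded. If that's right, I'm guessing the definition should just be

    word {letter}[a-zA-Z0-9]*

(or even {letter}+, since the grammar's identifiers are letters only), but I haven't been able to confirm that this is really what the "unrecognized rule" message is about. Any pointers appreciated.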