(********************************************************************)
(*                                                                  *)
(*  The LustreC compiler toolset   /  The LustreC Development Team  *)
(*  Copyright 2012 -    --   ONERA - CNRS - INPT                    *)
(*                                                                  *)
(*  LustreC is free software, distributed WITHOUT ANY WARRANTY      *)
(*  under the terms of the GNU Lesser General Public License        *)
(*  version 2.1.                                                    *)
(*                                                                  *)
(********************************************************************)

{
(* open ParserLustreSpec *)
open Parser_lustre
open Utils

exception Error of Location.t

let str_buf = Buffer.create 1024

(* As advised by the OCaml documentation: this way a single lexer rule
   handles all the possible keywords. *)
let keyword_table =
  create_hashtable 20 [
    (* "true", TRUE; *)
    (* "false", FALSE; *)
    "stateless", STATELESS;
    "if", IF;
    "then", THEN;
    "else", ELSE;
    "merge", MERGE;
    "arrow", ARROW;
    "fby", FBY;
    "when", WHEN;
    "whennot", WHENNOT;
    "every", EVERY;
    "node", NODE;
    "let", LET;
    "tel", TEL;
    "returns", RETURNS;
    "var", VAR;
    "imported", IMPORTED;
    "wcet", WCET;
    "int", TINT;
    "bool", TBOOL;
    (* "float", TFLOAT; *)
    "real", TREAL;
    "clock", TCLOCK;
    "not", NOT;
    "tail", TAIL;
    "and", AND;
    "or", OR;
    "xor", OR;
    "mod", MOD;
    "pre", PRE;
    "div", DIV;
    "const", CONST;
    (* "include", INCLUDE; *)
    "assert", ASSERT;
    "ensures", ENSURES;
    "requires", REQUIRES;
    "observer", OBSERVER;
    "invariant", INVARIANT;
    "behavior", BEHAVIOR;
    "assumes", ASSUMES;
    "exists", EXISTS;
    "forall", FORALL;
    "c_code", CCODE;
    "matlab", MATLAB;
  ]

}

let newline = ('\010' | '\013' | "\013\010")
let notnewline = [^ '\010' '\013']
let blank = [' ' '\009' '\012']

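(* Main entry point: skips block comments, "--" line comments and blanks,
   and returns Parser_lustre tokens. *)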
rule token = parse
| "(*"
    { comment_line 0 lexbuf }
| "--" notnewline* (newline|eof)
    { incr_line lexbuf;
      token lexbuf }
| newline
    { incr_line lexbuf;
      token lexbuf }
| blank +
    { token lexbuf }
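(* Decimal literals. A REAL token carries the mantissa digits with the
   decimal point removed (as a Num.num), a base-10 shift d such that the
   denoted value is mantissa * 10^(-d), and the original lexeme:
   e.g. "0.5e+3" yields (5, 1 - 3 = -2, "0.5e+3"), i.e. 500. *)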
| (('-'? ['0'-'9'] ['0'-'9']* as l) '.' (['0'-'9']* as r)) as s
    { REAL (Num.num_of_string (l^r), String.length r, s) }
| (('-'? ['0'-'9']+ as l) '.' (['0'-'9']+ as r) ('E'|'e') (('+'|'-') ['0'-'9'] ['0'-'9']* as exp)) as s
    { REAL (Num.num_of_string (l^r), String.length r + -1 * int_of_string exp, s) }
| '-'? ['0'-'9']+
    { INT (int_of_string (Lexing.lexeme lexbuf)) }
(* | '/' (['_' 'A'-'Z' 'a'-'z'] ['A'-'Z' 'a'-'z' '_' '0'-'9']* '/')+ as s
    { IDENT s }
*)
| ['_' 'A'-'Z' 'a'-'z'] ['A'-'Z' 'a'-'z' '_' '0'-'9']*
    { let s = Lexing.lexeme lexbuf in
      try
        Hashtbl.find keyword_table s
      with Not_found ->
        IDENT s }
| "->" { ARROW }
| "=>" { IMPL }
| "<=" { LTE }
| ">=" { GTE }
| "<>" { NEQ }
| '<' { LT }
| '>' { GT }
| "!=" { NEQ }
| '-' { MINUS }
| '+' { PLUS }
| '/' { DIV }
| '*' { MULT }
| '=' { EQ }
| '(' { LPAR }
| ')' { RPAR }
| ';' { SCOL }
| ':' { COL }
| ',' { COMMA }
| '=' { EQ }
| '/' { DIV }
| "&&" { AMPERAMPER }
| "||" { BARBAR }
| "::" { COLCOL }
| "^" { POWER }
| '"' { Buffer.clear str_buf; string_parse lexbuf }
| eof { EOF }
| _ { raise (Parse.Error (Location.curr lexbuf, Unexpected_eof)) }
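(* Skips (possibly nested) block comments; [n] is the current nesting depth. *)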
and comment_line n = parse
| eof
    { raise (Parse.Error (Location.curr lexbuf, Unfinished_comment)) }
| "(*"
    { comment_line (n+1) lexbuf }
| "*)"
    { if n > 0 then comment_line (n-1) lexbuf else token lexbuf }
| newline
    { incr_line lexbuf;
      comment_line n lexbuf }
| _ { comment_line n lexbuf }
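(* Accumulates string contents into [str_buf]; escaped quotes are kept verbatim. *)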
and string_parse = parse
| eof { raise (Parse.Error (Location.curr lexbuf, Unfinished_string)) }
| "\\\"" as s { Buffer.add_string str_buf s; string_parse lexbuf }
| '"' { STRING (Buffer.contents str_buf) }
| _ as c { Buffer.add_char str_buf c; string_parse lexbuf }

{

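(* Entry points: each builds a lexbuf from the given string and runs the
   corresponding Parser_lustre grammar; both raise [Error loc] on a parse
   error. A minimal usage sketch (the call site and [report] function are
   hypothetical, not part of this file):

     match annot "..." with
     | exception Error loc -> report loc
     | parsed -> ...
*)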
let annot s =
  let lexbuf = Lexing.from_string s in
  try
    Parser_lustre.lustre_annot (* ParserLustreSpec.lustre_annot *) token lexbuf
  with Parsing.Parse_error as _e -> (
    Format.eprintf "Lexing error at position %a:@.unexpected token %s@.@?"
      (fun fmt p -> Format.fprintf fmt "%s l%i c%i" p.Lexing.pos_fname p.Lexing.pos_lnum p.Lexing.pos_cnum)
      lexbuf.Lexing.lex_curr_p
      (Lexing.lexeme lexbuf);
    raise (Error (Location.curr lexbuf)))

let spec s =
  let lexbuf = Lexing.from_string s in
  try
    Parser_lustre.lustre_spec (* ParserLustreSpec.lustre_spec *) token lexbuf
  with Parsing.Parse_error ->
    raise (Error (Location.curr lexbuf))

}