path: root/libraries/sqlite/unix/sqlite-3.5.1/tool
author    dan miller 2007-10-20 02:49:29 +0000
committer dan miller 2007-10-20 02:49:29 +0000
commit    e36d23a85ebff914d74bb541558c2b6082b78edb (patch)
tree      54b58fdf162e78af64055282a6035c8d2443389d /libraries/sqlite/unix/sqlite-3.5.1/tool
parent    * Fixed an issue whereby avatar chat distances were being calculated against ... (diff)
download  opensim-SC_OLD-e36d23a85ebff914d74bb541558c2b6082b78edb.zip
          opensim-SC_OLD-e36d23a85ebff914d74bb541558c2b6082b78edb.tar.gz
          opensim-SC_OLD-e36d23a85ebff914d74bb541558c2b6082b78edb.tar.bz2
          opensim-SC_OLD-e36d23a85ebff914d74bb541558c2b6082b78edb.tar.xz
sqlite source (unix build) added to libraries
Diffstat (limited to 'libraries/sqlite/unix/sqlite-3.5.1/tool')
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/diffdb.c                  44
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/fragck.tcl               149
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/lemon.c                 4781
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/lempar.c                 778
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/memleak.awk               29
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/memleak2.awk              29
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/memleak3.tcl             233
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/mkkeywordhash.c          559
-rwxr-xr-x  libraries/sqlite/unix/sqlite-3.5.1/tool/mkopts.tcl                51
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3c.tcl           266
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3internalh.tcl   145
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/omittest.tcl             176
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/opcodeDoc.awk             23
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/report1.txt               66
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/showdb.c                  86
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/showjournal.c             76
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/soak1.tcl                103
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/space_used.tcl           111
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/spaceanal.tcl            859
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest.tcl            275
-rw-r--r--  libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest2.tcl           207
21 files changed, 9046 insertions, 0 deletions
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/diffdb.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/diffdb.c
new file mode 100644
index 0000000..0537d38
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/diffdb.c
@@ -0,0 +1,44 @@
1/*
2** A utility for printing the differences between two SQLite database files.
3*/
4#include <stdio.h>
5#include <string.h>   /* for memcmp() */
6#include <sys/types.h>
7#include <sys/stat.h>
8#include <fcntl.h>
9#include <unistd.h>
10#include <stdlib.h>
11
12
13#define PAGESIZE 1024
14static int db1 = -1;
15static int db2 = -1;
16
17int main(int argc, char **argv){
18 int iPg;
19 unsigned char a1[PAGESIZE], a2[PAGESIZE];
20 if( argc!=3 ){
21 fprintf(stderr,"Usage: %s FILENAME FILENAME\n", argv[0]);
22 exit(1);
23 }
24 db1 = open(argv[1], O_RDONLY);
25 if( db1<0 ){
26 fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
27 exit(1);
28 }
29 db2 = open(argv[2], O_RDONLY);
30 if( db2<0 ){
31 fprintf(stderr,"%s: can't open %s\n", argv[0], argv[2]);
32 exit(1);
33 }
34 iPg = 1;
35 while( read(db1, a1, PAGESIZE)==PAGESIZE && read(db2,a2,PAGESIZE)==PAGESIZE ){
36 if( memcmp(a1,a2,PAGESIZE) ){
37 printf("Page %d\n", iPg);
38 }
39 iPg++;
40 }
41 printf("%d pages checked\n", iPg-1);
42 close(db1);
43 close(db2);
44}
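diffdb.c hard-codes PAGESIZE to the historical 1024-byte default, so a database created with a different PRAGMA page_size is not compared on its real page boundaries. As a minimal sketch (not part of the tool; the helper name is illustrative), the actual page size could be read from the 2-byte big-endian field at byte offset 16 of the database header:

    #include <stdio.h>

    /* Return the page size recorded in an SQLite database header, or -1 on error. */
    static int read_page_size(const char *zFilename){
      unsigned char hdr[18];
      FILE *f = fopen(zFilename, "rb");
      if( f==0 ) return -1;
      if( fread(hdr, 1, sizeof(hdr), f)!=sizeof(hdr) ){ fclose(f); return -1; }
      fclose(f);
      return (hdr[16]<<8) | hdr[17];   /* big-endian 2-byte value at offset 16 */
    }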
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/fragck.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/fragck.tcl
new file mode 100644
index 0000000..35e76f4
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/fragck.tcl
@@ -0,0 +1,149 @@
1# Run this TCL script using "testfixture" to get a report that shows
2# the sequence of database pages used by a particular table or index.
3# This information is used for fragmentation analysis.
4#
5
6# Get the name of the database to analyze
7#
8
9if {[llength $argv]!=2} {
10 puts stderr "Usage: $argv0 database-name table-or-index-name"
11 exit 1
12}
13set file_to_analyze [lindex $argv 0]
14if {![file exists $file_to_analyze]} {
15 puts stderr "No such file: $file_to_analyze"
16 exit 1
17}
18if {![file readable $file_to_analyze]} {
19 puts stderr "File is not readable: $file_to_analyze"
20 exit 1
21}
22if {[file size $file_to_analyze]<512} {
23 puts stderr "Empty or malformed database: $file_to_analyze"
24 exit 1
25}
26set objname [lindex $argv 1]
27
28# Open the database
29#
30sqlite3 db [lindex $argv 0]
31set DB [btree_open [lindex $argv 0] 1000 0]
32
33# This proc is a wrapper around the btree_cursor_info command. The
34# second argument is an open btree cursor returned by [btree_cursor].
35# The first argument is the name of an array variable that exists in
36# the scope of the caller. If the third argument is non-zero, then
37# info is returned for the page that lies $up entries upwards in the
38# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
39# grandparent etc.)
40#
41# The following entries in that array are filled in with information retrieved
42# using [btree_cursor_info]:
43#
44# $arrayvar(page_no) = The page number
45# $arrayvar(entry_no) = The entry number
46# $arrayvar(page_entries) = Total number of entries on this page
47# $arrayvar(cell_size) = Cell size (local payload + header)
48# $arrayvar(page_freebytes) = Number of free bytes on this page
49# $arrayvar(page_freeblocks) = Number of free blocks on the page
50# $arrayvar(payload_bytes) = Total payload size (local + overflow)
51# $arrayvar(header_bytes) = Header size in bytes
52# $arrayvar(local_payload_bytes) = Local payload size
53# $arrayvar(parent) = Parent page number
54#
55proc cursor_info {arrayvar csr {up 0}} {
56 upvar $arrayvar a
57 foreach [list a(page_no) \
58 a(entry_no) \
59 a(page_entries) \
60 a(cell_size) \
61 a(page_freebytes) \
62 a(page_freeblocks) \
63 a(payload_bytes) \
64 a(header_bytes) \
65 a(local_payload_bytes) \
66 a(parent) \
67 a(first_ovfl) ] [btree_cursor_info $csr $up] break
68}
69
70# Determine the page-size of the database. This global variable is used
71# throughout the script.
72#
73set pageSize [db eval {PRAGMA page_size}]
74
75# Find the root page of table or index to be analyzed. Also find out
76# if the object is a table or an index.
77#
78if {$objname=="sqlite_master"} {
79 set rootpage 1
80 set type table
81} else {
82 db eval {
83 SELECT rootpage, type FROM sqlite_master
84 WHERE name=$objname
85 } break
86 if {![info exists rootpage]} {
87 puts stderr "no such table or index: $objname"
88 exit 1
89 }
90 if {$type!="table" && $type!="index"} {
91 puts stderr "$objname is something other than a table or index"
92 exit 1
93 }
94 if {![string is integer -strict $rootpage]} {
95 puts stderr "invalid root page for $objname: $rootpage"
96 exit 1
97 }
98}
99
100# The cursor $csr is pointing to an entry. Print out information
101# about the page that is $up levels above the page that contains
102# the entry. If $up==0 use the page that contains the entry.
103#
104# If information about the page has been printed already, then
105# this is a no-op.
106#
107proc page_info {csr up} {
108 global seen
109 cursor_info ci $csr $up
110 set pg $ci(page_no)
111 if {[info exists seen($pg)]} return
112 set seen($pg) 1
113
114 # Do parent pages first
115 #
116 if {$ci(parent)} {
117 page_info $csr [expr {$up+1}]
118 }
119
120 # Find the depth of this page
121 #
122 set depth 1
123 set i $up
124 while {$ci(parent)} {
125 incr i
126 incr depth
127 cursor_info ci $csr $i
128 }
129
130 # print the results
131 #
132 puts [format {LEVEL %d: %6d} $depth $pg]
133}
134
135
136
137
138# Loop through the object and print out page numbers
139#
140set csr [btree_cursor $DB $rootpage 0]
141for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
142 page_info $csr 0
143 set i 1
144 foreach pg [btree_ovfl_info $DB $csr] {
145 puts [format {OVFL %3d: %6d} $i $pg]
146 incr i
147 }
148}
149exit 0
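As the header comment notes, the script is meant to be run under the "testfixture" binary built from the SQLite source tree; a plain tclsh does not provide the sqlite3 and btree_* test commands it relies on. A typical invocation (the database and table names here are only placeholders) would be:

    ./testfixture tool/fragck.tcl test.db my_table

which prints one "LEVEL depth: page" line per distinct page visited and an "OVFL n: page" line for each overflow page belonging to an entry.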
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/lemon.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/lemon.c
new file mode 100644
index 0000000..0a5ce88
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/lemon.c
@@ -0,0 +1,4781 @@
1/*
2** This file contains all sources (including headers) to the LEMON
3** LALR(1) parser generator. The sources have been combined into a
4** single file to make it easy to include LEMON in the source tree
5** and Makefile of another program.
6**
7** The author of this program disclaims copyright.
8*/
9#include <stdio.h>
10#include <stdarg.h>
11#include <string.h>
12#include <ctype.h>
13#include <stdlib.h>
14#include <assert.h>
15
16#ifndef __WIN32__
17# if defined(_WIN32) || defined(WIN32)
18# define __WIN32__
19# endif
20#endif
21
22#ifdef __WIN32__
23extern int access();
24#else
25#include <unistd.h>
26#endif
27
28/* #define PRIVATE static */
29#define PRIVATE
30
31#ifdef TEST
32#define MAXRHS 5 /* Set low to exercise exception code */
33#else
34#define MAXRHS 1000
35#endif
36
37static char *msort(char*,char**,int(*)(const char*,const char*));
38
39static struct action *Action_new(void);
40static struct action *Action_sort(struct action *);
41
42/********** From the file "build.h" ************************************/
43void FindRulePrecedences();
44void FindFirstSets();
45void FindStates();
46void FindLinks();
47void FindFollowSets();
48void FindActions();
49
50/********* From the file "configlist.h" *********************************/
51void Configlist_init(/* void */);
52struct config *Configlist_add(/* struct rule *, int */);
53struct config *Configlist_addbasis(/* struct rule *, int */);
54void Configlist_closure(/* void */);
55void Configlist_sort(/* void */);
56void Configlist_sortbasis(/* void */);
57struct config *Configlist_return(/* void */);
58struct config *Configlist_basis(/* void */);
59void Configlist_eat(/* struct config * */);
60void Configlist_reset(/* void */);
61
62/********* From the file "error.h" ***************************************/
63void ErrorMsg(const char *, int,const char *, ...);
64
65/****** From the file "option.h" ******************************************/
66struct s_options {
67 enum { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR,
68 OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR} type;
69 char *label;
70 char *arg;
71 char *message;
72};
73int OptInit(/* char**,struct s_options*,FILE* */);
74int OptNArgs(/* void */);
75char *OptArg(/* int */);
76void OptErr(/* int */);
77void OptPrint(/* void */);
78
79/******** From the file "parse.h" *****************************************/
80void Parse(/* struct lemon *lemp */);
81
82/********* From the file "plink.h" ***************************************/
83struct plink *Plink_new(/* void */);
84void Plink_add(/* struct plink **, struct config * */);
85void Plink_copy(/* struct plink **, struct plink * */);
86void Plink_delete(/* struct plink * */);
87
88/********** From the file "report.h" *************************************/
89void Reprint(/* struct lemon * */);
90void ReportOutput(/* struct lemon * */);
91void ReportTable(/* struct lemon * */);
92void ReportHeader(/* struct lemon * */);
93void CompressTables(/* struct lemon * */);
94void ResortStates(/* struct lemon * */);
95
96/********** From the file "set.h" ****************************************/
97void SetSize(/* int N */); /* All sets will be of size N */
98char *SetNew(/* void */); /* A new set for element 0..N */
99void SetFree(/* char* */); /* Deallocate a set */
100
101int SetAdd(/* char*,int */); /* Add element to a set */
102int SetUnion(/* char *A,char *B */); /* A <- A U B, thru element N */
103
104#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
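/* Editorial note (illustration only, not part of the original file): a set is
** a plain char array with one byte per possible element.  For example, after
**     SetSize(3);
**     char *s = SetNew();
**     SetAdd(s, 1);
** SetFind(s,1) is non-zero while SetFind(s,0) and SetFind(s,2) are zero, and
** SetUnion(a,b) ORs set b into set a, returning non-zero if a changed.  The
** first-sets and follow-sets built later in this file are stored this way. */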
105
106/********** From the file "struct.h" *************************************/
107/*
108** Principal data structures for the LEMON parser generator.
109*/
110
111typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean;
112
113/* Symbols (terminals and nonterminals) of the grammar are stored
114** in the following: */
115struct symbol {
116 char *name; /* Name of the symbol */
117 int index; /* Index number for this symbol */
118 enum {
119 TERMINAL,
120 NONTERMINAL,
121 MULTITERMINAL
122 } type; /* Symbols are all either TERMINALS or NTs */
123 struct rule *rule; /* Linked list of rules of this (if an NT) */
124 struct symbol *fallback; /* fallback token in case this token doesn't parse */
125 int prec; /* Precedence if defined (-1 otherwise) */
126 enum e_assoc {
127 LEFT,
128 RIGHT,
129 NONE,
130 UNK
131  } assoc;            /* Associativity if precedence is defined */
132 char *firstset; /* First-set for all rules of this symbol */
133 Boolean lambda; /* True if NT and can generate an empty string */
134 char *destructor; /* Code which executes whenever this symbol is
135 ** popped from the stack during error processing */
136 int destructorln; /* Line number of destructor code */
137 char *datatype; /* The data type of information held by this
138 ** object. Only used if type==NONTERMINAL */
139 int dtnum; /* The data type number. In the parser, the value
140 ** stack is a union. The .yy%d element of this
141 ** union is the correct data type for this object */
142 /* The following fields are used by MULTITERMINALs only */
143 int nsubsym; /* Number of constituent symbols in the MULTI */
144 struct symbol **subsym; /* Array of constituent symbols */
145};
146
147/* Each production rule in the grammar is stored in the following
148** structure. */
149struct rule {
150 struct symbol *lhs; /* Left-hand side of the rule */
151 char *lhsalias; /* Alias for the LHS (NULL if none) */
152 int ruleline; /* Line number for the rule */
153 int nrhs; /* Number of RHS symbols */
154 struct symbol **rhs; /* The RHS symbols */
155 char **rhsalias; /* An alias for each RHS symbol (NULL if none) */
156 int line; /* Line number at which code begins */
157 char *code; /* The code executed when this rule is reduced */
158 struct symbol *precsym; /* Precedence symbol for this rule */
159 int index; /* An index number for this rule */
160 Boolean canReduce; /* True if this rule is ever reduced */
161 struct rule *nextlhs; /* Next rule with the same LHS */
162 struct rule *next; /* Next rule in the global list */
163};
164
165/* A configuration is a production rule of the grammar together with
166** a mark (dot) showing how much of that rule has been processed so far.
167** Configurations also contain a follow-set which is a list of terminal
168** symbols which are allowed to immediately follow the end of the rule.
169** Every configuration is recorded as an instance of the following: */
170struct config {
171 struct rule *rp; /* The rule upon which the configuration is based */
172 int dot; /* The parse point */
173 char *fws; /* Follow-set for this configuration only */
174 struct plink *fplp; /* Follow-set forward propagation links */
175 struct plink *bplp; /* Follow-set backwards propagation links */
176 struct state *stp; /* Pointer to state which contains this */
177 enum {
178 COMPLETE, /* The status is used during followset and */
179 INCOMPLETE /* shift computations */
180 } status;
181 struct config *next; /* Next configuration in the state */
182 struct config *bp; /* The next basis configuration */
183};
184
185/* Every shift or reduce operation is stored as one of the following */
186struct action {
187 struct symbol *sp; /* The look-ahead symbol */
188 enum e_action {
189 SHIFT,
190 ACCEPT,
191 REDUCE,
192 ERROR,
193 CONFLICT, /* Was a reduce, but part of a conflict */
194 SH_RESOLVED, /* Was a shift. Precedence resolved conflict */
195 RD_RESOLVED, /* Was reduce. Precedence resolved conflict */
196 NOT_USED /* Deleted by compression */
197 } type;
198 union {
199 struct state *stp; /* The new state, if a shift */
200 struct rule *rp; /* The rule, if a reduce */
201 } x;
202 struct action *next; /* Next action for this state */
203 struct action *collide; /* Next action with the same hash */
204};
205
206/* Each state of the generated parser's finite state machine
207** is encoded as an instance of the following structure. */
208struct state {
209 struct config *bp; /* The basis configurations for this state */
210 struct config *cfp; /* All configurations in this set */
211  int statenum;            /* Sequential number for this state */
212 struct action *ap; /* Array of actions for this state */
213 int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */
214 int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */
215 int iDflt; /* Default action */
216};
217#define NO_OFFSET (-2147483647)
218
219/* A followset propagation link indicates that the contents of one
220** configuration followset should be propagated to another whenever
221** the first changes. */
222struct plink {
223 struct config *cfp; /* The configuration to which linked */
224 struct plink *next; /* The next propagate link */
225};
226
227/* The state vector for the entire parser generator is recorded as
228** follows. (LEMON uses no global variables and makes little use of
229** static variables. Fields in the following structure can be thought
230** of as being global variables in the program.) */
231struct lemon {
232 struct state **sorted; /* Table of states sorted by state number */
233 struct rule *rule; /* List of all rules */
234 int nstate; /* Number of states */
235 int nrule; /* Number of rules */
236 int nsymbol; /* Number of terminal and nonterminal symbols */
237 int nterminal; /* Number of terminal symbols */
238 struct symbol **symbols; /* Sorted array of pointers to symbols */
239 int errorcnt; /* Number of errors */
240 struct symbol *errsym; /* The error symbol */
241 struct symbol *wildcard; /* Token that matches anything */
242 char *name; /* Name of the generated parser */
243  char *arg;               /* Declaration of the 3rd argument to parser */
244 char *tokentype; /* Type of terminal symbols in the parser stack */
245 char *vartype; /* The default type of non-terminal symbols */
246 char *start; /* Name of the start symbol for the grammar */
247 char *stacksize; /* Size of the parser stack */
248 char *include; /* Code to put at the start of the C file */
249 int includeln; /* Line number for start of include code */
250 char *error; /* Code to execute when an error is seen */
251 int errorln; /* Line number for start of error code */
252 char *overflow; /* Code to execute on a stack overflow */
253 int overflowln; /* Line number for start of overflow code */
254 char *failure; /* Code to execute on parser failure */
255 int failureln; /* Line number for start of failure code */
256  char *accept;            /* Code to execute when the parser accepts */
257 int acceptln; /* Line number for the start of accept code */
258 char *extracode; /* Code appended to the generated file */
259 int extracodeln; /* Line number for the start of the extra code */
260 char *tokendest; /* Code to execute to destroy token data */
261 int tokendestln; /* Line number for token destroyer code */
262 char *vardest; /* Code for the default non-terminal destructor */
263 int vardestln; /* Line number for default non-term destructor code*/
264 char *filename; /* Name of the input file */
265 char *outname; /* Name of the current output file */
266 char *tokenprefix; /* A prefix added to token names in the .h file */
267 int nconflict; /* Number of parsing conflicts */
268 int tablesize; /* Size of the parse tables */
269 int basisflag; /* Print only basis configurations */
270  int has_fallback;        /* True if any %fallback is seen in the grammar */
271 char *argv0; /* Name of the program */
272};
273
274#define MemoryCheck(X) if((X)==0){ \
275 extern void memory_error(); \
276 memory_error(); \
277}
278
279/**************** From the file "table.h" *********************************/
280/*
281** All code in this file has been automatically generated
282** from a specification in the file
283** "table.q"
284** by the associative array code building program "aagen".
285** Do not edit this file! Instead, edit the specification
286** file, then rerun aagen.
287*/
288/*
289** Code for processing tables in the LEMON parser generator.
290*/
291
292/* Routines for handling strings */
293
294char *Strsafe();
295
296void Strsafe_init(/* void */);
297int Strsafe_insert(/* char * */);
298char *Strsafe_find(/* char * */);
299
300/* Routines for handling symbols of the grammar */
301
302struct symbol *Symbol_new();
303int Symbolcmpp(/* struct symbol **, struct symbol ** */);
304void Symbol_init(/* void */);
305int Symbol_insert(/* struct symbol *, char * */);
306struct symbol *Symbol_find(/* char * */);
307struct symbol *Symbol_Nth(/* int */);
308int Symbol_count(/* */);
309struct symbol **Symbol_arrayof(/* */);
310
311/* Routines to manage the state table */
312
313int Configcmp(/* struct config *, struct config * */);
314struct state *State_new();
315void State_init(/* void */);
316int State_insert(/* struct state *, struct config * */);
317struct state *State_find(/* struct config * */);
318struct state **State_arrayof(/* */);
319
320/* Routines used for efficiency in Configlist_add */
321
322void Configtable_init(/* void */);
323int Configtable_insert(/* struct config * */);
324struct config *Configtable_find(/* struct config * */);
325void Configtable_clear(/* int(*)(struct config *) */);
326/****************** From the file "action.c" *******************************/
327/*
328** Routines processing parser actions in the LEMON parser generator.
329*/
330
331/* Allocate a new parser action */
332static struct action *Action_new(void){
333 static struct action *freelist = 0;
334 struct action *new;
335
336 if( freelist==0 ){
337 int i;
338 int amt = 100;
339 freelist = (struct action *)malloc( sizeof(struct action)*amt );
340 if( freelist==0 ){
341 fprintf(stderr,"Unable to allocate memory for a new parser action.");
342 exit(1);
343 }
344 for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
345 freelist[amt-1].next = 0;
346 }
347 new = freelist;
348 freelist = freelist->next;
349 return new;
350}
351
352/* Compare two actions for sorting purposes. Return negative, zero, or
353** positive if the first action is less than, equal to, or greater than
354** the second
355*/
356static int actioncmp(
357 struct action *ap1,
358 struct action *ap2
359){
360 int rc;
361 rc = ap1->sp->index - ap2->sp->index;
362 if( rc==0 ) rc = (int)ap1->type - (int)ap2->type;
363 if( rc==0 ){
364 rc = ap1->x.rp->index - ap2->x.rp->index;
365 }
366 return rc;
367}
368
369/* Sort parser actions */
370static struct action *Action_sort(
371 struct action *ap
372){
373 ap = (struct action *)msort((char *)ap,(char **)&ap->next,
374 (int(*)(const char*,const char*))actioncmp);
375 return ap;
376}
377
378void Action_add(app,type,sp,arg)
379struct action **app;
380enum e_action type;
381struct symbol *sp;
382char *arg;
383{
384 struct action *new;
385 new = Action_new();
386 new->next = *app;
387 *app = new;
388 new->type = type;
389 new->sp = sp;
390 if( type==SHIFT ){
391 new->x.stp = (struct state *)arg;
392 }else{
393 new->x.rp = (struct rule *)arg;
394 }
395}
396/********************** New code to implement the "acttab" module ***********/
397/*
398** This module implements routines used to construct the yy_action[] table.
399*/
400
401/*
402** The state of the yy_action table under construction is an instance of
403** the following structure
404*/
405typedef struct acttab acttab;
406struct acttab {
407 int nAction; /* Number of used slots in aAction[] */
408 int nActionAlloc; /* Slots allocated for aAction[] */
409 struct {
410 int lookahead; /* Value of the lookahead token */
411 int action; /* Action to take on the given lookahead */
412 } *aAction, /* The yy_action[] table under construction */
413 *aLookahead; /* A single new transaction set */
414 int mnLookahead; /* Minimum aLookahead[].lookahead */
415 int mnAction; /* Action associated with mnLookahead */
416 int mxLookahead; /* Maximum aLookahead[].lookahead */
417 int nLookahead; /* Used slots in aLookahead[] */
418 int nLookaheadAlloc; /* Slots allocated in aLookahead[] */
419};
420
421/* Return the number of entries in the yy_action table */
422#define acttab_size(X) ((X)->nAction)
423
424/* The value for the N-th entry in yy_action */
425#define acttab_yyaction(X,N) ((X)->aAction[N].action)
426
427/* The value for the N-th entry in yy_lookahead */
428#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead)
429
430/* Free all memory associated with the given acttab */
431void acttab_free(acttab *p){
432 free( p->aAction );
433 free( p->aLookahead );
434 free( p );
435}
436
437/* Allocate a new acttab structure */
438acttab *acttab_alloc(void){
439 acttab *p = malloc( sizeof(*p) );
440 if( p==0 ){
441 fprintf(stderr,"Unable to allocate memory for a new acttab.");
442 exit(1);
443 }
444 memset(p, 0, sizeof(*p));
445 return p;
446}
447
448/* Add a new action to the current transaction set
449*/
450void acttab_action(acttab *p, int lookahead, int action){
451 if( p->nLookahead>=p->nLookaheadAlloc ){
452 p->nLookaheadAlloc += 25;
453 p->aLookahead = realloc( p->aLookahead,
454 sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
455 if( p->aLookahead==0 ){
456 fprintf(stderr,"malloc failed\n");
457 exit(1);
458 }
459 }
460 if( p->nLookahead==0 ){
461 p->mxLookahead = lookahead;
462 p->mnLookahead = lookahead;
463 p->mnAction = action;
464 }else{
465 if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
466 if( p->mnLookahead>lookahead ){
467 p->mnLookahead = lookahead;
468 p->mnAction = action;
469 }
470 }
471 p->aLookahead[p->nLookahead].lookahead = lookahead;
472 p->aLookahead[p->nLookahead].action = action;
473 p->nLookahead++;
474}
475
476/*
477** Add the transaction set built up with prior calls to acttab_action()
478** into the current action table. Then reset the transaction set back
479** to an empty set in preparation for a new round of acttab_action() calls.
480**
481** Return the offset into the action table of the new transaction.
482*/
483int acttab_insert(acttab *p){
484 int i, j, k, n;
485 assert( p->nLookahead>0 );
486
487 /* Make sure we have enough space to hold the expanded action table
488 ** in the worst case. The worst case occurs if the transaction set
489 ** must be appended to the current action table
490 */
491 n = p->mxLookahead + 1;
492 if( p->nAction + n >= p->nActionAlloc ){
493 int oldAlloc = p->nActionAlloc;
494 p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
495 p->aAction = realloc( p->aAction,
496 sizeof(p->aAction[0])*p->nActionAlloc);
497 if( p->aAction==0 ){
498 fprintf(stderr,"malloc failed\n");
499 exit(1);
500 }
501 for(i=oldAlloc; i<p->nActionAlloc; i++){
502 p->aAction[i].lookahead = -1;
503 p->aAction[i].action = -1;
504 }
505 }
506
507 /* Scan the existing action table looking for an offset where we can
508 ** insert the current transaction set. Fall out of the loop when that
509 ** offset is found. In the worst case, we fall out of the loop when
510 ** i reaches p->nAction, which means we append the new transaction set.
511 **
512 ** i is the index in p->aAction[] where p->mnLookahead is inserted.
513 */
514 for(i=0; i<p->nAction+p->mnLookahead; i++){
515 if( p->aAction[i].lookahead<0 ){
516 for(j=0; j<p->nLookahead; j++){
517 k = p->aLookahead[j].lookahead - p->mnLookahead + i;
518 if( k<0 ) break;
519 if( p->aAction[k].lookahead>=0 ) break;
520 }
521 if( j<p->nLookahead ) continue;
522 for(j=0; j<p->nAction; j++){
523 if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
524 }
525 if( j==p->nAction ){
526 break; /* Fits in empty slots */
527 }
528 }else if( p->aAction[i].lookahead==p->mnLookahead ){
529 if( p->aAction[i].action!=p->mnAction ) continue;
530 for(j=0; j<p->nLookahead; j++){
531 k = p->aLookahead[j].lookahead - p->mnLookahead + i;
532 if( k<0 || k>=p->nAction ) break;
533 if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
534 if( p->aLookahead[j].action!=p->aAction[k].action ) break;
535 }
536 if( j<p->nLookahead ) continue;
537 n = 0;
538 for(j=0; j<p->nAction; j++){
539 if( p->aAction[j].lookahead<0 ) continue;
540 if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
541 }
542 if( n==p->nLookahead ){
543 break; /* Same as a prior transaction set */
544 }
545 }
546 }
547 /* Insert transaction set at index i. */
548 for(j=0; j<p->nLookahead; j++){
549 k = p->aLookahead[j].lookahead - p->mnLookahead + i;
550 p->aAction[k] = p->aLookahead[j];
551 if( k>=p->nAction ) p->nAction = k+1;
552 }
553 p->nLookahead = 0;
554
555 /* Return the offset that is added to the lookahead in order to get the
556 ** index into yy_action of the action */
557 return i - p->mnLookahead;
558}
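/* Editorial sketch (illustration only, not part of the original file): the
** intended calling pattern for this module, with hypothetical variable names.
** For one parser state, queue every (lookahead, action) pair and then fold
** the whole transaction set into the growing yy_action[] table; the return
** value of acttab_insert() is the offset later recorded for that state.
**
**     acttab *pTab = acttab_alloc();
**     ...for each action of the state...
**       acttab_action(pTab, lookahead, actionValue);
**     stateOffset = acttab_insert(pTab);
*/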
559
560/********************** From the file "build.c" *****************************/
561/*
562** Routines to construct the finite state machine for the LEMON
563** parser generator.
564*/
565
566/* Find a precedence symbol of every rule in the grammar.
567**
568** Those rules which have a precedence symbol coded in the input
569** grammar using the "[symbol]" construct will already have the
570** rp->precsym field filled. Other rules take as their precedence
571** symbol the first RHS symbol with a defined precedence. If there
572** are no RHS symbols with a defined precedence, the precedence
573** symbol field is left blank.
574*/
575void FindRulePrecedences(xp)
576struct lemon *xp;
577{
578 struct rule *rp;
579 for(rp=xp->rule; rp; rp=rp->next){
580 if( rp->precsym==0 ){
581 int i, j;
582 for(i=0; i<rp->nrhs && rp->precsym==0; i++){
583 struct symbol *sp = rp->rhs[i];
584 if( sp->type==MULTITERMINAL ){
585 for(j=0; j<sp->nsubsym; j++){
586 if( sp->subsym[j]->prec>=0 ){
587 rp->precsym = sp->subsym[j];
588 break;
589 }
590 }
591 }else if( sp->prec>=0 ){
592 rp->precsym = rp->rhs[i];
593 }
594 }
595 }
596 }
597 return;
598}
599
600/* Find all nonterminals which will generate the empty string.
601** Then go back and compute the first sets of every nonterminal.
602** The first set is the set of all terminal symbols which can begin
603** a string generated by that nonterminal.
604*/
605void FindFirstSets(lemp)
606struct lemon *lemp;
607{
608 int i, j;
609 struct rule *rp;
610 int progress;
611
612 for(i=0; i<lemp->nsymbol; i++){
613 lemp->symbols[i]->lambda = LEMON_FALSE;
614 }
615 for(i=lemp->nterminal; i<lemp->nsymbol; i++){
616 lemp->symbols[i]->firstset = SetNew();
617 }
618
619 /* First compute all lambdas */
620 do{
621 progress = 0;
622 for(rp=lemp->rule; rp; rp=rp->next){
623 if( rp->lhs->lambda ) continue;
624 for(i=0; i<rp->nrhs; i++){
625 struct symbol *sp = rp->rhs[i];
626 if( sp->type!=TERMINAL || sp->lambda==LEMON_FALSE ) break;
627 }
628 if( i==rp->nrhs ){
629 rp->lhs->lambda = LEMON_TRUE;
630 progress = 1;
631 }
632 }
633 }while( progress );
634
635 /* Now compute all first sets */
636 do{
637 struct symbol *s1, *s2;
638 progress = 0;
639 for(rp=lemp->rule; rp; rp=rp->next){
640 s1 = rp->lhs;
641 for(i=0; i<rp->nrhs; i++){
642 s2 = rp->rhs[i];
643 if( s2->type==TERMINAL ){
644 progress += SetAdd(s1->firstset,s2->index);
645 break;
646 }else if( s2->type==MULTITERMINAL ){
647 for(j=0; j<s2->nsubsym; j++){
648 progress += SetAdd(s1->firstset,s2->subsym[j]->index);
649 }
650 break;
651 }else if( s1==s2 ){
652 if( s1->lambda==LEMON_FALSE ) break;
653 }else{
654 progress += SetUnion(s1->firstset,s2->firstset);
655 if( s2->lambda==LEMON_FALSE ) break;
656 }
657 }
658 }
659 }while( progress );
660 return;
661}
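/* Editorial note (worked example, not part of the original file): for a small
** grammar such as
**     expr ::= expr PLUS term.    expr ::= term.    term ::= NUMBER.
** no nonterminal can derive the empty string, so every lambda stays
** LEMON_FALSE, and the second loop above propagates first(term) = {NUMBER}
** into first(expr) = {NUMBER} on a later pass. */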
662
663/* Compute all LR(0) states for the grammar. Links
664** are added between some states so that the LR(1) follow sets
665** can be computed later.
666*/
667PRIVATE struct state *getstate(/* struct lemon * */); /* forward reference */
668void FindStates(lemp)
669struct lemon *lemp;
670{
671 struct symbol *sp;
672 struct rule *rp;
673
674 Configlist_init();
675
676 /* Find the start symbol */
677 if( lemp->start ){
678 sp = Symbol_find(lemp->start);
679 if( sp==0 ){
680 ErrorMsg(lemp->filename,0,
681"The specified start symbol \"%s\" is not \
682in a nonterminal of the grammar. \"%s\" will be used as the start \
683symbol instead.",lemp->start,lemp->rule->lhs->name);
684 lemp->errorcnt++;
685 sp = lemp->rule->lhs;
686 }
687 }else{
688 sp = lemp->rule->lhs;
689 }
690
691 /* Make sure the start symbol doesn't occur on the right-hand side of
692 ** any rule. Report an error if it does. (YACC would generate a new
693 ** start symbol in this case.) */
694 for(rp=lemp->rule; rp; rp=rp->next){
695 int i;
696 for(i=0; i<rp->nrhs; i++){
697 if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */
698 ErrorMsg(lemp->filename,0,
699"The start symbol \"%s\" occurs on the \
700right-hand side of a rule. This will result in a parser which \
701does not work properly.",sp->name);
702 lemp->errorcnt++;
703 }
704 }
705 }
706
707 /* The basis configuration set for the first state
708 ** is all rules which have the start symbol as their
709 ** left-hand side */
710 for(rp=sp->rule; rp; rp=rp->nextlhs){
711 struct config *newcfp;
712 newcfp = Configlist_addbasis(rp,0);
713 SetAdd(newcfp->fws,0);
714 }
715
716 /* Compute the first state. All other states will be
717 ** computed automatically during the computation of the first one.
718 ** The returned pointer to the first state is not used. */
719 (void)getstate(lemp);
720 return;
721}
722
723/* Return a pointer to a state which is described by the configuration
724** list which has been built from calls to Configlist_add.
725*/
726PRIVATE void buildshifts(/* struct lemon *, struct state * */); /* Forwd ref */
727PRIVATE struct state *getstate(lemp)
728struct lemon *lemp;
729{
730 struct config *cfp, *bp;
731 struct state *stp;
732
733 /* Extract the sorted basis of the new state. The basis was constructed
734 ** by prior calls to "Configlist_addbasis()". */
735 Configlist_sortbasis();
736 bp = Configlist_basis();
737
738 /* Get a state with the same basis */
739 stp = State_find(bp);
740 if( stp ){
741 /* A state with the same basis already exists! Copy all the follow-set
742 ** propagation links from the state under construction into the
743 ** preexisting state, then return a pointer to the preexisting state */
744 struct config *x, *y;
745 for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){
746 Plink_copy(&y->bplp,x->bplp);
747 Plink_delete(x->fplp);
748 x->fplp = x->bplp = 0;
749 }
750 cfp = Configlist_return();
751 Configlist_eat(cfp);
752 }else{
753 /* This really is a new state. Construct all the details */
754 Configlist_closure(lemp); /* Compute the configuration closure */
755 Configlist_sort(); /* Sort the configuration closure */
756 cfp = Configlist_return(); /* Get a pointer to the config list */
757 stp = State_new(); /* A new state structure */
758 MemoryCheck(stp);
759 stp->bp = bp; /* Remember the configuration basis */
760 stp->cfp = cfp; /* Remember the configuration closure */
761 stp->statenum = lemp->nstate++; /* Every state gets a sequence number */
762 stp->ap = 0; /* No actions, yet. */
763 State_insert(stp,stp->bp); /* Add to the state table */
764 buildshifts(lemp,stp); /* Recursively compute successor states */
765 }
766 return stp;
767}
768
769/*
770** Return true if two symbols are the same.
771*/
772int same_symbol(a,b)
773struct symbol *a;
774struct symbol *b;
775{
776 int i;
777 if( a==b ) return 1;
778 if( a->type!=MULTITERMINAL ) return 0;
779 if( b->type!=MULTITERMINAL ) return 0;
780 if( a->nsubsym!=b->nsubsym ) return 0;
781 for(i=0; i<a->nsubsym; i++){
782 if( a->subsym[i]!=b->subsym[i] ) return 0;
783 }
784 return 1;
785}
786
787/* Construct all successor states to the given state. A "successor"
788** state is any state which can be reached by a shift action.
789*/
790PRIVATE void buildshifts(lemp,stp)
791struct lemon *lemp;
792struct state *stp; /* The state from which successors are computed */
793{
794 struct config *cfp; /* For looping thru the config closure of "stp" */
795 struct config *bcfp; /* For the inner loop on config closure of "stp" */
796  struct config *new;         /* A basis config added to the successor state */
797 struct symbol *sp; /* Symbol following the dot in configuration "cfp" */
798 struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */
799 struct state *newstp; /* A pointer to a successor state */
800
801  /* Each configuration becomes complete after it contributes to a successor
802 ** state. Initially, all configurations are incomplete */
803 for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
804
805 /* Loop through all configurations of the state "stp" */
806 for(cfp=stp->cfp; cfp; cfp=cfp->next){
807 if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */
808 if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */
809 Configlist_reset(); /* Reset the new config set */
810 sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */
811
812 /* For every configuration in the state "stp" which has the symbol "sp"
813 ** following its dot, add the same configuration to the basis set under
814 ** construction but with the dot shifted one symbol to the right. */
815 for(bcfp=cfp; bcfp; bcfp=bcfp->next){
816 if( bcfp->status==COMPLETE ) continue; /* Already used */
817 if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
818 bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */
819 if( !same_symbol(bsp,sp) ) continue; /* Must be same as for "cfp" */
820 bcfp->status = COMPLETE; /* Mark this config as used */
821 new = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
822 Plink_add(&new->bplp,bcfp);
823 }
824
825 /* Get a pointer to the state described by the basis configuration set
826 ** constructed in the preceding loop */
827 newstp = getstate(lemp);
828
829 /* The state "newstp" is reached from the state "stp" by a shift action
830 ** on the symbol "sp" */
831 if( sp->type==MULTITERMINAL ){
832 int i;
833 for(i=0; i<sp->nsubsym; i++){
834 Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp);
835 }
836 }else{
837 Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
838 }
839 }
840}
841
842/*
843** Construct the propagation links
844*/
845void FindLinks(lemp)
846struct lemon *lemp;
847{
848 int i;
849 struct config *cfp, *other;
850 struct state *stp;
851 struct plink *plp;
852
853 /* Housekeeping detail:
854 ** Add to every propagate link a pointer back to the state to
855 ** which the link is attached. */
856 for(i=0; i<lemp->nstate; i++){
857 stp = lemp->sorted[i];
858 for(cfp=stp->cfp; cfp; cfp=cfp->next){
859 cfp->stp = stp;
860 }
861 }
862
863 /* Convert all backlinks into forward links. Only the forward
864 ** links are used in the follow-set computation. */
865 for(i=0; i<lemp->nstate; i++){
866 stp = lemp->sorted[i];
867 for(cfp=stp->cfp; cfp; cfp=cfp->next){
868 for(plp=cfp->bplp; plp; plp=plp->next){
869 other = plp->cfp;
870 Plink_add(&other->fplp,cfp);
871 }
872 }
873 }
874}
875
876/* Compute all followsets.
877**
878** A followset is the set of all symbols which can come immediately
879** after a configuration.
880*/
881void FindFollowSets(lemp)
882struct lemon *lemp;
883{
884 int i;
885 struct config *cfp;
886 struct plink *plp;
887 int progress;
888 int change;
889
890 for(i=0; i<lemp->nstate; i++){
891 for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
892 cfp->status = INCOMPLETE;
893 }
894 }
895
896 do{
897 progress = 0;
898 for(i=0; i<lemp->nstate; i++){
899 for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
900 if( cfp->status==COMPLETE ) continue;
901 for(plp=cfp->fplp; plp; plp=plp->next){
902 change = SetUnion(plp->cfp->fws,cfp->fws);
903 if( change ){
904 plp->cfp->status = INCOMPLETE;
905 progress = 1;
906 }
907 }
908 cfp->status = COMPLETE;
909 }
910 }
911 }while( progress );
912}
913
914static int resolve_conflict();
915
916/* Compute the reduce actions, and resolve conflicts.
917*/
918void FindActions(lemp)
919struct lemon *lemp;
920{
921 int i,j;
922 struct config *cfp;
923 struct state *stp;
924 struct symbol *sp;
925 struct rule *rp;
926
927 /* Add all of the reduce actions
928 ** A reduce action is added for each element of the followset of
929 ** a configuration which has its dot at the extreme right.
930 */
931 for(i=0; i<lemp->nstate; i++){ /* Loop over all states */
932 stp = lemp->sorted[i];
933 for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */
934 if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */
935 for(j=0; j<lemp->nterminal; j++){
936 if( SetFind(cfp->fws,j) ){
937 /* Add a reduce action to the state "stp" which will reduce by the
938 ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
939 Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
940 }
941 }
942 }
943 }
944 }
945
946 /* Add the accepting token */
947 if( lemp->start ){
948 sp = Symbol_find(lemp->start);
949 if( sp==0 ) sp = lemp->rule->lhs;
950 }else{
951 sp = lemp->rule->lhs;
952 }
953 /* Add to the first state (which is always the starting state of the
954 ** finite state machine) an action to ACCEPT if the lookahead is the
955 ** start nonterminal. */
956 Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
957
958 /* Resolve conflicts */
959 for(i=0; i<lemp->nstate; i++){
960 struct action *ap, *nap;
961 struct state *stp;
962 stp = lemp->sorted[i];
963 /* assert( stp->ap ); */
964 stp->ap = Action_sort(stp->ap);
965 for(ap=stp->ap; ap && ap->next; ap=ap->next){
966 for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
967 /* The two actions "ap" and "nap" have the same lookahead.
968 ** Figure out which one should be used */
969 lemp->nconflict += resolve_conflict(ap,nap,lemp->errsym);
970 }
971 }
972 }
973
974 /* Report an error for each rule that can never be reduced. */
975 for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE;
976 for(i=0; i<lemp->nstate; i++){
977 struct action *ap;
978 for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
979 if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE;
980 }
981 }
982 for(rp=lemp->rule; rp; rp=rp->next){
983 if( rp->canReduce ) continue;
984 ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
985 lemp->errorcnt++;
986 }
987}
988
989/* Resolve a conflict between the two given actions. If the
990** conflict can't be resolved, return non-zero.
991**
992** NO LONGER TRUE:
993** To resolve a conflict, first look to see if either action
994** is on an error rule. In that case, take the action which
995** is not associated with the error rule. If neither or both
996** actions are associated with an error rule, then try to
997** use precedence to resolve the conflict.
998**
999** If either action is a SHIFT, then it must be apx. This
1000** function won't work if apx->type==REDUCE and apy->type==SHIFT.
1001*/
1002static int resolve_conflict(apx,apy,errsym)
1003struct action *apx;
1004struct action *apy;
1005struct symbol *errsym; /* The error symbol (if defined. NULL otherwise) */
1006{
1007 struct symbol *spx, *spy;
1008 int errcnt = 0;
1009 assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */
1010 if( apx->type==SHIFT && apy->type==SHIFT ){
1011 apy->type = CONFLICT;
1012 errcnt++;
1013 }
1014 if( apx->type==SHIFT && apy->type==REDUCE ){
1015 spx = apx->sp;
1016 spy = apy->x.rp->precsym;
1017 if( spy==0 || spx->prec<0 || spy->prec<0 ){
1018 /* Not enough precedence information. */
1019 apy->type = CONFLICT;
1020 errcnt++;
1021    }else if( spx->prec>spy->prec ){    /* Higher precedence wins */
1022 apy->type = RD_RESOLVED;
1023 }else if( spx->prec<spy->prec ){
1024 apx->type = SH_RESOLVED;
1025 }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
1026 apy->type = RD_RESOLVED; /* associativity */
1027 }else if( spx->prec==spy->prec && spx->assoc==LEFT ){ /* to break tie */
1028 apx->type = SH_RESOLVED;
1029 }else{
1030 assert( spx->prec==spy->prec && spx->assoc==NONE );
1031 apy->type = CONFLICT;
1032 errcnt++;
1033 }
1034 }else if( apx->type==REDUCE && apy->type==REDUCE ){
1035 spx = apx->x.rp->precsym;
1036 spy = apy->x.rp->precsym;
1037 if( spx==0 || spy==0 || spx->prec<0 ||
1038 spy->prec<0 || spx->prec==spy->prec ){
1039 apy->type = CONFLICT;
1040 errcnt++;
1041 }else if( spx->prec>spy->prec ){
1042 apy->type = RD_RESOLVED;
1043 }else if( spx->prec<spy->prec ){
1044 apx->type = RD_RESOLVED;
1045 }
1046 }else{
1047 assert(
1048 apx->type==SH_RESOLVED ||
1049 apx->type==RD_RESOLVED ||
1050 apx->type==CONFLICT ||
1051 apy->type==SH_RESOLVED ||
1052 apy->type==RD_RESOLVED ||
1053 apy->type==CONFLICT
1054 );
1055 /* The REDUCE/SHIFT case cannot happen because SHIFTs come before
1056 ** REDUCEs on the list. If we reach this point it must be because
1057 ** the parser conflict had already been resolved. */
1058 }
1059 return errcnt;
1060}
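/* Editorial note (worked example, not part of the original file): given
**     %left PLUS.
**     expr ::= expr PLUS expr.
** the state containing "expr ::= expr PLUS expr ." has both a shift and a
** reduce on the lookahead PLUS.  The token and the rule share one precedence
** level with LEFT associativity, so the code above marks the shift
** SH_RESOLVED and the reduce wins, producing the usual left-associative
** grouping of a+b+c. */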
1061/********************* From the file "configlist.c" *************************/
1062/*
1063** Routines for processing a configuration list and building a state
1064** in the LEMON parser generator.
1065*/
1066
1067static struct config *freelist = 0; /* List of free configurations */
1068static struct config *current = 0; /* Top of list of configurations */
1069static struct config **currentend = 0; /* Last on list of configs */
1070static struct config *basis = 0; /* Top of list of basis configs */
1071static struct config **basisend = 0; /* End of list of basis configs */
1072
1073/* Return a pointer to a new configuration */
1074PRIVATE struct config *newconfig(){
1075 struct config *new;
1076 if( freelist==0 ){
1077 int i;
1078 int amt = 3;
1079 freelist = (struct config *)malloc( sizeof(struct config)*amt );
1080 if( freelist==0 ){
1081 fprintf(stderr,"Unable to allocate memory for a new configuration.");
1082 exit(1);
1083 }
1084 for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
1085 freelist[amt-1].next = 0;
1086 }
1087 new = freelist;
1088 freelist = freelist->next;
1089 return new;
1090}
1091
1092/* The configuration "old" is no longer used */
1093PRIVATE void deleteconfig(old)
1094struct config *old;
1095{
1096 old->next = freelist;
1097 freelist = old;
1098}
1099
1100/* Initialize the configuration list builder */
1101void Configlist_init(){
1102 current = 0;
1103 currentend = &current;
1104 basis = 0;
1105 basisend = &basis;
1106 Configtable_init();
1107 return;
1108}
1109
1110/* Reset the configuration list builder */
1111void Configlist_reset(){
1112 current = 0;
1113 currentend = &current;
1114 basis = 0;
1115 basisend = &basis;
1116 Configtable_clear(0);
1117 return;
1118}
1119
1120/* Add another configuration to the configuration list */
1121struct config *Configlist_add(rp,dot)
1122struct rule *rp; /* The rule */
1123int dot; /* Index into the RHS of the rule where the dot goes */
1124{
1125 struct config *cfp, model;
1126
1127 assert( currentend!=0 );
1128 model.rp = rp;
1129 model.dot = dot;
1130 cfp = Configtable_find(&model);
1131 if( cfp==0 ){
1132 cfp = newconfig();
1133 cfp->rp = rp;
1134 cfp->dot = dot;
1135 cfp->fws = SetNew();
1136 cfp->stp = 0;
1137 cfp->fplp = cfp->bplp = 0;
1138 cfp->next = 0;
1139 cfp->bp = 0;
1140 *currentend = cfp;
1141 currentend = &cfp->next;
1142 Configtable_insert(cfp);
1143 }
1144 return cfp;
1145}
1146
1147/* Add a basis configuration to the configuration list */
1148struct config *Configlist_addbasis(rp,dot)
1149struct rule *rp;
1150int dot;
1151{
1152 struct config *cfp, model;
1153
1154 assert( basisend!=0 );
1155 assert( currentend!=0 );
1156 model.rp = rp;
1157 model.dot = dot;
1158 cfp = Configtable_find(&model);
1159 if( cfp==0 ){
1160 cfp = newconfig();
1161 cfp->rp = rp;
1162 cfp->dot = dot;
1163 cfp->fws = SetNew();
1164 cfp->stp = 0;
1165 cfp->fplp = cfp->bplp = 0;
1166 cfp->next = 0;
1167 cfp->bp = 0;
1168 *currentend = cfp;
1169 currentend = &cfp->next;
1170 *basisend = cfp;
1171 basisend = &cfp->bp;
1172 Configtable_insert(cfp);
1173 }
1174 return cfp;
1175}
1176
1177/* Compute the closure of the configuration list */
1178void Configlist_closure(lemp)
1179struct lemon *lemp;
1180{
1181 struct config *cfp, *newcfp;
1182 struct rule *rp, *newrp;
1183 struct symbol *sp, *xsp;
1184 int i, dot;
1185
1186 assert( currentend!=0 );
1187 for(cfp=current; cfp; cfp=cfp->next){
1188 rp = cfp->rp;
1189 dot = cfp->dot;
1190 if( dot>=rp->nrhs ) continue;
1191 sp = rp->rhs[dot];
1192 if( sp->type==NONTERMINAL ){
1193 if( sp->rule==0 && sp!=lemp->errsym ){
1194 ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
1195 sp->name);
1196 lemp->errorcnt++;
1197 }
1198 for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
1199 newcfp = Configlist_add(newrp,0);
1200 for(i=dot+1; i<rp->nrhs; i++){
1201 xsp = rp->rhs[i];
1202 if( xsp->type==TERMINAL ){
1203 SetAdd(newcfp->fws,xsp->index);
1204 break;
1205 }else if( xsp->type==MULTITERMINAL ){
1206 int k;
1207 for(k=0; k<xsp->nsubsym; k++){
1208 SetAdd(newcfp->fws, xsp->subsym[k]->index);
1209 }
1210 break;
1211 }else{
1212 SetUnion(newcfp->fws,xsp->firstset);
1213 if( xsp->lambda==LEMON_FALSE ) break;
1214 }
1215 }
1216 if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
1217 }
1218 }
1219 }
1220 return;
1221}
1222
1223/* Sort the configuration list */
1224void Configlist_sort(){
1225 current = (struct config *)msort((char *)current,(char **)&(current->next),Configcmp);
1226 currentend = 0;
1227 return;
1228}
1229
1230/* Sort the basis configuration list */
1231void Configlist_sortbasis(){
1232 basis = (struct config *)msort((char *)current,(char **)&(current->bp),Configcmp);
1233 basisend = 0;
1234 return;
1235}
1236
1237/* Return a pointer to the head of the configuration list and
1238** reset the list */
1239struct config *Configlist_return(){
1240 struct config *old;
1241 old = current;
1242 current = 0;
1243 currentend = 0;
1244 return old;
1245}
1246
1247/* Return a pointer to the head of the basis configuration list and
1248** reset the list */
1249struct config *Configlist_basis(){
1250 struct config *old;
1251 old = basis;
1252 basis = 0;
1253 basisend = 0;
1254 return old;
1255}
1256
1257/* Free all elements of the given configuration list */
1258void Configlist_eat(cfp)
1259struct config *cfp;
1260{
1261 struct config *nextcfp;
1262 for(; cfp; cfp=nextcfp){
1263 nextcfp = cfp->next;
1264 assert( cfp->fplp==0 );
1265 assert( cfp->bplp==0 );
1266 if( cfp->fws ) SetFree(cfp->fws);
1267 deleteconfig(cfp);
1268 }
1269 return;
1270}
1271/***************** From the file "error.c" *********************************/
1272/*
1273** Code for printing error messages.
1274*/
1275
1276/* Find a good place to break "msg" so that its length is at least "min"
1277** but no more than "max". Make the point as close to max as possible.
1278*/
1279static int findbreak(msg,min,max)
1280char *msg;
1281int min;
1282int max;
1283{
1284 int i,spot;
1285 char c;
1286 for(i=spot=min; i<=max; i++){
1287 c = msg[i];
1288 if( c=='\t' ) msg[i] = ' ';
1289 if( c=='\n' ){ msg[i] = ' '; spot = i; break; }
1290 if( c==0 ){ spot = i; break; }
1291 if( c=='-' && i<max-1 ) spot = i+1;
1292 if( c==' ' ) spot = i;
1293 }
1294 return spot;
1295}
1296
1297/*
1298** The error message is split across multiple lines if necessary. The
1299** splits occur at a space, if there is a space available near the end
1300** of the line.
1301*/
1302#define ERRMSGSIZE 10000 /* Hope this is big enough. No way to error check */
1303#define LINEWIDTH 79 /* Max width of any output line */
1304#define PREFIXLIMIT 30 /* Max width of the prefix on each line */
1305void ErrorMsg(const char *filename, int lineno, const char *format, ...){
1306 char errmsg[ERRMSGSIZE];
1307 char prefix[PREFIXLIMIT+10];
1308 int errmsgsize;
1309 int prefixsize;
1310 int availablewidth;
1311 va_list ap;
1312 int end, restart, base;
1313
1314 va_start(ap, format);
1315 /* Prepare a prefix to be prepended to every output line */
1316 if( lineno>0 ){
1317 sprintf(prefix,"%.*s:%d: ",PREFIXLIMIT-10,filename,lineno);
1318 }else{
1319 sprintf(prefix,"%.*s: ",PREFIXLIMIT-10,filename);
1320 }
1321 prefixsize = strlen(prefix);
1322 availablewidth = LINEWIDTH - prefixsize;
1323
1324 /* Generate the error message */
1325 vsprintf(errmsg,format,ap);
1326 va_end(ap);
1327 errmsgsize = strlen(errmsg);
1328 /* Remove trailing '\n's from the error message. */
1329 while( errmsgsize>0 && errmsg[errmsgsize-1]=='\n' ){
1330 errmsg[--errmsgsize] = 0;
1331 }
1332
1333 /* Print the error message */
1334 base = 0;
1335 while( errmsg[base]!=0 ){
1336 end = restart = findbreak(&errmsg[base],0,availablewidth);
1337 restart += base;
1338 while( errmsg[restart]==' ' ) restart++;
1339 fprintf(stdout,"%s%.*s\n",prefix,end,&errmsg[base]);
1340 base = restart;
1341 }
1342}
1343/**************** From the file "main.c" ************************************/
1344/*
1345** Main program file for the LEMON parser generator.
1346*/
1347
1348/* Report an out-of-memory condition and abort. This function
1349** is used mostly by the "MemoryCheck" macro in struct.h
1350*/
1351void memory_error(){
1352 fprintf(stderr,"Out of memory. Aborting...\n");
1353 exit(1);
1354}
1355
1356static int nDefine = 0; /* Number of -D options on the command line */
1357static char **azDefine = 0; /* Name of the -D macros */
1358
1359/* This routine is called with the argument to each -D command-line option.
1360** Add the macro defined to the azDefine array.
1361*/
1362static void handle_D_option(char *z){
1363 char **paz;
1364 nDefine++;
1365 azDefine = realloc(azDefine, sizeof(azDefine[0])*nDefine);
1366 if( azDefine==0 ){
1367 fprintf(stderr,"out of memory\n");
1368 exit(1);
1369 }
1370 paz = &azDefine[nDefine-1];
1371 *paz = malloc( strlen(z)+1 );
1372 if( *paz==0 ){
1373 fprintf(stderr,"out of memory\n");
1374 exit(1);
1375 }
1376 strcpy(*paz, z);
1377 for(z=*paz; *z && *z!='='; z++){}
1378 *z = 0;
1379}
1380
1381
1382/* The main program. Parse the command line and do it... */
1383int main(argc,argv)
1384int argc;
1385char **argv;
1386{
1387 static int version = 0;
1388 static int rpflag = 0;
1389 static int basisflag = 0;
1390 static int compress = 0;
1391 static int quiet = 0;
1392 static int statistics = 0;
1393 static int mhflag = 0;
1394 static struct s_options options[] = {
1395 {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."},
1396 {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."},
1397 {OPT_FSTR, "D", (char*)handle_D_option, "Define an %ifdef macro."},
1398 {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."},
1399 {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file"},
1400 {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."},
1401 {OPT_FLAG, "s", (char*)&statistics,
1402 "Print parser stats to standard output."},
1403 {OPT_FLAG, "x", (char*)&version, "Print the version number."},
1404 {OPT_FLAG,0,0,0}
1405 };
1406 int i;
1407 struct lemon lem;
1408
1409 OptInit(argv,options,stderr);
1410 if( version ){
1411 printf("Lemon version 1.0\n");
1412 exit(0);
1413 }
1414 if( OptNArgs()!=1 ){
1415 fprintf(stderr,"Exactly one filename argument is required.\n");
1416 exit(1);
1417 }
1418 memset(&lem, 0, sizeof(lem));
1419 lem.errorcnt = 0;
1420
1421 /* Initialize the machine */
1422 Strsafe_init();
1423 Symbol_init();
1424 State_init();
1425 lem.argv0 = argv[0];
1426 lem.filename = OptArg(0);
1427 lem.basisflag = basisflag;
1428 Symbol_new("$");
1429 lem.errsym = Symbol_new("error");
1430
1431 /* Parse the input file */
1432 Parse(&lem);
1433 if( lem.errorcnt ) exit(lem.errorcnt);
1434 if( lem.nrule==0 ){
1435 fprintf(stderr,"Empty grammar.\n");
1436 exit(1);
1437 }
1438
1439 /* Count and index the symbols of the grammar */
1440 lem.nsymbol = Symbol_count();
1441 Symbol_new("{default}");
1442 lem.symbols = Symbol_arrayof();
1443 for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
1444 qsort(lem.symbols,lem.nsymbol+1,sizeof(struct symbol*),
1445 (int(*)())Symbolcmpp);
1446 for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
1447 for(i=1; isupper(lem.symbols[i]->name[0]); i++);
1448 lem.nterminal = i;
1449
1450 /* Generate a reprint of the grammar, if requested on the command line */
1451 if( rpflag ){
1452 Reprint(&lem);
1453 }else{
1454 /* Initialize the size for all follow and first sets */
1455 SetSize(lem.nterminal);
1456
1457 /* Find the precedence for every production rule (that has one) */
1458 FindRulePrecedences(&lem);
1459
1460 /* Compute the lambda-nonterminals and the first-sets for every
1461 ** nonterminal */
1462 FindFirstSets(&lem);
1463
1464 /* Compute all LR(0) states. Also record follow-set propagation
1465 ** links so that the follow-set can be computed later */
1466 lem.nstate = 0;
1467 FindStates(&lem);
1468 lem.sorted = State_arrayof();
1469
1470 /* Tie up loose ends on the propagation links */
1471 FindLinks(&lem);
1472
1473 /* Compute the follow set of every reducible configuration */
1474 FindFollowSets(&lem);
1475
1476 /* Compute the action tables */
1477 FindActions(&lem);
1478
1479 /* Compress the action tables */
1480 if( compress==0 ) CompressTables(&lem);
1481
1482 /* Reorder and renumber the states so that states with fewer choices
1483 ** occur at the end. */
1484 ResortStates(&lem);
1485
1486 /* Generate a report of the parser generated. (the "y.output" file) */
1487 if( !quiet ) ReportOutput(&lem);
1488
1489 /* Generate the source code for the parser */
1490 ReportTable(&lem, mhflag);
1491
1492 /* Produce a header file for use by the scanner. (This step is
1493 ** omitted if the "-m" option is used because makeheaders will
1494 ** generate the file for us.) */
1495 if( !mhflag ) ReportHeader(&lem);
1496 }
1497 if( statistics ){
1498 printf("Parser statistics: %d terminals, %d nonterminals, %d rules\n",
1499 lem.nterminal, lem.nsymbol - lem.nterminal, lem.nrule);
1500 printf(" %d states, %d parser table entries, %d conflicts\n",
1501 lem.nstate, lem.tablesize, lem.nconflict);
1502 }
1503 if( lem.nconflict ){
1504 fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict);
1505 }
1506 exit(lem.errorcnt + lem.nconflict);
1507 return (lem.errorcnt + lem.nconflict);
1508}
1509/******************** From the file "msort.c" *******************************/
1510/*
1511** A generic merge-sort program.
1512**
1513** USAGE:
1514** Let "ptr" be a pointer to some structure which is at the head of
1515** a null-terminated list. Then to sort the list call:
1516**
1517** ptr = msort(ptr,&(ptr->next),cmpfnc);
1518**
1519** In the above, "cmpfnc" is a pointer to a function which compares
1520** two instances of the structure and returns an integer, as in
1521** strcmp. The second argument is a pointer to the pointer to the
1522** second element of the linked list. This address is used to compute
1523** the offset to the "next" field within the structure. The offset to
1524** the "next" field must be constant for all structures in the list.
1525**
1526** The function returns a new pointer which is the head of the list
1527** after sorting.
1528**
1529** ALGORITHM:
1530** Merge-sort.
1531*/
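/*
** Illustrative sketch (hypothetical types; not part of the original
** source).  Given a list node such as
**
**     struct item { int value; struct item *next; };
**
** and a strcmp-style comparator
**
**     int itemcmp(const char *a, const char *b){
**       return ((struct item*)a)->value - ((struct item*)b)->value;
**     }
**
** the head pointer "pList" would be sorted with
**
**     pList = (struct item*)msort((char*)pList, (char**)&pList->next, itemcmp);
**
** Passing the address of the head element's "next" field is what lets
** msort() compute the byte offset of that field for every element in
** the list.
*/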
1532
1533/*
1534** Return a pointer to the next structure in the linked list.
1535*/
1536#define NEXT(A) (*(char**)(((unsigned long)A)+offset))
1537
1538/*
1539** Inputs:
1540** a: A sorted, null-terminated linked list. (May be null).
1541** b: A sorted, null-terminated linked list. (May be null).
1542** cmp: A pointer to the comparison function.
1543** offset: Offset in the structure to the "next" field.
1544**
1545** Return Value:
1546** A pointer to the head of a sorted list containing the elements
1547** of both a and b.
1548**
1549** Side effects:
1550** The "next" pointers for elements in the lists a and b are
1551** changed.
1552*/
1553static char *merge(
1554 char *a,
1555 char *b,
1556 int (*cmp)(const char*,const char*),
1557 int offset
1558){
1559 char *ptr, *head;
1560
1561 if( a==0 ){
1562 head = b;
1563 }else if( b==0 ){
1564 head = a;
1565 }else{
1566 if( (*cmp)(a,b)<0 ){
1567 ptr = a;
1568 a = NEXT(a);
1569 }else{
1570 ptr = b;
1571 b = NEXT(b);
1572 }
1573 head = ptr;
1574 while( a && b ){
1575 if( (*cmp)(a,b)<0 ){
1576 NEXT(ptr) = a;
1577 ptr = a;
1578 a = NEXT(a);
1579 }else{
1580 NEXT(ptr) = b;
1581 ptr = b;
1582 b = NEXT(b);
1583 }
1584 }
1585 if( a ) NEXT(ptr) = a;
1586 else NEXT(ptr) = b;
1587 }
1588 return head;
1589}
1590
1591/*
1592** Inputs:
1593** list: Pointer to a singly-linked list of structures.
1594** next: Pointer to pointer to the second element of the list.
1595** cmp: A comparison function.
1596**
1597** Return Value:
1598** A pointer to the head of a sorted list containing the elements
1599** originally in list.
1600**
1601** Side effects:
1602** The "next" pointers for elements in list are changed.
1603*/
1604#define LISTSIZE 30
1605static char *msort(
1606 char *list,
1607 char **next,
1608 int (*cmp)(const char*,const char*)
1609){
1610 unsigned long offset;
1611 char *ep;
1612 char *set[LISTSIZE];
1613 int i;
1614 offset = (unsigned long)next - (unsigned long)list;
1615 for(i=0; i<LISTSIZE; i++) set[i] = 0;
1616 while( list ){
1617 ep = list;
1618 list = NEXT(list);
1619 NEXT(ep) = 0;
1620 for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
1621 ep = merge(ep,set[i],cmp,offset);
1622 set[i] = 0;
1623 }
1624 set[i] = ep;
1625 }
1626 ep = 0;
1627 for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(ep,set[i],cmp,offset);
1628 return ep;
1629}
1630/************************ From the file "option.c" **************************/
1631static char **argv;
1632static struct s_options *op;
1633static FILE *errstream;
1634
1635#define ISOPT(X) ((X)[0]=='-'||(X)[0]=='+'||strchr((X),'=')!=0)
1636
1637/*
1638** Print the command line with a caret pointing to the k-th character
1639** of the n-th field.
1640*/
1641static void errline(n,k,err)
1642int n;
1643int k;
1644FILE *err;
1645{
1646 int spcnt, i;
1647 if( argv[0] ) fprintf(err,"%s",argv[0]);
1648 spcnt = strlen(argv[0]) + 1;
1649 for(i=1; i<n && argv[i]; i++){
1650 fprintf(err," %s",argv[i]);
1651 spcnt += strlen(argv[i])+1;
1652 }
1653 spcnt += k;
1654 for(; argv[i]; i++) fprintf(err," %s",argv[i]);
1655 if( spcnt<20 ){
1656 fprintf(err,"\n%*s^-- here\n",spcnt,"");
1657 }else{
1658 fprintf(err,"\n%*shere --^\n",spcnt-7,"");
1659 }
1660}
1661
1662/*
1663** Return the index of the N-th non-switch argument. Return -1
1664** if N is out of range.
1665*/
1666static int argindex(n)
1667int n;
1668{
1669 int i;
1670 int dashdash = 0;
1671 if( argv!=0 && *argv!=0 ){
1672 for(i=1; argv[i]; i++){
1673 if( dashdash || !ISOPT(argv[i]) ){
1674 if( n==0 ) return i;
1675 n--;
1676 }
1677 if( strcmp(argv[i],"--")==0 ) dashdash = 1;
1678 }
1679 }
1680 return -1;
1681}
1682
1683static char emsg[] = "Command line syntax error: ";
1684
1685/*
1686** Process a flag command line argument.
1687*/
1688static int handleflags(i,err)
1689int i;
1690FILE *err;
1691{
1692 int v;
1693 int errcnt = 0;
1694 int j;
1695 for(j=0; op[j].label; j++){
1696 if( strncmp(&argv[i][1],op[j].label,strlen(op[j].label))==0 ) break;
1697 }
1698 v = argv[i][0]=='-' ? 1 : 0;
1699 if( op[j].label==0 ){
1700 if( err ){
1701 fprintf(err,"%sundefined option.\n",emsg);
1702 errline(i,1,err);
1703 }
1704 errcnt++;
1705 }else if( op[j].type==OPT_FLAG ){
1706 *((int*)op[j].arg) = v;
1707 }else if( op[j].type==OPT_FFLAG ){
1708 (*(void(*)())(op[j].arg))(v);
1709 }else if( op[j].type==OPT_FSTR ){
1710 (*(void(*)())(op[j].arg))(&argv[i][2]);
1711 }else{
1712 if( err ){
1713 fprintf(err,"%smissing argument on switch.\n",emsg);
1714 errline(i,1,err);
1715 }
1716 errcnt++;
1717 }
1718 return errcnt;
1719}
1720
1721/*
1722** Process a command line switch which has an argument.
1723*/
1724static int handleswitch(i,err)
1725int i;
1726FILE *err;
1727{
1728 int lv = 0;
1729 double dv = 0.0;
1730 char *sv = 0, *end;
1731 char *cp;
1732 int j;
1733 int errcnt = 0;
1734 cp = strchr(argv[i],'=');
1735 assert( cp!=0 );
1736 *cp = 0;
1737 for(j=0; op[j].label; j++){
1738 if( strcmp(argv[i],op[j].label)==0 ) break;
1739 }
1740 *cp = '=';
1741 if( op[j].label==0 ){
1742 if( err ){
1743 fprintf(err,"%sundefined option.\n",emsg);
1744 errline(i,0,err);
1745 }
1746 errcnt++;
1747 }else{
1748 cp++;
1749 switch( op[j].type ){
1750 case OPT_FLAG:
1751 case OPT_FFLAG:
1752 if( err ){
1753 fprintf(err,"%soption requires an argument.\n",emsg);
1754 errline(i,0,err);
1755 }
1756 errcnt++;
1757 break;
1758 case OPT_DBL:
1759 case OPT_FDBL:
1760 dv = strtod(cp,&end);
1761 if( *end ){
1762 if( err ){
1763 fprintf(err,"%sillegal character in floating-point argument.\n",emsg);
1764 errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
1765 }
1766 errcnt++;
1767 }
1768 break;
1769 case OPT_INT:
1770 case OPT_FINT:
1771 lv = strtol(cp,&end,0);
1772 if( *end ){
1773 if( err ){
1774 fprintf(err,"%sillegal character in integer argument.\n",emsg);
1775 errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
1776 }
1777 errcnt++;
1778 }
1779 break;
1780 case OPT_STR:
1781 case OPT_FSTR:
1782 sv = cp;
1783 break;
1784 }
1785 switch( op[j].type ){
1786 case OPT_FLAG:
1787 case OPT_FFLAG:
1788 break;
1789 case OPT_DBL:
1790 *(double*)(op[j].arg) = dv;
1791 break;
1792 case OPT_FDBL:
1793 (*(void(*)())(op[j].arg))(dv);
1794 break;
1795 case OPT_INT:
1796 *(int*)(op[j].arg) = lv;
1797 break;
1798 case OPT_FINT:
1799 (*(void(*)())(op[j].arg))((int)lv);
1800 break;
1801 case OPT_STR:
1802 *(char**)(op[j].arg) = sv;
1803 break;
1804 case OPT_FSTR:
1805 (*(void(*)())(op[j].arg))(sv);
1806 break;
1807 }
1808 }
1809 return errcnt;
1810}
1811
1812int OptInit(a,o,err)
1813char **a;
1814struct s_options *o;
1815FILE *err;
1816{
1817 int errcnt = 0;
1818 argv = a;
1819 op = o;
1820 errstream = err;
1821 if( argv && *argv && op ){
1822 int i;
1823 for(i=1; argv[i]; i++){
1824 if( argv[i][0]=='+' || argv[i][0]=='-' ){
1825 errcnt += handleflags(i,err);
1826 }else if( strchr(argv[i],'=') ){
1827 errcnt += handleswitch(i,err);
1828 }
1829 }
1830 }
1831 if( errcnt>0 ){
1832 fprintf(err,"Valid command line options for \"%s\" are:\n",*a);
1833 OptPrint();
1834 exit(1);
1835 }
1836 return 0;
1837}
1838
1839int OptNArgs(){
1840 int cnt = 0;
1841 int dashdash = 0;
1842 int i;
1843 if( argv!=0 && argv[0]!=0 ){
1844 for(i=1; argv[i]; i++){
1845 if( dashdash || !ISOPT(argv[i]) ) cnt++;
1846 if( strcmp(argv[i],"--")==0 ) dashdash = 1;
1847 }
1848 }
1849 return cnt;
1850}
1851
1852char *OptArg(n)
1853int n;
1854{
1855 int i;
1856 i = argindex(n);
1857 return i>=0 ? argv[i] : 0;
1858}
1859
1860void OptErr(n)
1861int n;
1862{
1863 int i;
1864 i = argindex(n);
1865 if( i>=0 ) errline(i,0,errstream);
1866}
1867
1868void OptPrint(){
1869 int i;
1870 int max, len;
1871 max = 0;
1872 for(i=0; op[i].label; i++){
1873 len = strlen(op[i].label) + 1;
1874 switch( op[i].type ){
1875 case OPT_FLAG:
1876 case OPT_FFLAG:
1877 break;
1878 case OPT_INT:
1879 case OPT_FINT:
1880 len += 9; /* length of "<integer>" */
1881 break;
1882 case OPT_DBL:
1883 case OPT_FDBL:
1884 len += 6; /* length of "<real>" */
1885 break;
1886 case OPT_STR:
1887 case OPT_FSTR:
1888 len += 8; /* length of "<string>" */
1889 break;
1890 }
1891 if( len>max ) max = len;
1892 }
1893 for(i=0; op[i].label; i++){
1894 switch( op[i].type ){
1895 case OPT_FLAG:
1896 case OPT_FFLAG:
1897 fprintf(errstream," -%-*s %s\n",max,op[i].label,op[i].message);
1898 break;
1899 case OPT_INT:
1900 case OPT_FINT:
1901 fprintf(errstream," %s=<integer>%*s %s\n",op[i].label,
1902 (int)(max-strlen(op[i].label)-9),"",op[i].message);
1903 break;
1904 case OPT_DBL:
1905 case OPT_FDBL:
1906 fprintf(errstream," %s=<real>%*s %s\n",op[i].label,
1907 (int)(max-strlen(op[i].label)-6),"",op[i].message);
1908 break;
1909 case OPT_STR:
1910 case OPT_FSTR:
1911 fprintf(errstream," %s=<string>%*s %s\n",op[i].label,
1912 (int)(max-strlen(op[i].label)-8),"",op[i].message);
1913 break;
1914 }
1915 }
1916}
1917/*********************** From the file "parse.c" ****************************/
1918/*
1919** Input file parser for the LEMON parser generator.
1920*/
1921
1922/* The state of the parser */
1923struct pstate {
1924 char *filename; /* Name of the input file */
1925 int tokenlineno; /* Linenumber at which current token starts */
1926 int errorcnt; /* Number of errors so far */
1927 char *tokenstart; /* Text of current token */
1928 struct lemon *gp; /* Global state vector */
1929 enum e_state {
1930 INITIALIZE,
1931 WAITING_FOR_DECL_OR_RULE,
1932 WAITING_FOR_DECL_KEYWORD,
1933 WAITING_FOR_DECL_ARG,
1934 WAITING_FOR_PRECEDENCE_SYMBOL,
1935 WAITING_FOR_ARROW,
1936 IN_RHS,
1937 LHS_ALIAS_1,
1938 LHS_ALIAS_2,
1939 LHS_ALIAS_3,
1940 RHS_ALIAS_1,
1941 RHS_ALIAS_2,
1942 PRECEDENCE_MARK_1,
1943 PRECEDENCE_MARK_2,
1944 RESYNC_AFTER_RULE_ERROR,
1945 RESYNC_AFTER_DECL_ERROR,
1946 WAITING_FOR_DESTRUCTOR_SYMBOL,
1947 WAITING_FOR_DATATYPE_SYMBOL,
1948 WAITING_FOR_FALLBACK_ID,
1949 WAITING_FOR_WILDCARD_ID
1950 } state; /* The state of the parser */
1951 struct symbol *fallback; /* The fallback token */
1952 struct symbol *lhs; /* Left-hand side of current rule */
1953 char *lhsalias; /* Alias for the LHS */
1954 int nrhs; /* Number of right-hand side symbols seen */
1955 struct symbol *rhs[MAXRHS]; /* RHS symbols */
1956 char *alias[MAXRHS]; /* Aliases for each RHS symbol (or NULL) */
1957 struct rule *prevrule; /* Previous rule parsed */
1958 char *declkeyword; /* Keyword of a declaration */
1959 char **declargslot; /* Where the declaration argument should be put */
1960 int *decllnslot; /* Where the declaration linenumber is put */
1961 enum e_assoc declassoc; /* Assign this association to decl arguments */
1962 int preccounter; /* Assign this precedence to decl arguments */
1963 struct rule *firstrule; /* Pointer to first rule in the grammar */
1964 struct rule *lastrule; /* Pointer to the most recently parsed rule */
1965};
1966
1967/* Parse a single token */
1968static void parseonetoken(psp)
1969struct pstate *psp;
1970{
1971 char *x;
1972 x = Strsafe(psp->tokenstart); /* Save the token permanently */
1973#if 0
1974 printf("%s:%d: Token=[%s] state=%d\n",psp->filename,psp->tokenlineno,
1975 x,psp->state);
1976#endif
1977 switch( psp->state ){
1978 case INITIALIZE:
1979 psp->prevrule = 0;
1980 psp->preccounter = 0;
1981 psp->firstrule = psp->lastrule = 0;
1982 psp->gp->nrule = 0;
1983 /* Fall thru to next case */
1984 case WAITING_FOR_DECL_OR_RULE:
1985 if( x[0]=='%' ){
1986 psp->state = WAITING_FOR_DECL_KEYWORD;
1987 }else if( islower(x[0]) ){
1988 psp->lhs = Symbol_new(x);
1989 psp->nrhs = 0;
1990 psp->lhsalias = 0;
1991 psp->state = WAITING_FOR_ARROW;
1992 }else if( x[0]=='{' ){
1993 if( psp->prevrule==0 ){
1994 ErrorMsg(psp->filename,psp->tokenlineno,
1995"There is not prior rule opon which to attach the code \
1996fragment which begins on this line.");
1997 psp->errorcnt++;
1998 }else if( psp->prevrule->code!=0 ){
1999 ErrorMsg(psp->filename,psp->tokenlineno,
2000"Code fragment beginning on this line is not the first \
2001to follow the previous rule.");
2002 psp->errorcnt++;
2003 }else{
2004 psp->prevrule->line = psp->tokenlineno;
2005 psp->prevrule->code = &x[1];
2006 }
2007 }else if( x[0]=='[' ){
2008 psp->state = PRECEDENCE_MARK_1;
2009 }else{
2010 ErrorMsg(psp->filename,psp->tokenlineno,
2011 "Token \"%s\" should be either \"%%\" or a nonterminal name.",
2012 x);
2013 psp->errorcnt++;
2014 }
2015 break;
2016 case PRECEDENCE_MARK_1:
2017 if( !isupper(x[0]) ){
2018 ErrorMsg(psp->filename,psp->tokenlineno,
2019 "The precedence symbol must be a terminal.");
2020 psp->errorcnt++;
2021 }else if( psp->prevrule==0 ){
2022 ErrorMsg(psp->filename,psp->tokenlineno,
2023 "There is no prior rule to assign precedence \"[%s]\".",x);
2024 psp->errorcnt++;
2025 }else if( psp->prevrule->precsym!=0 ){
2026 ErrorMsg(psp->filename,psp->tokenlineno,
2027"Precedence mark on this line is not the first \
2028to follow the previous rule.");
2029 psp->errorcnt++;
2030 }else{
2031 psp->prevrule->precsym = Symbol_new(x);
2032 }
2033 psp->state = PRECEDENCE_MARK_2;
2034 break;
2035 case PRECEDENCE_MARK_2:
2036 if( x[0]!=']' ){
2037 ErrorMsg(psp->filename,psp->tokenlineno,
2038 "Missing \"]\" on precedence mark.");
2039 psp->errorcnt++;
2040 }
2041 psp->state = WAITING_FOR_DECL_OR_RULE;
2042 break;
2043 case WAITING_FOR_ARROW:
2044 if( x[0]==':' && x[1]==':' && x[2]=='=' ){
2045 psp->state = IN_RHS;
2046 }else if( x[0]=='(' ){
2047 psp->state = LHS_ALIAS_1;
2048 }else{
2049 ErrorMsg(psp->filename,psp->tokenlineno,
2050 "Expected to see a \":\" following the LHS symbol \"%s\".",
2051 psp->lhs->name);
2052 psp->errorcnt++;
2053 psp->state = RESYNC_AFTER_RULE_ERROR;
2054 }
2055 break;
2056 case LHS_ALIAS_1:
2057 if( isalpha(x[0]) ){
2058 psp->lhsalias = x;
2059 psp->state = LHS_ALIAS_2;
2060 }else{
2061 ErrorMsg(psp->filename,psp->tokenlineno,
2062 "\"%s\" is not a valid alias for the LHS \"%s\"\n",
2063 x,psp->lhs->name);
2064 psp->errorcnt++;
2065 psp->state = RESYNC_AFTER_RULE_ERROR;
2066 }
2067 break;
2068 case LHS_ALIAS_2:
2069 if( x[0]==')' ){
2070 psp->state = LHS_ALIAS_3;
2071 }else{
2072 ErrorMsg(psp->filename,psp->tokenlineno,
2073 "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
2074 psp->errorcnt++;
2075 psp->state = RESYNC_AFTER_RULE_ERROR;
2076 }
2077 break;
2078 case LHS_ALIAS_3:
2079 if( x[0]==':' && x[1]==':' && x[2]=='=' ){
2080 psp->state = IN_RHS;
2081 }else{
2082 ErrorMsg(psp->filename,psp->tokenlineno,
2083 "Missing \"->\" following: \"%s(%s)\".",
2084 psp->lhs->name,psp->lhsalias);
2085 psp->errorcnt++;
2086 psp->state = RESYNC_AFTER_RULE_ERROR;
2087 }
2088 break;
2089 case IN_RHS:
2090 if( x[0]=='.' ){
2091 struct rule *rp;
2092 rp = (struct rule *)malloc( sizeof(struct rule) +
2093 sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs );
2094 if( rp==0 ){
2095 ErrorMsg(psp->filename,psp->tokenlineno,
2096 "Can't allocate enough memory for this rule.");
2097 psp->errorcnt++;
2098 psp->prevrule = 0;
2099 }else{
2100 int i;
2101 rp->ruleline = psp->tokenlineno;
2102 rp->rhs = (struct symbol**)&rp[1];
2103 rp->rhsalias = (char**)&(rp->rhs[psp->nrhs]);
2104 for(i=0; i<psp->nrhs; i++){
2105 rp->rhs[i] = psp->rhs[i];
2106 rp->rhsalias[i] = psp->alias[i];
2107 }
2108 rp->lhs = psp->lhs;
2109 rp->lhsalias = psp->lhsalias;
2110 rp->nrhs = psp->nrhs;
2111 rp->code = 0;
2112 rp->precsym = 0;
2113 rp->index = psp->gp->nrule++;
2114 rp->nextlhs = rp->lhs->rule;
2115 rp->lhs->rule = rp;
2116 rp->next = 0;
2117 if( psp->firstrule==0 ){
2118 psp->firstrule = psp->lastrule = rp;
2119 }else{
2120 psp->lastrule->next = rp;
2121 psp->lastrule = rp;
2122 }
2123 psp->prevrule = rp;
2124 }
2125 psp->state = WAITING_FOR_DECL_OR_RULE;
2126 }else if( isalpha(x[0]) ){
2127 if( psp->nrhs>=MAXRHS ){
2128 ErrorMsg(psp->filename,psp->tokenlineno,
2129 "Too many symbols on RHS or rule beginning at \"%s\".",
2130 x);
2131 psp->errorcnt++;
2132 psp->state = RESYNC_AFTER_RULE_ERROR;
2133 }else{
2134 psp->rhs[psp->nrhs] = Symbol_new(x);
2135 psp->alias[psp->nrhs] = 0;
2136 psp->nrhs++;
2137 }
2138 }else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 ){
2139 struct symbol *msp = psp->rhs[psp->nrhs-1];
2140 if( msp->type!=MULTITERMINAL ){
2141 struct symbol *origsp = msp;
2142 msp = malloc(sizeof(*msp));
2143 memset(msp, 0, sizeof(*msp));
2144 msp->type = MULTITERMINAL;
2145 msp->nsubsym = 1;
2146 msp->subsym = malloc(sizeof(struct symbol*));
2147 msp->subsym[0] = origsp;
2148 msp->name = origsp->name;
2149 psp->rhs[psp->nrhs-1] = msp;
2150 }
2151 msp->nsubsym++;
2152 msp->subsym = realloc(msp->subsym, sizeof(struct symbol*)*msp->nsubsym);
2153 msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]);
2154 if( islower(x[1]) || islower(msp->subsym[0]->name[0]) ){
2155 ErrorMsg(psp->filename,psp->tokenlineno,
2156 "Cannot form a compound containing a non-terminal");
2157 psp->errorcnt++;
2158 }
2159 }else if( x[0]=='(' && psp->nrhs>0 ){
2160 psp->state = RHS_ALIAS_1;
2161 }else{
2162 ErrorMsg(psp->filename,psp->tokenlineno,
2163 "Illegal character on RHS of rule: \"%s\".",x);
2164 psp->errorcnt++;
2165 psp->state = RESYNC_AFTER_RULE_ERROR;
2166 }
2167 break;
2168 case RHS_ALIAS_1:
2169 if( isalpha(x[0]) ){
2170 psp->alias[psp->nrhs-1] = x;
2171 psp->state = RHS_ALIAS_2;
2172 }else{
2173 ErrorMsg(psp->filename,psp->tokenlineno,
2174 "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
2175 x,psp->rhs[psp->nrhs-1]->name);
2176 psp->errorcnt++;
2177 psp->state = RESYNC_AFTER_RULE_ERROR;
2178 }
2179 break;
2180 case RHS_ALIAS_2:
2181 if( x[0]==')' ){
2182 psp->state = IN_RHS;
2183 }else{
2184 ErrorMsg(psp->filename,psp->tokenlineno,
2185 "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
2186 psp->errorcnt++;
2187 psp->state = RESYNC_AFTER_RULE_ERROR;
2188 }
2189 break;
2190 case WAITING_FOR_DECL_KEYWORD:
2191 if( isalpha(x[0]) ){
2192 psp->declkeyword = x;
2193 psp->declargslot = 0;
2194 psp->decllnslot = 0;
2195 psp->state = WAITING_FOR_DECL_ARG;
2196 if( strcmp(x,"name")==0 ){
2197 psp->declargslot = &(psp->gp->name);
2198 }else if( strcmp(x,"include")==0 ){
2199 psp->declargslot = &(psp->gp->include);
2200 psp->decllnslot = &psp->gp->includeln;
2201 }else if( strcmp(x,"code")==0 ){
2202 psp->declargslot = &(psp->gp->extracode);
2203 psp->decllnslot = &psp->gp->extracodeln;
2204 }else if( strcmp(x,"token_destructor")==0 ){
2205 psp->declargslot = &psp->gp->tokendest;
2206 psp->decllnslot = &psp->gp->tokendestln;
2207 }else if( strcmp(x,"default_destructor")==0 ){
2208 psp->declargslot = &psp->gp->vardest;
2209 psp->decllnslot = &psp->gp->vardestln;
2210 }else if( strcmp(x,"token_prefix")==0 ){
2211 psp->declargslot = &psp->gp->tokenprefix;
2212 }else if( strcmp(x,"syntax_error")==0 ){
2213 psp->declargslot = &(psp->gp->error);
2214 psp->decllnslot = &psp->gp->errorln;
2215 }else if( strcmp(x,"parse_accept")==0 ){
2216 psp->declargslot = &(psp->gp->accept);
2217 psp->decllnslot = &psp->gp->acceptln;
2218 }else if( strcmp(x,"parse_failure")==0 ){
2219 psp->declargslot = &(psp->gp->failure);
2220 psp->decllnslot = &psp->gp->failureln;
2221 }else if( strcmp(x,"stack_overflow")==0 ){
2222 psp->declargslot = &(psp->gp->overflow);
2223 psp->decllnslot = &psp->gp->overflowln;
2224 }else if( strcmp(x,"extra_argument")==0 ){
2225 psp->declargslot = &(psp->gp->arg);
2226 }else if( strcmp(x,"token_type")==0 ){
2227 psp->declargslot = &(psp->gp->tokentype);
2228 }else if( strcmp(x,"default_type")==0 ){
2229 psp->declargslot = &(psp->gp->vartype);
2230 }else if( strcmp(x,"stack_size")==0 ){
2231 psp->declargslot = &(psp->gp->stacksize);
2232 }else if( strcmp(x,"start_symbol")==0 ){
2233 psp->declargslot = &(psp->gp->start);
2234 }else if( strcmp(x,"left")==0 ){
2235 psp->preccounter++;
2236 psp->declassoc = LEFT;
2237 psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
2238 }else if( strcmp(x,"right")==0 ){
2239 psp->preccounter++;
2240 psp->declassoc = RIGHT;
2241 psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
2242 }else if( strcmp(x,"nonassoc")==0 ){
2243 psp->preccounter++;
2244 psp->declassoc = NONE;
2245 psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
2246 }else if( strcmp(x,"destructor")==0 ){
2247 psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
2248 }else if( strcmp(x,"type")==0 ){
2249 psp->state = WAITING_FOR_DATATYPE_SYMBOL;
2250 }else if( strcmp(x,"fallback")==0 ){
2251 psp->fallback = 0;
2252 psp->state = WAITING_FOR_FALLBACK_ID;
2253 }else if( strcmp(x,"wildcard")==0 ){
2254 psp->state = WAITING_FOR_WILDCARD_ID;
2255 }else{
2256 ErrorMsg(psp->filename,psp->tokenlineno,
2257 "Unknown declaration keyword: \"%%%s\".",x);
2258 psp->errorcnt++;
2259 psp->state = RESYNC_AFTER_DECL_ERROR;
2260 }
2261 }else{
2262 ErrorMsg(psp->filename,psp->tokenlineno,
2263 "Illegal declaration keyword: \"%s\".",x);
2264 psp->errorcnt++;
2265 psp->state = RESYNC_AFTER_DECL_ERROR;
2266 }
2267 break;
2268 case WAITING_FOR_DESTRUCTOR_SYMBOL:
2269 if( !isalpha(x[0]) ){
2270 ErrorMsg(psp->filename,psp->tokenlineno,
2271 "Symbol name missing after %destructor keyword");
2272 psp->errorcnt++;
2273 psp->state = RESYNC_AFTER_DECL_ERROR;
2274 }else{
2275 struct symbol *sp = Symbol_new(x);
2276 psp->declargslot = &sp->destructor;
2277 psp->decllnslot = &sp->destructorln;
2278 psp->state = WAITING_FOR_DECL_ARG;
2279 }
2280 break;
2281 case WAITING_FOR_DATATYPE_SYMBOL:
2282 if( !isalpha(x[0]) ){
2283 ErrorMsg(psp->filename,psp->tokenlineno,
2284 "Symbol name missing after %destructor keyword");
2285 psp->errorcnt++;
2286 psp->state = RESYNC_AFTER_DECL_ERROR;
2287 }else{
2288 struct symbol *sp = Symbol_new(x);
2289 psp->declargslot = &sp->datatype;
2290 psp->decllnslot = 0;
2291 psp->state = WAITING_FOR_DECL_ARG;
2292 }
2293 break;
2294 case WAITING_FOR_PRECEDENCE_SYMBOL:
2295 if( x[0]=='.' ){
2296 psp->state = WAITING_FOR_DECL_OR_RULE;
2297 }else if( isupper(x[0]) ){
2298 struct symbol *sp;
2299 sp = Symbol_new(x);
2300 if( sp->prec>=0 ){
2301 ErrorMsg(psp->filename,psp->tokenlineno,
2302 "Symbol \"%s\" has already be given a precedence.",x);
2303 psp->errorcnt++;
2304 }else{
2305 sp->prec = psp->preccounter;
2306 sp->assoc = psp->declassoc;
2307 }
2308 }else{
2309 ErrorMsg(psp->filename,psp->tokenlineno,
2310 "Can't assign a precedence to \"%s\".",x);
2311 psp->errorcnt++;
2312 }
2313 break;
2314 case WAITING_FOR_DECL_ARG:
2315 if( (x[0]=='{' || x[0]=='\"' || isalnum(x[0])) ){
2316 if( *(psp->declargslot)!=0 ){
2317 ErrorMsg(psp->filename,psp->tokenlineno,
2318 "The argument \"%s\" to declaration \"%%%s\" is not the first.",
2319 x[0]=='\"' ? &x[1] : x,psp->declkeyword);
2320 psp->errorcnt++;
2321 psp->state = RESYNC_AFTER_DECL_ERROR;
2322 }else{
2323 *(psp->declargslot) = (x[0]=='\"' || x[0]=='{') ? &x[1] : x;
2324 if( psp->decllnslot ) *psp->decllnslot = psp->tokenlineno;
2325 psp->state = WAITING_FOR_DECL_OR_RULE;
2326 }
2327 }else{
2328 ErrorMsg(psp->filename,psp->tokenlineno,
2329 "Illegal argument to %%%s: %s",psp->declkeyword,x);
2330 psp->errorcnt++;
2331 psp->state = RESYNC_AFTER_DECL_ERROR;
2332 }
2333 break;
2334 case WAITING_FOR_FALLBACK_ID:
2335 if( x[0]=='.' ){
2336 psp->state = WAITING_FOR_DECL_OR_RULE;
2337 }else if( !isupper(x[0]) ){
2338 ErrorMsg(psp->filename, psp->tokenlineno,
2339 "%%fallback argument \"%s\" should be a token", x);
2340 psp->errorcnt++;
2341 }else{
2342 struct symbol *sp = Symbol_new(x);
2343 if( psp->fallback==0 ){
2344 psp->fallback = sp;
2345 }else if( sp->fallback ){
2346 ErrorMsg(psp->filename, psp->tokenlineno,
2347 "More than one fallback assigned to token %s", x);
2348 psp->errorcnt++;
2349 }else{
2350 sp->fallback = psp->fallback;
2351 psp->gp->has_fallback = 1;
2352 }
2353 }
2354 break;
2355 case WAITING_FOR_WILDCARD_ID:
2356 if( x[0]=='.' ){
2357 psp->state = WAITING_FOR_DECL_OR_RULE;
2358 }else if( !isupper(x[0]) ){
2359 ErrorMsg(psp->filename, psp->tokenlineno,
2360 "%%wildcard argument \"%s\" should be a token", x);
2361 psp->errorcnt++;
2362 }else{
2363 struct symbol *sp = Symbol_new(x);
2364 if( psp->gp->wildcard==0 ){
2365 psp->gp->wildcard = sp;
2366 }else{
2367 ErrorMsg(psp->filename, psp->tokenlineno,
2368 "Extra wildcard to token: %s", x);
2369 psp->errorcnt++;
2370 }
2371 }
2372 break;
2373 case RESYNC_AFTER_RULE_ERROR:
2374/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
2375** break; */
2376 case RESYNC_AFTER_DECL_ERROR:
2377 if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
2378 if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
2379 break;
2380 }
2381}
2382
2383/* Run the preprocessor over the input file text. The global variables
2384** azDefine[0] through azDefine[nDefine-1] contain the names of all defined
2385** macros. This routine looks for "%ifdef" and "%ifndef" and "%endif" and
2386** comments them out. Text in between is also commented out as appropriate.
2387*/
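/*
** Example of the effect (a sketch; the macro name is made up).  With
** "-DSQLITE_OMIT_FOO" on the command line, azDefine[] holds
** "SQLITE_OMIT_FOO", so in the fragment
**
**     %ifdef SQLITE_OMIT_FOO
**     foo_stmt ::= FOO.
**     %endif
**
** only the two directive lines are blanked out and the rule survives.
** Without the -D option the rule line is blanked out as well.  Newlines
** are always preserved so that line numbers reported by later error
** messages remain correct.
*/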
2388static void preprocess_input(char *z){
2389 int i, j, k, n;
2390 int exclude = 0;
2391 int start = 0;
2392 int lineno = 1;
2393 int start_lineno = 1;
2394 for(i=0; z[i]; i++){
2395 if( z[i]=='\n' ) lineno++;
2396 if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue;
2397 if( strncmp(&z[i],"%endif",6)==0 && isspace(z[i+6]) ){
2398 if( exclude ){
2399 exclude--;
2400 if( exclude==0 ){
2401 for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
2402 }
2403 }
2404 for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
2405 }else if( (strncmp(&z[i],"%ifdef",6)==0 && isspace(z[i+6]))
2406 || (strncmp(&z[i],"%ifndef",7)==0 && isspace(z[i+7])) ){
2407 if( exclude ){
2408 exclude++;
2409 }else{
2410 for(j=i+7; isspace(z[j]); j++){}
2411 for(n=0; z[j+n] && !isspace(z[j+n]); n++){}
2412 exclude = 1;
2413 for(k=0; k<nDefine; k++){
2414 if( strncmp(azDefine[k],&z[j],n)==0 && strlen(azDefine[k])==n ){
2415 exclude = 0;
2416 break;
2417 }
2418 }
2419 if( z[i+3]=='n' ) exclude = !exclude;
2420 if( exclude ){
2421 start = i;
2422 start_lineno = lineno;
2423 }
2424 }
2425 for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
2426 }
2427 }
2428 if( exclude ){
2429 fprintf(stderr,"unterminated %%ifdef starting on line %d\n", start_lineno);
2430 exit(1);
2431 }
2432}
2433
2434/* In spite of its name, this function is really a scanner. It reads
2435** in the entire input file (all at once) then tokenizes it. Each
2436** token is passed to the function "parseonetoken" which builds all
2437** the appropriate data structures in the global state vector "gp".
2438*/
2439void Parse(gp)
2440struct lemon *gp;
2441{
2442 struct pstate ps;
2443 FILE *fp;
2444 char *filebuf;
2445 int filesize;
2446 int lineno;
2447 int c;
2448 char *cp, *nextcp;
2449 int startline = 0;
2450
2451 memset(&ps, '\0', sizeof(ps));
2452 ps.gp = gp;
2453 ps.filename = gp->filename;
2454 ps.errorcnt = 0;
2455 ps.state = INITIALIZE;
2456
2457 /* Begin by reading the input file */
2458 fp = fopen(ps.filename,"rb");
2459 if( fp==0 ){
2460 ErrorMsg(ps.filename,0,"Can't open this file for reading.");
2461 gp->errorcnt++;
2462 return;
2463 }
2464 fseek(fp,0,2);
2465 filesize = ftell(fp);
2466 rewind(fp);
2467 filebuf = (char *)malloc( filesize+1 );
2468 if( filebuf==0 ){
2469 ErrorMsg(ps.filename,0,"Can't allocate %d of memory to hold this file.",
2470 filesize+1);
2471 gp->errorcnt++;
2472 return;
2473 }
2474 if( fread(filebuf,1,filesize,fp)!=filesize ){
2475 ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
2476 filesize);
2477 free(filebuf);
2478 gp->errorcnt++;
2479 return;
2480 }
2481 fclose(fp);
2482 filebuf[filesize] = 0;
2483
2484 /* Make an initial pass through the file to handle %ifdef and %ifndef */
2485 preprocess_input(filebuf);
2486
2487 /* Now scan the text of the input file */
2488 lineno = 1;
2489 for(cp=filebuf; (c= *cp)!=0; ){
2490 if( c=='\n' ) lineno++; /* Keep track of the line number */
2491 if( isspace(c) ){ cp++; continue; } /* Skip all white space */
2492 if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */
2493 cp+=2;
2494 while( (c= *cp)!=0 && c!='\n' ) cp++;
2495 continue;
2496 }
2497 if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */
2498 cp+=2;
2499 while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
2500 if( c=='\n' ) lineno++;
2501 cp++;
2502 }
2503 if( c ) cp++;
2504 continue;
2505 }
2506 ps.tokenstart = cp; /* Mark the beginning of the token */
2507 ps.tokenlineno = lineno; /* Linenumber on which token begins */
2508 if( c=='\"' ){ /* String literals */
2509 cp++;
2510 while( (c= *cp)!=0 && c!='\"' ){
2511 if( c=='\n' ) lineno++;
2512 cp++;
2513 }
2514 if( c==0 ){
2515 ErrorMsg(ps.filename,startline,
2516"String starting on this line is not terminated before the end of the file.");
2517 ps.errorcnt++;
2518 nextcp = cp;
2519 }else{
2520 nextcp = cp+1;
2521 }
2522 }else if( c=='{' ){ /* A block of C code */
2523 int level;
2524 cp++;
2525 for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
2526 if( c=='\n' ) lineno++;
2527 else if( c=='{' ) level++;
2528 else if( c=='}' ) level--;
2529 else if( c=='/' && cp[1]=='*' ){ /* Skip comments */
2530 int prevc;
2531 cp = &cp[2];
2532 prevc = 0;
2533 while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
2534 if( c=='\n' ) lineno++;
2535 prevc = c;
2536 cp++;
2537 }
2538 }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
2539 cp = &cp[2];
2540 while( (c= *cp)!=0 && c!='\n' ) cp++;
2541 if( c ) lineno++;
2542 }else if( c=='\'' || c=='\"' ){ /* String and character literals */
2543 int startchar, prevc;
2544 startchar = c;
2545 prevc = 0;
2546 for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
2547 if( c=='\n' ) lineno++;
2548 if( prevc=='\\' ) prevc = 0;
2549 else prevc = c;
2550 }
2551 }
2552 }
2553 if( c==0 ){
2554 ErrorMsg(ps.filename,ps.tokenlineno,
2555"C code starting on this line is not terminated before the end of the file.");
2556 ps.errorcnt++;
2557 nextcp = cp;
2558 }else{
2559 nextcp = cp+1;
2560 }
2561 }else if( isalnum(c) ){ /* Identifiers */
2562 while( (c= *cp)!=0 && (isalnum(c) || c=='_') ) cp++;
2563 nextcp = cp;
2564 }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
2565 cp += 3;
2566 nextcp = cp;
2567 }else if( (c=='/' || c=='|') && isalpha(cp[1]) ){
2568 cp += 2;
2569 while( (c = *cp)!=0 && (isalnum(c) || c=='_') ) cp++;
2570 nextcp = cp;
2571 }else{ /* All other (one character) operators */
2572 cp++;
2573 nextcp = cp;
2574 }
2575 c = *cp;
2576 *cp = 0; /* Null terminate the token */
2577 parseonetoken(&ps); /* Parse the token */
2578 *cp = c; /* Restore the buffer */
2579 cp = nextcp;
2580 }
2581 free(filebuf); /* Release the buffer after parsing */
2582 gp->rule = ps.firstrule;
2583 gp->errorcnt = ps.errorcnt;
2584}
2585/*************************** From the file "plink.c" *********************/
2586/*
2587** Routines processing configuration follow-set propagation links
2588** in the LEMON parser generator.
2589*/
2590static struct plink *plink_freelist = 0;
2591
2592/* Allocate a new plink */
2593struct plink *Plink_new(){
2594 struct plink *new;
2595
2596 if( plink_freelist==0 ){
2597 int i;
2598 int amt = 100;
2599 plink_freelist = (struct plink *)malloc( sizeof(struct plink)*amt );
2600 if( plink_freelist==0 ){
2601 fprintf(stderr,
2602 "Unable to allocate memory for a new follow-set propagation link.\n");
2603 exit(1);
2604 }
2605 for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
2606 plink_freelist[amt-1].next = 0;
2607 }
2608 new = plink_freelist;
2609 plink_freelist = plink_freelist->next;
2610 return new;
2611}
2612
2613/* Add a plink to a plink list */
2614void Plink_add(plpp,cfp)
2615struct plink **plpp;
2616struct config *cfp;
2617{
2618 struct plink *new;
2619 new = Plink_new();
2620 new->next = *plpp;
2621 *plpp = new;
2622 new->cfp = cfp;
2623}
2624
2625/* Transfer every plink on the list "from" to the list "to" */
2626void Plink_copy(to,from)
2627struct plink **to;
2628struct plink *from;
2629{
2630 struct plink *nextpl;
2631 while( from ){
2632 nextpl = from->next;
2633 from->next = *to;
2634 *to = from;
2635 from = nextpl;
2636 }
2637}
2638
2639/* Delete every plink on the list */
2640void Plink_delete(plp)
2641struct plink *plp;
2642{
2643 struct plink *nextpl;
2644
2645 while( plp ){
2646 nextpl = plp->next;
2647 plp->next = plink_freelist;
2648 plink_freelist = plp;
2649 plp = nextpl;
2650 }
2651}
2652/*********************** From the file "report.c" **************************/
2653/*
2654** Procedures for generating reports and tables in the LEMON parser generator.
2655*/
2656
2657/* Generate a filename with the given suffix. Space to hold the
2658** name comes from malloc() and must be freed by the calling
2659** function.
2660*/
2661PRIVATE char *file_makename(lemp,suffix)
2662struct lemon *lemp;
2663char *suffix;
2664{
2665 char *name;
2666 char *cp;
2667
2668 name = malloc( strlen(lemp->filename) + strlen(suffix) + 5 );
2669 if( name==0 ){
2670 fprintf(stderr,"Can't allocate space for a filename.\n");
2671 exit(1);
2672 }
2673 strcpy(name,lemp->filename);
2674 cp = strrchr(name,'.');
2675 if( cp ) *cp = 0;
2676 strcat(name,suffix);
2677 return name;
2678}
2679
2680/* Open a file with a name based on the name of the input file,
2681** but with a different (specified) suffix, and return a pointer
2682** to the stream */
2683PRIVATE FILE *file_open(lemp,suffix,mode)
2684struct lemon *lemp;
2685char *suffix;
2686char *mode;
2687{
2688 FILE *fp;
2689
2690 if( lemp->outname ) free(lemp->outname);
2691 lemp->outname = file_makename(lemp, suffix);
2692 fp = fopen(lemp->outname,mode);
2693 if( fp==0 && *mode=='w' ){
2694 fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
2695 lemp->errorcnt++;
2696 return 0;
2697 }
2698 return fp;
2699}
2700
2701/* Duplicate the input file without comments and without actions
2702** on rules */
2703void Reprint(lemp)
2704struct lemon *lemp;
2705{
2706 struct rule *rp;
2707 struct symbol *sp;
2708 int i, j, maxlen, len, ncolumns, skip;
2709 printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
2710 maxlen = 10;
2711 for(i=0; i<lemp->nsymbol; i++){
2712 sp = lemp->symbols[i];
2713 len = strlen(sp->name);
2714 if( len>maxlen ) maxlen = len;
2715 }
2716 ncolumns = 76/(maxlen+5);
2717 if( ncolumns<1 ) ncolumns = 1;
2718 skip = (lemp->nsymbol + ncolumns - 1)/ncolumns;
2719 for(i=0; i<skip; i++){
2720 printf("//");
2721 for(j=i; j<lemp->nsymbol; j+=skip){
2722 sp = lemp->symbols[j];
2723 assert( sp->index==j );
2724 printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
2725 }
2726 printf("\n");
2727 }
2728 for(rp=lemp->rule; rp; rp=rp->next){
2729 printf("%s",rp->lhs->name);
2730 /* if( rp->lhsalias ) printf("(%s)",rp->lhsalias); */
2731 printf(" ::=");
2732 for(i=0; i<rp->nrhs; i++){
2733 sp = rp->rhs[i];
2734 printf(" %s", sp->name);
2735 if( sp->type==MULTITERMINAL ){
2736 for(j=1; j<sp->nsubsym; j++){
2737 printf("|%s", sp->subsym[j]->name);
2738 }
2739 }
2740 /* if( rp->rhsalias[i] ) printf("(%s)",rp->rhsalias[i]); */
2741 }
2742 printf(".");
2743 if( rp->precsym ) printf(" [%s]",rp->precsym->name);
2744 /* if( rp->code ) printf("\n %s",rp->code); */
2745 printf("\n");
2746 }
2747}
2748
2749void ConfigPrint(fp,cfp)
2750FILE *fp;
2751struct config *cfp;
2752{
2753 struct rule *rp;
2754 struct symbol *sp;
2755 int i, j;
2756 rp = cfp->rp;
2757 fprintf(fp,"%s ::=",rp->lhs->name);
2758 for(i=0; i<=rp->nrhs; i++){
2759 if( i==cfp->dot ) fprintf(fp," *");
2760 if( i==rp->nrhs ) break;
2761 sp = rp->rhs[i];
2762 fprintf(fp," %s", sp->name);
2763 if( sp->type==MULTITERMINAL ){
2764 for(j=1; j<sp->nsubsym; j++){
2765 fprintf(fp,"|%s",sp->subsym[j]->name);
2766 }
2767 }
2768 }
2769}
2770
2771/* #define TEST */
2772#if 0
2773/* Print a set */
2774PRIVATE void SetPrint(out,set,lemp)
2775FILE *out;
2776char *set;
2777struct lemon *lemp;
2778{
2779 int i;
2780 char *spacer;
2781 spacer = "";
2782 fprintf(out,"%12s[","");
2783 for(i=0; i<lemp->nterminal; i++){
2784 if( SetFind(set,i) ){
2785 fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
2786 spacer = " ";
2787 }
2788 }
2789 fprintf(out,"]\n");
2790}
2791
2792/* Print a plink chain */
2793PRIVATE void PlinkPrint(out,plp,tag)
2794FILE *out;
2795struct plink *plp;
2796char *tag;
2797{
2798 while( plp ){
2799 fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum);
2800 ConfigPrint(out,plp->cfp);
2801 fprintf(out,"\n");
2802 plp = plp->next;
2803 }
2804}
2805#endif
2806
2807/* Print an action to the given file descriptor. Return FALSE if
2808** nothing was actually printed.
2809*/
2810int PrintAction(struct action *ap, FILE *fp, int indent){
2811 int result = 1;
2812 switch( ap->type ){
2813 case SHIFT:
2814 fprintf(fp,"%*s shift %d",indent,ap->sp->name,ap->x.stp->statenum);
2815 break;
2816 case REDUCE:
2817 fprintf(fp,"%*s reduce %d",indent,ap->sp->name,ap->x.rp->index);
2818 break;
2819 case ACCEPT:
2820 fprintf(fp,"%*s accept",indent,ap->sp->name);
2821 break;
2822 case ERROR:
2823 fprintf(fp,"%*s error",indent,ap->sp->name);
2824 break;
2825 case CONFLICT:
2826 fprintf(fp,"%*s reduce %-3d ** Parsing conflict **",
2827 indent,ap->sp->name,ap->x.rp->index);
2828 break;
2829 case SH_RESOLVED:
2830 case RD_RESOLVED:
2831 case NOT_USED:
2832 result = 0;
2833 break;
2834 }
2835 return result;
2836}
2837
2838/* Generate the "y.output" log file */
2839void ReportOutput(lemp)
2840struct lemon *lemp;
2841{
2842 int i;
2843 struct state *stp;
2844 struct config *cfp;
2845 struct action *ap;
2846 FILE *fp;
2847
2848 fp = file_open(lemp,".out","wb");
2849 if( fp==0 ) return;
2850 for(i=0; i<lemp->nstate; i++){
2851 stp = lemp->sorted[i];
2852 fprintf(fp,"State %d:\n",stp->statenum);
2853 if( lemp->basisflag ) cfp=stp->bp;
2854 else cfp=stp->cfp;
2855 while( cfp ){
2856 char buf[20];
2857 if( cfp->dot==cfp->rp->nrhs ){
2858 sprintf(buf,"(%d)",cfp->rp->index);
2859 fprintf(fp," %5s ",buf);
2860 }else{
2861 fprintf(fp," ");
2862 }
2863 ConfigPrint(fp,cfp);
2864 fprintf(fp,"\n");
2865#if 0
2866 SetPrint(fp,cfp->fws,lemp);
2867 PlinkPrint(fp,cfp->fplp,"To ");
2868 PlinkPrint(fp,cfp->bplp,"From");
2869#endif
2870 if( lemp->basisflag ) cfp=cfp->bp;
2871 else cfp=cfp->next;
2872 }
2873 fprintf(fp,"\n");
2874 for(ap=stp->ap; ap; ap=ap->next){
2875 if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
2876 }
2877 fprintf(fp,"\n");
2878 }
2879 fprintf(fp, "----------------------------------------------------\n");
2880 fprintf(fp, "Symbols:\n");
2881 for(i=0; i<lemp->nsymbol; i++){
2882 int j;
2883 struct symbol *sp;
2884
2885 sp = lemp->symbols[i];
2886 fprintf(fp, " %3d: %s", i, sp->name);
2887 if( sp->type==NONTERMINAL ){
2888 fprintf(fp, ":");
2889 if( sp->lambda ){
2890 fprintf(fp, " <lambda>");
2891 }
2892 for(j=0; j<lemp->nterminal; j++){
2893 if( sp->firstset && SetFind(sp->firstset, j) ){
2894 fprintf(fp, " %s", lemp->symbols[j]->name);
2895 }
2896 }
2897 }
2898 fprintf(fp, "\n");
2899 }
2900 fclose(fp);
2901 return;
2902}
2903
2904/* Search for the file "name" which is in the same directory as
2905** the executable */
2906PRIVATE char *pathsearch(argv0,name,modemask)
2907char *argv0;
2908char *name;
2909int modemask;
2910{
2911 char *pathlist;
2912 char *path,*cp;
2913 char c;
2914
2915#ifdef __WIN32__
2916 cp = strrchr(argv0,'\\');
2917#else
2918 cp = strrchr(argv0,'/');
2919#endif
2920 if( cp ){
2921 c = *cp;
2922 *cp = 0;
2923 path = (char *)malloc( strlen(argv0) + strlen(name) + 2 );
2924 if( path ) sprintf(path,"%s/%s",argv0,name);
2925 *cp = c;
2926 }else{
2927 extern char *getenv();
2928 pathlist = getenv("PATH");
2929 if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
2930 path = (char *)malloc( strlen(pathlist)+strlen(name)+2 );
2931 if( path!=0 ){
2932 while( *pathlist ){
2933 cp = strchr(pathlist,':');
2934 if( cp==0 ) cp = &pathlist[strlen(pathlist)];
2935 c = *cp;
2936 *cp = 0;
2937 sprintf(path,"%s/%s",pathlist,name);
2938 *cp = c;
2939 if( c==0 ) pathlist = "";
2940 else pathlist = &cp[1];
2941 if( access(path,modemask)==0 ) break;
2942 }
2943 }
2944 }
2945 return path;
2946}
2947
2948/* Given an action, compute the integer value for that action
2949** which is to be put in the action table of the generated machine.
2950** Return negative if no action should be generated.
2951*/
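/*
** For example, in a grammar with YYNSTATE states and YYNRULE rules a
** shift into state 7 is encoded as 7, a reduce by rule 3 as
** YYNSTATE+3, an error action as YYNSTATE+YYNRULE, and accept as
** YYNSTATE+YYNRULE+1; resolved or unused actions return -1 and are
** left out of the table.  (This only summarizes the switch below.)
*/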
2952PRIVATE int compute_action(lemp,ap)
2953struct lemon *lemp;
2954struct action *ap;
2955{
2956 int act;
2957 switch( ap->type ){
2958 case SHIFT: act = ap->x.stp->statenum; break;
2959 case REDUCE: act = ap->x.rp->index + lemp->nstate; break;
2960 case ERROR: act = lemp->nstate + lemp->nrule; break;
2961 case ACCEPT: act = lemp->nstate + lemp->nrule + 1; break;
2962 default: act = -1; break;
2963 }
2964 return act;
2965}
2966
2967#define LINESIZE 1000
2968/* The next cluster of routines is for reading the template file
2969** and writing the results to the generated parser */
2970/* The first function transfers data from "in" to "out" until
2971** a line is seen which begins with "%%". The line number is
2972** tracked.
2973**
2974** If name!=0, then any word that begins with "Parse" is changed to
2975** begin with *name instead.
2976*/
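/*
** For instance (a sketch), with "%name Sqlite3Parser" in the grammar a
** template line such as "void ParseFree(" is copied out as
** "void Sqlite3ParserFree(": only the leading "Parse" of each such word
** is replaced, and occurrences where "Parse" is preceded by a letter
** are left untouched.
*/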
2977PRIVATE void tplt_xfer(name,in,out,lineno)
2978char *name;
2979FILE *in;
2980FILE *out;
2981int *lineno;
2982{
2983 int i, iStart;
2984 char line[LINESIZE];
2985 while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
2986 (*lineno)++;
2987 iStart = 0;
2988 if( name ){
2989 for(i=0; line[i]; i++){
2990 if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
2991 && (i==0 || !isalpha(line[i-1]))
2992 ){
2993 if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
2994 fprintf(out,"%s",name);
2995 i += 4;
2996 iStart = i+1;
2997 }
2998 }
2999 }
3000 fprintf(out,"%s",&line[iStart]);
3001 }
3002}
3003
3004/* The next function finds the template file and opens it, returning
3005** a pointer to the opened file. */
3006PRIVATE FILE *tplt_open(lemp)
3007struct lemon *lemp;
3008{
3009 static char templatename[] = "lempar.c";
3010 char buf[1000];
3011 FILE *in;
3012 char *tpltname;
3013 char *cp;
3014
3015 cp = strrchr(lemp->filename,'.');
3016 if( cp ){
3017 sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
3018 }else{
3019 sprintf(buf,"%s.lt",lemp->filename);
3020 }
3021 if( access(buf,004)==0 ){
3022 tpltname = buf;
3023 }else if( access(templatename,004)==0 ){
3024 tpltname = templatename;
3025 }else{
3026 tpltname = pathsearch(lemp->argv0,templatename,0);
3027 }
3028 if( tpltname==0 ){
3029 fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
3030 templatename);
3031 lemp->errorcnt++;
3032 return 0;
3033 }
3034 in = fopen(tpltname,"rb");
3035 if( in==0 ){
3036 fprintf(stderr,"Can't open the template file \"%s\".\n",templatename);
3037 lemp->errorcnt++;
3038 return 0;
3039 }
3040 return in;
3041}
3042
3043/* Print a #line directive line to the output file. */
3044PRIVATE void tplt_linedir(out,lineno,filename)
3045FILE *out;
3046int lineno;
3047char *filename;
3048{
3049 fprintf(out,"#line %d \"",lineno);
3050 while( *filename ){
3051 if( *filename == '\\' ) putc('\\',out);
3052 putc(*filename,out);
3053 filename++;
3054 }
3055 fprintf(out,"\"\n");
3056}
3057
3058/* Print a string to the file and keep the linenumber up to date */
3059PRIVATE void tplt_print(out,lemp,str,strln,lineno)
3060FILE *out;
3061struct lemon *lemp;
3062char *str;
3063int strln;
3064int *lineno;
3065{
3066 if( str==0 ) return;
3067 tplt_linedir(out,strln,lemp->filename);
3068 (*lineno)++;
3069 while( *str ){
3070 if( *str=='\n' ) (*lineno)++;
3071 putc(*str,out);
3072 str++;
3073 }
3074 if( str[-1]!='\n' ){
3075 putc('\n',out);
3076 (*lineno)++;
3077 }
3078 tplt_linedir(out,*lineno+2,lemp->outname);
3079 (*lineno)+=2;
3080 return;
3081}
3082
3083/*
3084** The following routine emits code for the destructor for the
3085** symbol sp
3086*/
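/*
** Sketch of the effect (hypothetical directive and helper name): for
**
**     %destructor expr { freeExpr($$); }
**
** this routine emits, bracketed by #line directives,
**
**     { freeExpr((yypminor->yyN)); }
**
** where N is the ".dtnum" slot assigned to expr, because every "$$" in
** the destructor body is rewritten as a reference to the stack entry
** being destroyed.
*/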
3087void emit_destructor_code(out,sp,lemp,lineno)
3088FILE *out;
3089struct symbol *sp;
3090struct lemon *lemp;
3091int *lineno;
3092{
3093 char *cp = 0;
3094
3095 int linecnt = 0;
3096 if( sp->type==TERMINAL ){
3097 cp = lemp->tokendest;
3098 if( cp==0 ) return;
3099 tplt_linedir(out,lemp->tokendestln,lemp->filename);
3100 fprintf(out,"{");
3101 }else if( sp->destructor ){
3102 cp = sp->destructor;
3103 tplt_linedir(out,sp->destructorln,lemp->filename);
3104 fprintf(out,"{");
3105 }else if( lemp->vardest ){
3106 cp = lemp->vardest;
3107 if( cp==0 ) return;
3108 tplt_linedir(out,lemp->vardestln,lemp->filename);
3109 fprintf(out,"{");
3110 }else{
3111 assert( 0 ); /* Cannot happen */
3112 }
3113 for(; *cp; cp++){
3114 if( *cp=='$' && cp[1]=='$' ){
3115 fprintf(out,"(yypminor->yy%d)",sp->dtnum);
3116 cp++;
3117 continue;
3118 }
3119 if( *cp=='\n' ) linecnt++;
3120 fputc(*cp,out);
3121 }
3122 (*lineno) += 3 + linecnt;
3123 fprintf(out,"}\n");
3124 tplt_linedir(out,*lineno,lemp->outname);
3125 return;
3126}
3127
3128/*
3129** Return TRUE (non-zero) if the given symbol has a destructor.
3130*/
3131int has_destructor(sp, lemp)
3132struct symbol *sp;
3133struct lemon *lemp;
3134{
3135 int ret;
3136 if( sp->type==TERMINAL ){
3137 ret = lemp->tokendest!=0;
3138 }else{
3139 ret = lemp->vardest!=0 || sp->destructor!=0;
3140 }
3141 return ret;
3142}
3143
3144/*
3145** Append text to a dynamically allocated string. If zText is 0 then
3146** reset the string to be empty again. Always return the complete text
3147** of the string (which is overwritten with each call).
3148**
3149** n bytes of zText are stored. If n==0 then all of zText up to the first
3150** \000 terminator is stored. zText can contain up to two instances of
3151** %d. The values of p1 and p2 are written into the first and second
3152** %d.
3153**
3154** If n==-1, then the previous character is overwritten.
3155*/
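/*
** Usage sketch (values chosen only for illustration):
**
**     append_str(0,0,0,0);                         reset the buffer
**     append_str("yymsp[%d].minor.yy%d",0,-2,5);   appends "yymsp[-2].minor.yy5"
**     cp = append_str(0,0,0,0);                    returns the accumulated text
**
** The buffer is a single static allocation, so callers such as
** translate_code() must copy the result (e.g. with Strsafe()) before
** building another string.
*/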
3156PRIVATE char *append_str(char *zText, int n, int p1, int p2){
3157 static char *z = 0;
3158 static int alloced = 0;
3159 static int used = 0;
3160 int c;
3161 char zInt[40];
3162
3163 if( zText==0 ){
3164 used = 0;
3165 return z;
3166 }
3167 if( n<=0 ){
3168 if( n<0 ){
3169 used += n;
3170 assert( used>=0 );
3171 }
3172 n = strlen(zText);
3173 }
3174 if( n+sizeof(zInt)*2+used >= alloced ){
3175 alloced = n + sizeof(zInt)*2 + used + 200;
3176 z = realloc(z, alloced);
3177 }
3178 if( z==0 ) return "";
3179 while( n-- > 0 ){
3180 c = *(zText++);
3181 if( c=='%' && n>0 && zText[0]=='d' ){
3182 sprintf(zInt, "%d", p1);
3183 p1 = p2;
3184 strcpy(&z[used], zInt);
3185 used += strlen(&z[used]);
3186 zText++;
3187 n--;
3188 }else{
3189 z[used++] = c;
3190 }
3191 }
3192 z[used] = 0;
3193 return z;
3194}
3195
3196/*
3197** zCode is a string that is the action associated with a rule. Expand
3198** the symbols in this string so that they refer to elements of the parser
3199** stack.
3200*/
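/*
** Illustrative example (a made-up rule):
**
**     expr(A) ::= expr(B) PLUS expr(C).  { A = B + C; }
**
** is expanded so that the emitted reduce action reads roughly
**
**     { yygotominor.yyN = yymsp[-2].minor.yyN + yymsp[0].minor.yyN; }
**
** where N is the ".dtnum" value assigned to expr's datatype by
** print_stack_union().  An unused alias raises the "never used" errors
** below, and alias-less RHS symbols that have destructors get a
** yy_destructor() call appended to the action instead.
*/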
3201PRIVATE void translate_code(struct lemon *lemp, struct rule *rp){
3202 char *cp, *xp;
3203 int i;
3204 char lhsused = 0; /* True if the LHS element has been used */
3205 char used[MAXRHS]; /* True for each RHS element which is used */
3206
3207 for(i=0; i<rp->nrhs; i++) used[i] = 0;
3208 lhsused = 0;
3209
3210 if( rp->code==0 ){
3211 rp->code = "\n";
3212 rp->line = rp->ruleline;
3213 }
3214
3215 append_str(0,0,0,0);
3216 for(cp=rp->code; *cp; cp++){
3217 if( isalpha(*cp) && (cp==rp->code || (!isalnum(cp[-1]) && cp[-1]!='_')) ){
3218 char saved;
3219 for(xp= &cp[1]; isalnum(*xp) || *xp=='_'; xp++);
3220 saved = *xp;
3221 *xp = 0;
3222 if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
3223 append_str("yygotominor.yy%d",0,rp->lhs->dtnum,0);
3224 cp = xp;
3225 lhsused = 1;
3226 }else{
3227 for(i=0; i<rp->nrhs; i++){
3228 if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
3229 if( cp!=rp->code && cp[-1]=='@' ){
3230 /* If the argument is of the form @X then substitute
3231 ** the token number of X, not the value of X */
3232 append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0);
3233 }else{
3234 struct symbol *sp = rp->rhs[i];
3235 int dtnum;
3236 if( sp->type==MULTITERMINAL ){
3237 dtnum = sp->subsym[0]->dtnum;
3238 }else{
3239 dtnum = sp->dtnum;
3240 }
3241 append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum);
3242 }
3243 cp = xp;
3244 used[i] = 1;
3245 break;
3246 }
3247 }
3248 }
3249 *xp = saved;
3250 }
3251 append_str(cp, 1, 0, 0);
3252 } /* End loop */
3253
3254 /* Check to make sure the LHS has been used */
3255 if( rp->lhsalias && !lhsused ){
3256 ErrorMsg(lemp->filename,rp->ruleline,
3257 "Label \"%s\" for \"%s(%s)\" is never used.",
3258 rp->lhsalias,rp->lhs->name,rp->lhsalias);
3259 lemp->errorcnt++;
3260 }
3261
3262 /* Generate destructor code for RHS symbols which are not used in the
3263 ** reduce code */
3264 for(i=0; i<rp->nrhs; i++){
3265 if( rp->rhsalias[i] && !used[i] ){
3266 ErrorMsg(lemp->filename,rp->ruleline,
3267 "Label %s for \"%s(%s)\" is never used.",
3268 rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
3269 lemp->errorcnt++;
3270 }else if( rp->rhsalias[i]==0 ){
3271 if( has_destructor(rp->rhs[i],lemp) ){
3272 append_str(" yy_destructor(%d,&yymsp[%d].minor);\n", 0,
3273 rp->rhs[i]->index,i-rp->nrhs+1);
3274 }else{
3275 /* No destructor defined for this term */
3276 }
3277 }
3278 }
3279 if( rp->code ){
3280 cp = append_str(0,0,0,0);
3281 rp->code = Strsafe(cp?cp:"");
3282 }
3283}
3284
3285/*
3286** Generate code which executes when the rule "rp" is reduced. Write
3287** the code to "out". Make sure lineno stays up-to-date.
3288*/
3289PRIVATE void emit_code(out,rp,lemp,lineno)
3290FILE *out;
3291struct rule *rp;
3292struct lemon *lemp;
3293int *lineno;
3294{
3295 char *cp;
3296 int linecnt = 0;
3297
3298 /* Generate code to do the reduce action */
3299 if( rp->code ){
3300 tplt_linedir(out,rp->line,lemp->filename);
3301 fprintf(out,"{%s",rp->code);
3302 for(cp=rp->code; *cp; cp++){
3303 if( *cp=='\n' ) linecnt++;
3304 } /* End loop */
3305 (*lineno) += 3 + linecnt;
3306 fprintf(out,"}\n");
3307 tplt_linedir(out,*lineno,lemp->outname);
3308 } /* End if( rp->code ) */
3309
3310 return;
3311}
3312
3313/*
3314** Print the definition of the union used for the parser's data stack.
3315** This union contains fields for every possible data type for tokens
3316** and nonterminals. In the process of computing and printing this
3317** union, also set the ".dtnum" field of every terminal and nonterminal
3318** symbol.
3319*/
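/*
** The generated text looks roughly like this (a sketch; the member
** types and yy numbers depend on the grammar's %token_type and %type
** directives):
**
**     #define ParseTOKENTYPE Token
**     typedef union {
**       ParseTOKENTYPE yy0;
**       Expr* yy14;
**       int yy47;
**     } YYMINORTYPE;
**
** with one yy%d member per distinct datatype, yy0 reserved for
** terminals, and an int member reserved for the error symbol.
*/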
3320void print_stack_union(out,lemp,plineno,mhflag)
3321FILE *out; /* The output stream */
3322struct lemon *lemp; /* The main info structure for this parser */
3323int *plineno; /* Pointer to the line number */
3324int mhflag; /* True if generating makeheaders output */
3325{
3326 int lineno = *plineno; /* The line number of the output */
3327 char **types; /* A hash table of datatypes */
3328 int arraysize; /* Size of the "types" array */
3329 int maxdtlength; /* Maximum length of any ".datatype" field. */
3330 char *stddt; /* Standardized name for a datatype */
3331 int i,j; /* Loop counters */
3332 int hash; /* For hashing the name of a type */
3333 char *name; /* Name of the parser */
3334
3335 /* Allocate and initialize types[] and allocate stddt[] */
3336 arraysize = lemp->nsymbol * 2;
3337 types = (char**)malloc( arraysize * sizeof(char*) );
3338 for(i=0; i<arraysize; i++) types[i] = 0;
3339 maxdtlength = 0;
3340 if( lemp->vartype ){
3341 maxdtlength = strlen(lemp->vartype);
3342 }
3343 for(i=0; i<lemp->nsymbol; i++){
3344 int len;
3345 struct symbol *sp = lemp->symbols[i];
3346 if( sp->datatype==0 ) continue;
3347 len = strlen(sp->datatype);
3348 if( len>maxdtlength ) maxdtlength = len;
3349 }
3350 stddt = (char*)malloc( maxdtlength*2 + 1 );
3351 if( types==0 || stddt==0 ){
3352 fprintf(stderr,"Out of memory.\n");
3353 exit(1);
3354 }
3355
3356 /* Build a hash table of datatypes. The ".dtnum" field of each symbol
3357 ** is filled in with the hash index plus 1. A ".dtnum" value of 0 is
3358 ** used for terminal symbols. If there is no %default_type defined then
3359 ** 0 is also used as the .dtnum value for nonterminals which do not specify
3360 ** a datatype using the %type directive.
3361 */
3362 for(i=0; i<lemp->nsymbol; i++){
3363 struct symbol *sp = lemp->symbols[i];
3364 char *cp;
3365 if( sp==lemp->errsym ){
3366 sp->dtnum = arraysize+1;
3367 continue;
3368 }
3369 if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
3370 sp->dtnum = 0;
3371 continue;
3372 }
3373 cp = sp->datatype;
3374 if( cp==0 ) cp = lemp->vartype;
3375 j = 0;
3376 while( isspace(*cp) ) cp++;
3377 while( *cp ) stddt[j++] = *cp++;
3378 while( j>0 && isspace(stddt[j-1]) ) j--;
3379 stddt[j] = 0;
3380 hash = 0;
3381 for(j=0; stddt[j]; j++){
3382 hash = hash*53 + stddt[j];
3383 }
3384 hash = (hash & 0x7fffffff)%arraysize;
3385 while( types[hash] ){
3386 if( strcmp(types[hash],stddt)==0 ){
3387 sp->dtnum = hash + 1;
3388 break;
3389 }
3390 hash++;
3391 if( hash>=arraysize ) hash = 0;
3392 }
3393 if( types[hash]==0 ){
3394 sp->dtnum = hash + 1;
3395 types[hash] = (char*)malloc( strlen(stddt)+1 );
3396 if( types[hash]==0 ){
3397 fprintf(stderr,"Out of memory.\n");
3398 exit(1);
3399 }
3400 strcpy(types[hash],stddt);
3401 }
3402 }
3403
3404 /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
3405 name = lemp->name ? lemp->name : "Parse";
3406 lineno = *plineno;
3407 if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
3408 fprintf(out,"#define %sTOKENTYPE %s\n",name,
3409 lemp->tokentype?lemp->tokentype:"void*"); lineno++;
3410 if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
3411 fprintf(out,"typedef union {\n"); lineno++;
3412 fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++;
3413 for(i=0; i<arraysize; i++){
3414 if( types[i]==0 ) continue;
3415 fprintf(out," %s yy%d;\n",types[i],i+1); lineno++;
3416 free(types[i]);
3417 }
3418 fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++;
3419 free(stddt);
3420 free(types);
3421 fprintf(out,"} YYMINORTYPE;\n"); lineno++;
3422 *plineno = lineno;
3423}
3424
3425/*
3426** Return the name of a C datatype able to represent values between
3427** lwr and upr, inclusive.
3428*/
3429static const char *minimum_size_type(int lwr, int upr){
3430 if( lwr>=0 ){
3431 if( upr<=255 ){
3432 return "unsigned char";
3433 }else if( upr<65535 ){
3434 return "unsigned short int";
3435 }else{
3436 return "unsigned int";
3437 }
3438 }else if( lwr>=-127 && upr<=127 ){
3439 return "signed char";
3440 }else if( lwr>=-32767 && upr<32767 ){
3441 return "short";
3442 }else{
3443 return "int";
3444 }
3445}
3446
3447/*
3448** Each state contains a set of token transactions and a set of
3449** nonterminal transactions. Each of these sets makes an instance
3450** of the following structure. An array of these structures is used
3451** to order the creation of entries in the yy_action[] table.
3452*/
3453struct axset {
3454 struct state *stp; /* A pointer to a state */
3455 int isTkn; /* True to use tokens. False for non-terminals */
3456 int nAction; /* Number of actions */
3457};
3458
3459/*
3460** Compare two axset structures for sorting purposes
3461*/
3462static int axset_compare(const void *a, const void *b){
3463 struct axset *p1 = (struct axset*)a;
3464 struct axset *p2 = (struct axset*)b;
3465 return p2->nAction - p1->nAction;
3466}
3467
3468/* Generate C source code for the parser */
3469void ReportTable(lemp, mhflag)
3470struct lemon *lemp;
3471int mhflag; /* Output in makeheaders format if true */
3472{
3473 FILE *out, *in;
3474 char line[LINESIZE];
3475 int lineno;
3476 struct state *stp;
3477 struct action *ap;
3478 struct rule *rp;
3479 struct acttab *pActtab;
3480 int i, j, n;
3481 char *name;
3482 int mnTknOfst, mxTknOfst;
3483 int mnNtOfst, mxNtOfst;
3484 struct axset *ax;
3485
3486 in = tplt_open(lemp);
3487 if( in==0 ) return;
3488 out = file_open(lemp,".c","wb");
3489 if( out==0 ){
3490 fclose(in);
3491 return;
3492 }
3493 lineno = 1;
3494 tplt_xfer(lemp->name,in,out,&lineno);
3495
3496 /* Generate the include code, if any */
3497 tplt_print(out,lemp,lemp->include,lemp->includeln,&lineno);
3498 if( mhflag ){
3499 char *name = file_makename(lemp, ".h");
3500 fprintf(out,"#include \"%s\"\n", name); lineno++;
3501 free(name);
3502 }
3503 tplt_xfer(lemp->name,in,out,&lineno);
3504
3505 /* Generate #defines for all tokens */
3506 if( mhflag ){
3507 char *prefix;
3508 fprintf(out,"#if INTERFACE\n"); lineno++;
3509 if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
3510 else prefix = "";
3511 for(i=1; i<lemp->nterminal; i++){
3512 fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
3513 lineno++;
3514 }
3515 fprintf(out,"#endif\n"); lineno++;
3516 }
3517 tplt_xfer(lemp->name,in,out,&lineno);
3518
3519 /* Generate the defines */
3520 fprintf(out,"#define YYCODETYPE %s\n",
3521 minimum_size_type(0, lemp->nsymbol+5)); lineno++;
3522 fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol+1); lineno++;
3523 fprintf(out,"#define YYACTIONTYPE %s\n",
3524 minimum_size_type(0, lemp->nstate+lemp->nrule+5)); lineno++;
3525 if( lemp->wildcard ){
3526 fprintf(out,"#define YYWILDCARD %d\n",
3527 lemp->wildcard->index); lineno++;
3528 }
3529 print_stack_union(out,lemp,&lineno,mhflag);
3530 fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++;
3531 if( lemp->stacksize ){
3532 fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++;
3533 }else{
3534 fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++;
3535 }
3536 fprintf(out, "#endif\n"); lineno++;
3537 if( mhflag ){
3538 fprintf(out,"#if INTERFACE\n"); lineno++;
3539 }
3540 name = lemp->name ? lemp->name : "Parse";
3541 if( lemp->arg && lemp->arg[0] ){
3542 int i;
3543 i = strlen(lemp->arg);
3544 while( i>=1 && isspace(lemp->arg[i-1]) ) i--;
3545 while( i>=1 && (isalnum(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--;
3546 fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++;
3547 fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++;
3548 fprintf(out,"#define %sARG_FETCH %s = yypParser->%s\n",
3549 name,lemp->arg,&lemp->arg[i]); lineno++;
3550 fprintf(out,"#define %sARG_STORE yypParser->%s = %s\n",
3551 name,&lemp->arg[i],&lemp->arg[i]); lineno++;
3552 }else{
3553 fprintf(out,"#define %sARG_SDECL\n",name); lineno++;
3554 fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
3555 fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
3556 fprintf(out,"#define %sARG_STORE\n",name); lineno++;
3557 }
3558 if( mhflag ){
3559 fprintf(out,"#endif\n"); lineno++;
3560 }
3561 fprintf(out,"#define YYNSTATE %d\n",lemp->nstate); lineno++;
3562 fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++;
3563 fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++;
3564 fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++;
3565 if( lemp->has_fallback ){
3566 fprintf(out,"#define YYFALLBACK 1\n"); lineno++;
3567 }
3568 tplt_xfer(lemp->name,in,out,&lineno);
3569
3570 /* Generate the action table and its associates:
3571 **
3572 ** yy_action[] A single table containing all actions.
3573 ** yy_lookahead[] A table containing the lookahead for each entry in
3574 ** yy_action. Used to detect hash collisions.
3575 ** yy_shift_ofst[] For each state, the offset into yy_action for
3576 ** shifting terminals.
3577 ** yy_reduce_ofst[] For each state, the offset into yy_action for
3578 ** shifting non-terminals after a reduce.
3579 ** yy_default[] Default action for each state.
3580 */
3581
3582 /* Compute the actions on all states and count them up */
3583 ax = malloc( sizeof(ax[0])*lemp->nstate*2 );
3584 if( ax==0 ){
3585 fprintf(stderr,"malloc failed\n");
3586 exit(1);
3587 }
3588 for(i=0; i<lemp->nstate; i++){
3589 stp = lemp->sorted[i];
3590 ax[i*2].stp = stp;
3591 ax[i*2].isTkn = 1;
3592 ax[i*2].nAction = stp->nTknAct;
3593 ax[i*2+1].stp = stp;
3594 ax[i*2+1].isTkn = 0;
3595 ax[i*2+1].nAction = stp->nNtAct;
3596 }
3597 mxTknOfst = mnTknOfst = 0;
3598 mxNtOfst = mnNtOfst = 0;
3599
3600 /* Compute the action table. In order to try to keep the size of the
3601 ** action table to a minimum, the heuristic of placing the largest action
3602 ** sets first is used.
3603 */
3604 qsort(ax, lemp->nstate*2, sizeof(ax[0]), axset_compare);
3605 pActtab = acttab_alloc();
3606 for(i=0; i<lemp->nstate*2 && ax[i].nAction>0; i++){
3607 stp = ax[i].stp;
3608 if( ax[i].isTkn ){
3609 for(ap=stp->ap; ap; ap=ap->next){
3610 int action;
3611 if( ap->sp->index>=lemp->nterminal ) continue;
3612 action = compute_action(lemp, ap);
3613 if( action<0 ) continue;
3614 acttab_action(pActtab, ap->sp->index, action);
3615 }
3616 stp->iTknOfst = acttab_insert(pActtab);
3617 if( stp->iTknOfst<mnTknOfst ) mnTknOfst = stp->iTknOfst;
3618 if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst;
3619 }else{
3620 for(ap=stp->ap; ap; ap=ap->next){
3621 int action;
3622 if( ap->sp->index<lemp->nterminal ) continue;
3623 if( ap->sp->index==lemp->nsymbol ) continue;
3624 action = compute_action(lemp, ap);
3625 if( action<0 ) continue;
3626 acttab_action(pActtab, ap->sp->index, action);
3627 }
3628 stp->iNtOfst = acttab_insert(pActtab);
3629 if( stp->iNtOfst<mnNtOfst ) mnNtOfst = stp->iNtOfst;
3630 if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst;
3631 }
3632 }
3633 free(ax);
3634
3635 /* Output the yy_action table */
3636 fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++;
3637 n = acttab_size(pActtab);
3638 for(i=j=0; i<n; i++){
3639 int action = acttab_yyaction(pActtab, i);
3640 if( action<0 ) action = lemp->nstate + lemp->nrule + 2;
3641 if( j==0 ) fprintf(out," /* %5d */ ", i);
3642 fprintf(out, " %4d,", action);
3643 if( j==9 || i==n-1 ){
3644 fprintf(out, "\n"); lineno++;
3645 j = 0;
3646 }else{
3647 j++;
3648 }
3649 }
3650 fprintf(out, "};\n"); lineno++;
3651
3652 /* Output the yy_lookahead table */
3653 fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++;
3654 for(i=j=0; i<n; i++){
3655 int la = acttab_yylookahead(pActtab, i);
3656 if( la<0 ) la = lemp->nsymbol;
3657 if( j==0 ) fprintf(out," /* %5d */ ", i);
3658 fprintf(out, " %4d,", la);
3659 if( j==9 || i==n-1 ){
3660 fprintf(out, "\n"); lineno++;
3661 j = 0;
3662 }else{
3663 j++;
3664 }
3665 }
3666 fprintf(out, "};\n"); lineno++;
3667
3668 /* Output the yy_shift_ofst[] table */
3669 fprintf(out, "#define YY_SHIFT_USE_DFLT (%d)\n", mnTknOfst-1); lineno++;
3670 n = lemp->nstate;
3671 while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--;
3672 fprintf(out, "#define YY_SHIFT_MAX %d\n", n-1); lineno++;
3673 fprintf(out, "static const %s yy_shift_ofst[] = {\n",
3674 minimum_size_type(mnTknOfst-1, mxTknOfst)); lineno++;
3675 for(i=j=0; i<n; i++){
3676 int ofst;
3677 stp = lemp->sorted[i];
3678 ofst = stp->iTknOfst;
3679 if( ofst==NO_OFFSET ) ofst = mnTknOfst - 1;
3680 if( j==0 ) fprintf(out," /* %5d */ ", i);
3681 fprintf(out, " %4d,", ofst);
3682 if( j==9 || i==n-1 ){
3683 fprintf(out, "\n"); lineno++;
3684 j = 0;
3685 }else{
3686 j++;
3687 }
3688 }
3689 fprintf(out, "};\n"); lineno++;
3690
3691 /* Output the yy_reduce_ofst[] table */
3692 fprintf(out, "#define YY_REDUCE_USE_DFLT (%d)\n", mnNtOfst-1); lineno++;
3693 n = lemp->nstate;
3694 while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--;
3695 fprintf(out, "#define YY_REDUCE_MAX %d\n", n-1); lineno++;
3696 fprintf(out, "static const %s yy_reduce_ofst[] = {\n",
3697 minimum_size_type(mnNtOfst-1, mxNtOfst)); lineno++;
3698 for(i=j=0; i<n; i++){
3699 int ofst;
3700 stp = lemp->sorted[i];
3701 ofst = stp->iNtOfst;
3702 if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1;
3703 if( j==0 ) fprintf(out," /* %5d */ ", i);
3704 fprintf(out, " %4d,", ofst);
3705 if( j==9 || i==n-1 ){
3706 fprintf(out, "\n"); lineno++;
3707 j = 0;
3708 }else{
3709 j++;
3710 }
3711 }
3712 fprintf(out, "};\n"); lineno++;
3713
3714 /* Output the default action table */
3715 fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++;
3716 n = lemp->nstate;
3717 for(i=j=0; i<n; i++){
3718 stp = lemp->sorted[i];
3719 if( j==0 ) fprintf(out," /* %5d */ ", i);
3720 fprintf(out, " %4d,", stp->iDflt);
3721 if( j==9 || i==n-1 ){
3722 fprintf(out, "\n"); lineno++;
3723 j = 0;
3724 }else{
3725 j++;
3726 }
3727 }
3728 fprintf(out, "};\n"); lineno++;
3729 tplt_xfer(lemp->name,in,out,&lineno);
3730
3731 /* Generate the table of fallback tokens.
3732 */
3733 if( lemp->has_fallback ){
3734 for(i=0; i<lemp->nterminal; i++){
3735 struct symbol *p = lemp->symbols[i];
3736 if( p->fallback==0 ){
3737 fprintf(out, " 0, /* %10s => nothing */\n", p->name);
3738 }else{
3739 fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index,
3740 p->name, p->fallback->name);
3741 }
3742 lineno++;
3743 }
3744 }
3745 tplt_xfer(lemp->name, in, out, &lineno);
3746
3747 /* Generate a table containing the symbolic name of every symbol
3748 */
3749 for(i=0; i<lemp->nsymbol; i++){
3750 sprintf(line,"\"%s\",",lemp->symbols[i]->name);
3751 fprintf(out," %-15s",line);
3752 if( (i&3)==3 ){ fprintf(out,"\n"); lineno++; }
3753 }
3754 if( (i&3)!=0 ){ fprintf(out,"\n"); lineno++; }
3755 tplt_xfer(lemp->name,in,out,&lineno);
3756
3757 /* Generate a table containing a text string that describes every
3758** rule in the rule set of the grammar. This information is used
3759 ** when tracing REDUCE actions.
3760 */
3761 for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
3762 assert( rp->index==i );
3763 fprintf(out," /* %3d */ \"%s ::=", i, rp->lhs->name);
3764 for(j=0; j<rp->nrhs; j++){
3765 struct symbol *sp = rp->rhs[j];
3766 fprintf(out," %s", sp->name);
3767 if( sp->type==MULTITERMINAL ){
3768 int k;
3769 for(k=1; k<sp->nsubsym; k++){
3770 fprintf(out,"|%s",sp->subsym[k]->name);
3771 }
3772 }
3773 }
3774 fprintf(out,"\",\n"); lineno++;
3775 }
3776 tplt_xfer(lemp->name,in,out,&lineno);
3777
3778 /* Generate code which executes every time a symbol is popped from
3779 ** the stack while processing errors or while destroying the parser.
3780 ** (In other words, generate the %destructor actions)
3781 */
3782 if( lemp->tokendest ){
3783 for(i=0; i<lemp->nsymbol; i++){
3784 struct symbol *sp = lemp->symbols[i];
3785 if( sp==0 || sp->type!=TERMINAL ) continue;
3786 fprintf(out," case %d:\n",sp->index); lineno++;
3787 }
3788 for(i=0; i<lemp->nsymbol && lemp->symbols[i]->type!=TERMINAL; i++);
3789 if( i<lemp->nsymbol ){
3790 emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
3791 fprintf(out," break;\n"); lineno++;
3792 }
3793 }
3794 if( lemp->vardest ){
3795 struct symbol *dflt_sp = 0;
3796 for(i=0; i<lemp->nsymbol; i++){
3797 struct symbol *sp = lemp->symbols[i];
3798 if( sp==0 || sp->type==TERMINAL ||
3799 sp->index<=0 || sp->destructor!=0 ) continue;
3800 fprintf(out," case %d:\n",sp->index); lineno++;
3801 dflt_sp = sp;
3802 }
3803 if( dflt_sp!=0 ){
3804 emit_destructor_code(out,dflt_sp,lemp,&lineno);
3805 fprintf(out," break;\n"); lineno++;
3806 }
3807 }
3808 for(i=0; i<lemp->nsymbol; i++){
3809 struct symbol *sp = lemp->symbols[i];
3810 if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue;
3811 fprintf(out," case %d:\n",sp->index); lineno++;
3812
3813 /* Combine duplicate destructors into a single case */
3814 for(j=i+1; j<lemp->nsymbol; j++){
3815 struct symbol *sp2 = lemp->symbols[j];
3816 if( sp2 && sp2->type!=TERMINAL && sp2->destructor
3817 && sp2->dtnum==sp->dtnum
3818 && strcmp(sp->destructor,sp2->destructor)==0 ){
3819 fprintf(out," case %d:\n",sp2->index); lineno++;
3820 sp2->destructor = 0;
3821 }
3822 }
3823
3824 emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
3825 fprintf(out," break;\n"); lineno++;
3826 }
3827 tplt_xfer(lemp->name,in,out,&lineno);
3828
3829 /* Generate code which executes whenever the parser stack overflows */
3830 tplt_print(out,lemp,lemp->overflow,lemp->overflowln,&lineno);
3831 tplt_xfer(lemp->name,in,out,&lineno);
3832
3833 /* Generate the table of rule information
3834 **
3835** Note: This code depends on the fact that rules are numbered
3836** sequentially beginning with 0.
3837 */
3838 for(rp=lemp->rule; rp; rp=rp->next){
3839 fprintf(out," { %d, %d },\n",rp->lhs->index,rp->nrhs); lineno++;
3840 }
3841 tplt_xfer(lemp->name,in,out,&lineno);
3842
3843 /* Generate the code which executes during each REDUCE action */
3844 for(rp=lemp->rule; rp; rp=rp->next){
3845 translate_code(lemp, rp);
3846 }
3847 for(rp=lemp->rule; rp; rp=rp->next){
3848 struct rule *rp2;
3849 if( rp->code==0 ) continue;
3850 fprintf(out," case %d:\n",rp->index); lineno++;
3851 for(rp2=rp->next; rp2; rp2=rp2->next){
3852 if( rp2->code==rp->code ){
3853 fprintf(out," case %d:\n",rp2->index); lineno++;
3854 rp2->code = 0;
3855 }
3856 }
3857 emit_code(out,rp,lemp,&lineno);
3858 fprintf(out," break;\n"); lineno++;
3859 }
3860 tplt_xfer(lemp->name,in,out,&lineno);
3861
3862 /* Generate code which executes if a parse fails */
3863 tplt_print(out,lemp,lemp->failure,lemp->failureln,&lineno);
3864 tplt_xfer(lemp->name,in,out,&lineno);
3865
3866 /* Generate code which executes when a syntax error occurs */
3867 tplt_print(out,lemp,lemp->error,lemp->errorln,&lineno);
3868 tplt_xfer(lemp->name,in,out,&lineno);
3869
3870 /* Generate code which executes when the parser accepts its input */
3871 tplt_print(out,lemp,lemp->accept,lemp->acceptln,&lineno);
3872 tplt_xfer(lemp->name,in,out,&lineno);
3873
3874 /* Append any additional code the user desires */
3875 tplt_print(out,lemp,lemp->extracode,lemp->extracodeln,&lineno);
3876
3877 fclose(in);
3878 fclose(out);
3879 return;
3880}
3881
3882/* Generate a header file for the parser */
3883void ReportHeader(lemp)
3884struct lemon *lemp;
3885{
3886 FILE *out, *in;
3887 char *prefix;
3888 char line[LINESIZE];
3889 char pattern[LINESIZE];
3890 int i;
3891
3892 if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
3893 else prefix = "";
3894 in = file_open(lemp,".h","rb");
3895 if( in ){
3896 for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
3897 sprintf(pattern,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
3898 if( strcmp(line,pattern) ) break;
3899 }
3900 fclose(in);
3901 if( i==lemp->nterminal ){
3902 /* No change in the file. Don't rewrite it. */
3903 return;
3904 }
3905 }
3906 out = file_open(lemp,".h","wb");
3907 if( out ){
3908 for(i=1; i<lemp->nterminal; i++){
3909 fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
3910 }
3911 fclose(out);
3912 }
3913 return;
3914}
3915
3916/* Reduce the size of the action tables, if possible, by making use
3917** of defaults.
3918**
3919** In this version, we take the most frequent REDUCE action and make
3920** it the default. Except, there is no default if the wildcard token
3921** is a possible look-ahead.
3922*/
3923void CompressTables(lemp)
3924struct lemon *lemp;
3925{
3926 struct state *stp;
3927 struct action *ap, *ap2;
3928 struct rule *rp, *rp2, *rbest;
3929 int nbest, n;
3930 int i;
3931 int usesWildcard;
3932
3933 for(i=0; i<lemp->nstate; i++){
3934 stp = lemp->sorted[i];
3935 nbest = 0;
3936 rbest = 0;
3937 usesWildcard = 0;
3938
3939 for(ap=stp->ap; ap; ap=ap->next){
3940 if( ap->type==SHIFT && ap->sp==lemp->wildcard ){
3941 usesWildcard = 1;
3942 }
3943 if( ap->type!=REDUCE ) continue;
3944 rp = ap->x.rp;
3945 if( rp==rbest ) continue;
3946 n = 1;
3947 for(ap2=ap->next; ap2; ap2=ap2->next){
3948 if( ap2->type!=REDUCE ) continue;
3949 rp2 = ap2->x.rp;
3950 if( rp2==rbest ) continue;
3951 if( rp2==rp ) n++;
3952 }
3953 if( n>nbest ){
3954 nbest = n;
3955 rbest = rp;
3956 }
3957 }
3958
3959 /* Do not make a default if the number of rules to default
3960 ** is not at least 1 or if the wildcard token is a possible
3961 ** lookahead.
3962 */
3963 if( nbest<1 || usesWildcard ) continue;
3964
3965
3966 /* Combine matching REDUCE actions into a single default */
3967 for(ap=stp->ap; ap; ap=ap->next){
3968 if( ap->type==REDUCE && ap->x.rp==rbest ) break;
3969 }
3970 assert( ap );
3971 ap->sp = Symbol_new("{default}");
3972 for(ap=ap->next; ap; ap=ap->next){
3973 if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED;
3974 }
3975 stp->ap = Action_sort(stp->ap);
3976 }
3977}
3978
3979
3980/*
3981** Compare two states for sorting purposes. The smaller state is the
3982** one with the most non-terminal actions. If they have the same number
3983** of non-terminal actions, then the smaller is the one with the most
3984** token actions.
3985*/
3986static int stateResortCompare(const void *a, const void *b){
3987 const struct state *pA = *(const struct state**)a;
3988 const struct state *pB = *(const struct state**)b;
3989 int n;
3990
3991 n = pB->nNtAct - pA->nNtAct;
3992 if( n==0 ){
3993 n = pB->nTknAct - pA->nTknAct;
3994 }
3995 return n;
3996}
3997
3998
3999/*
4000** Renumber and resort states so that states with fewer choices
4001** occur at the end. Except, keep state 0 as the first state.
4002*/
4003void ResortStates(lemp)
4004struct lemon *lemp;
4005{
4006 int i;
4007 struct state *stp;
4008 struct action *ap;
4009
4010 for(i=0; i<lemp->nstate; i++){
4011 stp = lemp->sorted[i];
4012 stp->nTknAct = stp->nNtAct = 0;
4013 stp->iDflt = lemp->nstate + lemp->nrule;
4014 stp->iTknOfst = NO_OFFSET;
4015 stp->iNtOfst = NO_OFFSET;
4016 for(ap=stp->ap; ap; ap=ap->next){
4017 if( compute_action(lemp,ap)>=0 ){
4018 if( ap->sp->index<lemp->nterminal ){
4019 stp->nTknAct++;
4020 }else if( ap->sp->index<lemp->nsymbol ){
4021 stp->nNtAct++;
4022 }else{
4023 stp->iDflt = compute_action(lemp, ap);
4024 }
4025 }
4026 }
4027 }
4028 qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]),
4029 stateResortCompare);
4030 for(i=0; i<lemp->nstate; i++){
4031 lemp->sorted[i]->statenum = i;
4032 }
4033}
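/* Note that the qsort() above starts at &lemp->sorted[1], so state 0, the
** parser's start state, keeps its position; the generated tables and the
** lempar.c template assume the initial state is numbered 0.
*/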
4034
4035
4036/***************** From the file "set.c" ************************************/
4037/*
4038** Set manipulation routines for the LEMON parser generator.
4039*/
4040
4041static int size = 0;
4042
4043/* Set the set size */
4044void SetSize(n)
4045int n;
4046{
4047 size = n+1;
4048}
4049
4050/* Allocate a new set */
4051char *SetNew(){
4052 char *s;
4053 int i;
4054 s = (char*)malloc( size );
4055 if( s==0 ){
4056 extern void memory_error();
4057 memory_error();
4058 }
4059 for(i=0; i<size; i++) s[i] = 0;
4060 return s;
4061}
4062
4063/* Deallocate a set */
4064void SetFree(s)
4065char *s;
4066{
4067 free(s);
4068}
4069
4070/* Add a new element to the set. Return TRUE if the element was added
4071** and FALSE if it was already there. */
4072int SetAdd(s,e)
4073char *s;
4074int e;
4075{
4076 int rv;
4077 rv = s[e];
4078 s[e] = 1;
4079 return !rv;
4080}
4081
4082/* Add every element of s2 to s1. Return TRUE if s1 changes. */
4083int SetUnion(s1,s2)
4084char *s1;
4085char *s2;
4086{
4087 int i, progress;
4088 progress = 0;
4089 for(i=0; i<size; i++){
4090 if( s2[i]==0 ) continue;
4091 if( s1[i]==0 ){
4092 progress = 1;
4093 s1[i] = 1;
4094 }
4095 }
4096 return progress;
4097}
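/* A minimal usage sketch for these set routines (the values below are
** illustrative only):
**
**    SetSize(n);              -- sets can now hold elements 0 through n
**    s1 = SetNew();
**    s2 = SetNew();
**    SetAdd(s1, 3);           -- returns TRUE: 3 was not already present
**    SetAdd(s2, 3);
**    SetAdd(s2, 7);
**    SetUnion(s1, s2);        -- returns TRUE: s1 gains 7 and is now {3,7}
**    SetFree(s1); SetFree(s2);
*/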
4098/********************** From the file "table.c" ****************************/
4099/*
4100** All code in this file has been automatically generated
4101** from a specification in the file
4102** "table.q"
4103** by the associative array code building program "aagen".
4104** Do not edit this file! Instead, edit the specification
4105** file, then rerun aagen.
4106*/
4107/*
4108** Code for processing tables in the LEMON parser generator.
4109*/
4110
4111PRIVATE int strhash(x)
4112char *x;
4113{
4114 int h = 0;
4115 while( *x) h = h*13 + *(x++);
4116 return h;
4117}
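/* strhash() is a simple multiplicative rolling hash: for a string of
** characters c0 c1 c2 it computes (c0*13 + c1)*13 + c2, so strhash("ab")
** is 'a'*13 + 'b'. Collisions are resolved by the chained hash tables
** below, and the hash is always masked with (size-1), which is why each
** table size must be a power of two.
*/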
4118
4119/* Works like strdup, sort of. Save a string in malloced memory, but
4120** keep strings in a table so that the same string is not in more
4121** than one place.
4122*/
4123char *Strsafe(y)
4124char *y;
4125{
4126 char *z;
4127
4128 if( y==0 ) return 0;
4129 z = Strsafe_find(y);
4130 if( z==0 && (z=malloc( strlen(y)+1 ))!=0 ){
4131 strcpy(z,y);
4132 Strsafe_insert(z);
4133 }
4134 MemoryCheck(z);
4135 return z;
4136}
4137
4138/* There is one instance of the following structure for each
4139** associative array of type "x1".
4140*/
4141struct s_x1 {
4142 int size; /* The number of available slots. */
4143 /* Must be a power of 2 greater than or */
4144 /* equal to 1 */
4145 int count; /* Number of slots currently filled */
4146 struct s_x1node *tbl; /* The data stored here */
4147 struct s_x1node **ht; /* Hash table for lookups */
4148};
4149
4150/* There is one instance of this structure for every data element
4151** in an associative array of type "x1".
4152*/
4153typedef struct s_x1node {
4154 char *data; /* The data */
4155 struct s_x1node *next; /* Next entry with the same hash */
4156 struct s_x1node **from; /* Previous link */
4157} x1node;
4158
4159/* There is only one instance of the array, which is the following */
4160static struct s_x1 *x1a;
4161
4162/* Allocate a new associative array */
4163void Strsafe_init(){
4164 if( x1a ) return;
4165 x1a = (struct s_x1*)malloc( sizeof(struct s_x1) );
4166 if( x1a ){
4167 x1a->size = 1024;
4168 x1a->count = 0;
4169 x1a->tbl = (x1node*)malloc(
4170 (sizeof(x1node) + sizeof(x1node*))*1024 );
4171 if( x1a->tbl==0 ){
4172 free(x1a);
4173 x1a = 0;
4174 }else{
4175 int i;
4176 x1a->ht = (x1node**)&(x1a->tbl[1024]);
4177 for(i=0; i<1024; i++) x1a->ht[i] = 0;
4178 }
4179 }
4180}
4181/* Insert a new record into the array. Return TRUE if successful.
4182** Prior data with the same key is NOT overwritten */
4183int Strsafe_insert(data)
4184char *data;
4185{
4186 x1node *np;
4187 int h;
4188 int ph;
4189
4190 if( x1a==0 ) return 0;
4191 ph = strhash(data);
4192 h = ph & (x1a->size-1);
4193 np = x1a->ht[h];
4194 while( np ){
4195 if( strcmp(np->data,data)==0 ){
4196 /* An existing entry with the same key is found. */
4197 /* Fail because overwrite is not allowed. */
4198 return 0;
4199 }
4200 np = np->next;
4201 }
4202 if( x1a->count>=x1a->size ){
4203 /* Need to make the hash table bigger */
4204 int i,size;
4205 struct s_x1 array;
4206 array.size = size = x1a->size*2;
4207 array.count = x1a->count;
4208 array.tbl = (x1node*)malloc(
4209 (sizeof(x1node) + sizeof(x1node*))*size );
4210 if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
4211 array.ht = (x1node**)&(array.tbl[size]);
4212 for(i=0; i<size; i++) array.ht[i] = 0;
4213 for(i=0; i<x1a->count; i++){
4214 x1node *oldnp, *newnp;
4215 oldnp = &(x1a->tbl[i]);
4216 h = strhash(oldnp->data) & (size-1);
4217 newnp = &(array.tbl[i]);
4218 if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
4219 newnp->next = array.ht[h];
4220 newnp->data = oldnp->data;
4221 newnp->from = &(array.ht[h]);
4222 array.ht[h] = newnp;
4223 }
4224 free(x1a->tbl);
4225 *x1a = array;
4226 }
4227 /* Insert the new data */
4228 h = ph & (x1a->size-1);
4229 np = &(x1a->tbl[x1a->count++]);
4230 np->data = data;
4231 if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next);
4232 np->next = x1a->ht[h];
4233 x1a->ht[h] = np;
4234 np->from = &(x1a->ht[h]);
4235 return 1;
4236}
4237
4238/* Return a pointer to data assigned to the given key. Return NULL
4239** if no such key. */
4240char *Strsafe_find(key)
4241char *key;
4242{
4243 int h;
4244 x1node *np;
4245
4246 if( x1a==0 ) return 0;
4247 h = strhash(key) & (x1a->size-1);
4248 np = x1a->ht[h];
4249 while( np ){
4250 if( strcmp(np->data,key)==0 ) break;
4251 np = np->next;
4252 }
4253 return np ? np->data : 0;
4254}
4255
4256/* Return a pointer to the (terminal or nonterminal) symbol "x".
4257** Create a new symbol if this is the first time "x" has been seen.
4258*/
4259struct symbol *Symbol_new(x)
4260char *x;
4261{
4262 struct symbol *sp;
4263
4264 sp = Symbol_find(x);
4265 if( sp==0 ){
4266 sp = (struct symbol *)malloc( sizeof(struct symbol) );
4267 MemoryCheck(sp);
4268 sp->name = Strsafe(x);
4269 sp->type = isupper(*x) ? TERMINAL : NONTERMINAL;
4270 sp->rule = 0;
4271 sp->fallback = 0;
4272 sp->prec = -1;
4273 sp->assoc = UNK;
4274 sp->firstset = 0;
4275 sp->lambda = LEMON_FALSE;
4276 sp->destructor = 0;
4277 sp->datatype = 0;
4278 Symbol_insert(sp,sp->name);
4279 }
4280 return sp;
4281}
4282
4283/* Compare two symbols for sorting purposes
4284**
4285** Symbols that begin with upper case letters (terminals or tokens)
4286** must sort before symbols that begin with lower case letters
4287** (non-terminals). Other than that, the order does not matter.
4288**
4289** We find experimentally that leaving the symbols in their original
4290** order (the order they appeared in the grammar file) gives the
4291** smallest parser tables in SQLite.
4292*/
4293int Symbolcmpp(struct symbol **a, struct symbol **b){
4294 int i1 = (**a).index + 10000000*((**a).name[0]>'Z');
4295 int i2 = (**b).index + 10000000*((**b).name[0]>'Z');
4296 return i1-i2;
4297}
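/* For example (hypothetical indexes): a terminal "ID" with index 7 keeps
** sort key 7, while a nonterminal "expr" with index 3 has name[0]>'Z' and
** so gets sort key 10000003. Terminals therefore always sort ahead of
** nonterminals, and within each group the original grammar-file order is
** preserved.
*/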
4298
4299/* There is one instance of the following structure for each
4300** associative array of type "x2".
4301*/
4302struct s_x2 {
4303 int size; /* The number of available slots. */
4304 /* Must be a power of 2 greater than or */
4305 /* equal to 1 */
4306 int count; /* Number of slots currently filled */
4307 struct s_x2node *tbl; /* The data stored here */
4308 struct s_x2node **ht; /* Hash table for lookups */
4309};
4310
4311/* There is one instance of this structure for every data element
4312** in an associative array of type "x2".
4313*/
4314typedef struct s_x2node {
4315 struct symbol *data; /* The data */
4316 char *key; /* The key */
4317 struct s_x2node *next; /* Next entry with the same hash */
4318 struct s_x2node **from; /* Previous link */
4319} x2node;
4320
4321/* There is only one instance of the array, which is the following */
4322static struct s_x2 *x2a;
4323
4324/* Allocate a new associative array */
4325void Symbol_init(){
4326 if( x2a ) return;
4327 x2a = (struct s_x2*)malloc( sizeof(struct s_x2) );
4328 if( x2a ){
4329 x2a->size = 128;
4330 x2a->count = 0;
4331 x2a->tbl = (x2node*)malloc(
4332 (sizeof(x2node) + sizeof(x2node*))*128 );
4333 if( x2a->tbl==0 ){
4334 free(x2a);
4335 x2a = 0;
4336 }else{
4337 int i;
4338 x2a->ht = (x2node**)&(x2a->tbl[128]);
4339 for(i=0; i<128; i++) x2a->ht[i] = 0;
4340 }
4341 }
4342}
4343/* Insert a new record into the array. Return TRUE if successful.
4344** Prior data with the same key is NOT overwritten */
4345int Symbol_insert(data,key)
4346struct symbol *data;
4347char *key;
4348{
4349 x2node *np;
4350 int h;
4351 int ph;
4352
4353 if( x2a==0 ) return 0;
4354 ph = strhash(key);
4355 h = ph & (x2a->size-1);
4356 np = x2a->ht[h];
4357 while( np ){
4358 if( strcmp(np->key,key)==0 ){
4359 /* An existing entry with the same key is found. */
4360 /* Fail because overwrite is not allowed. */
4361 return 0;
4362 }
4363 np = np->next;
4364 }
4365 if( x2a->count>=x2a->size ){
4366 /* Need to make the hash table bigger */
4367 int i,size;
4368 struct s_x2 array;
4369 array.size = size = x2a->size*2;
4370 array.count = x2a->count;
4371 array.tbl = (x2node*)malloc(
4372 (sizeof(x2node) + sizeof(x2node*))*size );
4373 if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
4374 array.ht = (x2node**)&(array.tbl[size]);
4375 for(i=0; i<size; i++) array.ht[i] = 0;
4376 for(i=0; i<x2a->count; i++){
4377 x2node *oldnp, *newnp;
4378 oldnp = &(x2a->tbl[i]);
4379 h = strhash(oldnp->key) & (size-1);
4380 newnp = &(array.tbl[i]);
4381 if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
4382 newnp->next = array.ht[h];
4383 newnp->key = oldnp->key;
4384 newnp->data = oldnp->data;
4385 newnp->from = &(array.ht[h]);
4386 array.ht[h] = newnp;
4387 }
4388 free(x2a->tbl);
4389 *x2a = array;
4390 }
4391 /* Insert the new data */
4392 h = ph & (x2a->size-1);
4393 np = &(x2a->tbl[x2a->count++]);
4394 np->key = key;
4395 np->data = data;
4396 if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next);
4397 np->next = x2a->ht[h];
4398 x2a->ht[h] = np;
4399 np->from = &(x2a->ht[h]);
4400 return 1;
4401}
4402
4403/* Return a pointer to data assigned to the given key. Return NULL
4404** if no such key. */
4405struct symbol *Symbol_find(key)
4406char *key;
4407{
4408 int h;
4409 x2node *np;
4410
4411 if( x2a==0 ) return 0;
4412 h = strhash(key) & (x2a->size-1);
4413 np = x2a->ht[h];
4414 while( np ){
4415 if( strcmp(np->key,key)==0 ) break;
4416 np = np->next;
4417 }
4418 return np ? np->data : 0;
4419}
4420
4421/* Return the n-th data element. Return NULL if n is out of range. */
4422struct symbol *Symbol_Nth(n)
4423int n;
4424{
4425 struct symbol *data;
4426 if( x2a && n>0 && n<=x2a->count ){
4427 data = x2a->tbl[n-1].data;
4428 }else{
4429 data = 0;
4430 }
4431 return data;
4432}
4433
4434/* Return the size of the array */
4435int Symbol_count()
4436{
4437 return x2a ? x2a->count : 0;
4438}
4439
4440/* Return an array of pointers to all data in the table.
4441** The array is obtained from malloc. Return NULL if a memory allocation
4442** problem occurs, or if the array is empty. */
4443struct symbol **Symbol_arrayof()
4444{
4445 struct symbol **array;
4446 int i,size;
4447 if( x2a==0 ) return 0;
4448 size = x2a->count;
4449 array = (struct symbol **)malloc( sizeof(struct symbol *)*size );
4450 if( array ){
4451 for(i=0; i<size; i++) array[i] = x2a->tbl[i].data;
4452 }
4453 return array;
4454}
4455
4456/* Compare two configurations */
4457int Configcmp(a,b)
4458struct config *a;
4459struct config *b;
4460{
4461 int x;
4462 x = a->rp->index - b->rp->index;
4463 if( x==0 ) x = a->dot - b->dot;
4464 return x;
4465}
4466
4467/* Compare two states */
4468PRIVATE int statecmp(a,b)
4469struct config *a;
4470struct config *b;
4471{
4472 int rc;
4473 for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){
4474 rc = a->rp->index - b->rp->index;
4475 if( rc==0 ) rc = a->dot - b->dot;
4476 }
4477 if( rc==0 ){
4478 if( a ) rc = 1;
4479 if( b ) rc = -1;
4480 }
4481 return rc;
4482}
4483
4484/* Hash a state */
4485PRIVATE int statehash(a)
4486struct config *a;
4487{
4488 int h=0;
4489 while( a ){
4490 h = h*571 + a->rp->index*37 + a->dot;
4491 a = a->bp;
4492 }
4493 return h;
4494}
4495
4496/* Allocate a new state structure */
4497struct state *State_new()
4498{
4499 struct state *new;
4500 new = (struct state *)malloc( sizeof(struct state) );
4501 MemoryCheck(new);
4502 return new;
4503}
4504
4505/* There is one instance of the following structure for each
4506** associative array of type "x3".
4507*/
4508struct s_x3 {
4509 int size; /* The number of available slots. */
4510 /* Must be a power of 2 greater than or */
4511 /* equal to 1 */
4512 int count; /* Number of slots currently filled */
4513 struct s_x3node *tbl; /* The data stored here */
4514 struct s_x3node **ht; /* Hash table for lookups */
4515};
4516
4517/* There is one instance of this structure for every data element
4518** in an associative array of type "x3".
4519*/
4520typedef struct s_x3node {
4521 struct state *data; /* The data */
4522 struct config *key; /* The key */
4523 struct s_x3node *next; /* Next entry with the same hash */
4524 struct s_x3node **from; /* Previous link */
4525} x3node;
4526
4527/* There is only one instance of the array, which is the following */
4528static struct s_x3 *x3a;
4529
4530/* Allocate a new associative array */
4531void State_init(){
4532 if( x3a ) return;
4533 x3a = (struct s_x3*)malloc( sizeof(struct s_x3) );
4534 if( x3a ){
4535 x3a->size = 128;
4536 x3a->count = 0;
4537 x3a->tbl = (x3node*)malloc(
4538 (sizeof(x3node) + sizeof(x3node*))*128 );
4539 if( x3a->tbl==0 ){
4540 free(x3a);
4541 x3a = 0;
4542 }else{
4543 int i;
4544 x3a->ht = (x3node**)&(x3a->tbl[128]);
4545 for(i=0; i<128; i++) x3a->ht[i] = 0;
4546 }
4547 }
4548}
4549/* Insert a new record into the array. Return TRUE if successful.
4550** Prior data with the same key is NOT overwritten */
4551int State_insert(data,key)
4552struct state *data;
4553struct config *key;
4554{
4555 x3node *np;
4556 int h;
4557 int ph;
4558
4559 if( x3a==0 ) return 0;
4560 ph = statehash(key);
4561 h = ph & (x3a->size-1);
4562 np = x3a->ht[h];
4563 while( np ){
4564 if( statecmp(np->key,key)==0 ){
4565 /* An existing entry with the same key is found. */
4566 /* Fail because overwrite is not allowed. */
4567 return 0;
4568 }
4569 np = np->next;
4570 }
4571 if( x3a->count>=x3a->size ){
4572 /* Need to make the hash table bigger */
4573 int i,size;
4574 struct s_x3 array;
4575 array.size = size = x3a->size*2;
4576 array.count = x3a->count;
4577 array.tbl = (x3node*)malloc(
4578 (sizeof(x3node) + sizeof(x3node*))*size );
4579 if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
4580 array.ht = (x3node**)&(array.tbl[size]);
4581 for(i=0; i<size; i++) array.ht[i] = 0;
4582 for(i=0; i<x3a->count; i++){
4583 x3node *oldnp, *newnp;
4584 oldnp = &(x3a->tbl[i]);
4585 h = statehash(oldnp->key) & (size-1);
4586 newnp = &(array.tbl[i]);
4587 if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
4588 newnp->next = array.ht[h];
4589 newnp->key = oldnp->key;
4590 newnp->data = oldnp->data;
4591 newnp->from = &(array.ht[h]);
4592 array.ht[h] = newnp;
4593 }
4594 free(x3a->tbl);
4595 *x3a = array;
4596 }
4597 /* Insert the new data */
4598 h = ph & (x3a->size-1);
4599 np = &(x3a->tbl[x3a->count++]);
4600 np->key = key;
4601 np->data = data;
4602 if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next);
4603 np->next = x3a->ht[h];
4604 x3a->ht[h] = np;
4605 np->from = &(x3a->ht[h]);
4606 return 1;
4607}
4608
4609/* Return a pointer to data assigned to the given key. Return NULL
4610** if no such key. */
4611struct state *State_find(key)
4612struct config *key;
4613{
4614 int h;
4615 x3node *np;
4616
4617 if( x3a==0 ) return 0;
4618 h = statehash(key) & (x3a->size-1);
4619 np = x3a->ht[h];
4620 while( np ){
4621 if( statecmp(np->key,key)==0 ) break;
4622 np = np->next;
4623 }
4624 return np ? np->data : 0;
4625}
4626
4627/* Return an array of pointers to all data in the table.
4628** The array is obtained from malloc. Return NULL if a memory allocation
4629** problem occurs, or if the array is empty. */
4630struct state **State_arrayof()
4631{
4632 struct state **array;
4633 int i,size;
4634 if( x3a==0 ) return 0;
4635 size = x3a->count;
4636 array = (struct state **)malloc( sizeof(struct state *)*size );
4637 if( array ){
4638 for(i=0; i<size; i++) array[i] = x3a->tbl[i].data;
4639 }
4640 return array;
4641}
4642
4643/* Hash a configuration */
4644PRIVATE int confighash(a)
4645struct config *a;
4646{
4647 int h=0;
4648 h = h*571 + a->rp->index*37 + a->dot;
4649 return h;
4650}
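/* Unlike statehash() above, confighash() looks only at the single
** configuration passed in (its rule index and dot position), not at the
** whole list reachable through the bp pointer; it keys the table of
** individual configurations below.
*/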
4651
4652/* There is one instance of the following structure for each
4653** associative array of type "x4".
4654*/
4655struct s_x4 {
4656 int size; /* The number of available slots. */
4657 /* Must be a power of 2 greater than or */
4658 /* equal to 1 */
4659 int count; /* Number of slots currently filled */
4660 struct s_x4node *tbl; /* The data stored here */
4661 struct s_x4node **ht; /* Hash table for lookups */
4662};
4663
4664/* There is one instance of this structure for every data element
4665** in an associative array of type "x4".
4666*/
4667typedef struct s_x4node {
4668 struct config *data; /* The data */
4669 struct s_x4node *next; /* Next entry with the same hash */
4670 struct s_x4node **from; /* Previous link */
4671} x4node;
4672
4673/* There is only one instance of the array, which is the following */
4674static struct s_x4 *x4a;
4675
4676/* Allocate a new associative array */
4677void Configtable_init(){
4678 if( x4a ) return;
4679 x4a = (struct s_x4*)malloc( sizeof(struct s_x4) );
4680 if( x4a ){
4681 x4a->size = 64;
4682 x4a->count = 0;
4683 x4a->tbl = (x4node*)malloc(
4684 (sizeof(x4node) + sizeof(x4node*))*64 );
4685 if( x4a->tbl==0 ){
4686 free(x4a);
4687 x4a = 0;
4688 }else{
4689 int i;
4690 x4a->ht = (x4node**)&(x4a->tbl[64]);
4691 for(i=0; i<64; i++) x4a->ht[i] = 0;
4692 }
4693 }
4694}
4695/* Insert a new record into the array. Return TRUE if successful.
4696** Prior data with the same key is NOT overwritten */
4697int Configtable_insert(data)
4698struct config *data;
4699{
4700 x4node *np;
4701 int h;
4702 int ph;
4703
4704 if( x4a==0 ) return 0;
4705 ph = confighash(data);
4706 h = ph & (x4a->size-1);
4707 np = x4a->ht[h];
4708 while( np ){
4709 if( Configcmp(np->data,data)==0 ){
4710 /* An existing entry with the same key is found. */
4711 /* Fail because overwrite is not allowed. */
4712 return 0;
4713 }
4714 np = np->next;
4715 }
4716 if( x4a->count>=x4a->size ){
4717 /* Need to make the hash table bigger */
4718 int i,size;
4719 struct s_x4 array;
4720 array.size = size = x4a->size*2;
4721 array.count = x4a->count;
4722 array.tbl = (x4node*)malloc(
4723 (sizeof(x4node) + sizeof(x4node*))*size );
4724 if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
4725 array.ht = (x4node**)&(array.tbl[size]);
4726 for(i=0; i<size; i++) array.ht[i] = 0;
4727 for(i=0; i<x4a->count; i++){
4728 x4node *oldnp, *newnp;
4729 oldnp = &(x4a->tbl[i]);
4730 h = confighash(oldnp->data) & (size-1);
4731 newnp = &(array.tbl[i]);
4732 if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
4733 newnp->next = array.ht[h];
4734 newnp->data = oldnp->data;
4735 newnp->from = &(array.ht[h]);
4736 array.ht[h] = newnp;
4737 }
4738 free(x4a->tbl);
4739 *x4a = array;
4740 }
4741 /* Insert the new data */
4742 h = ph & (x4a->size-1);
4743 np = &(x4a->tbl[x4a->count++]);
4744 np->data = data;
4745 if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next);
4746 np->next = x4a->ht[h];
4747 x4a->ht[h] = np;
4748 np->from = &(x4a->ht[h]);
4749 return 1;
4750}
4751
4752/* Return a pointer to data assigned to the given key. Return NULL
4753** if no such key. */
4754struct config *Configtable_find(key)
4755struct config *key;
4756{
4757 int h;
4758 x4node *np;
4759
4760 if( x4a==0 ) return 0;
4761 h = confighash(key) & (x4a->size-1);
4762 np = x4a->ht[h];
4763 while( np ){
4764 if( Configcmp(np->data,key)==0 ) break;
4765 np = np->next;
4766 }
4767 return np ? np->data : 0;
4768}
4769
4770/* Remove all data from the table. Pass each data element to the function "f"
4771** as it is removed. ("f" may be null to avoid this step.) */
4772void Configtable_clear(f)
4773int(*f)(/* struct config * */);
4774{
4775 int i;
4776 if( x4a==0 || x4a->count==0 ) return;
4777 if( f ) for(i=0; i<x4a->count; i++) (*f)(x4a->tbl[i].data);
4778 for(i=0; i<x4a->size; i++) x4a->ht[i] = 0;
4779 x4a->count = 0;
4780 return;
4781}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/lempar.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/lempar.c
new file mode 100644
index 0000000..97d146d
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/lempar.c
@@ -0,0 +1,778 @@
1/* Driver template for the LEMON parser generator.
2** The author disclaims copyright to this source code.
3*/
4/* First off, the code which follows the "include" declaration
5** in the input file is included. */
6#include <stdio.h>
7%%
8/* Next come all the token values, in a form suitable for use by makeheaders.
9** This section will be null unless lemon is run with the -m switch.
10*/
11/*
12** These constants (all generated automatically by the parser generator)
13** specify the various kinds of tokens (terminals) that the parser
14** understands.
15**
16** Each symbol here is a terminal symbol in the grammar.
17*/
18%%
19/* Make sure the INTERFACE macro is defined.
20*/
21#ifndef INTERFACE
22# define INTERFACE 1
23#endif
24/* The next thing included is a series of defines which control
25** various aspects of the generated parser.
26** YYCODETYPE is the data type used for storing terminal
27** and nonterminal numbers. "unsigned char" is
28** used if there are fewer than 250 terminals
29** and nonterminals. "int" is used otherwise.
30** YYNOCODE is a number of type YYCODETYPE which corresponds
31** to no legal terminal or nonterminal number. This
32** number is used to fill in empty slots of the hash
33** table.
34** YYFALLBACK If defined, this indicates that one or more tokens
35** have fall-back values which should be used if the
36** original value of the token will not parse.
37** YYACTIONTYPE is the data type used for storing parser
38** action values. "unsigned char" is
39** used if there are fewer than 250 rules and
40** states combined. "int" is used otherwise.
41** ParseTOKENTYPE is the data type used for minor tokens given
42** directly to the parser from the tokenizer.
43** YYMINORTYPE is the data type used for all minor tokens.
44** This is typically a union of many types, one of
45** which is ParseTOKENTYPE. The entry in the union
46** for base tokens is called "yy0".
47** YYSTACKDEPTH is the maximum depth of the parser's stack. If
48** zero the stack is dynamically sized using realloc()
49** ParseARG_SDECL A static variable declaration for the %extra_argument
50** ParseARG_PDECL A parameter declaration for the %extra_argument
51** ParseARG_STORE Code to store %extra_argument into yypParser
52** ParseARG_FETCH Code to extract %extra_argument from yypParser
53** YYNSTATE the combined number of states.
54** YYNRULE the number of rules in the grammar
55** YYERRORSYMBOL is the code number of the error symbol. If not
56** defined, then do no error processing.
57*/
58%%
59#define YY_NO_ACTION (YYNSTATE+YYNRULE+2)
60#define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1)
61#define YY_ERROR_ACTION (YYNSTATE+YYNRULE)
62
63/* Next are the tables used to determine what action to take based on the
64** current state and lookahead token. These tables are used to implement
65** functions that take a state number and lookahead value and return an
66** action integer.
67**
68** Suppose the action integer is N. Then the action is determined as
69** follows
70**
71** 0 <= N < YYNSTATE Shift N. That is, push the lookahead
72** token onto the stack and goto state N.
73**
74** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE.
75**
76** N == YYNSTATE+YYNRULE A syntax error has occurred.
77**
78** N == YYNSTATE+YYNRULE+1 The parser accepts its input.
79**
80** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused
81** slots in the yy_action[] table.
82**
83** The action table is constructed as a single large table named yy_action[].
84** Given state S and lookahead X, the action is computed as
85**
86** yy_action[ yy_shift_ofst[S] + X ]
87**
88** If the index value yy_shift_ofst[S]+X is out of range or if the value
89** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S]
90** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table
91** and that yy_default[S] should be used instead.
92**
93** The formula above is for computing the action when the lookahead is
94** a terminal symbol. If the lookahead is a non-terminal (as occurs after
95** a reduce action) then the yy_reduce_ofst[] array is used in place of
96** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
97** YY_SHIFT_USE_DFLT.
98**
99** The following are the tables generated in this section:
100**
101** yy_action[] A single table containing all actions.
102** yy_lookahead[] A table containing the lookahead for each entry in
103** yy_action. Used to detect hash collisions.
104** yy_shift_ofst[] For each state, the offset into yy_action for
105** shifting terminals.
106** yy_reduce_ofst[] For each state, the offset into yy_action for
107** shifting non-terminals after a reduce.
108** yy_default[] Default action for each state.
109*/
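/* A worked example with made-up numbers: if the current state S is 4, the
** lookahead token X is 9, yy_shift_ofst[4] is 23, and yy_lookahead[32] is
** 9, then yy_action[23+9] = yy_action[32] is the action to take. If
** yy_lookahead[32] held any other value, or if index 32 were out of range,
** the parser would use yy_default[4] instead.
*/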
110%%
111#define YY_SZ_ACTTAB (int)(sizeof(yy_action)/sizeof(yy_action[0]))
112
113/* The next table maps tokens into fallback tokens. If a construct
114** like the following:
115**
116** %fallback ID X Y Z.
117**
118** appears in the grammar, then ID becomes a fallback token for X, Y,
119** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
120** but it does not parse, the type of the token is changed to ID and
121** the parse is retried before an error is thrown.
122*/
123#ifdef YYFALLBACK
124static const YYCODETYPE yyFallback[] = {
125%%
126};
127#endif /* YYFALLBACK */
128
129/* The following structure represents a single element of the
130** parser's stack. Information stored includes:
131**
132** + The state number for the parser at this level of the stack.
133**
134** + The value of the token stored at this level of the stack.
135** (In other words, the "major" token.)
136**
137** + The semantic value stored at this level of the stack. This is
138** the information used by the action routines in the grammar.
139** It is sometimes called the "minor" token.
140*/
141struct yyStackEntry {
142 int stateno; /* The state-number */
143 int major; /* The major token value. This is the code
144 ** number for the token at this stack level */
145 YYMINORTYPE minor; /* The user-supplied minor token value. This
146 ** is the value of the token */
147};
148typedef struct yyStackEntry yyStackEntry;
149
150/* The state of the parser is completely contained in an instance of
151** the following structure */
152struct yyParser {
153 int yyidx; /* Index of top element in stack */
154 int yyerrcnt; /* Shifts left before leaving error-recovery mode */
155 ParseARG_SDECL /* A place to hold %extra_argument */
156#if YYSTACKDEPTH<=0
157 int yystksz; /* Current size of the stack */
158 yyStackEntry *yystack; /* The parser's stack */
159#else
160 yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
161#endif
162};
163typedef struct yyParser yyParser;
164
165#ifndef NDEBUG
166#include <stdio.h>
167static FILE *yyTraceFILE = 0;
168static char *yyTracePrompt = 0;
169#endif /* NDEBUG */
170
171#ifndef NDEBUG
172/*
173** Turn parser tracing on by giving a stream to which to write the trace
174** and a prompt to preface each trace message. Tracing is turned off
175** by making either argument NULL
176**
177** Inputs:
178** <ul>
179** <li> A FILE* to which trace output should be written.
180** If NULL, then tracing is turned off.
181** <li> A prefix string written at the beginning of every
182** line of trace output. If NULL, then tracing is
183** turned off.
184** </ul>
185**
186** Outputs:
187** None.
188*/
189void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
190 yyTraceFILE = TraceFILE;
191 yyTracePrompt = zTracePrompt;
192 if( yyTraceFILE==0 ) yyTracePrompt = 0;
193 else if( yyTracePrompt==0 ) yyTraceFILE = 0;
194}
195#endif /* NDEBUG */
196
197#ifndef NDEBUG
198/* For tracing shifts, the names of all terminals and nonterminals
199** are required. The following table supplies these names */
200static const char *const yyTokenName[] = {
201%%
202};
203#endif /* NDEBUG */
204
205#ifndef NDEBUG
206/* For tracing reduce actions, the names of all rules are required.
207*/
208static const char *const yyRuleName[] = {
209%%
210};
211#endif /* NDEBUG */
212
213
214#if YYSTACKDEPTH<=0
215/*
216** Try to increase the size of the parser stack.
217*/
218static void yyGrowStack(yyParser *p){
219 int newSize;
220 yyStackEntry *pNew;
221
222 newSize = p->yystksz*2 + 100;
223 pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
224 if( pNew ){
225 p->yystack = pNew;
226 p->yystksz = newSize;
227#ifndef NDEBUG
228 if( yyTraceFILE ){
229 fprintf(yyTraceFILE,"%sStack grows to %d entries!\n",
230 yyTracePrompt, p->yystksz);
231 }
232#endif
233 }
234}
235#endif
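/* Assuming the stack starts empty, the growth rule above (newSize =
** oldSize*2 + 100) produces stack sizes of 100, 300, 700, 1500, ..., so
** growth is geometric and the amortized cost of a push stays constant.
** If realloc() fails, pNew is NULL and the old stack is left in place;
** the callers then detect that the stack did not grow and invoke
** yyStackOverflow().
*/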
236
237/*
238** This function allocates a new parser.
239** The only argument is a pointer to a function which works like
240** malloc.
241**
242** Inputs:
243** A pointer to the function used to allocate memory.
244**
245** Outputs:
246** A pointer to a parser. This pointer is used in subsequent calls
247** to Parse and ParseFree.
248*/
249void *ParseAlloc(void *(*mallocProc)(size_t)){
250 yyParser *pParser;
251 pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) );
252 if( pParser ){
253 pParser->yyidx = -1;
254#if YYSTACKDEPTH<=0
255 yyGrowStack(pParser);
256#endif
257 }
258 return pParser;
259}
260
261/* The following function deletes the value associated with a
262** symbol. The symbol can be either a terminal or nonterminal.
263** "yymajor" is the symbol code, and "yypminor" is a pointer to
264** the value.
265*/
266static void yy_destructor(YYCODETYPE yymajor, YYMINORTYPE *yypminor){
267 switch( yymajor ){
268 /* Here is inserted the actions which take place when a
269 ** terminal or non-terminal is destroyed. This can happen
270 ** when the symbol is popped from the stack during a
271 ** reduce or during error processing or when a parser is
272 ** being destroyed before it is finished parsing.
273 **
274 ** Note: during a reduce, the only symbols destroyed are those
275 ** which appear on the RHS of the rule, but which are not used
276 ** inside the C code.
277 */
278%%
279 default: break; /* If no destructor action specified: do nothing */
280 }
281}
282
283/*
284** Pop the parser's stack once.
285**
286** If there is a destructor routine associated with the token which
287** is popped from the stack, then call it.
288**
289** Return the major token number for the symbol popped.
290*/
291static int yy_pop_parser_stack(yyParser *pParser){
292 YYCODETYPE yymajor;
293 yyStackEntry *yytos = &pParser->yystack[pParser->yyidx];
294
295 if( pParser->yyidx<0 ) return 0;
296#ifndef NDEBUG
297 if( yyTraceFILE && pParser->yyidx>=0 ){
298 fprintf(yyTraceFILE,"%sPopping %s\n",
299 yyTracePrompt,
300 yyTokenName[yytos->major]);
301 }
302#endif
303 yymajor = yytos->major;
304 yy_destructor( yymajor, &yytos->minor);
305 pParser->yyidx--;
306 return yymajor;
307}
308
309/*
310** Deallocate and destroy a parser. Destructors are all called for
311** all stack elements before shutting the parser down.
312**
313** Inputs:
314** <ul>
315** <li> A pointer to the parser. This should be a pointer
316** obtained from ParseAlloc.
317** <li> A pointer to a function used to reclaim memory obtained
318** from malloc.
319** </ul>
320*/
321void ParseFree(
322 void *p, /* The parser to be deleted */
323 void (*freeProc)(void*) /* Function used to reclaim memory */
324){
325 yyParser *pParser = (yyParser*)p;
326 if( pParser==0 ) return;
327 while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser);
328#if YYSTACKDEPTH<=0
329 free(pParser->yystack);
330#endif
331 (*freeProc)((void*)pParser);
332}
333
334/*
335** Find the appropriate action for a parser given the terminal
336** look-ahead token iLookAhead.
337**
338** If the look-ahead token is YYNOCODE, then check to see if the action is
339** independent of the look-ahead. If it is, return the action, otherwise
340** return YY_NO_ACTION.
341*/
342static int yy_find_shift_action(
343 yyParser *pParser, /* The parser */
344 YYCODETYPE iLookAhead /* The look-ahead token */
345){
346 int i;
347 int stateno = pParser->yystack[pParser->yyidx].stateno;
348
349 if( stateno>YY_SHIFT_MAX || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){
350 return yy_default[stateno];
351 }
352 if( iLookAhead==YYNOCODE ){
353 return YY_NO_ACTION;
354 }
355 i += iLookAhead;
356 if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
357 if( iLookAhead>0 ){
358#ifdef YYFALLBACK
359 int iFallback; /* Fallback token */
360 if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
361 && (iFallback = yyFallback[iLookAhead])!=0 ){
362#ifndef NDEBUG
363 if( yyTraceFILE ){
364 fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
365 yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
366 }
367#endif
368 return yy_find_shift_action(pParser, iFallback);
369 }
370#endif
371#ifdef YYWILDCARD
372 {
373 int j = i - iLookAhead + YYWILDCARD;
374 if( j>=0 && j<YY_SZ_ACTTAB && yy_lookahead[j]==YYWILDCARD ){
375#ifndef NDEBUG
376 if( yyTraceFILE ){
377 fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
378 yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[YYWILDCARD]);
379 }
380#endif /* NDEBUG */
381 return yy_action[j];
382 }
383 }
384#endif /* YYWILDCARD */
385 }
386 return yy_default[stateno];
387 }else{
388 return yy_action[i];
389 }
390}
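/* The lookup above resolves in this order: an exact match in yy_action[];
** then, if YYFALLBACK is defined, the same search repeated with the
** token's %fallback replacement; then, if YYWILDCARD is defined, the
** wildcard entry for this state; and finally yy_default[stateno].
*/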
391
392/*
393** Find the appropriate action for a parser given the non-terminal
394** look-ahead token iLookAhead.
395**
396** If the look-ahead token is YYNOCODE, then check to see if the action is
397** independent of the look-ahead. If it is, return the action, otherwise
398** return YY_NO_ACTION.
399*/
400static int yy_find_reduce_action(
401 int stateno, /* Current state number */
402 YYCODETYPE iLookAhead /* The look-ahead token */
403){
404 int i;
405 /* int stateno = pParser->yystack[pParser->yyidx].stateno; */
406
407 if( stateno>YY_REDUCE_MAX ||
408 (i = yy_reduce_ofst[stateno])==YY_REDUCE_USE_DFLT ){
409 return yy_default[stateno];
410 }
411 if( iLookAhead==YYNOCODE ){
412 return YY_NO_ACTION;
413 }
414 i += iLookAhead;
415 if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
416 return yy_default[stateno];
417 }else{
418 return yy_action[i];
419 }
420}
421
422/*
423** The following routine is called if the stack overflows.
424*/
425static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
426 ParseARG_FETCH;
427 yypParser->yyidx--;
428#ifndef NDEBUG
429 if( yyTraceFILE ){
430 fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
431 }
432#endif
433 while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
434 /* Here code is inserted which will execute if the parser
435 ** stack ever overflows */
436%%
437 ParseARG_STORE; /* Suppress warning about unused %extra_argument var */
438}
439
440/*
441** Perform a shift action.
442*/
443static void yy_shift(
444 yyParser *yypParser, /* The parser to be shifted */
445 int yyNewState, /* The new state to shift in */
446 int yyMajor, /* The major token to shift in */
447 YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */
448){
449 yyStackEntry *yytos;
450 yypParser->yyidx++;
451#if YYSTACKDEPTH>0
452 if( yypParser->yyidx>=YYSTACKDEPTH ){
453 yyStackOverflow(yypParser, yypMinor);
454 return;
455 }
456#else
457 if( yypParser->yyidx>=yypParser->yystksz ){
458 yyGrowStack(yypParser);
459 if( yypParser->yyidx>=yypParser->yystksz ){
460 yyStackOverflow(yypParser, yypMinor);
461 return;
462 }
463 }
464#endif
465 yytos = &yypParser->yystack[yypParser->yyidx];
466 yytos->stateno = yyNewState;
467 yytos->major = yyMajor;
468 yytos->minor = *yypMinor;
469#ifndef NDEBUG
470 if( yyTraceFILE && yypParser->yyidx>0 ){
471 int i;
472 fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState);
473 fprintf(yyTraceFILE,"%sStack:",yyTracePrompt);
474 for(i=1; i<=yypParser->yyidx; i++)
475 fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]);
476 fprintf(yyTraceFILE,"\n");
477 }
478#endif
479}
480
481/* The following table contains information about every rule that
482** is used during the reduce.
483*/
484static const struct {
485 YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
486 unsigned char nrhs; /* Number of right-hand side symbols in the rule */
487} yyRuleInfo[] = {
488%%
489};
490
491static void yy_accept(yyParser*); /* Forward Declaration */
492
493/*
494** Perform a reduce action and the shift that must immediately
495** follow the reduce.
496*/
497static void yy_reduce(
498 yyParser *yypParser, /* The parser */
499 int yyruleno /* Number of the rule by which to reduce */
500){
501 int yygoto; /* The next state */
502 int yyact; /* The next action */
503 YYMINORTYPE yygotominor; /* The LHS of the rule reduced */
504 yyStackEntry *yymsp; /* The top of the parser's stack */
505 int yysize; /* Amount to pop the stack */
506 ParseARG_FETCH;
507 yymsp = &yypParser->yystack[yypParser->yyidx];
508#ifndef NDEBUG
509 if( yyTraceFILE && yyruleno>=0
510 && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
511 fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt,
512 yyRuleName[yyruleno]);
513 }
514#endif /* NDEBUG */
515
516 /* Silence complaints from purify about yygotominor being uninitialized
517 ** in some cases when it is copied into the stack after the following
518 ** switch. yygotominor is uninitialized when a rule reduces that does
519 ** not set the value of its left-hand side nonterminal. Leaving the
520 ** value of the nonterminal uninitialized is utterly harmless as long
521 ** as the value is never used. So really the only thing this code
522 ** accomplishes is to quieten purify.
523 **
524 ** 2007-01-16: The wireshark project (www.wireshark.org) reports that
525** without this code, their parser segfaults. I'm not sure what their
526 ** parser is doing to make this happen. This is the second bug report
527 ** from wireshark this week. Clearly they are stressing Lemon in ways
528 ** that it has not been previously stressed... (SQLite ticket #2172)
529 */
530 memset(&yygotominor, 0, sizeof(yygotominor));
531
532
533 switch( yyruleno ){
534 /* Beginning here are the reduction cases. A typical example
535 ** follows:
536 ** case 0:
537 ** #line <lineno> <grammarfile>
538 ** { ... } // User supplied code
539 ** #line <lineno> <thisfile>
540 ** break;
541 */
542%%
543 };
544 yygoto = yyRuleInfo[yyruleno].lhs;
545 yysize = yyRuleInfo[yyruleno].nrhs;
546 yypParser->yyidx -= yysize;
547 yyact = yy_find_reduce_action(yymsp[-yysize].stateno,yygoto);
548 if( yyact < YYNSTATE ){
549#ifdef NDEBUG
550 /* If we are not debugging and the reduce action popped at least
551 ** one element off the stack, then we can push the new element back
552 ** onto the stack here, and skip the stack overflow test in yy_shift().
553 ** That gives a significant speed improvement. */
554 if( yysize ){
555 yypParser->yyidx++;
556 yymsp -= yysize-1;
557 yymsp->stateno = yyact;
558 yymsp->major = yygoto;
559 yymsp->minor = yygotominor;
560 }else
561#endif
562 {
563 yy_shift(yypParser,yyact,yygoto,&yygotominor);
564 }
565 }else if( yyact == YYNSTATE + YYNRULE + 1 ){
566 yy_accept(yypParser);
567 }
568}
569
570/*
571** The following code executes when the parse fails
572*/
573static void yy_parse_failed(
574 yyParser *yypParser /* The parser */
575){
576 ParseARG_FETCH;
577#ifndef NDEBUG
578 if( yyTraceFILE ){
579 fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
580 }
581#endif
582 while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
583 /* Here code is inserted which will be executed whenever the
584 ** parser fails */
585%%
586 ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
587}
588
589/*
590** The following code executes when a syntax error first occurs.
591*/
592static void yy_syntax_error(
593 yyParser *yypParser, /* The parser */
594 int yymajor, /* The major type of the error token */
595 YYMINORTYPE yyminor /* The minor type of the error token */
596){
597 ParseARG_FETCH;
598#define TOKEN (yyminor.yy0)
599%%
600 ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
601}
602
603/*
604** The following is executed when the parser accepts
605*/
606static void yy_accept(
607 yyParser *yypParser /* The parser */
608){
609 ParseARG_FETCH;
610#ifndef NDEBUG
611 if( yyTraceFILE ){
612 fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
613 }
614#endif
615 while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
616 /* Here code is inserted which will be executed whenever the
617 ** parser accepts */
618%%
619 ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
620}
621
622/* The main parser program.
623** The first argument is a pointer to a structure obtained from
624** "ParseAlloc" which describes the current state of the parser.
625** The second argument is the major token number. The third is
626** the minor token. The fourth optional argument is whatever the
627** user wants (and specified in the grammar) and is available for
628** use by the action routines.
629**
630** Inputs:
631** <ul>
632** <li> A pointer to the parser (an opaque structure.)
633** <li> The major token number.
634** <li> The minor token number.
635 ** <li> An optional argument of a grammar-specified type.
636** </ul>
637**
638** Outputs:
639** None.
640*/
641void Parse(
642 void *yyp, /* The parser */
643 int yymajor, /* The major token code number */
644 ParseTOKENTYPE yyminor /* The value for the token */
645 ParseARG_PDECL /* Optional %extra_argument parameter */
646){
647 YYMINORTYPE yyminorunion;
648 int yyact; /* The parser action. */
649 int yyendofinput; /* True if we are at the end of input */
650 int yyerrorhit = 0; /* True if yymajor has invoked an error */
651 yyParser *yypParser; /* The parser */
652
653 /* (re)initialize the parser, if necessary */
654 yypParser = (yyParser*)yyp;
655 if( yypParser->yyidx<0 ){
656#if YYSTACKDEPTH<=0
657 if( yypParser->yystksz <=0 ){
658 memset(&yyminorunion, 0, sizeof(yyminorunion));
659 yyStackOverflow(yypParser, &yyminorunion);
660 return;
661 }
662#endif
663 yypParser->yyidx = 0;
664 yypParser->yyerrcnt = -1;
665 yypParser->yystack[0].stateno = 0;
666 yypParser->yystack[0].major = 0;
667 }
668 yyminorunion.yy0 = yyminor;
669 yyendofinput = (yymajor==0);
670 ParseARG_STORE;
671
672#ifndef NDEBUG
673 if( yyTraceFILE ){
674 fprintf(yyTraceFILE,"%sInput %s\n",yyTracePrompt,yyTokenName[yymajor]);
675 }
676#endif
677
678 do{
679 yyact = yy_find_shift_action(yypParser,yymajor);
680 if( yyact<YYNSTATE ){
681 yy_shift(yypParser,yyact,yymajor,&yyminorunion);
682 yypParser->yyerrcnt--;
683 if( yyendofinput && yypParser->yyidx>=0 ){
684 yymajor = 0;
685 }else{
686 yymajor = YYNOCODE;
687 }
688 }else if( yyact < YYNSTATE + YYNRULE ){
689 yy_reduce(yypParser,yyact-YYNSTATE);
690 }else if( yyact == YY_ERROR_ACTION ){
691 int yymx;
692#ifndef NDEBUG
693 if( yyTraceFILE ){
694 fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
695 }
696#endif
697#ifdef YYERRORSYMBOL
698 /* A syntax error has occurred.
699 ** The response to an error depends upon whether or not the
700 ** grammar defines an error token "ERROR".
701 **
702 ** This is what we do if the grammar does define ERROR:
703 **
704 ** * Call the %syntax_error function.
705 **
706 ** * Begin popping the stack until we enter a state where
707 ** it is legal to shift the error symbol, then shift
708 ** the error symbol.
709 **
710 ** * Set the error count to three.
711 **
712 ** * Begin accepting and shifting new tokens. No new error
713 ** processing will occur until three tokens have been
714 ** shifted successfully.
715 **
716 */
717 if( yypParser->yyerrcnt<0 ){
718 yy_syntax_error(yypParser,yymajor,yyminorunion);
719 }
720 yymx = yypParser->yystack[yypParser->yyidx].major;
721 if( yymx==YYERRORSYMBOL || yyerrorhit ){
722#ifndef NDEBUG
723 if( yyTraceFILE ){
724 fprintf(yyTraceFILE,"%sDiscard input token %s\n",
725 yyTracePrompt,yyTokenName[yymajor]);
726 }
727#endif
728 yy_destructor(yymajor,&yyminorunion);
729 yymajor = YYNOCODE;
730 }else{
731 while(
732 yypParser->yyidx >= 0 &&
733 yymx != YYERRORSYMBOL &&
734 (yyact = yy_find_reduce_action(
735 yypParser->yystack[yypParser->yyidx].stateno,
736 YYERRORSYMBOL)) >= YYNSTATE
737 ){
738 yy_pop_parser_stack(yypParser);
739 }
740 if( yypParser->yyidx < 0 || yymajor==0 ){
741 yy_destructor(yymajor,&yyminorunion);
742 yy_parse_failed(yypParser);
743 yymajor = YYNOCODE;
744 }else if( yymx!=YYERRORSYMBOL ){
745 YYMINORTYPE u2;
746 u2.YYERRSYMDT = 0;
747 yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2);
748 }
749 }
750 yypParser->yyerrcnt = 3;
751 yyerrorhit = 1;
752#else /* YYERRORSYMBOL is not defined */
753 /* This is what we do if the grammar does not define ERROR:
754 **
755 ** * Report an error message, and throw away the input token.
756 **
757 ** * If the input token is $, then fail the parse.
758 **
759 ** As before, subsequent error messages are suppressed until
760 ** three input tokens have been successfully shifted.
761 */
762 if( yypParser->yyerrcnt<=0 ){
763 yy_syntax_error(yypParser,yymajor,yyminorunion);
764 }
765 yypParser->yyerrcnt = 3;
766 yy_destructor(yymajor,&yyminorunion);
767 if( yyendofinput ){
768 yy_parse_failed(yypParser);
769 }
770 yymajor = YYNOCODE;
771#endif
772 }else{
773 yy_accept(yypParser);
774 yymajor = YYNOCODE;
775 }
776 }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 );
777 return;
778}
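
The Parse() interface documented above, together with ParseAlloc() and ParseFree() defined earlier in this template, is the whole public surface of a Lemon-generated parser. The following is a minimal, hypothetical driver sketch; it assumes a grammar whose %name is Parse and whose %token_type is int, and the TK_NUM/TK_PLUS codes below merely stand in for the generated parse.h. It compiles on its own but links only against a parser that lemon has actually produced from such a grammar.

/* Hypothetical driver for a Lemon-generated parser built from this
** template.  The prototypes match what lempar.c emits for %name "Parse"
** with %token_type "int" and no %extra_argument; TK_NUM and TK_PLUS are
** stand-ins for codes that would normally come from parse.h. */
#include <stdlib.h>

enum { TK_NUM = 1, TK_PLUS = 2 };                  /* stand-ins for parse.h */

void *ParseAlloc(void *(*mallocProc)(size_t));     /* generated */
void  ParseFree(void *p, void (*freeProc)(void*)); /* generated */
void  Parse(void *p, int yymajor, int yyminor);    /* generated */

int main(void){
  void *pParser = ParseAlloc(malloc);
  Parse(pParser, TK_NUM,  1);                 /* feed the tokens of "1 + 2" */
  Parse(pParser, TK_PLUS, 0);
  Parse(pParser, TK_NUM,  2);
  Parse(pParser, 0, 0);                       /* yymajor==0 marks end of input */
  ParseFree(pParser, free);
  return 0;
}
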
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak.awk b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak.awk
new file mode 100644
index 0000000..928d3b6
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak.awk
@@ -0,0 +1,29 @@
1#
2# This script looks for memory leaks by analyzing the output of "sqlite"
3# when compiled with the SQLITE_DEBUG=2 option.
4#
5/[0-9]+ malloc / {
6 mem[$6] = $0
7}
8/[0-9]+ realloc / {
9 mem[$8] = "";
10 mem[$10] = $0
11}
12/[0-9]+ free / {
13 if (mem[$6]=="") {
14 print "*** free without a malloc at",$6
15 }
16 mem[$6] = "";
17 str[$6] = ""
18}
19/^string at / {
20 addr = $4
21 sub("string at " addr " is ","")
22 str[addr] = $0
23}
24END {
25 for(addr in mem){
26 if( mem[addr]=="" ) continue
27 print mem[addr], str[addr]
28 }
29}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak2.awk b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak2.awk
new file mode 100644
index 0000000..5d81b70
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak2.awk
@@ -0,0 +1,29 @@
1# This AWK script reads the output of testfixture when compiled for memory
2# debugging. It generates SQL commands that can be fed into an sqlite
3# instance to determine what memory is never freed. A typical usage would
4# be as follows:
5#
6# make -f memleak.mk fulltest 2>mem.out
7# awk -f ../sqlite/tool/memleak2.awk mem.out | ./sqlite :memory:
8#
9# The job performed by this script is the same as that done by memleak.awk.
10# The difference is that this script uses much less memory when the size
11# of the mem.out file is huge.
12#
13BEGIN {
14 print "CREATE TABLE mem(loc INTEGER PRIMARY KEY, src);"
15}
16/[0-9]+ malloc / {
17 print "INSERT INTO mem VALUES(" strtonum($6) ",'" $0 "');"
18}
19/[0-9]+ realloc / {
20 print "INSERT INTO mem VALUES(" strtonum($10) \
21 ",(SELECT src FROM mem WHERE loc=" strtonum($8) "));"
22 print "DELETE FROM mem WHERE loc=" strtonum($8) ";"
23}
24/[0-9]+ free / {
25 print "DELETE FROM mem WHERE loc=" strtonum($6) ";"
26}
27END {
28 print "SELECT src FROM mem;"
29}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak3.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak3.tcl
new file mode 100644
index 0000000..3c6e9b9
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/memleak3.tcl
@@ -0,0 +1,233 @@
1#!/bin/sh
2# \
3exec `which tclsh` $0 "$@"
4#
5# The author disclaims copyright to this source code. In place of
6# a legal notice, here is a blessing:
7#
8# May you do good and not evil.
9# May you find forgiveness for yourself and forgive others.
10# May you share freely, never taking more than you give.
11######################################################################
12
13set doco "
14This script is a tool to help track down memory leaks in the sqlite
15library. The library must be compiled with the preprocessor symbol
16SQLITE_MEMDEBUG set to at least 2. It must be set to 3 to enable stack
17traces.
18
19To use, run the leaky application and save the standard error output.
20Then, execute this program with the first argument the name of the
21application binary (or interpreter) and the second argument the name of the
22text file that contains the collected stderr output.
23
24If all goes well a summary of unfreed allocations is printed out. If the
25 GNU C library is in use and SQLITE_MEMDEBUG is 3 or greater a stack trace is
26printed out for each unmatched allocation.
27
28If the \"-r <n>\" option is passed, then the program stops and prints out
29the state of the heap immediately after the <n>th call to malloc() or
30realloc().
31
32Example:
33
34$ ./testfixture ../sqlite/test/select1.test 2> memtrace.out
35$ tclsh $argv0 ?-r <malloc-number>? ./testfixture memtrace.out
36"
37
38
39proc usage {} {
40 set prg [file tail $::argv0]
41 puts "Usage: $prg ?-r <malloc-number>? <binary file> <mem trace file>"
42 puts ""
43 puts [string trim $::doco]
44 exit -1
45}
46
47proc shift {listvar} {
48 upvar $listvar l
49 set ret [lindex $l 0]
50 set l [lrange $l 1 end]
51 return $ret
52}
53
54# Argument handling. The following vars are set:
55#
56# $exe - the name of the executable (i.e. "testfixture" or "./sqlite3")
57# $memfile - the name of the file containing the trace output.
58# $report_at - The malloc number to stop and report at. Or -1 to read
59# all of $memfile.
60#
61set report_at -1
62while {[llength $argv]>2} {
63 set arg [shift argv]
64 switch -- $arg {
65 "-r" {
66 set report_at [shift argv]
67 }
68 default {
69 usage
70 }
71 }
72}
73if {[llength $argv]!=2} usage
74set exe [lindex $argv 0]
75set memfile [lindex $argv 1]
76
77# If stack traces are enabled, the 'addr2line' program is called to
78# translate a binary stack address into a human-readable form.
79set addr2line addr2line
80
81# When SQLITE_MEMDEBUG is set as described above, SQLite prints
82# out a line for each malloc(), realloc() or free() call that the
83# library makes. If SQLITE_MEMDEBUG is 3, then a stack trace is printed
84# out before each malloc() and realloc() line.
85#
86# This program parses each line the SQLite library outputs and updates
87# the following global Tcl variables to reflect the "current" state of
88# the heap used by SQLite.
89#
90set nBytes 0 ;# Total number of bytes currently allocated.
91set nMalloc 0 ;# Total number of malloc()/realloc() calls.
92set nPeak 0 ;# Peak of nBytes.
93set iPeak 0 ;# nMalloc when nPeak was set.
94#
95# More detailed state information is stored in the $memmap array.
96# Each key in the memmap array is the address of a chunk of memory
97# currently allocated from the heap. The value is a list of the
98# following form
99#
100# {<number-of-bytes> <malloc id> <stack trace>}
101#
102array unset memmap
103
104proc process_input {input_file array_name} {
105 upvar $array_name mem
106 set input [open $input_file]
107
108 set MALLOC {([[:digit:]]+) malloc ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)}
109 # set STACK {^[[:digit:]]+: STACK: (.*)$}
110 set STACK {^STACK: (.*)$}
111 set FREE {[[:digit:]]+ free ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)}
112 set REALLOC {([[:digit:]]+) realloc ([[:digit:]]+) to ([[:digit:]]+)}
113 append REALLOC { bytes at 0x([[:xdigit:]]+) to 0x([[:xdigit:]]+)}
114
115 set stack ""
116 while { ![eof $input] } {
117 set line [gets $input]
118 if {[regexp $STACK $line dummy stack]} {
119 # Do nothing. The variable $stack now stores the hexadecimal stack dump
120 # for the next malloc() or realloc().
121
122 } elseif { [regexp $MALLOC $line dummy mallocid bytes addr] } {
123 # If this is a 'malloc' line, set an entry in the mem array. Each entry
124 # is a list of length three, the number of bytes allocated , the malloc
125 # number and the stack dump when it was allocated.
126 set mem($addr) [list $bytes "malloc $mallocid" $stack]
127 set stack ""
128
129 # Increase the current heap usage
130 incr ::nBytes $bytes
131
132 # Increase the number of malloc() calls
133 incr ::nMalloc
134
135 if {$::nBytes > $::nPeak} {
136 set ::nPeak $::nBytes
137 set ::iPeak $::nMalloc
138 }
139
140 } elseif { [regexp $FREE $line dummy bytes addr] } {
141 # If this is a 'free' line, remove the entry from the mem array. If the
142 # entry does not exist, or is the wrong number of bytes, announce a
143 # problem. This is more likely a bug in the regular expressions for
144 # this script than an SQLite defect.
145 if { [lindex $mem($addr) 0] != $bytes } {
146 error "byte count mismatch"
147 }
148 unset mem($addr)
149
150 # Decrease the current heap usage
151 incr ::nBytes [expr -1 * $bytes]
152
153 } elseif { [regexp $REALLOC $line dummy mallocid ob b oa a] } {
154 # "free" the old allocation in the internal model:
155 incr ::nBytes [expr -1 * $ob]
156 unset mem($oa);
157
158 # "malloc" the new allocation
159 set mem($a) [list $b "realloc $mallocid" $stack]
160 incr ::nBytes $b
161 set stack ""
162
163 # Increase the number of malloc() calls
164 incr ::nMalloc
165
166 if {$::nBytes > $::nPeak} {
167 set ::nPeak $::nBytes
168 set ::iPeak $::nMalloc
169 }
170
171 } else {
172 # puts "REJECT: $line"
173 }
174
175 if {$::nMalloc==$::report_at} report
176 }
177
178 close $input
179}
180
181proc printstack {stack} {
182 set fcount 10
183 if {[llength $stack]<10} {
184 set fcount [llength $stack]
185 }
186 foreach frame [lrange $stack 1 $fcount] {
187 foreach {f l} [split [exec $::addr2line -f --exe=$::exe $frame] \n] {}
188 puts [format "%-30s %s" $f $l]
189 }
190 if {[llength $stack]>0 } {puts ""}
191}
192
193proc report {} {
194
195 foreach key [array names ::memmap] {
196 set stack [lindex $::memmap($key) 2]
197 set bytes [lindex $::memmap($key) 0]
198 lappend summarymap($stack) $bytes
199 }
200
201 set sorted [list]
202 foreach stack [array names summarymap] {
203 set allocs $summarymap($stack)
204 set sum 0
205 foreach a $allocs {
206 incr sum $a
207 }
208 lappend sorted [list $sum $stack]
209 }
210
211 set sorted [lsort -integer -index 0 $sorted]
212 foreach s $sorted {
213 set sum [lindex $s 0]
214 set stack [lindex $s 1]
215 set allocs $summarymap($stack)
216 puts "$sum bytes in [llength $allocs] chunks ($allocs)"
217 printstack $stack
218 }
219
220 # Print out summary statistics
221 puts "Total allocations : $::nMalloc"
222 puts "Total outstanding allocations: [array size ::memmap]"
223 puts "Current heap usage : $::nBytes bytes"
224 puts "Peak heap usage : $::nPeak bytes (malloc #$::iPeak)"
225
226 exit
227}
228
229process_input $memfile memmap
230report
231
232
233
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/mkkeywordhash.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/mkkeywordhash.c
new file mode 100644
index 0000000..3a34224
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/mkkeywordhash.c
@@ -0,0 +1,559 @@
1/*
2** Compile and run this standalone program in order to generate code that
3** implements a function that will translate alphabetic identifiers into
4** parser token codes.
5*/
6#include <stdio.h>
7#include <string.h>
8#include <stdlib.h>
9
10/*
11** A header comment placed at the beginning of generated code.
12*/
13static const char zHdr[] =
14 "/***** This file contains automatically generated code ******\n"
15 "**\n"
16 "** The code in this file has been automatically generated by\n"
17 "**\n"
18 "** $Header: /sqlite/sqlite/tool/mkkeywordhash.c,v 1.31 2007/07/30 18:26:20 rse Exp $\n"
19 "**\n"
20 "** The code in this file implements a function that determines whether\n"
21 "** or not a given identifier is really an SQL keyword. The same thing\n"
22 "** might be implemented more directly using a hand-written hash table.\n"
23 "** But by using this automatically generated code, the size of the code\n"
24 "** is substantially reduced. This is important for embedded applications\n"
25 "** on platforms with limited memory.\n"
26 "*/\n"
27;
28
29/*
30** All the keywords of the SQL language are stored in a hash
31** table composed of instances of the following structure.
32*/
33typedef struct Keyword Keyword;
34struct Keyword {
35 char *zName; /* The keyword name */
36 char *zTokenType; /* Token value for this keyword */
37 int mask; /* Code this keyword if non-zero */
38 int id; /* Unique ID for this record */
39 int hash; /* Hash on the keyword */
40 int offset; /* Offset to start of name string */
41 int len; /* Length of this keyword, not counting final \000 */
42 int prefix; /* Number of characters in prefix */
43 int longestSuffix; /* Longest suffix that is a prefix on another word */
44 int iNext; /* Index in aKeywordTable[] of next with same hash */
45 int substrId; /* Id to another keyword this keyword is embedded in */
46 int substrOffset; /* Offset into substrId for start of this keyword */
47};
48
49/*
50** Define masks used to determine which keywords are allowed
51*/
52#ifdef SQLITE_OMIT_ALTERTABLE
53# define ALTER 0
54#else
55# define ALTER 0x00000001
56#endif
57#define ALWAYS 0x00000002
58#ifdef SQLITE_OMIT_ANALYZE
59# define ANALYZE 0
60#else
61# define ANALYZE 0x00000004
62#endif
63#ifdef SQLITE_OMIT_ATTACH
64# define ATTACH 0
65#else
66# define ATTACH 0x00000008
67#endif
68#ifdef SQLITE_OMIT_AUTOINCREMENT
69# define AUTOINCR 0
70#else
71# define AUTOINCR 0x00000010
72#endif
73#ifdef SQLITE_OMIT_CAST
74# define CAST 0
75#else
76# define CAST 0x00000020
77#endif
78#ifdef SQLITE_OMIT_COMPOUND_SELECT
79# define COMPOUND 0
80#else
81# define COMPOUND 0x00000040
82#endif
83#ifdef SQLITE_OMIT_CONFLICT_CLAUSE
84# define CONFLICT 0
85#else
86# define CONFLICT 0x00000080
87#endif
88#ifdef SQLITE_OMIT_EXPLAIN
89# define EXPLAIN 0
90#else
91# define EXPLAIN 0x00000100
92#endif
93#ifdef SQLITE_OMIT_FOREIGN_KEY
94# define FKEY 0
95#else
96# define FKEY 0x00000200
97#endif
98#ifdef SQLITE_OMIT_PRAGMA
99# define PRAGMA 0
100#else
101# define PRAGMA 0x00000400
102#endif
103#ifdef SQLITE_OMIT_REINDEX
104# define REINDEX 0
105#else
106# define REINDEX 0x00000800
107#endif
108#ifdef SQLITE_OMIT_SUBQUERY
109# define SUBQUERY 0
110#else
111# define SUBQUERY 0x00001000
112#endif
113#ifdef SQLITE_OMIT_TRIGGER
114# define TRIGGER 0
115#else
116# define TRIGGER 0x00002000
117#endif
118#if defined(SQLITE_OMIT_AUTOVACUUM) && \
119 (defined(SQLITE_OMIT_VACUUM) || defined(SQLITE_OMIT_ATTACH))
120# define VACUUM 0
121#else
122# define VACUUM 0x00004000
123#endif
124#ifdef SQLITE_OMIT_VIEW
125# define VIEW 0
126#else
127# define VIEW 0x00008000
128#endif
129#ifdef SQLITE_OMIT_VIRTUALTABLE
130# define VTAB 0
131#else
132# define VTAB 0x00010000
133#endif
134#ifdef SQLITE_OMIT_AUTOVACUUM
135# define AUTOVACUUM 0
136#else
137# define AUTOVACUUM 0x00020000
138#endif
139
140/*
141** These are the keywords
142*/
143static Keyword aKeywordTable[] = {
144 { "ABORT", "TK_ABORT", CONFLICT|TRIGGER },
145 { "ADD", "TK_ADD", ALTER },
146 { "AFTER", "TK_AFTER", TRIGGER },
147 { "ALL", "TK_ALL", ALWAYS },
148 { "ALTER", "TK_ALTER", ALTER },
149 { "ANALYZE", "TK_ANALYZE", ANALYZE },
150 { "AND", "TK_AND", ALWAYS },
151 { "AS", "TK_AS", ALWAYS },
152 { "ASC", "TK_ASC", ALWAYS },
153 { "ATTACH", "TK_ATTACH", ATTACH },
154 { "AUTOINCREMENT", "TK_AUTOINCR", AUTOINCR },
155 { "BEFORE", "TK_BEFORE", TRIGGER },
156 { "BEGIN", "TK_BEGIN", ALWAYS },
157 { "BETWEEN", "TK_BETWEEN", ALWAYS },
158 { "BY", "TK_BY", ALWAYS },
159 { "CASCADE", "TK_CASCADE", FKEY },
160 { "CASE", "TK_CASE", ALWAYS },
161 { "CAST", "TK_CAST", CAST },
162 { "CHECK", "TK_CHECK", ALWAYS },
163 { "COLLATE", "TK_COLLATE", ALWAYS },
164 { "COLUMN", "TK_COLUMNKW", ALTER },
165 { "COMMIT", "TK_COMMIT", ALWAYS },
166 { "CONFLICT", "TK_CONFLICT", CONFLICT },
167 { "CONSTRAINT", "TK_CONSTRAINT", ALWAYS },
168 { "CREATE", "TK_CREATE", ALWAYS },
169 { "CROSS", "TK_JOIN_KW", ALWAYS },
170 { "CURRENT_DATE", "TK_CTIME_KW", ALWAYS },
171 { "CURRENT_TIME", "TK_CTIME_KW", ALWAYS },
172 { "CURRENT_TIMESTAMP","TK_CTIME_KW", ALWAYS },
173 { "DATABASE", "TK_DATABASE", ATTACH },
174 { "DEFAULT", "TK_DEFAULT", ALWAYS },
175 { "DEFERRED", "TK_DEFERRED", ALWAYS },
176 { "DEFERRABLE", "TK_DEFERRABLE", FKEY },
177 { "DELETE", "TK_DELETE", ALWAYS },
178 { "DESC", "TK_DESC", ALWAYS },
179 { "DETACH", "TK_DETACH", ATTACH },
180 { "DISTINCT", "TK_DISTINCT", ALWAYS },
181 { "DROP", "TK_DROP", ALWAYS },
182 { "END", "TK_END", ALWAYS },
183 { "EACH", "TK_EACH", TRIGGER },
184 { "ELSE", "TK_ELSE", ALWAYS },
185 { "ESCAPE", "TK_ESCAPE", ALWAYS },
186 { "EXCEPT", "TK_EXCEPT", COMPOUND },
187 { "EXCLUSIVE", "TK_EXCLUSIVE", ALWAYS },
188 { "EXISTS", "TK_EXISTS", ALWAYS },
189 { "EXPLAIN", "TK_EXPLAIN", EXPLAIN },
190 { "FAIL", "TK_FAIL", CONFLICT|TRIGGER },
191 { "FOR", "TK_FOR", TRIGGER },
192 { "FOREIGN", "TK_FOREIGN", FKEY },
193 { "FROM", "TK_FROM", ALWAYS },
194 { "FULL", "TK_JOIN_KW", ALWAYS },
195 { "GLOB", "TK_LIKE_KW", ALWAYS },
196 { "GROUP", "TK_GROUP", ALWAYS },
197 { "HAVING", "TK_HAVING", ALWAYS },
198 { "IF", "TK_IF", ALWAYS },
199 { "IGNORE", "TK_IGNORE", CONFLICT|TRIGGER },
200 { "IMMEDIATE", "TK_IMMEDIATE", ALWAYS },
201 { "IN", "TK_IN", ALWAYS },
202 { "INDEX", "TK_INDEX", ALWAYS },
203 { "INITIALLY", "TK_INITIALLY", FKEY },
204 { "INNER", "TK_JOIN_KW", ALWAYS },
205 { "INSERT", "TK_INSERT", ALWAYS },
206 { "INSTEAD", "TK_INSTEAD", TRIGGER },
207 { "INTERSECT", "TK_INTERSECT", COMPOUND },
208 { "INTO", "TK_INTO", ALWAYS },
209 { "IS", "TK_IS", ALWAYS },
210 { "ISNULL", "TK_ISNULL", ALWAYS },
211 { "JOIN", "TK_JOIN", ALWAYS },
212 { "KEY", "TK_KEY", ALWAYS },
213 { "LEFT", "TK_JOIN_KW", ALWAYS },
214 { "LIKE", "TK_LIKE_KW", ALWAYS },
215 { "LIMIT", "TK_LIMIT", ALWAYS },
216 { "MATCH", "TK_MATCH", ALWAYS },
217 { "NATURAL", "TK_JOIN_KW", ALWAYS },
218 { "NOT", "TK_NOT", ALWAYS },
219 { "NOTNULL", "TK_NOTNULL", ALWAYS },
220 { "NULL", "TK_NULL", ALWAYS },
221 { "OF", "TK_OF", ALWAYS },
222 { "OFFSET", "TK_OFFSET", ALWAYS },
223 { "ON", "TK_ON", ALWAYS },
224 { "OR", "TK_OR", ALWAYS },
225 { "ORDER", "TK_ORDER", ALWAYS },
226 { "OUTER", "TK_JOIN_KW", ALWAYS },
227 { "PLAN", "TK_PLAN", EXPLAIN },
228 { "PRAGMA", "TK_PRAGMA", PRAGMA },
229 { "PRIMARY", "TK_PRIMARY", ALWAYS },
230 { "QUERY", "TK_QUERY", EXPLAIN },
231 { "RAISE", "TK_RAISE", TRIGGER },
232 { "REFERENCES", "TK_REFERENCES", FKEY },
233 { "REGEXP", "TK_LIKE_KW", ALWAYS },
234 { "REINDEX", "TK_REINDEX", REINDEX },
235 { "RENAME", "TK_RENAME", ALTER },
236 { "REPLACE", "TK_REPLACE", CONFLICT },
237 { "RESTRICT", "TK_RESTRICT", FKEY },
238 { "RIGHT", "TK_JOIN_KW", ALWAYS },
239 { "ROLLBACK", "TK_ROLLBACK", ALWAYS },
240 { "ROW", "TK_ROW", TRIGGER },
241 { "SELECT", "TK_SELECT", ALWAYS },
242 { "SET", "TK_SET", ALWAYS },
243 { "TABLE", "TK_TABLE", ALWAYS },
244 { "TEMP", "TK_TEMP", ALWAYS },
245 { "TEMPORARY", "TK_TEMP", ALWAYS },
246 { "THEN", "TK_THEN", ALWAYS },
247 { "TO", "TK_TO", ALTER },
248 { "TRANSACTION", "TK_TRANSACTION", ALWAYS },
249 { "TRIGGER", "TK_TRIGGER", TRIGGER },
250 { "UNION", "TK_UNION", COMPOUND },
251 { "UNIQUE", "TK_UNIQUE", ALWAYS },
252 { "UPDATE", "TK_UPDATE", ALWAYS },
253 { "USING", "TK_USING", ALWAYS },
254 { "VACUUM", "TK_VACUUM", VACUUM },
255 { "VALUES", "TK_VALUES", ALWAYS },
256 { "VIEW", "TK_VIEW", VIEW },
257 { "VIRTUAL", "TK_VIRTUAL", VTAB },
258 { "WHEN", "TK_WHEN", ALWAYS },
259 { "WHERE", "TK_WHERE", ALWAYS },
260};
261
262/* Number of keywords */
263static int nKeyword = (sizeof(aKeywordTable)/sizeof(aKeywordTable[0]));
264
265/* An array to map all upper-case characters into their corresponding
266** lower-case character.
267*/
268const unsigned char sqlite3UpperToLower[] = {
269 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
270 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
271 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
272 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103,
273 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,
274 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107,
275 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,
276 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
277 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
278 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,
279 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,
280 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
281 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,
282 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
283 252,253,254,255
284};
285#define UpperToLower sqlite3UpperToLower
286
287/*
288** Comparison function for two Keyword records
289*/
290static int keywordCompare1(const void *a, const void *b){
291 const Keyword *pA = (Keyword*)a;
292 const Keyword *pB = (Keyword*)b;
293 int n = pA->len - pB->len;
294 if( n==0 ){
295 n = strcmp(pA->zName, pB->zName);
296 }
297 return n;
298}
299static int keywordCompare2(const void *a, const void *b){
300 const Keyword *pA = (Keyword*)a;
301 const Keyword *pB = (Keyword*)b;
302 int n = pB->longestSuffix - pA->longestSuffix;
303 if( n==0 ){
304 n = strcmp(pA->zName, pB->zName);
305 }
306 return n;
307}
308static int keywordCompare3(const void *a, const void *b){
309 const Keyword *pA = (Keyword*)a;
310 const Keyword *pB = (Keyword*)b;
311 int n = pA->offset - pB->offset;
312 return n;
313}
314
315/*
316** Return a KeywordTable entry with the given id
317*/
318static Keyword *findById(int id){
319 int i;
320 for(i=0; i<nKeyword; i++){
321 if( aKeywordTable[i].id==id ) break;
322 }
323 return &aKeywordTable[i];
324}
325
326/*
327** This routine does the work. The generated code is printed on standard
328** output.
329*/
330int main(int argc, char **argv){
331 int i, j, k, h;
332 int bestSize, bestCount;
333 int count;
334 int nChar;
335 int totalLen = 0;
336 int aHash[1000]; /* 1000 is much bigger than nKeyword */
337
338 /* Remove entries from the list of keywords that have mask==0 */
339 for(i=j=0; i<nKeyword; i++){
340 if( aKeywordTable[i].mask==0 ) continue;
341 if( j<i ){
342 aKeywordTable[j] = aKeywordTable[i];
343 }
344 j++;
345 }
346 nKeyword = j;
347
348 /* Fill in the lengths of strings and hashes for all entries. */
349 for(i=0; i<nKeyword; i++){
350 Keyword *p = &aKeywordTable[i];
351 p->len = strlen(p->zName);
352 totalLen += p->len;
353 p->hash = (UpperToLower[(int)p->zName[0]]*4) ^
354 (UpperToLower[(int)p->zName[p->len-1]]*3) ^ p->len;
355 p->id = i+1;
356 }
357
358 /* Sort the table from shortest to longest keyword */
359 qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare1);
360
361 /* Look for short keywords embedded in longer keywords */
362 for(i=nKeyword-2; i>=0; i--){
363 Keyword *p = &aKeywordTable[i];
364 for(j=nKeyword-1; j>i && p->substrId==0; j--){
365 Keyword *pOther = &aKeywordTable[j];
366 if( pOther->substrId ) continue;
367 if( pOther->len<=p->len ) continue;
368 for(k=0; k<=pOther->len-p->len; k++){
369 if( memcmp(p->zName, &pOther->zName[k], p->len)==0 ){
370 p->substrId = pOther->id;
371 p->substrOffset = k;
372 break;
373 }
374 }
375 }
376 }
377
378 /* Compute the longestSuffix value for every word */
379 for(i=0; i<nKeyword; i++){
380 Keyword *p = &aKeywordTable[i];
381 if( p->substrId ) continue;
382 for(j=0; j<nKeyword; j++){
383 Keyword *pOther;
384 if( j==i ) continue;
385 pOther = &aKeywordTable[j];
386 if( pOther->substrId ) continue;
387 for(k=p->longestSuffix+1; k<p->len && k<pOther->len; k++){
388 if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
389 p->longestSuffix = k;
390 }
391 }
392 }
393 }
394
395 /* Sort the table into reverse order by length */
396 qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare2);
397
398 /* Fill in the offset for all entries */
399 nChar = 0;
400 for(i=0; i<nKeyword; i++){
401 Keyword *p = &aKeywordTable[i];
402 if( p->offset>0 || p->substrId ) continue;
403 p->offset = nChar;
404 nChar += p->len;
405 for(k=p->len-1; k>=1; k--){
406 for(j=i+1; j<nKeyword; j++){
407 Keyword *pOther = &aKeywordTable[j];
408 if( pOther->offset>0 || pOther->substrId ) continue;
409 if( pOther->len<=k ) continue;
410 if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
411 p = pOther;
412 p->offset = nChar - k;
413 nChar = p->offset + p->len;
414 p->zName += k;
415 p->len -= k;
416 p->prefix = k;
417 j = i;
418 k = p->len;
419 }
420 }
421 }
422 }
423 for(i=0; i<nKeyword; i++){
424 Keyword *p = &aKeywordTable[i];
425 if( p->substrId ){
426 p->offset = findById(p->substrId)->offset + p->substrOffset;
427 }
428 }
429
430 /* Sort the table by offset */
431 qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare3);
432
433 /* Figure out how big to make the hash table in order to minimize the
434 ** number of collisions */
435 bestSize = nKeyword;
436 bestCount = nKeyword*nKeyword;
437 for(i=nKeyword/2; i<=2*nKeyword; i++){
438 for(j=0; j<i; j++) aHash[j] = 0;
439 for(j=0; j<nKeyword; j++){
440 h = aKeywordTable[j].hash % i;
441 aHash[h] *= 2;
442 aHash[h]++;
443 }
444 for(j=count=0; j<i; j++) count += aHash[j];
445 if( count<bestCount ){
446 bestCount = count;
447 bestSize = i;
448 }
449 }
450
451 /* Compute the hash */
452 for(i=0; i<bestSize; i++) aHash[i] = 0;
453 for(i=0; i<nKeyword; i++){
454 h = aKeywordTable[i].hash % bestSize;
455 aKeywordTable[i].iNext = aHash[h];
456 aHash[h] = i+1;
457 }
458
459 /* Begin generating code */
460 printf("%s", zHdr);
461 printf("/* Hash score: %d */\n", bestCount);
462 printf("static int keywordCode(const char *z, int n){\n");
463 printf(" /* zText[] encodes %d bytes of keywords in %d bytes */\n",
464 totalLen + nKeyword, nChar+1 );
465
466 printf(" static const char zText[%d] =\n", nChar+1);
467 for(i=j=0; i<nKeyword; i++){
468 Keyword *p = &aKeywordTable[i];
469 if( p->substrId ) continue;
470 if( j==0 ) printf(" \"");
471 printf("%s", p->zName);
472 j += p->len;
473 if( j>60 ){
474 printf("\"\n");
475 j = 0;
476 }
477 }
478 printf("%s;\n", j>0 ? "\"" : " ");
479
480 printf(" static const unsigned char aHash[%d] = {\n", bestSize);
481 for(i=j=0; i<bestSize; i++){
482 if( j==0 ) printf(" ");
483 printf(" %3d,", aHash[i]);
484 j++;
485 if( j>12 ){
486 printf("\n");
487 j = 0;
488 }
489 }
490 printf("%s };\n", j==0 ? "" : "\n");
491
492 printf(" static const unsigned char aNext[%d] = {\n", nKeyword);
493 for(i=j=0; i<nKeyword; i++){
494 if( j==0 ) printf(" ");
495 printf(" %3d,", aKeywordTable[i].iNext);
496 j++;
497 if( j>12 ){
498 printf("\n");
499 j = 0;
500 }
501 }
502 printf("%s };\n", j==0 ? "" : "\n");
503
504 printf(" static const unsigned char aLen[%d] = {\n", nKeyword);
505 for(i=j=0; i<nKeyword; i++){
506 if( j==0 ) printf(" ");
507 printf(" %3d,", aKeywordTable[i].len+aKeywordTable[i].prefix);
508 j++;
509 if( j>12 ){
510 printf("\n");
511 j = 0;
512 }
513 }
514 printf("%s };\n", j==0 ? "" : "\n");
515
516 printf(" static const unsigned short int aOffset[%d] = {\n", nKeyword);
517 for(i=j=0; i<nKeyword; i++){
518 if( j==0 ) printf(" ");
519 printf(" %3d,", aKeywordTable[i].offset);
520 j++;
521 if( j>12 ){
522 printf("\n");
523 j = 0;
524 }
525 }
526 printf("%s };\n", j==0 ? "" : "\n");
527
528 printf(" static const unsigned char aCode[%d] = {\n", nKeyword);
529 for(i=j=0; i<nKeyword; i++){
530 char *zToken = aKeywordTable[i].zTokenType;
531 if( j==0 ) printf(" ");
532 printf("%s,%*s", zToken, (int)(14-strlen(zToken)), "");
533 j++;
534 if( j>=5 ){
535 printf("\n");
536 j = 0;
537 }
538 }
539 printf("%s };\n", j==0 ? "" : "\n");
540
541 printf(" int h, i;\n");
542 printf(" if( n<2 ) return TK_ID;\n");
543 printf(" h = ((charMap(z[0])*4) ^\n"
544 " (charMap(z[n-1])*3) ^\n"
545 " n) %% %d;\n", bestSize);
546 printf(" for(i=((int)aHash[h])-1; i>=0; i=((int)aNext[i])-1){\n");
547 printf(" if( aLen[i]==n &&"
548 " sqlite3StrNICmp(&zText[aOffset[i]],z,n)==0 ){\n");
549 printf(" return aCode[i];\n");
550 printf(" }\n");
551 printf(" }\n");
552 printf(" return TK_ID;\n");
553 printf("}\n");
554 printf("int sqlite3KeywordCode(const unsigned char *z, int n){\n");
555 printf(" return keywordCode((char*)z, n);\n");
556 printf("}\n");
557
558 return 0;
559}
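
The printf() calls above make the generated lookup hard to visualize. The sketch below is not the generator's output; it is a self-contained illustration of the same scheme with an invented five-keyword table built at run time: hash on the first character, the last character, and the length, then walk a collision chain and confirm with a case-insensitive compare. The keyword set, table size, and numeric stand-ins for the TK_xxx codes are all assumptions for illustration; the real generator additionally packs the keyword text so that shorter keywords overlap longer ones.

/* Illustrative sketch of the generated lookup strategy, with the hash
** chains built at run time instead of at generation time. */
#include <stdio.h>
#include <string.h>
#include <strings.h>    /* strncasecmp (POSIX) */
#include <ctype.h>

#define NKW   5
#define HSIZE 11

static const char *azKw[NKW]  = { "SELECT", "FROM", "WHERE", "AND", "OR" };
static const int   aCode[NKW] = { 101, 102, 103, 104, 105 }; /* stand-ins for TK_xxx */
static int aHash[HSIZE];   /* bucket -> keyword index + 1; 0 means empty bucket */
static int aNext[NKW];     /* next keyword index + 1 in the same bucket */

static int hash(const char *z, int n){
  return ((tolower((unsigned char)z[0])*4) ^
          (tolower((unsigned char)z[n-1])*3) ^ n) % HSIZE;
}

static void buildTable(void){
  int i;
  for(i=0; i<NKW; i++){
    int h = hash(azKw[i], (int)strlen(azKw[i]));
    aNext[i] = aHash[h];
    aHash[h] = i+1;
  }
}

static int keywordCode(const char *z, int n){
  int i;
  for(i=aHash[hash(z,n)]-1; i>=0; i=aNext[i]-1){
    if( (int)strlen(azKw[i])==n && strncasecmp(azKw[i], z, n)==0 ) return aCode[i];
  }
  return 0;   /* stand-in for TK_ID: not a keyword */
}

int main(void){
  buildTable();
  printf("select -> %d, selects -> %d\n",
         keywordCode("select", 6), keywordCode("selects", 7));
  return 0;
}
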
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/mkopts.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/mkopts.tcl
new file mode 100755
index 0000000..e3ddcb9
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/mkopts.tcl
@@ -0,0 +1,51 @@
1#!/usr/bin/tclsh
2#
3# This script is used to generate the array of strings and the enum
4# that appear at the beginning of the C code implementation of a
5# a TCL command and that define the available subcommands for that
6# TCL command.
7
8set prefix {}
9while {![eof stdin]} {
10 set line [gets stdin]
11 if {$line==""} continue
12 regsub -all "\[ \t\n,\]+" [string trim $line] { } line
13 foreach token [split $line { }] {
14 if {![regexp {(([a-zA-Z]+)_)?([_a-zA-Z]+)} $token all px p2 name]} continue
15 lappend namelist [string tolower $name]
16 if {$px!=""} {set prefix $p2}
17 }
18}
19
20puts " static const char *${prefix}_strs\[\] = \173"
21set col 0
22proc put_item x {
23 global col
24 if {$col==0} {puts -nonewline " "}
25 if {$col<2} {
26 puts -nonewline [format " %-21s" $x]
27 incr col
28 } else {
29 puts $x
30 set col 0
31 }
32}
33proc finalize {} {
34 global col
35 if {$col>0} {puts {}}
36 set col 0
37}
38
39foreach name [lsort $namelist] {
40 put_item \"$name\",
41}
42put_item 0
43finalize
44puts " \175;"
45puts " enum ${prefix}_enum \173"
46foreach name [lsort $namelist] {
47 regsub -all {@} $name {} name
48 put_item ${prefix}_[string toupper $name],
49}
50finalize
51puts " \175;"
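
The output of this script is plain C. Assuming, purely for illustration, that stdin carries the tokens "CMD_first CMD_second CMD_third", the emitted fragment would look roughly like the following (column padding approximate):

 static const char *CMD_strs[] = {
  "first",               "second",              "third",
  0
 };
 enum CMD_enum {
  CMD_FIRST,             CMD_SECOND,            CMD_THIRD,
 };
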
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3c.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3c.tcl
new file mode 100644
index 0000000..258beac
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3c.tcl
@@ -0,0 +1,266 @@
1#!/usr/bin/tclsh
2#
3# To build a single huge source file holding all of SQLite (or at
4# least the core components - the test harness, shell, and TCL
5# interface are omitted), first do
6#
7# make target_source
8#
9# The make target above moves all of the source code files into
10# a subdirectory named "tsrc". (This script expects to find the files
11# there and will not work if they are not found.) There are a few
12# generated C code files that are also added to the tsrc directory.
13# For example, the "parse.c" and "parse.h" files to implement
14# the parser are derived from "parse.y" using lemon. And the
15# "keywordhash.h" files is generated by a program named "mkkeywordhash".
16#
17# After the "tsrc" directory has been created and populated, run
18# this script:
19#
20# tclsh mksqlite3c.tcl
21#
22# The amalgamated SQLite code will be written into sqlite3.c
23#
24
25# Begin by reading the "sqlite3.h" header file. Count the number of lines
26# in this file and extract the version number. That information will be
27# needed in order to generate the header of the amalgamation.
28#
29if {[lsearch $argv --nostatic]>=0} {
30 set addstatic 0
31} else {
32 set addstatic 1
33}
34set in [open tsrc/sqlite3.h]
35set cnt 0
36set VERSION ?????
37while {![eof $in]} {
38 set line [gets $in]
39 if {$line=="" && [eof $in]} break
40 incr cnt
41 regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
42}
43close $in
44
45# Open the output file and write a header comment at the beginning
46# of the file.
47#
48set out [open sqlite3.c w]
49set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
50puts $out [subst \
51{/******************************************************************************
52** This file is an amalgamation of many separate C source files from SQLite
53** version $VERSION. By combining all the individual C code files into this
54** single large file, the entire code can be compiled as one translation
55** unit. This allows many compilers to do optimizations that would not be
56** possible if the files were compiled separately. Performance improvements
57** of 5% or more are commonly seen when SQLite is compiled as a single
58** translation unit.
59**
60** This file is all you need to compile SQLite. To use SQLite in other
61** programs, you need this file and the "sqlite3.h" header file that defines
62** the programming interface to the SQLite library. (If you do not have
63** the "sqlite3.h" header file at hand, you will find a copy in the first
64** $cnt lines past this header comment.) Additional code files may be
65** needed if you want a wrapper to interface SQLite with your choice of
66** programming language. The code for the "sqlite3" command-line shell
67** is also in a separate file. This file contains only code for the core
68** SQLite library.
69**
70** This amalgamation was generated on $today.
71*/
72#define SQLITE_AMALGAMATION 1}]
73if {$addstatic} {
74 puts $out \
75{#ifndef SQLITE_PRIVATE
76# define SQLITE_PRIVATE static
77#endif
78#ifndef SQLITE_API
79# define SQLITE_API
80#endif}
81}
82
83# These are the header files used by SQLite. The first time any of these
84# files are seen in a #include statement in the C code, include the complete
85# text of the file in-line. The file only needs to be included once.
86#
87foreach hdr {
88 btree.h
89 btreeInt.h
90 hash.h
91 keywordhash.h
92 mutex.h
93 opcodes.h
94 os_common.h
95 os.h
96 os_os2.h
97 pager.h
98 parse.h
99 sqlite3ext.h
100 sqlite3.h
101 sqliteInt.h
102 sqliteLimit.h
103 vdbe.h
104 vdbeInt.h
105} {
106 set available_hdr($hdr) 1
107}
108set available_hdr(sqlite3.h) 0
109
110# 78 stars used for comment formatting.
111set s78 \
112{*****************************************************************************}
113
114# Insert a comment into the code
115#
116proc section_comment {text} {
117 global out s78
118 set n [string length $text]
119 set nstar [expr {60 - $n}]
120 set stars [string range $s78 0 $nstar]
121 puts $out "/************** $text $stars/"
122}
123
124# Read the source file named $filename and write it into the
125# sqlite3.c output file. If any #include statements are seen,
126# process them appropriately.
127#
128proc copy_file {filename} {
129 global seen_hdr available_hdr out addstatic
130 set tail [file tail $filename]
131 section_comment "Begin file $tail"
132 set in [open $filename r]
133 set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
134 set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \*?(sqlite3[_a-zA-Z0-9]+)\(}
135 if {[file extension $filename]==".h"} {
136 set declpattern " *$declpattern"
137 }
138 set declpattern ^$declpattern
139 while {![eof $in]} {
140 set line [gets $in]
141 if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
142 if {[info exists available_hdr($hdr)]} {
143 if {$available_hdr($hdr)} {
144 if {$hdr!="os_common.h"} {
145 set available_hdr($hdr) 0
146 }
147 section_comment "Include $hdr in the middle of $tail"
148 copy_file tsrc/$hdr
149 section_comment "Continuing where we left off in $tail"
150 }
151 } elseif {![info exists seen_hdr($hdr)]} {
152 set seen_hdr($hdr) 1
153 puts $out $line
154 }
155 } elseif {[regexp {^#ifdef __cplusplus} $line]} {
156 puts $out "#if 0"
157 } elseif {[regexp {^#line} $line]} {
158 # Skip #line directives.
159 } elseif {$addstatic && ![regexp {^(static|typedef)} $line]} {
160 if {[regexp $declpattern $line all funcname]} {
161 # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions
162 # so that linkage can be modified at compile-time.
163 if {[regexp {^sqlite3_} $funcname]} {
164 puts $out "SQLITE_API $line"
165 } else {
166 puts $out "SQLITE_PRIVATE $line"
167 }
168 } elseif {[regexp $varpattern $line all varname]} {
169 # Add the SQLITE_PRIVATE before variable declarations or
170 # definitions for internal use
171 if {![regexp {^sqlite3_} $varname]} {
172 regsub {^extern } $line {} line
173 puts $out "SQLITE_PRIVATE $line"
174 } elseif {![regexp {^SQLITE_EXTERN} $line]} {
175 puts $out "SQLITE_API $line"
176 } else {
177 puts $out $line
178 }
179 } elseif {[regexp {^void \(\*sqlite3_io_trace\)} $line]} {
180 puts $out "SQLITE_API $line"
181 } else {
182 puts $out $line
183 }
184 } else {
185 puts $out $line
186 }
187 }
188 close $in
189 section_comment "End of $tail"
190}
191
192
193# Process the source files. Process files containing commonly
194# used subroutines first in order to help the compiler find
195# inlining opportunities.
196#
197foreach file {
198 sqlite3.h
199
200 date.c
201 os.c
202
203 mem1.c
204 mem2.c
205 mutex.c
206 mutex_os2.c
207 mutex_unix.c
208 mutex_w32.c
209 malloc.c
210 printf.c
211 random.c
212 utf.c
213 util.c
214 hash.c
215 opcodes.c
216
217 os_os2.c
218 os_unix.c
219 os_win.c
220
221 pager.c
222
223 btmutex.c
224 btree.c
225
226 vdbefifo.c
227 vdbemem.c
228 vdbeaux.c
229 vdbeapi.c
230 vdbe.c
231 vdbeblob.c
232 journal.c
233
234 expr.c
235 alter.c
236 analyze.c
237 attach.c
238 auth.c
239 build.c
240 callback.c
241 delete.c
242 func.c
243 insert.c
244 legacy.c
245 loadext.c
246 pragma.c
247 prepare.c
248 select.c
249 table.c
250 trigger.c
251 update.c
252 vacuum.c
253 vtab.c
254 where.c
255
256 parse.c
257
258 tokenize.c
259 complete.c
260
261 main.c
262} {
263 copy_file tsrc/$file
264}
265
266close $out
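
The header written into sqlite3.c above claims that the amalgamation plus "sqlite3.h" is all that is needed to embed SQLite. As a quick sanity check of that claim, a minimal consumer could look like the sketch below; the compile line is only an assumption for a typical unix build, and link flags vary by platform.

/* Build together with the generated amalgamation, e.g.
**   cc demo.c sqlite3.c -lpthread -ldl
** (a typical unix example only; adjust flags for your platform). */
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    return 1;
  }
  if( sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1);",
                   0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "exec failed: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}
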
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3internalh.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3internalh.tcl
new file mode 100644
index 0000000..cacd7cb
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/mksqlite3internalh.tcl
@@ -0,0 +1,145 @@
1#!/usr/bin/tclsh
2#
3# To build a single huge source file holding all of SQLite (or at
4# least the core components - the test harness, shell, and TCL
5# interface are omitted), first do
6#
7# make target_source
8#
9# The make target above moves all of the source code files into
10# a subdirectory named "tsrc". (This script expects to find the files
11# there and will not work if they are not found.) There are a few
12# generated C code files that are also added to the tsrc directory.
13# For example, the "parse.c" and "parse.h" files to implement
14# the parser are derived from "parse.y" using lemon. And the
15# "keywordhash.h" files is generated by a program named "mkkeywordhash".
16#
17# After the "tsrc" directory has been created and populated, run
18# this script:
19#
20# tclsh mksqlite3internalh.tcl
21#
22# The private header amalgamation will be written into sqlite3internal.h
23#
24
25# Begin by reading the "sqlite3.h" header file. Count the number of lines
26# in this file and extract the version number. That information will be
27# needed in order to generate the header of the amalgamation.
28#
29set in [open tsrc/sqlite3.h]
30set cnt 0
31set VERSION ?????
32while {![eof $in]} {
33 set line [gets $in]
34 if {$line=="" && [eof $in]} break
35 incr cnt
36 regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
37}
38close $in
39
40# Open the output file and write a header comment at the beginning
41# of the file.
42#
43set out [open sqlite3internal.h w]
44set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
45puts $out [subst \
46{/******************************************************************************
47** This file is an amalgamation of many private header files from SQLite
48** version $VERSION.
49*/}]
50
51# These are the header files used by SQLite. The first time any of these
52# files are seen in a #include statement in the C code, include the complete
53# text of the file in-line. The file only needs to be included once.
54#
55foreach hdr {
56 btree.h
57 btreeInt.h
58 hash.h
59 keywordhash.h
60 opcodes.h
61 os_common.h
62 os.h
63 os_os2.h
64 pager.h
65 parse.h
66 sqlite3ext.h
67 sqlite3.h
68 sqliteInt.h
69 sqliteLimit.h
70 vdbe.h
71 vdbeInt.h
72} {
73 set available_hdr($hdr) 1
74}
75
76# 78 stars used for comment formatting.
77set s78 \
78{*****************************************************************************}
79
80# Insert a comment into the code
81#
82proc section_comment {text} {
83 global out s78
84 set n [string length $text]
85 set nstar [expr {60 - $n}]
86 set stars [string range $s78 0 $nstar]
87 puts $out "/************** $text $stars/"
88}
89
90# Read the source file named $filename and write it into the
91# sqlite3.c output file. If any #include statements are seen,
92# process them appropriately.
93#
94proc copy_file {filename} {
95 global seen_hdr available_hdr out
96 set tail [file tail $filename]
97 section_comment "Begin file $tail"
98 set in [open $filename r]
99 while {![eof $in]} {
100 set line [gets $in]
101 if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
102 if {[info exists available_hdr($hdr)]} {
103 if {$available_hdr($hdr)} {
104 section_comment "Include $hdr in the middle of $tail"
105 copy_file tsrc/$hdr
106 section_comment "Continuing where we left off in $tail"
107 }
108 } elseif {![info exists seen_hdr($hdr)]} {
109 set seen_hdr($hdr) 1
110 puts $out $line
111 }
112 } elseif {[regexp {^#ifdef __cplusplus} $line]} {
113 puts $out "#if 0"
114 } elseif {[regexp {^#line} $line]} {
115 # Skip #line directives.
116 } else {
117 puts $out $line
118 }
119 }
120 close $in
121 section_comment "End of $tail"
122}
123
124
125# Process the source files. Process files containing commonly
126# used subroutines first in order to help the compiler find
127# inlining opportunities.
128#
129foreach file {
130 sqliteInt.h
131 sqlite3.h
132 btree.h
133 hash.h
134 os.h
135 pager.h
136 parse.h
137 sqlite3ext.h
138 vdbe.h
139} {
140 if {$available_hdr($file)} {
141 copy_file tsrc/$file
142 }
143}
144
145close $out
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/omittest.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/omittest.tcl
new file mode 100644
index 0000000..9fcbfdb
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/omittest.tcl
@@ -0,0 +1,176 @@
1
2set rcsid {$Id: omittest.tcl,v 1.3 2007/09/14 16:20:01 danielk1977 Exp $}
3
4# Documentation for this script. This may be output to stderr
5# if the script is invoked incorrectly.
6set ::USAGE_MESSAGE {
7This Tcl script is used to test the various compile time options
8available for omitting code (the SQLITE_OMIT_xxx options). It
9should be invoked as follows:
10
11 <script> ?-makefile PATH-TO-MAKEFILE?
12
13The default value for ::MAKEFILE is "../Makefile.linux-gcc".
14
15This script builds the testfixture program and runs the SQLite test suite
16once with each SQLITE_OMIT_ option defined and then once with all options
17defined together. Each run is performed in a separate directory created
18as a sub-directory of the current directory by the script. The output
19of the build is saved in <sub-directory>/build.log. The output of the
20test-suite is saved in <sub-directory>/test.log.
21
22Almost any SQLite makefile (except those generated by configure - see below)
23should work. The following properties are required:
24
25 * The makefile should support the "testfixture" target.
26 * The makefile should support the "test" target.
27 * The makefile should support the variable "OPTS" as a way to pass
28 options from the make command line to lemon and the C compiler.
29
30More precisely, the following two invocations must be supported:
31
32 make -f $::MAKEFILE testfixture OPTS="-DSQLITE_OMIT_ALTERTABLE=1"
33 make -f $::MAKEFILE test
34
35Makefiles generated by the sqlite configure program cannot be used as
36they do not respect the OPTS variable.
37}
38
39
40# Build a testfixture executable and run quick.test using it. The first
41# parameter is the name of the directory to create and use to run the
42# test in. The second parameter is a list of OMIT symbols to define
43# when doing so. For example:
44#
45# run_quick_test /tmp/testdir {SQLITE_OMIT_TRIGGER SQLITE_OMIT_VIEW}
46#
47#
48proc run_quick_test {dir omit_symbol_list} {
49 # Compile the value of the OPTS Makefile variable.
50 set opts "-DSQLITE_MEMDEBUG=2 -DSQLITE_DEBUG -DOS_UNIX"
51 foreach sym $omit_symbol_list {
52 append opts " -D${sym}=1"
53 }
54
55 # Create the directory and do the build. If an error occurs return
56 # early without attempting to run the test suite.
57 file mkdir $dir
58 puts -nonewline "Building $dir..."
59 flush stdout
60 set rc [catch {
61 exec make -C $dir -f $::MAKEFILE testfixture OPTS=$opts >& $dir/build.log
62 }]
63 if {$rc} {
64 puts "No good. See $dir/build.log."
65 return
66 } else {
67 puts "Ok"
68 }
69
70 # Create an empty file "$dir/sqlite3". This is to trick the makefile out
71 # of trying to build the sqlite shell. The sqlite shell won't build
72# with some of the OMIT options (e.g. OMIT_COMPLETE).
73 if {![file exists $dir/sqlite3]} {
74 set wr [open $dir/sqlite3 w]
75 puts $wr "dummy"
76 close $wr
77 }
78
79 # Run the test suite.
80 puts -nonewline "Testing $dir..."
81 flush stdout
82 set rc [catch {
83 exec make -C $dir -f $::MAKEFILE test OPTS=$opts >& $dir/test.log
84 }]
85 if {$rc} {
86 puts "No good. See $dir/test.log."
87 } else {
88 puts "Ok"
89 }
90}
91
92
93# This proc processes the command line options passed to this script.
94# Currently the only option supported is "-makefile", default
95# "../Makefile.linux-gcc". Set the ::MAKEFILE variable to the value of this
96# option.
97#
98proc process_options {argv} {
99 set ::MAKEFILE ../Makefile.linux-gcc ;# Default value
100 for {set i 0} {$i < [llength $argv]} {incr i} {
101 switch -- [lindex $argv $i] {
102 -makefile {
103 incr i
104 set ::MAKEFILE [lindex $argv $i]
105 }
106
107 default {
108 puts stderr [string trim $::USAGE_MESSAGE]
109 exit -1
110 }
111 }
112 set ::MAKEFILE [file normalize $::MAKEFILE]
113 }
114}
115
116# Main routine.
117#
118
119proc main {argv} {
120 # List of SQLITE_OMIT_XXX symbols supported by SQLite.
121 set ::SYMBOLS [list \
122 SQLITE_OMIT_ALTERTABLE \
123 SQLITE_OMIT_AUTHORIZATION \
124 SQLITE_OMIT_AUTOINCREMENT \
125 SQLITE_OMIT_AUTOVACUUM \
126 SQLITE_OMIT_BLOB_LITERAL \
127 SQLITE_OMIT_COMPLETE \
128 SQLITE_OMIT_COMPOUND_SELECT \
129 SQLITE_OMIT_CONFLICT_CLAUSE \
130 SQLITE_OMIT_DATETIME_FUNCS \
131 SQLITE_OMIT_EXPLAIN \
132 SQLITE_OMIT_FLOATING_POINT \
133 SQLITE_OMIT_FOREIGN_KEY \
134 SQLITE_OMIT_INCRBLOB \
135 SQLITE_OMIT_INTEGRITY_CHECK \
136 SQLITE_OMIT_MEMORYDB \
137 SQLITE_OMIT_PAGER_PRAGMAS \
138 SQLITE_OMIT_PRAGMA \
139 SQLITE_OMIT_PROGRESS_CALLBACK \
140 SQLITE_OMIT_REINDEX \
141 SQLITE_OMIT_SCHEMA_PRAGMAS \
142 SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS \
143 SQLITE_OMIT_SUBQUERY \
144 SQLITE_OMIT_TCL_VARIABLE \
145 SQLITE_OMIT_TRIGGER \
146 SQLITE_OMIT_UTF16 \
147 SQLITE_OMIT_VACUUM \
148 SQLITE_OMIT_VIEW \
149 SQLITE_OMIT_VIRTUALTABLE \
150 ]
151
152 # Process any command line options.
153 process_options $argv
154
155 # First try a test with all OMIT symbols except SQLITE_OMIT_FLOATING_POINT
156 # and SQLITE_OMIT_PRAGMA defined. The former doesn't work (causes segfaults)
157 # and the latter is currently incompatible with the test suite (this should
158 # be fixed, but it will be a lot of work).
159 set allsyms [list]
160 foreach s $::SYMBOLS {
161 if {$s!="SQLITE_OMIT_FLOATING_POINT" && $s!="SQLITE_OMIT_PRAGMA"} {
162 lappend allsyms $s
163 }
164 }
165 run_quick_test test_OMIT_EVERYTHING $allsyms
166
167 # Now try one quick.test with each of the OMIT symbols defined. Included
168 # are the OMIT_FLOATING_POINT and OMIT_PRAGMA symbols, even though we
169 # know they will fail. It's good to be reminded of this from time to time.
170 foreach sym $::SYMBOLS {
171 set dirname "test_[string range $sym 7 end]"
172 run_quick_test $dirname $sym
173 }
174}
175
176main $argv
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/opcodeDoc.awk b/libraries/sqlite/unix/sqlite-3.5.1/tool/opcodeDoc.awk
new file mode 100644
index 0000000..4920106
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/opcodeDoc.awk
@@ -0,0 +1,23 @@
1#
2# Extract opcode documentation for sqliteVdbe.c and generate HTML
3#
4BEGIN {
5 print "<html><body bgcolor=white>"
6 print "<h1>SQLite Virtual Database Engine Opcodes</h1>"
7 print "<table>"
8}
9/ Opcode: /,/\*\// {
10 if( $2=="Opcode:" ){
11 printf "<tr><td>%s&nbsp;%s&nbsp;%s&nbsp;%s</td>\n<td>\n", $3, $4, $5, $6
12 }else if( $1=="*/" ){
13 printf "</td></tr>\n"
14 }else if( NF>1 ){
15 sub(/^ *\*\* /,"")
16 gsub(/&/,"&amp;")
17 gsub(/</,"&lt;")
18 print
19 }
20}
21END {
22 print "</table></body></html>"
23}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/report1.txt b/libraries/sqlite/unix/sqlite-3.5.1/tool/report1.txt
new file mode 100644
index 0000000..7820b8c
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/report1.txt
@@ -0,0 +1,66 @@
1The SQL database used for ACD contains 113 tables and indices implemented
2in GDBM. The following are statistics on the sizes of keys and data
3within these tables and indices.
4
5Entries: 962080
6Size: 45573853
7Avg Size: 48
8Key Size: 11045299
9Avg Key Size: 12
10Max Key Size: 99
11
12
13 Size of key Cumulative
14 and data Instances Percentage
15------------ ---------- -----------
16 0..8 266 0%
17 9..12 5485 0%
18 13..16 73633 8%
19 17..24 180918 27%
20 25..32 209823 48%
21 33..40 148995 64%
22 41..48 76304 72%
23 49..56 14346 73%
24 57..64 15725 75%
25 65..80 44916 80%
26 81..96 127815 93%
27 97..112 34769 96%
28 113..128 13314 98%
29 129..144 8098 99%
30 145..160 3355 99%
31 161..176 1159 99%
32 177..192 629 99%
33 193..208 221 99%
34 209..224 210 99%
35 225..240 129 99%
36 241..256 57 99%
37 257..288 496 99%
38 289..320 60 99%
39 321..352 37 99%
40 353..384 46 99%
41 385..416 22 99%
42 417..448 24 99%
43 449..480 26 99%
44 481..512 27 99%
45 513..1024 471 99%
46 1025..2048 389 99%
47 2049..4096 182 99%
48 4097..8192 74 99%
49 8193..16384 34 99%
5016385..32768 17 99%
5132769..65536 5 99%
5265537..131073 3 100%
53
54
55This information is gathered to help design the new built-in
56backend for sqlite 2.0. Note in particular that 99% of all
57database entries have a combined key and data size of less than
58144 bytes. So if a leaf node in the new database is able to
59store 144 bytes of combined key and data, only 1% of the leaves
60will require overflow pages. Furthermore, note that no key
61 is larger than 99 bytes, so the key will never be on an
62overflow page.
63
64The average combined size of key+data is 48. Add in 16 bytes of
65overhead for a total of 64. That means that a 1K page will
66store (on average) about 16 entries.
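(Worked out from the figures above: 48 bytes of average key+data plus 16 bytes of assumed overhead is 64 bytes per entry, and 1024 / 64 = 16 entries per 1K page.)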
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/showdb.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/showdb.c
new file mode 100644
index 0000000..b2ed562
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/showdb.c
@@ -0,0 +1,86 @@
1/*
2** A utility for printing all or part of an SQLite database file.
3*/
4#include <stdio.h>
5#include <ctype.h>
6#include <sys/types.h>
7#include <sys/stat.h>
8#include <fcntl.h>
9#include <unistd.h>
10#include <stdlib.h>
11
12
13static int pagesize = 1024;
14static int db = -1;
15static int mxPage = 0;
16static int perLine = 32;
17
18static void out_of_memory(void){
19 fprintf(stderr,"Out of memory...\n");
20 exit(1);
21}
22
23static void print_page(int iPg){
24 unsigned char *aData;
25 int i, j;
26 aData = malloc(pagesize);
27 if( aData==0 ) out_of_memory();
28 lseek(db, (iPg-1)*pagesize, SEEK_SET);
29 read(db, aData, pagesize);
30 fprintf(stdout, "Page %d:\n", iPg);
31 for(i=0; i<pagesize; i += perLine){
32 fprintf(stdout, " %03x: ",i);
33 for(j=0; j<perLine; j++){
34 fprintf(stdout,"%02x ", aData[i+j]);
35 }
36 for(j=0; j<perLine; j++){
37 fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
38 }
39 fprintf(stdout,"\n");
40 }
41 free(aData);
42}
43
44int main(int argc, char **argv){
45 struct stat sbuf;
46 if( argc<2 ){
47 fprintf(stderr,"Usage: %s FILENAME ?PAGE? ...\n", argv[0]);
48 exit(1);
49 }
50 db = open(argv[1], O_RDONLY);
51 if( db<0 ){
52 fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
53 exit(1);
54 }
55 fstat(db, &sbuf);
56 mxPage = sbuf.st_size/pagesize + 1;
57 if( argc==2 ){
58 int i;
59 for(i=1; i<=mxPage; i++) print_page(i);
60 }else{
61 int i;
62 for(i=2; i<argc; i++){
63 int iStart, iEnd;
64 char *zLeft;
65 iStart = strtol(argv[i], &zLeft, 0);
66 if( zLeft && strcmp(zLeft,"..end")==0 ){
67 iEnd = mxPage;
68 }else if( zLeft && zLeft[0]=='.' && zLeft[1]=='.' ){
69 iEnd = strtol(&zLeft[2], 0, 0);
70 }else{
71 iEnd = iStart;
72 }
73 if( iStart<1 || iEnd<iStart || iEnd>mxPage ){
74 fprintf(stderr,
75 "Page argument should be LOWER?..UPPER?. Range 1 to %d\n",
76 mxPage);
77 exit(1);
78 }
79 while( iStart<=iEnd ){
80 print_page(iStart);
81 iStart++;
82 }
83 }
84 }
85 close(db);
86}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/showjournal.c b/libraries/sqlite/unix/sqlite-3.5.1/tool/showjournal.c
new file mode 100644
index 0000000..ec93c91
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/showjournal.c
@@ -0,0 +1,76 @@
1/*
2** A utility for printing an SQLite database journal.
3*/
4#include <stdio.h>
5#include <ctype.h>
6#include <sys/types.h>
7#include <sys/stat.h>
8#include <fcntl.h>
9#include <unistd.h>
10#include <stdlib.h>
11
12
13static int pagesize = 1024;
14static int db = -1;
15static int mxPage = 0;
16
17static void out_of_memory(void){
18 fprintf(stderr,"Out of memory...\n");
19 exit(1);
20}
21
22static void print_page(int iPg){
23 unsigned char *aData;
24 int i, j;
25 aData = malloc(pagesize);
26 if( aData==0 ) out_of_memory();
27 read(db, aData, pagesize);
28 fprintf(stdout, "Page %d:\n", iPg);
29 for(i=0; i<pagesize; i += 16){
30 fprintf(stdout, " %03x: ",i);
31 for(j=0; j<16; j++){
32 fprintf(stdout,"%02x ", aData[i+j]);
33 }
34 for(j=0; j<16; j++){
35 fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
36 }
37 fprintf(stdout,"\n");
38 }
39 free(aData);
40}
41
42int main(int argc, char **argv){
43 struct stat sbuf;
44 unsigned int u;
45 int rc;
46 unsigned char zBuf[10];
47 unsigned char zBuf2[sizeof(u)];
48 if( argc!=2 ){
49 fprintf(stderr,"Usage: %s FILENAME\n", argv[0]);
50 exit(1);
51 }
52 db = open(argv[1], O_RDONLY);
53 if( db<0 ){
54 fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
55 exit(1);
56 }
57 read(db, zBuf, 8);
58 if( zBuf[7]==0xd6 ){
59 read(db, &u, sizeof(u));
60 printf("Records in Journal: %u\n", u);
61 read(db, &u, sizeof(u));
62 printf("Magic Number: 0x%08x\n", u);
63 }
64 read(db, zBuf2, sizeof(zBuf2));
65 u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
66 printf("Database Size: %u\n", u);
67 while( read(db, zBuf2, sizeof(zBuf2))==sizeof(zBuf2) ){
68 u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
69 print_page(u);
70 if( zBuf[7]==0xd6 ){
71 read(db, &u, sizeof(u));
72 printf("Checksum: 0x%08x\n", u);
73 }
74 }
75 close(db);
76}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/soak1.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/soak1.tcl
new file mode 100644
index 0000000..7a78b8d
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/soak1.tcl
@@ -0,0 +1,103 @@
1#!/usr/bin/tclsh
2#
3# Usage:
4#
5# tclsh soak1.tcl local-makefile.mk ?target? ?scenario?
6#
7# This generates many variations on local-makefile.mk (by modifying
8# the OPTS = lines) and runs them with fulltest, one by one. The
9# constructed makefiles are named "soak1.mk".
10#
11# If ?target? is provided, that is the makefile target that is run.
12# The default is "fulltest".
13#
14# If ?scenario? is provided, it is the name of a single scenario to
15# be run. All other scenarios are skipped.
16#
17set localmake [lindex $argv 0]
18set target [lindex $argv 1]
19set scene [lindex $argv 2]
20if {$target==""} {set target fulltest}
21if {$scene==""} {set scene all}
22
23set in [open $localmake]
24set maketxt [read $in]
25close $in
26regsub -all {\\\n} $maketxt {} maketxt
27#set makefilename "soak1-[expr {int(rand()*1000000000)}].mk"
28set makefilename "soak1.mk"
29
30# Generate a makefile
31#
32proc generate_makefile {pattern} {
33 global makefilename maketxt
34 set out [open $makefilename w]
35 set seen_opt 0
36 foreach line [split $maketxt \n] {
37 if {[regexp {^ *#? *OPTS[ =+]} $line]} {
38 if {!$seen_opt} {
39 puts $out "OPTS += -DSQLITE_NO_SYNC=1"
40 foreach x $pattern {
41 puts $out "OPTS += -D$x"
42 }
43 set seen_opt 1
44 }
45 } else {
46 puts $out $line
47 }
48 }
49 close $out
50}
51
52# Run a test
53#
54proc scenario {id title pattern} {
55 global makefilename target scene
56 if {$scene!="all" && $scene!=$id && $scene!=$title} return
57 puts "**************** $title ***************"
58 generate_makefile $pattern
59 exec make -f $makefilename clean >@stdout 2>@stdout
60 exec make -f $makefilename $target >@stdout 2>@stdout
61}
62
63###############################################################################
64# Add new scenarios here
65#
66scenario 0 {Default} {}
67scenario 1 {Debug} {
68 SQLITE_DEBUG=1
69 SQLITE_MEMDEBUG=1
70}
71scenario 2 {Everything} {
72 SQLITE_DEBUG=1
73 SQLITE_MEMDEBUG=1
74 SQLITE_ENABLE_MEMORY_MANAGEMENT=1
75 SQLITE_ENABLE_COLUMN_METADATA=1
76 SQLITE_ENABLE_LOAD_EXTENSION=1 HAVE_DLOPEN=1
77 SQLITE_ENABLE_MEMORY_MANAGEMENT=1
78}
79scenario 3 {Customer-1} {
80 SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
81 THREADSAFE=1 OS_UNIX=1
82 SQLITE_DISABLE_LFS=1
83 SQLITE_DEFAULT_AUTOVACUUM=1
84 SQLITE_DEFAULT_PAGE_SIZE=1024
85 SQLITE_MAX_PAGE_SIZE=4096
86 SQLITE_DEFAULT_CACHE_SIZE=64
87 SQLITE_DEFAULT_TEMP_CACHE_SIZE=32
88 TEMP_STORE=3
89 SQLITE_OMIT_PROGRESS_CALLBACK=1
90 SQLITE_OMIT_LOAD_EXTENSION=1
91 SQLITE_OMIT_VIRTUALTABLE=1
92 SQLITE_ENABLE_IOTRACE=1
93}
94scenario 4 {Small-Cache} {
95 SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
96 THREADSAFE=1 OS_UNIX=1
97 SQLITE_DEFAULT_AUTOVACUUM=1
98 SQLITE_DEFAULT_PAGE_SIZE=1024
99 SQLITE_MAX_PAGE_SIZE=2048
100 SQLITE_DEFAULT_CACHE_SIZE=13
101 SQLITE_DEFAULT_TEMP_CACHE_SIZE=11
102 TEMP_STORE=1
103}
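
A minimal sketch of the OPTS rewriting that generate_makefile performs, run against a made-up makefile fragment (the input lines are invented for illustration and do not come from any real local-makefile.mk):

    set maketxt "CC = gcc\nOPTS = -DSQLITE_OMIT_CURSOR\n#OPTS += -DSQLITE_DEBUG=1\nTOP = ../sqlite"
    foreach line [split $maketxt \n] {
      if {[regexp {^ *#? *OPTS[ =+]} $line]} {
        puts "OPTS line, replaced by the scenario settings: $line"
      } else {
        puts "copied through unchanged: $line"
      }
    }
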
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/space_used.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/space_used.tcl
new file mode 100644
index 0000000..2044aa3
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/space_used.tcl
@@ -0,0 +1,111 @@
1# Run this TCL script using "testfixture" in order to get a report that shows
2# how much disk space is used by a particular database to actually store data
3# versus how much space is unused.
4#
5
6# Get the name of the database to analyze
7#
8if {[llength $argv]!=1} {
9 puts stderr "Usage: $argv0 database-name"
10 exit 1
11}
12set file_to_analyze [lindex $argv 0]
13
14# Open the database
15#
16sqlite db [lindex $argv 0]
17set DB [btree_open [lindex $argv 0]]
18
19# Output the schema for the generated report
20#
21puts \
22{BEGIN;
23CREATE TABLE space_used(
24 name clob, -- Name of a table or index in the database file
25 is_index boolean, -- TRUE if it is an index, false for a table
26 payload int, -- Total amount of data stored in this table or index
27 pri_pages int, -- Number of primary pages used
28 ovfl_pages int, -- Number of overflow pages used
29 pri_unused int, -- Number of unused bytes on primary pages
30 ovfl_unused int -- Number of unused bytes on overflow pages
31);}
32
33# This query will be used to find the root page number for every index and
34# table in the database.
35#
36set sql {
37 SELECT name, type, rootpage FROM sqlite_master
38 UNION ALL
39 SELECT 'sqlite_master', 'table', 2
40 ORDER BY 1
41}
42
43# Initialize variables used for summary statistics.
44#
45set total_size 0
46set total_primary 0
47set total_overflow 0
48set total_unused_primary 0
49set total_unused_ovfl 0
50
51# Analyze every table in the database, one at a time.
52#
53foreach {name type rootpage} [db eval $sql] {
54 set cursor [btree_cursor $DB $rootpage 0]
55 set go [btree_first $cursor]
56 set size 0
57 catch {unset pg_used}
58 set unused_ovfl 0
59 set n_overflow 0
60 while {$go==0} {
61 set payload [btree_payload_size $cursor]
62 incr size $payload
63 set stat [btree_cursor_dump $cursor]
64 set pgno [lindex $stat 0]
65 set freebytes [lindex $stat 4]
66 set pg_used($pgno) $freebytes
67 if {$payload>238} {
68 set n [expr {($payload-238+1019)/1020}]
69 incr n_overflow $n
70 incr unused_ovfl [expr {$n*1020+238-$payload}]
71 }
72 set go [btree_next $cursor]
73 }
74 btree_close_cursor $cursor
75 set n_primary [llength [array names pg_used]]
76 set unused_primary 0
77 foreach x [array names pg_used] {incr unused_primary $pg_used($x)}
78 regsub -all ' $name '' name
79 puts -nonewline "INSERT INTO space_used VALUES('$name'"
80 puts -nonewline ",[expr {$type=="index"}]"
81 puts ",$size,$n_primary,$n_overflow,$unused_primary,$unused_ovfl);"
82 incr total_size $size
83 incr total_primary $n_primary
84 incr total_overflow $n_overflow
85 incr total_unused_primary $unused_primary
86 incr total_unused_ovfl $unused_ovfl
87}
88
89# Output summary statistics:
90#
91puts "-- Total payload size: $total_size"
92puts "-- Total pages used: $total_primary primary and $total_overflow overflow"
93set file_pgcnt [expr {[file size [lindex $argv 0]]/1024}]
94puts -nonewline "-- Total unused bytes on primary pages: $total_unused_primary"
95if {$total_primary>0} {
96 set upp [expr {$total_unused_primary/$total_primary}]
97 puts " (avg $upp bytes/page)"
98} else {
99 puts ""
100}
101puts -nonewline "-- Total unused bytes on overflow pages: $total_unused_ovfl"
102if {$total_overflow>0} {
103 set upp [expr {$total_unused_ovfl/$total_overflow}]
104 puts " (avg $upp bytes/page)"
105} else {
106 puts ""
107}
108set n_free [expr {$file_pgcnt-$total_primary-$total_overflow}]
109if {$n_free>0} {incr n_free -1}
110puts "-- Total pages on freelist: $n_free"
111puts "COMMIT;"
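
The overflow accounting in the loop above assumes 238 bytes of local payload per cell and 1020 usable bytes per overflow page (the constants hard-coded in the script). A worked example with a hypothetical 3000-byte entry:

    set payload 3000                                ;# hypothetical key+data size
    set n      [expr {($payload-238+1019)/1020}]    ;# overflow pages needed  => 3
    set unused [expr {$n*1020+238-$payload}]        ;# slack on those pages   => 298
    puts "overflow pages: $n, unused overflow bytes: $unused"
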
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/spaceanal.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/spaceanal.tcl
new file mode 100644
index 0000000..d70b442
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/spaceanal.tcl
@@ -0,0 +1,859 @@
1# Run this TCL script using "testfixture" in order to get a report that shows
2# how much disk space is used by a particular database to actually store data
3# versus how much space is unused.
4#
5
6if {[catch {
7
8# Get the name of the database to analyze
9#
10#set argv $argv0
11if {[llength $argv]!=1} {
12 puts stderr "Usage: $argv0 database-name"
13 exit 1
14}
15set file_to_analyze [lindex $argv 0]
16if {![file exists $file_to_analyze]} {
17 puts stderr "No such file: $file_to_analyze"
18 exit 1
19}
20if {![file readable $file_to_analyze]} {
21 puts stderr "File is not readable: $file_to_analyze"
22 exit 1
23}
24if {[file size $file_to_analyze]<512} {
25 puts stderr "Empty or malformed database: $file_to_analyze"
26 exit 1
27}
28
29# Maximum distance between pages before we consider it a "gap"
30#
31set MAXGAP 3
32
33# Open the database
34#
35sqlite3 db [lindex $argv 0]
36set DB [btree_open [lindex $argv 0] 1000 0]
37
38# In-memory database for collecting statistics. This script loops through
39# the tables and indices in the database being analyzed, adding a row for each
40# to an in-memory database (for which the schema is shown below). It then
41# queries the in-memory db to produce the space-analysis report.
42#
43sqlite3 mem :memory:
44set tabledef\
45{CREATE TABLE space_used(
46 name clob, -- Name of a table or index in the database file
47 tblname clob, -- Name of associated table
48 is_index boolean, -- TRUE if it is an index, false for a table
49 nentry int, -- Number of entries in the BTree
50 leaf_entries int, -- Number of leaf entries
51 payload int, -- Total amount of data stored in this table or index
52 ovfl_payload int, -- Total amount of data stored on overflow pages
53 ovfl_cnt int, -- Number of entries that use overflow
54 mx_payload int, -- Maximum payload size
55 int_pages int, -- Number of interior pages used
56 leaf_pages int, -- Number of leaf pages used
57 ovfl_pages int, -- Number of overflow pages used
58 int_unused int, -- Number of unused bytes on interior pages
59 leaf_unused int, -- Number of unused bytes on primary pages
60 ovfl_unused int, -- Number of unused bytes on overflow pages
61 gap_cnt int -- Number of gaps in the page layout
62);}
63mem eval $tabledef
64
65proc integerify {real} {
66 return [expr int($real)]
67}
68mem function int integerify
69
70# Quote a string for use in an SQL query. Examples:
71#
72# [quote {hello world}] == {'hello world'}
73# [quote {hello world's}] == {'hello world''s'}
74#
75proc quote {txt} {
76 regsub -all ' $txt '' q
77 return '$q'
78}
79
80# This proc is a wrapper around the btree_cursor_info command. The
81# second argument is an open btree cursor returned by [btree_cursor].
82# The first argument is the name of an array variable that exists in
83# the scope of the caller. If the third argument is non-zero, then
84# info is returned for the page that lies $up entries upwards in the
85# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
86# grandparent etc.)
87#
88# The following entries in that array are filled in with information retrieved
89# using [btree_cursor_info]:
90#
91# $arrayvar(page_no) = The page number
92# $arrayvar(entry_no) = The entry number
93# $arrayvar(page_entries) = Total number of entries on this page
94# $arrayvar(cell_size) = Cell size (local payload + header)
95# $arrayvar(page_freebytes) = Number of free bytes on this page
96# $arrayvar(page_freeblocks) = Number of free blocks on the page
97# $arrayvar(payload_bytes) = Total payload size (local + overflow)
98# $arrayvar(header_bytes) = Header size in bytes
99# $arrayvar(local_payload_bytes) = Local payload size
100# $arrayvar(parent) = Parent page number
101#
102proc cursor_info {arrayvar csr {up 0}} {
103 upvar $arrayvar a
104 foreach [list a(page_no) \
105 a(entry_no) \
106 a(page_entries) \
107 a(cell_size) \
108 a(page_freebytes) \
109 a(page_freeblocks) \
110 a(payload_bytes) \
111 a(header_bytes) \
112 a(local_payload_bytes) \
113 a(parent) \
114 a(first_ovfl) ] [btree_cursor_info $csr $up] break
115}
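
For reference, a typical call sequence for this wrapper looks like the sketch below; it uses the same testfixture btree commands the rest of the script relies on, and $DB/$rootpage stand for whatever database handle and root page the caller already has:

    set csr [btree_cursor $DB $rootpage 0]
    btree_first $csr
    cursor_info ci $csr        ;# fill array "ci" for the page under the cursor
    puts "page $ci(page_no): $ci(page_entries) cells, $ci(page_freebytes) bytes free"
    btree_close_cursor $csr
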
116
117# Determine the page-size of the database. This global variable is used
118# throughout the script.
119#
120set pageSize [db eval {PRAGMA page_size}]
121
122# Analyze every table in the database, one at a time.
123#
124# The following query returns the name and root-page of each table in the
125# database, including the sqlite_master table.
126#
127set sql {
128 SELECT name, rootpage FROM sqlite_master
129 WHERE type='table' AND rootpage>0
130 UNION ALL
131 SELECT 'sqlite_master', 1
132 ORDER BY 1
133}
134set wideZero [expr {10000000000 - 10000000000}]
135foreach {name rootpage} [db eval $sql] {
136 puts stderr "Analyzing table $name..."
137
138 # Code below traverses the table being analyzed (table name $name), using the
139 # btree cursor $csr. Statistics related to table $name are accumulated in
140 # the following variables:
141 #
142 set total_payload $wideZero ;# Payload space used by all entries
143 set total_ovfl $wideZero ;# Payload space on overflow pages
144 set unused_int $wideZero ;# Unused space on interior nodes
145 set unused_leaf $wideZero ;# Unused space on leaf nodes
146 set unused_ovfl $wideZero ;# Unused space on overflow pages
147 set cnt_ovfl $wideZero ;# Number of entries that use overflows
148 set cnt_leaf_entry $wideZero ;# Number of leaf entries
149 set cnt_int_entry $wideZero ;# Number of interior entries
150 set mx_payload $wideZero ;# Maximum payload size
151 set ovfl_pages $wideZero ;# Number of overflow pages used
152 set leaf_pages $wideZero ;# Number of leaf pages
153 set int_pages $wideZero ;# Number of interior pages
154 set gap_cnt 0 ;# Number of holes in the page sequence
155 set prev_pgno 0 ;# Last page number seen
156
157 # As the btree is traversed, the array variable $seen($pgno) is set to 1
158 # the first time page $pgno is encountered.
159 #
160 catch {unset seen}
161
162 # The following loop runs once for each entry in table $name. The table
163 # is traversed using the btree cursor stored in variable $csr
164 #
165 set csr [btree_cursor $DB $rootpage 0]
166 for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
167 incr cnt_leaf_entry
168
169 # Retrieve information about the entry the btree-cursor points to into
170 # the array variable $ci (cursor info).
171 #
172 cursor_info ci $csr
173
174 # Check if the payload of this entry is greater than the current
175 # $mx_payload statistic for the table. Also increase the $total_payload
176 # statistic.
177 #
178 if {$ci(payload_bytes)>$mx_payload} {set mx_payload $ci(payload_bytes)}
179 incr total_payload $ci(payload_bytes)
180
181 # If this entry uses overflow pages, then update the $cnt_ovfl,
182 # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
183 #
184 set ovfl [expr {$ci(payload_bytes)-$ci(local_payload_bytes)}]
185 if {$ovfl} {
186 incr cnt_ovfl
187 incr total_ovfl $ovfl
188 set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
189 incr ovfl_pages $n
190 incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
191 set pglist [btree_ovfl_info $DB $csr]
192 } else {
193 set pglist {}
194 }
195
196 # If this is the first table entry analyzed for the page, then update
197 # the page-related statistics $leaf_pages and $unused_leaf. Also, if
198 # this page has a parent page that has not been analyzed, retrieve
199 # info for the parent and update statistics for it too.
200 #
201 if {![info exists seen($ci(page_no))]} {
202 set seen($ci(page_no)) 1
203 incr leaf_pages
204 incr unused_leaf $ci(page_freebytes)
205 set pglist "$ci(page_no) $pglist"
206
207 # Now check if the page has a parent that has not been analyzed. If
208 # so, update the $int_pages, $cnt_int_entry and $unused_int statistics
209 # accordingly. Then check if the parent page has a parent that has
210 # not yet been analyzed etc.
211 #
212 # set parent $ci(parent_page_no)
213 for {set up 1} \
214 {$ci(parent)!=0 && ![info exists seen($ci(parent))]} {incr up} \
215 {
216 # Mark the parent as seen.
217 #
218 set seen($ci(parent)) 1
219
220 # Retrieve info for the parent and update statistics.
221 cursor_info ci $csr $up
222 incr int_pages
223 incr cnt_int_entry $ci(page_entries)
224 incr unused_int $ci(page_freebytes)
225
226 # parent pages come before their first child
227 set pglist "$ci(page_no) $pglist"
228 }
229 }
230
231 # Check the page list for fragmentation
232 #
233 foreach pg $pglist {
234 if {$pg!=$prev_pgno+1 && $prev_pgno>0} {
235 incr gap_cnt
236 }
237 set prev_pgno $pg
238 }
239 }
240 btree_close_cursor $csr
241
242 # Handle the special case where a table contains no data. In this case
243 # all statistics are zero, except for the number of leaf pages (1) and
244 # the unused bytes on leaf pages ($pageSize - 8).
245 #
246 # An exception to the above is the sqlite_master table. If it is empty
247 # then all statistics are zero except for the number of leaf pages (1),
248 # and the number of unused bytes on leaf pages ($pageSize - 112).
249 #
250 if {[llength [array names seen]]==0} {
251 set leaf_pages 1
252 if {$rootpage==1} {
253 set unused_leaf [expr {$pageSize-112}]
254 } else {
255 set unused_leaf [expr {$pageSize-8}]
256 }
257 }
258
259 # Insert the statistics for the table analyzed into the in-memory database.
260 #
261 set sql "INSERT INTO space_used VALUES("
262 append sql [quote $name]
263 append sql ",[quote $name]"
264 append sql ",0"
265 append sql ",[expr {$cnt_leaf_entry+$cnt_int_entry}]"
266 append sql ",$cnt_leaf_entry"
267 append sql ",$total_payload"
268 append sql ",$total_ovfl"
269 append sql ",$cnt_ovfl"
270 append sql ",$mx_payload"
271 append sql ",$int_pages"
272 append sql ",$leaf_pages"
273 append sql ",$ovfl_pages"
274 append sql ",$unused_int"
275 append sql ",$unused_leaf"
276 append sql ",$unused_ovfl"
277 append sql ",$gap_cnt"
278 append sql );
279 mem eval $sql
280}
281
282# Analyze every index in the database, one at a time.
283#
284# The query below returns the name, associated table and root-page number
285# for every index in the database.
286#
287set sql {
288 SELECT name, tbl_name, rootpage FROM sqlite_master WHERE type='index'
289 ORDER BY 2, 1
290}
291foreach {name tbl_name rootpage} [db eval $sql] {
292 puts stderr "Analyzing index $name of table $tbl_name..."
293
294 # Code below traverses the index being analyzed (index name $name), using the
295 # btree cursor $csr. Statistics related to index $name are accumulated in
296 # the following variables:
297 #
298 set total_payload $wideZero ;# Payload space used by all entries
299 set total_ovfl $wideZero ;# Payload space on overflow pages
300 set unused_leaf $wideZero ;# Unused space on leaf nodes
301 set unused_ovfl $wideZero ;# Unused space on overflow pages
302 set cnt_ovfl $wideZero ;# Number of entries that use overflows
303 set cnt_leaf_entry $wideZero ;# Number of leaf entries
304 set mx_payload $wideZero ;# Maximum payload size
305 set ovfl_pages $wideZero ;# Number of overflow pages used
306 set leaf_pages $wideZero ;# Number of leaf pages
307 set gap_cnt 0 ;# Number of holes in the page sequence
308 set prev_pgno 0 ;# Last page number seen
309
310 # As the btree is traversed, the array variable $seen($pgno) is set to 1
311 # the first time page $pgno is encountered.
312 #
313 catch {unset seen}
314
315 # The following loop runs once for each entry in index $name. The index
316 # is traversed using the btree cursor stored in variable $csr
317 #
318 set csr [btree_cursor $DB $rootpage 0]
319 for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
320 incr cnt_leaf_entry
321
322 # Retrieve information about the entry the btree-cursor points to into
323 # the array variable $ci (cursor info).
324 #
325 cursor_info ci $csr
326
327 # Check if the payload of this entry is greater than the current
328 # $mx_payload statistic for the table. Also increase the $total_payload
329 # statistic.
330 #
331 set payload [btree_keysize $csr]
332 if {$payload>$mx_payload} {set mx_payload $payload}
333 incr total_payload $payload
334
335 # If this entry uses overflow pages, then update the $cnt_ovfl,
336 # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
337 #
338 set ovfl [expr {$payload-$ci(local_payload_bytes)}]
339 if {$ovfl} {
340 incr cnt_ovfl
341 incr total_ovfl $ovfl
342 set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
343 incr ovfl_pages $n
344 incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
345 }
346
347 # If this is the first table entry analyzed for the page, then update
348 # the page-related statistics $leaf_pages and $unused_leaf.
349 #
350 if {![info exists seen($ci(page_no))]} {
351 set seen($ci(page_no)) 1
352 incr leaf_pages
353 incr unused_leaf $ci(page_freebytes)
354 set pg $ci(page_no)
355 if {$prev_pgno>0 && $pg!=$prev_pgno+1} {
356 incr gap_cnt
357 }
358 set prev_pgno $ci(page_no)
359 }
360 }
361 btree_close_cursor $csr
362
363 # Handle the special case where an index contains no data. In this case
364 # all statistics are zero, except for the number of leaf pages (1) and
365 # the unused bytes on leaf pages ($pageSize - 8).
366 #
367 if {[llength [array names seen]]==0} {
368 set leaf_pages 1
369 set unused_leaf [expr {$pageSize-8}]
370 }
371
372 # Insert the statistics for the index analyzed into the in-memory database.
373 #
374 set sql "INSERT INTO space_used VALUES("
375 append sql [quote $name]
376 append sql ",[quote $tbl_name]"
377 append sql ",1"
378 append sql ",$cnt_leaf_entry"
379 append sql ",$cnt_leaf_entry"
380 append sql ",$total_payload"
381 append sql ",$total_ovfl"
382 append sql ",$cnt_ovfl"
383 append sql ",$mx_payload"
384 append sql ",0"
385 append sql ",$leaf_pages"
386 append sql ",$ovfl_pages"
387 append sql ",0"
388 append sql ",$unused_leaf"
389 append sql ",$unused_ovfl"
390 append sql ",$gap_cnt"
391 append sql );
392 mem eval $sql
393}
394
395# Generate a single line of output in the statistics section of the
396# report.
397#
398proc statline {title value {extra {}}} {
399 set len [string length $title]
400 set dots [string range {......................................} $len end]
401 set len [string length $value]
402 set sp2 [string range { } $len end]
403 if {$extra ne ""} {
404 set extra " $extra"
405 }
406 puts "$title$dots $value$sp2$extra"
407}
408
409# Generate a formatted percentage value for $num/$denom
410#
411proc percent {num denom {of {}}} {
412 if {$denom==0.0} {return ""}
413 set v [expr {$num*100.0/$denom}]
414 set of {}
415 if {$v==100.0 || $v<0.001 || ($v>1.0 && $v<99.0)} {
416 return [format {%5.1f%% %s} $v $of]
417 } elseif {$v<0.1 || $v>99.9} {
418 return [format {%7.3f%% %s} $v $of]
419 } else {
420 return [format {%6.2f%% %s} $v $of]
421 }
422}
423
424proc divide {num denom} {
425 if {$denom==0} {return 0.0}
426 return [format %.2f [expr double($num)/double($denom)]]
427}
428
429# Generate a subreport that covers some subset of the database.
430# the $where clause determines which subset to analyze.
431#
432proc subreport {title where} {
433 global pageSize file_pgcnt
434
435 # Query the in-memory database for the sum of various statistics
436 # for the subset of tables/indices identified by the WHERE clause in
437 # $where. Note that even if the WHERE clause matches no rows, the
438 # following query returns exactly one row (because it is an aggregate).
439 #
440 # The results of the query are stored directly by SQLite into local
441 # variables (i.e. $nentry, $nleaf etc.).
442 #
443 mem eval "
444 SELECT
445 int(sum(nentry)) AS nentry,
446 int(sum(leaf_entries)) AS nleaf,
447 int(sum(payload)) AS payload,
448 int(sum(ovfl_payload)) AS ovfl_payload,
449 max(mx_payload) AS mx_payload,
450 int(sum(ovfl_cnt)) as ovfl_cnt,
451 int(sum(leaf_pages)) AS leaf_pages,
452 int(sum(int_pages)) AS int_pages,
453 int(sum(ovfl_pages)) AS ovfl_pages,
454 int(sum(leaf_unused)) AS leaf_unused,
455 int(sum(int_unused)) AS int_unused,
456 int(sum(ovfl_unused)) AS ovfl_unused,
457 int(sum(gap_cnt)) AS gap_cnt
458 FROM space_used WHERE $where" {} {}
459
460 # Output the sub-report title, nicely decorated with * characters.
461 #
462 puts ""
463 set len [string length $title]
464 set stars [string repeat * [expr 65-$len]]
465 puts "*** $title $stars"
466 puts ""
467
468 # Calculate statistics and store the results in TCL variables, as follows:
469 #
470 # total_pages: Database pages consumed.
471 # total_pages_percent: Pages consumed as a percentage of the file.
472 # storage: Bytes consumed.
473 # payload_percent: Payload bytes used as a percentage of $storage.
474 # total_unused: Unused bytes on pages.
475 # avg_payload: Average payload per btree entry.
476 # avg_fanout: Average fanout for internal pages.
477 # avg_unused: Average unused bytes per btree entry.
478 # ovfl_cnt_percent: Percentage of btree entries that use overflow pages.
479 #
480 set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
481 set total_pages_percent [percent $total_pages $file_pgcnt]
482 set storage [expr {$total_pages*$pageSize}]
483 set payload_percent [percent $payload $storage {of storage consumed}]
484 set total_unused [expr {$ovfl_unused+$int_unused+$leaf_unused}]
485 set avg_payload [divide $payload $nleaf]
486 set avg_unused [divide $total_unused $nleaf]
487 if {$int_pages>0} {
488 # TODO: Is this formula correct?
489 set nTab [mem eval "
490 SELECT count(*) FROM (
491 SELECT DISTINCT tblname FROM space_used WHERE $where AND is_index=0
492 )
493 "]
494 set avg_fanout [mem eval "
495 SELECT (sum(leaf_pages+int_pages)-$nTab)/sum(int_pages) FROM space_used
496 WHERE $where AND is_index = 0
497 "]
498 set avg_fanout [format %.2f $avg_fanout]
499 }
500 set ovfl_cnt_percent [percent $ovfl_cnt $nleaf {of all entries}]
501
502 # Print out the sub-report statistics.
503 #
504 statline {Percentage of total database} $total_pages_percent
505 statline {Number of entries} $nleaf
506 statline {Bytes of storage consumed} $storage
507 statline {Bytes of payload} $payload $payload_percent
508 statline {Average payload per entry} $avg_payload
509 statline {Average unused bytes per entry} $avg_unused
510 if {[info exists avg_fanout]} {
511 statline {Average fanout} $avg_fanout
512 }
513 if {$total_pages>1} {
514 set fragmentation [percent $gap_cnt [expr {$total_pages-1}] {fragmentation}]
515 statline {Fragmentation} $fragmentation
516 }
517 statline {Maximum payload per entry} $mx_payload
518 statline {Entries that use overflow} $ovfl_cnt $ovfl_cnt_percent
519 if {$int_pages>0} {
520 statline {Index pages used} $int_pages
521 }
522 statline {Primary pages used} $leaf_pages
523 statline {Overflow pages used} $ovfl_pages
524 statline {Total pages used} $total_pages
525 if {$int_unused>0} {
526 set int_unused_percent \
527 [percent $int_unused [expr {$int_pages*$pageSize}] {of index space}]
528 statline "Unused bytes on index pages" $int_unused $int_unused_percent
529 }
530 statline "Unused bytes on primary pages" $leaf_unused \
531 [percent $leaf_unused [expr {$leaf_pages*$pageSize}] {of primary space}]
532 statline "Unused bytes on overflow pages" $ovfl_unused \
533 [percent $ovfl_unused [expr {$ovfl_pages*$pageSize}] {of overflow space}]
534 statline "Unused bytes on all pages" $total_unused \
535 [percent $total_unused $storage {of all space}]
536 return 1
537}
538
539# Calculate the overhead in pages caused by auto-vacuum.
540#
541# This procedure calculates and returns the number of pages used by the
542# auto-vacuum 'pointer-map'. If the database does not support auto-vacuum,
543# then 0 is returned. The two arguments are the size of the database file in
544# pages and the page size used by the database (in bytes).
545proc autovacuum_overhead {filePages pageSize} {
546
547 # Read the value of meta 4. If non-zero, then the database supports
548 # auto-vacuum. It would be possible to use "PRAGMA auto_vacuum" instead,
549 # but that would not work if the SQLITE_OMIT_PRAGMA macro was defined
550 # when the library was built.
551 set meta4 [lindex [btree_get_meta $::DB] 4]
552
553 # If the database is not an auto-vacuum database or the file consists
554 # of one page only then there is no overhead for auto-vacuum. Return zero.
555 if {0==$meta4 || $filePages==1} {
556 return 0
557 }
558
559 # The number of entries on each pointer map page. The layout of the
560 # database file is one pointer-map page, followed by $ptrsPerPage other
561 # pages, followed by a pointer-map page etc. The first pointer-map page
562 # is the second page of the file overall.
563 set ptrsPerPage [expr double($pageSize/5)]
564
565 # Return the number of pointer map pages in the database.
566 return [expr int(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]
567}
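
A quick worked instance of that formula, using made-up numbers (a 1000-page file with 1024-byte pages):

    set pageSize  1024
    set filePages 1000
    set ptrsPerPage [expr double($pageSize/5)]                     ;# => 204.0
    puts [expr int(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]   ;# => 5 pointer-map pages
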
568
569
570# Calculate the summary statistics for the database and store the results
571# in TCL variables. They are output below. Variables are as follows:
572#
573# pageSize: Size of each page in bytes.
574# file_bytes: File size in bytes.
575# file_pgcnt: Number of pages in the file.
576# file_pgcnt2: Number of pages in the file (calculated).
577# av_pgcnt: Pages consumed by the auto-vacuum pointer-map.
578# av_percent: Percentage of the file consumed by auto-vacuum pointer-map.
579# inuse_pgcnt: Data pages in the file.
580# inuse_percent: Percentage of pages used to store data.
581# free_pgcnt: Free pages calculated as (<total pages> - <in-use pages>)
582# free_pgcnt2: Free pages in the file according to the file header.
583# free_percent: Percentage of file consumed by free pages (calculated).
584# free_percent2: Percentage of file consumed by free pages (header).
585# ntable: Number of tables in the db.
586# nindex: Number of indices in the db.
587# nautoindex: Number of indices created automatically.
588# nmanindex: Number of indices created manually.
589# user_payload: Number of bytes of payload in table btrees
590# (not including sqlite_master)
591# user_percent: $user_payload as a percentage of total file size.
592
593set file_bytes [file size $file_to_analyze]
594set file_pgcnt [expr {$file_bytes/$pageSize}]
595
596set av_pgcnt [autovacuum_overhead $file_pgcnt $pageSize]
597set av_percent [percent $av_pgcnt $file_pgcnt]
598
599set sql {SELECT sum(leaf_pages+int_pages+ovfl_pages) FROM space_used}
600set inuse_pgcnt [expr int([mem eval $sql])]
601set inuse_percent [percent $inuse_pgcnt $file_pgcnt]
602
603set free_pgcnt [expr $file_pgcnt-$inuse_pgcnt-$av_pgcnt]
604set free_percent [percent $free_pgcnt $file_pgcnt]
605set free_pgcnt2 [lindex [btree_get_meta $DB] 0]
606set free_percent2 [percent $free_pgcnt2 $file_pgcnt]
607
608set file_pgcnt2 [expr {$inuse_pgcnt+$free_pgcnt2+$av_pgcnt}]
609
610set ntable [db eval {SELECT count(*)+1 FROM sqlite_master WHERE type='table'}]
611set nindex [db eval {SELECT count(*) FROM sqlite_master WHERE type='index'}]
612set sql {SELECT count(*) FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'}
613set nautoindex [db eval $sql]
614set nmanindex [expr {$nindex-$nautoindex}]
615
616# set total_payload [mem eval "SELECT sum(payload) FROM space_used"]
617set user_payload [mem one {SELECT int(sum(payload)) FROM space_used
618 WHERE NOT is_index AND name NOT LIKE 'sqlite_master'}]
619set user_percent [percent $user_payload $file_bytes]
620
621# Output the summary statistics calculated above.
622#
623puts "/** Disk-Space Utilization Report For $file_to_analyze"
624catch {
625 puts "*** As of [clock format [clock seconds] -format {%Y-%b-%d %H:%M:%S}]"
626}
627puts ""
628statline {Page size in bytes} $pageSize
629statline {Pages in the whole file (measured)} $file_pgcnt
630statline {Pages in the whole file (calculated)} $file_pgcnt2
631statline {Pages that store data} $inuse_pgcnt $inuse_percent
632statline {Pages on the freelist (per header)} $free_pgcnt2 $free_percent2
633statline {Pages on the freelist (calculated)} $free_pgcnt $free_percent
634statline {Pages of auto-vacuum overhead} $av_pgcnt $av_percent
635statline {Number of tables in the database} $ntable
636statline {Number of indices} $nindex
637statline {Number of named indices} $nmanindex
638statline {Automatically generated indices} $nautoindex
639statline {Size of the file in bytes} $file_bytes
640statline {Bytes of user payload stored} $user_payload $user_percent
641
642# Output table rankings
643#
644puts ""
645puts "*** Page counts for all tables with their indices ********************"
646puts ""
647mem eval {SELECT tblname, count(*) AS cnt,
648 int(sum(int_pages+leaf_pages+ovfl_pages)) AS size
649 FROM space_used GROUP BY tblname ORDER BY size+0 DESC, tblname} {} {
650 statline [string toupper $tblname] $size [percent $size $file_pgcnt]
651}
652
653# Output subreports
654#
655if {$nindex>0} {
656 subreport {All tables and indices} 1
657}
658subreport {All tables} {NOT is_index}
659if {$nindex>0} {
660 subreport {All indices} {is_index}
661}
662foreach tbl [mem eval {SELECT name FROM space_used WHERE NOT is_index
663 ORDER BY name}] {
664 regsub ' $tbl '' qn
665 set name [string toupper $tbl]
666 set n [mem eval "SELECT count(*) FROM space_used WHERE tblname='$qn'"]
667 if {$n>1} {
668 subreport "Table $name and all its indices" "tblname='$qn'"
669 subreport "Table $name w/o any indices" "name='$qn'"
670 subreport "Indices of table $name" "tblname='$qn' AND is_index"
671 } else {
672 subreport "Table $name" "name='$qn'"
673 }
674}
675
676# Output instructions on what the numbers above mean.
677#
678puts {
679*** Definitions ******************************************************
680
681Page size in bytes
682
683 The number of bytes in a single page of the database file.
684 Usually 1024.
685
686Number of pages in the whole file
687}
688puts \
689" The number of $pageSize-byte pages that go into forming the complete
690 database"
691puts \
692{
693Pages that store data
694
695 The number of pages that store data, either as primary B*Tree pages or
696 as overflow pages. The number at the right is the data pages divided by
697 the total number of pages in the file.
698
699Pages on the freelist
700
701 The number of pages that are not currently in use but are reserved for
702 future use. The percentage at the right is the number of freelist pages
703 divided by the total number of pages in the file.
704
705Pages of auto-vacuum overhead
706
707 The number of pages that store data used by the database to facilitate
708 auto-vacuum. This is zero for databases that do not support auto-vacuum.
709
710Number of tables in the database
711
712 The number of tables in the database, including the SQLITE_MASTER table
713 used to store schema information.
714
715Number of indices
716
717 The total number of indices in the database.
718
719Number of named indices
720
721 The number of indices created using an explicit CREATE INDEX statement.
722
723Automatically generated indices
724
725 The number of indices used to implement PRIMARY KEY or UNIQUE constraints
726 on tables.
727
728Size of the file in bytes
729
730 The total amount of disk space used by the entire database file.
731
732Bytes of user payload stored
733
734 The total number of bytes of user payload stored in the database. The
735 schema information in the SQLITE_MASTER table is not counted when
736 computing this number. The percentage at the right shows the payload
737 divided by the total file size.
738
739Percentage of total database
740
741 The amount of the complete database file that is devoted to storing
742 information described by this category.
743
744Number of entries
745
746 The total number of B-Tree key/value pairs stored under this category.
747
748Bytes of storage consumed
749
750 The total amount of disk space required to store all B-Tree entries
751 under this category. This is the total number of pages used times
752 the page size.
753
754Bytes of payload
755
756 The amount of payload stored under this category. Payload is the data
757 part of table entries and the key part of index entries. The percentage
758 at the right is the bytes of payload divided by the bytes of storage
759 consumed.
760
761Average payload per entry
762
763 The average amount of payload on each entry. This is just the bytes of
764 payload divided by the number of entries.
765
766Average unused bytes per entry
767
768 The average amount of free space remaining on all pages under this
769 category on a per-entry basis. This is the number of unused bytes on
770 all pages divided by the number of entries.
771
772Fragmentation
773
774 The percentage of pages in the table or index that are not
775 consecutive in the disk file. Many filesystems are optimized
776 for sequential file access so smaller fragmentation numbers
777 sometimes result in faster queries, especially for larger
778 database files that do not fit in the disk cache.
779
780Maximum payload per entry
781
782 The largest payload size of any entry.
783
784Entries that use overflow
785
786 The number of entries that use one or more overflow pages.
787
788Total pages used
789
790 This is the number of pages used to hold all information in the current
791 category. This is the sum of index, primary, and overflow pages.
792
793Index pages used
794
795 This is the number of pages in a table B-tree that hold only key (rowid)
796 information and no data.
797
798Primary pages used
799
800 This is the number of B-tree pages that hold both key and data.
801
802Overflow pages used
803
804 The total number of overflow pages used for this category.
805
806Unused bytes on index pages
807
808 The total number of bytes of unused space on all index pages. The
809 percentage at the right is the number of unused bytes divided by the
810 total number of bytes on index pages.
811
812Unused bytes on primary pages
813
814 The total number of bytes of unused space on all primary pages. The
815 percentage at the right is the number of unused bytes divided by the
816 total number of bytes on primary pages.
817
818Unused bytes on overflow pages
819
820 The total number of bytes of unused space on all overflow pages. The
821 percentage at the right is the number of unused bytes divided by the
822 total number of bytes on overflow pages.
823
824Unused bytes on all pages
825
826 The total number of bytes of unused space on all primary and overflow
827 pages. The percentage at the right is the number of unused bytes
828 divided by the total number of bytes.
829}
830
831# Output a dump of the in-memory database. This can be used for more
832# complex offline analysis.
833#
834puts "**********************************************************************"
835puts "The entire text of this report can be sourced into any SQL database"
836puts "engine for further analysis. All of the text above is an SQL comment."
837puts "The data used to generate this report follows:"
838puts "*/"
839puts "BEGIN;"
840puts $tabledef
841unset -nocomplain x
842mem eval {SELECT * FROM space_used} x {
843 puts -nonewline "INSERT INTO space_used VALUES"
844 set sep (
845 foreach col $x(*) {
846 set v $x($col)
847 if {$v=="" || ![string is double $v]} {set v [quote $v]}
848 puts -nonewline $sep$v
849 set sep ,
850 }
851 puts ");"
852}
853puts "COMMIT;"
854
855} err]} {
856 puts "ERROR: $err"
857 puts $errorInfo
858 exit 1
859}
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest.tcl
new file mode 100644
index 0000000..ef39dc5
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest.tcl
@@ -0,0 +1,275 @@
1#!/usr/bin/tclsh
2#
3# Run this script using TCLSH to do a speed comparison between
4# various versions of SQLite, PostgreSQL, and MySQL.
5#
6
7# Run a test
8#
9set cnt 1
10proc runtest {title} {
11 global cnt
12 set sqlfile test$cnt.sql
13 puts "<h2>Test $cnt: $title</h2>"
14 incr cnt
15 set fd [open $sqlfile r]
16 set sql [string trim [read $fd [file size $sqlfile]]]
17 close $fd
18 set sx [split $sql \n]
19 set n [llength $sx]
20 if {$n>8} {
21 set sql {}
22 for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
23 append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
24 for {set i [expr {$n-3}]} {$i<$n} {incr i} {
25 append sql [lindex $sx $i]<br>\n
26 }
27 } else {
28 regsub -all \n [string trim $sql] <br> sql
29 }
30 puts "<blockquote>"
31 puts "$sql"
32 puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
33 set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
34 set delay 1000
35# exec sync; after $delay;
36# set t [time "exec psql drh <$sqlfile" 1]
37# set t [expr {[lindex $t 0]/1000000.0}]
38# puts [format $format PostgreSQL: $t]
39 exec sync; after $delay;
40 set t [time "exec mysql -f drh <$sqlfile" 1]
41 set t [expr {[lindex $t 0]/1000000.0}]
42 puts [format $format MySQL: $t]
43# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
44# set t [expr {[lindex $t 0]/1000000.0}]
45# puts [format $format {SQLite 2.3.2:} $t]
46# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
47# set t [expr {[lindex $t 0]/1000000.0}]
48# puts [format $format {SQLite 2.4 (cache=100):} $t]
49 exec sync; after $delay;
50 set t [time "exec ./sqlite248 s2k.db <$sqlfile" 1]
51 set t [expr {[lindex $t 0]/1000000.0}]
52 puts [format $format {SQLite 2.4.8:} $t]
53 exec sync; after $delay;
54 set t [time "exec ./sqlite248 sns.db <$sqlfile" 1]
55 set t [expr {[lindex $t 0]/1000000.0}]
56 puts [format $format {SQLite 2.4.8 (nosync):} $t]
57 exec sync; after $delay;
58 set t [time "exec ./sqlite2412 s2kb.db <$sqlfile" 1]
59 set t [expr {[lindex $t 0]/1000000.0}]
60 puts [format $format {SQLite 2.4.12:} $t]
61 exec sync; after $delay;
62 set t [time "exec ./sqlite2412 snsb.db <$sqlfile" 1]
63 set t [expr {[lindex $t 0]/1000000.0}]
64 puts [format $format {SQLite 2.4.12 (nosync):} $t]
65# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
66# set t [expr {[lindex $t 0]/1000000.0}]
67# puts [format $format {SQLite 2.4 (test):} $t]
68 puts "</table>"
69}
70
71# Initialize the environment
72#
73expr srand(1)
74catch {exec /bin/sh -c {rm -f s*.db}}
75set fd [open clear.sql w]
76puts $fd {
77 drop table t1;
78 drop table t2;
79}
80close $fd
81catch {exec psql drh <clear.sql}
82catch {exec mysql drh <clear.sql}
83set fd [open 2kinit.sql w]
84puts $fd {
85 PRAGMA default_cache_size=2000;
86 PRAGMA default_synchronous=on;
87}
88close $fd
89exec ./sqlite248 s2k.db <2kinit.sql
90exec ./sqlite2412 s2kb.db <2kinit.sql
91set fd [open nosync-init.sql w]
92puts $fd {
93 PRAGMA default_cache_size=2000;
94 PRAGMA default_synchronous=off;
95}
96close $fd
97exec ./sqlite248 sns.db <nosync-init.sql
98exec ./sqlite2412 snsb.db <nosync-init.sql
99set ones {zero one two three four five six seven eight nine
100 ten eleven twelve thirteen fourteen fifteen sixteen seventeen
101 eighteen nineteen}
102set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
103proc number_name {n} {
104 if {$n>=1000} {
105 set txt "[number_name [expr {$n/1000}]] thousand"
106 set n [expr {$n%1000}]
107 } else {
108 set txt {}
109 }
110 if {$n>=100} {
111 append txt " [lindex $::ones [expr {$n/100}]] hundred"
112 set n [expr {$n%100}]
113 }
114 if {$n>=20} {
115 append txt " [lindex $::tens [expr {$n/10}]]"
116 set n [expr {$n%10}]
117 }
118 if {$n>0} {
119 append txt " [lindex $::ones $n]"
120 }
121 set txt [string trim $txt]
122 if {$txt==""} {set txt zero}
123 return $txt
124}
125
126
127
128set fd [open test$cnt.sql w]
129puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
130for {set i 1} {$i<=1000} {incr i} {
131 set r [expr {int(rand()*100000)}]
132 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
133}
134close $fd
135runtest {1000 INSERTs}
136
137
138
139set fd [open test$cnt.sql w]
140puts $fd "BEGIN;"
141puts $fd "CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));"
142for {set i 1} {$i<=25000} {incr i} {
143 set r [expr {int(rand()*500000)}]
144 puts $fd "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');"
145}
146puts $fd "COMMIT;"
147close $fd
148runtest {25000 INSERTs in a transaction}
149
150
151
152set fd [open test$cnt.sql w]
153for {set i 0} {$i<100} {incr i} {
154 set lwr [expr {$i*100}]
155 set upr [expr {($i+10)*100}]
156 puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
157}
158close $fd
159runtest {100 SELECTs without an index}
160
161
162
163set fd [open test$cnt.sql w]
164for {set i 1} {$i<=100} {incr i} {
165 puts $fd "SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%[number_name $i]%';"
166}
167close $fd
168runtest {100 SELECTs on a string comparison}
169
170
171
172set fd [open test$cnt.sql w]
173puts $fd {CREATE INDEX i2a ON t2(a);}
174puts $fd {CREATE INDEX i2b ON t2(b);}
175close $fd
176runtest {Creating an index}
177
178
179
180set fd [open test$cnt.sql w]
181for {set i 0} {$i<5000} {incr i} {
182 set lwr [expr {$i*100}]
183 set upr [expr {($i+1)*100}]
184 puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
185}
186close $fd
187runtest {5000 SELECTs with an index}
188
189
190
191set fd [open test$cnt.sql w]
192puts $fd "BEGIN;"
193for {set i 0} {$i<1000} {incr i} {
194 set lwr [expr {$i*10}]
195 set upr [expr {($i+1)*10}]
196 puts $fd "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;"
197}
198puts $fd "COMMIT;"
199close $fd
200runtest {1000 UPDATEs without an index}
201
202
203
204set fd [open test$cnt.sql w]
205puts $fd "BEGIN;"
206for {set i 1} {$i<=25000} {incr i} {
207 set r [expr {int(rand()*500000)}]
208 puts $fd "UPDATE t2 SET b=$r WHERE a=$i;"
209}
210puts $fd "COMMIT;"
211close $fd
212runtest {25000 UPDATEs with an index}
213
214
215set fd [open test$cnt.sql w]
216puts $fd "BEGIN;"
217for {set i 1} {$i<=25000} {incr i} {
218 set r [expr {int(rand()*500000)}]
219 puts $fd "UPDATE t2 SET c='[number_name $r]' WHERE a=$i;"
220}
221puts $fd "COMMIT;"
222close $fd
223runtest {25000 text UPDATEs with an index}
224
225
226
227set fd [open test$cnt.sql w]
228puts $fd "BEGIN;"
229puts $fd "INSERT INTO t1 SELECT * FROM t2;"
230puts $fd "INSERT INTO t2 SELECT * FROM t1;"
231puts $fd "COMMIT;"
232close $fd
233runtest {INSERTs from a SELECT}
234
235
236
237set fd [open test$cnt.sql w]
238puts $fd {DELETE FROM t2 WHERE c LIKE '%fifty%';}
239close $fd
240runtest {DELETE without an index}
241
242
243
244set fd [open test$cnt.sql w]
245puts $fd {DELETE FROM t2 WHERE a>10 AND a<20000;}
246close $fd
247runtest {DELETE with an index}
248
249
250
251set fd [open test$cnt.sql w]
252puts $fd {INSERT INTO t2 SELECT * FROM t1;}
253close $fd
254runtest {A big INSERT after a big DELETE}
255
256
257
258set fd [open test$cnt.sql w]
259puts $fd {BEGIN;}
260puts $fd {DELETE FROM t1;}
261for {set i 1} {$i<=3000} {incr i} {
262 set r [expr {int(rand()*100000)}]
263 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
264}
265puts $fd {COMMIT;}
266close $fd
267runtest {A big DELETE followed by many small INSERTs}
268
269
270
271set fd [open test$cnt.sql w]
272puts $fd {DROP TABLE t1;}
273puts $fd {DROP TABLE t2;}
274close $fd
275runtest {DROP TABLE}
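
The text payloads in the generated SQL come from the number_name proc above, which spells an integer out in English; a couple of hypothetical calls show the kind of strings that end up in column c:

    puts [number_name 123]    ;# => one hundred twenty three
    puts [number_name 5021]   ;# => five thousand twenty one
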
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest2.tcl b/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest2.tcl
new file mode 100644
index 0000000..4fd632d
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/tool/speedtest2.tcl
@@ -0,0 +1,207 @@
1#!/usr/bin/tclsh
2#
3# Run this script using TCLSH to do a speed comparison between
4# various versions of SQLite, PostgreSQL, and MySQL.
5#
6
7# Run a test
8#
9set cnt 1
10proc runtest {title} {
11 global cnt
12 set sqlfile test$cnt.sql
13 puts "<h2>Test $cnt: $title</h2>"
14 incr cnt
15 set fd [open $sqlfile r]
16 set sql [string trim [read $fd [file size $sqlfile]]]
17 close $fd
18 set sx [split $sql \n]
19 set n [llength $sx]
20 if {$n>8} {
21 set sql {}
22 for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
23 append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
24 for {set i [expr {$n-3}]} {$i<$n} {incr i} {
25 append sql [lindex $sx $i]<br>\n
26 }
27 } else {
28 regsub -all \n [string trim $sql] <br> sql
29 }
30 puts "<blockquote>"
31 puts "$sql"
32 puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
33 set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
34 set delay 1000
35 exec sync; after $delay;
36 set t [time "exec psql drh <$sqlfile" 1]
37 set t [expr {[lindex $t 0]/1000000.0}]
38 puts [format $format PostgreSQL: $t]
39 exec sync; after $delay;
40 set t [time "exec mysql -f drh <$sqlfile" 1]
41 set t [expr {[lindex $t 0]/1000000.0}]
42 puts [format $format MySQL: $t]
43# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
44# set t [expr {[lindex $t 0]/1000000.0}]
45# puts [format $format {SQLite 2.3.2:} $t]
46# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
47# set t [expr {[lindex $t 0]/1000000.0}]
48# puts [format $format {SQLite 2.4 (cache=100):} $t]
49 exec sync; after $delay;
50 set t [time "exec ./sqlite240 s2k.db <$sqlfile" 1]
51 set t [expr {[lindex $t 0]/1000000.0}]
52 puts [format $format {SQLite 2.4:} $t]
53 exec sync; after $delay;
54 set t [time "exec ./sqlite240 sns.db <$sqlfile" 1]
55 set t [expr {[lindex $t 0]/1000000.0}]
56 puts [format $format {SQLite 2.4 (nosync):} $t]
57# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
58# set t [expr {[lindex $t 0]/1000000.0}]
59# puts [format $format {SQLite 2.4 (test):} $t]
60 puts "</table>"
61}
62
63# Initialize the environment
64#
65expr srand(1)
66catch {exec /bin/sh -c {rm -f s*.db}}
67set fd [open clear.sql w]
68puts $fd {
69 drop table t1;
70 drop table t2;
71}
72close $fd
73catch {exec psql drh <clear.sql}
74catch {exec mysql drh <clear.sql}
75set fd [open 2kinit.sql w]
76puts $fd {
77 PRAGMA default_cache_size=2000;
78 PRAGMA default_synchronous=on;
79}
80close $fd
81exec ./sqlite240 s2k.db <2kinit.sql
82exec ./sqlite-t1 st1.db <2kinit.sql
83set fd [open nosync-init.sql w]
84puts $fd {
85 PRAGMA default_cache_size=2000;
86 PRAGMA default_synchronous=off;
87}
88close $fd
89exec ./sqlite240 sns.db <nosync-init.sql
90set ones {zero one two three four five six seven eight nine
91 ten eleven twelve thirteen fourteen fifteen sixteen seventeen
92 eighteen nineteen}
93set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
94proc number_name {n} {
95 if {$n>=1000} {
96 set txt "[number_name [expr {$n/1000}]] thousand"
97 set n [expr {$n%1000}]
98 } else {
99 set txt {}
100 }
101 if {$n>=100} {
102 append txt " [lindex $::ones [expr {$n/100}]] hundred"
103 set n [expr {$n%100}]
104 }
105 if {$n>=20} {
106 append txt " [lindex $::tens [expr {$n/10}]]"
107 set n [expr {$n%10}]
108 }
109 if {$n>0} {
110 append txt " [lindex $::ones $n]"
111 }
112 set txt [string trim $txt]
113 if {$txt==""} {set txt zero}
114 return $txt
115}
116
117
118set fd [open test$cnt.sql w]
119puts $fd "BEGIN;"
120puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
121for {set i 1} {$i<=25000} {incr i} {
122 set r [expr {int(rand()*500000)}]
123 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
124}
125puts $fd "COMMIT;"
126close $fd
127runtest {25000 INSERTs in a transaction}
128
129
130set fd [open test$cnt.sql w]
131puts $fd "DELETE FROM t1;"
132close $fd
133runtest {DELETE everything}
134
135
136set fd [open test$cnt.sql w]
137puts $fd "BEGIN;"
138for {set i 1} {$i<=25000} {incr i} {
139 set r [expr {int(rand()*500000)}]
140 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
141}
142puts $fd "COMMIT;"
143close $fd
144runtest {25000 INSERTs in a transaction}
145
146
147set fd [open test$cnt.sql w]
148puts $fd "DELETE FROM t1;"
149close $fd
150runtest {DELETE everything}
151
152
153set fd [open test$cnt.sql w]
154puts $fd "BEGIN;"
155for {set i 1} {$i<=25000} {incr i} {
156 set r [expr {int(rand()*500000)}]
157 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
158}
159puts $fd "COMMIT;"
160close $fd
161runtest {25000 INSERTs in a transaction}
162
163
164set fd [open test$cnt.sql w]
165puts $fd "DELETE FROM t1;"
166close $fd
167runtest {DELETE everything}
168
169
170set fd [open test$cnt.sql w]
171puts $fd "BEGIN;"
172for {set i 1} {$i<=25000} {incr i} {
173 set r [expr {int(rand()*500000)}]
174 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
175}
176puts $fd "COMMIT;"
177close $fd
178runtest {25000 INSERTs in a transaction}
179
180
181set fd [open test$cnt.sql w]
182puts $fd "DELETE FROM t1;"
183close $fd
184runtest {DELETE everything}
185
186
187set fd [open test$cnt.sql w]
188puts $fd "BEGIN;"
189for {set i 1} {$i<=25000} {incr i} {
190 set r [expr {int(rand()*500000)}]
191 puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
192}
193puts $fd "COMMIT;"
194close $fd
195runtest {25000 INSERTs in a transaction}
196
197
198set fd [open test$cnt.sql w]
199puts $fd "DELETE FROM t1;"
200close $fd
201runtest {DELETE everything}
202
203
204set fd [open test$cnt.sql w]
205puts $fd {DROP TABLE t1;}
206close $fd
207runtest {DROP TABLE}