From e36d23a85ebff914d74bb541558c2b6082b78edb Mon Sep 17 00:00:00 2001 From: dan miller Date: Sat, 20 Oct 2007 02:49:29 +0000 Subject: sqlite source (unix build) added to libraries --- .../sqlite/unix/sqlite-3.5.1/test/aggerror.test | 78 + libraries/sqlite/unix/sqlite-3.5.1/test/all.test | 149 + libraries/sqlite/unix/sqlite-3.5.1/test/alter.test | 756 + .../sqlite/unix/sqlite-3.5.1/test/alter2.test | 440 + .../sqlite/unix/sqlite-3.5.1/test/alter3.test | 396 + .../sqlite/unix/sqlite-3.5.1/test/altermalloc.test | 68 + .../sqlite/unix/sqlite-3.5.1/test/analyze.test | 257 + libraries/sqlite/unix/sqlite-3.5.1/test/async.test | 78 + .../sqlite/unix/sqlite-3.5.1/test/async2.test | 127 + .../sqlite/unix/sqlite-3.5.1/test/async3.test | 73 + .../sqlite/unix/sqlite-3.5.1/test/attach.test | 750 + .../sqlite/unix/sqlite-3.5.1/test/attach2.test | 389 + .../sqlite/unix/sqlite-3.5.1/test/attach3.test | 344 + .../unix/sqlite-3.5.1/test/attachmalloc.test | 48 + libraries/sqlite/unix/sqlite-3.5.1/test/auth.test | 2306 ++ libraries/sqlite/unix/sqlite-3.5.1/test/auth2.test | 75 + .../sqlite/unix/sqlite-3.5.1/test/autoinc.test | 536 + .../sqlite/unix/sqlite-3.5.1/test/autovacuum.test | 646 + .../unix/sqlite-3.5.1/test/autovacuum_crash.test | 58 + .../unix/sqlite-3.5.1/test/autovacuum_ioerr.test | 58 + .../unix/sqlite-3.5.1/test/autovacuum_ioerr2.test | 133 + .../sqlite/unix/sqlite-3.5.1/test/avtrans.test | 921 + .../sqlite/unix/sqlite-3.5.1/test/badutf.test | 143 + .../sqlite/unix/sqlite-3.5.1/test/between.test | 113 + .../sqlite/unix/sqlite-3.5.1/test/bigfile.test | 193 + .../sqlite/unix/sqlite-3.5.1/test/bigrow.test | 223 + libraries/sqlite/unix/sqlite-3.5.1/test/bind.test | 577 + .../sqlite/unix/sqlite-3.5.1/test/bindxfer.test | 84 + libraries/sqlite/unix/sqlite-3.5.1/test/blob.test | 124 + libraries/sqlite/unix/sqlite-3.5.1/test/btree.test | 1072 + .../sqlite/unix/sqlite-3.5.1/test/btree2.test | 502 + .../sqlite/unix/sqlite-3.5.1/test/btree4.test | 101 + .../sqlite/unix/sqlite-3.5.1/test/btree5.test | 292 + .../sqlite/unix/sqlite-3.5.1/test/btree6.test | 128 + .../sqlite/unix/sqlite-3.5.1/test/btree7.test | 50 + .../sqlite/unix/sqlite-3.5.1/test/btree8.test | 43 + .../sqlite/unix/sqlite-3.5.1/test/btree9.test | 49 + libraries/sqlite/unix/sqlite-3.5.1/test/busy.test | 44 + libraries/sqlite/unix/sqlite-3.5.1/test/cache.test | 63 + libraries/sqlite/unix/sqlite-3.5.1/test/capi2.test | 793 + libraries/sqlite/unix/sqlite-3.5.1/test/capi3.test | 1071 + .../sqlite/unix/sqlite-3.5.1/test/capi3b.test | 145 + .../sqlite/unix/sqlite-3.5.1/test/capi3c.test | 1245 + libraries/sqlite/unix/sqlite-3.5.1/test/cast.test | 290 + libraries/sqlite/unix/sqlite-3.5.1/test/check.test | 372 + .../sqlite/unix/sqlite-3.5.1/test/collate1.test | 307 + .../sqlite/unix/sqlite-3.5.1/test/collate2.test | 664 + .../sqlite/unix/sqlite-3.5.1/test/collate3.test | 429 + .../sqlite/unix/sqlite-3.5.1/test/collate4.test | 700 + .../sqlite/unix/sqlite-3.5.1/test/collate5.test | 270 + .../sqlite/unix/sqlite-3.5.1/test/collate6.test | 153 + .../sqlite/unix/sqlite-3.5.1/test/collate7.test | 73 + .../sqlite/unix/sqlite-3.5.1/test/collate8.test | 52 + .../sqlite/unix/sqlite-3.5.1/test/colmeta.test | 103 + .../sqlite/unix/sqlite-3.5.1/test/conflict.test | 763 + .../sqlite/unix/sqlite-3.5.1/test/corrupt.test | 169 + .../sqlite/unix/sqlite-3.5.1/test/corrupt2.test | 135 + .../sqlite/unix/sqlite-3.5.1/test/corrupt3.test | 109 + .../sqlite/unix/sqlite-3.5.1/test/corrupt4.test | 74 + libraries/sqlite/unix/sqlite-3.5.1/test/crash.test | 403 + 
.../sqlite/unix/sqlite-3.5.1/test/crash2.test | 132 + .../sqlite/unix/sqlite-3.5.1/test/crash3.test | 191 + .../sqlite/unix/sqlite-3.5.1/test/createtab.test | 146 + libraries/sqlite/unix/sqlite-3.5.1/test/date.test | 354 + .../sqlite/unix/sqlite-3.5.1/test/default.test | 52 + .../sqlite/unix/sqlite-3.5.1/test/delete.test | 313 + .../sqlite/unix/sqlite-3.5.1/test/delete2.test | 99 + .../sqlite/unix/sqlite-3.5.1/test/delete3.test | 57 + .../sqlite/unix/sqlite-3.5.1/test/descidx1.test | 337 + .../sqlite/unix/sqlite-3.5.1/test/descidx2.test | 184 + .../sqlite/unix/sqlite-3.5.1/test/descidx3.test | 155 + .../sqlite/unix/sqlite-3.5.1/test/diskfull.test | 115 + .../sqlite/unix/sqlite-3.5.1/test/distinctagg.test | 57 + libraries/sqlite/unix/sqlite-3.5.1/test/enc.test | 172 + libraries/sqlite/unix/sqlite-3.5.1/test/enc2.test | 554 + libraries/sqlite/unix/sqlite-3.5.1/test/enc3.test | 81 + .../sqlite/unix/sqlite-3.5.1/test/exclusive.test | 449 + .../sqlite/unix/sqlite-3.5.1/test/exclusive2.test | 297 + .../sqlite/unix/sqlite-3.5.1/test/exclusive3.test | 59 + libraries/sqlite/unix/sqlite-3.5.1/test/expr.test | 699 + .../sqlite/unix/sqlite-3.5.1/test/filefmt.test | 115 + libraries/sqlite/unix/sqlite-3.5.1/test/fkey1.test | 77 + .../sqlite/unix/sqlite-3.5.1/test/format4.test | 65 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1a.test | 186 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1b.test | 147 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1c.test | 1213 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1d.test | 65 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1e.test | 85 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1f.test | 90 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1i.test | 88 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1j.test | 89 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1k.test | 69 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1l.test | 65 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1m.test | 50 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1n.test | 45 + libraries/sqlite/unix/sqlite-3.5.1/test/fts1o.test | 138 + .../sqlite/unix/sqlite-3.5.1/test/fts1porter.test | 23590 +++++++++++++++++++ libraries/sqlite/unix/sqlite-3.5.1/test/fts2a.test | 202 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2b.test | 147 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2c.test | 1213 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2d.test | 65 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2e.test | 85 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2f.test | 90 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2g.test | 87 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2h.test | 76 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2i.test | 87 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2j.test | 89 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2k.test | 105 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2l.test | 69 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2m.test | 65 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2n.test | 196 + libraries/sqlite/unix/sqlite-3.5.1/test/fts2o.test | 169 + .../sqlite/unix/sqlite-3.5.1/test/fts2token.test | 174 + .../sqlite/unix/sqlite-3.5.1/test/fts3aa.test | 202 + .../sqlite/unix/sqlite-3.5.1/test/fts3ab.test | 147 + .../sqlite/unix/sqlite-3.5.1/test/fts3ac.test | 1213 + .../sqlite/unix/sqlite-3.5.1/test/fts3ad.test | 65 + .../sqlite/unix/sqlite-3.5.1/test/fts3ae.test | 85 + .../sqlite/unix/sqlite-3.5.1/test/fts3af.test | 90 + .../sqlite/unix/sqlite-3.5.1/test/fts3ag.test | 87 + .../sqlite/unix/sqlite-3.5.1/test/fts3ah.test | 76 + .../sqlite/unix/sqlite-3.5.1/test/fts3ai.test | 87 + 
.../sqlite/unix/sqlite-3.5.1/test/fts3aj.test | 89 + .../sqlite/unix/sqlite-3.5.1/test/fts3ak.test | 105 + .../sqlite/unix/sqlite-3.5.1/test/fts3al.test | 69 + .../sqlite/unix/sqlite-3.5.1/test/fts3am.test | 65 + .../sqlite/unix/sqlite-3.5.1/test/fts3an.test | 196 + .../sqlite/unix/sqlite-3.5.1/test/fts3ao.test | 169 + .../sqlite/unix/sqlite-3.5.1/test/fts3atoken.test | 174 + libraries/sqlite/unix/sqlite-3.5.1/test/fts3b.test | 218 + libraries/sqlite/unix/sqlite-3.5.1/test/func.test | 886 + libraries/sqlite/unix/sqlite-3.5.1/test/fuzz.test | 251 + libraries/sqlite/unix/sqlite-3.5.1/test/fuzz2.test | 102 + .../sqlite/unix/sqlite-3.5.1/test/fuzz_common.tcl | 392 + .../sqlite/unix/sqlite-3.5.1/test/fuzz_malloc.test | 93 + libraries/sqlite/unix/sqlite-3.5.1/test/hook.test | 297 + libraries/sqlite/unix/sqlite-3.5.1/test/icu.test | 118 + libraries/sqlite/unix/sqlite-3.5.1/test/in.test | 367 + libraries/sqlite/unix/sqlite-3.5.1/test/in2.test | 68 + .../sqlite/unix/sqlite-3.5.1/test/incrblob.test | 597 + .../unix/sqlite-3.5.1/test/incrblob_err.test | 102 + .../sqlite/unix/sqlite-3.5.1/test/incrvacuum.test | 699 + .../sqlite/unix/sqlite-3.5.1/test/incrvacuum2.test | 125 + .../unix/sqlite-3.5.1/test/incrvacuum_ioerr.test | 89 + libraries/sqlite/unix/sqlite-3.5.1/test/index.test | 711 + .../sqlite/unix/sqlite-3.5.1/test/index2.test | 74 + .../sqlite/unix/sqlite-3.5.1/test/index3.test | 58 + .../sqlite/unix/sqlite-3.5.1/test/insert.test | 391 + .../sqlite/unix/sqlite-3.5.1/test/insert2.test | 278 + .../sqlite/unix/sqlite-3.5.1/test/insert3.test | 171 + .../sqlite/unix/sqlite-3.5.1/test/insert4.test | 272 + .../sqlite/unix/sqlite-3.5.1/test/interrupt.test | 198 + .../sqlite/unix/sqlite-3.5.1/test/intpkey.test | 605 + libraries/sqlite/unix/sqlite-3.5.1/test/io.test | 549 + libraries/sqlite/unix/sqlite-3.5.1/test/ioerr.test | 290 + .../sqlite/unix/sqlite-3.5.1/test/ioerr2.test | 115 + libraries/sqlite/unix/sqlite-3.5.1/test/join.test | 461 + libraries/sqlite/unix/sqlite-3.5.1/test/join2.test | 75 + libraries/sqlite/unix/sqlite-3.5.1/test/join3.test | 62 + libraries/sqlite/unix/sqlite-3.5.1/test/join4.test | 98 + libraries/sqlite/unix/sqlite-3.5.1/test/join5.test | 110 + .../sqlite/unix/sqlite-3.5.1/test/journal1.test | 67 + .../sqlite/unix/sqlite-3.5.1/test/lastinsert.test | 366 + .../unix/sqlite-3.5.1/test/laststmtchanges.test | 281 + libraries/sqlite/unix/sqlite-3.5.1/test/like.test | 400 + libraries/sqlite/unix/sqlite-3.5.1/test/limit.test | 448 + .../sqlite/unix/sqlite-3.5.1/test/loadext.test | 218 + .../sqlite/unix/sqlite-3.5.1/test/loadext2.test | 143 + libraries/sqlite/unix/sqlite-3.5.1/test/lock.test | 354 + libraries/sqlite/unix/sqlite-3.5.1/test/lock2.test | 169 + libraries/sqlite/unix/sqlite-3.5.1/test/lock3.test | 78 + libraries/sqlite/unix/sqlite-3.5.1/test/lock4.test | 99 + libraries/sqlite/unix/sqlite-3.5.1/test/main.test | 360 + .../sqlite/unix/sqlite-3.5.1/test/malloc.test | 571 + .../sqlite/unix/sqlite-3.5.1/test/malloc2.test | 366 + .../sqlite/unix/sqlite-3.5.1/test/malloc3.test | 657 + .../sqlite/unix/sqlite-3.5.1/test/malloc4.test | 193 + .../sqlite/unix/sqlite-3.5.1/test/malloc5.test | 396 + .../sqlite/unix/sqlite-3.5.1/test/malloc6.test | 55 + .../sqlite/unix/sqlite-3.5.1/test/malloc7.test | 48 + .../sqlite/unix/sqlite-3.5.1/test/malloc8.test | 95 + .../sqlite/unix/sqlite-3.5.1/test/malloc9.test | 51 + .../sqlite/unix/sqlite-3.5.1/test/mallocA.test | 69 + .../sqlite/unix/sqlite-3.5.1/test/mallocB.test | 47 + .../sqlite/unix/sqlite-3.5.1/test/mallocC.test | 134 + 
.../sqlite/unix/sqlite-3.5.1/test/mallocD.test | 61 + .../unix/sqlite-3.5.1/test/malloc_common.tcl | 156 + .../sqlite/unix/sqlite-3.5.1/test/manydb.test | 91 + libraries/sqlite/unix/sqlite-3.5.1/test/memdb.test | 417 + .../sqlite/unix/sqlite-3.5.1/test/memleak.test | 98 + .../sqlite/unix/sqlite-3.5.1/test/minmax.test | 384 + .../sqlite/unix/sqlite-3.5.1/test/minmax2.test | 387 + libraries/sqlite/unix/sqlite-3.5.1/test/misc1.test | 585 + libraries/sqlite/unix/sqlite-3.5.1/test/misc2.test | 435 + libraries/sqlite/unix/sqlite-3.5.1/test/misc3.test | 317 + libraries/sqlite/unix/sqlite-3.5.1/test/misc4.test | 197 + libraries/sqlite/unix/sqlite-3.5.1/test/misc5.test | 620 + libraries/sqlite/unix/sqlite-3.5.1/test/misc6.test | 48 + libraries/sqlite/unix/sqlite-3.5.1/test/misc7.test | 439 + .../sqlite/unix/sqlite-3.5.1/test/misuse.test | 207 + .../sqlite/unix/sqlite-3.5.1/test/notnull.test | 505 + libraries/sqlite/unix/sqlite-3.5.1/test/null.test | 252 + .../sqlite/unix/sqlite-3.5.1/test/onefile.test | 61 + .../sqlite/unix/sqlite-3.5.1/test/openv2.test | 41 + libraries/sqlite/unix/sqlite-3.5.1/test/pager.test | 571 + .../sqlite/unix/sqlite-3.5.1/test/pager2.test | 408 + .../sqlite/unix/sqlite-3.5.1/test/pager3.test | 73 + .../sqlite/unix/sqlite-3.5.1/test/pageropt.test | 201 + .../sqlite/unix/sqlite-3.5.1/test/pagesize.test | 182 + .../sqlite/unix/sqlite-3.5.1/test/pragma.test | 1037 + .../sqlite/unix/sqlite-3.5.1/test/pragma2.test | 117 + .../sqlite/unix/sqlite-3.5.1/test/printf.test | 324 + .../sqlite/unix/sqlite-3.5.1/test/progress.test | 177 + .../sqlite/unix/sqlite-3.5.1/test/ptrchng.test | 222 + libraries/sqlite/unix/sqlite-3.5.1/test/quick.test | 109 + libraries/sqlite/unix/sqlite-3.5.1/test/quote.test | 89 + .../sqlite/unix/sqlite-3.5.1/test/rdonly.test | 65 + .../sqlite/unix/sqlite-3.5.1/test/reindex.test | 172 + .../sqlite/unix/sqlite-3.5.1/test/rollback.test | 82 + libraries/sqlite/unix/sqlite-3.5.1/test/rowid.test | 674 + .../sqlite/unix/sqlite-3.5.1/test/safety.test | 68 + .../sqlite/unix/sqlite-3.5.1/test/schema.test | 365 + .../sqlite/unix/sqlite-3.5.1/test/schema2.test | 338 + .../sqlite/unix/sqlite-3.5.1/test/select1.test | 913 + .../sqlite/unix/sqlite-3.5.1/test/select2.test | 185 + .../sqlite/unix/sqlite-3.5.1/test/select3.test | 264 + .../sqlite/unix/sqlite-3.5.1/test/select4.test | 617 + .../sqlite/unix/sqlite-3.5.1/test/select5.test | 192 + .../sqlite/unix/sqlite-3.5.1/test/select6.test | 507 + .../sqlite/unix/sqlite-3.5.1/test/select7.test | 159 + .../sqlite/unix/sqlite-3.5.1/test/server1.test | 171 + .../sqlite/unix/sqlite-3.5.1/test/shared.test | 911 + .../sqlite/unix/sqlite-3.5.1/test/shared2.test | 131 + .../sqlite/unix/sqlite-3.5.1/test/shared3.test | 47 + .../sqlite/unix/sqlite-3.5.1/test/shared_err.test | 463 + .../sqlite/unix/sqlite-3.5.1/test/shortread1.test | 52 + libraries/sqlite/unix/sqlite-3.5.1/test/soak.test | 90 + .../sqlite/unix/sqlite-3.5.1/test/softheap1.test | 47 + libraries/sqlite/unix/sqlite-3.5.1/test/sort.test | 467 + .../sqlite/unix/sqlite-3.5.1/test/speed1.test | 289 + .../sqlite/unix/sqlite-3.5.1/test/speed2.test | 339 + .../sqlite/unix/sqlite-3.5.1/test/speed3.test | 186 + .../sqlite/unix/sqlite-3.5.1/test/sqllimits1.test | 576 + .../sqlite/unix/sqlite-3.5.1/test/subquery.test | 494 + .../sqlite/unix/sqlite-3.5.1/test/subselect.test | 202 + .../sqlite/unix/sqlite-3.5.1/test/substr.test | 108 + libraries/sqlite/unix/sqlite-3.5.1/test/sync.test | 97 + libraries/sqlite/unix/sqlite-3.5.1/test/table.test | 674 + 
.../sqlite/unix/sqlite-3.5.1/test/tableapi.test | 219 + .../sqlite/unix/sqlite-3.5.1/test/tclsqlite.test | 496 + .../sqlite/unix/sqlite-3.5.1/test/temptable.test | 414 + libraries/sqlite/unix/sqlite-3.5.1/test/tester.tcl | 554 + .../sqlite/unix/sqlite-3.5.1/test/thread001.test | 139 + .../sqlite/unix/sqlite-3.5.1/test/thread002.test | 105 + .../sqlite/unix/sqlite-3.5.1/test/thread1.test | 172 + .../sqlite/unix/sqlite-3.5.1/test/thread2.test | 246 + .../unix/sqlite-3.5.1/test/thread_common.tcl | 88 + .../sqlite/unix/sqlite-3.5.1/test/threadtest1.c | 289 + .../sqlite/unix/sqlite-3.5.1/test/threadtest2.c | 133 + .../sqlite/unix/sqlite-3.5.1/test/tkt1435.test | 111 + .../sqlite/unix/sqlite-3.5.1/test/tkt1443.test | 180 + .../sqlite/unix/sqlite-3.5.1/test/tkt1444.test | 56 + .../sqlite/unix/sqlite-3.5.1/test/tkt1449.test | 262 + .../sqlite/unix/sqlite-3.5.1/test/tkt1473.test | 728 + .../sqlite/unix/sqlite-3.5.1/test/tkt1501.test | 36 + .../sqlite/unix/sqlite-3.5.1/test/tkt1512.test | 54 + .../sqlite/unix/sqlite-3.5.1/test/tkt1514.test | 27 + .../sqlite/unix/sqlite-3.5.1/test/tkt1536.test | 38 + .../sqlite/unix/sqlite-3.5.1/test/tkt1537.test | 122 + .../sqlite/unix/sqlite-3.5.1/test/tkt1567.test | 51 + .../sqlite/unix/sqlite-3.5.1/test/tkt1644.test | 111 + .../sqlite/unix/sqlite-3.5.1/test/tkt1667.test | 85 + .../sqlite/unix/sqlite-3.5.1/test/tkt1873.test | 67 + .../sqlite/unix/sqlite-3.5.1/test/tkt2141.test | 61 + .../sqlite/unix/sqlite-3.5.1/test/tkt2192.test | 140 + .../sqlite/unix/sqlite-3.5.1/test/tkt2213.test | 30 + .../sqlite/unix/sqlite-3.5.1/test/tkt2251.test | 108 + .../sqlite/unix/sqlite-3.5.1/test/tkt2285.test | 57 + .../sqlite/unix/sqlite-3.5.1/test/tkt2332.test | 67 + .../sqlite/unix/sqlite-3.5.1/test/tkt2339.test | 100 + .../sqlite/unix/sqlite-3.5.1/test/tkt2391.test | 49 + .../sqlite/unix/sqlite-3.5.1/test/tkt2409.test | 218 + .../sqlite/unix/sqlite-3.5.1/test/tkt2450.test | 48 + .../sqlite/unix/sqlite-3.5.1/test/tkt2640.test | 119 + .../sqlite/unix/sqlite-3.5.1/test/tkt2643.test | 39 + .../sqlite/unix/sqlite-3.5.1/test/tkt2686.tcl | 46 + libraries/sqlite/unix/sqlite-3.5.1/test/trace.test | 148 + libraries/sqlite/unix/sqlite-3.5.1/test/trans.test | 919 + .../sqlite/unix/sqlite-3.5.1/test/trigger1.test | 631 + .../sqlite/unix/sqlite-3.5.1/test/trigger2.test | 742 + .../sqlite/unix/sqlite-3.5.1/test/trigger3.test | 176 + .../sqlite/unix/sqlite-3.5.1/test/trigger4.test | 200 + .../sqlite/unix/sqlite-3.5.1/test/trigger5.test | 43 + .../sqlite/unix/sqlite-3.5.1/test/trigger6.test | 82 + .../sqlite/unix/sqlite-3.5.1/test/trigger7.test | 121 + .../sqlite/unix/sqlite-3.5.1/test/trigger8.test | 42 + libraries/sqlite/unix/sqlite-3.5.1/test/types.test | 324 + .../sqlite/unix/sqlite-3.5.1/test/types2.test | 340 + .../sqlite/unix/sqlite-3.5.1/test/types3.test | 98 + .../sqlite/unix/sqlite-3.5.1/test/unique.test | 253 + .../sqlite/unix/sqlite-3.5.1/test/update.test | 596 + libraries/sqlite/unix/sqlite-3.5.1/test/utf16.test | 75 + .../sqlite/unix/sqlite-3.5.1/test/utf16align.test | 84 + .../sqlite/unix/sqlite-3.5.1/test/vacuum.test | 359 + .../sqlite/unix/sqlite-3.5.1/test/vacuum2.test | 60 + .../sqlite/unix/sqlite-3.5.1/test/varint.test | 32 + libraries/sqlite/unix/sqlite-3.5.1/test/view.test | 501 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab1.test | 946 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab2.test | 90 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab3.test | 142 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab4.test | 194 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab5.test | 153 + 
libraries/sqlite/unix/sqlite-3.5.1/test/vtab6.test | 457 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab7.test | 199 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab8.test | 78 + libraries/sqlite/unix/sqlite-3.5.1/test/vtab9.test | 70 + libraries/sqlite/unix/sqlite-3.5.1/test/vtabA.test | 135 + .../sqlite/unix/sqlite-3.5.1/test/vtab_alter.test | 103 + .../sqlite/unix/sqlite-3.5.1/test/vtab_err.test | 71 + .../sqlite/unix/sqlite-3.5.1/test/vtab_shared.test | 62 + libraries/sqlite/unix/sqlite-3.5.1/test/vx.txt | 43 + libraries/sqlite/unix/sqlite-3.5.1/test/where.test | 1156 + .../sqlite/unix/sqlite-3.5.1/test/where2.test | 614 + .../sqlite/unix/sqlite-3.5.1/test/where3.test | 162 + .../sqlite/unix/sqlite-3.5.1/test/where4.test | 270 + .../sqlite/unix/sqlite-3.5.1/test/where5.test | 288 + .../sqlite/unix/sqlite-3.5.1/test/zeroblob.test | 213 + 327 files changed, 107805 insertions(+) create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/aggerror.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/all.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/alter.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/alter2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/alter3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/altermalloc.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/analyze.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/async.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/async2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/async3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/attach.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/attach2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/attach3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/attachmalloc.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/auth.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/auth2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/autoinc.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_crash.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/avtrans.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/badutf.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/between.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/bigfile.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/bigrow.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/bind.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/bindxfer.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/blob.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/btree8.test create mode 100644 
libraries/sqlite/unix/sqlite-3.5.1/test/btree9.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/busy.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/cache.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/capi2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/capi3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/capi3b.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/capi3c.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/cast.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/check.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/collate8.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/colmeta.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/conflict.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/corrupt.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/corrupt2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/corrupt3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/corrupt4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/crash.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/crash2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/crash3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/createtab.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/date.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/default.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/delete.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/delete2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/delete3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/descidx1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/descidx2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/descidx3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/diskfull.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/distinctagg.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/enc.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/enc2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/enc3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/exclusive.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/exclusive2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/exclusive3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/expr.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/filefmt.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fkey1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/format4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1a.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1b.test create mode 100644 
libraries/sqlite/unix/sqlite-3.5.1/test/fts1c.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1d.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1e.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1f.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1i.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1j.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1k.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1l.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1m.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1n.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1o.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts1porter.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2a.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2b.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2c.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2d.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2e.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2f.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2g.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2h.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2i.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2j.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2k.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2l.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2m.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2n.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2o.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts2token.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3aa.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ab.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ac.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ad.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ae.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3af.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ag.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ah.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ai.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3aj.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ak.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3al.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3am.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3an.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3ao.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3atoken.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fts3b.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/func.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fuzz.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fuzz2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_common.tcl create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_malloc.test create mode 100644 
libraries/sqlite/unix/sqlite-3.5.1/test/hook.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/icu.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/in.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/in2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/incrblob.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/incrblob_err.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum_ioerr.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/index.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/index2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/index3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/insert.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/insert2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/insert3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/insert4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/interrupt.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/intpkey.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/io.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/ioerr.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/ioerr2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/join.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/join2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/join3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/join4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/join5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/journal1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/lastinsert.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/laststmtchanges.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/like.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/limit.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/loadext.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/loadext2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/lock.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/lock2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/lock3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/lock4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/main.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc8.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc9.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/mallocA.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/mallocB.test create mode 100644 
libraries/sqlite/unix/sqlite-3.5.1/test/mallocC.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/mallocD.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/malloc_common.tcl create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/manydb.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/memdb.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/memleak.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/minmax.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/minmax2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misc7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/misuse.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/notnull.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/null.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/onefile.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/openv2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pager.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pager2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pager3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pageropt.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pagesize.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pragma.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/pragma2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/printf.test create mode 100755 libraries/sqlite/unix/sqlite-3.5.1/test/progress.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/ptrchng.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/quick.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/quote.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/rdonly.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/reindex.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/rollback.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/rowid.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/safety.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/schema.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/schema2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/select7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/server1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/shared.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/shared2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/shared3.test 
create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/shared_err.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/shortread1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/soak.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/softheap1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/sort.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/speed1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/speed2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/speed3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/sqllimits1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/subquery.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/subselect.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/substr.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/sync.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/table.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tableapi.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tclsqlite.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/temptable.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tester.tcl create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/thread001.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/thread002.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/thread1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/thread2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/thread_common.tcl create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/threadtest1.c create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/threadtest2.c create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1435.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1443.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1444.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1449.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1473.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1501.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1512.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1514.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1536.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1537.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1567.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1644.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1667.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt1873.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2141.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2192.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2213.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2251.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2285.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2332.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2339.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2391.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2409.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2450.test create mode 
100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2640.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2643.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/tkt2686.tcl create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trace.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trans.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/trigger8.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/types.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/types2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/types3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/unique.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/update.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/utf16.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/utf16align.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vacuum.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vacuum2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/varint.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/view.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab1.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab6.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab7.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab8.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab9.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtabA.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab_alter.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab_err.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vtab_shared.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/vx.txt create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/where.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/where2.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/where3.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/where4.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/where5.test create mode 100644 libraries/sqlite/unix/sqlite-3.5.1/test/zeroblob.test
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/aggerror.test b/libraries/sqlite/unix/sqlite-3.5.1/test/aggerror.test new file mode 100644 index 0000000..f95d8b2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/aggerror.test @@ -0,0 +1,78 @@ +# 2006 January 20 +# +# The author disclaims copyright to this source code.
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for calling sqlite3_result_error() +# from within an aggregate function implementation. +# +# $Id: aggerror.test,v 1.3 2006/05/03 23:34:06 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Add the x_count aggregate function to the database handle. +# x_count will error out if its input is 40 or 41 or if its +# final result is 42. Make sure that such errors are handled +# appropriately. +# +do_test aggerror-1.1 { + set DB [sqlite3_connection_pointer db] + sqlite3_create_aggregate $DB + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 SELECT a+2 FROM t1; + INSERT INTO t1 SELECT a+4 FROM t1; + INSERT INTO t1 SELECT a+8 FROM t1; + INSERT INTO t1 SELECT a+16 FROM t1; + INSERT INTO t1 SELECT a+32 FROM t1 ORDER BY a LIMIT 7; + SELECT x_count(*) FROM t1; + } +} {39} +do_test aggerror-1.2 { + execsql { + INSERT INTO t1 VALUES(40); + SELECT x_count(*) FROM t1; + } +} {40} +do_test aggerror-1.3 { + catchsql { + SELECT x_count(a) FROM t1; + } +} {1 {value of 40 handed to x_count}} +ifcapable utf16 { + do_test aggerror-1.4 { + execsql { + UPDATE t1 SET a=41 WHERE a=40 + } + catchsql { + SELECT x_count(a) FROM t1; + } + } {1 abc} +} +do_test aggerror-1.5 { + execsql { + SELECT x_count(*) FROM t1 + } +} 40 +do_test aggerror-1.6 { + execsql { + INSERT INTO t1 VALUES(40); + INSERT INTO t1 VALUES(42); + } + catchsql { + SELECT x_count(*) FROM t1; + } +} {1 {x_count totals to 42}} + +finish_test
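
The x_count aggregate exercised above is not defined in this file: it is registered by the C test harness behind the "sqlite3_create_aggregate" Tcl command, so its source does not appear in this patch. As a rough sketch only, with type and function names invented for illustration rather than taken from the harness, an aggregate that reports failures through sqlite3_result_error(), erroring on an input of 40 or 41 and on a final count of 42, could look like this in C:

#include <stdio.h>
#include <sqlite3.h>

/* Running count kept in the aggregate context (zeroed by SQLite on first use). */
typedef struct CountCtx { int n; } CountCtx;

static void xCountStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  CountCtx *p = (CountCtx *)sqlite3_aggregate_context(ctx, sizeof(*p));
  if( p==0 ) return;                       /* out of memory */
  if( argc==1 ){
    int v = sqlite3_value_int(argv[0]);
    if( v==40 || v==41 ){
      char msg[64];
      snprintf(msg, sizeof(msg), "value of %d handed to x_count", v);
      sqlite3_result_error(ctx, msg, -1);  /* causes the whole SELECT to fail */
      return;
    }
  }
  p->n++;
}

static void xCountFinal(sqlite3_context *ctx){
  CountCtx *p = (CountCtx *)sqlite3_aggregate_context(ctx, sizeof(*p));
  int n = p ? p->n : 0;
  if( n==42 ){
    sqlite3_result_error(ctx, "x_count totals to 42", -1);
  }else{
    sqlite3_result_int(ctx, n);
  }
}

/* Hypothetical registration helper; nArg of -1 accepts both x_count(*) and x_count(a). */
int register_x_count(sqlite3 *db){
  return sqlite3_create_function(db, "x_count", -1, SQLITE_UTF8, 0,
                                 0, xCountStep, xCountFinal);
}

The {1 abc} expectation in aggerror-1.4 suggests the stock harness reports the 41 case with a separate UTF-16 error message; the sketch above does not reproduce that detail.
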
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/all.test b/libraries/sqlite/unix/sqlite-3.5.1/test/all.test new file mode 100644 index 0000000..dbdce76 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/all.test @@ -0,0 +1,149 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. +# +# $Id: all.test,v 1.48 2007/09/01 11:04:28 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test +proc finish_test {} { + # no-op +} + +if {[file exists ./sqlite_test_count]} { + set COUNT [exec cat ./sqlite_test_count] +} else { + set COUNT 3 +} + +if {[llength $argv]>0} { + foreach {name value} $argv { + switch -- $name { + -count { + set COUNT $value + } + -quick { + set ISQUICK $value + } + -soak { + set SOAKTEST $value + } + default { + puts stderr "Unknown option: $name" + exit + } + } + } +} +set argv {} + +# LeakList will hold a list of the number of unfreed mallocs after +# each round of the test. This number should be constant. If it +# grows, it may mean there is a memory leak in the library. +# +set LeakList {} + +set EXCLUDE {} +lappend EXCLUDE all.test ;# This file +lappend EXCLUDE async.test +lappend EXCLUDE crash.test ;# Run separately later. +lappend EXCLUDE crash2.test ;# Run separately later. +lappend EXCLUDE autovacuum_crash.test ;# Run separately later. +lappend EXCLUDE quick.test ;# Alternate test driver script +lappend EXCLUDE malloc.test ;# Run separately later. +lappend EXCLUDE misuse.test ;# Run separately later. +lappend EXCLUDE memleak.test ;# Alternate test driver script +lappend EXCLUDE fuzz.test +lappend EXCLUDE soak.test ;# Takes a very long time (default 1 hr) + +# Files to include in the test. If this list is empty then everything +# that is not in the EXCLUDE list is run. +# +set INCLUDE { +} + +# Test files btree2.test and btree4.test don't work if the +# SQLITE_DEFAULT_AUTOVACUUM macro is defined to true (because they depend +# on tables being allocated starting at page 2). +# +ifcapable default_autovacuum { + lappend EXCLUDE btree2.test + lappend EXCLUDE btree4.test +} + +for {set Counter 0} {$Counter<$COUNT && $nErr==0} {incr Counter} { + if {$Counter%2} { + set ::SETUP_SQL {PRAGMA default_synchronous=off;} + } else { + catch {unset ::SETUP_SQL} + } + foreach testfile [lsort -dictionary [glob $testdir/*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } + } + if {[info exists Leak]} { + lappend LeakList $Leak + } +} + +# Do one last test to look for a memory leak in the library. This will +# only work if SQLite is compiled with the -DSQLITE_DEBUG=1 flag. +# +if {$LeakList!=""} { + puts -nonewline memory-leak-test... + incr ::nTest + foreach x $LeakList { + if {$x!=[lindex $LeakList 0]} { + puts " failed!" + puts "Expected: all values to be the same" + puts " Got: $LeakList" + incr ::nErr + lappend ::failList memory-leak-test + break + } + } + puts " Ok" +} + +# Run the crashtest only on unix and only once. If the library does not +# always create auto-vacuum databases, also run autovacuum_crash.test. +# +if {$::tcl_platform(platform)=="unix"} { + source $testdir/crash.test + source $testdir/crash2.test + ifcapable !default_autovacuum { + source $testdir/autovacuum_crash.test + } +} + +# Run the malloc tests and the misuse test after memory leak detection. +# Both tests leak memory. Currently, misuse.test also leaks a handful of +# file descriptors. This is not considered a problem, but can cause tests +# in malloc.test to fail. So set the open-file count to zero before running +# malloc.test to get around this. +# +catch {source $testdir/misuse.test} +set sqlite_open_file_count 0 +catch {source $testdir/malloc.test} + +catch {db close} +set sqlite_open_file_count 0 +really_finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/alter.test b/libraries/sqlite/unix/sqlite-3.5.1/test/alter.test new file mode 100644 index 0000000..c013cc0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/alter.test @@ -0,0 +1,756 @@ +# 2004 November 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ALTER TABLE statement.
+# +# $Id: alter.test,v 1.25 2007/05/15 16:51:37 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_OMIT_ALTERTABLE is defined, omit this file. +ifcapable !altertable { + finish_test + return +} + +#---------------------------------------------------------------------- +# Test organization: +# +# alter-1.1.* - alter-1.7.*: Basic tests of ALTER TABLE, including tables +# with implicit and explicit indices. These tests came from an earlier +# fork of SQLite that also supported ALTER TABLE. +# alter-1.8.*: Tests for ALTER TABLE when the table resides in an +# attached database. +# alter-1.9.*: Tests for ALTER TABLE when there is whitespace between the +# table name and left parenthesis token. i.e: +# "CREATE TABLE abc (a, b, c);" +# alter-2.*: Test error conditions and messages. +# alter-3.*: Test ALTER TABLE on tables that have TRIGGERs attached to them. +# alter-4.*: Test ALTER TABLE on tables that have AUTOINCREMENT fields. +# + +# Create some tables to rename. Be sure to include some TEMP tables +# and some tables with odd names. +# +do_test alter-1.1 { + ifcapable tempdb { + set ::temp TEMP + } else { + set ::temp {} + } + execsql [subst -nocommands { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + CREATE TABLE [t1'x1](c UNIQUE, b PRIMARY KEY); + INSERT INTO [t1'x1] VALUES(3,4); + CREATE INDEX t1i1 ON T1(B); + CREATE INDEX t1i2 ON t1(a,b); + CREATE INDEX i3 ON [t1'x1](b,c); + CREATE $::temp TABLE "temp table"(e,f,g UNIQUE); + CREATE INDEX i2 ON [temp table](f); + INSERT INTO [temp table] VALUES(5,6,7); + }] + execsql { + SELECT 't1', * FROM t1; + SELECT 't1''x1', * FROM "t1'x1"; + SELECT * FROM [temp table]; + } +} {t1 1 2 t1'x1 3 4 5 6 7} +do_test alter-1.2 { + execsql [subst { + CREATE $::temp TABLE objlist(type, name, tbl_name); + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_master WHERE NAME!='objlist'; + }] + ifcapable tempdb { + execsql { + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_temp_master WHERE NAME!='objlist'; + } + } + + execsql { + SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name; + } +} [list \ + table t1 t1 \ + index t1i1 t1 \ + index t1i2 t1 \ + table t1'x1 t1'x1 \ + index i3 t1'x1 \ + index {sqlite_autoindex_t1'x1_1} t1'x1 \ + index {sqlite_autoindex_t1'x1_2} t1'x1 \ + table {temp table} {temp table} \ + index i2 {temp table} \ + index {sqlite_autoindex_temp table_1} {temp table} \ + ] + +# Make some changes +# +integrity_check alter-1.3.0 +do_test alter-1.3 { + execsql { + ALTER TABLE [T1] RENAME to [-t1-]; + ALTER TABLE "t1'x1" RENAME TO T2; + ALTER TABLE [temp table] RENAME to TempTab; + } +} {} +integrity_check alter-1.3.1 +do_test alter-1.4 { + execsql { + SELECT 't1', * FROM [-t1-]; + SELECT 't2', * FROM t2; + SELECT * FROM temptab; + } +} {t1 1 2 t2 3 4 5 6 7} +do_test alter-1.5 { + execsql { + DELETE FROM objlist; + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_master WHERE NAME!='objlist'; + } + catchsql { + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_temp_master WHERE NAME!='objlist'; + } + execsql { + SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name; + } +} [list \ + table -t1- -t1- \ + index t1i1 -t1- \ + index t1i2 -t1- \ + table T2 T2 \ + index i3 T2 \ + index {sqlite_autoindex_T2_1} T2 \ + index {sqlite_autoindex_T2_2} T2 \ + table {TempTab} {TempTab} \ + index i2 {TempTab} \ + index {sqlite_autoindex_TempTab_1} {TempTab} \ + ] + +# Make sure the changes persist after restarting the
database. +# (The TEMP table will not persist, of course.) +# +ifcapable tempdb { + do_test alter-1.6 { + db close + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + execsql { + CREATE TEMP TABLE objlist(type, name, tbl_name); + INSERT INTO objlist SELECT type, name, tbl_name FROM sqlite_master; + INSERT INTO objlist + SELECT type, name, tbl_name FROM sqlite_temp_master + WHERE NAME!='objlist'; + SELECT type, name, tbl_name FROM objlist + ORDER BY tbl_name, type desc, name; + } + } [list \ + table -t1- -t1- \ + index t1i1 -t1- \ + index t1i2 -t1- \ + table T2 T2 \ + index i3 T2 \ + index {sqlite_autoindex_T2_1} T2 \ + index {sqlite_autoindex_T2_2} T2 \ + ] +} else { + execsql { + DROP TABLE TempTab; + } +} + +# Make sure the ALTER TABLE statements work with the +# non-callback API +# +do_test alter-1.7 { + stepsql $DB { + ALTER TABLE [-t1-] RENAME to [*t1*]; + ALTER TABLE T2 RENAME TO []; + } + execsql { + DELETE FROM objlist; + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_master WHERE NAME!='objlist'; + } + catchsql { + INSERT INTO objlist SELECT type, name, tbl_name + FROM sqlite_temp_master WHERE NAME!='objlist'; + } + execsql { + SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name; + } +} [list \ + table *t1* *t1* \ + index t1i1 *t1* \ + index t1i2 *t1* \ + table \ + index i3 \ + index {sqlite_autoindex__1} \ + index {sqlite_autoindex__2} \ + ] + +# Check that ALTER TABLE works on attached databases. +# +do_test alter-1.8.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + } +} {} +do_test alter-1.8.2 { + execsql { + CREATE TABLE t4(a PRIMARY KEY, b, c); + CREATE TABLE aux.t4(a PRIMARY KEY, b, c); + CREATE INDEX i4 ON t4(b); + CREATE INDEX aux.i4 ON t4(b); + } +} {} +do_test alter-1.8.3 { + execsql { + INSERT INTO t4 VALUES('main', 'main', 'main'); + INSERT INTO aux.t4 VALUES('aux', 'aux', 'aux'); + SELECT * FROM t4 WHERE a = 'main'; + } +} {main main main} +do_test alter-1.8.4 { + execsql { + ALTER TABLE t4 RENAME TO t5; + SELECT * FROM t4 WHERE a = 'aux'; + } +} {aux aux aux} +do_test alter-1.8.5 { + execsql { + SELECT * FROM t5; + } +} {main main main} +do_test alter-1.8.6 { + execsql { + SELECT * FROM t5 WHERE b = 'main'; + } +} {main main main} +do_test alter-1.8.7 { + execsql { + ALTER TABLE aux.t4 RENAME TO t5; + SELECT * FROM aux.t5 WHERE b = 'aux'; + } +} {aux aux aux} + +do_test alter-1.9.1 { + execsql { + CREATE TABLE tbl1 (a, b, c); + INSERT INTO tbl1 VALUES(1, 2, 3); + } +} {} +do_test alter-1.9.2 { + execsql { + SELECT * FROM tbl1; + } +} {1 2 3} +do_test alter-1.9.3 { + execsql { + ALTER TABLE tbl1 RENAME TO tbl2; + SELECT * FROM tbl2; + } +} {1 2 3} +do_test alter-1.9.4 { + execsql { + DROP TABLE tbl2; + } +} {} + +# Test error messages +# +do_test alter-2.1 { + catchsql { + ALTER TABLE none RENAME TO hi; + } +} {1 {no such table: none}} +do_test alter-2.2 { + execsql { + CREATE TABLE t3(p,q,r); + } + catchsql { + ALTER TABLE [] RENAME TO t3; + } +} {1 {there is already another table or index with this name: t3}} +do_test alter-2.3 { + catchsql { + ALTER TABLE [] RENAME TO i3; + } +} {1 {there is already another table or index with this name: i3}} +do_test alter-2.4 { + catchsql { + ALTER TABLE SqLiTe_master RENAME TO master; + } +} {1 {table sqlite_master may not be altered}} +do_test alter-2.5 { + catchsql { + ALTER TABLE t3 RENAME TO sqlite_t3; + } +} {1 {object name reserved for internal use: sqlite_t3}} +do_test alter-2.6 { + catchsql { + ALTER TABLE 
t3 ADD COLUMN (ALTER TABLE t3 ADD COLUMN); + } +} {1 {near "(": syntax error}} + +# If this compilation does not include triggers, omit the alter-3.* tests. +ifcapable trigger { + +#----------------------------------------------------------------------- +# Tests alter-3.* test ALTER TABLE on tables that have triggers. +# +# alter-3.1.*: ALTER TABLE with triggers. +# alter-3.2.*: Test that the ON keyword cannot be used as a database, +# table or column name unquoted. This is done because part of the +# ALTER TABLE code (specifically the implementation of SQL function +# "sqlite_alter_trigger") will break in this case. +# alter-3.3.*: ALTER TABLE with TEMP triggers (todo). +# + +# An SQL user-function for triggers to fire, so that we know they +# are working. +proc trigfunc {args} { + set ::TRIGGER $args +} +db func trigfunc trigfunc + +do_test alter-3.1.0 { + execsql { + CREATE TABLE t6(a, b, c); + CREATE TRIGGER trig1 AFTER INSERT ON t6 BEGIN + SELECT trigfunc('trig1', new.a, new.b, new.c); + END; + } +} {} +do_test alter-3.1.1 { + execsql { + INSERT INTO t6 VALUES(1, 2, 3); + } + set ::TRIGGER +} {trig1 1 2 3} +do_test alter-3.1.2 { + execsql { + ALTER TABLE t6 RENAME TO t7; + INSERT INTO t7 VALUES(4, 5, 6); + } + set ::TRIGGER +} {trig1 4 5 6} +do_test alter-3.1.3 { + execsql { + DROP TRIGGER trig1; + } +} {} +do_test alter-3.1.4 { + execsql { + CREATE TRIGGER trig2 AFTER INSERT ON main.t7 BEGIN + SELECT trigfunc('trig2', new.a, new.b, new.c); + END; + INSERT INTO t7 VALUES(1, 2, 3); + } + set ::TRIGGER +} {trig2 1 2 3} +do_test alter-3.1.5 { + execsql { + ALTER TABLE t7 RENAME TO t8; + INSERT INTO t8 VALUES(4, 5, 6); + } + set ::TRIGGER +} {trig2 4 5 6} +do_test alter-3.1.6 { + execsql { + DROP TRIGGER trig2; + } +} {} +do_test alter-3.1.7 { + execsql { + CREATE TRIGGER trig3 AFTER INSERT ON main.'t8'BEGIN + SELECT trigfunc('trig3', new.a, new.b, new.c); + END; + INSERT INTO t8 VALUES(1, 2, 3); + } + set ::TRIGGER +} {trig3 1 2 3} +do_test alter-3.1.8 { + execsql { + ALTER TABLE t8 RENAME TO t9; + INSERT INTO t9 VALUES(4, 5, 6); + } + set ::TRIGGER +} {trig3 4 5 6} + +# Make sure "ON" cannot be used as a database, table or column name without +# quoting. Otherwise the sqlite_alter_trigger() function might not work. 
+file delete -force test3.db +file delete -force test3.db-journal +do_test alter-3.2.1 { + catchsql { + ATTACH 'test3.db' AS ON; + } +} {1 {near "ON": syntax error}} +do_test alter-3.2.2 { + catchsql { + ATTACH 'test3.db' AS 'ON'; + } +} {0 {}} +do_test alter-3.2.3 { + catchsql { + CREATE TABLE ON.t1(a, b, c); + } +} {1 {near "ON": syntax error}} +do_test alter-3.2.4 { + catchsql { + CREATE TABLE 'ON'.t1(a, b, c); + } +} {0 {}} +do_test alter-3.2.4 { + catchsql { + CREATE TABLE 'ON'.ON(a, b, c); + } +} {1 {near "ON": syntax error}} +do_test alter-3.2.5 { + catchsql { + CREATE TABLE 'ON'.'ON'(a, b, c); + } +} {0 {}} +do_test alter-3.2.6 { + catchsql { + CREATE TABLE t10(a, ON, c); + } +} {1 {near "ON": syntax error}} +do_test alter-3.2.7 { + catchsql { + CREATE TABLE t10(a, 'ON', c); + } +} {0 {}} +do_test alter-3.2.8 { + catchsql { + CREATE TRIGGER trig4 AFTER INSERT ON ON BEGIN SELECT 1; END; + } +} {1 {near "ON": syntax error}} +do_test alter-3.2.9 { + catchsql { + CREATE TRIGGER 'on'.trig4 AFTER INSERT ON 'ON' BEGIN SELECT 1; END; + } +} {0 {}} +do_test alter-3.2.10 { + execsql { + DROP TABLE t10; + } +} {} + +do_test alter-3.3.1 { + execsql [subst { + CREATE TABLE tbl1(a, b, c); + CREATE $::temp TRIGGER trig1 AFTER INSERT ON tbl1 BEGIN + SELECT trigfunc('trig1', new.a, new.b, new.c); + END; + }] +} {} +do_test alter-3.3.2 { + execsql { + INSERT INTO tbl1 VALUES('a', 'b', 'c'); + } + set ::TRIGGER +} {trig1 a b c} +do_test alter-3.3.3 { + execsql { + ALTER TABLE tbl1 RENAME TO tbl2; + INSERT INTO tbl2 VALUES('d', 'e', 'f'); + } + set ::TRIGGER +} {trig1 d e f} +do_test alter-3.3.4 { + execsql [subst { + CREATE $::temp TRIGGER trig2 AFTER UPDATE ON tbl2 BEGIN + SELECT trigfunc('trig2', new.a, new.b, new.c); + END; + }] +} {} +do_test alter-3.3.5 { + execsql { + ALTER TABLE tbl2 RENAME TO tbl3; + INSERT INTO tbl3 VALUES('g', 'h', 'i'); + } + set ::TRIGGER +} {trig1 g h i} +do_test alter-3.3.6 { + execsql { + UPDATE tbl3 SET a = 'G' where a = 'g'; + } + set ::TRIGGER +} {trig2 G h i} +do_test alter-3.3.7 { + execsql { + DROP TABLE tbl3; + } +} {} +ifcapable tempdb { + do_test alter-3.3.8 { + execsql { + SELECT * FROM sqlite_temp_master WHERE type = 'trigger'; + } + } {} +} + +} ;# ifcapable trigger + +# If the build does not include AUTOINCREMENT fields, omit alter-4.*. +ifcapable autoinc { + +do_test alter-4.1 { + execsql { + CREATE TABLE tbl1(a INTEGER PRIMARY KEY AUTOINCREMENT); + INSERT INTO tbl1 VALUES(10); + } +} {} +do_test alter-4.2 { + execsql { + INSERT INTO tbl1 VALUES(NULL); + SELECT a FROM tbl1; + } +} {10 11} +do_test alter-4.3 { + execsql { + ALTER TABLE tbl1 RENAME TO tbl2; + DELETE FROM tbl2; + INSERT INTO tbl2 VALUES(NULL); + SELECT a FROM tbl2; + } +} {12} +do_test alter-4.4 { + execsql { + DROP TABLE tbl2; + } +} {} + +} ;# ifcapable autoinc + +# Test that it is Ok to execute an ALTER TABLE immediately after +# opening a database. 
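+# That is, the rename is issued on a connection that has not yet read the
+# database schema, so the ALTER TABLE itself has to trigger the initial
+# schema load (alter-5.2 below does this through a second handle, db2).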
+do_test alter-5.1 { + execsql { + CREATE TABLE tbl1(a, b, c); + INSERT INTO tbl1 VALUES('x', 'y', 'z'); + } +} {} +do_test alter-5.2 { + sqlite3 db2 test.db + execsql { + ALTER TABLE tbl1 RENAME TO tbl2; + SELECT * FROM tbl2; + } db2 +} {x y z} +do_test alter-5.3 { + db2 close +} {} + +foreach tblname [execsql { + SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite%' +}] { + execsql "DROP TABLE \"$tblname\"" +} + +set ::tbl_name "abc\uABCDdef" +do_test alter-6.1 { + string length $::tbl_name +} {7} +do_test alter-6.2 { + execsql " + CREATE TABLE ${tbl_name}(a, b, c); + " + set ::oid [execsql {SELECT max(oid) FROM sqlite_master}] + execsql " + SELECT sql FROM sqlite_master WHERE oid = $::oid; + " +} "{CREATE TABLE ${::tbl_name}(a, b, c)}" +execsql " + SELECT * FROM ${::tbl_name} +" +set ::tbl_name2 "abcXdef" +do_test alter-6.3 { + execsql " + ALTER TABLE $::tbl_name RENAME TO $::tbl_name2 + " + execsql " + SELECT sql FROM sqlite_master WHERE oid = $::oid + " +} "{CREATE TABLE '${::tbl_name2}'(a, b, c)}" +do_test alter-6.4 { + execsql " + ALTER TABLE $::tbl_name2 RENAME TO $::tbl_name + " + execsql " + SELECT sql FROM sqlite_master WHERE oid = $::oid + " +} "{CREATE TABLE '${::tbl_name}'(a, b, c)}" +set ::col_name ghi\1234\jkl +do_test alter-6.5 { + execsql " + ALTER TABLE $::tbl_name ADD COLUMN $::col_name VARCHAR + " + execsql " + SELECT sql FROM sqlite_master WHERE oid = $::oid + " +} "{CREATE TABLE '${::tbl_name}'(a, b, c, $::col_name VARCHAR)}" +set ::col_name2 B\3421\A +do_test alter-6.6 { + db close + sqlite3 db test.db + execsql " + ALTER TABLE $::tbl_name ADD COLUMN $::col_name2 + " + execsql " + SELECT sql FROM sqlite_master WHERE oid = $::oid + " +} "{CREATE TABLE '${::tbl_name}'(a, b, c, $::col_name VARCHAR, $::col_name2)}" +do_test alter-6.7 { + execsql " + INSERT INTO ${::tbl_name} VALUES(1, 2, 3, 4, 5); + SELECT $::col_name, $::col_name2 FROM $::tbl_name; + " +} {4 5} + +# Ticket #1665: Make sure ALTER TABLE ADD COLUMN works on a table +# that includes a COLLATE clause. +# +do_test alter-7.1 { + execsql { + CREATE TABLE t1(a TEXT COLLATE BINARY); + ALTER TABLE t1 ADD COLUMN b INTEGER COLLATE NOCASE; + INSERT INTO t1 VALUES(1,'-2'); + INSERT INTO t1 VALUES(5.4e-8,'5.4e-8'); + SELECT typeof(a), a, typeof(b), b FROM t1; + } +} {text 1 integer -2 text 5.4e-8 real 5.4e-08} + +# Make sure that when a column is added by ALTER TABLE ADD COLUMN and has +# a default value that the default value is used by aggregate functions. +# +do_test alter-8.1 { + execsql { + CREATE TABLE t2(a INTEGER); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + ALTER TABLE t2 ADD COLUMN b INTEGER DEFAULT 9; + SELECT sum(b) FROM t2; + } +} {27} +do_test alter-8.2 { + execsql { + SELECT a, sum(b) FROM t2 GROUP BY a; + } +} {1 18 2 9} + +#-------------------------------------------------------------------------- +# alter-9.X - Special test: Make sure the sqlite_rename_trigger() and +# rename_table() functions do not crash when handed bad input. +# +ifcapable trigger { + do_test alter-9.1 { + execsql {SELECT SQLITE_RENAME_TRIGGER(0,0)} + } {{}} +} +do_test alter-9.2 { + execsql { + SELECT SQLITE_RENAME_TABLE(0,0); + SELECT SQLITE_RENAME_TABLE(10,20); + SELECT SQLITE_RENAME_TABLE("foo", "foo"); + } +} {{} {} {}} + +#------------------------------------------------------------------------ +# alter-10.X - Make sure ALTER TABLE works with multi-byte UTF-8 characters +# in the names. 
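+# (Tcl escapes such as \u1234 and \uABCD denote single characters that encode
+# as three bytes each in UTF-8, which is why alter-6.1 above sees a length of
+# 7 for "abc\uABCDdef" even though the stored name occupies more than 7 bytes.)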
+# +do_test alter-10.1 { + execsql "CREATE TABLE xyz(x UNIQUE)" + execsql "ALTER TABLE xyz RENAME TO xyz\u1234abc" + execsql {SELECT name FROM sqlite_master WHERE name LIKE 'xyz%'} +} [list xyz\u1234abc] +do_test alter-10.2 { + execsql {SELECT name FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'} +} [list sqlite_autoindex_xyz\u1234abc_1] +do_test alter-10.3 { + execsql "ALTER TABLE xyz\u1234abc RENAME TO xyzabc" + execsql {SELECT name FROM sqlite_master WHERE name LIKE 'xyz%'} +} [list xyzabc] +do_test alter-10.4 { + execsql {SELECT name FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'} +} [list sqlite_autoindex_xyzabc_1] + +do_test alter-11.1 { + sqlite3_exec db {CREATE TABLE t11(%c6%c6)} + execsql { + ALTER TABLE t11 ADD COLUMN abc; + } + catchsql { + ALTER TABLE t11 ADD COLUMN abc; + } +} {1 {duplicate column name: abc}} +set isutf16 [regexp 16 [db one {PRAGMA encoding}]] +if {!$isutf16} { + do_test alter-11.2 { + execsql {INSERT INTO t11 VALUES(1,2)} + sqlite3_exec db {SELECT %c6%c6 AS xyz, abc FROM t11} + } {0 {xyz abc 1 2}} +} +do_test alter-11.3 { + sqlite3_exec db {CREATE TABLE t11b("%81%82%83" text)} + execsql { + ALTER TABLE t11b ADD COLUMN abc; + } + catchsql { + ALTER TABLE t11b ADD COLUMN abc; + } +} {1 {duplicate column name: abc}} +if {!$isutf16} { + do_test alter-11.4 { + execsql {INSERT INTO t11b VALUES(3,4)} + sqlite3_exec db {SELECT %81%82%83 AS xyz, abc FROM t11b} + } {0 {xyz abc 3 4}} + do_test alter-11.5 { + sqlite3_exec db {SELECT [%81%82%83] AS xyz, abc FROM t11b} + } {0 {xyz abc 3 4}} + do_test alter-11.6 { + sqlite3_exec db {SELECT "%81%82%83" AS xyz, abc FROM t11b} + } {0 {xyz abc 3 4}} +} +do_test alter-11.7 { + sqlite3_exec db {CREATE TABLE t11c(%81%82%83 text)} + execsql { + ALTER TABLE t11c ADD COLUMN abc; + } + catchsql { + ALTER TABLE t11c ADD COLUMN abc; + } +} {1 {duplicate column name: abc}} +if {!$isutf16} { + do_test alter-11.8 { + execsql {INSERT INTO t11c VALUES(5,6)} + sqlite3_exec db {SELECT %81%82%83 AS xyz, abc FROM t11c} + } {0 {xyz abc 5 6}} + do_test alter-11.9 { + sqlite3_exec db {SELECT [%81%82%83] AS xyz, abc FROM t11c} + } {0 {xyz abc 5 6}} + do_test alter-11.10 { + sqlite3_exec db {SELECT "%81%82%83" AS xyz, abc FROM t11c} + } {0 {xyz abc 5 6}} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/alter2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/alter2.test new file mode 100644 index 0000000..9c73992 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/alter2.test @@ -0,0 +1,440 @@ +# 2005 February 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing that SQLite can handle a subtle +# file format change that may be used in the future to implement +# "ALTER TABLE ... ADD COLUMN". +# +# $Id: alter2.test,v 1.9 2007/09/12 17:01:45 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We have to have pragmas in order to do this test +ifcapable {!pragma} return + +# These tests do not work if there is a codec. The +# btree_open command does not know how to handle codecs. 
+# +#if {[catch {sqlite3 -has_codec} r] || $r} return + +# The file format change affects the way row-records stored in tables (but +# not indices) are interpreted. Before version 3.1.3, a row-record for a +# table with N columns was guaranteed to contain exactly N fields. As +# of version 3.1.3, the record may contain up to N fields. In this case +# the M fields that are present are the values for the left-most M +# columns. The (N-M) rightmost columns contain NULL. +# +# If any records in the database contain less fields than their table +# has columns, then the file-format meta value should be set to (at least) 2. +# + +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + set bt [btree_open test.db 10 0] + btree_begin_transaction $bt + set meta [btree_get_meta $bt] + lset meta 2 $newval ;# File format + lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie + eval "btree_update_meta $bt $meta" + btree_commit $bt + btree_close $bt +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + set bt [btree_open $fname 10 0] + set meta [btree_get_meta $bt] + btree_close $bt + lindex $meta 2 +} + +# This procedure sets the SQL statement stored for table $tbl in the +# sqlite_master table of file 'test.db' to $sql. Also set the file format +# to the supplied value. This is 2 if the added column has a default that is +# NULL, or 3 otherwise. +# +proc alter_table {tbl sql {file_format 2}} { + sqlite3 dbat test.db + set s [string map {' ''} $sql] + set t [string map {' ''} $tbl] + dbat eval [subst { + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET sql = '$s' WHERE name = '$t' AND type = 'table'; + PRAGMA writable_schema = 0; + }] + dbat close + set_file_format 2 +} + +#----------------------------------------------------------------------- +# Some basic tests to make sure short rows are handled. +# +do_test alter2-1.1 { + execsql { + CREATE TABLE abc(a, b); + INSERT INTO abc VALUES(1, 2); + INSERT INTO abc VALUES(3, 4); + INSERT INTO abc VALUES(5, 6); + } +} {} +do_test alter2-1.2 { + # ALTER TABLE abc ADD COLUMN c; + alter_table abc {CREATE TABLE abc(a, b, c);} +} {} +do_test alter2-1.3 { + execsql { + SELECT * FROM abc; + } +} {1 2 {} 3 4 {} 5 6 {}} +do_test alter2-1.4 { + execsql { + UPDATE abc SET c = 10 WHERE a = 1; + SELECT * FROM abc; + } +} {1 2 10 3 4 {} 5 6 {}} +do_test alter2-1.5 { + execsql { + CREATE INDEX abc_i ON abc(c); + } +} {} +do_test alter2-1.6 { + execsql { + SELECT c FROM abc ORDER BY c; + } +} {{} {} 10} +do_test alter2-1.7 { + execsql { + SELECT * FROM abc WHERE c = 10; + } +} {1 2 10} +do_test alter2-1.8 { + execsql { + SELECT sum(a), c FROM abc GROUP BY c; + } +} {8 {} 1 10} +do_test alter2-1.9 { + # ALTER TABLE abc ADD COLUMN d; + alter_table abc {CREATE TABLE abc(a, b, c, d);} + execsql { SELECT * FROM abc; } + execsql { + UPDATE abc SET d = 11 WHERE c IS NULL AND a<4; + SELECT * FROM abc; + } +} {1 2 10 {} 3 4 {} 11 5 6 {} {}} +do_test alter2-1.10 { + execsql { + SELECT typeof(d) FROM abc; + } +} {null integer null} +do_test alter2-1.99 { + execsql { + DROP TABLE abc; + } +} {} + +#----------------------------------------------------------------------- +# Test that views work when the underlying table structure is changed. 
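+# Because the view below is defined with "SELECT *", its column list is
+# resolved against the table definition in force when the schema is loaded,
+# so after the simulated ADD COLUMN the extra column is expected to appear in
+# the view output as NULL for the pre-existing rows (alter2-2.2).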
+# +ifcapable view { + do_test alter2-2.1 { + execsql { + CREATE TABLE abc2(a, b, c); + INSERT INTO abc2 VALUES(1, 2, 10); + INSERT INTO abc2 VALUES(3, 4, NULL); + INSERT INTO abc2 VALUES(5, 6, NULL); + CREATE VIEW abc2_v AS SELECT * FROM abc2; + SELECT * FROM abc2_v; + } + } {1 2 10 3 4 {} 5 6 {}} + do_test alter2-2.2 { + # ALTER TABLE abc ADD COLUMN d; + alter_table abc2 {CREATE TABLE abc2(a, b, c, d);} + execsql { + SELECT * FROM abc2_v; + } + } {1 2 10 {} 3 4 {} {} 5 6 {} {}} + do_test alter2-2.3 { + execsql { + DROP TABLE abc2; + DROP VIEW abc2_v; + } + } {} +} + +#----------------------------------------------------------------------- +# Test that triggers work when a short row is copied to the old.* +# trigger pseudo-table. +# +ifcapable trigger { + do_test alter2-3.1 { + execsql { + CREATE TABLE abc3(a, b); + CREATE TABLE blog(o, n); + CREATE TRIGGER abc3_t AFTER UPDATE OF b ON abc3 BEGIN + INSERT INTO blog VALUES(old.b, new.b); + END; + } + } {} + do_test alter2-3.2 { + execsql { + INSERT INTO abc3 VALUES(1, 4); + UPDATE abc3 SET b = 2 WHERE b = 4; + SELECT * FROM blog; + } + } {4 2} + do_test alter2-3.3 { + execsql { + INSERT INTO abc3 VALUES(3, 4); + INSERT INTO abc3 VALUES(5, 6); + } + alter_table abc3 {CREATE TABLE abc3(a, b, c);} + execsql { + SELECT * FROM abc3; + } + } {1 2 {} 3 4 {} 5 6 {}} + do_test alter2-3.4 { + execsql { + UPDATE abc3 SET b = b*2 WHERE a<4; + SELECT * FROM abc3; + } + } {1 4 {} 3 8 {} 5 6 {}} + do_test alter2-3.5 { + execsql { + SELECT * FROM blog; + } + } {4 2 2 4 4 8} + + do_test alter2-3.6 { + execsql { + CREATE TABLE clog(o, n); + CREATE TRIGGER abc3_t2 AFTER UPDATE OF c ON abc3 BEGIN + INSERT INTO clog VALUES(old.c, new.c); + END; + UPDATE abc3 SET c = a*2; + SELECT * FROM clog; + } + } {{} 2 {} 6 {} 10} +} + +#--------------------------------------------------------------------- +# Check that an error occurs if the database is upgraded to a file +# format that SQLite does not support (in this case 5). Note: The +# file format is checked each time the schema is read, so changing the +# file format requires incrementing the schema cookie. +# +do_test alter2-4.1 { + set_file_format 5 +} {} +do_test alter2-4.2 { + catchsql { + SELECT * FROM sqlite_master; + } +} {1 {unsupported file format}} +do_test alter2-4.3 { + sqlite3_errcode $::DB +} {SQLITE_ERROR} +do_test alter2-4.4 { + set ::DB [sqlite3_connection_pointer db] + catchsql { + SELECT * FROM sqlite_master; + } +} {1 {unsupported file format}} +do_test alter2-4.5 { + sqlite3_errcode $::DB +} {SQLITE_ERROR} + +#--------------------------------------------------------------------- +# Check that executing VACUUM on a file with file-format version 2 +# resets the file format to 1. +# +set default_file_format [expr $SQLITE_DEFAULT_FILE_FORMAT==4 ? 4 : 1] +ifcapable vacuum { + do_test alter2-5.1 { + set_file_format 2 + get_file_format + } {2} + do_test alter2-5.2 { + execsql { + VACUUM; + } + } {} + do_test alter2-5.3 { + get_file_format + } $default_file_format +} + +#--------------------------------------------------------------------- +# Test that when a database with file-format 2 is opened, new +# databases are still created with file-format 1. 
+# +do_test alter2-6.1 { + db close + set_file_format 2 + sqlite3 db test.db + set ::DB [sqlite3_connection_pointer db] + get_file_format +} {2} +do_test alter2-6.2 { + file delete -force test2.db-journal + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1(a, b); + } + get_file_format test2.db +} $default_file_format +do_test alter2-6.3 { + execsql { + CREATE TABLE t1(a, b); + } + get_file_format +} {2} + +#--------------------------------------------------------------------- +# Test that types and values for columns added with default values +# other than NULL work with SELECT statements. +# +do_test alter2-7.1 { + execsql { + DROP TABLE t1; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + SELECT * FROM t1; + } +} {1 2 3 4} +do_test alter2-7.2 { + set sql {CREATE TABLE t1(a, b DEFAULT '123', c INTEGER DEFAULT '123')} + alter_table t1 $sql 3 + execsql { + SELECT * FROM t1 LIMIT 1; + } +} {1 123 123} +do_test alter2-7.3 { + execsql { + SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1; + } +} {1 integer 123 text 123 integer} +do_test alter2-7.4 { + execsql { + SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1; + } +} {1 integer 123 text 123 integer} +do_test alter2-7.5 { + set sql {CREATE TABLE t1(a, b DEFAULT -123.0, c VARCHAR(10) default 5)} + alter_table t1 $sql 3 + execsql { + SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1; + } +} {1 integer -123 integer 5 text} + +#----------------------------------------------------------------------- +# Test that UPDATE trigger tables work with default values, and that when +# a row is updated the default values are correctly transfered to the +# new row. +# +ifcapable trigger { +db function set_val {set ::val} + do_test alter2-8.1 { + execsql { + CREATE TRIGGER trig1 BEFORE UPDATE ON t1 BEGIN + SELECT set_val( + old.b||' '||typeof(old.b)||' '||old.c||' '||typeof(old.c)||' '|| + new.b||' '||typeof(new.b)||' '||new.c||' '||typeof(new.c) + ); + END; + } + list + } {} +} +do_test alter2-8.2 { + execsql { + UPDATE t1 SET c = 10 WHERE a = 1; + SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1; + } +} {1 integer -123 integer 10 text} +ifcapable trigger { + do_test alter2-8.3 { + set ::val + } {-123 integer 5 text -123 integer 10 text} +} + +#----------------------------------------------------------------------- +# Test that DELETE trigger tables work with default values, and that when +# a row is updated the default values are correctly transfered to the +# new row. +# +ifcapable trigger { + do_test alter2-9.1 { + execsql { + CREATE TRIGGER trig2 BEFORE DELETE ON t1 BEGIN + SELECT set_val( + old.b||' '||typeof(old.b)||' '||old.c||' '||typeof(old.c) + ); + END; + } + list + } {} + do_test alter2-9.2 { + execsql { + DELETE FROM t1 WHERE a = 2; + } + set ::val + } {-123 integer 5 text} +} + +#----------------------------------------------------------------------- +# Test creating an index on a column added with a default value. 
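+# The rows written before the simulated ADD COLUMN carry no physical value
+# for the new column, so both building the index and probing it, roughly
+#
+#   CREATE INDEX i1 ON t2(b);
+#   SELECT a FROM t2 WHERE b = X'ABCD';
+#
+# have to fall back to the declared default for every pre-existing row.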
+# +ifcapable bloblit { + do_test alter2-10.1 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES('a'); + INSERT INTO t2 VALUES('b'); + INSERT INTO t2 VALUES('c'); + INSERT INTO t2 VALUES('d'); + } + alter_table t2 {CREATE TABLE t2(a, b DEFAULT X'ABCD', c DEFAULT NULL);} 3 + catchsql { + SELECT * FROM sqlite_master; + } + execsql { + SELECT quote(a), quote(b), quote(c) FROM t2 LIMIT 1; + } + } {'a' X'ABCD' NULL} + do_test alter2-10.2 { + execsql { + CREATE INDEX i1 ON t2(b); + SELECT a FROM t2 WHERE b = X'ABCD'; + } + } {a b c d} + do_test alter2-10.3 { + execsql { + DELETE FROM t2 WHERE a = 'c'; + SELECT a FROM t2 WHERE b = X'ABCD'; + } + } {a b d} + do_test alter2-10.4 { + execsql { + SELECT count(b) FROM t2 WHERE b = X'ABCD'; + } + } {3} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/alter3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/alter3.test new file mode 100644 index 0000000..a9aa02e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/alter3.test @@ -0,0 +1,396 @@ +# 2005 February 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing that SQLite can handle a subtle +# file format change that may be used in the future to implement +# "ALTER TABLE ... ADD COLUMN". +# +# $Id: alter3.test,v 1.9 2006/01/17 09:35:02 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl + +# If SQLITE_OMIT_ALTERTABLE is defined, omit this file. +ifcapable !altertable { + finish_test + return +} + +# Determine if there is a codec available on this test. +# +if {[catch {sqlite3 -has_codec} r] || $r} { + set has_codec 1 +} else { + set has_codec 0 +} + + +# Test Organisation: +# ------------------ +# +# alter3-1.*: Test that ALTER TABLE correctly modifies the CREATE TABLE sql. +# alter3-2.*: Test error messages. +# alter3-3.*: Test adding columns with default value NULL. +# alter3-4.*: Test adding columns with default values other than NULL. +# alter3-5.*: Test adding columns to tables in ATTACHed databases. +# alter3-6.*: Test that temp triggers are not accidentally dropped. +# alter3-7.*: Test that VACUUM resets the file-format. +# + +# This procedure returns the value of the file-format in file 'test.db'. 
+# +proc get_file_format {{fname test.db}} { + set bt [btree_open $fname 10 0] + set meta [btree_get_meta $bt] + btree_close $bt + lindex $meta 2 +} + +do_test alter3-1.1 { + execsql { + CREATE TABLE abc(a, b, c); + SELECT sql FROM sqlite_master; + } +} {{CREATE TABLE abc(a, b, c)}} +do_test alter3-1.2 { + execsql {ALTER TABLE abc ADD d INTEGER;} + execsql { + SELECT sql FROM sqlite_master; + } +} {{CREATE TABLE abc(a, b, c, d INTEGER)}} +do_test alter3-1.3 { + execsql {ALTER TABLE abc ADD e} + execsql { + SELECT sql FROM sqlite_master; + } +} {{CREATE TABLE abc(a, b, c, d INTEGER, e)}} +do_test alter3-1.4 { + execsql { + CREATE TABLE main.t1(a, b); + ALTER TABLE t1 ADD c; + SELECT sql FROM sqlite_master WHERE tbl_name = 't1'; + } +} {{CREATE TABLE t1(a, b, c)}} +do_test alter3-1.5 { + execsql { + ALTER TABLE t1 ADD d CHECK (a>d); + SELECT sql FROM sqlite_master WHERE tbl_name = 't1'; + } +} {{CREATE TABLE t1(a, b, c, d CHECK (a>d))}} +ifcapable foreignkey { + do_test alter3-1.6 { + execsql { + CREATE TABLE t2(a, b, UNIQUE(a, b)); + ALTER TABLE t2 ADD c REFERENCES t1(c) ; + SELECT sql FROM sqlite_master WHERE tbl_name = 't2' AND type = 'table'; + } + } {{CREATE TABLE t2(a, b, c REFERENCES t1(c), UNIQUE(a, b))}} +} +do_test alter3-1.7 { + execsql { + CREATE TABLE t3(a, b, UNIQUE(a, b)); + ALTER TABLE t3 ADD COLUMN c VARCHAR(10, 20); + SELECT sql FROM sqlite_master WHERE tbl_name = 't3' AND type = 'table'; + } +} {{CREATE TABLE t3(a, b, c VARCHAR(10, 20), UNIQUE(a, b))}} +do_test alter3-1.99 { + catchsql { + # May not exist if foriegn-keys are omitted at compile time. + DROP TABLE t2; + } + execsql { + DROP TABLE abc; + DROP TABLE t1; + DROP TABLE t3; + } +} {} + +do_test alter3-2.1 { + execsql { + CREATE TABLE t1(a, b); + } + catchsql { + ALTER TABLE t1 ADD c PRIMARY KEY; + } +} {1 {Cannot add a PRIMARY KEY column}} +do_test alter3-2.2 { + catchsql { + ALTER TABLE t1 ADD c UNIQUE + } +} {1 {Cannot add a UNIQUE column}} +do_test alter3-2.3 { + catchsql { + ALTER TABLE t1 ADD b VARCHAR(10) + } +} {1 {duplicate column name: b}} +do_test alter3-2.3 { + catchsql { + ALTER TABLE t1 ADD c NOT NULL; + } +} {1 {Cannot add a NOT NULL column with default value NULL}} +do_test alter3-2.4 { + catchsql { + ALTER TABLE t1 ADD c NOT NULL DEFAULT 10; + } +} {0 {}} +ifcapable view { + do_test alter3-2.5 { + execsql { + CREATE VIEW v1 AS SELECT * FROM t1; + } + catchsql { + alter table v1 add column d; + } + } {1 {Cannot add a column to a view}} +} +do_test alter3-2.6 { + catchsql { + alter table t1 add column d DEFAULT CURRENT_TIME; + } +} {1 {Cannot add a column with non-constant default}} +do_test alter3-2.99 { + execsql { + DROP TABLE t1; + } +} {} + +do_test alter3-3.1 { + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 300); + SELECT * FROM t1; + } +} {1 100 2 300} +do_test alter3-3.1 { + execsql { + PRAGMA schema_version = 10; + } +} {} +do_test alter3-3.2 { + execsql { + ALTER TABLE t1 ADD c; + SELECT * FROM t1; + } +} {1 100 {} 2 300 {}} +if {!$has_codec} { + do_test alter3-3.3 { + get_file_format + } {3} +} +ifcapable schema_version { + do_test alter3-3.4 { + execsql { + PRAGMA schema_version; + } + } {11} +} + +do_test alter3-4.1 { + db close + file delete -force test.db + set ::DB [sqlite3 db test.db] + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 300); + SELECT * FROM t1; + } +} {1 100 2 300} +do_test alter3-4.1 { + execsql { + PRAGMA schema_version = 20; + } +} {} +do_test alter3-4.2 { + execsql { + 
ALTER TABLE t1 ADD c DEFAULT 'hello world'; + SELECT * FROM t1; + } +} {1 100 {hello world} 2 300 {hello world}} +if {!$has_codec} { + do_test alter3-4.3 { + get_file_format + } {3} +} +ifcapable schema_version { + do_test alter3-4.4 { + execsql { + PRAGMA schema_version; + } + } {21} +} +do_test alter3-4.99 { + execsql { + DROP TABLE t1; + } +} {} + +do_test alter3-5.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1 AS SELECT * FROM t1; + PRAGMA aux.schema_version = 30; + SELECT sql FROM aux.sqlite_master; + } +} {{CREATE TABLE t1(a,b)}} +do_test alter3-5.2 { + execsql { + ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128); + SELECT sql FROM aux.sqlite_master; + } +} {{CREATE TABLE t1(a,b, c VARCHAR(128))}} +do_test alter3-5.3 { + execsql { + SELECT * FROM aux.t1; + } +} {1 one {} 2 two {}} +ifcapable schema_version { + do_test alter3-5.4 { + execsql { + PRAGMA aux.schema_version; + } + } {31} +} +if {!$has_codec} { + do_test alter3-5.5 { + list [get_file_format test2.db] [get_file_format] + } {2 3} +} +do_test alter3-5.6 { + execsql { + ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000; + SELECT sql FROM aux.sqlite_master; + } +} {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}} +do_test alter3-5.7 { + execsql { + SELECT * FROM aux.t1; + } +} {1 one {} 1000 2 two {} 1000} +ifcapable schema_version { + do_test alter3-5.8 { + execsql { + PRAGMA aux.schema_version; + } + } {32} +} +do_test alter3-5.9 { + execsql { + SELECT * FROM t1; + } +} {1 one 2 two} +do_test alter3-5.99 { + execsql { + DROP TABLE aux.t1; + DROP TABLE t1; + } +} {} + +#---------------------------------------------------------------- +# Test that the table schema is correctly reloaded when a column +# is added to a table. +# +ifcapable trigger&&tempdb { + do_test alter3-6.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE log(trig, a, b); + + CREATE TRIGGER t1_a AFTER INSERT ON t1 BEGIN + INSERT INTO log VALUES('a', new.a, new.b); + END; + CREATE TEMP TRIGGER t1_b AFTER INSERT ON t1 BEGIN + INSERT INTO log VALUES('b', new.a, new.b); + END; + + INSERT INTO t1 VALUES(1, 2); + SELECT * FROM log; + } + } {b 1 2 a 1 2} + do_test alter3-6.2 { + execsql { + ALTER TABLE t1 ADD COLUMN c DEFAULT 'c'; + INSERT INTO t1(a, b) VALUES(3, 4); + SELECT * FROM log; + } + } {b 1 2 a 1 2 b 3 4 a 3 4} +} + +if {!$has_codec} { + ifcapable vacuum { + do_test alter3-7.1 { + execsql { + VACUUM; + } + get_file_format + } {1} + do_test alter3-7.2 { + execsql { + CREATE TABLE abc(a, b, c); + ALTER TABLE abc ADD d DEFAULT NULL; + } + get_file_format + } {2} + do_test alter3-7.3 { + execsql { + ALTER TABLE abc ADD e DEFAULT 10; + } + get_file_format + } {3} + do_test alter3-7.4 { + execsql { + ALTER TABLE abc ADD f DEFAULT NULL; + } + get_file_format + } {3} + do_test alter3-7.5 { + execsql { + VACUUM; + } + get_file_format + } {1} + } +} + +# Ticket #1183 - Make sure adding columns to large tables does not cause +# memory corruption (as was the case before this bug was fixed). 
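+# Each ADD COLUMN updates the CREATE TABLE text stored in sqlite_master, so
+# after roughly a hundred additions the statement is many times its original
+# length; alter3-8.* simply checks that the final text comes back intact.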
+do_test alter3-8.1 { + execsql { + CREATE TABLE t4(c1); + } +} {} +set ::sql "" +do_test alter3-8.2 { + set cols c1 + for {set i 2} {$i < 100} {incr i} { + execsql " + ALTER TABLE t4 ADD c$i + " + lappend cols c$i + } + set ::sql "CREATE TABLE t4([join $cols {, }])" + list +} {} +do_test alter3-8.2 { + execsql { + SELECT sql FROM sqlite_master WHERE name = 't4'; + } +} [list $::sql] + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/altermalloc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/altermalloc.test new file mode 100644 index 0000000..8c6c8d8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/altermalloc.test @@ -0,0 +1,68 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ALTER TABLE statement and +# specifically out-of-memory conditions within that command. +# +# $Id: altermalloc.test,v 1.7 2007/10/03 08:46:45 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_OMIT_ALTERTABLE is defined, omit this file. +ifcapable !altertable||!memdebug { + finish_test + return +} + +source $testdir/malloc_common.tcl + +do_malloc_test altermalloc-1 -tclprep { + db close +} -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 +} -sqlbody { + CREATE TABLE t1(a int); + ALTER TABLE t1 ADD COLUMN b INTEGER DEFAULT NULL; + ALTER TABLE t1 ADD COLUMN c TEXT DEFAULT 'default-text'; + ALTER TABLE t1 RENAME TO t2; +} + +# Test malloc() failure on an ALTER TABLE on a virtual table. +# +ifcapable vtab { + do_malloc_test altermalloc-vtab -tclprep { + sqlite3 db2 test.db + sqlite3_extended_result_codes db2 1 + register_echo_module [sqlite3_connection_pointer db2] + db2 eval { + CREATE TABLE t1(a, b VARCHAR, c INTEGER); + CREATE VIRTUAL TABLE t1echo USING echo(t1); + } + db2 close + + register_echo_module [sqlite3_connection_pointer db] + } -tclbody { + set rc [catch {db eval { ALTER TABLE t1echo RENAME TO t1_echo }} msg] + if {$msg eq "vtable constructor failed: t1echo"} { + set msg "out of memory" + } + if {$rc} { + error $msg + } + } +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/analyze.test b/libraries/sqlite/unix/sqlite-3.5.1/test/analyze.test new file mode 100644 index 0000000..ded287e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/analyze.test @@ -0,0 +1,257 @@ +# 2005 July 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# This file implements tests for the ANALYZE command. +# +# $Id: analyze.test,v 1.5 2005/09/10 22:40:54 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# There is nothing to test if ANALYZE is disable for this build. +# +ifcapable {!analyze} { + finish_test + return +} + +# Basic sanity checks. 
+# +do_test analyze-1.1 { + catchsql { + ANALYZE no_such_table + } +} {1 {no such table: no_such_table}} +do_test analyze-1.2 { + execsql { + SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1' + } +} {0} +do_test analyze-1.3 { + catchsql { + ANALYZE no_such_db.no_such_table + } +} {1 {unknown database no_such_db}} +do_test analyze-1.4 { + execsql { + SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1' + } +} {0} +do_test analyze-1.5.1 { + catchsql { + ANALYZE + } +} {0 {}} +do_test analyze-1.5.2 { + catchsql { + PRAGMA empty_result_callbacks=1; + ANALYZE + } +} {0 {}} +do_test analyze-1.6 { + execsql { + SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1' + } +} {1} +do_test analyze-1.7 { + execsql { + SELECT * FROM sqlite_stat1 + } +} {} +do_test analyze-1.8 { + catchsql { + ANALYZE main + } +} {0 {}} +do_test analyze-1.9 { + execsql { + SELECT * FROM sqlite_stat1 + } +} {} +do_test analyze-1.10 { + catchsql { + CREATE TABLE t1(a,b); + ANALYZE main.t1; + } +} {0 {}} +do_test analyze-1.11 { + execsql { + SELECT * FROM sqlite_stat1 + } +} {} +do_test analyze-1.12 { + catchsql { + ANALYZE t1; + } +} {0 {}} +do_test analyze-1.13 { + execsql { + SELECT * FROM sqlite_stat1 + } +} {} + +# Create some indices that can be analyzed. But do not yet add +# data. Without data in the tables, no analysis is done. +# +do_test analyze-2.1 { + execsql { + CREATE INDEX t1i1 ON t1(a); + ANALYZE main.t1; + SELECT * FROM sqlite_stat1 ORDER BY idx; + } +} {} +do_test analyze-2.2 { + execsql { + CREATE INDEX t1i2 ON t1(b); + ANALYZE t1; + SELECT * FROM sqlite_stat1 ORDER BY idx; + } +} {} +do_test analyze-2.3 { + execsql { + CREATE INDEX t1i3 ON t1(a,b); + ANALYZE main; + SELECT * FROM sqlite_stat1 ORDER BY idx; + } +} {} + +# Start adding data to the table. Verify that the analysis +# is done correctly. 
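+# Reading the expected results: each sqlite_stat1 row stores, for one index,
+# the total number of entries followed by the average number of rows sharing
+# each column prefix, rounded up.  By analyze-3.3, t1 holds five rows whose
+# "a" values are 1,1,1,1,2, so the index on (a) gets the stat "5 3": five
+# entries, and ceil(5/2)=3 rows per distinct value of a on average.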
+# +do_test analyze-3.1 { + execsql { + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(1,3); + ANALYZE main.t1; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {2 2} t1i2 {2 1} t1i3 {2 2 1}} +do_test analyze-3.2 { + execsql { + INSERT INTO t1 VALUES(1,4); + INSERT INTO t1 VALUES(1,5); + ANALYZE t1; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {4 4} t1i2 {4 1} t1i3 {4 4 1}} +do_test analyze-3.3 { + execsql { + INSERT INTO t1 VALUES(2,5); + ANALYZE main; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1}} +do_test analyze-3.4 { + execsql { + CREATE TABLE t2 AS SELECT * FROM t1; + CREATE INDEX t2i1 ON t2(a); + CREATE INDEX t2i2 ON t2(b); + CREATE INDEX t2i3 ON t2(a,b); + ANALYZE; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2} t2i3 {5 3 1}} +do_test analyze-3.5 { + execsql { + DROP INDEX t2i3; + ANALYZE t1; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2} t2i3 {5 3 1}} +do_test analyze-3.6 { + execsql { + ANALYZE t2; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2}} +do_test analyze-3.7 { + execsql { + DROP INDEX t2i2; + ANALYZE t2; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3}} +do_test analyze-3.8 { + execsql { + CREATE TABLE t3 AS SELECT a, b, rowid AS c, 'hi' AS d FROM t1; + CREATE INDEX t3i1 ON t3(a); + CREATE INDEX t3i2 ON t3(a,b,c,d); + CREATE INDEX t3i3 ON t3(d,b,c,a); + DROP TABLE t1; + DROP TABLE t2; + ANALYZE; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}} + +# Try corrupting the sqlite_stat1 table and make sure the +# database is still able to function. +# +do_test analyze-4.0 { + sqlite3 db2 test.db + db2 eval { + CREATE TABLE t4(x,y,z); + CREATE INDEX t4i1 ON t4(x); + CREATE INDEX t4i2 ON t4(y); + INSERT INTO t4 SELECT a,b,c FROM t3; + } + db2 close + db close + sqlite3 db test.db + execsql { + ANALYZE; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1} t4i1 {5 3} t4i2 {5 2}} +do_test analyze-4.1 { + execsql { + PRAGMA writable_schema=on; + INSERT INTO sqlite_stat1 VALUES(null,null,null); + PRAGMA writable_schema=off; + } + db close + sqlite3 db test.db + execsql { + SELECT * FROM t4 WHERE x=1234; + } +} {} +do_test analyze-4.2 { + execsql { + PRAGMA writable_schema=on; + DELETE FROM sqlite_stat1; + INSERT INTO sqlite_stat1 VALUES('t4','t4i1','nonsense'); + INSERT INTO sqlite_stat1 VALUES('t4','t4i2','120897349817238741092873198273409187234918720394817209384710928374109827172901827349871928741910'); + PRAGMA writable_schema=off; + } + db close + sqlite3 db test.db + execsql { + SELECT * FROM t4 WHERE x=1234; + } +} {} + +# This test corrupts the database file so it must be the last test +# in the series. 
+# +do_test analyze-99.1 { + execsql { + PRAGMA writable_schema=on; + UPDATE sqlite_master SET sql='nonsense'; + } + db close + sqlite3 db test.db + catchsql { + ANALYZE + } +} {1 {malformed database schema - near "nonsense": syntax error}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/async.test b/libraries/sqlite/unix/sqlite-3.5.1/test/async.test new file mode 100644 index 0000000..268a7ca --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/async.test @@ -0,0 +1,78 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. +# +# $Id: async.test,v 1.12 2007/09/14 16:20:01 danielk1977 Exp $ + + +if {[catch {sqlite3async_enable}]} { + # The async logic is not built into this system + return +} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test +proc finish_test {} { + catch {db close} + catch {db2 close} + catch {db3 close} +} +set ISQUICK 1 + +set INCLUDE { + insert.test + insert2.test + insert3.test + lock.test + lock2.test + lock3.test + select1.test + select2.test + select3.test + select4.test + trans.test +} + +# Enable asynchronous IO. +sqlite3async_enable 1 + +rename do_test really_do_test +proc do_test {name args} { + uplevel really_do_test async_io-$name $args + sqlite3async_start + sqlite3async_halt idle + sqlite3async_wait +} + +foreach testfile [lsort -dictionary [glob $testdir/*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + + # Make sure everything is flushed through. This is because [source]ing + # the next test file will delete the database file on disk (using + # [file delete]). If the asynchronous backend still has the file + # open, it will become confused. + # + sqlite3async_halt idle + sqlite3async_start + sqlite3async_wait +} + +# Flush the write-queue and disable asynchronous IO. This should ensure +# all allocated memory is cleaned up. +set sqlite3async_trace 1 +sqlite3async_halt idle +sqlite3async_start +sqlite3async_wait +sqlite3async_enable 0 +set sqlite3async_trace 0 + +really_finish_test +rename really_do_test do_test +rename really_finish_test finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/async2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/async2.test new file mode 100644 index 0000000..cfbe06a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/async2.test @@ -0,0 +1,127 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: async2.test,v 1.8 2007/09/05 16:54:41 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if { + [info commands sqlite3async_enable]=="" || + [info command sqlite3_memdebug_fail]=="" +} { + # The async logic is not built into this system + puts "Skipping async2 tests: not compiled with required features" + finish_test + return +} + +# Enable asynchronous IO. 
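+# (This happens inside the fault-injection loop below: each ioerr/malloc
+# iteration turns the async VFS on with sqlite3async_enable 1, flushes the
+# write queue, and turns it off again before the database is inspected.)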
+ +set setup_script { + CREATE TABLE counter(c); + INSERT INTO counter(c) VALUES (1); +} + +set sql_script { + BEGIN; + UPDATE counter SET c = 2; + CREATE TABLE t1(a PRIMARY KEY, b, c); + CREATE TABLE t2(a PRIMARY KEY, b, c); + COMMIT; + + BEGIN; + UPDATE counter SET c = 3; + INSERT INTO t1 VALUES('abcdefghij', 'four', 'score'); + INSERT INTO t2 VALUES('klmnopqrst', 'and', 'seven'); + COMMIT; + + UPDATE counter SET c = 'FIN'; +} + +db close + +foreach err [list ioerr malloc-transient malloc-persistent] { + set ::go 1 + for {set n 1} {$::go} {incr n} { + set ::sqlite_io_error_pending 0 + sqlite3_memdebug_fail -1 + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql $::setup_script + db close + + sqlite3async_enable 1 + sqlite3 db test.db + + switch -- $err { + ioerr { set ::sqlite_io_error_pending $n } + malloc-persistent { sqlite3_memdebug_fail $n -repeat 1 } + malloc-transient { sqlite3_memdebug_fail $n -repeat 0 } + } + + catchsql $::sql_script + db close + + sqlite3async_halt idle + sqlite3async_start + sqlite3async_wait + sqlite3async_enable 0 + + set ::sqlite_io_error_pending 0 + sqlite3_memdebug_fail -1 + + sqlite3 db test.db + set c [db eval {SELECT c FROM counter LIMIT 1}] + switch -- $c { + 1 { + do_test async-$err-1.1.$n { + execsql { + SELECT name FROM sqlite_master; + } + } {counter} + } + 2 { + do_test async-$err-1.2.$n.1 { + execsql { + SELECT * FROM t1; + } + } {} + do_test async-$err-1.2.$n.2 { + execsql { + SELECT * FROM t2; + } + } {} + } + 3 { + do_test async-$err-1.3.$n.1 { + execsql { + SELECT * FROM t1; + } + } {abcdefghij four score} + do_test async-$err-1.3.$n.2 { + execsql { + SELECT * FROM t2; + } + } {klmnopqrst and seven} + } + FIN { + set ::go 0 + } + } + + db close + } +} + +catch {db close} +sqlite3async_halt idle +sqlite3async_start +sqlite3async_wait + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/async3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/async3.test new file mode 100644 index 0000000..0434a28 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/async3.test @@ -0,0 +1,73 @@ +# 2007 September 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the code in test_async.c. +# Specifically, it tests that the xFullPathname() method of +# of the asynchronous vfs works correctly. 
+# +# $Id: async3.test,v 1.2 2007/09/05 16:54:41 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if { [info commands sqlite3async_enable]=="" } { + # The async logic is not built into this system + puts "Skipping async3 tests: not compiled with required features" + finish_test + return +} + +db close +sqlite3async_enable 1 +sqlite3async_start + +set paths { + chocolate/banana/vanilla/file.db + chocolate//banana/vanilla/file.db + chocolate/./banana//vanilla/file.db + chocolate/banana/./vanilla/file.db + chocolate/banana/../banana/vanilla/file.db + chocolate/banana/./vanilla/extra_bit/../file.db +} + +do_test async3-1.0 { + file mkdir [file join chocolate banana vanilla] + file delete -force chocolate/banana/vanilla/file.db + file delete -force chocolate/banana/vanilla/file.db-journal +} {} + +do_test async3-1.1 { + sqlite3 db chocolate/banana/vanilla/file.db + execsql { + CREATE TABLE abc(a, b, c); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + } +} {} + +set N 2 +foreach p $paths { + sqlite3 db2 $p + do_test async3-1.$N.1 { + execsql {SELECT * FROM abc} db2 + } {} + do_test async3-1.$N.2 { + catchsql {INSERT INTO abc VALUES(4, 5, 6)} db2 + } {1 {database is locked}} + db2 close + incr N +} + +db close +sqlite3async_halt idle +sqlite3async_wait +sqlite3async_enable 0 +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/attach.test b/libraries/sqlite/unix/sqlite-3.5.1/test/attach.test new file mode 100644 index 0000000..2fb6d77 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/attach.test @@ -0,0 +1,750 @@ +# 2003 April 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ATTACH and DETACH commands +# and related functionality. 
+# +# $Id: attach.test,v 1.46 2007/09/12 17:01:45 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +for {set i 2} {$i<=15} {incr i} { + file delete -force test$i.db + file delete -force test$i.db-journal +} + +set btree_trace 0 +do_test attach-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1; + } +} {1 2 3 4} +do_test attach-1.2 { + sqlite3 db2 test2.db + execsql { + CREATE TABLE t2(x,y); + INSERT INTO t2 VALUES(1,'x'); + INSERT INTO t2 VALUES(2,'y'); + SELECT * FROM t2; + } db2 +} {1 x 2 y} +do_test attach-1.3 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + SELECT * FROM two.t2; + } +} {1 x 2 y} +do_test attach-1.4 { + execsql { + SELECT * FROM t2; + } +} {1 x 2 y} +do_test attach-1.5 { +btree_breakpoint + execsql { + DETACH DATABASE two; + SELECT * FROM t1; + } +} {1 2 3 4} +do_test attach-1.6 { + catchsql { + SELECT * FROM t2; + } +} {1 {no such table: t2}} +do_test attach-1.7 { + catchsql { + SELECT * FROM two.t2; + } +} {1 {no such table: two.t2}} +do_test attach-1.8 { + catchsql { + ATTACH DATABASE 'test3.db' AS three; + } +} {0 {}} +do_test attach-1.9 { + catchsql { + SELECT * FROM three.sqlite_master; + } +} {0 {}} +do_test attach-1.10 { + catchsql { + DETACH DATABASE [three]; + } +} {0 {}} +do_test attach-1.11 { + execsql { + ATTACH 'test.db' AS db2; + ATTACH 'test.db' AS db3; + ATTACH 'test.db' AS db4; + ATTACH 'test.db' AS db5; + ATTACH 'test.db' AS db6; + ATTACH 'test.db' AS db7; + ATTACH 'test.db' AS db8; + ATTACH 'test.db' AS db9; + } +} {} +proc db_list {db} { + set list {} + foreach {idx name file} [execsql {PRAGMA database_list} $db] { + lappend list $idx $name + } + return $list +} +ifcapable schema_pragmas { +do_test attach-1.11b { + db_list db +} {0 main 2 db2 3 db3 4 db4 5 db5 6 db6 7 db7 8 db8 9 db9} +} ;# ifcapable schema_pragmas +do_test attach-1.12 { + catchsql { + ATTACH 'test.db' as db2; + } +} {1 {database db2 is already in use}} +do_test attach-1.13 { + catchsql { + ATTACH 'test.db' as db5; + } +} {1 {database db5 is already in use}} +do_test attach-1.14 { + catchsql { + ATTACH 'test.db' as db9; + } +} {1 {database db9 is already in use}} +do_test attach-1.15 { + catchsql { + ATTACH 'test.db' as main; + } +} {1 {database main is already in use}} +ifcapable tempdb { + do_test attach-1.16 { + catchsql { + ATTACH 'test.db' as temp; + } + } {1 {database temp is already in use}} +} +do_test attach-1.17 { + catchsql { + ATTACH 'test.db' as MAIN; + } +} {1 {database MAIN is already in use}} +do_test attach-1.18 { + catchsql { + ATTACH 'test.db' as db10; + ATTACH 'test.db' as db11; + } +} {0 {}} +do_test attach-1.19 { + catchsql { + ATTACH 'test.db' as db12; + } +} {1 {too many attached databases - max 10}} +do_test attach-1.20.1 { + execsql { + DETACH db5; + } +} {} +ifcapable schema_pragmas { +do_test attach-1.20.2 { + db_list db +} {0 main 2 db2 3 db3 4 db4 5 db6 6 db7 7 db8 8 db9 9 db10 10 db11} +} ;# ifcapable schema_pragmas +integrity_check attach-1.20.3 +ifcapable tempdb { + execsql {select * from sqlite_temp_master} +} +do_test attach-1.21 { + catchsql { + ATTACH 'test.db' as db12; + } +} {0 {}} +do_test attach-1.22 { + catchsql { + ATTACH 'test.db' as db13; + } +} {1 {too many attached databases - max 10}} +do_test attach-1.23 { + catchsql { + DETACH "db14"; + } +} {1 {no such database: db14}} +do_test attach-1.24 { + catchsql { + DETACH db12; + } +} {0 {}} +do_test attach-1.25 { + catchsql { + DETACH db12; + } +} {1 {no such database: db12}} 
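+# (The "too many attached databases - max 10" errors above reflect the
+# default SQLITE_MAX_ATTACHED compile-time limit; a build configured with a
+# different limit would report a different maximum.)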
+do_test attach-1.26 { + catchsql { + DETACH main; + } +} {1 {cannot detach database main}} + +ifcapable tempdb { + do_test attach-1.27 { + catchsql { + DETACH Temp; + } + } {1 {cannot detach database Temp}} +} else { + do_test attach-1.27 { + catchsql { + DETACH Temp; + } + } {1 {no such database: Temp}} +} + +do_test attach-1.28 { + catchsql { + DETACH db11; + DETACH db10; + DETACH db9; + DETACH db8; + DETACH db7; + DETACH db6; + DETACH db4; + DETACH db3; + DETACH db2; + } +} {0 {}} +ifcapable schema_pragmas { + ifcapable tempdb { + do_test attach-1.29 { + db_list db + } {0 main 1 temp} + } else { + do_test attach-1.29 { + db_list db + } {0 main} + } +} ;# ifcapable schema_pragmas + +ifcapable {trigger} { # Only do the following tests if triggers are enabled +do_test attach-2.1 { + execsql { + CREATE TABLE tx(x1,x2,y1,y2); + CREATE TRIGGER r1 AFTER UPDATE ON t2 FOR EACH ROW BEGIN + INSERT INTO tx(x1,x2,y1,y2) VALUES(OLD.x,NEW.x,OLD.y,NEW.y); + END; + SELECT * FROM tx; + } db2; +} {} +do_test attach-2.2 { + execsql { + UPDATE t2 SET x=x+10; + SELECT * FROM tx; + } db2; +} {1 11 x x 2 12 y y} +do_test attach-2.3 { + execsql { + CREATE TABLE tx(x1,x2,y1,y2); + SELECT * FROM tx; + } +} {} +do_test attach-2.4 { + execsql { + ATTACH 'test2.db' AS db2; + } +} {} +do_test attach-2.5 { + execsql { + UPDATE db2.t2 SET x=x+10; + SELECT * FROM db2.tx; + } +} {1 11 x x 2 12 y y 11 21 x x 12 22 y y} +do_test attach-2.6 { + execsql { + SELECT * FROM main.tx; + } +} {} +do_test attach-2.7 { + execsql { + SELECT type, name, tbl_name FROM db2.sqlite_master; + } +} {table t2 t2 table tx tx trigger r1 t2} + +ifcapable schema_pragmas&&tempdb { + do_test attach-2.8 { + db_list db + } {0 main 1 temp 2 db2} +} ;# ifcapable schema_pragmas&&tempdb +ifcapable schema_pragmas&&!tempdb { + do_test attach-2.8 { + db_list db + } {0 main 2 db2} +} ;# ifcapable schema_pragmas&&!tempdb + +do_test attach-2.9 { + execsql { + CREATE INDEX i2 ON t2(x); + SELECT * FROM t2 WHERE x>5; + } db2 +} {21 x 22 y} +do_test attach-2.10 { + execsql { + SELECT type, name, tbl_name FROM sqlite_master; + } db2 +} {table t2 t2 table tx tx trigger r1 t2 index i2 t2} +#do_test attach-2.11 { +# catchsql { +# SELECT * FROM t2 WHERE x>5; +# } +#} {1 {database schema has changed}} +ifcapable schema_pragmas { + ifcapable tempdb { + do_test attach-2.12 { + db_list db + } {0 main 1 temp 2 db2} + } else { + do_test attach-2.12 { + db_list db + } {0 main 2 db2} + } +} ;# ifcapable schema_pragmas +do_test attach-2.13 { + catchsql { + SELECT * FROM t2 WHERE x>5; + } +} {0 {21 x 22 y}} +do_test attach-2.14 { + execsql { + SELECT type, name, tbl_name FROM sqlite_master; + } +} {table t1 t1 table tx tx} +do_test attach-2.15 { + execsql { + SELECT type, name, tbl_name FROM db2.sqlite_master; + } +} {table t2 t2 table tx tx trigger r1 t2 index i2 t2} +do_test attach-2.16 { + db close + sqlite3 db test.db + execsql { + ATTACH 'test2.db' AS db2; + SELECT type, name, tbl_name FROM db2.sqlite_master; + } +} {table t2 t2 table tx tx trigger r1 t2 index i2 t2} +} ;# End of ifcapable {trigger} + +do_test attach-3.1 { + db close + db2 close + sqlite3 db test.db + sqlite3 db2 test2.db + execsql { + SELECT * FROM t1 + } +} {1 2 3 4} + +# If we are testing a version of the code that lacks trigger support, +# adjust the database contents so that they are the same if triggers +# had been enabled. 
+ifcapable {!trigger} { + db2 eval { + DELETE FROM t2; + INSERT INTO t2 VALUES(21, 'x'); + INSERT INTO t2 VALUES(22, 'y'); + CREATE TABLE tx(x1,x2,y1,y2); + INSERT INTO tx VALUES(1, 11, 'x', 'x'); + INSERT INTO tx VALUES(2, 12, 'y', 'y'); + INSERT INTO tx VALUES(11, 21, 'x', 'x'); + INSERT INTO tx VALUES(12, 22, 'y', 'y'); + CREATE INDEX i2 ON t2(x); + } +} + +do_test attach-3.2 { + catchsql { + SELECT * FROM t2 + } +} {1 {no such table: t2}} +do_test attach-3.3 { + catchsql { + ATTACH DATABASE 'test2.db' AS db2; + SELECT * FROM t2 + } +} {0 {21 x 22 y}} + +# Even though 'db' has started a transaction, it should not yet have +# a lock on test2.db so 'db2' should be readable. +do_test attach-3.4 { + execsql BEGIN + catchsql { + SELECT * FROM t2; + } db2; +} {0 {21 x 22 y}} + +# Reading from test2.db from db within a transaction should not +# prevent test2.db from being read by db2. +do_test attach-3.5 { + execsql {SELECT * FROM t2} +btree_breakpoint + catchsql { + SELECT * FROM t2; + } db2; +} {0 {21 x 22 y}} + +# Making a change to test2.db through db causes test2.db to get +# a reserved lock. It should still be accessible through db2. +do_test attach-3.6 { + execsql { + UPDATE t2 SET x=x+1 WHERE x=50; + } + catchsql { + SELECT * FROM t2; + } db2; +} {0 {21 x 22 y}} + +do_test attach-3.7 { + execsql ROLLBACK + execsql {SELECT * FROM t2} db2 +} {21 x 22 y} + +# Start transactions on both db and db2. Once again, just because +# we make a change to test2.db using db2, only a RESERVED lock is +# obtained, so test2.db should still be readable using db. +# +do_test attach-3.8 { + execsql BEGIN + execsql BEGIN db2 + execsql {UPDATE t2 SET x=0 WHERE 0} db2 + catchsql {SELECT * FROM t2} +} {0 {21 x 22 y}} + +# It is also still accessible from db2. +do_test attach-3.9 { + catchsql {SELECT * FROM t2} db2 +} {0 {21 x 22 y}} + +do_test attach-3.10 { + execsql {SELECT * FROM t1} +} {1 2 3 4} + +do_test attach-3.11 { + catchsql {UPDATE t1 SET a=a+1} +} {0 {}} +do_test attach-3.12 { + execsql {SELECT * FROM t1} +} {2 2 4 4} + +# db2 has a RESERVED lock on test2.db, so db cannot write to any tables +# in test2.db. +do_test attach-3.13 { + catchsql {UPDATE t2 SET x=x+1 WHERE x=50} +} {1 {database is locked}} + +# Change for version 3. Transaction is no longer rolled back +# for a locked database. +execsql {ROLLBACK} + +# db is able to reread its schema because db2 still only holds a +# reserved lock. +do_test attach-3.14 { + catchsql {SELECT * FROM t1} +} {0 {1 2 3 4}} +do_test attach-3.15 { + execsql COMMIT db2 + execsql {SELECT * FROM t1} +} {1 2 3 4} + +#set btree_trace 1 + +# Ticket #323 +do_test attach-4.1 { + execsql {DETACH db2} + db2 close + sqlite3 db2 test2.db + execsql { + CREATE TABLE t3(x,y); + CREATE UNIQUE INDEX t3i1 ON t3(x); + INSERT INTO t3 VALUES(1,2); + SELECT * FROM t3; + } db2; +} {1 2} +do_test attach-4.2 { + execsql { + CREATE TABLE t3(a,b); + CREATE UNIQUE INDEX t3i1b ON t3(a); + INSERT INTO t3 VALUES(9,10); + SELECT * FROM t3; + } +} {9 10} +do_test attach-4.3 { + execsql { + ATTACH DATABASE 'test2.db' AS db2; + SELECT * FROM db2.t3; + } +} {1 2} +do_test attach-4.4 { + execsql { + SELECT * FROM main.t3; + } +} {9 10} +do_test attach-4.5 { + execsql { + INSERT INTO db2.t3 VALUES(9,10); + SELECT * FROM db2.t3; + } +} {1 2 9 10} +execsql { + DETACH db2; +} +ifcapable {trigger} { + do_test attach-4.6 { + execsql { + CREATE TABLE t4(x); + CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN + INSERT INTO t4 VALUES('db2.' 
|| NEW.x); + END; + INSERT INTO t3 VALUES(6,7); + SELECT * FROM t4; + } db2 + } {db2.6} + do_test attach-4.7 { + execsql { + CREATE TABLE t4(y); + CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN + INSERT INTO t4 VALUES('main.' || NEW.a); + END; + INSERT INTO main.t3 VALUES(11,12); + SELECT * FROM main.t4; + } + } {main.11} +} +ifcapable {!trigger} { + # When we do not have trigger support, set up the table like they + # would have been had triggers been there. The tests that follow need + # this setup. + execsql { + CREATE TABLE t4(x); + INSERT INTO t3 VALUES(6,7); + INSERT INTO t4 VALUES('db2.6'); + INSERT INTO t4 VALUES('db2.13'); + } db2 + execsql { + CREATE TABLE t4(y); + INSERT INTO main.t3 VALUES(11,12); + INSERT INTO t4 VALUES('main.11'); + } +} + + +# This one is tricky. On the UNION ALL select, we have to make sure +# the schema for both main and db2 is valid before starting to execute +# the first query of the UNION ALL. If we wait to test the validity of +# the schema for main until after the first query has run, that test will +# fail and the query will abort but we will have already output some +# results. When the query is retried, the results will be repeated. +# +ifcapable compound { +do_test attach-4.8 { + execsql { + ATTACH DATABASE 'test2.db' AS db2; + INSERT INTO db2.t3 VALUES(13,14); + SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4; + } +} {db2.6 db2.13 main.11} + +do_test attach-4.9 { + ifcapable {!trigger} {execsql {INSERT INTO main.t4 VALUES('main.15')}} + execsql { + INSERT INTO main.t3 VALUES(15,16); + SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4; + } +} {db2.6 db2.13 main.11 main.15} +} ;# ifcapable compound + +ifcapable !compound { + ifcapable {!trigger} {execsql {INSERT INTO main.t4 VALUES('main.15')}} + execsql { + ATTACH DATABASE 'test2.db' AS db2; + INSERT INTO db2.t3 VALUES(13,14); + INSERT INTO main.t3 VALUES(15,16); + } +} ;# ifcapable !compound + +ifcapable view { +do_test attach-4.10 { + execsql { + DETACH DATABASE db2; + } + execsql { + CREATE VIEW v3 AS SELECT x*100+y FROM t3; + SELECT * FROM v3; + } db2 +} {102 910 607 1314} +do_test attach-4.11 { + execsql { + CREATE VIEW v3 AS SELECT a*100+b FROM t3; + SELECT * FROM v3; + } +} {910 1112 1516} +do_test attach-4.12 { + execsql { + ATTACH DATABASE 'test2.db' AS db2; + SELECT * FROM db2.v3; + } +} {102 910 607 1314} +do_test attach-4.13 { + execsql { + SELECT * FROM main.v3; + } +} {910 1112 1516} +} ;# ifcapable view + +# Tests for the sqliteFix...() routines in attach.c +# +ifcapable {trigger} { +do_test attach-5.1 { + db close + sqlite3 db test.db + db2 close + file delete -force test2.db + sqlite3 db2 test2.db + catchsql { + ATTACH DATABASE 'test.db' AS orig; + CREATE TRIGGER r1 AFTER INSERT ON orig.t1 BEGIN + SELECT 'no-op'; + END; + } db2 +} {1 {trigger r1 cannot reference objects in database orig}} +do_test attach-5.2 { + catchsql { + CREATE TABLE t5(x,y); + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT 'no-op'; + END; + } db2 +} {0 {}} +do_test attach-5.3 { + catchsql { + DROP TRIGGER r5; + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT 'no-op' FROM orig.t1; + END; + } db2 +} {1 {trigger r5 cannot reference objects in database orig}} +ifcapable tempdb { + do_test attach-5.4 { + catchsql { + CREATE TEMP TABLE t6(p,q,r); + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT 'no-op' FROM temp.t6; + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} +} +ifcapable subquery { + do_test attach-5.5 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 
BEGIN + SELECT 'no-op' || (SELECT * FROM temp.t6); + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} + do_test attach-5.6 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT 'no-op' FROM t1 WHERE x<(SELECT min(x) FROM temp.t6); + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} + do_test attach-5.7 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT 'no-op' FROM t1 GROUP BY 1 HAVING x<(SELECT min(x) FROM temp.t6); + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} + do_test attach-5.7 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + SELECT max(1,x,(SELECT min(x) FROM temp.t6)) FROM t1; + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} + do_test attach-5.8 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + INSERT INTO t1 VALUES((SELECT min(x) FROM temp.t6),5); + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} + do_test attach-5.9 { + catchsql { + CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN + DELETE FROM t1 WHERE x<(SELECT min(x) FROM temp.t6); + END; + } db2 + } {1 {trigger r5 cannot reference objects in database temp}} +} ;# endif subquery +} ;# endif trigger + +# Check to make sure we get a sensible error if unable to open +# the file that we are trying to attach. +# +do_test attach-6.1 { + catchsql { + ATTACH DATABASE 'no-such-file' AS nosuch; + } +} {0 {}} +if {$tcl_platform(platform)=="unix"} { + do_test attach-6.2 { + sqlite3 dbx cannot-read + dbx eval {CREATE TABLE t1(a,b,c)} + dbx close + file attributes cannot-read -permission 0000 + if {[file writable cannot-read]} { + puts "\n**** Tests do not work when run as root ****" + file delete -force cannot-read + exit 1 + } + catchsql { + ATTACH DATABASE 'cannot-read' AS noread; + } + } {1 {unable to open database: cannot-read}} + file delete -force cannot-read +} + +# Check the error message if we try to access a database that has +# not been attached. +do_test attach-6.3 { + catchsql { + CREATE TABLE no_such_db.t1(a, b, c); + } +} {1 {unknown database no_such_db}} +for {set i 2} {$i<=15} {incr i} { + catch {db$i close} +} +db close +file delete -force test2.db +file delete -force no-such-file + +ifcapable subquery { + do_test attach-7.1 { + file delete -force test.db test.db-journal + sqlite3 db test.db + catchsql { + DETACH RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY + REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL ) + } + } {1 {invalid name: "RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY + REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL )"}} +} +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/attach2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/attach2.test new file mode 100644 index 0000000..6e79f29 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/attach2.test @@ -0,0 +1,389 @@ +# 2003 July 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ATTACH and DETACH commands +# and related functionality. 
+# +# $Id: attach2.test,v 1.36 2007/08/10 19:46:14 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Ticket #354 +# +# Databases test.db and test2.db contain identical schemas. Make +# sure we can attach test2.db from test.db. +# +do_test attach2-1.1 { + db eval { + CREATE TABLE t1(a,b); + CREATE INDEX x1 ON t1(a); + } + file delete -force test2.db + file delete -force test2.db-journal + sqlite3 db2 test2.db + db2 eval { + CREATE TABLE t1(a,b); + CREATE INDEX x1 ON t1(a); + } + catchsql { + ATTACH 'test2.db' AS t2; + } +} {0 {}} + +# Ticket #514 +# +proc db_list {db} { + set list {} + foreach {idx name file} [execsql {PRAGMA database_list} $db] { + lappend list $idx $name + } + return $list +} +db eval {DETACH t2} +do_test attach2-2.1 { + # lock test2.db then try to attach it. This is no longer an error because + # db2 just RESERVES the database. It does not obtain a write-lock until + # we COMMIT. + db2 eval {BEGIN} + db2 eval {UPDATE t1 SET a = 0 WHERE 0} + catchsql { + ATTACH 'test2.db' AS t2; + } +} {0 {}} +ifcapable schema_pragmas { +do_test attach2-2.2 { + # make sure test2.db did get attached. + db_list db +} {0 main 2 t2} +} ;# ifcapable schema_pragmas +db2 eval {COMMIT} + +do_test attach2-2.5 { + # Make sure we can read test2.db from db + catchsql { + SELECT name FROM t2.sqlite_master; + } +} {0 {t1 x1}} +do_test attach2-2.6 { + # lock test2.db and try to read from it. This should still work because + # the lock is only a RESERVED lock which does not prevent reading. + # + db2 eval BEGIN + db2 eval {UPDATE t1 SET a = 0 WHERE 0} + catchsql { + SELECT name FROM t2.sqlite_master; + } +} {0 {t1 x1}} +do_test attach2-2.7 { + # but we can still read from test1.db even though test2.db is locked. + catchsql { + SELECT name FROM main.sqlite_master; + } +} {0 {t1 x1}} +do_test attach2-2.8 { + # start a transaction on test.db even though test2.db is locked. + catchsql { + BEGIN; + INSERT INTO t1 VALUES(8,9); + } +} {0 {}} +do_test attach2-2.9 { + execsql { + SELECT * FROM t1 + } +} {8 9} +do_test attach2-2.10 { + # now try to write to test2.db. the write should fail + catchsql { + INSERT INTO t2.t1 VALUES(1,2); + } +} {1 {database is locked}} +do_test attach2-2.11 { + # when the write failed in the previous test, the transaction should + # have rolled back. + # + # Update for version 3: A transaction is no longer rolled back if a + # database is found to be busy. + execsql {rollback} + db2 eval ROLLBACK + execsql { + SELECT * FROM t1 + } +} {} +do_test attach2-2.12 { + catchsql { + COMMIT + } +} {1 {cannot commit - no transaction is active}} + +# Ticket #574: Make sure it works using the non-callback API +# +do_test attach2-3.1 { + set DB [sqlite3_connection_pointer db] + set rc [catch {sqlite3_prepare $DB "ATTACH 'test2.db' AS t2" -1 TAIL} VM] + if {$rc} {lappend rc $VM} + sqlite3_step $VM + sqlite3_finalize $VM + set rc +} {0} +do_test attach2-3.2 { + set rc [catch {sqlite3_prepare $DB "DETACH t2" -1 TAIL} VM] + if {$rc} {lappend rc $VM} + sqlite3_step $VM + sqlite3_finalize $VM + set rc +} {0} + +db close +for {set i 2} {$i<=15} {incr i} { + catch {db$i close} +} + +# A procedure to verify the status of locks on a database. +# +proc lock_status {testnum db expected_result} { + # If the database was compiled with OMIT_TEMPDB set, then + # the lock_status list will not contain an entry for the temp + # db. But the test code doesn't know this, so it's easiest + # to filter it out of the $expected_result list here. 
+ ifcapable !tempdb { + set expected_result [concat \ + [lrange $expected_result 0 1] \ + [lrange $expected_result 4 end] \ + ] + } + do_test attach2-$testnum [subst { + $db cache flush ;# The lock_status pragma should not be cached + execsql {PRAGMA lock_status} $db + }] $expected_result +} +set sqlite_os_trace 0 + +# Tests attach2-4.* test that read-locks work correctly with attached +# databases. +do_test attach2-4.1 { + sqlite3 db test.db + sqlite3 db2 test.db + execsql {ATTACH 'test2.db' as file2} + execsql {ATTACH 'test2.db' as file2} db2 +} {} + +lock_status 4.1.1 db {main unlocked temp closed file2 unlocked} +lock_status 4.1.2 db2 {main unlocked temp closed file2 unlocked} + +do_test attach2-4.2 { + # Handle 'db' read-locks test.db + execsql {BEGIN} + execsql {SELECT * FROM t1} + # Lock status: + # db - shared(main) + # db2 - +} {} + +lock_status 4.2.1 db {main shared temp closed file2 unlocked} +lock_status 4.2.2 db2 {main unlocked temp closed file2 unlocked} + +do_test attach2-4.3 { + # The read lock held by db does not prevent db2 from reading test.db + execsql {SELECT * FROM t1} db2 +} {} + +lock_status 4.3.1 db {main shared temp closed file2 unlocked} +lock_status 4.3.2 db2 {main unlocked temp closed file2 unlocked} + +do_test attach2-4.4 { + # db is holding a read lock on test.db, so we should not be able + # to commit a write to test.db from db2 + catchsql { + INSERT INTO t1 VALUES(1, 2) + } db2 +} {1 {database is locked}} + +lock_status 4.4.1 db {main shared temp closed file2 unlocked} +lock_status 4.4.2 db2 {main unlocked temp closed file2 unlocked} + +# We have to make sure that the cache_size and the soft_heap_limit +# are large enough to hold the entire change in memory. If either +# is set too small, then changes will spill to the database, forcing +# a reserved lock to promote to exclusive. That will mess up our +# test results. + +set soft_limit [sqlite3_soft_heap_limit 0] + + +do_test attach2-4.5 { + # Handle 'db2' reserves file2. + execsql {BEGIN} db2 + execsql {INSERT INTO file2.t1 VALUES(1, 2)} db2 + # Lock status: + # db - shared(main) + # db2 - reserved(file2) +} {} + +lock_status 4.5.1 db {main shared temp closed file2 unlocked} +lock_status 4.5.2 db2 {main unlocked temp closed file2 reserved} + +do_test attach2-4.6.1 { + # Reads are allowed against a reserved database. + catchsql { + SELECT * FROM file2.t1; + } + # Lock status: + # db - shared(main), shared(file2) + # db2 - reserved(file2) +} {0 {}} + +lock_status 4.6.1.1 db {main shared temp closed file2 shared} +lock_status 4.6.1.2 db2 {main unlocked temp closed file2 reserved} + +do_test attach2-4.6.2 { + # Writes against a reserved database are not allowed. + catchsql { + UPDATE file2.t1 SET a=0; + } +} {1 {database is locked}} + +lock_status 4.6.2.1 db {main shared temp closed file2 shared} +lock_status 4.6.2.2 db2 {main unlocked temp closed file2 reserved} + +do_test attach2-4.7 { + # Ensure handle 'db' retains the lock on the main file after + # failing to obtain a write-lock on file2. + catchsql { + INSERT INTO t1 VALUES(1, 2) + } db2 +} {0 {}} + +lock_status 4.7.1 db {main shared temp closed file2 shared} +lock_status 4.7.2 db2 {main reserved temp closed file2 reserved} + +do_test attach2-4.8 { + # We should still be able to read test.db from db2 + execsql {SELECT * FROM t1} db2 +} {1 2} + +lock_status 4.8.1 db {main shared temp closed file2 shared} +lock_status 4.8.2 db2 {main reserved temp closed file2 reserved} + +do_test attach2-4.9 { + # Try to upgrade the handle 'db' lock. 
+ catchsql { + INSERT INTO t1 VALUES(1, 2) + } +} {1 {database is locked}} + +lock_status 4.9.1 db {main shared temp closed file2 shared} +lock_status 4.9.2 db2 {main reserved temp closed file2 reserved} + +do_test attach2-4.10 { + # We cannot commit db2 while db is holding a read-lock + catchsql {COMMIT} db2 +} {1 {database is locked}} + +lock_status 4.10.1 db {main shared temp closed file2 shared} +lock_status 4.10.2 db2 {main pending temp closed file2 reserved} + +set sqlite_os_trace 0 +do_test attach2-4.11 { + # db is able to commit. + catchsql {COMMIT} +} {0 {}} + +lock_status 4.11.1 db {main unlocked temp closed file2 unlocked} +lock_status 4.11.2 db2 {main pending temp closed file2 reserved} + +do_test attach2-4.12 { + # Now we can commit db2 + catchsql {COMMIT} db2 +} {0 {}} + +lock_status 4.12.1 db {main unlocked temp closed file2 unlocked} +lock_status 4.12.2 db2 {main unlocked temp closed file2 unlocked} + +do_test attach2-4.13 { + execsql {SELECT * FROM file2.t1} +} {1 2} +do_test attach2-4.14 { + execsql {INSERT INTO t1 VALUES(1, 2)} +} {} +do_test attach2-4.15 { + execsql {SELECT * FROM t1} db2 +} {1 2 1 2} + +db close +db2 close +file delete -force test2.db +sqlite3_soft_heap_limit $soft_limit + +# These tests - attach2-5.* - check that the master journal file is deleted +# correctly when a multi-file transaction is committed or rolled back. +# +# Update: It's not actually created if a rollback occurs, so that test +# doesn't really prove too much. +foreach f [glob test.db*] {file delete -force $f} +do_test attach2-5.1 { + sqlite3 db test.db + execsql { + ATTACH 'test.db2' AS aux; + } +} {} +do_test attach2-5.2 { + execsql { + BEGIN; + CREATE TABLE tbl(a, b, c); + CREATE TABLE aux.tbl(a, b, c); + COMMIT; + } +} {} +do_test attach2-5.3 { + lsort [glob test.db*] +} {test.db test.db2} +do_test attach2-5.4 { + execsql { + BEGIN; + DROP TABLE aux.tbl; + DROP TABLE tbl; + ROLLBACK; + } +} {} +do_test attach2-5.5 { + lsort [glob test.db*] +} {test.db test.db2} + +# Check that a database cannot be ATTACHed or DETACHed during a transaction. +do_test attach2-6.1 { + execsql { + BEGIN; + } +} {} +do_test attach2-6.2 { + catchsql { + ATTACH 'test3.db' as aux2; + } +} {1 {cannot ATTACH database within transaction}} + +do_test attach2-6.3 { + catchsql { + DETACH aux; + } +} {1 {cannot DETACH database within transaction}} +do_test attach2-6.4 { + execsql { + COMMIT; + DETACH aux; + } +} {} + +db close + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/attach3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/attach3.test new file mode 100644 index 0000000..d0702df --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/attach3.test @@ -0,0 +1,344 @@ +# 2003 July 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ATTACH and DETACH commands +# and schema changes to attached databases. 
+# +# $Id: attach3.test,v 1.17 2006/06/20 11:01:09 danielk1977 Exp $ +# + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create tables t1 and t2 in the main database +execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(c, d); +} + +# Create tables t1 and t2 in database file test2.db +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db +execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(c, d); +} db2 +db2 close + +# Create a table in the auxilary database. +do_test attach3-1.1 { + execsql { + ATTACH 'test2.db' AS aux; + } +} {} +do_test attach3-1.2 { + execsql { + CREATE TABLE aux.t3(e, f); + } +} {} +do_test attach3-1.3 { + execsql { + SELECT * FROM sqlite_master WHERE name = 't3'; + } +} {} +do_test attach3-1.4 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 't3'; + } +} "table t3 t3 [expr $AUTOVACUUM?5:4] {CREATE TABLE t3(e, f)}" +do_test attach3-1.5 { + execsql { + INSERT INTO t3 VALUES(1, 2); + SELECT * FROM t3; + } +} {1 2} + +# Create an index on the auxilary database table. +do_test attach3-2.1 { + execsql { + CREATE INDEX aux.i1 on t3(e); + } +} {} +do_test attach3-2.2 { + execsql { + SELECT * FROM sqlite_master WHERE name = 'i1'; + } +} {} +do_test attach3-2.3 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 'i1'; + } +} "index i1 t3 [expr $AUTOVACUUM?6:5] {CREATE INDEX i1 on t3(e)}" + +# Drop the index on the aux database table. +do_test attach3-3.1 { + execsql { + DROP INDEX aux.i1; + SELECT * FROM aux.sqlite_master WHERE name = 'i1'; + } +} {} +do_test attach3-3.2 { + execsql { + CREATE INDEX aux.i1 on t3(e); + SELECT * FROM aux.sqlite_master WHERE name = 'i1'; + } +} "index i1 t3 [expr $AUTOVACUUM?6:5] {CREATE INDEX i1 on t3(e)}" +do_test attach3-3.3 { + execsql { + DROP INDEX i1; + SELECT * FROM aux.sqlite_master WHERE name = 'i1'; + } +} {} + +# Drop tables t1 and t2 in the auxilary database. +do_test attach3-4.1 { + execsql { + DROP TABLE aux.t1; + SELECT name FROM aux.sqlite_master; + } +} {t2 t3} +do_test attach3-4.2 { + # This will drop main.t2 + execsql { + DROP TABLE t2; + SELECT name FROM aux.sqlite_master; + } +} {t2 t3} +do_test attach3-4.3 { + execsql { + DROP TABLE t2; + SELECT name FROM aux.sqlite_master; + } +} {t3} + +# Create a view in the auxilary database. +ifcapable view { +do_test attach3-5.1 { + execsql { + CREATE VIEW aux.v1 AS SELECT * FROM t3; + } +} {} +do_test attach3-5.2 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 'v1'; + } +} {view v1 v1 0 {CREATE VIEW v1 AS SELECT * FROM t3}} +do_test attach3-5.3 { + execsql { + INSERT INTO aux.t3 VALUES('hello', 'world'); + SELECT * FROM v1; + } +} {1 2 hello world} + +# Drop the view +do_test attach3-6.1 { + execsql { + DROP VIEW aux.v1; + } +} {} +do_test attach3-6.2 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 'v1'; + } +} {} +} ;# ifcapable view + +ifcapable {trigger} { +# Create a trigger in the auxilary database. 
+do_test attach3-7.1 { + execsql { + CREATE TRIGGER aux.tr1 AFTER INSERT ON t3 BEGIN + INSERT INTO t3 VALUES(new.e*2, new.f*2); + END; + } +} {} +do_test attach3-7.2 { + execsql { + DELETE FROM t3; + INSERT INTO t3 VALUES(10, 20); + SELECT * FROM t3; + } +} {10 20 20 40} +do_test attach3-5.3 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 'tr1'; + } +} {trigger tr1 t3 0 {CREATE TRIGGER tr1 AFTER INSERT ON t3 BEGIN + INSERT INTO t3 VALUES(new.e*2, new.f*2); + END}} + +# Drop the trigger +do_test attach3-8.1 { + execsql { + DROP TRIGGER aux.tr1; + } +} {} +do_test attach3-8.2 { + execsql { + SELECT * FROM aux.sqlite_master WHERE name = 'tr1'; + } +} {} + +ifcapable tempdb { + # Try to trick SQLite into dropping the wrong temp trigger. + do_test attach3-9.0 { + execsql { + CREATE TABLE main.t4(a, b, c); + CREATE TABLE aux.t4(a, b, c); + CREATE TEMP TRIGGER tst_trigger BEFORE INSERT ON aux.t4 BEGIN + SELECT 'hello world'; + END; + SELECT count(*) FROM sqlite_temp_master; + } + } {1} + do_test attach3-9.1 { + execsql { + DROP TABLE main.t4; + SELECT count(*) FROM sqlite_temp_master; + } + } {1} + do_test attach3-9.2 { + execsql { + DROP TABLE aux.t4; + SELECT count(*) FROM sqlite_temp_master; + } + } {0} +} +} ;# endif trigger + +# Make sure the aux.sqlite_master table is read-only +do_test attach3-10.0 { + catchsql { + INSERT INTO aux.sqlite_master VALUES(1, 2, 3, 4, 5); + } +} {1 {table sqlite_master may not be modified}} + +# Failure to attach leaves us in a workable state. +# Ticket #811 +# +do_test attach3-11.0 { + catchsql { + ATTACH DATABASE '/nodir/nofile.x' AS notadb; + } +} {1 {unable to open database: /nodir/nofile.x}} +do_test attach3-11.1 { + catchsql { + ATTACH DATABASE ':memory:' AS notadb; + } +} {0 {}} +do_test attach3-11.2 { + catchsql { + DETACH DATABASE notadb; + } +} {0 {}} + +# Return a list of attached databases +# +proc db_list {} { + set x [execsql { + PRAGMA database_list; + }] + set y {} + foreach {n id file} $x {lappend y $id} + return $y +} + +ifcapable schema_pragmas&&tempdb { + +ifcapable !trigger { + execsql {create temp table dummy(dummy)} +} + +# Ticket #1825 +# +do_test attach3-12.1 { + db_list +} {main temp aux} +do_test attach3-12.2 { + execsql { + ATTACH DATABASE ? AS ? + } + db_list +} {main temp aux {}} +do_test attach3-12.3 { + execsql { + DETACH aux + } + db_list +} {main temp {}} +do_test attach3-12.4 { + execsql { + DETACH ? + } + db_list +} {main temp} +do_test attach3-12.5 { + execsql { + ATTACH DATABASE '' AS '' + } + db_list +} {main temp {}} +do_test attach3-12.6 { + execsql { + DETACH '' + } + db_list +} {main temp} +do_test attach3-12.7 { + execsql { + ATTACH DATABASE '' AS ? + } + db_list +} {main temp {}} +do_test attach3-12.8 { + execsql { + DETACH '' + } + db_list +} {main temp} +do_test attach3-12.9 { + execsql { + ATTACH DATABASE '' AS NULL + } + db_list +} {main temp {}} +do_test attach3-12.10 { + execsql { + DETACH ? 
+ } + db_list +} {main temp} +do_test attach3-12.11 { + catchsql { + DETACH NULL + } +} {1 {no such database: }} +do_test attach3-12.12 { + catchsql { + ATTACH null AS null; + ATTACH '' AS ''; + } +} {1 {database is already in use}} +do_test attach3-12.13 { + db_list +} {main temp {}} +do_test attach3-12.14 { + execsql { + DETACH ''; + } + db_list +} {main temp} + +} ;# ifcapable pragma + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/attachmalloc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/attachmalloc.test new file mode 100644 index 0000000..38778ca --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/attachmalloc.test @@ -0,0 +1,48 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the ATTACH statement and +# specifically out-of-memory conditions within that command. +# +# $Id: attachmalloc.test,v 1.6 2007/10/03 08:46:45 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !memdebug { + finish_test + return +} + +source $testdir/malloc_common.tcl + +do_malloc_test attachmalloc-1 -tclprep { + db close + for {set i 2} {$i<=4} {incr i} { + file delete -force test$i.db + file delete -force test$i.db-journal + } +} -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 +} -sqlbody { + ATTACH 'test2.db' AS two; + CREATE TABLE two.t1(x); + ATTACH 'test3.db' AS three; + CREATE TABLE three.t1(x); + ATTACH 'test4.db' AS four; + CREATE TABLE four.t1(x); +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/auth.test b/libraries/sqlite/unix/sqlite-3.5.1/test/auth.test new file mode 100644 index 0000000..0c64404 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/auth.test @@ -0,0 +1,2306 @@ +# 2003 April 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the sqlite3_set_authorizer() API +# and related functionality. +# +# $Id: auth.test,v 1.37 2006/08/24 14:59:46 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is +# defined during compilation. 
+if {[catch {db auth {}} msg]} { + finish_test + return +} + +rename proc proc_real +proc_real proc {name arguments script} { + proc_real $name $arguments $script + if {$name=="auth"} { + db authorizer ::auth + } +} + +do_test auth-1.1.1 { + db close + set ::DB [sqlite3 db test.db] + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + db authorizer ::auth + catchsql {CREATE TABLE t1(a,b,c)} +} {1 {not authorized}} +do_test auth-1.1.2 { + db errorcode +} {23} +do_test auth-1.1.3 { + db authorizer +} {::auth} +do_test auth-1.1.4 { + # Ticket #896. + catchsql { + SELECT x; + } +} {1 {no such column: x}} +do_test auth-1.2 { + execsql {SELECT name FROM sqlite_master} +} {} +do_test auth-1.3.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TABLE t1(a,b,c)} +} {1 {not authorized}} +do_test auth-1.3.2 { + db errorcode +} {23} +do_test auth-1.3.3 { + set ::authargs +} {t1 {} main {}} +do_test auth-1.4 { + execsql {SELECT name FROM sqlite_master} +} {} + +ifcapable tempdb { + do_test auth-1.5 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TEMP TABLE t1(a,b,c)} + } {1 {not authorized}} + do_test auth-1.6 { + execsql {SELECT name FROM sqlite_temp_master} + } {} + do_test auth-1.7.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TEMP TABLE t1(a,b,c)} + } {1 {not authorized}} + do_test auth-1.7.2 { + set ::authargs + } {t1 {} temp {}} + do_test auth-1.8 { + execsql {SELECT name FROM sqlite_temp_master} + } {} +} + +do_test auth-1.9 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TABLE t1(a,b,c)} +} {0 {}} +do_test auth-1.10 { + execsql {SELECT name FROM sqlite_master} +} {} +do_test auth-1.11 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TABLE t1(a,b,c)} +} {0 {}} +do_test auth-1.12 { + execsql {SELECT name FROM sqlite_master} +} {} + +ifcapable tempdb { + do_test auth-1.13 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TEMP TABLE t1(a,b,c)} + } {0 {}} + do_test auth-1.14 { + execsql {SELECT name FROM sqlite_temp_master} + } {} + do_test auth-1.15 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TEMP TABLE t1(a,b,c)} + } {0 {}} + do_test auth-1.16 { + execsql {SELECT name FROM sqlite_temp_master} + } {} + + do_test auth-1.17 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TEMP TABLE t1(a,b,c)} + } {0 {}} + do_test auth-1.18 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test 
auth-1.19.1 { + set ::authargs {} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TABLE t2(a,b,c)} +} {0 {}} +do_test auth-1.19.2 { + set ::authargs +} {} +do_test auth-1.20 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +do_test auth-1.21.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {1 {not authorized}} +do_test auth-1.21.2 { + set ::authargs +} {t2 {} main {}} +do_test auth-1.22 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.23.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {0 {}} +do_test auth-1.23.2 { + set ::authargs +} {t2 {} main {}} +do_test auth-1.24 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.25 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {1 {not authorized}} + do_test auth-1.26 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.27 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {0 {}} + do_test auth-1.28 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test auth-1.29 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="t2"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {INSERT INTO t2 VALUES(1,2,3)} +} {1 {not authorized}} +do_test auth-1.30 { + execsql {SELECT * FROM t2} +} {} +do_test auth-1.31 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="t2"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {INSERT INTO t2 VALUES(1,2,3)} +} {0 {}} +do_test auth-1.32 { + execsql {SELECT * FROM t2} +} {} +do_test auth-1.33 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="t1"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {INSERT INTO t2 VALUES(1,2,3)} +} {0 {}} +do_test auth-1.34 { + execsql {SELECT * FROM t2} +} {1 2 3} + +do_test auth-1.35.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2} +} {1 {access to t2.b is prohibited}} +do_test auth-1.35.2 { + execsql {ATTACH DATABASE 'test.db' AS two} + catchsql {SELECT * FROM two.t2} +} {1 {access to two.t2.b is prohibited}} +execsql {DETACH DATABASE two} +do_test auth-1.36 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2} +} {0 {1 {} 3}} +do_test auth-1.37 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2 WHERE b=2} +} {0 {}} +do_test auth-1.38 { + proc auth {code arg1 arg2 arg3 arg4} { + if 
{$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="a"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2 WHERE b=2} +} {0 {{} 2 3}} +do_test auth-1.39 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2 WHERE b IS NULL} +} {0 {1 {} 3}} +do_test auth-1.40 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {SELECT a,c FROM t2 WHERE b IS NULL} +} {1 {access to t2.b is prohibited}} + +do_test auth-1.41 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {UPDATE t2 SET a=11} +} {0 {}} +do_test auth-1.42 { + execsql {SELECT * FROM t2} +} {11 2 3} +do_test auth-1.43 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {UPDATE t2 SET b=22, c=33} +} {1 {not authorized}} +do_test auth-1.44 { + execsql {SELECT * FROM t2} +} {11 2 3} +do_test auth-1.45 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_UPDATE" && $arg1=="t2" && $arg2=="b"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {UPDATE t2 SET b=22, c=33} +} {0 {}} +do_test auth-1.46 { + execsql {SELECT * FROM t2} +} {11 2 33} + +do_test auth-1.47 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t2"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DELETE FROM t2 WHERE a=11} +} {1 {not authorized}} +do_test auth-1.48 { + execsql {SELECT * FROM t2} +} {11 2 33} +do_test auth-1.49 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t2"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DELETE FROM t2 WHERE a=11} +} {0 {}} +do_test auth-1.50 { + execsql {SELECT * FROM t2} +} {11 2 33} + +do_test auth-1.51 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_SELECT"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2} +} {1 {not authorized}} +do_test auth-1.52 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_SELECT"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2} +} {0 {}} +do_test auth-1.53 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_SELECT"} { + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2} +} {0 {11 2 33}} + +# Update for version 3: There used to be a handful of test here that +# tested the authorisation callback with the COPY command. The following +# test makes the same database modifications as they used to. 
+do_test auth-1.54 { + execsql {INSERT INTO t2 VALUES(7, 8, 9);} +} {} +do_test auth-1.55 { + execsql {SELECT * FROM t2} +} {11 2 33 7 8 9} + +do_test auth-1.63 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {1 {not authorized}} +do_test auth-1.64 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.65 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t2"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {1 {not authorized}} +do_test auth-1.66 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.67 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {1 {not authorized}} + do_test auth-1.68 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.69 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t1"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {1 {not authorized}} + do_test auth-1.70 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test auth-1.71 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {0 {}} +do_test auth-1.72 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.73 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t2"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t2} +} {0 {}} +do_test auth-1.74 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.75 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {0 {}} + do_test auth-1.76 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.77 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="t1"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TABLE t1} + } {0 {}} + do_test auth-1.78 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +# Test cases auth-1.79 to auth-1.124 test creating and dropping views. +# Omit these if the library was compiled with views omitted. 
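+
+# The action codes exercised in this block are SQLITE_CREATE_VIEW,
+# SQLITE_CREATE_TEMP_VIEW, SQLITE_DROP_VIEW and SQLITE_DROP_TEMP_VIEW.
+# Each is reported with the view name in arg1 and the database name
+# ("main" or "temp") in arg3, alongside the usual SQLITE_INSERT and
+# SQLITE_DELETE callbacks on sqlite_master that accompany any schema
+# change.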
+ifcapable view { +do_test auth-1.79 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2} +} {1 {not authorized}} +do_test auth-1.80 { + set ::authargs +} {v1 {} main {}} +do_test auth-1.81 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.82 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2} +} {0 {}} +do_test auth-1.83 { + set ::authargs +} {v1 {} main {}} +do_test auth-1.84 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.85 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2} + } {1 {not authorized}} + do_test auth-1.86 { + set ::authargs + } {v1 {} temp {}} + do_test auth-1.87 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.88 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2} + } {0 {}} + do_test auth-1.89 { + set ::authargs + } {v1 {} temp {}} + do_test auth-1.90 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test auth-1.91 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2} +} {1 {not authorized}} +do_test auth-1.92 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.93 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE VIEW v1 AS SELECT a+1,b+1 FROM t2} +} {0 {}} +do_test auth-1.94 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.95 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2} + } {1 {not authorized}} + do_test auth-1.96 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.97 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE TEMPORARY VIEW v1 AS SELECT a+1,b+1 FROM t2} + } {0 {}} + do_test auth-1.98 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test auth-1.99 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE VIEW v2 AS SELECT a+1,b+1 FROM t2; + DROP VIEW v2 + } +} {1 {not authorized}} +do_test auth-1.100 { + execsql {SELECT name FROM sqlite_master} +} {t2 v2} +do_test auth-1.101 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql 
{DROP VIEW v2} +} {1 {not authorized}} +do_test auth-1.102 { + set ::authargs +} {v2 {} main {}} +do_test auth-1.103 { + execsql {SELECT name FROM sqlite_master} +} {t2 v2} +do_test auth-1.104 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP VIEW v2} +} {0 {}} +do_test auth-1.105 { + execsql {SELECT name FROM sqlite_master} +} {t2 v2} +do_test auth-1.106 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP VIEW v2} +} {0 {}} +do_test auth-1.107 { + set ::authargs +} {v2 {} main {}} +do_test auth-1.108 { + execsql {SELECT name FROM sqlite_master} +} {t2 v2} +do_test auth-1.109 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP VIEW v2} +} {0 {}} +do_test auth-1.110 { + set ::authargs +} {v2 {} main {}} +do_test auth-1.111 { + execsql {SELECT name FROM sqlite_master} +} {t2} + + +ifcapable tempdb { + do_test auth-1.112 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE TEMP VIEW v1 AS SELECT a+1,b+1 FROM t1; + DROP VIEW v1 + } + } {1 {not authorized}} + do_test auth-1.113 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 v1} + do_test auth-1.114 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP VIEW v1} + } {1 {not authorized}} + do_test auth-1.115 { + set ::authargs + } {v1 {} temp {}} + do_test auth-1.116 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 v1} + do_test auth-1.117 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP VIEW v1} + } {0 {}} + do_test auth-1.118 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 v1} + do_test auth-1.119 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP VIEW v1} + } {0 {}} + do_test auth-1.120 { + set ::authargs + } {v1 {} temp {}} + do_test auth-1.121 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 v1} + do_test auth-1.122 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_VIEW"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP VIEW v1} + } {0 {}} + do_test auth-1.123 { + set ::authargs + } {v1 {} temp {}} + do_test auth-1.124 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} +} ;# ifcapable view + +# Test cases auth-1.125 to auth-1.176 test creating and dropping triggers. +# Omit these if the library was compiled with triggers omitted. 
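+
+# In the trigger cases (SQLITE_CREATE_TRIGGER, SQLITE_CREATE_TEMP_TRIGGER,
+# SQLITE_DROP_TRIGGER, SQLITE_DROP_TEMP_TRIGGER) arg1 carries the trigger
+# name and arg2 the table the trigger is attached to, which is why the
+# ::authargs checks below expect values such as {r2 t2 main {}}.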
+# +ifcapable trigger&&tempdb { +do_test auth-1.125 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r2 DELETE on t2 BEGIN + SELECT NULL; + END; + } +} {1 {not authorized}} +do_test auth-1.126 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.127 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.128 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r2 DELETE on t2 BEGIN + SELECT NULL; + END; + } +} {1 {not authorized}} +do_test auth-1.129 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.130 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r2 DELETE on t2 BEGIN + SELECT NULL; + END; + } +} {0 {}} +do_test auth-1.131 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.132 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.133 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r2 DELETE on t2 BEGIN + SELECT NULL; + END; + } +} {0 {}} +do_test auth-1.134 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.135 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + CREATE TABLE tx(id); + CREATE TRIGGER r2 AFTER INSERT ON t2 BEGIN + INSERT INTO tx VALUES(NEW.rowid); + END; + } +} {0 {}} +do_test auth-1.136.1 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.136.2 { + execsql { + SELECT name FROM sqlite_master WHERE type='trigger' + } +} {r2} +do_test auth-1.136.3 { + proc auth {code arg1 arg2 arg3 arg4} { + lappend ::authargs $code $arg1 $arg2 $arg3 $arg4 + return SQLITE_OK + } + set ::authargs {} + execsql { + INSERT INTO t2 VALUES(1,2,3); + } + set ::authargs +} {SQLITE_INSERT t2 {} main {} SQLITE_INSERT tx {} main r2 SQLITE_READ t2 ROWID main r2} +do_test auth-1.136.4 { + execsql { + SELECT * FROM tx; + } +} {3} +do_test auth-1.137 { + execsql {SELECT name FROM sqlite_master} +} {t2 tx r2} +do_test auth-1.138 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r1 DELETE on t1 BEGIN + SELECT NULL; + END; + } +} {1 {not authorized}} +do_test auth-1.139 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.140 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1} +do_test auth-1.141 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r1 DELETE on t1 BEGIN + SELECT NULL; + END; + } +} {1 {not authorized}} +do_test auth-1.142 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1} +do_test auth-1.143 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r1 
DELETE on t1 BEGIN + SELECT NULL; + END; + } +} {0 {}} +do_test auth-1.144 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.145 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1} +do_test auth-1.146 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r1 DELETE on t1 BEGIN + SELECT NULL; + END; + } +} {0 {}} +do_test auth-1.147 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1} +do_test auth-1.148 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + CREATE TRIGGER r1 DELETE on t1 BEGIN + SELECT NULL; + END; + } +} {0 {}} +do_test auth-1.149 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.150 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1 r1} + +do_test auth-1.151 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r2} +} {1 {not authorized}} +do_test auth-1.152 { + execsql {SELECT name FROM sqlite_master} +} {t2 tx r2} +do_test auth-1.153 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r2} +} {1 {not authorized}} +do_test auth-1.154 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.155 { + execsql {SELECT name FROM sqlite_master} +} {t2 tx r2} +do_test auth-1.156 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r2} +} {0 {}} +do_test auth-1.157 { + execsql {SELECT name FROM sqlite_master} +} {t2 tx r2} +do_test auth-1.158 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r2} +} {0 {}} +do_test auth-1.159 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.160 { + execsql {SELECT name FROM sqlite_master} +} {t2 tx r2} +do_test auth-1.161 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r2} +} {0 {}} +do_test auth-1.162 { + set ::authargs +} {r2 t2 main {}} +do_test auth-1.163 { + execsql { + DROP TABLE tx; + DELETE FROM t2 WHERE a=1 AND b=2 AND c=3; + SELECT name FROM sqlite_master; + } +} {t2} + +do_test auth-1.164 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r1} +} {1 {not authorized}} +do_test auth-1.165 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1 r1} +do_test auth-1.166 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r1} +} {1 {not authorized}} +do_test auth-1.167 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.168 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1 r1} +do_test auth-1.169 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && 
$arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r1} +} {0 {}} +do_test auth-1.170 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1 r1} +do_test auth-1.171 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r1} +} {0 {}} +do_test auth-1.172 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.173 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1 r1} +do_test auth-1.174 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_TRIGGER"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP TRIGGER r1} +} {0 {}} +do_test auth-1.175 { + set ::authargs +} {r1 t1 temp {}} +do_test auth-1.176 { + execsql {SELECT name FROM sqlite_temp_master} +} {t1} +} ;# ifcapable trigger + +do_test auth-1.177 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE INDEX i2 ON t2(a)} +} {1 {not authorized}} +do_test auth-1.178 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.179 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.180 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE INDEX i2 ON t2(a)} +} {1 {not authorized}} +do_test auth-1.181 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.182 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE INDEX i2 ON t2(b)} +} {0 {}} +do_test auth-1.183 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.184 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.185 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE INDEX i2 ON t2(b)} +} {0 {}} +do_test auth-1.186 { + execsql {SELECT name FROM sqlite_master} +} {t2} +do_test auth-1.187 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {CREATE INDEX i2 ON t2(a)} +} {0 {}} +do_test auth-1.188 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.189 { + execsql {SELECT name FROM sqlite_master} +} {t2 i2} + +ifcapable tempdb { + do_test auth-1.190 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE INDEX i1 ON t1(a)} + } {1 {not authorized}} + do_test auth-1.191 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.192 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.193 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {CREATE INDEX i1 ON t1(b)} + } {1 {not authorized}} + do_test auth-1.194 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.195 { + proc auth {code arg1 arg2 arg3 arg4} { + if 
{$code=="SQLITE_CREATE_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE INDEX i1 ON t1(b)} + } {0 {}} + do_test auth-1.196 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.197 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.198 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_INSERT" && $arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {CREATE INDEX i1 ON t1(c)} + } {0 {}} + do_test auth-1.199 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} + do_test auth-1.200 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_CREATE_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {CREATE INDEX i1 ON t1(a)} + } {0 {}} + do_test auth-1.201 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.202 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 i1} +} + +do_test auth-1.203 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP INDEX i2} +} {1 {not authorized}} +do_test auth-1.204 { + execsql {SELECT name FROM sqlite_master} +} {t2 i2} +do_test auth-1.205 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP INDEX i2} +} {1 {not authorized}} +do_test auth-1.206 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.207 { + execsql {SELECT name FROM sqlite_master} +} {t2 i2} +do_test auth-1.208 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP INDEX i2} +} {0 {}} +do_test auth-1.209 { + execsql {SELECT name FROM sqlite_master} +} {t2 i2} +do_test auth-1.210 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP INDEX i2} +} {0 {}} +do_test auth-1.211 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.212 { + execsql {SELECT name FROM sqlite_master} +} {t2 i2} +do_test auth-1.213 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP INDEX i2} +} {0 {}} +do_test auth-1.214 { + set ::authargs +} {i2 t2 main {}} +do_test auth-1.215 { + execsql {SELECT name FROM sqlite_master} +} {t2} + +ifcapable tempdb { + do_test auth-1.216 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && $arg1=="sqlite_temp_master"} { + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP INDEX i1} + } {1 {not authorized}} + do_test auth-1.217 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 i1} + do_test auth-1.218 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {DROP INDEX i1} + } {1 {not authorized}} + do_test auth-1.219 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.220 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 i1} + do_test auth-1.221 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE" && 
$arg1=="sqlite_temp_master"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP INDEX i1} + } {0 {}} + do_test auth-1.222 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 i1} + do_test auth-1.223 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {DROP INDEX i1} + } {0 {}} + do_test auth-1.224 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.225 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1 i1} + do_test auth-1.226 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DROP_TEMP_INDEX"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {DROP INDEX i1} + } {0 {}} + do_test auth-1.227 { + set ::authargs + } {i1 t1 temp {}} + do_test auth-1.228 { + execsql {SELECT name FROM sqlite_temp_master} + } {t1} +} + +do_test auth-1.229 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_PRAGMA"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {PRAGMA full_column_names=on} +} {1 {not authorized}} +do_test auth-1.230 { + set ::authargs +} {full_column_names on {} {}} +do_test auth-1.231 { + execsql2 {SELECT a FROM t2} +} {a 11 a 7} +do_test auth-1.232 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_PRAGMA"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {PRAGMA full_column_names=on} +} {0 {}} +do_test auth-1.233 { + set ::authargs +} {full_column_names on {} {}} +do_test auth-1.234 { + execsql2 {SELECT a FROM t2} +} {a 11 a 7} +do_test auth-1.235 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_PRAGMA"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {PRAGMA full_column_names=on} +} {0 {}} +do_test auth-1.236 { + execsql2 {SELECT a FROM t2} +} {t2.a 11 t2.a 7} +do_test auth-1.237 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_PRAGMA"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql {PRAGMA full_column_names=OFF} +} {0 {}} +do_test auth-1.238 { + set ::authargs +} {full_column_names OFF {} {}} +do_test auth-1.239 { + execsql2 {SELECT a FROM t2} +} {a 11 a 7} + +do_test auth-1.240 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_TRANSACTION"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {BEGIN} +} {1 {not authorized}} +do_test auth-1.241 { + set ::authargs +} {BEGIN {} {} {}} +do_test auth-1.242 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_TRANSACTION" && $arg1!="BEGIN"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql {BEGIN; INSERT INTO t2 VALUES(44,55,66); COMMIT} +} {1 {not authorized}} +do_test auth-1.243 { + set ::authargs +} {COMMIT {} {} {}} +do_test auth-1.244 { + execsql {SELECT * FROM t2} +} {11 2 33 7 8 9 44 55 66} +do_test auth-1.245 { + catchsql {ROLLBACK} +} {1 {not authorized}} +do_test auth-1.246 { + set ::authargs +} {ROLLBACK {} {} {}} +do_test auth-1.247 { + catchsql {END TRANSACTION} +} {1 {not authorized}} +do_test auth-1.248 { + set ::authargs +} {COMMIT {} {} {}} +do_test auth-1.249 { + db authorizer {} + catchsql {ROLLBACK} +} {0 {}} +do_test auth-1.250 { + execsql {SELECT * FROM 
t2} +} {11 2 33 7 8 9} + +# ticket #340 - authorization for ATTACH and DETACH. +# +do_test auth-1.251 { + db authorizer ::auth + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + } + return SQLITE_OK + } + catchsql { + ATTACH DATABASE ':memory:' AS test1 + } +} {0 {}} +do_test auth-1.252 { + set ::authargs +} {:memory: {} {} {}} +do_test auth-1.253 { + catchsql {DETACH DATABASE test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + ATTACH DATABASE ':memory:' AS test1; + } +} {1 {not authorized}} +do_test auth-1.254 { + lindex [execsql {PRAGMA database_list}] 7 +} {} +do_test auth-1.255 { + catchsql {DETACH DATABASE test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + ATTACH DATABASE ':memory:' AS test1; + } +} {0 {}} +do_test auth-1.256 { + lindex [execsql {PRAGMA database_list}] 7 +} {} +do_test auth-1.257 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DETACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + execsql {ATTACH DATABASE ':memory:' AS test1} + catchsql { + DETACH DATABASE test1; + } +} {0 {}} +do_test auth-1.258 { + lindex [execsql {PRAGMA database_list}] 7 +} {} +do_test auth-1.259 { + execsql {ATTACH DATABASE ':memory:' AS test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DETACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + DETACH DATABASE test1; + } +} {0 {}} +ifcapable tempdb { + ifcapable schema_pragmas { + do_test auth-1.260 { + lindex [execsql {PRAGMA database_list}] 7 + } {test1} + } ;# ifcapable schema_pragmas + do_test auth-1.261 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DETACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + DETACH DATABASE test1; + } + } {1 {not authorized}} + ifcapable schema_pragmas { + do_test auth-1.262 { + lindex [execsql {PRAGMA database_list}] 7 + } {test1} + } ;# ifcapable schema_pragmas + db authorizer {} + execsql {DETACH DATABASE test1} + db authorizer ::auth + + # Authorization for ALTER TABLE. These tests are omitted if the library + # was built without ALTER TABLE support. 
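# A minimal sketch of the mechanism exercised below (not part of the original
# test file; assumes the tester.tcl helpers db/catchsql used throughout, and
# deny_rename is a hypothetical name): for SQLITE_ALTER_TABLE the callback
# receives the database name in arg1 and the table being renamed in arg2, and
# it can veto or silently swallow the statement.
proc deny_rename {code arg1 arg2 arg3 arg4} {
  if {$code=="SQLITE_ALTER_TABLE"} {
    # arg1 is "main" or "temp", arg2 is the table named in the ALTER statement.
    return SQLITE_DENY    ;# SQLITE_IGNORE would report success but rename nothing
  }
  return SQLITE_OK
}
# db authorizer ::deny_rename
# catchsql {ALTER TABLE t2 RENAME TO t2x}   ;# expected: {1 {not authorized}}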
+ ifcapable altertable { + + do_test auth-1.263 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1 RENAME TO t1x + } + } {0 {}} + do_test auth-1.264 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + do_test auth-1.265 { + set authargs + } {temp t1 {} {}} + do_test auth-1.266 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1x RENAME TO t1 + } + } {0 {}} + do_test auth-1.267 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + do_test auth-1.268 { + set authargs + } {temp t1x {} {}} + do_test auth-1.269 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1x RENAME TO t1 + } + } {1 {not authorized}} + do_test auth-1.270 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + + do_test auth-1.271 { + set authargs + } {temp t1x {} {}} + } ;# ifcapable altertable + +} else { + db authorizer {} + db eval { + DETACH DATABASE test1; + } +} + +ifcapable altertable { +db authorizer {} +catchsql {ALTER TABLE t1x RENAME TO t1} +db authorizer ::auth +do_test auth-1.272 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t2 RENAME TO t2x + } +} {0 {}} +do_test auth-1.273 { + execsql {SELECT name FROM sqlite_master WHERE type='table'} +} {t2x} +do_test auth-1.274 { + set authargs +} {main t2 {} {}} +do_test auth-1.275 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t2x RENAME TO t2 + } +} {0 {}} +do_test auth-1.276 { + execsql {SELECT name FROM sqlite_master WHERE type='table'} +} {t2x} +do_test auth-1.277 { + set authargs +} {main t2x {} {}} +do_test auth-1.278 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t2x RENAME TO t2 + } +} {1 {not authorized}} +do_test auth-1.279 { + execsql {SELECT name FROM sqlite_master WHERE type='table'} +} {t2x} +do_test auth-1.280 { + set authargs +} {main t2x {} {}} +db authorizer {} +catchsql {ALTER TABLE t2x RENAME TO t2} + +} ;# ifcapable altertable + +# Test the authorization callbacks for the REINDEX command. 
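# In brief (a sketch only, assuming the same tester.tcl harness; log_reindex
# is a hypothetical name): for SQLITE_REINDEX the callback sees the index
# name in arg1 and its database in arg3, and a "REINDEX <collation>"
# statement visits every index built on that collation, so the callback
# fires once per affected index.
proc log_reindex {code arg1 arg2 arg3 arg4} {
  if {$code=="SQLITE_REINDEX"} {
    lappend ::reindexed [list $arg1 $arg3]   ;# e.g. {t3_idx1 main}
  }
  return SQLITE_OK
}
# db authorizer ::log_reindex
# execsql {REINDEX NOCASE}    ;# fires only for indexes that use NOCASE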
+ifcapable reindex { + +proc auth {code args} { + if {$code=="SQLITE_REINDEX"} { + set ::authargs [concat $::authargs $args] + } + return SQLITE_OK +} +db authorizer auth +do_test auth-1.281 { + execsql { + CREATE TABLE t3(a PRIMARY KEY, b, c); + CREATE INDEX t3_idx1 ON t3(c COLLATE BINARY); + CREATE INDEX t3_idx2 ON t3(b COLLATE NOCASE); + } +} {} +do_test auth-1.282 { + set ::authargs {} + execsql { + REINDEX t3_idx1; + } + set ::authargs +} {t3_idx1 {} main {}} +do_test auth-1.283 { + set ::authargs {} + execsql { + REINDEX BINARY; + } + set ::authargs +} {t3_idx1 {} main {} sqlite_autoindex_t3_1 {} main {}} +do_test auth-1.284 { + set ::authargs {} + execsql { + REINDEX NOCASE; + } + set ::authargs +} {t3_idx2 {} main {}} +do_test auth-1.285 { + set ::authargs {} + execsql { + REINDEX t3; + } + set ::authargs +} {t3_idx2 {} main {} t3_idx1 {} main {} sqlite_autoindex_t3_1 {} main {}} +do_test auth-1.286 { + execsql { + DROP TABLE t3; + } +} {} +ifcapable tempdb { + do_test auth-1.287 { + execsql { + CREATE TEMP TABLE t3(a PRIMARY KEY, b, c); + CREATE INDEX t3_idx1 ON t3(c COLLATE BINARY); + CREATE INDEX t3_idx2 ON t3(b COLLATE NOCASE); + } + } {} + do_test auth-1.288 { + set ::authargs {} + execsql { + REINDEX temp.t3_idx1; + } + set ::authargs + } {t3_idx1 {} temp {}} + do_test auth-1.289 { + set ::authargs {} + execsql { + REINDEX BINARY; + } + set ::authargs + } {t3_idx1 {} temp {} sqlite_autoindex_t3_1 {} temp {}} + do_test auth-1.290 { + set ::authargs {} + execsql { + REINDEX NOCASE; + } + set ::authargs + } {t3_idx2 {} temp {}} + do_test auth-1.291 { + set ::authargs {} + execsql { + REINDEX temp.t3; + } + set ::authargs + } {t3_idx2 {} temp {} t3_idx1 {} temp {} sqlite_autoindex_t3_1 {} temp {}} + proc auth {code args} { + if {$code=="SQLITE_REINDEX"} { + set ::authargs [concat $::authargs $args] + return SQLITE_DENY + } + return SQLITE_OK + } + do_test auth-1.292 { + set ::authargs {} + catchsql { + REINDEX temp.t3; + } + } {1 {not authorized}} + do_test auth-1.293 { + execsql { + DROP TABLE t3; + } + } {} +} + +} ;# ifcapable reindex + +ifcapable analyze { + proc auth {code args} { + if {$code=="SQLITE_ANALYZE"} { + set ::authargs [concat $::authargs $args] + } + return SQLITE_OK + } + do_test auth-1.294 { + set ::authargs {} + execsql { + CREATE TABLE t4(a,b,c); + CREATE INDEX t4i1 ON t4(a); + CREATE INDEX t4i2 ON t4(b,a,c); + INSERT INTO t4 VALUES(1,2,3); + ANALYZE; + } + set ::authargs + } {t4 {} main {}} + do_test auth-1.295 { + execsql { + SELECT count(*) FROM sqlite_stat1; + } + } 2 + proc auth {code args} { + if {$code=="SQLITE_ANALYZE"} { + set ::authargs [concat $::authargs $args] + return SQLITE_DENY + } + return SQLITE_OK + } + do_test auth-1.296 { + set ::authargs {} + catchsql { + ANALYZE; + } + } {1 {not authorized}} + do_test auth-1.297 { + execsql { + SELECT count(*) FROM sqlite_stat1; + } + } 2 +} ;# ifcapable analyze + + +# Authorization for ALTER TABLE ADD COLUMN. +# These tests are omitted if the library +# was built without ALTER TABLE support. 
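# The point of these tests, in miniature (sketch only, hypothetical proc
# name, same tester.tcl harness as above): ADD COLUMN is authorized with the
# same SQLITE_ALTER_TABLE code as RENAME; returning SQLITE_IGNORE lets the
# statement report success while leaving the schema untouched, and
# SQLITE_DENY fails it with "not authorized".
proc ignore_add_column {code arg1 arg2 arg3 arg4} {
  if {$code=="SQLITE_ALTER_TABLE"} {
    return SQLITE_IGNORE
  }
  return SQLITE_OK
}
# db authorizer ::ignore_add_column
# catchsql {ALTER TABLE t5 ADD COLUMN extra}   ;# {0 {}} but no column is added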
+ifcapable {altertable} { + do_test auth-1.300 { + execsql {CREATE TABLE t5(x)} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t5 ADD COLUMN new_col_1; + } + } {0 {}} + do_test auth-1.301 { + set x [execsql {SELECT sql FROM sqlite_master WHERE name='t5'}] + regexp new_col_1 $x + } {1} + do_test auth-1.302 { + set authargs + } {main t5 {} {}} + do_test auth-1.303 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t5 ADD COLUMN new_col_2; + } + } {0 {}} + do_test auth-1.304 { + set x [execsql {SELECT sql FROM sqlite_master WHERE name='t5'}] + regexp new_col_2 $x + } {0} + do_test auth-1.305 { + set authargs + } {main t5 {} {}} + do_test auth-1.306 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t5 ADD COLUMN new_col_3 + } + } {1 {not authorized}} + do_test auth-1.307 { + set x [execsql {SELECT sql FROM sqlite_temp_master WHERE type='t5'}] + regexp new_col_3 $x + } {0} + + do_test auth-1.308 { + set authargs + } {main t5 {} {}} + execsql {DROP TABLE t5} +} ;# ifcapable altertable + +do_test auth-2.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="x"} { + return SQLITE_DENY + } + return SQLITE_OK + } + db authorizer ::auth + execsql {CREATE TABLE t3(x INTEGER PRIMARY KEY, y, z)} + catchsql {SELECT * FROM t3} +} {1 {access to t3.x is prohibited}} +do_test auth-2.1 { + catchsql {SELECT y,z FROM t3} +} {0 {}} +do_test auth-2.2 { + catchsql {SELECT ROWID,y,z FROM t3} +} {1 {access to t3.x is prohibited}} +do_test auth-2.3 { + catchsql {SELECT OID,y,z FROM t3} +} {1 {access to t3.x is prohibited}} +do_test auth-2.4 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="x"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + execsql {INSERT INTO t3 VALUES(44,55,66)} + catchsql {SELECT * FROM t3} +} {0 {{} 55 66}} +do_test auth-2.5 { + catchsql {SELECT rowid,y,z FROM t3} +} {0 {{} 55 66}} +do_test auth-2.6 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t3" && $arg2=="ROWID"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t3} +} {0 {44 55 66}} +do_test auth-2.7 { + catchsql {SELECT ROWID,y,z FROM t3} +} {0 {44 55 66}} +do_test auth-2.8 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="ROWID"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT ROWID,b,c FROM t2} +} {0 {{} 2 33 {} 8 9}} +do_test auth-2.9.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="ROWID"} { + return bogus + } + return SQLITE_OK + } + catchsql {SELECT ROWID,b,c FROM t2} +} {1 {illegal return value (999) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY}} +do_test auth-2.9.2 { + db errorcode +} {1} +do_test auth-2.10 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_SELECT"} { + return bogus + } + return SQLITE_OK + } + catchsql {SELECT ROWID,b,c FROM t2} +} {1 {illegal return value (1) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or 
SQLITE_DENY}} +do_test auth-2.11.1 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg2=="a"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2, t3} +} {0 {{} 2 33 44 55 66 {} 8 9 44 55 66}} +do_test auth-2.11.2 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg2=="x"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql {SELECT * FROM t2, t3} +} {0 {11 2 33 {} 55 66 7 8 9 {} 55 66}} + +# Make sure the OLD and NEW pseudo-tables of a trigger get authorized. +# +ifcapable trigger { + do_test auth-3.1 { + proc auth {code arg1 arg2 arg3 arg4} { + return SQLITE_OK + } + execsql { + CREATE TABLE tx(a1,a2,b1,b2,c1,c2); + CREATE TRIGGER r1 AFTER UPDATE ON t2 FOR EACH ROW BEGIN + INSERT INTO tx VALUES(OLD.a,NEW.a,OLD.b,NEW.b,OLD.c,NEW.c); + END; + UPDATE t2 SET a=a+1; + SELECT * FROM tx; + } + } {11 12 2 2 33 33 7 8 8 8 9 9} + do_test auth-3.2 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="c"} { + return SQLITE_IGNORE + } + return SQLITE_OK + } + execsql { + DELETE FROM tx; + UPDATE t2 SET a=a+100; + SELECT * FROM tx; + } + } {12 112 2 2 {} {} 8 108 8 8 {} {}} +} ;# ifcapable trigger + +# Make sure the names of views and triggers are passed on on arg4. +# +ifcapable trigger { +do_test auth-4.1 { + proc auth {code arg1 arg2 arg3 arg4} { + lappend ::authargs $code $arg1 $arg2 $arg3 $arg4 + return SQLITE_OK + } + set authargs {} + execsql { + UPDATE t2 SET a=a+1; + } + set authargs +} [list \ + SQLITE_READ t2 a main {} \ + SQLITE_UPDATE t2 a main {} \ + SQLITE_INSERT tx {} main r1 \ + SQLITE_READ t2 a main r1 \ + SQLITE_READ t2 a main r1 \ + SQLITE_READ t2 b main r1 \ + SQLITE_READ t2 b main r1 \ + SQLITE_READ t2 c main r1 \ + SQLITE_READ t2 c main r1] +} + +ifcapable {view && trigger} { +do_test auth-4.2 { + execsql { + CREATE VIEW v1 AS SELECT a+b AS x FROM t2; + CREATE TABLE v1chng(x1,x2); + CREATE TRIGGER r2 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO v1chng VALUES(OLD.x,NEW.x); + END; + SELECT * FROM v1; + } +} {115 117} +do_test auth-4.3 { + set authargs {} + execsql { + UPDATE v1 SET x=1 WHERE x=117 + } + set authargs +} [list \ + SQLITE_UPDATE v1 x main {} \ + SQLITE_READ v1 x main {} \ + SQLITE_SELECT {} {} {} v1 \ + SQLITE_READ t2 a main v1 \ + SQLITE_READ t2 b main v1 \ + SQLITE_INSERT v1chng {} main r2 \ + SQLITE_READ v1 x main r2 \ + SQLITE_READ v1 x main r2] +do_test auth-4.4 { + execsql { + CREATE TRIGGER r3 INSTEAD OF DELETE ON v1 BEGIN + INSERT INTO v1chng VALUES(OLD.x,NULL); + END; + SELECT * FROM v1; + } +} {115 117} +do_test auth-4.5 { + set authargs {} + execsql { + DELETE FROM v1 WHERE x=117 + } + set authargs +} [list \ + SQLITE_DELETE v1 {} main {} \ + SQLITE_READ v1 x main {} \ + SQLITE_SELECT {} {} {} v1 \ + SQLITE_READ t2 a main v1 \ + SQLITE_READ t2 b main v1 \ + SQLITE_INSERT v1chng {} main r3 \ + SQLITE_READ v1 x main r3] + +} ;# ifcapable view && trigger + +# Ticket #1338: Make sure authentication works in the presence of an AS +# clause. 
+# +do_test auth-5.1 { + proc auth {code arg1 arg2 arg3 arg4} { + return SQLITE_OK + } + execsql { + SELECT count(a) AS cnt FROM t4 ORDER BY cnt + } +} {1} + +# Ticket #1607 +# +ifcapable compound&&subquery { + ifcapable trigger { + execsql { + DROP TABLE tx; + } + ifcapable view { + execsql { + DROP TABLE v1chng; + } + } + } + do_test auth-5.2 { + execsql { + SELECT name FROM ( + SELECT * FROM sqlite_master UNION ALL SELECT * FROM sqlite_temp_master) + WHERE type='table' + ORDER BY name + } + } {sqlite_stat1 t1 t2 t3 t4} +} + + +rename proc {} +rename proc_real proc + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/auth2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/auth2.test new file mode 100644 index 0000000..6e9a463 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/auth2.test @@ -0,0 +1,75 @@ +# 2006 Aug 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the sqlite3_set_authorizer() API +# and related functionality. +# +# $Id: auth2.test,v 1.1 2006/08/24 14:59:46 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is +# defined during compilation. +if {[catch {db auth {}} msg]} { + finish_test + return +} + +do_test auth2-1.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + } + set ::flist {} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_FUNCTION"} { + lappend ::flist $arg2 + if {$arg2=="max"} { + return SQLITE_DENY + } elseif {$arg2=="min"} { + return SQLITE_IGNORE + } else { + return SQLITE_OK + } + } + return SQLITE_OK + } + db authorizer ::auth + catchsql {SELECT max(a,b,c) FROM t1} +} {1 {not authorized to use function: max}} +do_test auth2-1.2 { + set ::flist +} max +do_test auth2-1.3 { + set ::flist {} + catchsql {SELECT min(a,b,c) FROM t1} +} {0 {{}}} +do_test auth2-1.4 { + set ::flist +} min +do_test auth2-1.5 { + set ::flist {} + catchsql {SELECT coalesce(min(a,b,c),999) FROM t1} +} {0 999} +do_test auth2-1.6 { + set ::flist +} {coalesce min} +do_test auth2-1.7 { + set ::flist {} + catchsql {SELECT coalesce(a,b,c) FROM t1} +} {0 1} +do_test auth2-1.8 { + set ::flist +} coalesce + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/autoinc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/autoinc.test new file mode 100644 index 0000000..134b4d8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/autoinc.test @@ -0,0 +1,536 @@ +# 2004 November 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the AUTOINCREMENT features. 
+# +# $Id: autoinc.test,v 1.9 2006/01/03 00:33:50 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the library is not compiled with autoincrement support then +# skip all tests in this file. +# +ifcapable {!autoinc} { + finish_test + return +} + +# The database is initially empty. +# +do_test autoinc-1.1 { + execsql { + SELECT name FROM sqlite_master WHERE type='table'; + } +} {} + +# Add a table with the AUTOINCREMENT feature. Verify that the +# SQLITE_SEQUENCE table gets created. +# +do_test autoinc-1.2 { + execsql { + CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y); + SELECT name FROM sqlite_master WHERE type='table'; + } +} {t1 sqlite_sequence} + +# The SQLITE_SEQUENCE table is initially empty +# +do_test autoinc-1.3 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {} + +# Close and reopen the database. Verify that everything is still there. +# +do_test autoinc-1.4 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM sqlite_sequence; + } +} {} + +# We are not allowed to drop the sqlite_sequence table. +# +do_test autoinc-1.5 { + catchsql {DROP TABLE sqlite_sequence} +} {1 {table sqlite_sequence may not be dropped}} +do_test autoinc-1.6 { + execsql {SELECT name FROM sqlite_master WHERE type='table'} +} {t1 sqlite_sequence} + +# Insert an entries into the t1 table and make sure the largest key +# is always recorded in the sqlite_sequence table. +# +do_test autoinc-2.1 { + execsql { + SELECT * FROM sqlite_sequence + } +} {} +do_test autoinc-2.2 { + execsql { + INSERT INTO t1 VALUES(12,34); + SELECT * FROM sqlite_sequence; + } +} {t1 12} +do_test autoinc-2.3 { + execsql { + INSERT INTO t1 VALUES(1,23); + SELECT * FROM sqlite_sequence; + } +} {t1 12} +do_test autoinc-2.4 { + execsql { + INSERT INTO t1 VALUES(123,456); + SELECT * FROM sqlite_sequence; + } +} {t1 123} +do_test autoinc-2.5 { + execsql { + INSERT INTO t1 VALUES(NULL,567); + SELECT * FROM sqlite_sequence; + } +} {t1 124} +do_test autoinc-2.6 { + execsql { + DELETE FROM t1 WHERE y=567; + SELECT * FROM sqlite_sequence; + } +} {t1 124} +do_test autoinc-2.7 { + execsql { + INSERT INTO t1 VALUES(NULL,567); + SELECT * FROM sqlite_sequence; + } +} {t1 125} +do_test autoinc-2.8 { + execsql { + DELETE FROM t1; + SELECT * FROM sqlite_sequence; + } +} {t1 125} +do_test autoinc-2.9 { + execsql { + INSERT INTO t1 VALUES(12,34); + SELECT * FROM sqlite_sequence; + } +} {t1 125} +do_test autoinc-2.10 { + execsql { + INSERT INTO t1 VALUES(125,456); + SELECT * FROM sqlite_sequence; + } +} {t1 125} +do_test autoinc-2.11 { + execsql { + INSERT INTO t1 VALUES(-1234567,-1); + SELECT * FROM sqlite_sequence; + } +} {t1 125} +do_test autoinc-2.12 { + execsql { + INSERT INTO t1 VALUES(234,5678); + SELECT * FROM sqlite_sequence; + } +} {t1 234} +do_test autoinc-2.13 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(NULL,1); + SELECT * FROM sqlite_sequence; + } +} {t1 235} +do_test autoinc-2.14 { + execsql { + SELECT * FROM t1; + } +} {235 1} + +# Manually change the autoincrement values in sqlite_sequence. 
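# In outline (a sketch against a hypothetical scratch table "seqdemo" so the
# numbered tests are not disturbed; assumes the tester.tcl execsql helper):
# the next AUTOINCREMENT rowid is one more than the larger of
# sqlite_sequence.seq and the largest rowid already in the table, so a
# hand-edited or deleted sqlite_sequence row only matters when seq is the
# larger of the two.
execsql {
  CREATE TABLE seqdemo(k INTEGER PRIMARY KEY AUTOINCREMENT, v);
  INSERT INTO seqdemo(v) VALUES('a');                         -- rowid 1
  UPDATE sqlite_sequence SET seq=5000 WHERE name='seqdemo';
  INSERT INTO seqdemo(v) VALUES('b');                         -- rowid 5001
  DELETE FROM sqlite_sequence WHERE name='seqdemo';
  INSERT INTO seqdemo(v) VALUES('c');                         -- max(rowid)+1 = 5002
  DROP TABLE seqdemo;
}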
+# +do_test autoinc-2.20 { + execsql { + UPDATE sqlite_sequence SET seq=1234 WHERE name='t1'; + INSERT INTO t1 VALUES(NULL,2); + SELECT * FROM t1; + } +} {235 1 1235 2} +do_test autoinc-2.21 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {t1 1235} +do_test autoinc-2.22 { + execsql { + UPDATE sqlite_sequence SET seq=NULL WHERE name='t1'; + INSERT INTO t1 VALUES(NULL,3); + SELECT * FROM t1; + } +} {235 1 1235 2 1236 3} +do_test autoinc-2.23 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {t1 1236} +do_test autoinc-2.24 { + execsql { + UPDATE sqlite_sequence SET seq='a-string' WHERE name='t1'; + INSERT INTO t1 VALUES(NULL,4); + SELECT * FROM t1; + } +} {235 1 1235 2 1236 3 1237 4} +do_test autoinc-2.25 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {t1 1237} +do_test autoinc-2.26 { + execsql { + DELETE FROM sqlite_sequence WHERE name='t1'; + INSERT INTO t1 VALUES(NULL,5); + SELECT * FROM t1; + } +} {235 1 1235 2 1236 3 1237 4 1238 5} +do_test autoinc-2.27 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {t1 1238} +do_test autoinc-2.28 { + execsql { + UPDATE sqlite_sequence SET seq='12345678901234567890' + WHERE name='t1'; + INSERT INTO t1 VALUES(NULL,6); + SELECT * FROM t1; + } +} {235 1 1235 2 1236 3 1237 4 1238 5 1239 6} +do_test autoinc-2.29 { + execsql { + SELECT * FROM sqlite_sequence; + } +} {t1 1239} + +# Test multi-row inserts +# +do_test autoinc-2.50 { + execsql { + DELETE FROM t1 WHERE y>=3; + INSERT INTO t1 SELECT NULL, y+2 FROM t1; + SELECT * FROM t1; + } +} {235 1 1235 2 1240 3 1241 4} +do_test autoinc-2.51 { + execsql { + SELECT * FROM sqlite_sequence + } +} {t1 1241} + +ifcapable tempdb { + do_test autoinc-2.52 { + execsql { + CREATE TEMP TABLE t2 AS SELECT y FROM t1; + INSERT INTO t1 SELECT NULL, y+4 FROM t2; + SELECT * FROM t1; + } + } {235 1 1235 2 1240 3 1241 4 1242 5 1243 6 1244 7 1245 8} + do_test autoinc-2.53 { + execsql { + SELECT * FROM sqlite_sequence + } + } {t1 1245} + do_test autoinc-2.54 { + execsql { + DELETE FROM t1; + INSERT INTO t1 SELECT NULL, y FROM t2; + SELECT * FROM t1; + } + } {1246 1 1247 2 1248 3 1249 4} + do_test autoinc-2.55 { + execsql { + SELECT * FROM sqlite_sequence + } + } {t1 1249} +} + +# Create multiple AUTOINCREMENT tables. Make sure all sequences are +# tracked separately and do not interfere with one another. +# +do_test autoinc-2.70 { + catchsql { + DROP TABLE t2; + } + execsql { + CREATE TABLE t2(d, e INTEGER PRIMARY KEY AUTOINCREMENT, f); + INSERT INTO t2(d) VALUES(1); + SELECT * FROM sqlite_sequence; + } +} [ifcapable tempdb {list t1 1249 t2 1} else {list t1 1241 t2 1}] +do_test autoinc-2.71 { + execsql { + INSERT INTO t2(d) VALUES(2); + SELECT * FROM sqlite_sequence; + } +} [ifcapable tempdb {list t1 1249 t2 2} else {list t1 1241 t2 2}] +do_test autoinc-2.72 { + execsql { + INSERT INTO t1(x) VALUES(10000); + SELECT * FROM sqlite_sequence; + } +} {t1 10000 t2 2} +do_test autoinc-2.73 { + execsql { + CREATE TABLE t3(g INTEGER PRIMARY KEY AUTOINCREMENT, h); + INSERT INTO t3(h) VALUES(1); + SELECT * FROM sqlite_sequence; + } +} {t1 10000 t2 2 t3 1} +do_test autoinc-2.74 { + execsql { + INSERT INTO t2(d,e) VALUES(3,100); + SELECT * FROM sqlite_sequence; + } +} {t1 10000 t2 100 t3 1} + + +# When a table with an AUTOINCREMENT is deleted, the corresponding entry +# in the SQLITE_SEQUENCE table should also be deleted. But the SQLITE_SEQUENCE +# table itself should remain behind. 
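# The essence, as a small sketch (hypothetical scratch table "dropdemo";
# tester.tcl execsql helper assumed): dropping an AUTOINCREMENT table removes
# its row from sqlite_sequence, while the sqlite_sequence table itself stays.
execsql {
  CREATE TABLE dropdemo(k INTEGER PRIMARY KEY AUTOINCREMENT, v);
  INSERT INTO dropdemo(v) VALUES('x');
  DROP TABLE dropdemo;
}
execsql {SELECT count(*) FROM sqlite_sequence WHERE name='dropdemo'}        ;# 0
execsql {SELECT count(*) FROM sqlite_master WHERE name='sqlite_sequence'}   ;# 1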
+# +do_test autoinc-3.1 { + execsql {SELECT name FROM sqlite_sequence} +} {t1 t2 t3} +do_test autoinc-3.2 { + execsql { + DROP TABLE t1; + SELECT name FROM sqlite_sequence; + } +} {t2 t3} +do_test autoinc-3.3 { + execsql { + DROP TABLE t3; + SELECT name FROM sqlite_sequence; + } +} {t2} +do_test autoinc-3.4 { + execsql { + DROP TABLE t2; + SELECT name FROM sqlite_sequence; + } +} {} + +# AUTOINCREMENT on TEMP tables. +# +ifcapable tempdb { + do_test autoinc-4.1 { + execsql { + SELECT 1, name FROM sqlite_master WHERE type='table'; + SELECT 2, name FROM sqlite_temp_master WHERE type='table'; + } + } {1 sqlite_sequence} + do_test autoinc-4.2 { + execsql { + CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y); + CREATE TEMP TABLE t3(a INTEGER PRIMARY KEY AUTOINCREMENT, b); + SELECT 1, name FROM sqlite_master WHERE type='table'; + SELECT 2, name FROM sqlite_temp_master WHERE type='table'; + } + } {1 sqlite_sequence 1 t1 2 t3 2 sqlite_sequence} + do_test autoinc-4.3 { + execsql { + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {} + do_test autoinc-4.4 { + execsql { + INSERT INTO t1 VALUES(10,1); + INSERT INTO t3 VALUES(20,2); + INSERT INTO t1 VALUES(NULL,3); + INSERT INTO t3 VALUES(NULL,4); + } + } {} + + ifcapable compound { + do_test autoinc-4.4.1 { + execsql { + SELECT * FROM t1 UNION ALL SELECT * FROM t3; + } + } {10 1 11 3 20 2 21 4} + } ;# ifcapable compound + + do_test autoinc-4.5 { + execsql { + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {1 t1 11 2 t3 21} + do_test autoinc-4.6 { + execsql { + INSERT INTO t1 SELECT * FROM t3; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {1 t1 21 2 t3 21} + do_test autoinc-4.7 { + execsql { + INSERT INTO t3 SELECT x+100, y FROM t1; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {1 t1 21 2 t3 121} + do_test autoinc-4.8 { + execsql { + DROP TABLE t3; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {1 t1 21} + do_test autoinc-4.9 { + execsql { + CREATE TEMP TABLE t2(p INTEGER PRIMARY KEY AUTOINCREMENT, q); + INSERT INTO t2 SELECT * FROM t1; + DROP TABLE t1; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {2 t2 21} + do_test autoinc-4.10 { + execsql { + DROP TABLE t2; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + } + } {} +} + +# Make sure AUTOINCREMENT works on ATTACH-ed tables. 
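# In miniature (a sketch with a hypothetical scratch file "aux_demo.db" and
# handle "dbaux"; assumes the tester.tcl helpers and the Tcl sqlite3 command):
# every database file carries its own sqlite_sequence, so an AUTOINCREMENT
# counter in an ATTACH-ed database is tracked inside that file, not in main
# or temp.
sqlite3 dbaux aux_demo.db
dbaux eval {CREATE TABLE t(k INTEGER PRIMARY KEY AUTOINCREMENT, v)}
dbaux close
execsql {
  ATTACH 'aux_demo.db' AS auxdemo;
  INSERT INTO auxdemo.t(v) VALUES('hi');
  SELECT * FROM auxdemo.sqlite_sequence;   -- expect: t 1
  DETACH auxdemo;
}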
+# +ifcapable tempdb { + do_test autoinc-5.1 { + file delete -force test2.db + file delete -force test2.db-journal + sqlite3 db2 test2.db + execsql { + CREATE TABLE t4(m INTEGER PRIMARY KEY AUTOINCREMENT, n); + CREATE TABLE t5(o, p INTEGER PRIMARY KEY AUTOINCREMENT); + } db2; + execsql { + ATTACH 'test2.db' as aux; + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + SELECT 3, * FROM aux.sqlite_sequence; + } + } {} + do_test autoinc-5.2 { + execsql { + INSERT INTO t4 VALUES(NULL,1); + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + SELECT 3, * FROM aux.sqlite_sequence; + } + } {3 t4 1} + do_test autoinc-5.3 { + execsql { + INSERT INTO t5 VALUES(100,200); + SELECT * FROM sqlite_sequence + } db2 + } {t4 1 t5 200} + do_test autoinc-5.4 { + execsql { + SELECT 1, * FROM main.sqlite_sequence; + SELECT 2, * FROM temp.sqlite_sequence; + SELECT 3, * FROM aux.sqlite_sequence; + } + } {3 t4 1 3 t5 200} +} + +# Requirement REQ00310: Make sure an insert fails if the sequence is +# already at its maximum value. +# +ifcapable {rowid32} { + do_test autoinc-6.1 { + execsql { + CREATE TABLE t6(v INTEGER PRIMARY KEY AUTOINCREMENT, w); + INSERT INTO t6 VALUES(2147483647,1); + SELECT seq FROM main.sqlite_sequence WHERE name='t6'; + } + } 2147483647 +} +ifcapable {!rowid32} { + do_test autoinc-6.1 { + execsql { + CREATE TABLE t6(v INTEGER PRIMARY KEY AUTOINCREMENT, w); + INSERT INTO t6 VALUES(9223372036854775807,1); + SELECT seq FROM main.sqlite_sequence WHERE name='t6'; + } + } 9223372036854775807 +} +do_test autoinc-6.2 { + catchsql { + INSERT INTO t6 VALUES(NULL,1); + } +} {1 {database or disk is full}} + +# Allow the AUTOINCREMENT keyword inside the parentheses +# on a separate PRIMARY KEY designation. +# +do_test autoinc-7.1 { + execsql { + CREATE TABLE t7(x INTEGER, y REAL, PRIMARY KEY(x AUTOINCREMENT)); + INSERT INTO t7(y) VALUES(123); + INSERT INTO t7(y) VALUES(234); + DELETE FROM t7; + INSERT INTO t7(y) VALUES(345); + SELECT * FROM t7; + } +} {3 345.0} + +# Test that if the AUTOINCREMENT is applied to a non integer primary key +# the error message is sensible. +do_test autoinc-7.2 { + catchsql { + CREATE TABLE t8(x TEXT PRIMARY KEY AUTOINCREMENT); + } +} {1 {AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY}} + + +# Ticket #1283. Make sure that preparing but never running a statement +# that creates the sqlite_sequence table does not mess up the database. +# +do_test autoinc-8.1 { + catch {db2 close} + catch {db close} + file delete -force test.db + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + set STMT [sqlite3_prepare $DB { + CREATE TABLE t1( + x INTEGER PRIMARY KEY AUTOINCREMENT + ) + } -1 TAIL] + sqlite3_finalize $STMT + set STMT [sqlite3_prepare $DB { + CREATE TABLE t1( + x INTEGER PRIMARY KEY AUTOINCREMENT + ) + } -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql { + INSERT INTO t1 VALUES(NULL); + SELECT * FROM t1; + } +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum.test b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum.test new file mode 100644 index 0000000..6128f09 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum.test @@ -0,0 +1,646 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the SELECT statement. +# +# $Id: autovacuum.test,v 1.26 2007/04/07 15:03:17 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. +ifcapable {!autovacuum || !pragma} { + finish_test + return +} + +# Return a string $len characters long. The returned string is $char repeated +# over and over. For example, [make_str abc 8] returns "abcabcab". +proc make_str {char len} { + set str [string repeat $char. $len] + return [string range $str 0 [expr $len-1]] +} + +# Return the number of pages in the file test.db by looking at the file system. +proc file_pages {} { + return [expr [file size test.db] / 1024] +} + +#------------------------------------------------------------------------- +# Test cases autovacuum-1.* work as follows: +# +# 1. A table with a single indexed field is created. +# 2. Approximately 20 rows are inserted into the table. Each row is long +# enough such that it uses at least 2 overflow pages for both the table +# and index entry. +# 3. The rows are deleted in a psuedo-random order. Sometimes only one row +# is deleted per transaction, sometimes more than one. +# 4. After each transaction the table data is checked to ensure it is correct +# and a "PRAGMA integrity_check" is executed. +# 5. Once all the rows are deleted the file is checked to make sure it +# consists of exactly 4 pages. +# +# Steps 2-5 are repeated for a few different psuedo-random delete patterns +# (defined by the $delete_orders list). +set delete_orders [list] +lappend delete_orders {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} +lappend delete_orders {20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +lappend delete_orders {8 18 2 4 14 11 13 3 10 7 9 5 12 17 19 15 20 6 16 1} +lappend delete_orders {10 3 11 17 19 20 7 4 13 6 1 14 16 12 9 18 8 15 5 2} +lappend delete_orders {{1 2 3 4 5 6 7 8 9 10} {11 12 13 14 15 16 17 18 19 20}} +lappend delete_orders {{19 8 17 15} {16 11 9 14} {18 5 3 1} {13 20 7 2} {6 12}} + +# The length of each table entry. +# set ENTRY_LEN 3500 +set ENTRY_LEN 3500 + +do_test autovacuum-1.1 { + execsql { + PRAGMA auto_vacuum = 1; + CREATE TABLE av1(a); + CREATE INDEX av1_idx ON av1(a); + } +} {} + +set tn 0 +foreach delete_order $delete_orders { + incr tn + + # Set up the table. + set ::tbl_data [list] + foreach i [lsort -integer [eval concat $delete_order]] { + execsql "INSERT INTO av1 (oid, a) VALUES($i, '[make_str $i $ENTRY_LEN]')" + lappend ::tbl_data [make_str $i $ENTRY_LEN] + } + + # Make sure the integrity check passes with the initial data. + ifcapable {integrityck} { + do_test autovacuum-1.$tn.1 { + execsql { + pragma integrity_check + } + } {ok} + } + + foreach delete $delete_order { + # Delete one set of rows from the table. + do_test autovacuum-1.$tn.($delete).1 { + execsql " + DELETE FROM av1 WHERE oid = [join $delete " OR oid = "] + " + } {} + + # Do the integrity check. + ifcapable {integrityck} { + do_test autovacuum-1.$tn.($delete).2 { + execsql { + pragma integrity_check + } + } {ok} + } + # Ensure the data remaining in the table is what was expected. 
+ foreach d $delete { + set idx [lsearch $::tbl_data [make_str $d $ENTRY_LEN]] + set ::tbl_data [lreplace $::tbl_data $idx $idx] + } + do_test autovacuum-1.$tn.($delete).3 { + execsql { + select a from av1 + } + } $::tbl_data + } + + # All rows have been deleted. Ensure the file has shrunk to 4 pages. + do_test autovacuum-1.$tn.3 { + file_pages + } {4} +} + +#--------------------------------------------------------------------------- +# Tests cases autovacuum-2.* test that root pages are allocated +# and deallocated correctly at the start of the file. Operation is roughly as +# follows: +# +# autovacuum-2.1.*: Drop the tables that currently exist in the database. +# autovacuum-2.2.*: Create some tables. Ensure that data pages can be +# moved correctly to make space for new root-pages. +# autovacuum-2.3.*: Drop one of the tables just created (not the last one), +# and check that one of the other tables is moved to +# the free root-page location. +# autovacuum-2.4.*: Check that a table can be created correctly when the +# root-page it requires is on the free-list. +# autovacuum-2.5.*: Check that a table with indices can be dropped. This +# is slightly tricky because dropping one of the +# indices/table btrees could move the root-page of another. +# The code-generation layer of SQLite overcomes this problem +# by dropping the btrees in descending order of root-pages. +# This test ensures that this actually happens. +# +do_test autovacuum-2.1.1 { + execsql { + DROP TABLE av1; + } +} {} +do_test autovacuum-2.1.2 { + file_pages +} {1} + +# Create a table and put some data in it. +do_test autovacuum-2.2.1 { + execsql { + CREATE TABLE av1(x); + SELECT rootpage FROM sqlite_master ORDER BY rootpage; + } +} {3} +do_test autovacuum-2.2.2 { + execsql " + INSERT INTO av1 VALUES('[make_str abc 3000]'); + INSERT INTO av1 VALUES('[make_str def 3000]'); + INSERT INTO av1 VALUES('[make_str ghi 3000]'); + INSERT INTO av1 VALUES('[make_str jkl 3000]'); + " + set ::av1_data [db eval {select * from av1}] + file_pages +} {15} + +# Create another table. Check it is located immediately after the first. +# This test case moves the second page in an over-flow chain. +do_test autovacuum-2.2.3 { + execsql { + CREATE TABLE av2(x); + SELECT rootpage FROM sqlite_master ORDER BY rootpage; + } +} {3 4} +do_test autovacuum-2.2.4 { + file_pages +} {16} + +# Create another table. Check it is located immediately after the second. +# This test case moves the first page in an over-flow chain. +do_test autovacuum-2.2.5 { + execsql { + CREATE TABLE av3(x); + SELECT rootpage FROM sqlite_master ORDER BY rootpage; + } +} {3 4 5} +do_test autovacuum-2.2.6 { + file_pages +} {17} + +# Create another table. Check it is located immediately after the second. +# This test case moves a btree leaf page. 
+do_test autovacuum-2.2.7 { + execsql { + CREATE TABLE av4(x); + SELECT rootpage FROM sqlite_master ORDER BY rootpage; + } +} {3 4 5 6} +do_test autovacuum-2.2.8 { + file_pages +} {18} +do_test autovacuum-2.2.9 { + execsql { + select * from av1 + } +} $av1_data + +do_test autovacuum-2.3.1 { + execsql { + INSERT INTO av2 SELECT 'av1' || x FROM av1; + INSERT INTO av3 SELECT 'av2' || x FROM av1; + INSERT INTO av4 SELECT 'av3' || x FROM av1; + } + set ::av2_data [execsql {select x from av2}] + set ::av3_data [execsql {select x from av3}] + set ::av4_data [execsql {select x from av4}] + file_pages +} {54} +do_test autovacuum-2.3.2 { + execsql { + DROP TABLE av2; + SELECT rootpage FROM sqlite_master ORDER BY rootpage; + } +} {3 4 5} +do_test autovacuum-2.3.3 { + file_pages +} {41} +do_test autovacuum-2.3.4 { + execsql { + SELECT x FROM av3; + } +} $::av3_data +do_test autovacuum-2.3.5 { + execsql { + SELECT x FROM av4; + } +} $::av4_data + +# Drop all the tables in the file. This puts all pages except the first 2 +# (the sqlite_master root-page and the first pointer map page) on the +# free-list. +do_test autovacuum-2.4.1 { + execsql { + DROP TABLE av1; + DROP TABLE av3; + BEGIN; + DROP TABLE av4; + } + file_pages +} {15} +do_test autovacuum-2.4.2 { + for {set i 3} {$i<=10} {incr i} { + execsql "CREATE TABLE av$i (x)" + } + file_pages +} {15} +do_test autovacuum-2.4.3 { + execsql { + SELECT rootpage FROM sqlite_master ORDER by rootpage + } +} {3 4 5 6 7 8 9 10} + +# Right now there are 5 free pages in the database. Consume and then free +# a 520 pages. Then create 520 tables. This ensures that at least some of the +# desired root-pages reside on the second free-list trunk page, and that the +# trunk itself is required at some point. +do_test autovacuum-2.4.4 { + execsql " + INSERT INTO av3 VALUES ('[make_str abcde [expr 1020*520 + 500]]'); + DELETE FROM av3; + " +} {} +set root_page_list [list] +set pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1] +for {set i 3} {$i<=532} {incr i} { + # 207 and 412 are pointer-map pages. + if { $i!=207 && $i!=412 && $i != $pending_byte_page} { + lappend root_page_list $i + } +} +if {$i >= $pending_byte_page} { + lappend root_page_list $i +} +do_test autovacuum-2.4.5 { + for {set i 11} {$i<=530} {incr i} { + execsql "CREATE TABLE av$i (x)" + } + execsql { + SELECT rootpage FROM sqlite_master ORDER by rootpage + } +} $root_page_list + +# Just for fun, delete all those tables and see if the database is 1 page. +do_test autovacuum-2.4.6 { + execsql COMMIT; + file_pages +} [expr 561 + (($i >= $pending_byte_page)?1:0)] +integrity_check autovacuum-2.4.6 +do_test autovacuum-2.4.7 { + execsql BEGIN + for {set i 3} {$i<=530} {incr i} { + execsql "DROP TABLE av$i" + } + execsql COMMIT + file_pages +} 1 + +# Create some tables with indices to drop. 
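# The tests that follow drop these tables again and watch how the rootpage
# values recorded in sqlite_master change as btrees are moved into freed
# pages. A quick way to watch that by hand (sketch only; assumes the
# tester.tcl execsql helper, and the DROP shown is hypothetical):
proc show_roots {} {
  execsql {SELECT name, rootpage FROM sqlite_master ORDER BY rootpage}
}
# show_roots                  ;# note the root pages before a DROP
# execsql {DROP TABLE av2}    ;# under auto_vacuum the freed pages are reused,
# show_roots                  ;# so btrees with higher root pages slide down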
+do_test autovacuum-2.5.1 { + execsql { + CREATE TABLE av1(a PRIMARY KEY, b, c); + INSERT INTO av1 VALUES('av1 a', 'av1 b', 'av1 c'); + + CREATE TABLE av2(a PRIMARY KEY, b, c); + CREATE INDEX av2_i1 ON av2(b); + CREATE INDEX av2_i2 ON av2(c); + INSERT INTO av2 VALUES('av2 a', 'av2 b', 'av2 c'); + + CREATE TABLE av3(a PRIMARY KEY, b, c); + CREATE INDEX av3_i1 ON av3(b); + INSERT INTO av3 VALUES('av3 a', 'av3 b', 'av3 c'); + + CREATE TABLE av4(a, b, c); + CREATE INDEX av4_i1 ON av4(a); + CREATE INDEX av4_i2 ON av4(b); + CREATE INDEX av4_i3 ON av4(c); + CREATE INDEX av4_i4 ON av4(a, b, c); + INSERT INTO av4 VALUES('av4 a', 'av4 b', 'av4 c'); + } +} {} + +do_test autovacuum-2.5.2 { + execsql { + SELECT name, rootpage FROM sqlite_master; + } +} [list av1 3 sqlite_autoindex_av1_1 4 \ + av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \ + av3 9 sqlite_autoindex_av3_1 10 av3_i1 11 \ + av4 12 av4_i1 13 av4_i2 14 av4_i3 15 av4_i4 16 \ +] + +# The following 4 tests are SELECT queries that use the indices created. +# If the root-pages in the internal schema are not updated correctly when +# a table or indice is moved, these queries will fail. They are repeated +# after each table is dropped (i.e. as test cases 2.5.*.[1..4]). +do_test autovacuum-2.5.2.1 { + execsql { + SELECT * FROM av1 WHERE a = 'av1 a'; + } +} {{av1 a} {av1 b} {av1 c}} +do_test autovacuum-2.5.2.2 { + execsql { + SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c' + } +} {{av2 a} {av2 b} {av2 c}} +do_test autovacuum-2.5.2.3 { + execsql { + SELECT * FROM av3 WHERE a = 'av3 a' AND b = 'av3 b'; + } +} {{av3 a} {av3 b} {av3 c}} +do_test autovacuum-2.5.2.4 { + execsql { + SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c'; + } +} {{av4 a} {av4 b} {av4 c}} + +# Drop table av3. Indices av4_i2, av4_i3 and av4_i4 are moved to fill the two +# root pages vacated. The operation proceeds as: +# Step 1: Delete av3_i1 (root-page 11). Move root-page of av4_i4 to page 11. +# Step 2: Delete av3 (root-page 10). Move root-page of av4_i3 to page 10. +# Step 3: Delete sqlite_autoindex_av1_3 (root-page 9). Move av4_i2 to page 9. +do_test autovacuum-2.5.3 { + execsql { + DROP TABLE av3; + SELECT name, rootpage FROM sqlite_master; + } +} [list av1 3 sqlite_autoindex_av1_1 4 \ + av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \ + av4 12 av4_i1 13 av4_i2 9 av4_i3 10 av4_i4 11 \ +] +do_test autovacuum-2.5.3.1 { + execsql { + SELECT * FROM av1 WHERE a = 'av1 a'; + } +} {{av1 a} {av1 b} {av1 c}} +do_test autovacuum-2.5.3.2 { + execsql { + SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c' + } +} {{av2 a} {av2 b} {av2 c}} +do_test autovacuum-2.5.3.3 { + execsql { + SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c'; + } +} {{av4 a} {av4 b} {av4 c}} + +# Drop table av1: +# Step 1: Delete av1 (root page 4). Root-page of av4_i1 fills the gap. +# Step 2: Delete sqlite_autoindex_av1_1 (root page 3). Move av4 to the gap. +do_test autovacuum-2.5.4 { + execsql { + DROP TABLE av1; + SELECT name, rootpage FROM sqlite_master; + } +} [list av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \ + av4 3 av4_i1 4 av4_i2 9 av4_i3 10 av4_i4 11 \ +] +do_test autovacuum-2.5.4.2 { + execsql { + SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c' + } +} {{av2 a} {av2 b} {av2 c}} +do_test autovacuum-2.5.4.4 { + execsql { + SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c'; + } +} {{av4 a} {av4 b} {av4 c}} + +# Drop table av4: +# Step 1: Delete av4_i4. +# Step 2: Delete av4_i3. 
+# Step 3: Delete av4_i2. +# Step 4: Delete av4_i1. av2_i2 replaces it. +# Step 5: Delete av4. av2_i1 replaces it. +do_test autovacuum-2.5.5 { + execsql { + DROP TABLE av4; + SELECT name, rootpage FROM sqlite_master; + } +} [list av2 5 sqlite_autoindex_av2_1 6 av2_i1 3 av2_i2 4] +do_test autovacuum-2.5.5.2 { + execsql { + SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c' + } +} {{av2 a} {av2 b} {av2 c}} + +#-------------------------------------------------------------------------- +# Test cases autovacuum-3.* test the operation of the "PRAGMA auto_vacuum" +# command. +# +do_test autovacuum-3.1 { + execsql { + PRAGMA auto_vacuum; + } +} {1} +do_test autovacuum-3.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum; + } +} {1} +do_test autovacuum-3.3 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA auto_vacuum; + } +} {1} + +do_test autovacuum-3.4 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum; + } +} $AUTOVACUUM +do_test autovacuum-3.5 { + execsql { + CREATE TABLE av1(x); + PRAGMA auto_vacuum; + } +} $AUTOVACUUM +do_test autovacuum-3.6 { + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA auto_vacuum; + } +} $AUTOVACUUM +do_test autovacuum-3.7 { + execsql { + DROP TABLE av1; + } + file_pages +} [expr $AUTOVACUUM?1:2] + + +#----------------------------------------------------------------------- +# Test that if a statement transaction around a CREATE INDEX statement is +# rolled back no corruption occurs. +# +do_test autovacuum-4.0 { + # The last round of tests may have left the db in non-autovacuum mode. + # Reset everything just in case. + # + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA auto_vacuum; + } +} {1} +do_test autovacuum-4.1 { + execsql { + CREATE TABLE av1(a, b); + BEGIN; + } + for {set i 0} {$i<100} {incr i} { + execsql "INSERT INTO av1 VALUES($i, '[string repeat X 200]');" + } + execsql "INSERT INTO av1 VALUES(99, '[string repeat X 200]');" + execsql { + SELECT sum(a) FROM av1; + } +} {5049} +do_test autovacuum-4.2 { + catchsql { + CREATE UNIQUE INDEX av1_i ON av1(a); + } +} {1 {indexed columns are not unique}} +do_test autovacuum-4.3 { + execsql { + SELECT sum(a) FROM av1; + } +} {5049} +do_test autovacuum-4.4 { + execsql { + COMMIT; + } +} {} + +ifcapable integrityck { + +# Ticket #1727 +do_test autovacuum-5.1 { + db close + sqlite3 db :memory: + db eval { + PRAGMA auto_vacuum=1; + CREATE TABLE t1(a); + CREATE TABLE t2(a); + DROP TABLE t1; + PRAGMA integrity_check; + } +} ok + +} + +# Ticket #1728. +# +# In autovacuum mode, when tables or indices are deleted, the rootpage +# values in the symbol table have to be updated. There was a bug in this +# logic so that if an index/table was moved twice, the second move might +# not occur. This would leave the internal symbol table in an inconsistent +# state causing subsequent statements to fail. +# +# The problem is difficult to reproduce. The sequence of statements in +# the following test are carefully designed make it occur and thus to +# verify that this very obscure bug has been resolved. 
+# +ifcapable integrityck&&memorydb { + +do_test autovacuum-6.1 { + db close + sqlite3 db :memory: + db eval { + PRAGMA auto_vacuum=1; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + CREATE TABLE t2(a); + CREATE INDEX i2 ON t2(a); + CREATE TABLE t3(a); + CREATE INDEX i3 ON t2(a); + CREATE INDEX x ON t1(b); + DROP TABLE t3; + PRAGMA integrity_check; + DROP TABLE t2; + PRAGMA integrity_check; + DROP TABLE t1; + PRAGMA integrity_check; + } +} {ok ok ok} + +} + +#--------------------------------------------------------------------- +# Test cases autovacuum-7.X test the case where a page must be moved +# and the destination location collides with at least one other +# entry in the page hash-table (internal to the pager.c module. +# +do_test autovacuum-7.1 { + db close + file delete -force test.db + file delete -force test.db-journal + sqlite3 db test.db + + execsql { + PRAGMA auto_vacuum=1; + CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); + INSERT INTO t1 VALUES(randstr(400,400),randstr(400,400)); + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 4 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 8 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 16 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 32 + } + + expr {[file size test.db] / 1024} +} {73} + +do_test autovacuum-7.2 { + execsql { + CREATE TABLE t2(a, b, PRIMARY KEY(a, b)); + INSERT INTO t2 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + CREATE TABLE t3(a, b, PRIMARY KEY(a, b)); + INSERT INTO t3 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + CREATE TABLE t4(a, b, PRIMARY KEY(a, b)); + INSERT INTO t4 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + CREATE TABLE t5(a, b, PRIMARY KEY(a, b)); + INSERT INTO t5 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + } + expr {[file size test.db] / 1024} +} {354} + +do_test autovacuum-7.3 { + db close + sqlite3 db test.db + execsql { + BEGIN; + DELETE FROM t4; + COMMIT; + SELECT count(*) FROM t1; + } + expr {[file size test.db] / 1024} +} {286} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_crash.test b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_crash.test new file mode 100644 index 0000000..981cc4f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_crash.test @@ -0,0 +1,58 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file runs the tests in the file crash.test with auto-vacuum enabled +# databases. +# +# $Id: autovacuum_crash.test,v 1.2 2005/01/16 09:06:34 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. 
+ifcapable {!autovacuum} { + finish_test + return +} + +rename finish_test really_finish_test2 +proc finish_test {} {} +set ISQUICK 1 + +rename sqlite3 real_sqlite3 +proc sqlite3 {args} { + set r [eval "real_sqlite3 $args"] + if { [llength $args] == 2 } { + [lindex $args 0] eval {pragma auto_vacuum = 1} + } + set r +} + +rename do_test really_do_test +proc do_test {args} { + set sc [concat really_do_test "autovacuum-[lindex $args 0]" \ + [lrange $args 1 end]] + eval $sc +} + +source $testdir/crash.test + +rename sqlite3 "" +rename real_sqlite3 sqlite3 +rename finish_test "" +rename really_finish_test2 finish_test +rename do_test "" +rename really_do_test do_test +finish_test + + + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr.test b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr.test new file mode 100644 index 0000000..7e01cb2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr.test @@ -0,0 +1,58 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file runs the tests in the file ioerr.test with auto-vacuum enabled +# databases. +# +# $Id: autovacuum_ioerr.test,v 1.3 2006/01/16 12:46:41 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. +ifcapable {!autovacuum} { + finish_test + return +} + +rename finish_test really_finish_test2 +proc finish_test {} {} +set ISQUICK 1 + +rename sqlite3 real_sqlite3 +proc sqlite3 {args} { + set r [eval "real_sqlite3 $args"] + if { [llength $args] == 2 } { + [lindex $args 0] eval {pragma auto_vacuum = 1} + } + set r +} + +rename do_test really_do_test +proc do_test {args} { + set sc [concat really_do_test "autovacuum-[lindex $args 0]" \ + [lrange $args 1 end]] + eval $sc +} + +source $testdir/ioerr.test + +rename sqlite3 "" +rename real_sqlite3 sqlite3 +rename finish_test "" +rename really_finish_test2 finish_test +rename do_test "" +rename really_do_test do_test +finish_test + + + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr2.test new file mode 100644 index 0000000..3eb549d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/autovacuum_ioerr2.test @@ -0,0 +1,133 @@ +# 2001 October 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing for correct handling of I/O errors +# such as writes failing because the disk is full. +# +# The tests in this file use special facilities that are only +# available in the SQLite test fixture. +# +# $Id: autovacuum_ioerr2.test,v 1.6 2007/04/28 15:47:44 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. 
+ifcapable {!autovacuum} { + finish_test + return +} + +do_ioerr_test autovacuum-ioerr2-1 -sqlprep { + PRAGMA auto_vacuum = 1; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1500,1500)); +} -sqlbody { + CREATE TABLE abc2(a); + BEGIN; + DELETE FROM abc; + INSERT INTO abc VALUES(randstr(1500,1500)); + CREATE TABLE abc3(a); + COMMIT; +} + +do_ioerr_test autovacuum-ioerr2-2 -tclprep { + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 4 is overflow + INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 5 is overflow + } + for {set i 0} {$i<150} {incr i} { + execsql { + INSERT INTO abc VALUES(randstr(100,100)); + } + } + execsql COMMIT +} -sqlbody { + BEGIN; + DELETE FROM abc WHERE length(a)>100; + UPDATE abc SET a = randstr(90,90); + CREATE TABLE abc3(a); + COMMIT; +} + +do_ioerr_test autovacuum-ioerr2-3 -sqlprep { + PRAGMA auto_vacuum = 1; + CREATE TABLE abc(a); + CREATE TABLE abc2(b); +} -sqlbody { + BEGIN; + INSERT INTO abc2 VALUES(10); + DROP TABLE abc; + COMMIT; + DROP TABLE abc2; +} + +file delete -force backup.db +ifcapable subquery { + do_ioerr_test autovacuum-ioerr2-4 -tclprep { + if {![file exists backup.db]} { + sqlite3 dbb backup.db + execsql { + PRAGMA auto_vacuum = 1; + BEGIN; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 4 is overflow + INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 5 is overflow + } dbb + for {set i 0} {$i<2500} {incr i} { + execsql { + INSERT INTO abc VALUES(randstr(100,100)); + } dbb + } + execsql { + COMMIT; + PRAGMA cache_size = 10; + } dbb + dbb close + } + db close + file delete -force test.db + file delete -force test.db-journal + copy_file backup.db test.db + set ::DB [sqlite3 db test.db] + execsql { + PRAGMA cache_size = 10; + } + } -sqlbody { + BEGIN; + DELETE FROM abc WHERE oid < 3; + UPDATE abc SET a = randstr(100,100) WHERE oid > 2300; + UPDATE abc SET a = randstr(1100,1100) WHERE oid = + (select max(oid) from abc); + COMMIT; + } +} + +do_ioerr_test autovacuum-ioerr2-1 -sqlprep { + PRAGMA auto_vacuum = 1; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1500,1500)); +} -sqlbody { + CREATE TABLE abc2(a); + BEGIN; + DELETE FROM abc; + INSERT INTO abc VALUES(randstr(1500,1500)); + CREATE TABLE abc3(a); + COMMIT; +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/avtrans.test b/libraries/sqlite/unix/sqlite-3.5.1/test/avtrans.test new file mode 100644 index 0000000..328e028 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/avtrans.test @@ -0,0 +1,921 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. This +# file is a copy of "trans.test" modified to run under autovacuum mode. +# the point is to stress the autovacuum logic and try to get it to fail. +# +# $Id: avtrans.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Create several tables to work with. 
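# One behavior stressed repeatedly below (see the avtrans-3.* tests): a second
# connection keeps seeing the pre-transaction data until the writer commits.
# Sketched here with hypothetical handle names, and left commented out so it
# does not disturb the row counts the numbered tests expect from the table
# created just below:
# sqlite3 writer test.db
# sqlite3 reader test.db
# writer eval {BEGIN; INSERT INTO one VALUES(4,'four')}
# reader eval {SELECT a FROM one ORDER BY a}   ;# still {1 2 3}
# writer eval {END}
# reader eval {SELECT a FROM one ORDER BY a}   ;# now {1 2 3 4}
# writer close
# reader close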
+# +do_test avtrans-1.0 { + execsql { + PRAGMA auto_vacuum=ON; + CREATE TABLE one(a int PRIMARY KEY, b text); + INSERT INTO one VALUES(1,'one'); + INSERT INTO one VALUES(2,'two'); + INSERT INTO one VALUES(3,'three'); + SELECT b FROM one ORDER BY a; + } +} {one two three} +do_test avtrans-1.1 { + execsql { + CREATE TABLE two(a int PRIMARY KEY, b text); + INSERT INTO two VALUES(1,'I'); + INSERT INTO two VALUES(5,'V'); + INSERT INTO two VALUES(10,'X'); + SELECT b FROM two ORDER BY a; + } +} {I V X} +do_test avtrans-1.9 { + sqlite3 altdb test.db + execsql {SELECT b FROM one ORDER BY a} altdb +} {one two three} +do_test avtrans-1.10 { + execsql {SELECT b FROM two ORDER BY a} altdb +} {I V X} +integrity_check avtrans-1.11 + +# Basic transactions +# +do_test avtrans-2.1 { + set v [catch {execsql {BEGIN}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.2 { + set v [catch {execsql {END}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.3 { + set v [catch {execsql {BEGIN TRANSACTION}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.4 { + set v [catch {execsql {COMMIT TRANSACTION}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.5 { + set v [catch {execsql {BEGIN TRANSACTION 'foo'}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.6 { + set v [catch {execsql {ROLLBACK TRANSACTION 'foo'}} msg] + lappend v $msg +} {0 {}} +do_test avtrans-2.10 { + execsql { + BEGIN; + SELECT a FROM one ORDER BY a; + SELECT a FROM two ORDER BY a; + END; + } +} {1 2 3 1 5 10} +integrity_check avtrans-2.11 + +# Check the locking behavior +# +sqlite3_soft_heap_limit 0 +do_test avtrans-3.1 { + execsql { + BEGIN; + UPDATE one SET a = 0 WHERE 0; + SELECT a FROM one ORDER BY a; + } +} {1 2 3} +do_test avtrans-3.2 { + catchsql { + SELECT a FROM two ORDER BY a; + } altdb +} {0 {1 5 10}} +do_test avtrans-3.3 { + catchsql { + SELECT a FROM one ORDER BY a; + } altdb +} {0 {1 2 3}} +do_test avtrans-3.4 { + catchsql { + INSERT INTO one VALUES(4,'four'); + } +} {0 {}} +do_test avtrans-3.5 { + catchsql { + SELECT a FROM two ORDER BY a; + } altdb +} {0 {1 5 10}} +do_test avtrans-3.6 { + catchsql { + SELECT a FROM one ORDER BY a; + } altdb +} {0 {1 2 3}} +do_test avtrans-3.7 { + catchsql { + INSERT INTO two VALUES(4,'IV'); + } +} {0 {}} +do_test avtrans-3.8 { + catchsql { + SELECT a FROM two ORDER BY a; + } altdb +} {0 {1 5 10}} +do_test avtrans-3.9 { + catchsql { + SELECT a FROM one ORDER BY a; + } altdb +} {0 {1 2 3}} +do_test avtrans-3.10 { + execsql {END TRANSACTION} +} {} +do_test avtrans-3.11 { + set v [catch {execsql { + SELECT a FROM two ORDER BY a; + } altdb} msg] + lappend v $msg +} {0 {1 4 5 10}} +do_test avtrans-3.12 { + set v [catch {execsql { + SELECT a FROM one ORDER BY a; + } altdb} msg] + lappend v $msg +} {0 {1 2 3 4}} +do_test avtrans-3.13 { + set v [catch {execsql { + SELECT a FROM two ORDER BY a; + } db} msg] + lappend v $msg +} {0 {1 4 5 10}} +do_test avtrans-3.14 { + set v [catch {execsql { + SELECT a FROM one ORDER BY a; + } db} msg] + lappend v $msg +} {0 {1 2 3 4}} +sqlite3_soft_heap_limit $soft_limit +integrity_check avtrans-3.15 + +do_test avtrans-4.1 { + set v [catch {execsql { + COMMIT; + } db} msg] + lappend v $msg +} {1 {cannot commit - no transaction is active}} +do_test avtrans-4.2 { + set v [catch {execsql { + ROLLBACK; + } db} msg] + lappend v $msg +} {1 {cannot rollback - no transaction is active}} +do_test avtrans-4.3 { + catchsql { + BEGIN TRANSACTION; + UPDATE two SET a = 0 WHERE 0; + SELECT a FROM two ORDER BY a; + } db +} {0 {1 4 5 10}} +do_test avtrans-4.4 { + catchsql { + SELECT a 
FROM two ORDER BY a; + } altdb +} {0 {1 4 5 10}} +do_test avtrans-4.5 { + catchsql { + SELECT a FROM one ORDER BY a; + } altdb +} {0 {1 2 3 4}} +do_test avtrans-4.6 { + catchsql { + BEGIN TRANSACTION; + SELECT a FROM one ORDER BY a; + } db +} {1 {cannot start a transaction within a transaction}} +do_test avtrans-4.7 { + catchsql { + SELECT a FROM two ORDER BY a; + } altdb +} {0 {1 4 5 10}} +do_test avtrans-4.8 { + catchsql { + SELECT a FROM one ORDER BY a; + } altdb +} {0 {1 2 3 4}} +do_test avtrans-4.9 { + set v [catch {execsql { + END TRANSACTION; + SELECT a FROM two ORDER BY a; + } db} msg] + lappend v $msg +} {0 {1 4 5 10}} +do_test avtrans-4.10 { + set v [catch {execsql { + SELECT a FROM two ORDER BY a; + } altdb} msg] + lappend v $msg +} {0 {1 4 5 10}} +do_test avtrans-4.11 { + set v [catch {execsql { + SELECT a FROM one ORDER BY a; + } altdb} msg] + lappend v $msg +} {0 {1 2 3 4}} +integrity_check avtrans-4.12 +do_test avtrans-4.98 { + altdb close + execsql { + DROP TABLE one; + DROP TABLE two; + } +} {} +integrity_check avtrans-4.99 + +# Check out the commit/rollback behavior of the database +# +do_test avtrans-5.1 { + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {} +do_test avtrans-5.2 { + execsql {BEGIN TRANSACTION} + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {} +do_test avtrans-5.3 { + execsql {CREATE TABLE one(a text, b int)} + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {one} +do_test avtrans-5.4 { + execsql {SELECT a,b FROM one ORDER BY b} +} {} +do_test avtrans-5.5 { + execsql {INSERT INTO one(a,b) VALUES('hello', 1)} + execsql {SELECT a,b FROM one ORDER BY b} +} {hello 1} +do_test avtrans-5.6 { + execsql {ROLLBACK} + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {} +do_test avtrans-5.7 { + set v [catch { + execsql {SELECT a,b FROM one ORDER BY b} + } msg] + lappend v $msg +} {1 {no such table: one}} + +# Test commits and rollbacks of table CREATE TABLEs, CREATE INDEXs +# DROP TABLEs and DROP INDEXs +# +do_test avtrans-5.8 { + execsql { + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name + } +} {} +do_test avtrans-5.9 { + execsql { + BEGIN TRANSACTION; + CREATE TABLE t1(a int, b int, c int); + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {t1} +do_test avtrans-5.10 { + execsql { + CREATE INDEX i1 ON t1(a); + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i1 t1} +do_test avtrans-5.11 { + execsql { + COMMIT; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i1 t1} +do_test avtrans-5.12 { + execsql { + BEGIN TRANSACTION; + CREATE TABLE t2(a int, b int, c int); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + DROP TABLE t1; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i2a i2b t2} +do_test avtrans-5.13 { + execsql { + ROLLBACK; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i1 t1} +do_test avtrans-5.14 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {t1} +do_test avtrans-5.15 { + execsql { + ROLLBACK; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i1 t1} +do_test avtrans-5.16 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + 
CREATE TABLE t2(x int, y int, z int); + CREATE INDEX i2x ON t2(x); + CREATE INDEX i2y ON t2(y); + INSERT INTO t2 VALUES(1,2,3); + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i2x i2y t1 t2} +do_test avtrans-5.17 { + execsql { + COMMIT; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i2x i2y t1 t2} +do_test avtrans-5.18 { + execsql { + SELECT * FROM t2; + } +} {1 2 3} +do_test avtrans-5.19 { + execsql { + SELECT x FROM t2 WHERE y=2; + } +} {1} +do_test avtrans-5.20 { + execsql { + BEGIN TRANSACTION; + DROP TABLE t1; + DROP TABLE t2; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {} +do_test avtrans-5.21 { + set r [catch {execsql { + SELECT * FROM t2 + }} msg] + lappend r $msg +} {1 {no such table: t2}} +do_test avtrans-5.22 { + execsql { + ROLLBACK; + SELECT name fROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name; + } +} {i2x i2y t1 t2} +do_test avtrans-5.23 { + execsql { + SELECT * FROM t2; + } +} {1 2 3} +integrity_check avtrans-5.23 + + +# Try to DROP and CREATE tables and indices with the same name +# within a transaction. Make sure ROLLBACK works. +# +do_test avtrans-6.1 { + execsql2 { + INSERT INTO t1 VALUES(1,2,3); + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(p,q,r); + ROLLBACK; + SELECT * FROM t1; + } +} {a 1 b 2 c 3} +do_test avtrans-6.2 { + execsql2 { + INSERT INTO t1 VALUES(1,2,3); + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(p,q,r); + COMMIT; + SELECT * FROM t1; + } +} {} +do_test avtrans-6.3 { + execsql2 { + INSERT INTO t1 VALUES(1,2,3); + SELECT * FROM t1; + } +} {p 1 q 2 r 3} +do_test avtrans-6.4 { + execsql2 { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(4,5,6); + SELECT * FROM t1; + DROP TABLE t1; + } +} {a 4 b 5 c 6} +do_test avtrans-6.5 { + execsql2 { + ROLLBACK; + SELECT * FROM t1; + } +} {p 1 q 2 r 3} +do_test avtrans-6.6 { + execsql2 { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(4,5,6); + SELECT * FROM t1; + DROP TABLE t1; + } +} {a 4 b 5 c 6} +do_test avtrans-6.7 { + catchsql { + COMMIT; + SELECT * FROM t1; + } +} {1 {no such table: t1}} + +# Repeat on a table with an automatically generated index. 
+# +do_test avtrans-6.10 { + execsql2 { + CREATE TABLE t1(a unique,b,c); + INSERT INTO t1 VALUES(1,2,3); + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(p unique,q,r); + ROLLBACK; + SELECT * FROM t1; + } +} {a 1 b 2 c 3} +do_test avtrans-6.11 { + execsql2 { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(p unique,q,r); + COMMIT; + SELECT * FROM t1; + } +} {} +do_test avtrans-6.12 { + execsql2 { + INSERT INTO t1 VALUES(1,2,3); + SELECT * FROM t1; + } +} {p 1 q 2 r 3} +do_test avtrans-6.13 { + execsql2 { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(a unique,b,c); + INSERT INTO t1 VALUES(4,5,6); + SELECT * FROM t1; + DROP TABLE t1; + } +} {a 4 b 5 c 6} +do_test avtrans-6.14 { + execsql2 { + ROLLBACK; + SELECT * FROM t1; + } +} {p 1 q 2 r 3} +do_test avtrans-6.15 { + execsql2 { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(a unique,b,c); + INSERT INTO t1 VALUES(4,5,6); + SELECT * FROM t1; + DROP TABLE t1; + } +} {a 4 b 5 c 6} +do_test avtrans-6.16 { + catchsql { + COMMIT; + SELECT * FROM t1; + } +} {1 {no such table: t1}} + +do_test avtrans-6.20 { + execsql { + CREATE TABLE t1(a integer primary key,b,c); + INSERT INTO t1 VALUES(1,-2,-3); + INSERT INTO t1 VALUES(4,-5,-6); + SELECT * FROM t1; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.21 { + execsql { + CREATE INDEX i1 ON t1(b); + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.22 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + SELECT * FROM t1 WHERE b<1; + ROLLBACK; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.23 { + execsql { + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.24 { + execsql { + BEGIN TRANSACTION; + DROP TABLE t1; + ROLLBACK; + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} + +do_test avtrans-6.25 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + CREATE INDEX i1 ON t1(c); + SELECT * FROM t1 WHERE b<1; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.26 { + execsql { + SELECT * FROM t1 WHERE c<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.27 { + execsql { + ROLLBACK; + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.28 { + execsql { + SELECT * FROM t1 WHERE c<1; + } +} {1 -2 -3 4 -5 -6} + +# The following repeats steps 6.20 through 6.28, but puts a "unique" +# constraint the first field of the table in order to generate an +# automatic index. 
+# +do_test avtrans-6.30 { + execsql { + BEGIN TRANSACTION; + DROP TABLE t1; + CREATE TABLE t1(a int unique,b,c); + COMMIT; + INSERT INTO t1 VALUES(1,-2,-3); + INSERT INTO t1 VALUES(4,-5,-6); + SELECT * FROM t1 ORDER BY a; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.31 { + execsql { + CREATE INDEX i1 ON t1(b); + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.32 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + SELECT * FROM t1 WHERE b<1; + ROLLBACK; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.33 { + execsql { + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.34 { + execsql { + BEGIN TRANSACTION; + DROP TABLE t1; + ROLLBACK; + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} + +do_test avtrans-6.35 { + execsql { + BEGIN TRANSACTION; + DROP INDEX i1; + CREATE INDEX i1 ON t1(c); + SELECT * FROM t1 WHERE b<1; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.36 { + execsql { + SELECT * FROM t1 WHERE c<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.37 { + execsql { + DROP INDEX i1; + SELECT * FROM t1 WHERE c<1; + } +} {1 -2 -3 4 -5 -6} +do_test avtrans-6.38 { + execsql { + ROLLBACK; + SELECT * FROM t1 WHERE b<1; + } +} {4 -5 -6 1 -2 -3} +do_test avtrans-6.39 { + execsql { + SELECT * FROM t1 WHERE c<1; + } +} {1 -2 -3 4 -5 -6} +integrity_check avtrans-6.40 + +ifcapable !floatingpoint { + finish_test + return +} + +# Test to make sure rollback restores the database back to its original +# state. +# +do_test avtrans-7.1 { + execsql {BEGIN} + for {set i 0} {$i<1000} {incr i} { + set r1 [expr {rand()}] + set r2 [expr {rand()}] + set r3 [expr {rand()}] + execsql "INSERT INTO t2 VALUES($r1,$r2,$r3)" + } + execsql {COMMIT} + set ::checksum [execsql {SELECT md5sum(x,y,z) FROM t2}] + set ::checksum2 [ + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} + ] + execsql {SELECT count(*) FROM t2} +} {1001} +do_test avtrans-7.2 { + execsql {SELECT md5sum(x,y,z) FROM t2} +} $checksum +do_test avtrans-7.2.1 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +do_test avtrans-7.3 { + execsql { + BEGIN; + DELETE FROM t2; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } +} $checksum +do_test avtrans-7.4 { + execsql { + BEGIN; + INSERT INTO t2 SELECT * FROM t2; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } +} $checksum +do_test avtrans-7.5 { + execsql { + BEGIN; + DELETE FROM t2; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } +} $checksum +do_test avtrans-7.6 { + execsql { + BEGIN; + INSERT INTO t2 SELECT * FROM t2; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } +} $checksum +do_test avtrans-7.7 { + execsql { + BEGIN; + CREATE TABLE t3 AS SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t3; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } +} $checksum +do_test avtrans-7.8 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +ifcapable tempdb { + do_test avtrans-7.9 { + execsql { + BEGIN; + CREATE TEMP TABLE t3 AS SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t3; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } + } $checksum +} +do_test avtrans-7.10 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +ifcapable tempdb { + do_test avtrans-7.11 { + execsql { + BEGIN; + CREATE TEMP TABLE t3 AS SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t3; + DROP INDEX i2x; + DROP INDEX i2y; + CREATE INDEX i3a ON t3(x); + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } + } $checksum +} +do_test avtrans-7.12 { + execsql {SELECT 
md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +ifcapable tempdb { + do_test avtrans-7.13 { + execsql { + BEGIN; + DROP TABLE t2; + ROLLBACK; + SELECT md5sum(x,y,z) FROM t2; + } + } $checksum +} +do_test avtrans-7.14 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +integrity_check avtrans-7.15 + +# Arrange for another process to begin modifying the database but abort +# and die in the middle of the modification. Then have this process read +# the database. This process should detect the journal file and roll it +# back. Verify that this happens correctly. +# +set fd [open test.tcl w] +puts $fd { + sqlite3 db test.db + db eval { + PRAGMA default_cache_size=20; + BEGIN; + CREATE TABLE t3 AS SELECT * FROM t2; + DELETE FROM t2; + } + sqlite_abort +} +close $fd +do_test avtrans-8.1 { + catch {exec [info nameofexec] test.tcl} + execsql {SELECT md5sum(x,y,z) FROM t2} +} $checksum +do_test avtrans-8.2 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +integrity_check avtrans-8.3 + +# In the following sequence of tests, compute the MD5 sum of the content +# of a table, make lots of modifications to that table, then do a rollback. +# Verify that after the rollback, the MD5 checksum is unchanged. +# +do_test avtrans-9.1 { + execsql { + PRAGMA default_cache_size=10; + } + db close + sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t3(x TEXT); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + COMMIT; + SELECT count(*) FROM t3; + } +} {1024} + +# The following procedure computes a "signature" for table "t3". If +# T3 changes in any way, the signature should change. +# +# This is used to test ROLLBACK. We gather a signature for t3, then +# make lots of changes to t3, then rollback and take another signature. +# The two signatures should be the same. +# +proc signature {} { + return [db eval {SELECT count(*), md5sum(x) FROM t3}] +} + +# Repeat the following group of tests 20 times for quick testing and +# 40 times for full testing. Each iteration of the test makes table +# t3 a little larger, and thus takes a little longer, so doing 40 tests +# is more than 2.0 times slower than doing 20 tests. Considerably more. +# +if {[info exists ISQUICK]} { + set limit 20 +} else { + set limit 40 +} + +# Do rollbacks. Make sure the signature does not change. 
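+# (Each iteration records [signature] -- a two-element list of
+# {row-count md5sum} -- before the transaction and compares it against the
+# value recomputed after the ROLLBACK; fullfsync is toggled on alternate
+# iterations simply to exercise both sync strategies.)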
+# +for {set i 2} {$i<=$limit} {incr i} { + set ::sig [signature] + set cnt [lindex $::sig 0] + if {$i%2==0} { + execsql {PRAGMA fullfsync=ON} + } else { + execsql {PRAGMA fullfsync=OFF} + } + set sqlite_sync_count 0 + set sqlite_fullsync_count 0 + do_test avtrans-9.$i.1-$cnt { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + ROLLBACK; + } + signature + } $sig + do_test avtrans-9.$i.2-$cnt { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + ROLLBACK; + } + signature + } $sig + if {$i<$limit} { + do_test avtrans-9.$i.3-$cnt { + execsql { + INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0; + } + } {} + if {$tcl_platform(platform)=="unix"} { + do_test avtrans-9.$i.4-$cnt { + expr {$sqlite_sync_count>0} + } 1 + ifcapable pager_pragmas { + do_test avtrans-9.$i.5-$cnt { + expr {$sqlite_fullsync_count>0} + } [expr {$i%2==0}] + } else { + do_test avtrans-9.$i.5-$cnt { + expr {$sqlite_fullsync_count==0} + } {1} + } + } + } + set ::pager_old_format 0 +} +integrity_check avtrans-10.1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/badutf.test b/libraries/sqlite/unix/sqlite-3.5.1/test/badutf.test new file mode 100644 index 0000000..d09c933 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/badutf.test @@ -0,0 +1,143 @@ +# 2007 May 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file checks to make sure SQLite is able to gracefully +# handle malformed UTF-8. 
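+# (The '%HH' escapes in the SQL strings below are apparently expanded to
+# raw bytes by the sqlite3_exec test wrapper -- which is presumably why
+# these tests use sqlite3_exec rather than execsql -- so '%80' injects a
+# lone 0x80 continuation byte and '%c0' an incomplete multi-byte sequence.)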
+# +# $Id: badutf.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test badutf-1.1 { + db eval {PRAGMA encoding=UTF8} + sqlite3_exec db {SELECT hex('%80') AS x} +} {0 {x 80}} +do_test badutf-1.2 { + sqlite3_exec db {SELECT hex('%81') AS x} +} {0 {x 81}} +do_test badutf-1.3 { + sqlite3_exec db {SELECT hex('%bf') AS x} +} {0 {x BF}} +do_test badutf-1.4 { + sqlite3_exec db {SELECT hex('%c0') AS x} +} {0 {x C0}} +do_test badutf-1.5 { + sqlite3_exec db {SELECT hex('%e0') AS x} +} {0 {x E0}} +do_test badutf-1.6 { + sqlite3_exec db {SELECT hex('%f0') AS x} +} {0 {x F0}} +do_test badutf-1.7 { + sqlite3_exec db {SELECT hex('%ff') AS x} +} {0 {x FF}} + +sqlite3 db2 {} +ifcapable utf16 { + do_test badutf-1.10 { + db2 eval {PRAGMA encoding=UTF16be} + sqlite3_exec db2 {SELECT hex('%80') AS x} + } {0 {x 0080}} + do_test badutf-1.11 { + sqlite3_exec db2 {SELECT hex('%81') AS x} + } {0 {x 0081}} + do_test badutf-1.12 { + sqlite3_exec db2 {SELECT hex('%bf') AS x} + } {0 {x 00BF}} + do_test badutf-1.13 { + sqlite3_exec db2 {SELECT hex('%c0') AS x} + } {0 {x FFFD}} + do_test badutf-1.14 { + sqlite3_exec db2 {SELECT hex('%c1') AS x} + } {0 {x FFFD}} + do_test badutf-1.15 { + sqlite3_exec db2 {SELECT hex('%c0%bf') AS x} + } {0 {x FFFD}} + do_test badutf-1.16 { + sqlite3_exec db2 {SELECT hex('%c1%bf') AS x} + } {0 {x FFFD}} + do_test badutf-1.17 { + sqlite3_exec db2 {SELECT hex('%c3%bf') AS x} + } {0 {x 00FF}} + do_test badutf-1.18 { + sqlite3_exec db2 {SELECT hex('%e0') AS x} + } {0 {x FFFD}} + do_test badutf-1.19 { + sqlite3_exec db2 {SELECT hex('%f0') AS x} + } {0 {x FFFD}} + do_test badutf-1.20 { + sqlite3_exec db2 {SELECT hex('%ff') AS x} + } {0 {x FFFD}} +} + + +ifcapable bloblit { + do_test badutf-2.1 { + sqlite3_exec db {SELECT '%80'=CAST(x'80' AS text) AS x} + } {0 {x 1}} + do_test badutf-2.2 { + sqlite3_exec db {SELECT CAST('%80' AS blob)=x'80' AS x} + } {0 {x 1}} +} + +do_test badutf-3.1 { + sqlite3_exec db {SELECT length('%80') AS x} +} {0 {x 1}} +do_test badutf-3.2 { + sqlite3_exec db {SELECT length('%61%62%63') AS x} +} {0 {x 3}} +do_test badutf-3.3 { + sqlite3_exec db {SELECT length('%7f%80%81') AS x} +} {0 {x 3}} +do_test badutf-3.4 { + sqlite3_exec db {SELECT length('%61%c0') AS x} +} {0 {x 2}} +do_test badutf-3.5 { + sqlite3_exec db {SELECT length('%61%c0%80%80%80%80%80%80%80%80%80%80') AS x} +} {0 {x 2}} +do_test badutf-3.6 { + sqlite3_exec db {SELECT length('%c0%80%80%80%80%80%80%80%80%80%80') AS x} +} {0 {x 1}} +do_test badutf-3.7 { + sqlite3_exec db {SELECT length('%80%80%80%80%80%80%80%80%80%80') AS x} +} {0 {x 10}} +do_test badutf-3.8 { + sqlite3_exec db {SELECT length('%80%80%80%80%80%f0%80%80%80%80') AS x} +} {0 {x 6}} +do_test badutf-3.9 { + sqlite3_exec db {SELECT length('%80%80%80%80%80%f0%80%80%80%ff') AS x} +} {0 {x 7}} + +do_test badutf-4.1 { + sqlite3_exec db {SELECT hex(trim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x} +} {0 {x F0}} +do_test badutf-4.2 { + sqlite3_exec db {SELECT hex(ltrim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x} +} {0 {x F0808080FF}} +do_test badutf-4.3 { + sqlite3_exec db {SELECT hex(rtrim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x} +} {0 {x 808080F0}} +do_test badutf-4.4 { + sqlite3_exec db {SELECT hex(trim('%80%80%80%f0%80%80%80%ff','%ff%80')) AS x} +} {0 {x 808080F0808080FF}} +do_test badutf-4.5 { + sqlite3_exec db {SELECT hex(trim('%ff%80%80%f0%80%80%80%ff','%ff%80')) AS x} +} {0 {x 80F0808080FF}} +do_test badutf-4.6 { + sqlite3_exec db {SELECT 
hex(trim('%ff%80%f0%80%80%80%ff','%ff%80')) AS x} +} {0 {x F0808080FF}} +do_test badutf-4.7 { + sqlite3_exec db {SELECT hex(trim('%ff%80%f0%80%80%80%ff','%ff%80%80')) AS x} +} {0 {x FF80F0808080FF}} + +db2 close +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/between.test b/libraries/sqlite/unix/sqlite-3.5.1/test/between.test new file mode 100644 index 0000000..4543675 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/between.test @@ -0,0 +1,113 @@ +# 2005 July 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the use of indices in WHERE clauses +# when the WHERE clause contains the BETWEEN operator. +# +# $Id: between.test,v 1.2 2006/01/17 09:35:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test between-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(w int, x int, y int, z int); + } + for {set i 1} {$i<=100} {incr i} { + set w $i + set x [expr {int(log($i)/log(2))}] + set y [expr {$i*$i + 2*$i + 1}] + set z [expr {$x+$y}] + ifcapable tclvar { + # Random unplanned test of the $varname variable syntax. + execsql {INSERT INTO t1 VALUES($::w,$::x,$::y,$::z)} + } else { + # If the $varname syntax is not available, use the regular variable + # declaration syntax. + execsql {INSERT INTO t1 VALUES(:w,:x,:y,:z)} + } + } + execsql { + CREATE UNIQUE INDEX i1w ON t1(w); + CREATE INDEX i1xy ON t1(x,y); + CREATE INDEX i1zyx ON t1(z,y,x); + COMMIT; + } +} {} + +# This procedure executes the SQL. Then it appends to the result the +# "sort" or "nosort" keyword depending on whether or not any sorting +# is done. Then it appends the ::sqlite_query_plan variable. 
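+# For example, between-1.1.1 below evaluates
+#
+#   queryplan {SELECT * FROM t1 WHERE w BETWEEN 5 AND 6 ORDER BY +w}
+#
+# and expects {5 2 36 38 6 2 49 51 sort t1 i1w}: the two selected rows,
+# then "sort" because an explicit sort was required, then the table and
+# index reported in ::sqlite_query_plan.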
+# +proc queryplan {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return [concat $data $::sqlite_query_plan] +} + +do_test between-1.1.1 { + queryplan { + SELECT * FROM t1 WHERE w BETWEEN 5 AND 6 ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 i1w} +do_test between-1.1.2 { + queryplan { + SELECT * FROM t1 WHERE +w BETWEEN 5 AND 6 ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 {}} +do_test between-1.2.1 { + queryplan { + SELECT * FROM t1 WHERE w BETWEEN 5 AND 65-y ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 i1w} +do_test between-1.2.2 { + queryplan { + SELECT * FROM t1 WHERE +w BETWEEN 5 AND 65-y ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 {}} +do_test between-1.3.1 { + queryplan { + SELECT * FROM t1 WHERE w BETWEEN 41-y AND 6 ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 i1w} +do_test between-1.3.2 { + queryplan { + SELECT * FROM t1 WHERE +w BETWEEN 41-y AND 6 ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 {}} +do_test between-1.4 { + queryplan { + SELECT * FROM t1 WHERE w BETWEEN 41-y AND 65-y ORDER BY +w + } +} {5 2 36 38 6 2 49 51 sort t1 {}} +do_test between-1.5.1 { + queryplan { + SELECT * FROM t1 WHERE 26 BETWEEN y AND z ORDER BY +w + } +} {4 2 25 27 sort t1 i1zyx} +do_test between-1.5.2 { + queryplan { + SELECT * FROM t1 WHERE 26 BETWEEN +y AND z ORDER BY +w + } +} {4 2 25 27 sort t1 i1zyx} +do_test between-1.5.3 { + queryplan { + SELECT * FROM t1 WHERE 26 BETWEEN y AND +z ORDER BY +w + } +} {4 2 25 27 sort t1 {}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/bigfile.test b/libraries/sqlite/unix/sqlite-3.5.1/test/bigfile.test new file mode 100644 index 0000000..20ace5c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/bigfile.test @@ -0,0 +1,193 @@ +# 2002 November 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the ability of SQLite to handle database +# files larger than 4GB. +# +# $Id: bigfile.test,v 1.10 2007/08/18 10:59:21 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_DISABLE_LFS is defined, omit this file. +ifcapable !lfs { + finish_test + return +} + +# These tests only work for Tcl version 8.4 and later. Prior to 8.4, +# Tcl was unable to handle large files. +# +scan $::tcl_version %f vx +if {$vx<8.4} return + +# Mac OS X does not handle large files efficiently. So skip this test +# on that platform. +if {$tcl_platform(os)=="Darwin"} return + +# This is the md5 checksum of all the data in table t1 as created +# by the first test. We will use this number to make sure that data +# never changes. 
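+# (Should the data-generation SQL in bigfile-1.1 ever change, this
+# constant could be recomputed with something along the lines of
+#
+#   puts [db eval {SELECT md5sum(x) FROM t1}]
+#
+# after that test has run.)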
+# +set MAGIC_SUM {593f1efcfdbe698c28b4b1b693f7e4cf} + +do_test bigfile-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES('abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + INSERT INTO t1 SELECT rowid || ' ' || x FROM t1; + COMMIT; + } + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM + +# Try to create a large file - a file that is larger than 2^32 bytes. +# If this fails, it means that the system being tested does not support +# large files. So skip all of the remaining tests in this file. +# +db close +if {[catch {fake_big_file 4096 test.db} msg]} { + puts "**** Unable to create a file larger than 4096 MB. *****" + puts "$msg" + finish_test + return +} + +do_test bigfile-1.2 { + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM + +# The previous test may fail on some systems because they are unable +# to handle large files. If that is so, then skip all of the following +# tests. We will know the above test failed because the "db" command +# does not exist. +# +if {[llength [info command db]]>0} { + +do_test bigfile-1.3 { + execsql { + CREATE TABLE t2 AS SELECT * FROM t1; + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM +do_test bigfile-1.4 { + db close + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM +do_test bigfile-1.5 { + execsql { + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM + +db close +if {[catch {fake_big_file 8192 test.db}]} { + puts "**** Unable to create a file larger than 8192 MB. *****" + finish_test + return +} + +do_test bigfile-1.6 { + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM +do_test bigfile-1.7 { + execsql { + CREATE TABLE t3 AS SELECT * FROM t1; + SELECT md5sum(x) FROM t3; + } +} $::MAGIC_SUM +do_test bigfile-1.8 { + db close + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM +do_test bigfile-1.9 { + execsql { + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM +do_test bigfile-1.10 { + execsql { + SELECT md5sum(x) FROM t3; + } +} $::MAGIC_SUM + +db close +if {[catch {fake_big_file 16384 test.db}]} { + puts "**** Unable to create a file larger than 16384 MB. 
*****" + finish_test + return +} + +do_test bigfile-1.11 { + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM +do_test bigfile-1.12 { + execsql { + CREATE TABLE t4 AS SELECT * FROM t1; + SELECT md5sum(x) FROM t4; + } +} $::MAGIC_SUM +do_test bigfile-1.13 { + db close + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t1; + } +} $::MAGIC_SUM +do_test bigfile-1.14 { + execsql { + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM +do_test bigfile-1.15 { + execsql { + SELECT md5sum(x) FROM t3; + } +} $::MAGIC_SUM +do_test bigfile-1.16 { + execsql { + SELECT md5sum(x) FROM t3; + } +} $::MAGIC_SUM +do_test bigfile-1.17 { + execsql { + SELECT md5sum(x) FROM t4; + } +} $::MAGIC_SUM + +} ;# End of the "if( db command exists )" + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/bigrow.test b/libraries/sqlite/unix/sqlite-3.5.1/test/bigrow.test new file mode 100644 index 0000000..fa59c36 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/bigrow.test @@ -0,0 +1,223 @@ +# 2001 September 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is stressing the library by putting large amounts +# of data in a single row of a table. +# +# $Id: bigrow.test,v 1.5 2004/08/07 23:54:48 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Make a big string that we can use for test data +# +do_test bigrow-1.0 { + set ::bigstr {} + for {set i 1} {$i<=9999} {incr i} { + set sep [string index "abcdefghijklmnopqrstuvwxyz" [expr {$i%26}]] + append ::bigstr "$sep [format %04d $i] " + } + string length $::bigstr +} {69993} + +# Make a table into which we can insert some but records. 
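+# (For reference, each iteration of the loop in bigrow-1.0 above appends
+# seven bytes -- a separator letter, a space, four digits and a space --
+# so the final string length is 9999*7 = 69993 bytes, the value checked
+# there.)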
+# +do_test bigrow-1.1 { + execsql { + CREATE TABLE t1(a text, b text, c text); + SELECT name FROM sqlite_master + WHERE type='table' OR type='index' + ORDER BY name + } +} {t1} + +do_test bigrow-1.2 { + set ::big1 [string range $::bigstr 0 65519] + set sql "INSERT INTO t1 VALUES('abc'," + append sql "'$::big1', 'xyz');" + execsql $sql + execsql {SELECT a, c FROM t1} +} {abc xyz} +do_test bigrow-1.3 { + execsql {SELECT b FROM t1} +} [list $::big1] +do_test bigrow-1.4 { + set ::big2 [string range $::bigstr 0 65520] + set sql "INSERT INTO t1 VALUES('abc2'," + append sql "'$::big2', 'xyz2');" + set r [catch {execsql $sql} msg] + lappend r $msg +} {0 {}} +do_test bigrow-1.4.1 { + execsql {SELECT b FROM t1 ORDER BY c} +} [list $::big1 $::big2] +do_test bigrow-1.4.2 { + execsql {SELECT c FROM t1 ORDER BY c} +} {xyz xyz2} +do_test bigrow-1.4.3 { + execsql {DELETE FROM t1 WHERE a='abc2'} + execsql {SELECT c FROM t1} +} {xyz} + +do_test bigrow-1.5 { + execsql { + UPDATE t1 SET a=b, b=a; + SELECT b,c FROM t1 + } +} {abc xyz} +do_test bigrow-1.6 { + execsql { + SELECT * FROM t1 + } +} [list $::big1 abc xyz] +do_test bigrow-1.7 { + execsql { + INSERT INTO t1 VALUES('1','2','3'); + INSERT INTO t1 VALUES('A','B','C'); + SELECT b FROM t1 WHERE a=='1'; + } +} {2} +do_test bigrow-1.8 { + execsql "SELECT b FROM t1 WHERE a=='$::big1'" +} {abc} +do_test bigrow-1.9 { + execsql "SELECT b FROM t1 WHERE a!='$::big1' ORDER BY a" +} {2 B} + +# Try doing some indexing on big columns +# +do_test bigrow-2.1 { + execsql { + CREATE INDEX i1 ON t1(a) + } + execsql "SELECT b FROM t1 WHERE a=='$::big1'" +} {abc} +do_test bigrow-2.2 { + execsql { + UPDATE t1 SET a=b, b=a + } + execsql "SELECT b FROM t1 WHERE a=='abc'" +} [list $::big1] +do_test bigrow-2.3 { + execsql { + UPDATE t1 SET a=b, b=a + } + execsql "SELECT b FROM t1 WHERE a=='$::big1'" +} {abc} +catch {unset ::bigstr} +catch {unset ::big1} +catch {unset ::big2} + +# Mosts of the tests above were created back when rows were limited in +# size to 64K. Now rows can be much bigger. Test that logic. Also +# make sure things work correctly at the transition boundries between +# row sizes of 256 to 257 bytes and from 65536 to 65537 bytes. +# +# We begin by testing the 256..257 transition. +# +do_test bigrow-3.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi'); + } + execsql {SELECT a,length(b),c FROM t1} +} {one 30 hi} +do_test bigrow-3.2 { + execsql { + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + } + execsql {SELECT a,length(b),c FROM t1} +} {one 240 hi} +for {set i 1} {$i<10} {incr i} { + do_test bigrow-3.3.$i { + execsql "UPDATE t1 SET b=b||'$i'" + execsql {SELECT a,length(b),c FROM t1} + } "one [expr {240+$i}] hi" +} + +# Now test the 65536..65537 row-size transition. 
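+# (bigrow-4.2 below doubles a 30-byte value twelve times, so its length
+# becomes 30*2^12 = 122880 bytes; bigrow-4.3 then trims it back to 65515
+# bytes before the final loop grows it again one byte at a time.)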
+# +do_test bigrow-4.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi'); + } + execsql {SELECT a,length(b),c FROM t1} +} {one 30 hi} +do_test bigrow-4.2 { + execsql { + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + } + execsql {SELECT a,length(b),c FROM t1} +} {one 122880 hi} +do_test bigrow-4.3 { + execsql { + UPDATE t1 SET b=substr(b,1,65515) + } + execsql {SELECT a,length(b),c FROM t1} +} {one 65515 hi} +for {set i 1} {$i<10} {incr i} { + do_test bigrow-4.4.$i { + execsql "UPDATE t1 SET b=b||'$i'" + execsql {SELECT a,length(b),c FROM t1} + } "one [expr {65515+$i}] hi" +} + +# Check to make sure the library recovers safely if a row contains +# too much data. +# +do_test bigrow-5.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi'); + } + execsql {SELECT a,length(b),c FROM t1} +} {one 30 hi} +set i 1 +for {set sz 60} {$sz<1048560} {incr sz $sz} { + do_test bigrow-5.2.$i { + execsql { + UPDATE t1 SET b=b||b; + SELECT a,length(b),c FROM t1; + } + } "one $sz hi" + incr i +} +do_test bigrow-5.3 { + catchsql {UPDATE t1 SET b=b||b} +} {0 {}} +do_test bigrow-5.4 { + execsql {SELECT length(b) FROM t1} +} 1966080 +do_test bigrow-5.5 { + catchsql {UPDATE t1 SET b=b||b} +} {0 {}} +do_test bigrow-5.6 { + execsql {SELECT length(b) FROM t1} +} 3932160 +do_test bigrow-5.99 { + execsql {DROP TABLE t1} +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/bind.test b/libraries/sqlite/unix/sqlite-3.5.1/test/bind.test new file mode 100644 index 0000000..115734a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/bind.test @@ -0,0 +1,577 @@ +# 2003 September 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the sqlite_bind API. 
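+# The general pattern exercised throughout this file is, roughly:
+#
+#   set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(?)} -1 TAIL]
+#   sqlite3_bind_int $VM 1 42     ;# parameter indices start at 1
+#   sqlite3_step $VM
+#   sqlite3_reset $VM             ;# re-run later with fresh bindings
+#   sqlite3_finalize $VM          ;# release the statement
+#
+# using the Tcl wrappers around the corresponding C API calls.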
+# +# $Id: bind.test,v 1.40 2007/05/10 17:23:12 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc sqlite_step {stmt N VALS COLS} { + upvar VALS vals + upvar COLS cols + set vals [list] + set cols [list] + + set rc [sqlite3_step $stmt] + for {set i 0} {$i < [sqlite3_column_count $stmt]} {incr i} { + lappend cols [sqlite3_column_name $stmt $i] + } + for {set i 0} {$i < [sqlite3_data_count $stmt]} {incr i} { + lappend vals [sqlite3_column_text $stmt $i] + } + + return $rc +} + +do_test bind-1.1 { + set DB [sqlite3_connection_pointer db] + execsql {CREATE TABLE t1(a,b,c);} + set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(:1,?,:abc)} -1 TAIL] + set TAIL +} {} +do_test bind-1.1.1 { + sqlite3_bind_parameter_count $VM +} 3 +do_test bind-1.1.2 { + sqlite3_bind_parameter_name $VM 1 +} {:1} +do_test bind-1.1.3 { + sqlite3_bind_parameter_name $VM 2 +} {} +do_test bind-1.1.4 { + sqlite3_bind_parameter_name $VM 3 +} {:abc} +do_test bind-1.2 { + sqlite_step $VM N VALUES COLNAMES +} {SQLITE_DONE} +do_test bind-1.3 { + execsql {SELECT rowid, * FROM t1} +} {1 {} {} {}} +do_test bind-1.4 { + sqlite3_reset $VM + sqlite_bind $VM 1 {test value 1} normal + sqlite_step $VM N VALUES COLNAMES +} SQLITE_DONE +do_test bind-1.5 { + execsql {SELECT rowid, * FROM t1} +} {1 {} {} {} 2 {test value 1} {} {}} +do_test bind-1.6 { + sqlite3_reset $VM + sqlite_bind $VM 3 {'test value 2'} normal + sqlite_step $VM N VALUES COLNAMES +} SQLITE_DONE +do_test bind-1.7 { + execsql {SELECT rowid, * FROM t1} +} {1 {} {} {} 2 {test value 1} {} {} 3 {test value 1} {} {'test value 2'}} +do_test bind-1.8 { + sqlite3_reset $VM + set sqlite_static_bind_value 123 + sqlite_bind $VM 1 {} static + sqlite_bind $VM 2 {abcdefg} normal + sqlite_bind $VM 3 {} null + execsql {DELETE FROM t1} + sqlite_step $VM N VALUES COLNAMES + execsql {SELECT rowid, * FROM t1} +} {1 123 abcdefg {}} +do_test bind-1.9 { + sqlite3_reset $VM + sqlite_bind $VM 1 {456} normal + sqlite_step $VM N VALUES COLNAMES + execsql {SELECT rowid, * FROM t1} +} {1 123 abcdefg {} 2 456 abcdefg {}} + +do_test bind-1.99 { + sqlite3_finalize $VM +} SQLITE_OK + +# Prepare the statement in different ways depending on whether or not +# the $var processing is compiled into the library. 
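+# (Both branches below bind the same three parameters; only the spelling
+# differs.  With tclvar support compiled in, the SQL can name Tcl
+# variables directly, e.g. VALUES($one,$::two,$x(-z-)); without it the
+# portable ":name" form is used instead.)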
+# +ifcapable {tclvar} { + do_test bind-2.1 { + execsql { + DELETE FROM t1; + } + set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES($one,$::two,$x(-z-))}\ + -1 TX] + set TX + } {} + set v1 {$one} + set v2 {$::two} + set v3 {$x(-z-)} +} +ifcapable {!tclvar} { + do_test bind-2.1 { + execsql { + DELETE FROM t1; + } + set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(:one,:two,:_)} -1 TX] + set TX + } {} + set v1 {:one} + set v2 {:two} + set v3 {:_} +} + +do_test bind-2.1.1 { + sqlite3_bind_parameter_count $VM +} 3 +do_test bind-2.1.2 { + sqlite3_bind_parameter_name $VM 1 +} $v1 +do_test bind-2.1.3 { + sqlite3_bind_parameter_name $VM 2 +} $v2 +do_test bind-2.1.4 { + sqlite3_bind_parameter_name $VM 3 +} $v3 +do_test bind-2.1.5 { + sqlite3_bind_parameter_index $VM $v1 +} 1 +do_test bind-2.1.6 { + sqlite3_bind_parameter_index $VM $v2 +} 2 +do_test bind-2.1.7 { + sqlite3_bind_parameter_index $VM $v3 +} 3 +do_test bind-2.1.8 { + sqlite3_bind_parameter_index $VM {:hi} +} 0 + +# 32 bit Integers +do_test bind-2.2 { + sqlite3_bind_int $VM 1 123 + sqlite3_bind_int $VM 2 456 + sqlite3_bind_int $VM 3 789 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} +} {1 123 456 789} +do_test bind-2.3 { + sqlite3_bind_int $VM 2 -2000000000 + sqlite3_bind_int $VM 3 2000000000 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} +} {1 123 456 789 2 123 -2000000000 2000000000} +do_test bind-2.4 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {integer integer integer integer integer integer} +do_test bind-2.5 { + execsql { + DELETE FROM t1; + } +} {} + +# 64 bit Integers +do_test bind-3.1 { + sqlite3_bind_int64 $VM 1 32 + sqlite3_bind_int64 $VM 2 -2000000000000 + sqlite3_bind_int64 $VM 3 2000000000000 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} +} {1 32 -2000000000000 2000000000000} +do_test bind-3.2 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {integer integer integer} +do_test bind-3.3 { + execsql { + DELETE FROM t1; + } +} {} + +# Doubles +do_test bind-4.1 { + sqlite3_bind_double $VM 1 1234.1234 + sqlite3_bind_double $VM 2 0.00001 + sqlite3_bind_double $VM 3 123456789 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + set x [execsql {SELECT rowid, * FROM t1}] + regsub {1e-005} $x {1e-05} y + set y +} {1 1234.1234 1e-05 123456789.0} +do_test bind-4.2 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {real real real} +do_test bind-4.3 { + execsql { + DELETE FROM t1; + } +} {} +do_test bind-4.4 { + sqlite3_bind_double $VM 1 NaN + sqlite3_bind_double $VM 2 1e300 + sqlite3_bind_double $VM 3 -1e-300 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + set x [execsql {SELECT rowid, * FROM t1}] + regsub {1e-005} $x {1e-05} y + set y +} {1 {} 1e+300 -1e-300} +do_test bind-4.5 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {null real real} +do_test bind-4.6 { + execsql { + DELETE FROM t1; + } +} {} + +# NULL +do_test bind-5.1 { + sqlite3_bind_null $VM 1 + sqlite3_bind_null $VM 2 + sqlite3_bind_null $VM 3 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} +} {1 {} {} {}} +do_test bind-5.2 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {null null null} +do_test bind-5.3 { + execsql { + DELETE FROM t1; + } +} {} + +# UTF-8 text +do_test bind-6.1 { + sqlite3_bind_text $VM 1 hellothere 5 + sqlite3_bind_text $VM 2 ".." 
1 + sqlite3_bind_text $VM 3 world -1 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} +} {1 hello . world} +do_test bind-6.2 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {text text text} +do_test bind-6.3 { + execsql { + DELETE FROM t1; + } +} {} + +# UTF-16 text +ifcapable {utf16} { + do_test bind-7.1 { + sqlite3_bind_text16 $VM 1 [encoding convertto unicode hellothere] 10 + sqlite3_bind_text16 $VM 2 [encoding convertto unicode ""] 0 + sqlite3_bind_text16 $VM 3 [encoding convertto unicode world] 10 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT rowid, * FROM t1} + } {1 hello {} world} + do_test bind-7.2 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} + } {text text text} +} +do_test bind-7.3 { + execsql { + DELETE FROM t1; + } +} {} + +# Test that the 'out of range' error works. +do_test bind-8.1 { + catch { sqlite3_bind_null $VM 0 } +} {1} +do_test bind-8.2 { + sqlite3_errmsg $DB +} {bind or column index out of range} +ifcapable {utf16} { + do_test bind-8.3 { + encoding convertfrom unicode [sqlite3_errmsg16 $DB] + } {bind or column index out of range} +} +do_test bind-8.4 { + sqlite3_bind_null $VM 1 + sqlite3_errmsg $DB +} {not an error} +do_test bind-8.5 { + catch { sqlite3_bind_null $VM 4 } +} {1} +do_test bind-8.6 { + sqlite3_errmsg $DB +} {bind or column index out of range} +ifcapable {utf16} { + do_test bind-8.7 { + encoding convertfrom unicode [sqlite3_errmsg16 $DB] + } {bind or column index out of range} +} + +do_test bind-8.8 { + catch { sqlite3_bind_blob $VM 0 "abc" 3 } +} {1} +do_test bind-8.9 { + catch { sqlite3_bind_blob $VM 4 "abc" 3 } +} {1} +do_test bind-8.10 { + catch { sqlite3_bind_text $VM 0 "abc" 3 } +} {1} +ifcapable {utf16} { + do_test bind-8.11 { + catch { sqlite3_bind_text16 $VM 4 "abc" 2 } + } {1} +} +do_test bind-8.12 { + catch { sqlite3_bind_int $VM 0 5 } +} {1} +do_test bind-8.13 { + catch { sqlite3_bind_int $VM 4 5 } +} {1} +do_test bind-8.14 { + catch { sqlite3_bind_double $VM 0 5.0 } +} {1} +do_test bind-8.15 { + catch { sqlite3_bind_double $VM 4 6.0 } +} {1} + +do_test bind-8.99 { + sqlite3_finalize $VM +} SQLITE_OK + +do_test bind-9.1 { + execsql { + CREATE TABLE t2(a,b,c,d,e,f); + } + set rc [catch { + sqlite3_prepare $DB { + INSERT INTO t2(a) VALUES(?0) + } -1 TAIL + } msg] + lappend rc $msg +} {1 {(1) variable number must be between ?1 and ?999}} +do_test bind-9.2 { + set rc [catch { + sqlite3_prepare $DB { + INSERT INTO t2(a) VALUES(?1000) + } -1 TAIL + } msg] + lappend rc $msg +} {1 {(1) variable number must be between ?1 and ?999}} +do_test bind-9.3 { + set VM [ + sqlite3_prepare $DB { + INSERT INTO t2(a,b) VALUES(?1,?999) + } -1 TAIL + ] + sqlite3_bind_parameter_count $VM +} {999} +catch {sqlite3_finalize $VM} +do_test bind-9.4 { + set VM [ + sqlite3_prepare $DB { + INSERT INTO t2(a,b,c,d) VALUES(?1,?997,?,?) 
+ } -1 TAIL + ] + sqlite3_bind_parameter_count $VM +} {999} +do_test bind-9.5 { + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 997 999 + sqlite3_bind_int $VM 998 1000 + sqlite3_bind_int $VM 999 1001 + sqlite3_step $VM +} SQLITE_DONE +do_test bind-9.6 { + sqlite3_finalize $VM +} SQLITE_OK +do_test bind-9.7 { + execsql {SELECT * FROM t2} +} {1 999 1000 1001 {} {}} + +ifcapable {tclvar} { + do_test bind-10.1 { + set VM [ + sqlite3_prepare $DB { + INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,$abc,:abc,$ab,$abc,:abc) + } -1 TAIL + ] + sqlite3_bind_parameter_count $VM + } 3 + set v1 {$abc} + set v2 {$ab} +} +ifcapable {!tclvar} { + do_test bind-10.1 { + set VM [ + sqlite3_prepare $DB { + INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,:xyz,:abc,:xy,:xyz,:abc) + } -1 TAIL + ] + sqlite3_bind_parameter_count $VM + } 3 + set v1 {:xyz} + set v2 {:xy} +} +do_test bind-10.2 { + sqlite3_bind_parameter_index $VM :abc +} 1 +do_test bind-10.3 { + sqlite3_bind_parameter_index $VM $v1 +} 2 +do_test bind-10.4 { + sqlite3_bind_parameter_index $VM $v2 +} 3 +do_test bind-10.5 { + sqlite3_bind_parameter_name $VM 1 +} :abc +do_test bind-10.6 { + sqlite3_bind_parameter_name $VM 2 +} $v1 +do_test bind-10.7 { + sqlite3_bind_parameter_name $VM 3 +} $v2 +do_test bind-10.7.1 { + sqlite3_bind_parameter_name 0 1 ;# Ignore if VM is NULL +} {} +do_test bind-10.7.2 { + sqlite3_bind_parameter_name $VM 0 ;# Ignore if index too small +} {} +do_test bind-10.7.3 { + sqlite3_bind_parameter_name $VM 4 ;# Ignore if index is too big +} {} +do_test bind-10.8 { + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_step $VM +} SQLITE_DONE +do_test bind-10.8.1 { + # Binding attempts after program start should fail + set rc [catch { + sqlite3_bind_int $VM 1 1 + } msg] + lappend rc $msg +} {1 {}} +do_test bind-10.9 { + sqlite3_finalize $VM +} SQLITE_OK +do_test bind-10.10 { + execsql {SELECT * FROM t2} +} {1 999 1000 1001 {} {} 1 2 1 3 2 1} + +# Ticket #918 +# +do_test bind-10.11 { + # catch {sqlite3_finalize $VM} + set VM [ + sqlite3_prepare $DB { + INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,?,?4,:pqr,:abc,?4) + } -1 TAIL + ] + sqlite3_bind_parameter_count $VM +} 5 +do_test bind-10.11.1 { + sqlite3_bind_parameter_index 0 :xyz ;# ignore NULL VM arguments +} 0 +do_test bind-10.12 { + sqlite3_bind_parameter_index $VM :xyz +} 0 +do_test bind-10.13 { + sqlite3_bind_parameter_index $VM {} +} 0 +do_test bind-10.14 { + sqlite3_bind_parameter_index $VM :pqr +} 5 +do_test bind-10.15 { + sqlite3_bind_parameter_index $VM ?4 +} 4 +do_test bind-10.16 { + sqlite3_bind_parameter_name $VM 1 +} :abc +do_test bind-10.17 { + sqlite3_bind_parameter_name $VM 2 +} {} +do_test bind-10.18 { + sqlite3_bind_parameter_name $VM 3 +} {} +do_test bind-10.19 { + sqlite3_bind_parameter_name $VM 4 +} {?4} +do_test bind-10.20 { + sqlite3_bind_parameter_name $VM 5 +} :pqr +catch {sqlite3_finalize $VM} + +# Make sure we catch an unterminated "(" in a Tcl-style variable name +# +ifcapable tclvar { + do_test bind-11.1 { + catchsql {SELECT * FROM sqlite_master WHERE name=$abc(123 and sql NOT NULL;} + } {1 {unrecognized token: "$abc(123"}} +} + +if {[execsql {pragma encoding}]=="UTF-8"} { + # Test the ability to bind text that contains embedded '\000' characters. + # Make sure we can recover the entire input string. 
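+  # (The "blob10" mode of sqlite_bind supplies a fixed 10-byte value,
+  # presumably "abc\000xyz\000pq": its hex image 6162630078797A007071 is
+  # what the CAST-to-BLOB expressions below are expected to return, while
+  # the plain length() and quote() calls stop at the first embedded \000
+  # and see only 'abc'.)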
+ # + do_test bind-12.1 { + execsql { + CREATE TABLE t3(x BLOB); + } + set VM [sqlite3_prepare $DB {INSERT INTO t3 VALUES(?)} -1 TAIL] + sqlite_bind $VM 1 not-used blob10 + sqlite3_step $VM + sqlite3_finalize $VM + execsql { + SELECT typeof(x), length(x), quote(x), + length(cast(x AS BLOB)), quote(cast(x AS BLOB)) FROM t3 + } + } {text 3 'abc' 10 X'6162630078797A007071'} + do_test bind-12.2 { + sqlite3_create_function $DB + execsql { + SELECT quote(cast(x_coalesce(x) AS blob)) FROM t3 + } + } {X'6162630078797A007071'} +} + +# Test the operation of sqlite3_clear_bindings +# +do_test bind-13.1 { + set VM [sqlite3_prepare $DB {SELECT ?,?,?} -1 TAIL] + sqlite3_step $VM + list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \ + [sqlite3_column_type $VM 2] +} {NULL NULL NULL} +do_test bind-13.2 { + sqlite3_reset $VM + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_step $VM + list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \ + [sqlite3_column_type $VM 2] +} {INTEGER INTEGER INTEGER} +do_test bind-13.3 { + sqlite3_reset $VM + sqlite3_step $VM + list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \ + [sqlite3_column_type $VM 2] +} {INTEGER INTEGER INTEGER} +do_test bind-13.4 { + sqlite3_reset $VM + sqlite3_clear_bindings $VM + sqlite3_step $VM + list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \ + [sqlite3_column_type $VM 2] +} {NULL NULL NULL} +sqlite3_finalize $VM + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/bindxfer.test b/libraries/sqlite/unix/sqlite-3.5.1/test/bindxfer.test new file mode 100644 index 0000000..710ebc7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/bindxfer.test @@ -0,0 +1,84 @@ +# 2005 April 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the sqlite_transfer_bindings() API. 
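+# (sqlite3_transfer_bindings moves the values currently bound to one
+# prepared statement onto another statement with the same number of
+# parameters; afterwards the source statement runs with all-NULL
+# parameters, as bindxfer-1.5 and bindxfer-1.6 demonstrate, and the call
+# fails if the parameter counts differ or either statement has already
+# been finalized.)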
+# +# $Id: bindxfer.test,v 1.4 2007/04/05 11:25:59 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc sqlite_step {stmt VALS COLS} { + upvar #0 $VALS vals + upvar #0 $COLS cols + set vals [list] + set cols [list] + + set rc [sqlite3_step $stmt] + for {set i 0} {$i < [sqlite3_column_count $stmt]} {incr i} { + lappend cols [sqlite3_column_name $stmt $i] + } + for {set i 0} {$i < [sqlite3_data_count $stmt]} {incr i} { + lappend vals [sqlite3_column_text $stmt $i] + } + + return $rc +} + +do_test bindxfer-1.1 { + set DB [sqlite3_connection_pointer db] + execsql {CREATE TABLE t1(a,b,c);} + set VM1 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL] + set TAIL +} {} +do_test bindxfer-1.2 { + sqlite3_bind_parameter_count $VM1 +} 3 +do_test bindxfer-1.3 { + set VM2 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL] + set TAIL +} {} +do_test bindxfer-1.4 { + sqlite3_bind_parameter_count $VM2 +} 3 +do_test bindxfer-1.5 { + sqlite_bind $VM1 1 one normal + set sqlite_static_bind_value two + sqlite_bind $VM1 2 {} static + sqlite_bind $VM1 3 {} null + sqlite3_transfer_bindings $VM1 $VM2 + sqlite_step $VM1 VALUES COLNAMES +} SQLITE_ROW +do_test bindxfer-1.6 { + set VALUES +} {{} {} {}} +do_test bindxfer-1.7 { + sqlite_step $VM2 VALUES COLNAMES +} SQLITE_ROW +do_test bindxfer-1.8 { + set VALUES +} {one two {}} +do_test bindxfer-1.9 { + catch {sqlite3_finalize $VM1} + catch {sqlite3_finalize $VM2} + sqlite3_transfer_bindings $VM1 $VM2 +} 21 ;# SQLITE_MISUSE +do_test bindxfer-1.10 { + set VM1 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL] + set VM2 [sqlite3_prepare $DB {SELECT ?, ?, ?, ?} -1 TAIL] + sqlite3_transfer_bindings $VM1 $VM2 +} 1 ;# SQLITE_ERROR +catch {sqlite3_finalize $VM1} +catch {sqlite3_finalize $VM2} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/blob.test b/libraries/sqlite/unix/sqlite-3.5.1/test/blob.test new file mode 100644 index 0000000..b3c22b4 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/blob.test @@ -0,0 +1,124 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: blob.test,v 1.5 2006/01/03 00:33:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!bloblit} { + finish_test + return +} + +proc bin_to_hex {blob} { + set bytes {} + binary scan $blob \c* bytes + set bytes2 [list] + foreach b $bytes {lappend bytes2 [format %02X [expr $b & 0xFF]]} + join $bytes2 {} +} + +# Simplest possible case. Specify a blob literal +do_test blob-1.0 { + set blob [execsql {SELECT X'01020304';}] + bin_to_hex [lindex $blob 0] +} {01020304} +do_test blob-1.1 { + set blob [execsql {SELECT x'ABCDEF';}] + bin_to_hex [lindex $blob 0] +} {ABCDEF} +do_test blob-1.2 { + set blob [execsql {SELECT x'';}] + bin_to_hex [lindex $blob 0] +} {} +do_test blob-1.3 { + set blob [execsql {SELECT x'abcdEF12';}] + bin_to_hex [lindex $blob 0] +} {ABCDEF12} + +# Try some syntax errors in blob literals. 
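+# A well-formed blob literal is x'...' or X'...' enclosing an even number
+# of hexadecimal digits; blob-1.4 through blob-1.6 below contain a stray
+# non-hex character or are unterminated, and blob-1.7 has an odd number of
+# digits, so all four are rejected by the tokenizer.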
+do_test blob-1.4 { + catchsql {SELECT X'01020k304', 100} +} {1 {unrecognized token: "X'01020"}} +do_test blob-1.5 { + catchsql {SELECT X'01020, 100} +} {1 {unrecognized token: "X'01020"}} +do_test blob-1.6 { + catchsql {SELECT X'01020 100'} +} {1 {unrecognized token: "X'01020"}} +do_test blob-1.7 { + catchsql {SELECT X'01001'} +} {1 {unrecognized token: "X'01001'"}} + +# Insert a blob into a table and retrieve it. +do_test blob-2.0 { + execsql { + CREATE TABLE t1(a BLOB, b BLOB); + INSERT INTO t1 VALUES(X'123456', x'7890ab'); + INSERT INTO t1 VALUES(X'CDEF12', x'345678'); + } + set blobs [execsql {SELECT * FROM t1}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {123456 7890AB CDEF12 345678} + +# An index on a blob column +do_test blob-2.1 { + execsql { + CREATE INDEX i1 ON t1(a); + } + set blobs [execsql {SELECT * FROM t1}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {123456 7890AB CDEF12 345678} +do_test blob-2.2 { + set blobs [execsql {SELECT * FROM t1 where a = X'123456'}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {123456 7890AB} +do_test blob-2.3 { + set blobs [execsql {SELECT * FROM t1 where a = X'CDEF12'}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {CDEF12 345678} +do_test blob-2.4 { + set blobs [execsql {SELECT * FROM t1 where a = X'CD12'}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {} + +# Try to bind a blob value to a prepared statement. +do_test blob-3.0 { + sqlite3 db2 test.db + set DB [sqlite3_connection_pointer db2] + set STMT [sqlite3_prepare $DB "DELETE FROM t1 WHERE a = ?" -1 DUMMY] + sqlite3_bind_blob $STMT 1 "\x12\x34\x56" 3 + sqlite3_step $STMT +} {SQLITE_DONE} +do_test blob-3.1 { + sqlite3_finalize $STMT + db2 close +} {} +do_test blob-2.3 { + set blobs [execsql {SELECT * FROM t1}] + set blobs2 [list] + foreach b $blobs {lappend blobs2 [bin_to_hex $b]} + set blobs2 +} {CDEF12 345678} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree.test new file mode 100644 index 0000000..d8575d06 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree.test @@ -0,0 +1,1072 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend +# +# $Id: btree.test,v 1.41 2007/09/06 22:19:15 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable default_autovacuum { + finish_test + return +} + +# Basic functionality. Open and close a database. +# +do_test btree-1.1 { + file delete -force test1.bt + file delete -force test1.bt-journal + set rc [catch {btree_open test1.bt 2000 0} ::b1] +} {0} + +# The second element of the list returned by btree_pager_stats is the +# number of pages currently checked out. We'll be checking this value +# frequently during this test script, to make sure the btree library +# is properly releasing the pages it checks out, and thus avoiding +# page leaks. 
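+# A typical leak check therefore looks like the following (the test name
+# here is only illustrative):
+#
+#   do_test btree-X.Y {
+#     lindex [btree_pager_stats $::b1] 1
+#   } {0}
+#
+# i.e. once every cursor has been closed and the transaction committed or
+# rolled back, no pages should remain checked out.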
+# +do_test btree-1.1.1 { + lindex [btree_pager_stats $::b1] 1 +} {0} +do_test btree-1.2 { + set rc [catch {btree_open test1.bt 2000 0} ::b2] +} {0} +do_test btree-1.3 { + set rc [catch {btree_close $::b2} msg] + lappend rc $msg +} {0 {}} + +# Do an insert and verify that the database file grows in size. +# +do_test btree-1.4 { + set rc [catch {btree_begin_transaction $::b1} msg] + lappend rc $msg +} {0 {}} +do_test btree-1.4.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-1.5 { + set rc [catch {btree_cursor $::b1 1 1} ::c1] + if {$rc} {lappend rc $::c1} + set rc +} {0} +do_test btree-1.6 { + set rc [catch {btree_insert $::c1 100 1.00} msg] + lappend rc $msg +} {0 {}} +do_test btree-1.7 { + btree_move_to $::c1 100 + btree_key $::c1 +} {100} +do_test btree-1.8 { + btree_data $::c1 +} {1.00} +do_test btree-1.9 { + set rc [catch {btree_close_cursor $::c1} msg] + lappend rc $msg +} {0 {}} +do_test btree-1.10 { + set rc [catch {btree_commit $::b1} msg] + lappend rc $msg +} {0 {}} +do_test btree-1.11 { + file size test1.bt +} {1024} +do_test btree-1.12 { + lindex [btree_pager_stats $::b1] 1 +} {0} + +# Reopen the database and attempt to read the record that we wrote. +# +do_test btree-2.1 { + set rc [catch {btree_cursor $::b1 1 1} ::c1] + if {$rc} {lappend rc $::c1} + set rc +} {0} +do_test btree-2.1.1 { + btree_cursor_list $::b1 +} {} +do_test btree-2.2 { + btree_move_to $::c1 99 +} {1} +do_test btree-2.3 { + btree_move_to $::c1 101 +} {-1} +do_test btree-2.4 { + btree_move_to $::c1 100 +} {0} +do_test btree-2.5 { + btree_key $::c1 +} {100} +do_test btree-2.6 { + btree_data $::c1 +} {1.00} +do_test btree-2.7 { + lindex [btree_pager_stats $::b1] 1 +} {1} + +# Do some additional inserts +# +do_test btree-3.1 { + btree_begin_transaction $::b1 + btree_insert $::c1 200 2.00 + btree_move_to $::c1 200 + btree_key $::c1 +} {200} +do_test btree-3.1.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-3.2 { + btree_insert $::c1 300 3.00 + btree_move_to $::c1 300 + btree_key $::c1 +} {300} +do_test btree-3.4 { + btree_insert $::c1 400 4.00 + btree_move_to $::c1 400 + btree_key $::c1 +} {400} +do_test btree-3.5 { + btree_insert $::c1 500 5.00 + btree_move_to $::c1 500 + btree_key $::c1 +} {500} +do_test btree-3.6 { + btree_insert $::c1 600 6.00 + btree_move_to $::c1 600 + btree_key $::c1 +} {600} +#btree_page_dump $::b1 2 +do_test btree-3.7 { + set rc [btree_move_to $::c1 0] + expr {$rc>0} +} {1} +do_test btree-3.8 { + btree_key $::c1 +} {100} +do_test btree-3.9 { + btree_data $::c1 +} {1.00} +do_test btree-3.10 { + btree_next $::c1 + btree_key $::c1 +} {200} +do_test btree-3.11 { + btree_data $::c1 +} {2.00} +do_test btree-3.12 { + btree_next $::c1 + btree_key $::c1 +} {300} +do_test btree-3.13 { + btree_data $::c1 +} {3.00} +do_test btree-3.14 { + btree_next $::c1 + btree_key $::c1 +} {400} +do_test btree-3.15 { + btree_data $::c1 +} {4.00} +do_test btree-3.16 { + btree_next $::c1 + btree_key $::c1 +} {500} +do_test btree-3.17 { + btree_data $::c1 +} {5.00} +do_test btree-3.18 { + btree_next $::c1 + btree_key $::c1 +} {600} +do_test btree-3.19 { + btree_data $::c1 +} {6.00} +do_test btree-3.20.1 { + btree_next $::c1 + btree_key $::c1 +} {0} +do_test btree-3.20.2 { + btree_eof $::c1 +} {1} +# This test case used to test that one couldn't request data from an +# invalid cursor. That is now an assert()ed condition. 
+# +# do_test btree-3.21 { +# set rc [catch {btree_data $::c1} res] +# lappend rc $res +# } {1 SQLITE_INTERNAL} + +# Commit the changes, reopen and reread the data +# +do_test btree-3.22 { + set rc [catch {btree_close_cursor $::c1} msg] + lappend rc $msg +} {0 {}} +do_test btree-3.22.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-3.23 { + set rc [catch {btree_commit $::b1} msg] + lappend rc $msg +} {0 {}} +do_test btree-3.23.1 { + lindex [btree_pager_stats $::b1] 1 +} {0} +do_test btree-3.24 { + file size test1.bt +} {1024} +do_test btree-3.25 { + set rc [catch {btree_cursor $::b1 1 1} ::c1] + if {$rc} {lappend rc $::c1} + set rc +} {0} +do_test btree-3.25.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-3.26 { + set rc [btree_move_to $::c1 0] + expr {$rc>0} +} {1} +do_test btree-3.27 { + btree_key $::c1 +} {100} +do_test btree-3.28 { + btree_data $::c1 +} {1.00} +do_test btree-3.29 { + btree_next $::c1 + btree_key $::c1 +} {200} +do_test btree-3.30 { + btree_data $::c1 +} {2.00} +do_test btree-3.31 { + btree_next $::c1 + btree_key $::c1 +} {300} +do_test btree-3.32 { + btree_data $::c1 +} {3.00} +do_test btree-3.33 { + btree_next $::c1 + btree_key $::c1 +} {400} +do_test btree-3.34 { + btree_data $::c1 +} {4.00} +do_test btree-3.35 { + btree_next $::c1 + btree_key $::c1 +} {500} +do_test btree-3.36 { + btree_data $::c1 +} {5.00} +do_test btree-3.37 { + btree_next $::c1 + btree_key $::c1 +} {600} +do_test btree-3.38 { + btree_data $::c1 +} {6.00} +do_test btree-3.39 { + btree_next $::c1 + btree_key $::c1 +} {0} +# This test case used to test that requesting data from an invalid cursor +# returned SQLITE_INTERNAL. That is now an assert()ed condition. +# +# do_test btree-3.40 { +# set rc [catch {btree_data $::c1} res] +# lappend rc $res +# } {1 SQLITE_INTERNAL} +do_test btree-3.41 { + lindex [btree_pager_stats $::b1] 1 +} {1} + + +# Now try a delete +# +do_test btree-4.1 { + btree_begin_transaction $::b1 + btree_move_to $::c1 100 + btree_key $::c1 +} {100} +do_test btree-4.1.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-4.2 { + btree_delete $::c1 +} {} +do_test btree-4.3 { + btree_move_to $::c1 100 + btree_key $::c1 +} {200} +do_test btree-4.4 { + btree_next $::c1 + btree_key $::c1 +} {300} +do_test btree-4.5 { + btree_next $::c1 + btree_key $::c1 +} {400} +do_test btree-4.4 { + btree_move_to $::c1 0 + set r {} + while 1 { + set key [btree_key $::c1] + if {[btree_eof $::c1]} break + lappend r $key + lappend r [btree_data $::c1] + btree_next $::c1 + } + set r +} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} + +# Commit and make sure the delete is still there. +# +do_test btree-4.5 { + btree_commit $::b1 + btree_move_to $::c1 0 + set r {} + while 1 { + set key [btree_key $::c1] + if {[btree_eof $::c1]} break + lappend r $key + lappend r [btree_data $::c1] + btree_next $::c1 + } + set r +} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} + +# Completely close the database and reopen it. Then check +# the data again. 
+# +do_test btree-4.6 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-4.7 { + btree_close_cursor $::c1 + lindex [btree_pager_stats $::b1] 1 +} {0} +do_test btree-4.8 { + btree_close $::b1 + set ::b1 [btree_open test1.bt 2000 0] + set ::c1 [btree_cursor $::b1 1 1] + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-4.9 { + set r {} + btree_first $::c1 + while 1 { + set key [btree_key $::c1] + if {[btree_eof $::c1]} break + lappend r $key + lappend r [btree_data $::c1] + btree_next $::c1 + } + set r +} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} + +# Try to read and write meta data +# +do_test btree-5.1 { + btree_get_meta $::b1 +} {0 0 0 0 0 0 0 0 0 0} +do_test btree-5.2 { + set rc [catch { + btree_update_meta $::b1 0 1 2 3 4 5 6 7 8 9 + } msg] + lappend rc $msg +} {1 SQLITE_ERROR} +do_test btree-5.3 { + btree_begin_transaction $::b1 + set rc [catch { + btree_update_meta $::b1 0 1 2 3 0 5 6 0 8 9 + } msg] + lappend rc $msg +} {0 {}} +do_test btree-5.4 { + btree_get_meta $::b1 +} {0 1 2 3 0 5 6 0 8 9} +do_test btree-5.5 { + btree_close_cursor $::c1 + btree_rollback $::b1 + btree_get_meta $::b1 +} {0 0 0 0 0 0 0 0 0 0} +do_test btree-5.6 { + btree_begin_transaction $::b1 + btree_update_meta $::b1 0 10 20 30 0 50 60 0 80 90 + btree_commit $::b1 + btree_get_meta $::b1 +} {0 10 20 30 0 50 60 0 80 90} + +proc select_all {cursor} { + set r {} + btree_first $cursor + while {![btree_eof $cursor]} { + set key [btree_key $cursor] + lappend r $key + lappend r [btree_data $cursor] + btree_next $cursor + } + return $r +} +proc select_keys {cursor} { + set r {} + btree_first $cursor + while {![btree_eof $cursor]} { + set key [btree_key $cursor] + lappend r $key + btree_next $cursor + } + return $r +} + +# Try to create a new table in the database file +# +do_test btree-6.1 { + set rc [catch {btree_create_table $::b1 0} msg] + lappend rc $msg +} {1 SQLITE_ERROR} +do_test btree-6.2 { + btree_begin_transaction $::b1 + set ::t2 [btree_create_table $::b1 0] +} {2} +do_test btree-6.2.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-6.2.2 { + set ::c2 [btree_cursor $::b1 $::t2 1] + lindex [btree_pager_stats $::b1] 1 +} {2} +do_test btree-6.2.3 { + btree_insert $::c2 ten 10 + btree_move_to $::c2 ten + btree_key $::c2 +} {ten} +do_test btree-6.3 { + btree_commit $::b1 + set ::c1 [btree_cursor $::b1 1 1] + lindex [btree_pager_stats $::b1] 1 +} {2} +do_test btree-6.3.1 { + select_all $::c1 +} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} +#btree_page_dump $::b1 3 +do_test btree-6.4 { + select_all $::c2 +} {ten 10} + +# Drop the new table, then create it again anew. +# +do_test btree-6.5 { + btree_begin_transaction $::b1 +} {} +do_test btree-6.6 { + btree_close_cursor $::c2 +} {} +do_test btree-6.6.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-6.7 { + btree_close_cursor $::c1 + btree_drop_table $::b1 $::t2 +} {} +do_test btree-6.7.1 { + lindex [btree_get_meta $::b1] 0 +} {1} +do_test btree-6.8 { + set ::t2 [btree_create_table $::b1 0] +} {2} +do_test btree-6.8.1 { + lindex [btree_get_meta $::b1] 0 +} {0} +do_test btree-6.9 { + set ::c2 [btree_cursor $::b1 $::t2 1] + lindex [btree_pager_stats $::b1] 1 +} {2} + +# This test case used to test that requesting the key from an invalid cursor +# returned an empty string. But that is now an assert()ed condition. +# +# do_test btree-6.9.1 { +# btree_move_to $::c2 {} +# btree_key $::c2 +# } {} + +# If we drop table 1 it just clears the table. Table 1 always exists. 
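+#
+# (Illustration only.) In other words, for the root table the following two
+# calls are expected to have the same net effect - the table is emptied but
+# never removed:
+#
+#   btree_drop_table  $::b1 1   ;# "dropping" table 1 only clears it
+#   btree_clear_table $::b1 1   ;# explicit clear of table 1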
+# +do_test btree-6.10 { + btree_close_cursor $::c2 + btree_drop_table $::b1 1 + set ::c2 [btree_cursor $::b1 $::t2 1] + set ::c1 [btree_cursor $::b1 1 1] + btree_first $::c1 + btree_eof $::c1 +} {1} +do_test btree-6.11 { + btree_commit $::b1 + select_all $::c1 +} {} +do_test btree-6.12 { + select_all $::c2 +} {} +do_test btree-6.13 { + btree_close_cursor $::c2 + lindex [btree_pager_stats $::b1] 1 +} {1} + +# Check to see that pages defragment properly. To do this test we will +# +# 1. Fill the first page of table 1 with data. +# 2. Delete every other entry of table 1. +# 3. Insert a single entry that requires more contiguous +# space than is available. +# +do_test btree-7.1 { + btree_begin_transaction $::b1 +} {} +catch {unset key} +catch {unset data} + +# Check to see that data on overflow pages work correctly. +# +do_test btree-8.1 { + set data "*** This is a very long key " + while {[string length $data]<1234} {append data $data} + set ::data $data + btree_insert $::c1 2020 $data +} {} +btree_page_dump $::b1 1 +btree_page_dump $::b1 2 +do_test btree-8.1.1 { + lindex [btree_pager_stats $::b1] 1 +} {1} +#btree_pager_ref_dump $::b1 +do_test btree-8.2 { + btree_move_to $::c1 2020 + string length [btree_data $::c1] +} [string length $::data] +do_test btree-8.3 { + btree_data $::c1 +} $::data +do_test btree-8.4 { + btree_delete $::c1 +} {} +do_test btree-8.4.1 { + lindex [btree_get_meta $::b1] 0 +} [expr {int(([string length $::data]-238+1019)/1020)}] +do_test btree-8.4.2 { + btree_integrity_check $::b1 1 2 +} {} +do_test btree-8.5 { + set data "*** This is an even longer key " + while {[string length $data]<2000} {append data $data} + append data END + set ::data $data + btree_insert $::c1 2030 $data +} {} +do_test btree-8.6 { + btree_move_to $::c1 2030 + string length [btree_data $::c1] +} [string length $::data] +do_test btree-8.7 { + btree_data $::c1 +} $::data +do_test btree-8.8 { + btree_commit $::b1 + btree_data $::c1 +} $::data +do_test btree-8.9.1 { + btree_close_cursor $::c1 + btree_close $::b1 + set ::b1 [btree_open test1.bt 2000 0] + set ::c1 [btree_cursor $::b1 1 1] + btree_move_to $::c1 2030 + btree_data $::c1 +} $::data +do_test btree-8.9.2 { + btree_integrity_check $::b1 1 2 +} {} +do_test btree-8.10 { + btree_begin_transaction $::b1 + btree_delete $::c1 +} {} +do_test btree-8.11 { + lindex [btree_get_meta $::b1] 0 +} {4} + +# Now check out keys on overflow pages. 
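+#
+# (Arithmetic aside, illustration only.) Long keys spill to overflow pages
+# just as long data does. For the data case, the value expected by
+# btree-8.4.1 above follows from the constants used in that test: assuming
+# about 238 bytes of payload stay on the leaf and each overflow page holds
+# 1020 bytes, the 1792-byte value built there occupies
+#
+#   expr {int((1792-238+1019)/1020)}   ;# -> 2
+#
+# overflow pages, which is the free-page count btree_get_meta reports after
+# the entry is deleted.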
+# +do_test btree-8.12.1 { + set ::keyprefix "This is a long prefix to a key " + while {[string length $::keyprefix]<256} {append ::keyprefix $::keyprefix} + btree_close_cursor $::c1 + btree_clear_table $::b1 2 + lindex [btree_get_meta $::b1] 0 +} {4} +do_test btree-8.12.2 { + btree_integrity_check $::b1 1 2 +} {} +do_test btree-8.12.3 { + set ::c1 [btree_cursor $::b1 2 1] + btree_insert $::c1 ${::keyprefix}1 1 + btree_first $::c1 + btree_data $::c1 +} {1} +do_test btree-8.13 { + btree_key $::c1 +} ${keyprefix}1 +do_test btree-8.14 { + btree_insert $::c1 ${::keyprefix}2 2 + btree_insert $::c1 ${::keyprefix}3 3 + btree_last $::c1 + btree_key $::c1 +} ${keyprefix}3 +do_test btree-8.15 { + btree_move_to $::c1 ${::keyprefix}2 + btree_data $::c1 +} {2} +do_test btree-8.16 { + btree_move_to $::c1 ${::keyprefix}1 + btree_data $::c1 +} {1} +do_test btree-8.17 { + btree_move_to $::c1 ${::keyprefix}3 + btree_data $::c1 +} {3} +do_test btree-8.18 { + lindex [btree_get_meta $::b1] 0 +} {1} +do_test btree-8.19 { + btree_move_to $::c1 ${::keyprefix}2 + btree_key $::c1 +} ${::keyprefix}2 +#btree_page_dump $::b1 2 +do_test btree-8.20 { + btree_delete $::c1 + btree_next $::c1 + btree_key $::c1 +} ${::keyprefix}3 +#btree_page_dump $::b1 2 +do_test btree-8.21 { + lindex [btree_get_meta $::b1] 0 +} {2} +do_test btree-8.22 { + lindex [btree_pager_stats $::b1] 1 +} {2} +do_test btree-8.23.1 { + btree_close_cursor $::c1 + btree_drop_table $::b1 2 + btree_integrity_check $::b1 1 +} {} +do_test btree-8.23.2 { + btree_create_table $::b1 0 +} {2} +do_test btree-8.23.3 { + set ::c1 [btree_cursor $::b1 2 1] + lindex [btree_get_meta $::b1] 0 +} {4} +do_test btree-8.24 { + lindex [btree_pager_stats $::b1] 1 +} {2} +#btree_pager_ref_dump $::b1 +do_test btree-8.25 { + btree_integrity_check $::b1 1 2 +} {} + +# Check page splitting logic +# +do_test btree-9.1 { + for {set i 1} {$i<=19} {incr i} { + set key [format %03d $i] + set data "*** $key *** $key *** $key *** $key ***" + btree_insert $::c1 $key $data + } +} {} +#btree_tree_dump $::b1 2 +#btree_pager_ref_dump $::b1 +#set pager_refinfo_enable 1 +do_test btree-9.2 { + btree_insert $::c1 020 {*** 020 *** 020 *** 020 *** 020 ***} + select_keys $::c1 +} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020} +#btree_page_dump $::b1 2 +#btree_pager_ref_dump $::b1 +#set pager_refinfo_enable 0 + +# The previous "select_keys" command left the cursor pointing at the root +# page. So there should only be two pages checked out. 2 (the root) and +# page 1. +do_test btree-9.2.1 { + lindex [btree_pager_stats $::b1] 1 +} {2} +for {set i 1} {$i<=20} {incr i} { + do_test btree-9.3.$i.1 [subst { + btree_move_to $::c1 [format %03d $i] + btree_key $::c1 + }] [format %03d $i] + do_test btree-9.3.$i.2 [subst { + btree_move_to $::c1 [format %03d $i] + string range \[btree_data $::c1\] 0 10 + }] "*** [format %03d $i] ***" +} +do_test btree-9.4.1 { + lindex [btree_pager_stats $::b1] 1 +} {2} + +# Check the page joining logic. 
+# +#btree_page_dump $::b1 2 +#btree_pager_ref_dump $::b1 +do_test btree-9.4.2 { + btree_move_to $::c1 005 + btree_delete $::c1 +} {} +#btree_page_dump $::b1 2 +for {set i 1} {$i<=19} {incr i} { + if {$i==5} continue + do_test btree-9.5.$i.1 [subst { + btree_move_to $::c1 [format %03d $i] + btree_key $::c1 + }] [format %03d $i] + do_test btree-9.5.$i.2 [subst { + btree_move_to $::c1 [format %03d $i] + string range \[btree_data $::c1\] 0 10 + }] "*** [format %03d $i] ***" +} +#btree_pager_ref_dump $::b1 +do_test btree-9.6 { + btree_close_cursor $::c1 + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-9.7 { + btree_integrity_check $::b1 1 2 +} {} +do_test btree-9.8 { + btree_rollback $::b1 + lindex [btree_pager_stats $::b1] 1 +} {0} +do_test btree-9.9 { + btree_integrity_check $::b1 1 2 +} {} +do_test btree-9.10 { + btree_close $::b1 + set ::b1 [btree_open test1.bt 2000 0] + btree_integrity_check $::b1 1 2 +} {} + +# Create a tree of depth two. That is, there is a single divider entry +# on the root pages and two leaf pages. Then delete the divider entry +# see what happens. +# +do_test btree-10.1 { + btree_begin_transaction $::b1 + btree_clear_table $::b1 2 + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-10.2 { + set ::c1 [btree_cursor $::b1 2 1] + lindex [btree_pager_stats $::b1] 1 +} {2} +do_test btree-10.3 { +btree_breakpoint + for {set i 1} {$i<=30} {incr i} { + set key [format %03d $i] + set data "*** $key *** $key *** $key *** $key ***" + btree_insert $::c1 $key $data + } + select_keys $::c1 +} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030} +#btree_tree_dump $::b1 2 +do_test btree-10.4 { + # The divider entry is 012. This is found by uncommenting the + # btree_tree_dump call above and looking at the tree. If the page size + # changes, this test will no longer work. + btree_move_to $::c1 012 + btree_delete $::c1 + select_keys $::c1 +} {001 002 003 004 005 006 007 008 009 010 011 013 014 015 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030} +#btree_pager_ref_dump $::b1 +#btree_tree_dump $::b1 2 +for {set i 1} {$i<=30} {incr i} { + # Check the number of unreference pages. This should be 3 in most cases, + # but 2 when the cursor is pointing to the divider entry which is now 013. + do_test btree-10.5.$i { + btree_move_to $::c1 [format %03d $i] + lindex [btree_pager_stats $::b1] 1 + } [expr {$i==13?2:3}] + #btree_pager_ref_dump $::b1 + #btree_tree_dump $::b1 2 +} + +# Create a tree with lots more pages +# +catch {unset ::data} +catch {unset ::key} +for {set i 31} {$i<=2000} {incr i} { + do_test btree-11.1.$i.1 { + set key [format %03d $i] + set ::data "*** $key *** $key *** $key *** $key ***" + btree_insert $::c1 $key $data + btree_move_to $::c1 $key + btree_key $::c1 + } [format %03d $i] + do_test btree-11.1.$i.2 { + btree_data $::c1 + } $::data + set ::key [format %03d [expr {$i/2}]] + if {$::key=="012"} {set ::key 013} + do_test btree-11.1.$i.3 { + btree_move_to $::c1 $::key + btree_key $::c1 + } $::key +} +catch {unset ::data} +catch {unset ::key} + +# Make sure our reference count is still correct. 
+# +do_test btree-11.2 { + btree_close_cursor $::c1 + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-11.3 { + set ::c1 [btree_cursor $::b1 2 1] + lindex [btree_pager_stats $::b1] 1 +} {2} + +# Delete the dividers on the root page +# +#btree_page_dump $::b1 2 +do_test btree-11.4 { + btree_move_to $::c1 1667 + btree_delete $::c1 + btree_move_to $::c1 1667 + set k [btree_key $::c1] + if {$k==1666} { + set k [btree_next $::c1] + } + btree_key $::c1 +} {1668} +#btree_page_dump $::b1 2 + +# Change the data on an intermediate node such that the node becomes overfull +# and has to split. We happen to know that intermediate nodes exist on +# 337, 401 and 465 by the btree_page_dumps above +# +catch {unset ::data} +set ::data {This is going to be a very long data segment} +append ::data $::data +append ::data $::data +do_test btree-12.1 { + btree_insert $::c1 337 $::data + btree_move_to $::c1 337 + btree_data $::c1 +} $::data +do_test btree-12.2 { + btree_insert $::c1 401 $::data + btree_move_to $::c1 401 + btree_data $::c1 +} $::data +do_test btree-12.3 { + btree_insert $::c1 465 $::data + btree_move_to $::c1 465 + btree_data $::c1 +} $::data +do_test btree-12.4 { + btree_move_to $::c1 337 + btree_key $::c1 +} {337} +do_test btree-12.5 { + btree_data $::c1 +} $::data +do_test btree-12.6 { + btree_next $::c1 + btree_key $::c1 +} {338} +do_test btree-12.7 { + btree_move_to $::c1 464 + btree_key $::c1 +} {464} +do_test btree-12.8 { + btree_next $::c1 + btree_data $::c1 +} $::data +do_test btree-12.9 { + btree_next $::c1 + btree_key $::c1 +} {466} +do_test btree-12.10 { + btree_move_to $::c1 400 + btree_key $::c1 +} {400} +do_test btree-12.11 { + btree_next $::c1 + btree_data $::c1 +} $::data +do_test btree-12.12 { + btree_next $::c1 + btree_key $::c1 +} {402} +# btree_commit $::b1 +# btree_tree_dump $::b1 1 +do_test btree-13.1 { + btree_integrity_check $::b1 1 2 +} {} + +# To Do: +# +# 1. Do some deletes from the 3-layer tree +# 2. Commit and reopen the database +# 3. Read every 15th entry and make sure it works +# 4. Implement btree_sanity and put it throughout this script +# + +do_test btree-15.98 { + btree_close_cursor $::c1 + lindex [btree_pager_stats $::b1] 1 +} {1} +do_test btree-15.99 { + btree_rollback $::b1 + lindex [btree_pager_stats $::b1] 1 +} {0} +btree_pager_ref_dump $::b1 + +# Miscellaneous tests. +# +# btree-16.1 - Check that a statement cannot be started if a transaction +# is not active. +# btree-16.2 - Check that it is an error to request more payload from a +# btree entry than the entry contains. 
+do_test btree-16.1 { + catch {btree_begin_statement $::b1} msg + set msg +} SQLITE_ERROR + +do_test btree-16.2 { + btree_begin_transaction $::b1 + set ::c1 [btree_cursor $::b1 2 1] + btree_insert $::c1 1 helloworld + btree_close_cursor $::c1 + btree_commit $::b1 +} {} +do_test btree-16.3 { + set ::c1 [btree_cursor $::b1 2 1] + btree_first $::c1 +} 0 +do_test btree-16.4 { + catch {btree_data $::c1 [expr [btree_payload_size $::c1] + 10]} msg + set msg +} SQLITE_ERROR + +if {$tcl_platform(platform)=="unix"} { + do_test btree-16.5 { + btree_close $::b1 + set ::origperm [file attributes test1.bt -permissions] + file attributes test1.bt -permissions o-w,g-w,a-w + set ::b1 [btree_open test1.bt 2000 0] + catch {btree_cursor $::b1 2 1} msg + file attributes test1.bt -permissions $::origperm + btree_close $::b1 + set ::b1 [btree_open test1.bt 2000 0] + set msg + } {SQLITE_READONLY} +} + +do_test btree-16.6 { + set ::c1 [btree_cursor $::b1 2 1] + set ::c2 [btree_cursor $::b1 2 1] + btree_begin_transaction $::b1 + for {set i 0} {$i<100} {incr i} { + btree_insert $::c1 $i [string repeat helloworld 10] + } + btree_last $::c2 + btree_insert $::c1 100 [string repeat helloworld 10] +} {} + +do_test btree-16.7 { + btree_close_cursor $::c1 + btree_close_cursor $::c2 + btree_commit $::b1 + set ::c1 [btree_cursor $::b1 2 1] + catch {btree_insert $::c1 101 helloworld} msg + set msg +} {SQLITE_ERROR} +do_test btree-16.8 { + btree_first $::c1 + catch {btree_delete $::c1} msg + set msg +} {SQLITE_ERROR} +do_test btree-16.9 { + btree_close_cursor $::c1 + btree_begin_transaction $::b1 + set ::c1 [btree_cursor $::b1 2 0] + catch {btree_insert $::c1 101 helloworld} msg + set msg +} {SQLITE_PERM} +do_test btree-16.10 { + catch {btree_delete $::c1} msg + set msg +} {SQLITE_PERM} + +# As of 2006-08-16 (version 3.3.7+) a read cursor will no +# longer block a write cursor from the same database +# connectiin. The following three tests uses to return +# the SQLITE_LOCK error, but no more. +# +do_test btree-16.11 { + btree_close_cursor $::c1 + set ::c2 [btree_cursor $::b1 2 1] + set ::c1 [btree_cursor $::b1 2 0] + catch {btree_insert $::c2 101 helloworld} msg + set msg +} {} +do_test btree-16.12 { + btree_first $::c2 + catch {btree_delete $::c2} msg + set msg +} {} +do_test btree-16.13 { + catch {btree_clear_table $::b1 2} msg + set msg +} {} + + +do_test btree-16.14 { + btree_close_cursor $::c1 + btree_close_cursor $::c2 + btree_commit $::b1 + catch {btree_clear_table $::b1 2} msg + set msg +} {SQLITE_ERROR} +do_test btree-16.15 { + catch {btree_drop_table $::b1 2} msg + set msg +} {SQLITE_ERROR} +do_test btree-16.16 { + btree_begin_transaction $::b1 + set ::c1 [btree_cursor $::b1 2 0] + catch {btree_drop_table $::b1 2} msg + set msg +} {SQLITE_LOCKED} + +do_test btree-99.1 { + btree_close $::b1 +} {} +catch {unset data} +catch {unset key} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree2.test new file mode 100644 index 0000000..35263c1 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree2.test @@ -0,0 +1,502 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this script is btree database backend +# +# $Id: btree2.test,v 1.15 2006/03/19 13:00:25 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {[info commands btree_open]!=""} { + +# Create a new database file containing no entries. The database should +# contain 5 tables: +# +# 2 The descriptor table +# 3 The foreground table +# 4 The background table +# 5 The long key table +# 6 The long data table +# +# An explanation for what all these tables are used for is provided below. +# +do_test btree2-1.1 { + expr srand(1) + file delete -force test2.bt + file delete -force test2.bt-journal + set ::b [btree_open test2.bt 2000 0] + btree_begin_transaction $::b + btree_create_table $::b 0 +} {2} +do_test btree2-1.2 { + btree_create_table $::b 0 +} {3} +do_test btree2-1.3 { + btree_create_table $::b 0 +} {4} +do_test btree2-1.4 { + btree_create_table $::b 0 +} {5} +do_test btree2-1.5 { + btree_create_table $::b 0 +} {6} +do_test btree2-1.6 { + set ::c2 [btree_cursor $::b 2 1] + btree_insert $::c2 {one} {1} + btree_move_to $::c2 {one} + btree_delete $::c2 + btree_close_cursor $::c2 + btree_commit $::b + btree_integrity_check $::b 1 2 3 4 5 6 +} {} + +# This test module works by making lots of pseudo-random changes to a +# database while simultaneously maintaining an invariant on that database. +# Periodically, the script does a sanity check on the database and verifies +# that the invariant is satisfied. +# +# The invariant is as follows: +# +# 1. The descriptor table always contains 2 enters. An entry keyed by +# "N" is the number of elements in the foreground and background tables +# combined. The entry keyed by "L" is the number of digits in the keys +# for foreground and background tables. +# +# 2. The union of the foreground an background tables consists of N entries +# where each entry has an L-digit key. (Actually, some keys can be longer +# than L characters, but they always start with L digits.) The keys +# cover all integers between 1 and N. Whenever an entry is added to +# the foreground it is removed form the background and vice versa. +# +# 3. Some entries in the foreground and background tables have keys that +# begin with an L-digit number but are followed by additional characters. +# For each such entry there is a corresponding entry in the long key +# table. The long key table entry has a key which is just the L-digit +# number and data which is the length of the key in the foreground and +# background tables. +# +# 4. The data for both foreground and background entries is usually a +# short string. But some entries have long data strings. For each +# such entries there is an entry in the long data type. The key to +# long data table is an L-digit number. (The extension on long keys +# is omitted.) The data is the number of charaters in the data of the +# foreground or background entry. +# +# The following function builds a database that satisfies all of the above +# invariants. +# +proc build_db {N L} { + for {set i 2} {$i<=6} {incr i} { + catch {btree_close_cursor [set ::c$i]} + btree_clear_table $::b $i + set ::c$i [btree_cursor $::b $i 1] + } + btree_insert $::c2 N $N + btree_insert $::c2 L $L + set format %0${L}d + for {set i 1} {$i<=$N} {incr i} { + set key [format $format $i] + set data $key + btree_insert $::c3 $key $data + } +} + +# Given a base key number and a length, construct the full text of the key +# or data. 
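+#
+# For example (illustration only), a base key of 7 with L=3 padded out to
+# 11 characters becomes the zero-padded key followed by " (i) <key>"
+# filler, truncated to the requested length:
+#
+#   make_payload 7 3 11   ;# -> "007 (1) 007"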
+# +proc make_payload {keynum L len} { + set key [format %0${L}d $keynum] + set r $key + set i 1 + while {[string length $r]<$len} { + append r " ($i) $key" + incr i + } + return [string range $r 0 [expr {$len-1}]] +} + +# Verify the invariants on the database. Return an empty string on +# success or an error message if something is amiss. +# +proc check_invariants {} { + set ck [btree_integrity_check $::b 1 2 3 4 5 6] + if {$ck!=""} { + puts "\n*** SANITY:\n$ck" + exit + return $ck + } + btree_move_to $::c3 {} + btree_move_to $::c4 {} + btree_move_to $::c2 N + set N [btree_data $::c2] + btree_move_to $::c2 L + set L [btree_data $::c2] + set LM1 [expr {$L-1}] + for {set i 1} {$i<=$N} {incr i} { + set key {} + if {![btree_eof $::c3]} { + set key [btree_key $::c3] + } + if {[scan $key %d k]<1} {set k 0} + if {$k!=$i} { + set key {} + if {![btree_eof $::c4]} { + set key [btree_key $::c4] + } + if {[scan $key %d k]<1} {set k 0} + if {$k!=$i} { + return "Key $i is missing from both foreground and background" + } + set data [btree_data $::c4] + btree_next $::c4 + } else { + set data [btree_data $::c3] + btree_next $::c3 + } + set skey [string range $key 0 $LM1] + if {[btree_move_to $::c5 $skey]==0} { + set keylen [btree_data $::c5] + } else { + set keylen $L + } + if {[string length $key]!=$keylen} { + return "Key $i is the wrong size.\ + Is \"$key\" but should be \"[make_payload $k $L $keylen]\"" + } + if {[make_payload $k $L $keylen]!=$key} { + return "Key $i has an invalid extension" + } + if {[btree_move_to $::c6 $skey]==0} { + set datalen [btree_data $::c6] + } else { + set datalen $L + } + if {[string length $data]!=$datalen} { + return "Data for $i is the wrong size.\ + Is [string length $data] but should be $datalen" + } + if {[make_payload $k $L $datalen]!=$data} { + return "Entry $i has an incorrect data" + } + } +} + +# Look at all elements in both the foreground and background tables. +# Make sure the key is always the same as the prefix of the data. +# +# This routine was used for hunting bugs. It is not a part of standard +# tests. +# +proc check_data {n key} { + global c3 c4 + incr n -1 + foreach c [list $c3 $c4] { + btree_first $c ;# move_to $c $key + set cnt 0 + while {![btree_eof $c]} { + set key [btree_key $c] + set data [btree_data $c] + if {[string range $key 0 $n] ne [string range $data 0 $n]} { + puts "key=[list $key] data=[list $data] n=$n" + puts "cursor info = [btree_cursor_info $c]" + btree_page_dump $::b [lindex [btree_cursor_info $c] 0] + exit + } + btree_next $c + } + } +} + +# Make random changes to the database such that each change preserves +# the invariants. The number of changes is $n*N where N is the parameter +# from the descriptor table. Each changes begins with a random key. +# the entry with that key is put in the foreground table with probability +# $I and it is put in background with probability (1.0-$I). It gets +# a long key with probability $K and long data with probability $D. 
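+#
+# For example (illustration only), the first parameter set used further
+# below,
+#
+#   random_changes 0.5 0.5 0.1 0.1
+#
+# performs int(N*0.5) changes, sends each touched key to the foreground
+# with probability 0.5, and gives it a 10% chance of a long key and a 10%
+# chance of long data.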
+# +set chngcnt 0 +proc random_changes {n I K D} { + global chngcnt + btree_move_to $::c2 N + set N [btree_data $::c2] + btree_move_to $::c2 L + set L [btree_data $::c2] + set LM1 [expr {$L-1}] + set total [expr {int($N*$n)}] + set format %0${L}d + for {set i 0} {$i<$total} {incr i} { + set k [expr {int(rand()*$N)+1}] + set insert [expr {rand()<=$I}] + set longkey [expr {rand()<=$K}] + set longdata [expr {rand()<=$D}] + if {$longkey} { + set x [expr {rand()}] + set keylen [expr {int($x*$x*$x*$x*3000)+10}] + } else { + set keylen $L + } + set key [make_payload $k $L $keylen] + if {$longdata} { + set x [expr {rand()}] + set datalen [expr {int($x*$x*$x*$x*3000)+10}] + } else { + set datalen $L + } + set data [make_payload $k $L $datalen] + set basekey [format $format $k] + if {[set c [btree_move_to $::c3 $basekey]]==0} { + btree_delete $::c3 + } else { + if {$c<0} {btree_next $::c3} + if {![btree_eof $::c3]} { + if {[string match $basekey* [btree_key $::c3]]} { + btree_delete $::c3 + } + } + } + if {[set c [btree_move_to $::c4 $basekey]]==0} { + btree_delete $::c4 + } else { + if {$c<0} {btree_next $::c4} + if {![btree_eof $::c4]} { + if {[string match $basekey* [btree_key $::c4]]} { + btree_delete $::c4 + } + } + } + set kx -1 + if {![btree_eof $::c4]} { + if {[scan [btree_key $::c4] %d kx]<1} {set kx -1} + } + if {$kx==$k} { + btree_delete $::c4 + } + # For debugging - change the "0" to "1" to integrity check after + # every change. + if 0 { + incr chngcnt + puts check----$chngcnt + set ck [btree_integrity_check $::b 1 2 3 4 5 6] + if {$ck!=""} { + puts "\nSANITY CHECK FAILED!\n$ck" + exit + } + } + if {$insert} { + btree_insert $::c3 $key $data + } else { + btree_insert $::c4 $key $data + } + if {$longkey} { + btree_insert $::c5 $basekey $keylen + } elseif {[btree_move_to $::c5 $basekey]==0} { + btree_delete $::c5 + } + if {$longdata} { + btree_insert $::c6 $basekey $datalen + } elseif {[btree_move_to $::c6 $basekey]==0} { + btree_delete $::c6 + } + # For debugging - change the "0" to "1" to integrity check after + # every change. 
+ if 0 { + incr chngcnt + puts check----$chngcnt + set ck [btree_integrity_check $::b 1 2 3 4 5 6] + if {$ck!=""} { + puts "\nSANITY CHECK FAILED!\n$ck" + exit + } + } + } +} +set btree_trace 0 + +# Repeat this test sequence on database of various sizes +# +set testno 2 +foreach {N L} { + 10 2 + 50 2 + 200 3 + 2000 5 +} { + puts "**** N=$N L=$L ****" + set hash [md5file test2.bt] + do_test btree2-$testno.1 [subst -nocommands { + set ::c2 [btree_cursor $::b 2 1] + set ::c3 [btree_cursor $::b 3 1] + set ::c4 [btree_cursor $::b 4 1] + set ::c5 [btree_cursor $::b 5 1] + set ::c6 [btree_cursor $::b 6 1] + btree_begin_transaction $::b + build_db $N $L + check_invariants + }] {} + do_test btree2-$testno.2 { + btree_close_cursor $::c2 + btree_close_cursor $::c3 + btree_close_cursor $::c4 + btree_close_cursor $::c5 + btree_close_cursor $::c6 + btree_rollback $::b + md5file test2.bt + } $hash + do_test btree2-$testno.3 [subst -nocommands { + btree_begin_transaction $::b + set ::c2 [btree_cursor $::b 2 1] + set ::c3 [btree_cursor $::b 3 1] + set ::c4 [btree_cursor $::b 4 1] + set ::c5 [btree_cursor $::b 5 1] + set ::c6 [btree_cursor $::b 6 1] + build_db $N $L + check_invariants + }] {} + do_test btree2-$testno.4 { + btree_commit $::b + check_invariants + } {} + do_test btree2-$testno.5 { + lindex [btree_pager_stats $::b] 1 + } {6} + do_test btree2-$testno.6 { + btree_cursor_info $::c2 + btree_cursor_info $::c3 + btree_cursor_info $::c4 + btree_cursor_info $::c5 + btree_cursor_info $::c6 + btree_close_cursor $::c2 + btree_close_cursor $::c3 + btree_close_cursor $::c4 + btree_close_cursor $::c5 + btree_close_cursor $::c6 + lindex [btree_pager_stats $::b] 1 + } {0} + do_test btree2-$testno.7 { + btree_close $::b + } {} + + # For each database size, run various changes tests. 
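+  #
+  # (Illustration only.) The rollback checks in this loop lean on whole-file
+  # hashing: capture a hash, mutate inside a transaction, roll back, and
+  # require the file to be byte-for-byte identical again, roughly:
+  #
+  #   set before [md5file test2.bt]
+  #   btree_begin_transaction $::b
+  #   # ... arbitrary inserts and deletes ...
+  #   btree_rollback $::b
+  #   expr {[md5file test2.bt] eq $before}   ;# expected to be 1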
+ # + set num2 1 + foreach {n I K D} { + 0.5 0.5 0.1 0.1 + 1.0 0.2 0.1 0.1 + 1.0 0.8 0.1 0.1 + 2.0 0.0 0.1 0.1 + 2.0 1.0 0.1 0.1 + 2.0 0.0 0.0 0.0 + 2.0 1.0 0.0 0.0 + } { + set testid btree2-$testno.8.$num2 + set hash [md5file test2.bt] + do_test $testid.0 { + set ::b [btree_open test2.bt 2000 0] + set ::c2 [btree_cursor $::b 2 1] + set ::c3 [btree_cursor $::b 3 1] + set ::c4 [btree_cursor $::b 4 1] + set ::c5 [btree_cursor $::b 5 1] + set ::c6 [btree_cursor $::b 6 1] + check_invariants + } {} + set cnt 6 + for {set i 2} {$i<=6} {incr i} { + if {[lindex [btree_cursor_info [set ::c$i]] 0]!=$i} {incr cnt} + } + do_test $testid.1 { + btree_begin_transaction $::b + lindex [btree_pager_stats $::b] 1 + } $cnt + do_test $testid.2 [subst { + random_changes $n $I $K $D + }] {} + do_test $testid.3 { + check_invariants + } {} + do_test $testid.4 { + btree_close_cursor $::c2 + btree_close_cursor $::c3 + btree_close_cursor $::c4 + btree_close_cursor $::c5 + btree_close_cursor $::c6 + btree_rollback $::b + md5file test2.bt + } $hash + btree_begin_transaction $::b + set ::c2 [btree_cursor $::b 2 1] + set ::c3 [btree_cursor $::b 3 1] + set ::c4 [btree_cursor $::b 4 1] + set ::c5 [btree_cursor $::b 5 1] + set ::c6 [btree_cursor $::b 6 1] + do_test $testid.5 [subst { + random_changes $n $I $K $D + }] {} + do_test $testid.6 { + check_invariants + } {} + do_test $testid.7 { + btree_commit $::b + check_invariants + } {} + set hash [md5file test2.bt] + do_test $testid.8 { + btree_close_cursor $::c2 + btree_close_cursor $::c3 + btree_close_cursor $::c4 + btree_close_cursor $::c5 + btree_close_cursor $::c6 + lindex [btree_pager_stats $::b] 1 + } {0} + do_test $testid.9 { + btree_close $::b + set ::b [btree_open test2.bt 2000 0] + set ::c2 [btree_cursor $::b 2 1] + set ::c3 [btree_cursor $::b 3 1] + set ::c4 [btree_cursor $::b 4 1] + set ::c5 [btree_cursor $::b 5 1] + set ::c6 [btree_cursor $::b 6 1] + check_invariants + } {} + do_test $testid.10 { + btree_close_cursor $::c2 + btree_close_cursor $::c3 + btree_close_cursor $::c4 + btree_close_cursor $::c5 + btree_close_cursor $::c6 + lindex [btree_pager_stats $::b] 1 + } {0} + do_test $testid.11 { + btree_close $::b + } {} + incr num2 + } + incr testno + set ::b [btree_open test2.bt 2000 0] +} + +# Testing is complete. Shut everything down. +# +do_test btree-999.1 { + lindex [btree_pager_stats $::b] 1 +} {0} +do_test btree-999.2 { + btree_close $::b +} {} +do_test btree-999.3 { + file delete -force test2.bt + file exists test2.bt-journal +} {0} + +} ;# end if( not mem: and has pager_open command ); + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree4.test new file mode 100644 index 0000000..8a08af3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree4.test @@ -0,0 +1,101 @@ +# 2002 December 03 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend +# +# This file focuses on testing the sqliteBtreeNext() and +# sqliteBtreePrevious() procedures and making sure they are able +# to step through an entire table from either direction. 
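+#
+# (Illustrative scan patterns, using the same harness commands the tests
+# below exercise; $c is assumed to be an open cursor on a non-empty table.)
+#
+#   btree_first $c
+#   while {![btree_eof $c]} {            ;# forward scan
+#     # ... examine [btree_key $c] and [btree_data $c] ...
+#     btree_next $c
+#   }
+#
+#   btree_last $c
+#   while {1} {                          ;# reverse scan
+#     # ... examine [btree_key $c] and [btree_data $c] ...
+#     if {[btree_prev $c]} break         ;# btree_prev returns 1 at the start
+#   }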
+# +# $Id: btree4.test,v 1.2 2004/05/09 20:40:12 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {[info commands btree_open]!=""} { + +# Open a test database. +# +file delete -force test1.bt +file delete -force test1.bt-journal +set b1 [btree_open test1.bt 2000 0] +btree_begin_transaction $b1 +do_test btree4-0.1 { + btree_create_table $b1 0 +} 2 + +set data {abcdefghijklmnopqrstuvwxyz0123456789} +append data $data +append data $data +append data $data +append data $data + +foreach N {10 100 1000} { + btree_clear_table $::b1 2 + set ::c1 [btree_cursor $::b1 2 1] + do_test btree4-$N.1 { + for {set i 1} {$i<=$N} {incr i} { + btree_insert $::c1 [format k-%05d $i] $::data-$i + } + btree_first $::c1 + btree_key $::c1 + } {k-00001} + do_test btree4-$N.2 { + btree_data $::c1 + } $::data-1 + for {set i 2} {$i<=$N} {incr i} { + do_test btree-$N.3.$i.1 { + btree_next $::c1 + } 0 + do_test btree-$N.3.$i.2 { + btree_key $::c1 + } [format k-%05d $i] + do_test btree-$N.3.$i.3 { + btree_data $::c1 + } $::data-$i + } + do_test btree4-$N.4 { + btree_next $::c1 + } 1 + do_test btree4-$N.5 { + btree_last $::c1 + } 0 + do_test btree4-$N.6 { + btree_key $::c1 + } [format k-%05d $N] + do_test btree4-$N.7 { + btree_data $::c1 + } $::data-$N + for {set i [expr {$N-1}]} {$i>=1} {incr i -1} { + do_test btree4-$N.8.$i.1 { + btree_prev $::c1 + } 0 + do_test btree4-$N.8.$i.2 { + btree_key $::c1 + } [format k-%05d $i] + do_test btree4-$N.8.$i.3 { + btree_data $::c1 + } $::data-$i + } + do_test btree4-$N.9 { + btree_prev $::c1 + } 1 + btree_close_cursor $::c1 +} + +btree_rollback $::b1 +btree_pager_ref_dump $::b1 +btree_close $::b1 + +} ;# end if( not mem: and has pager_open command ); + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree5.test new file mode 100644 index 0000000..2afcd84 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree5.test @@ -0,0 +1,292 @@ +# 2004 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend +# +# $Id: btree5.test,v 1.5 2004/05/14 12:17:46 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Attempting to read table 1 of an empty file gives an SQLITE_EMPTY +# error. +# +do_test btree5-1.1 { + file delete -force test1.bt + file delete -force test1.bt-journal + set rc [catch {btree_open test1.bt 2000 0} ::b1] +} {0} +do_test btree5-1.2 { + set rc [catch {btree_cursor $::b1 1 0} ::c1] +} {1} +do_test btree5-1.3 { + set ::c1 +} {SQLITE_EMPTY} +do_test btree5-1.4 { + set rc [catch {btree_cursor $::b1 1 1} ::c1] +} {1} +do_test btree5-1.5 { + set ::c1 +} {SQLITE_EMPTY} + +# Starting a transaction initializes the first page of the database +# and the error goes away. 
+# +do_test btree5-1.6 { + btree_begin_transaction $b1 + set rc [catch {btree_cursor $b1 1 0} c1] +} {0} +do_test btree5-1.7 { + btree_first $c1 +} {1} +do_test btree5-1.8 { + btree_close_cursor $c1 + btree_rollback $b1 + set rc [catch {btree_cursor $b1 1 0} c1] +} {1} +do_test btree5-1.9 { + set c1 +} {SQLITE_EMPTY} +do_test btree5-1.10 { + btree_begin_transaction $b1 + set rc [catch {btree_cursor $b1 1 0} c1] +} {0} +do_test btree5-1.11 { + btree_first $c1 +} {1} +do_test btree5-1.12 { + btree_close_cursor $c1 + btree_commit $b1 + set rc [catch {btree_cursor $b1 1 0} c1] +} {0} +do_test btree5-1.13 { + btree_first $c1 +} {1} +do_test btree5-1.14 { + btree_close_cursor $c1 + btree_integrity_check $b1 1 +} {} + +# Insert many entries into table 1. This is designed to test the +# virtual-root logic that comes into play for page one. It is also +# a good test of INTKEY tables. +# +# Stagger the inserts. After the inserts complete, go back and do +# deletes. Stagger the deletes too. Repeat this several times. +# + +# Do N inserts into table 1 using random keys between 0 and 1000000 +# +proc random_inserts {N} { + global c1 + while {$N>0} { + set k [expr {int(rand()*1000000)}] + if {[btree_move_to $c1 $k]==0} continue; # entry already exists + btree_insert $c1 $k data-for-$k + incr N -1 + } +} + +# Do N delete from table 1 +# +proc random_deletes {N} { + global c1 + while {$N>0} { + set k [expr {int(rand()*1000000)}] + btree_move_to $c1 $k + btree_delete $c1 + incr N -1 + } +} + +# Make sure the table has exactly N entries. Make sure the data for +# each entry agrees with its key. +# +proc check_table {N} { + global c1 + btree_first $c1 + set cnt 0 + while {![btree_eof $c1]} { + if {[set data [btree_data $c1]] ne "data-for-[btree_key $c1]"} { + return "wrong data for entry $cnt" + } + set n [string length $data] + set fdata1 [btree_fetch_data $c1 $n] + set fdata2 [btree_fetch_data $c1 -1] + if {$fdata1 ne "" && $fdata1 ne $data} { + return "DataFetch returned the wrong value with amt=$n" + } + if {$fdata1 ne $fdata2} { + return "DataFetch returned the wrong value when amt=-1" + } + if {$n>10} { + set fdata3 [btree_fetch_data $c1 10] + if {$fdata3 ne [string range $data 0 9]} { + return "DataFetch returned the wrong value when amt=10" + } + } + incr cnt + btree_next $c1 + } + if {$cnt!=$N} { + return "wrong number of entries" + } + return {} +} + +# Initialize the database +# +btree_begin_transaction $b1 +set c1 [btree_cursor $b1 1 1] +set btree_trace 0 + +# Do the tests. +# +set cnt 0 +for {set i 1} {$i<=100} {incr i} { + do_test btree5-2.$i.1 { + random_inserts 200 + incr cnt 200 + check_table $cnt + } {} + do_test btree5-2.$i.2 { + btree_integrity_check $b1 1 + } {} + do_test btree5-2.$i.3 { + random_deletes 190 + incr cnt -190 + check_table $cnt + } {} + do_test btree5-2.$i.4 { + btree_integrity_check $b1 1 + } {} +} + +#btree_tree_dump $b1 1 +btree_close_cursor $c1 +btree_commit $b1 +btree_begin_transaction $b1 + +# This procedure converts an integer into a variable-length text key. +# The conversion is reversible. +# +# The first two characters of the string are alphabetics derived from +# the least significant bits of the number. Because they are derived +# from least significant bits, the sort order of the resulting string +# is different from numeric order. After the alphabetic prefix comes +# the original number. A variable-length suffix follows. The length +# of the suffix is based on a hash of the original number. 
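+#
+# For example (illustration only), with the 52-character charset defined
+# below, num_to_key 1 produces "ba-1-" followed by the first 212 characters
+# of $suffix (because (1*211)%593 == 211), and the companion proc recovers
+# the original integer from the digits between the dashes:
+#
+#   key_to_num [num_to_key 1]   ;# -> 1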
+# +proc num_to_key {n} { + global charset ncharset suffix + set c1 [string index $charset [expr {$n%$ncharset}]] + set c2 [string index $charset [expr {($n/$ncharset)%$ncharset}]] + set nsuf [expr {($n*211)%593}] + return $c1$c2-$n-[string range $suffix 0 $nsuf] +} +set charset {abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ} +set ncharset [string length $charset] +set suffix $charset$charset +while {[string length $suffix]<1000} {append suffix $suffix} + +# This procedures extracts the original integer used to create +# a key by num_to_key +# +proc key_to_num {key} { + regexp {^..-([0-9]+)} $key all n + return $n +} + +# Insert into table $tab keys corresponding to all values between +# $start and $end, inclusive. +# +proc insert_range {tab start end} { + for {set i $start} {$i<=$end} {incr i} { + btree_insert $tab [num_to_key $i] {} + } +} + +# Delete from table $tab keys corresponding to all values between +# $start and $end, inclusive. +# +proc delete_range {tab start end} { + for {set i $start} {$i<=$end} {incr i} { + if {[btree_move_to $tab [num_to_key $i]]==0} { + btree_delete $tab + } + } +} + +# Make sure table $tab contains exactly those keys corresponding +# to values between $start and $end +# +proc check_range {tab start end} { + btree_first $tab + while {![btree_eof $tab]} { + set key [btree_key $tab] + set i [key_to_num $key] + if {[num_to_key $i] ne $key} { + return "malformed key: $key" + } + set got($i) 1 + btree_next $tab + } + set all [lsort -integer [array names got]] + if {[llength $all]!=$end+1-$start} { + return "table contains wrong number of values" + } + if {[lindex $all 0]!=$start} { + return "wrong starting value" + } + if {[lindex $all end]!=$end} { + return "wrong ending value" + } + return {} +} + +# Create a zero-data table and test it out. +# +do_test btree5-3.1 { + set rc [catch {btree_create_table $b1 2} t2] +} {0} +do_test btree5-3.2 { + set rc [catch {btree_cursor $b1 $t2 1} c2] +} {0} +set start 1 +set end 100 +for {set i 1} {$i<=100} {incr i} { + do_test btree5-3.3.$i.1 { + insert_range $c2 $start $end + btree_integrity_check $b1 1 $t2 + } {} + do_test btree5-3.3.$i.2 { + check_range $c2 $start $end + } {} + set nstart $start + incr nstart 89 + do_test btree5-3.3.$i.3 { + delete_range $c2 $start $nstart + btree_integrity_check $b1 1 $t2 + } {} + incr start 90 + do_test btree5-3.3.$i.4 { + check_range $c2 $start $end + } {} + incr end 100 +} + + +btree_close_cursor $c2 +btree_commit $b1 +btree_close $b1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree6.test new file mode 100644 index 0000000..2d31157 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree6.test @@ -0,0 +1,128 @@ +# 2004 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend - specifically +# the B+tree tables. B+trees store all data on the leaves rather +# that storing data with keys on interior nodes. 
+# +# $Id: btree6.test,v 1.4 2004/05/20 22:16:31 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Insert many entries into the table that cursor $cur points to. +# The table should be an INTKEY table. +# +# Stagger the inserts. After the inserts complete, go back and do +# deletes. Stagger the deletes too. Repeat this several times. +# + +# Do N inserts into table $tab using random keys between 0 and 1000000 +# +proc random_inserts {cur N} { + global inscnt + while {$N>0} { + set k [expr {int(rand()*1000000)}] + if {[btree_move_to $cur $k]==0} { + continue; # entry already exists + } + incr inscnt + btree_insert $cur $k data-for-$k + incr N -1 + } +} +set inscnt 0 + +# Do N delete from the table that $cur points to. +# +proc random_deletes {cur N} { + while {$N>0} { + set k [expr {int(rand()*1000000)}] + btree_move_to $cur $k + btree_delete $cur + incr N -1 + } +} + +# Make sure the table that $cur points to has exactly N entries. +# Make sure the data for each entry agrees with its key. +# +proc check_table {cur N} { + btree_first $cur + set cnt 0 + while {![btree_eof $cur]} { + if {[set data [btree_data $cur]] ne "data-for-[btree_key $cur]"} { + return "wrong data for entry $cnt" + } + set n [string length $data] + set fdata1 [btree_fetch_data $cur $n] + set fdata2 [btree_fetch_data $cur -1] + if {$fdata1 ne "" && $fdata1 ne $data} { + return "DataFetch returned the wrong value with amt=$n" + } + if {$fdata1 ne $fdata2} { + return "DataFetch returned the wrong value when amt=-1" + } + if {$n>10} { + set fdata3 [btree_fetch_data $cur 10] + if {$fdata3 ne [string range $data 0 9]} { + return "DataFetch returned the wrong value when amt=10" + } + } + incr cnt + btree_next $cur + } + if {$cnt!=$N} { + return "wrong number of entries. Got $cnt. Looking for $N" + } + return {} +} + +# Initialize the database +# +file delete -force test1.bt +file delete -force test1.bt-journal +set b1 [btree_open test1.bt 2000 0] +btree_begin_transaction $b1 +set tab [btree_create_table $b1 5] +set cur [btree_cursor $b1 $tab 1] +set btree_trace 0 +expr srand(1) + +# Do the tests. +# +set cnt 0 +for {set i 1} {$i<=40} {incr i} { + do_test btree6-1.$i.1 { + random_inserts $cur 200 + incr cnt 200 + check_table $cur $cnt + } {} + do_test btree6-1.$i.2 { + btree_integrity_check $b1 1 $tab + } {} + do_test btree6-1.$i.3 { + random_deletes $cur 90 + incr cnt -90 + check_table $cur $cnt + } {} + do_test btree6-1.$i.4 { + btree_integrity_check $b1 1 $tab + } {} +} + +btree_close_cursor $cur +btree_commit $b1 +btree_close $b1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree7.test new file mode 100644 index 0000000..eaf3713 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree7.test @@ -0,0 +1,50 @@ +# 2004 Jun 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend. +# +# $Id: btree7.test,v 1.2 2004/11/04 14:47:13 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Stress the balance routine by trying to create situations where +# 3 neighboring nodes split into 5. 
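+#
+# (Sizing note, illustration only.) The rows below alternate between the
+# full $bigdata value ("_123456789" doubled six times) and a 450-byte
+# prefix of it, so that - assuming the default 1024-byte page - each leaf
+# holds only a couple of rows and the final INSERT forces neighbouring
+# pages to be rebalanced:
+#
+#   string length $bigdata   ;# 640
+#   string length $data450   ;# 450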
+# +set bigdata _123456789 ;# 10 +append bigdata $bigdata ;# 20 +append bigdata $bigdata ;# 40 +append bigdata $bigdata ;# 80 +append bigdata $bigdata ;# 160 +append bigdata $bigdata ;# 320 +append bigdata $bigdata ;# 640 +set data450 [string range $bigdata 0 449] +do_test btree7-1.1 { + execsql " + CREATE TABLE t1(x INTEGER PRIMARY KEY, y TEXT); + INSERT INTO t1 VALUES(1, '$bigdata'); + INSERT INTO t1 VALUES(2, '$bigdata'); + INSERT INTO t1 VALUES(3, '$data450'); + INSERT INTO t1 VALUES(5, '$data450'); + INSERT INTO t1 VALUES(8, '$bigdata'); + INSERT INTO t1 VALUES(9, '$bigdata'); + " +} {} +integrity_check btree7-1.2 +do_test btree7-1.3 { + execsql " + INSERT INTO t1 VALUES(4, '$bigdata'); + " +} {} +integrity_check btree7-1.4 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree8.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree8.test new file mode 100644 index 0000000..f547271 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree8.test @@ -0,0 +1,43 @@ +# 2005 August 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend. +# +# $Id: btree8.test,v 1.6 2005/08/02 17:13:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Ticket #1346: If the table rooted on page 1 contains a single entry +# and that single entries has to flow out into another page because +# page 1 is 100-bytes smaller than most other pages, then you delete that +# one entry, everything should still work. +# +do_test btree8-1.1 { + execsql { +CREATE TABLE t1(x + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- + ---------------------------------------------------------------------------- +); +DROP table t1; + } +} {} +integrity_check btree8-1.2 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/btree9.test b/libraries/sqlite/unix/sqlite-3.5.1/test/btree9.test new file mode 100644 index 0000000..678a12c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/btree9.test @@ -0,0 +1,49 @@ +# 2007 May 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is btree database backend. +# +# $Id: btree9.test,v 1.1 2007/05/02 01:34:32 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# The sqlite3BtreeInsert() API now has an additional "nZero" parameter +# which specifies the number of zero bytes to append to the end of the +# data. This feature allows large zero-filled BLOBs to be created without +# having to allocate a big chunk of memory to instantiate the blob. +# +# The following code tests the new feature. +# + +# Create the database +# +do_test btree9-1.1 { + file delete -force test1.bt + file delete -force test1.bt-journal + set b1 [btree_open test1.bt 2000 0] + btree_begin_transaction $b1 + set t1 [btree_create_table $b1 5] + set c1 [btree_cursor $b1 $t1 1] + btree_insert $c1 1 data-for-1 20000 + btree_move_to $c1 1 + btree_key $c1 +} {1} +do_test btree9-1.2 { + btree_payload_size $c1 +} {20010} + + +btree_close_cursor $c1 +btree_commit $b1 +btree_close $b1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/busy.test b/libraries/sqlite/unix/sqlite-3.5.1/test/busy.test new file mode 100644 index 0000000..f5a4072 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/busy.test @@ -0,0 +1,44 @@ +# 2005 july 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file test the busy handler +# +# $Id: busy.test,v 1.2 2005/09/17 18:02:37 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test busy-1.1 { + sqlite3 db2 test.db + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1 + } +} 1 +proc busy x { + lappend ::busyargs $x + if {$x>2} {return 1} + return 0 +} +set busyargs {} +do_test busy-1.2 { + db busy busy + db2 eval {begin exclusive} + catchsql {begin immediate} +} {1 {database is locked}} +do_test busy-1.3 { + set busyargs +} {0 1 2 3} + +db2 close + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/cache.test b/libraries/sqlite/unix/sqlite-3.5.1/test/cache.test new file mode 100644 index 0000000..dd51c7c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/cache.test @@ -0,0 +1,63 @@ +# 2007 March 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: cache.test,v 1.4 2007/08/22 02:56:44 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} +sqlite3_soft_heap_limit 0 + +proc pager_cache_size {db} { + set bt [btree_from_db $db] + db_enter $db + array set stats [btree_pager_stats $bt] + db_leave $db + return $stats(page) +} + +do_test cache-1.1 { + pager_cache_size db +} {0} + +do_test cache-1.2 { + execsql { + PRAGMA auto_vacuum=OFF; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + } + pager_cache_size db +} {2} + +# At one point, repeatedly locking and unlocking the cache was causing +# a resource leak of one page per repetition. The page wasn't actually +# leaked, but would not be reused until the pager-cache was full (i.e. +# 2000 pages by default). +# +# This tests that once the pager-cache is initialised, it can be locked +# and unlocked repeatedly without internally allocating any new pages. +# +set cache_size [pager_cache_size db] +for {set ii 0} {$ii < 10} {incr ii} { + + do_test cache-1.3.$ii { + execsql {SELECT * FROM abc} + pager_cache_size db + } $::cache_size + +} +sqlite3_soft_heap_limit $soft_limit + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/capi2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/capi2.test new file mode 100644 index 0000000..4ebcff2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/capi2.test @@ -0,0 +1,793 @@ +# 2003 January 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the callback-free C/C++ API. +# +# $Id: capi2.test,v 1.34 2007/08/22 00:39:21 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Return the text values from the current row pointed at by STMT as a list. +proc get_row_values {STMT} { + set VALUES [list] + for {set i 0} {$i < [sqlite3_data_count $STMT]} {incr i} { + lappend VALUES [sqlite3_column_text $STMT $i] + } + return $VALUES +} + +# Return the column names followed by declaration types for the result set +# of the SQL statement STMT. +# +# i.e. 
for: +# CREATE TABLE abc(a text, b integer); +# SELECT * FROM abc; +# +# The result is {a b text integer} +proc get_column_names {STMT} { + set VALUES [list] + for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} { + lappend VALUES [sqlite3_column_name $STMT $i] + } + for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} { + lappend VALUES [sqlite3_column_decltype $STMT $i] + } + return $VALUES +} + +# Check basic functionality +# +do_test capi2-1.1 { + set DB [sqlite3_connection_pointer db] + execsql {CREATE TABLE t1(a,b,c)} + set VM [sqlite3_prepare $DB {SELECT name, rowid FROM sqlite_master} -1 TAIL] + set TAIL +} {} +do_test capi2-1.2 { + sqlite3_step $VM +} {SQLITE_ROW} +do_test capi2-1.3 { + sqlite3_data_count $VM +} {2} +do_test capi2-1.4 { + get_row_values $VM +} {t1 1} +do_test capi2-1.5 { + get_column_names $VM +} {name rowid text INTEGER} +do_test capi2-1.6 { + sqlite3_step $VM +} {SQLITE_DONE} +do_test capi2-1.7 { + list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM] +} {2 {} {name rowid text INTEGER}} +do_test capi2-1.8-misuse { + sqlite3_step $VM +} {SQLITE_MISUSE} + +# Update: In v2, once SQLITE_MISUSE is returned the statement handle cannot +# be interrogated for more information. However in v3, since the column +# count, names and types are determined at compile time, these are still +# accessible after an SQLITE_MISUSE error. +do_test capi2-1.9 { + list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM] +} {2 {} {name rowid text INTEGER}} +do_test capi2-1.10 { + sqlite3_data_count $VM +} {0} + +do_test capi2-1.11 { + sqlite3_finalize $VM +} {SQLITE_OK} + +# Check to make sure that the "tail" of a multi-statement SQL script +# is returned by sqlite3_prepare. +# +do_test capi2-2.1 { + set SQL { + SELECT name, rowid FROM sqlite_master; + SELECT name, rowid FROM sqlite_master WHERE 0; + -- A comment at the end + } + set VM [sqlite3_prepare $DB $SQL -1 SQL] + set SQL +} { + SELECT name, rowid FROM sqlite_master WHERE 0; + -- A comment at the end + } +do_test capi2-2.2 { + set r [sqlite3_step $VM] + lappend r [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_ROW 2 {t1 1} {name rowid text INTEGER}} +do_test capi2-2.3 { + set r [sqlite3_step $VM] + lappend r [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_DONE 2 {} {name rowid text INTEGER}} +do_test capi2-2.4 { + sqlite3_finalize $VM +} {SQLITE_OK} +do_test capi2-2.5 { + set VM [sqlite3_prepare $DB $SQL -1 SQL] + set SQL +} { + -- A comment at the end + } +do_test capi2-2.6 { + set r [sqlite3_step $VM] + lappend r [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_DONE 2 {} {name rowid text INTEGER}} +do_test capi2-2.7 { + sqlite3_finalize $VM +} {SQLITE_OK} +do_test capi2-2.8 { + set VM [sqlite3_prepare $DB $SQL -1 SQL] + list $SQL $VM +} {{} {}} + +# Check the error handling. 
+# +do_test capi2-3.1 { + set rc [catch { + sqlite3_prepare $DB {select bogus from sqlite_master} -1 TAIL + } msg] + lappend rc $msg $TAIL +} {1 {(1) no such column: bogus} {}} +do_test capi2-3.2 { + set rc [catch { + sqlite3_prepare $DB {select bogus from } -1 TAIL + } msg] + lappend rc $msg $TAIL +} {1 {(1) near " ": syntax error} {}} +do_test capi2-3.3 { + set rc [catch { + sqlite3_prepare $DB {;;;;select bogus from sqlite_master} -1 TAIL + } msg] + lappend rc $msg $TAIL +} {1 {(1) no such column: bogus} {}} +do_test capi2-3.4 { + set rc [catch { + sqlite3_prepare $DB {select bogus from sqlite_master;x;} -1 TAIL + } msg] + lappend rc $msg $TAIL +} {1 {(1) no such column: bogus} {x;}} +do_test capi2-3.5 { + set rc [catch { + sqlite3_prepare $DB {select bogus from sqlite_master;;;x;} -1 TAIL + } msg] + lappend rc $msg $TAIL +} {1 {(1) no such column: bogus} {;;x;}} +do_test capi2-3.6 { + set rc [catch { + sqlite3_prepare $DB {select 5/0} -1 TAIL + } VM] + lappend rc $TAIL +} {0 {}} +do_test capi2-3.7 { + list [sqlite3_step $VM] \ + [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_ROW 1 {{}} {5/0 {}}} +do_test capi2-3.8 { + sqlite3_finalize $VM +} {SQLITE_OK} +do_test capi2-3.9 { + execsql {CREATE UNIQUE INDEX i1 ON t1(a)} + set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(1,2,3)} -1 TAIL] + set TAIL +} {} +do_test capi2-3.9b {db changes} {0} +do_test capi2-3.10 { + list [sqlite3_step $VM] \ + [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_DONE 0 {} {}} + +# Update for v3 - the change has not actually happened until the query is +# finalized. Is this going to cause trouble for anyone? Lee Nelson maybe? +# (Later:) The change now happens just before SQLITE_DONE is returned. +do_test capi2-3.10b {db changes} {1} +do_test capi2-3.11 { + sqlite3_finalize $VM +} {SQLITE_OK} +do_test capi2-3.11b {db changes} {1} +#do_test capi2-3.12-misuse { +# sqlite3_finalize $VM +#} {SQLITE_MISUSE} +do_test capi2-3.13 { + set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(1,3,4)} -1 TAIL] + list [sqlite3_step $VM] \ + [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_ERROR 0 {} {}} + +# Update for v3: Preparing a statement does not affect the change counter. +# (Test result changes from 0 to 1). (Later:) change counter updates occur +# when sqlite3_step returns, not at finalize time. 
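+# As a hypothetical outline of the rule described above (not one of the
+# numbered tests; the statement and values are placeholders only):
+#
+#   set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(9,8,7)} -1 TAIL]
+#   db changes            ;# unchanged by the prepare
+#   sqlite3_step $VM      ;# returns SQLITE_DONE; the counter is updated here
+#   db changes            ;# now reports 1 for this INSERT
+#   sqlite3_finalize $VM  ;# finalizing does not modify the counter again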
+do_test capi2-3.13b {db changes} {0} + +do_test capi2-3.14 { + list [sqlite3_finalize $VM] [sqlite3_errmsg $DB] +} {SQLITE_CONSTRAINT {column a is not unique}} +do_test capi2-3.15 { + set VM [sqlite3_prepare $DB {CREATE TABLE t2(a NOT NULL, b)} -1 TAIL] + set TAIL +} {} +do_test capi2-3.16 { + list [sqlite3_step $VM] \ + [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_DONE 0 {} {}} +do_test capi2-3.17 { + list [sqlite3_finalize $VM] [sqlite3_errmsg $DB] +} {SQLITE_OK {not an error}} +do_test capi2-3.18 { + set VM [sqlite3_prepare $DB {INSERT INTO t2 VALUES(NULL,2)} -1 TAIL] + list [sqlite3_step $VM] \ + [sqlite3_column_count $VM] \ + [get_row_values $VM] \ + [get_column_names $VM] +} {SQLITE_ERROR 0 {} {}} +do_test capi2-3.19 { + list [sqlite3_finalize $VM] [sqlite3_errmsg $DB] +} {SQLITE_CONSTRAINT {t2.a may not be NULL}} + +do_test capi2-3.20 { + execsql { + CREATE TABLE a1(message_id, name , UNIQUE(message_id, name) ); + INSERT INTO a1 VALUES(1, 1); + } +} {} +do_test capi2-3.21 { + set VM [sqlite3_prepare $DB {INSERT INTO a1 VALUES(1, 1)} -1 TAIL] + sqlite3_step $VM +} {SQLITE_ERROR} +do_test capi2-3.22 { + sqlite3_errcode $DB +} {SQLITE_ERROR} +do_test capi2-3.23 { + sqlite3_finalize $VM +} {SQLITE_CONSTRAINT} +do_test capi2-3.24 { + sqlite3_errcode $DB +} {SQLITE_CONSTRAINT} + +# Two or more virtual machines exists at the same time. +# +do_test capi2-4.1 { + set VM1 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(1,2)} -1 TAIL] + set TAIL +} {} +do_test capi2-4.2 { + set VM2 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(2,3)} -1 TAIL] + set TAIL +} {} +do_test capi2-4.3 { + set VM3 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(3,4)} -1 TAIL] + set TAIL +} {} +do_test capi2-4.4 { + list [sqlite3_step $VM2] \ + [sqlite3_column_count $VM2] \ + [get_row_values $VM2] \ + [get_column_names $VM2] +} {SQLITE_DONE 0 {} {}} +do_test capi2-4.5 { + execsql {SELECT * FROM t2 ORDER BY a} +} {2 3} +do_test capi2-4.6 { + sqlite3_finalize $VM2 +} {SQLITE_OK} +do_test capi2-4.7 { + list [sqlite3_step $VM3] \ + [sqlite3_column_count $VM3] \ + [get_row_values $VM3] \ + [get_column_names $VM3] +} {SQLITE_DONE 0 {} {}} +do_test capi2-4.8 { + execsql {SELECT * FROM t2 ORDER BY a} +} {2 3 3 4} +do_test capi2-4.9 { + sqlite3_finalize $VM3 +} {SQLITE_OK} +do_test capi2-4.10 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_DONE 0 {} {}} +do_test capi2-4.11 { + execsql {SELECT * FROM t2 ORDER BY a} +} {1 2 2 3 3 4} +do_test capi2-4.12 { + sqlite3_finalize $VM1 +} {SQLITE_OK} + +# Interleaved SELECTs +# +do_test capi2-5.1 { + set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL] + set VM2 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL] + set VM3 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL] + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 2 {2 3} {a b {} {}}} +do_test capi2-5.2 { + list [sqlite3_step $VM2] \ + [sqlite3_column_count $VM2] \ + [get_row_values $VM2] \ + [get_column_names $VM2] +} {SQLITE_ROW 2 {2 3} {a b {} {}}} +do_test capi2-5.3 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 2 {3 4} {a b {} {}}} +do_test capi2-5.4 { + list [sqlite3_step $VM3] \ + [sqlite3_column_count $VM3] \ + [get_row_values $VM3] \ + [get_column_names $VM3] +} {SQLITE_ROW 2 {2 3} {a b {} {}}} +do_test capi2-5.5 { + list [sqlite3_step $VM3] \ + 
[sqlite3_column_count $VM3] \ + [get_row_values $VM3] \ + [get_column_names $VM3] +} {SQLITE_ROW 2 {3 4} {a b {} {}}} +do_test capi2-5.6 { + list [sqlite3_step $VM3] \ + [sqlite3_column_count $VM3] \ + [get_row_values $VM3] \ + [get_column_names $VM3] +} {SQLITE_ROW 2 {1 2} {a b {} {}}} +do_test capi2-5.7 { + list [sqlite3_step $VM3] \ + [sqlite3_column_count $VM3] \ + [get_row_values $VM3] \ + [get_column_names $VM3] +} {SQLITE_DONE 2 {} {a b {} {}}} +do_test capi2-5.8 { + sqlite3_finalize $VM3 +} {SQLITE_OK} +do_test capi2-5.9 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 2 {1 2} {a b {} {}}} +do_test capi2-5.10 { + sqlite3_finalize $VM1 +} {SQLITE_OK} +do_test capi2-5.11 { + list [sqlite3_step $VM2] \ + [sqlite3_column_count $VM2] \ + [get_row_values $VM2] \ + [get_column_names $VM2] +} {SQLITE_ROW 2 {3 4} {a b {} {}}} +do_test capi2-5.12 { + list [sqlite3_step $VM2] \ + [sqlite3_column_count $VM2] \ + [get_row_values $VM2] \ + [get_column_names $VM2] +} {SQLITE_ROW 2 {1 2} {a b {} {}}} +do_test capi2-5.11 { + sqlite3_finalize $VM2 +} {SQLITE_OK} + +# Check for proper SQLITE_BUSY returns. +# +do_test capi2-6.1 { + execsql { + BEGIN; + CREATE TABLE t3(x counter); + INSERT INTO t3 VALUES(1); + INSERT INTO t3 VALUES(2); + INSERT INTO t3 SELECT x+2 FROM t3; + INSERT INTO t3 SELECT x+4 FROM t3; + INSERT INTO t3 SELECT x+8 FROM t3; + COMMIT; + } + set VM1 [sqlite3_prepare $DB {SELECT * FROM t3} -1 TAIL] + sqlite3 db2 test.db + execsql {BEGIN} db2 +} {} +# Update for v3: BEGIN doesn't write-lock the database. It is quite +# difficult to get v3 to write-lock the database, which causes a few +# problems for test scripts. +# +# do_test capi2-6.2 { +# list [sqlite3_step $VM1] \ +# [sqlite3_column_count $VM1] \ +# [get_row_values $VM1] \ +# [get_column_names $VM1] +# } {SQLITE_BUSY 0 {} {}} +do_test capi2-6.3 { + execsql {COMMIT} db2 +} {} +do_test capi2-6.4 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 1 {x counter}} +do_test capi2-6.5 { + catchsql {INSERT INTO t3 VALUES(10);} db2 +} {1 {database is locked}} +do_test capi2-6.6 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 2 {x counter}} +do_test capi2-6.7 { + execsql {SELECT * FROM t2} db2 +} {2 3 3 4 1 2} +do_test capi2-6.8 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 3 {x counter}} +do_test capi2-6.9 { + execsql {SELECT * FROM t2} +} {2 3 3 4 1 2} +do_test capi2-6.10 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 4 {x counter}} +do_test capi2-6.11 { + execsql {BEGIN} +} {} +do_test capi2-6.12 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 5 {x counter}} + +# A read no longer blocks a write in the same connection. 
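+# For example (a hypothetical sketch, not executed; the UPDATE shown is a
+# placeholder):
+#
+#   catchsql {UPDATE t3 SET x=x+0}       ;# same connection: {0 {}}
+#   catchsql {UPDATE t3 SET x=x+0} db2   ;# second connection: {1 {database is locked}}
+#
+# The second call still fails, as in capi2-6.5 above, because this
+# connection holds a read lock while $VM1 is active.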
+#do_test capi2-6.13 { +# catchsql {UPDATE t3 SET x=x+1} +#} {1 {database table is locked}} + +do_test capi2-6.14 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 6 {x counter}} +do_test capi2-6.15 { + execsql {SELECT * FROM t1} +} {1 2 3} +do_test capi2-6.16 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 7 {x counter}} +do_test capi2-6.17 { + catchsql {UPDATE t1 SET b=b+1} +} {0 {}} +do_test capi2-6.18 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 8 {x counter}} +do_test capi2-6.19 { + execsql {SELECT * FROM t1} +} {1 3 3} +do_test capi2-6.20 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 9 {x counter}} +#do_test capi2-6.21 { +# execsql {ROLLBACK; SELECT * FROM t1} +#} {1 2 3} +do_test capi2-6.22 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 10 {x counter}} +#do_test capi2-6.23 { +# execsql {BEGIN TRANSACTION;} +#} {} +do_test capi2-6.24 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 11 {x counter}} +do_test capi2-6.25 { + execsql { + INSERT INTO t1 VALUES(2,3,4); + SELECT * FROM t1; + } +} {1 3 3 2 3 4} +do_test capi2-6.26 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 12 {x counter}} +do_test capi2-6.27 { + catchsql { + INSERT INTO t1 VALUES(2,4,5); + SELECT * FROM t1; + } +} {1 {column a is not unique}} +do_test capi2-6.28 { + list [sqlite3_step $VM1] \ + [sqlite3_column_count $VM1] \ + [get_row_values $VM1] \ + [get_column_names $VM1] +} {SQLITE_ROW 1 13 {x counter}} +do_test capi2-6.99 { + sqlite3_finalize $VM1 +} {SQLITE_OK} +catchsql {ROLLBACK} + +do_test capi2-7.1 { + stepsql $DB { + SELECT * FROM t1 + } +} {0 1 2 3} +do_test capi2-7.2 { + stepsql $DB { + PRAGMA count_changes=on + } +} {0} +do_test capi2-7.3 { + stepsql $DB { + UPDATE t1 SET a=a+10; + } +} {0 1} +do_test capi2-7.4 { + stepsql $DB { + INSERT INTO t1 SELECT a+1,b+1,c+1 FROM t1; + } +} {0 1} +do_test capi2-7.4b {sqlite3_changes $DB} {1} +do_test capi2-7.5 { + stepsql $DB { + UPDATE t1 SET a=a+10; + } +} {0 2} +do_test capi2-7.5b {sqlite3_changes $DB} {2} +do_test capi2-7.6 { + stepsql $DB { + SELECT * FROM t1; + } +} {0 21 2 3 22 3 4} +do_test capi2-7.7 { + stepsql $DB { + INSERT INTO t1 SELECT a+2,b+2,c+2 FROM t1; + } +} {0 2} +do_test capi2-7.8 { + sqlite3_changes $DB +} {2} +do_test capi2-7.9 { + stepsql $DB { + SELECT * FROM t1; + } +} {0 21 2 3 22 3 4 23 4 5 24 5 6} +do_test capi2-7.10 { + stepsql $DB { + UPDATE t1 SET a=a-20; + SELECT * FROM t1; + } +} {0 4 1 2 3 2 3 4 3 4 5 4 5 6} + +# Update for version 3: A SELECT statement no longer resets the change +# counter (Test result changes from 0 to 4). +do_test capi2-7.11 { + sqlite3_changes $DB +} {4} +do_test capi2-7.11a { + execsql {SELECT count(*) FROM t1} +} {4} + +ifcapable {explain} { + do_test capi2-7.12 { +btree_breakpoint + set x [stepsql $DB {EXPLAIN SELECT * FROM t1}] + lindex $x 0 + } {0} +} + +# Ticket #261 - make sure we can finalize before the end of a query. 
+# +do_test capi2-8.1 { + set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL] + sqlite3_finalize $VM1 +} {SQLITE_OK} + +# Tickets #384 and #385 - make sure the TAIL argument to sqlite3_prepare +# and all of the return pointers in sqlite_step can be null. +# +do_test capi2-9.1 { + set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 DUMMY] + sqlite3_step $VM1 + sqlite3_finalize $VM1 +} {SQLITE_OK} + +# Test that passing a NULL pointer to sqlite3_finalize() or sqlite3_reset +# does not cause an error. +do_test capi2-10.1 { + sqlite3_finalize 0 +} {SQLITE_OK} +do_test capi2-10.2 { + sqlite3_reset 0 +} {SQLITE_OK} + +#--------------------------------------------------------------------------- +# The following tests - capi2-11.* - test the "column origin" APIs. +# +# sqlite3_column_origin_name() +# sqlite3_column_database_name() +# sqlite3_column_table_name() +# + +ifcapable columnmetadata { + +# This proc uses the database handle $::DB to compile the SQL statement passed +# as a parameter. The return value of this procedure is a list with one +# element for each column returned by the compiled statement. Each element of +# this list is itself a list of length three, consisting of the origin +# database, table and column for the corresponding returned column. +proc check_origins {sql} { + set ret [list] + set ::STMT [sqlite3_prepare $::DB $sql -1 dummy] + for {set i 0} {$i < [sqlite3_column_count $::STMT]} {incr i} { + lappend ret [list \ + [sqlite3_column_database_name $::STMT $i] \ + [sqlite3_column_table_name $::STMT $i] \ + [sqlite3_column_origin_name $::STMT $i] \ + ] + } + sqlite3_finalize $::STMT + return $ret +} +do_test capi2-11.1 { + execsql { + CREATE TABLE tab1(col1, col2); + } +} {} +do_test capi2-11.2 { + check_origins {SELECT col2, col1 FROM tab1} +} [list {main tab1 col2} {main tab1 col1}] +do_test capi2-11.3 { + check_origins {SELECT col2 AS hello, col1 AS world FROM tab1} +} [list {main tab1 col2} {main tab1 col1}] + +ifcapable subquery { + do_test capi2-11.4 { + check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM tab1)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-11.5 { + check_origins {SELECT (SELECT col2 FROM tab1), (SELECT col1 FROM tab1)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-11.6 { + check_origins {SELECT (SELECT col2), (SELECT col1) FROM tab1} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-11.7 { + check_origins {SELECT * FROM tab1} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-11.8 { + check_origins {SELECT * FROM (SELECT * FROM tab1)} + } [list {main tab1 col1} {main tab1 col2}] +} + +ifcapable view&&subquery { + do_test capi2-12.1 { + execsql { + CREATE VIEW view1 AS SELECT * FROM tab1; + } + } {} + do_test capi2-12.2 { + check_origins {SELECT col2, col1 FROM view1} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-12.3 { + check_origins {SELECT col2 AS hello, col1 AS world FROM view1} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-12.4 { + check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM view1)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-12.5 { + check_origins {SELECT (SELECT col2 FROM view1), (SELECT col1 FROM view1)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-12.6 { + check_origins {SELECT (SELECT col2), (SELECT col1) FROM view1} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-12.7 { + check_origins {SELECT * FROM view1} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-12.8 { + 
check_origins {select * from (select * from view1)} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-12.9 { + check_origins {select * from (select * from (select * from view1))} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-12.10 { + db close + sqlite3 db test.db + set ::DB [sqlite3_connection_pointer db] + check_origins {select * from (select * from (select * from view1))} + } [list {main tab1 col1} {main tab1 col2}] + + # This view will thwart the flattening optimization. + do_test capi2-13.1 { + execsql { + CREATE VIEW view2 AS SELECT * FROM tab1 limit 10 offset 10; + } + } {} + breakpoint + do_test capi2-13.2 { + check_origins {SELECT col2, col1 FROM view2} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-13.3 { + check_origins {SELECT col2 AS hello, col1 AS world FROM view2} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-13.4 { + check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM view2)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-13.5 { + check_origins {SELECT (SELECT col2 FROM view2), (SELECT col1 FROM view2)} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-13.6 { + check_origins {SELECT (SELECT col2), (SELECT col1) FROM view2} + } [list {main tab1 col2} {main tab1 col1}] + do_test capi2-13.7 { + check_origins {SELECT * FROM view2} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-13.8 { + check_origins {select * from (select * from view2)} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-13.9 { + check_origins {select * from (select * from (select * from view2))} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-13.10 { + db close + sqlite3 db test.db + set ::DB [sqlite3_connection_pointer db] + check_origins {select * from (select * from (select * from view2))} + } [list {main tab1 col1} {main tab1 col2}] + do_test capi2-13.11 { + check_origins {select * from (select * from tab1 limit 10 offset 10)} + } [list {main tab1 col1} {main tab1 col2}] +} + + +} ;# ifcapable columnmetadata + +db2 close +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/capi3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3.test new file mode 100644 index 0000000..2c89c43 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3.test @@ -0,0 +1,1071 @@ +# 2003 January 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the callback-free C/C++ API. +# +# $Id: capi3.test,v 1.55 2007/08/29 19:15:09 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +# Return the UTF-8 representation of the supplied UTF-16 string $str. +proc utf8 {str} { + # If $str ends in two 0x00 0x00 bytes, knock these off before + # converting to UTF-8 using TCL. 
+ binary scan $str \c* vals + if {[lindex $vals end]==0 && [lindex $vals end-1]==0} { + set str [binary format \c* [lrange $vals 0 end-2]] + } + + set r [encoding convertfrom unicode $str] + return $r +} + +# These tests complement those in capi2.test. They are organized +# as follows: +# +# capi3-1.*: Test sqlite3_prepare +# capi3-2.*: Test sqlite3_prepare16 +# capi3-3.*: Test sqlite3_open +# capi3-4.*: Test sqlite3_open16 +# capi3-5.*: Test the various sqlite3_result_* APIs +# capi3-6.*: Test that sqlite3_close fails if there are outstanding VMs. +# + +set DB [sqlite3_connection_pointer db] + +do_test capi3-1.0 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3-1.1 { + set STMT [sqlite3_prepare $DB {SELECT name FROM sqlite_master} -1 TAIL] + sqlite3_finalize $STMT + set TAIL +} {} +do_test capi3-1.2 { + sqlite3_errcode $DB +} {SQLITE_OK} +do_test capi3-1.3 { + sqlite3_errmsg $DB +} {not an error} +do_test capi3-1.4 { + set sql {SELECT name FROM sqlite_master;SELECT 10} + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_finalize $STMT + set TAIL +} {SELECT 10} +do_test capi3-1.5 { + set sql {SELECT namex FROM sqlite_master} + catch { + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + } +} {1} +do_test capi3-1.6 { + sqlite3_errcode $DB +} {SQLITE_ERROR} +do_test capi3-1.7 { + sqlite3_errmsg $DB +} {no such column: namex} + +ifcapable {utf16} { + do_test capi3-2.1 { + set sql16 [utf16 {SELECT name FROM sqlite_master}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 ::TAIL] + sqlite3_finalize $STMT + utf8 $::TAIL + } {} + do_test capi3-2.2 { + set sql [utf16 {SELECT name FROM sqlite_master;SELECT 10}] + set STMT [sqlite3_prepare16 $DB $sql -1 TAIL] + sqlite3_finalize $STMT + utf8 $TAIL + } {SELECT 10} + do_test capi3-2.3 { + set sql [utf16 {SELECT namex FROM sqlite_master}] + catch { + set STMT [sqlite3_prepare16 $DB $sql -1 TAIL] + } + } {1} + do_test capi3-2.4 { + sqlite3_errcode $DB + } {SQLITE_ERROR} + do_test capi3-2.5 { + sqlite3_errmsg $DB + } {no such column: namex} + + ifcapable schema_pragmas { + do_test capi3-2.6 { + execsql {CREATE TABLE tablename(x)} + set sql16 [utf16 {PRAGMA table_info("TableName")}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + } SQLITE_ROW + do_test capi3-2.7 { + sqlite3_step $STMT + } SQLITE_DONE + do_test capi3-2.8 { + sqlite3_finalize $STMT + } SQLITE_OK + } + +} ;# endif utf16 + +# rename sqlite3_open sqlite3_open_old +# proc sqlite3_open {fname options} {sqlite3_open_new $fname $options} + +do_test capi3-3.1 { + set db2 [sqlite3_open test.db {}] + sqlite3_errcode $db2 +} {SQLITE_OK} +# FIX ME: Should test the db handle works. +do_test capi3-3.2 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3-3.3 { + catch { + set db2 [sqlite3_open /bogus/path/test.db {}] + } + sqlite3_errcode $db2 +} {SQLITE_CANTOPEN} +do_test capi3-3.4 { + sqlite3_errmsg $db2 +} {unable to open database file} +do_test capi3-3.5 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3-3.6.1-misuse { + sqlite3_close $db2 +} {SQLITE_MISUSE} +do_test capi3-3.6.2-misuse { + sqlite3_errmsg $db2 +} {library routine called out of sequence} +ifcapable {utf16} { + do_test capi3-3.6.3-misuse { + utf8 [sqlite3_errmsg16 $db2] + } {library routine called out of sequence} +} + +# rename sqlite3_open "" +# rename sqlite3_open_old sqlite3_open + +ifcapable {utf16} { +do_test capi3-4.1 { + set db2 [sqlite3_open16 [utf16 test.db] {}] + sqlite3_errcode $db2 +} {SQLITE_OK} +# FIX ME: Should test the db handle works. 
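+# One hypothetical way to exercise the new handle before it is closed
+# (a sketch only; the test name capi3-4.1b and the query are placeholders):
+#
+#   do_test capi3-4.1b {
+#     set STMT [sqlite3_prepare $db2 {SELECT 1} -1 TAIL]
+#     set rc [sqlite3_step $STMT]
+#     sqlite3_finalize $STMT
+#     set rc
+#   } {SQLITE_ROW}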
+do_test capi3-4.2 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3-4.3 { + catch { + set db2 [sqlite3_open16 [utf16 /bogus/path/test.db] {}] + } + sqlite3_errcode $db2 +} {SQLITE_CANTOPEN} +do_test capi3-4.4 { + utf8 [sqlite3_errmsg16 $db2] +} {unable to open database file} +do_test capi3-4.5 { + sqlite3_close $db2 +} {SQLITE_OK} +} ;# utf16 + +# This proc is used to test the following API calls: +# +# sqlite3_column_count +# sqlite3_column_name +# sqlite3_column_name16 +# sqlite3_column_decltype +# sqlite3_column_decltype16 +# +# $STMT is a compiled SQL statement. $test is a prefix +# to use for test names within this proc. $names is a list +# of the column names that should be returned by $STMT. +# $decltypes is a list of column declaration types for $STMT. +# +# Example: +# +# set STMT [sqlite3_prepare "SELECT 1, 2, 2;" -1 DUMMY] +# check_header test1.1 {1 2 3} {"" "" ""} +# +proc check_header {STMT test names decltypes} { + + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. + set ::idxlist [list] + set ::numcols [sqlite3_column_count $STMT] + for {set i 0} {$i < $::numcols} {incr i} {lappend ::idxlist $i} + + # Column names in UTF-8 + do_test $test.1 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_name $STMT $i]} + set cnamelist + } $names + + # Column names in UTF-16 + ifcapable {utf16} { + do_test $test.2 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_name16 $STMT $i]] + } + set cnamelist + } $names + } + + # Column names in UTF-8 + do_test $test.3 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_name $STMT $i]} + set cnamelist + } $names + + # Column names in UTF-16 + ifcapable {utf16} { + do_test $test.4 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_name16 $STMT $i]] + } + set cnamelist + } $names + } + + # Column names in UTF-8 + do_test $test.5 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_decltype $STMT $i]} + set cnamelist + } $decltypes + + # Column declaration types in UTF-16 + ifcapable {utf16} { + do_test $test.6 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_decltype16 $STMT $i]] + } + set cnamelist + } $decltypes + } + + + # Test some out of range conditions: + ifcapable {utf16} { + do_test $test.7 { + list \ + [sqlite3_column_name $STMT -1] \ + [sqlite3_column_name16 $STMT -1] \ + [sqlite3_column_decltype $STMT -1] \ + [sqlite3_column_decltype16 $STMT -1] \ + [sqlite3_column_name $STMT $numcols] \ + [sqlite3_column_name16 $STMT $numcols] \ + [sqlite3_column_decltype $STMT $numcols] \ + [sqlite3_column_decltype16 $STMT $numcols] + } {{} {} {} {} {} {} {} {}} + } +} + +# This proc is used to test the following API calls: +# +# sqlite3_column_origin_name +# sqlite3_column_origin_name16 +# sqlite3_column_table_name +# sqlite3_column_table_name16 +# sqlite3_column_database_name +# sqlite3_column_database_name16 +# +# $STMT is a compiled SQL statement. $test is a prefix +# to use for test names within this proc. $names is a list +# of the column names that should be returned by $STMT. +# $decltypes is a list of column declaration types for $STMT. 
+# +# Example: +# +# set STMT [sqlite3_prepare "SELECT 1, 2, 2;" -1 DUMMY] +# check_header test1.1 {1 2 3} {"" "" ""} +# +proc check_origin_header {STMT test dbs tables cols} { + # If sqlite3_column_origin_name() and friends are not compiled into + # this build, this proc is a no-op. + ifcapable columnmetadata { + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. + set ::idxlist [list] + set ::numcols [sqlite3_column_count $STMT] + for {set i 0} {$i < $::numcols} {incr i} {lappend ::idxlist $i} + + # Database names in UTF-8 + do_test $test.8 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_database_name $STMT $i] + } + set cnamelist + } $dbs + + # Database names in UTF-16 + ifcapable {utf16} { + do_test $test.9 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_database_name16 $STMT $i]] + } + set cnamelist + } $dbs + } + + # Table names in UTF-8 + do_test $test.10 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_table_name $STMT $i] + } + set cnamelist + } $tables + + # Table names in UTF-16 + ifcapable {utf16} { + do_test $test.11 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_table_name16 $STMT $i]] + } + set cnamelist + } $tables + } + + # Origin names in UTF-8 + do_test $test.12 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_origin_name $STMT $i] + } + set cnamelist + } $cols + + # Origin declaration types in UTF-16 + ifcapable {utf16} { + do_test $test.13 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_origin_name16 $STMT $i]] + } + set cnamelist + } $cols + } + } +} + +# This proc is used to test the following APIs: +# +# sqlite3_data_count +# sqlite3_column_type +# sqlite3_column_int +# sqlite3_column_text +# sqlite3_column_text16 +# sqlite3_column_double +# +# $STMT is a compiled SQL statement for which the previous call +# to sqlite3_step returned SQLITE_ROW. $test is a prefix to use +# for test names within this proc. $types is a list of the +# manifest types for the current row. $ints, $doubles and $strings +# are lists of the integer, real and string representations of +# the values in the current row. +# +# Example: +# +# set STMT [sqlite3_prepare "SELECT 'hello', 1.1, NULL" -1 DUMMY] +# sqlite3_step $STMT +# check_data test1.2 {TEXT REAL NULL} {0 1 0} {0 1.1 0} {hello 1.1 {}} +# +proc check_data {STMT test types ints doubles strings} { + + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. 
+ set ::idxlist [list] + set numcols [sqlite3_data_count $STMT] + for {set i 0} {$i < $numcols} {incr i} {lappend ::idxlist $i} + +# types +do_test $test.1 { + set types [list] + foreach i $idxlist {lappend types [sqlite3_column_type $STMT $i]} + set types +} $types + +# Integers +do_test $test.2 { + set ints [list] + foreach i $idxlist {lappend ints [sqlite3_column_int64 $STMT $i]} + set ints +} $ints + +# bytes +set lens [list] +foreach i $::idxlist { + lappend lens [string length [lindex $strings $i]] +} +do_test $test.3 { + set bytes [list] + set lens [list] + foreach i $idxlist { + lappend bytes [sqlite3_column_bytes $STMT $i] + } + set bytes +} $lens + +# bytes16 +ifcapable {utf16} { + set lens [list] + foreach i $::idxlist { + lappend lens [expr 2 * [string length [lindex $strings $i]]] + } + do_test $test.4 { + set bytes [list] + set lens [list] + foreach i $idxlist { + lappend bytes [sqlite3_column_bytes16 $STMT $i] + } + set bytes + } $lens +} + +# Blob +do_test $test.5 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_blob $STMT $i]} + set utf8 +} $strings + +# UTF-8 +do_test $test.6 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_text $STMT $i]} + set utf8 +} $strings + +# Floats +do_test $test.7 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_double $STMT $i]} + set utf8 +} $doubles + +# UTF-16 +ifcapable {utf16} { + do_test $test.8 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [utf8 [sqlite3_column_text16 $STMT $i]]} + set utf8 + } $strings +} + +# Integers +do_test $test.9 { + set ints [list] + foreach i $idxlist {lappend ints [sqlite3_column_int $STMT $i]} + set ints +} $ints + +# Floats +do_test $test.10 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_double $STMT $i]} + set utf8 +} $doubles + +# UTF-8 +do_test $test.11 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_text $STMT $i]} + set utf8 +} $strings + +# Types +do_test $test.12 { + set types [list] + foreach i $idxlist {lappend types [sqlite3_column_type $STMT $i]} + set types +} $types + +# Test that an out of range request returns the equivalent of NULL +do_test $test.13 { + sqlite3_column_int $STMT -1 +} {0} +do_test $test.13 { + sqlite3_column_text $STMT -1 +} {} + +} + +ifcapable !floatingpoint { + finish_test + return +} + +do_test capi3-5.0 { + execsql { + CREATE TABLE t1(a VARINT, b BLOB, c VARCHAR(16)); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES('one', 'two', NULL); + INSERT INTO t1 VALUES(1.2, 1.3, 1.4); + } + set sql "SELECT * FROM t1" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_column_count $STMT +} 3 + +check_header $STMT capi3-5.1 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3-5.1 {main main main} {t1 t1 t1} {a b c} +do_test capi3-5.2 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3-5.3 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3-5.3 {main main main} {t1 t1 t1} {a b c} +check_data $STMT capi3-5.4 {INTEGER INTEGER TEXT} {1 2 3} {1.0 2.0 3.0} {1 2 3} + +do_test capi3-5.5 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3-5.6 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3-5.6 {main main main} {t1 t1 t1} {a b c} +check_data $STMT capi3-5.7 {TEXT TEXT NULL} {0 0 0} {0.0 0.0 0.0} {one two {}} + +do_test capi3-5.8 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3-5.9 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3-5.9 {main main main} {t1 
t1 t1} {a b c} +check_data $STMT capi3-5.10 {FLOAT FLOAT TEXT} {1 1 1} {1.2 1.3 1.4} {1.2 1.3 1.4} + +do_test capi3-5.11 { + sqlite3_step $STMT +} SQLITE_DONE + +do_test capi3-5.12 { + sqlite3_finalize $STMT +} SQLITE_OK + +do_test capi3-5.20 { + set sql "SELECT a, sum(b), max(c) FROM t1 GROUP BY a" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_column_count $STMT +} 3 + +check_header $STMT capi3-5.21 {a sum(b) max(c)} {VARINT {} {}} +check_origin_header $STMT capi3-5.22 {main {} {}} {t1 {} {}} {a {} {}} +do_test capi3-5.23 { + sqlite3_finalize $STMT +} SQLITE_OK + +do_test capi3-5.30 { + set sql "SELECT a AS x, sum(b) AS y, max(c) AS z FROM t1 AS m GROUP BY x" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_column_count $STMT +} 3 + +check_header $STMT capi3-5.31 {x y z} {VARINT {} {}} +check_origin_header $STMT capi3-5.32 {main {} {}} {t1 {} {}} {a {} {}} +do_test capi3-5.33 { + sqlite3_finalize $STMT +} SQLITE_OK + + +set ::ENC [execsql {pragma encoding}] +db close + +do_test capi3-6.0 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_key $DB xyzzy + set sql {SELECT a FROM t1 order by rowid} + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + expr 0 +} {0} +do_test capi3-6.1 { + db cache flush + sqlite3_close $DB +} {SQLITE_BUSY} +do_test capi3-6.2 { + sqlite3_step $STMT +} {SQLITE_ROW} +check_data $STMT capi3-6.3 {INTEGER} {1} {1.0} {1} +do_test capi3-6.3 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3-6.4-misuse { + db cache flush + sqlite3_close $DB +} {SQLITE_OK} +db close + +if {![sqlite3 -has-codec]} { + # Test what happens when the library encounters a newer file format. + # Do this by updating the file format via the btree layer. + do_test capi3-7.1 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set meta [btree_get_meta $::bt] + lset meta 2 5 + eval [concat btree_update_meta $::bt [lrange $meta 0 end]] + btree_commit $::bt + btree_close $::bt + } {} + do_test capi3-7.2 { + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {unsupported file format}} + db close +} + +if {![sqlite3 -has-codec]} { + # Now test that the library correctly handles bogus entries in the + # sqlite_master table (schema corruption). + do_test capi3-8.1 { + file delete -force test.db + file delete -force test.db-journal + sqlite3 db test.db + execsql { + CREATE TABLE t1(a); + } + db close + } {} + do_test capi3-8.2 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set ::bc [btree_cursor $::bt 1 1] + + # Build a 5-field row record consisting of 5 null records. This is + # officially black magic. + catch {unset data} + set data [binary format c6 {6 0 0 0 0 0}] + btree_insert $::bc 5 $data + + btree_close_cursor $::bc + btree_commit $::bt + btree_close $::bt + } {} + do_test capi3-8.3 { + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {malformed database schema}} + do_test capi3-8.4 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set ::bc [btree_cursor $::bt 1 1] + + # Build a 5-field row record. The first field is a string 'table', and + # subsequent fields are all NULL. Replace the other broken record with + # this one and try to read the schema again. The broken record uses + # either UTF-8 or native UTF-16 (if this file is being run by + # utf16.test). 
+ if { [string match UTF-16* $::ENC] } { + set data [binary format c6a10 {6 33 0 0 0 0} [utf16 table]] + } else { + set data [binary format c6a5 {6 23 0 0 0 0} table] + } + btree_insert $::bc 5 $data + + btree_close_cursor $::bc + btree_commit $::bt + btree_close $::bt + } {}; + do_test capi3-8.5 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {malformed database schema}} + db close +} +file delete -force test.db +file delete -force test.db-journal + + +# Test the english language string equivalents for sqlite error codes +set code2english [list \ +SQLITE_OK {not an error} \ +SQLITE_ERROR {SQL logic error or missing database} \ +SQLITE_PERM {access permission denied} \ +SQLITE_ABORT {callback requested query abort} \ +SQLITE_BUSY {database is locked} \ +SQLITE_LOCKED {database table is locked} \ +SQLITE_NOMEM {out of memory} \ +SQLITE_READONLY {attempt to write a readonly database} \ +SQLITE_INTERRUPT {interrupted} \ +SQLITE_IOERR {disk I/O error} \ +SQLITE_CORRUPT {database disk image is malformed} \ +SQLITE_FULL {database or disk is full} \ +SQLITE_CANTOPEN {unable to open database file} \ +SQLITE_EMPTY {table contains no data} \ +SQLITE_SCHEMA {database schema has changed} \ +SQLITE_CONSTRAINT {constraint failed} \ +SQLITE_MISMATCH {datatype mismatch} \ +SQLITE_MISUSE {library routine called out of sequence} \ +SQLITE_NOLFS {kernel lacks large file support} \ +SQLITE_AUTH {authorization denied} \ +SQLITE_FORMAT {auxiliary database format error} \ +SQLITE_RANGE {bind or column index out of range} \ +SQLITE_NOTADB {file is encrypted or is not a database} \ +unknownerror {unknown error} \ +] + +set test_number 1 +foreach {code english} $code2english { + do_test capi3-9.$test_number "sqlite3_test_errstr $code" $english + incr test_number +} + +# Test the error message when a "real" out of memory occurs. +ifcapable memdebug { + do_test capi3-10-1 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_memdebug_fail 1 + catchsql { + select * from sqlite_master; + } + } {1 {out of memory}} + do_test capi3-10-2 { + sqlite3_errmsg $::DB + } {out of memory} + ifcapable {utf16} { + do_test capi3-10-3 { + utf8 [sqlite3_errmsg16 $::DB] + } {out of memory} + } + db close + sqlite3_memdebug_fail -1 +} + +# The following tests - capi3-11.* - test that a COMMIT or ROLLBACK +# statement issued while there are still outstanding VMs that are part of +# the transaction fails. 
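+# In outline, the pattern exercised below is (a hypothetical sketch; the
+# SQL and values are placeholders):
+#
+#   execsql {BEGIN; INSERT INTO t1 VALUES(3, 'three')}  ;# open a transaction
+#   set STMT [sqlite3_prepare $DB {SELECT a FROM t1} -1 TAIL]
+#   sqlite3_step $STMT      ;# SQLITE_ROW - the statement is now in progress
+#   catchsql {COMMIT}       ;# {1 {cannot commit transaction - SQL statements in progress}}
+#   sqlite3_finalize $STMT  ;# (a sqlite3_reset would also release it)
+#   catchsql {COMMIT}       ;# {0 {}}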
+sqlite3 db test.db +set DB [sqlite3_connection_pointer db] +sqlite_register_test_function $DB func +do_test capi3-11.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'int'); + INSERT INTO t1 VALUES(2, 'notatype'); + } +} {} +do_test capi3-11.1.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.2 { + set STMT [sqlite3_prepare $DB "SELECT func(b, a) FROM t1" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-11.3 { + catchsql { + COMMIT; + } +} {1 {cannot commit transaction - SQL statements in progress}} +do_test capi3-11.3.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.4 { + sqlite3_step $STMT +} {SQLITE_ERROR} +do_test capi3-11.5 { + sqlite3_finalize $STMT +} {SQLITE_ERROR} +do_test capi3-11.6 { + catchsql { + SELECT * FROM t1; + } +} {0 {1 int 2 notatype}} +do_test capi3-11.6.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.7 { + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3-11.7.1 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3-11.8 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + BEGIN; + INSERT INTO t2 VALUES(3); + } +} {} +do_test capi3-11.8.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.9 { + set STMT [sqlite3_prepare $DB "SELECT a FROM t2" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-11.9.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.9.2 { + catchsql { + ROLLBACK; + } +} {1 {cannot rollback transaction - SQL statements in progress}} +do_test capi3-11.9.3 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.10 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-11.11 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-11.12 { + sqlite3_step $STMT +} {SQLITE_DONE} +do_test capi3-11.13 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3-11.14 { + execsql { + SELECT a FROM t2; + } +} {1 2 3} +do_test capi3-11.14.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3-11.15 { + catchsql { + ROLLBACK; + } +} {0 {}} +do_test capi3-11.15.1 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3-11.16 { + execsql { + SELECT a FROM t2; + } +} {1 2} + +# Sanity check on the definition of 'outstanding VM'. This means any VM +# that has had sqlite3_step() called more recently than sqlite3_finalize() or +# sqlite3_reset(). So a VM that has just been prepared or reset does not +# count as an active VM. 
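+# Illustration of that definition (a hypothetical sketch, not executed):
+#
+#   set S [sqlite3_prepare $DB {SELECT a FROM t1} -1 TAIL]
+#                          ;# just prepared: not active, COMMIT is allowed
+#   sqlite3_step $S        ;# stepped: active, COMMIT is refused
+#   sqlite3_reset $S       ;# reset: no longer active, COMMIT is allowed again
+#   sqlite3_finalize $S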
+do_test capi3-11.17 { + execsql { + BEGIN; + } +} {} +do_test capi3-11.18 { + set STMT [sqlite3_prepare $DB "SELECT a FROM t1" -1 TAIL] + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3-11.19 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-11.20 { + catchsql { + BEGIN; + COMMIT; + } +} {1 {cannot commit transaction - SQL statements in progress}} +do_test capi3-11.20 { + sqlite3_reset $STMT + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3-11.21 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +# The following tests - capi3-12.* - check that it's Ok to start a +# transaction while other VMs are active, and that it's Ok to execute +# atomic updates in the same situation +# +do_test capi3-12.1 { + set STMT [sqlite3_prepare $DB "SELECT a FROM t2" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-12.2 { + catchsql { + INSERT INTO t1 VALUES(3, NULL); + } +} {0 {}} +do_test capi3-12.3 { + catchsql { + INSERT INTO t2 VALUES(4); + } +} {0 {}} +do_test capi3-12.4 { + catchsql { + BEGIN; + INSERT INTO t1 VALUES(4, NULL); + } +} {0 {}} +do_test capi3-12.5 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-12.5.1 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3-12.6 { + sqlite3_step $STMT +} {SQLITE_DONE} +do_test capi3-12.7 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3-12.8 { + execsql { + COMMIT; + SELECT a FROM t1; + } +} {1 2 3 4} + +# Test cases capi3-13.* test the sqlite3_clear_bindings() and +# sqlite3_sleep APIs. +# +if {[llength [info commands sqlite3_clear_bindings]]>0} { + do_test capi3-13.1 { + execsql { + DELETE FROM t1; + } + set STMT [sqlite3_prepare $DB "INSERT INTO t1 VALUES(?, ?)" -1 TAIL] + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3-13.2 { + sqlite3_reset $STMT + sqlite3_bind_text $STMT 1 hello 5 + sqlite3_bind_text $STMT 2 world 5 + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3-13.3 { + sqlite3_reset $STMT + sqlite3_clear_bindings $STMT + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3-13-4 { + sqlite3_finalize $STMT + execsql { + SELECT * FROM t1; + } + } {{} {} hello world {} {}} +} +if {[llength [info commands sqlite3_sleep]]>0} { + do_test capi3-13-5 { + set ms [sqlite3_sleep 80] + expr {$ms==80 || $ms==1000} + } {1} +} + +# Ticket #1219: Make sure binding APIs can handle a NULL pointer. +# +do_test capi3-14.1-misuse { + set rc [catch {sqlite3_bind_text 0 1 hello 5} msg] + lappend rc $msg +} {1 SQLITE_MISUSE} + +# Ticket #1650: Honor the nBytes parameter to sqlite3_prepare. +# +do_test capi3-15.1 { + set sql {SELECT * FROM t2} + set nbytes [string length $sql] + append sql { WHERE a==1} + set STMT [sqlite3_prepare $DB $sql $nbytes TAIL] + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {1} +do_test capi3-15.2 { + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {2} +do_test capi3-15.3 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +# Make sure code is always generated even if an IF EXISTS or +# IF NOT EXISTS clause is present that the table does not or +# does exists. That way we will always have a prepared statement +# to expire when the schema changes. 
+# +do_test capi3-16.1 { + set sql {DROP TABLE IF EXISTS t3} + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT!=""} +} {1} +do_test capi3-16.2 { + set sql {CREATE TABLE IF NOT EXISTS t1(x,y)} + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT!=""} +} {1} + +# But still we do not generate code if there is no SQL +# +do_test capi3-16.3 { + set STMT [sqlite3_prepare $DB {} -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT==""} +} {1} +do_test capi3-16.4 { + set STMT [sqlite3_prepare $DB {;} -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT==""} +} {1} + +# Ticket #2426: Misuse of sqlite3_column_* by calling it after +# a sqlite3_reset should be harmless. +# +do_test capi3-17.1 { + set STMT [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL] + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {1} +do_test capi3-17.2 { + sqlite3_reset $STMT + sqlite3_column_int $STMT 0 +} {0} +do_test capi3-17.3 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/capi3b.test b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3b.test new file mode 100644 index 0000000..44790c7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3b.test @@ -0,0 +1,145 @@ +# 2004 September 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script testing the callback-free C/C++ API and in +# particular the behavior of sqlite3_step() when trying to commit +# with lock contention. +# +# $Id: capi3b.test,v 1.4 2007/08/10 19:46:14 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# These tests depend on the pager holding changes in cache +# until it is time to commit. But that won't happen if the +# soft-heap-limit is set too low. So disable the soft heap limit +# for the duration of this test. +# +sqlite3_soft_heap_limit 0 + + +set DB [sqlite3_connection_pointer db] +sqlite3 db2 test.db +set DB2 [sqlite3_connection_pointer db2] + +# Create some data in the database +# +do_test capi3b-1.1 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + SELECT * FROM t1 + } +} {1 2} + +# Make sure the second database connection can see the data +# +do_test capi3b-1.2 { + execsql { + SELECT * FROM t1 + } db2 +} {1 2} + +# First database connection acquires a shared lock +# +do_test capi3b-1.3 { + execsql { + BEGIN; + SELECT * FROM t1; + } +} {1 2} + +# Second database connection tries to write. The sqlite3_step() +# function returns SQLITE_BUSY because it cannot commit. +# +do_test capi3b-1.4 { + set VM [sqlite3_prepare $DB2 {INSERT INTO t1 VALUES(3)} -1 TAIL] + sqlite3_step $VM +} SQLITE_BUSY + +# The sqlite3_step call can be repeated multiple times. +# +do_test capi3b-1.5.1 { + sqlite3_step $VM +} SQLITE_BUSY +do_test capi3b-1.5.2 { + sqlite3_step $VM +} SQLITE_BUSY + +# The first connection closes its transaction. This allows the second +# connections sqlite3_step to succeed. 
+# +do_test capi3b-1.6 { + execsql COMMIT + sqlite3_step $VM +} SQLITE_DONE +do_test capi3b-1.7 { + sqlite3_finalize $VM +} SQLITE_OK +do_test capi3b-1.8 { + execsql {SELECT * FROM t1} db2 +} {1 2 3} +do_test capi3b-1.9 { + execsql {SELECT * FROM t1} +} {1 2 3} + +# Start doing a SELECT with one connection. This gets a SHARED lock. +# Then do an INSERT with the other connection. The INSERT should +# not be able to complete until the SELECT finishes. +# +do_test capi3b-2.1 { + set VM1 [sqlite3_prepare $DB {SELECT * FROM t1} -1 TAIL] + sqlite3_step $VM1 +} SQLITE_ROW +do_test capi3b-2.2 { + sqlite3_column_text $VM1 0 +} 1 +do_test capi3b-2.3 { + set VM2 [sqlite3_prepare $DB2 {INSERT INTO t1 VALUES(4)} -1 TAIL] + sqlite3_step $VM2 +} SQLITE_BUSY +do_test capi3b-2.4 { + sqlite3_step $VM1 +} SQLITE_ROW +do_test capi3b-2.5 { + sqlite3_column_text $VM1 0 +} 2 +do_test capi3b-2.6 { + sqlite3_step $VM2 +} SQLITE_BUSY +do_test capi3b-2.7 { + sqlite3_step $VM1 +} SQLITE_ROW +do_test capi3b-2.8 { + sqlite3_column_text $VM1 0 +} 3 +do_test capi3b-2.9 { + sqlite3_step $VM2 +} SQLITE_BUSY +do_test capi3b-2.10 { + sqlite3_step $VM1 +} SQLITE_DONE +do_test capi3b-2.11 { + sqlite3_step $VM2 +} SQLITE_DONE +do_test capi3b-2.12 { + sqlite3_finalize $VM1 + sqlite3_finalize $VM2 + execsql {SELECT * FROM t1} +} {1 2 3 4} + +catch {db2 close} + +sqlite3_soft_heap_limit $soft_limit +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/capi3c.test b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3c.test new file mode 100644 index 0000000..750e73a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/capi3c.test @@ -0,0 +1,1245 @@ +# 2006 November 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This is a copy of the capi3.test file that has been adapted to +# test the new sqlite3_prepare_v2 interface. +# +# $Id: capi3c.test,v 1.12 2007/09/03 07:31:10 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +# Return the UTF-8 representation of the supplied UTF-16 string $str. +proc utf8 {str} { + # If $str ends in two 0x00 0x00 bytes, knock these off before + # converting to UTF-8 using TCL. + binary scan $str \c* vals + if {[lindex $vals end]==0 && [lindex $vals end-1]==0} { + set str [binary format \c* [lrange $vals 0 end-2]] + } + + set r [encoding convertfrom unicode $str] + return $r +} + +# These tests complement those in capi2.test. They are organized +# as follows: +# +# capi3c-1.*: Test sqlite3_prepare_v2 +# capi3c-2.*: Test sqlite3_prepare16_v2 +# capi3c-3.*: Test sqlite3_open +# capi3c-4.*: Test sqlite3_open16 +# capi3c-5.*: Test the various sqlite3_result_* APIs +# capi3c-6.*: Test that sqlite3_close fails if there are outstanding VMs. 
+# + +set DB [sqlite3_connection_pointer db] + +do_test capi3c-1.0 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3c-1.1 { + set STMT [sqlite3_prepare_v2 $DB {SELECT name FROM sqlite_master} -1 TAIL] + sqlite3_finalize $STMT + set TAIL +} {} +do_test capi3c-1.2 { + sqlite3_errcode $DB +} {SQLITE_OK} +do_test capi3c-1.3 { + sqlite3_errmsg $DB +} {not an error} +do_test capi3c-1.4 { + set sql {SELECT name FROM sqlite_master;SELECT 10} + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_finalize $STMT + set TAIL +} {SELECT 10} +do_test capi3c-1.5 { + set sql {SELECT namex FROM sqlite_master} + catch { + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + } +} {1} +do_test capi3c-1.6 { + sqlite3_errcode $DB +} {SQLITE_ERROR} +do_test capi3c-1.7 { + sqlite3_errmsg $DB +} {no such column: namex} + +ifcapable {utf16} { + do_test capi3c-2.1 { + set sql16 [utf16 {SELECT name FROM sqlite_master}] + set STMT [sqlite3_prepare16_v2 $DB $sql16 -1 ::TAIL] + sqlite3_finalize $STMT + utf8 $::TAIL + } {} + do_test capi3c-2.2 { + set sql [utf16 {SELECT name FROM sqlite_master;SELECT 10}] + set STMT [sqlite3_prepare16_v2 $DB $sql -1 TAIL] + sqlite3_finalize $STMT + utf8 $TAIL + } {SELECT 10} + do_test capi3c-2.3 { + set sql [utf16 {SELECT namex FROM sqlite_master}] + catch { + set STMT [sqlite3_prepare16_v2 $DB $sql -1 TAIL] + } + } {1} + do_test capi3c-2.4 { + sqlite3_errcode $DB + } {SQLITE_ERROR} + do_test capi3c-2.5 { + sqlite3_errmsg $DB + } {no such column: namex} + + ifcapable schema_pragmas { + do_test capi3c-2.6 { + execsql {CREATE TABLE tablename(x)} + set sql16 [utf16 {PRAGMA table_info("TableName")}] + set STMT [sqlite3_prepare16_v2 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + } SQLITE_ROW + do_test capi3c-2.7 { + sqlite3_step $STMT + } SQLITE_DONE + do_test capi3c-2.8 { + sqlite3_finalize $STMT + } SQLITE_OK + } + +} ;# endif utf16 + +# rename sqlite3_open sqlite3_open_old +# proc sqlite3_open {fname options} {sqlite3_open_new $fname $options} + +do_test capi3c-3.1 { + set db2 [sqlite3_open test.db {}] + sqlite3_errcode $db2 +} {SQLITE_OK} +# FIX ME: Should test the db handle works. +do_test capi3c-3.2 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3c-3.3 { + catch { + set db2 [sqlite3_open /bogus/path/test.db {}] + } + sqlite3_errcode $db2 +} {SQLITE_CANTOPEN} +do_test capi3c-3.4 { + sqlite3_errmsg $db2 +} {unable to open database file} +do_test capi3c-3.5 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3c-3.6.1-misuse { + sqlite3_close $db2 +} {SQLITE_MISUSE} +do_test capi3c-3.6.2-misuse { + sqlite3_errmsg $db2 +} {library routine called out of sequence} +ifcapable {utf16} { + do_test capi3c-3.6.3-misuse { + utf8 [sqlite3_errmsg16 $db2] + } {library routine called out of sequence} +} + +# rename sqlite3_open "" +# rename sqlite3_open_old sqlite3_open + +ifcapable {utf16} { +do_test capi3c-4.1 { + set db2 [sqlite3_open16 [utf16 test.db] {}] + sqlite3_errcode $db2 +} {SQLITE_OK} +# FIX ME: Should test the db handle works. 
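+# One hypothetical way to exercise the new handle before it is closed,
+# using the _v2 interface under test in this file (a sketch only; the
+# query is a placeholder):
+#
+#   set STMT [sqlite3_prepare_v2 $db2 {SELECT 1} -1 TAIL]
+#   sqlite3_step $STMT       ;# expected: SQLITE_ROW
+#   sqlite3_finalize $STMT   ;# expected: SQLITE_OK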
+do_test capi3c-4.2 { + sqlite3_close $db2 +} {SQLITE_OK} +do_test capi3c-4.3 { + catch { + set db2 [sqlite3_open16 [utf16 /bogus/path/test.db] {}] + } + sqlite3_errcode $db2 +} {SQLITE_CANTOPEN} +do_test capi3c-4.4 { + utf8 [sqlite3_errmsg16 $db2] +} {unable to open database file} +do_test capi3c-4.5 { + sqlite3_close $db2 +} {SQLITE_OK} +} ;# utf16 + +# This proc is used to test the following API calls: +# +# sqlite3_column_count +# sqlite3_column_name +# sqlite3_column_name16 +# sqlite3_column_decltype +# sqlite3_column_decltype16 +# +# $STMT is a compiled SQL statement. $test is a prefix +# to use for test names within this proc. $names is a list +# of the column names that should be returned by $STMT. +# $decltypes is a list of column declaration types for $STMT. +# +# Example: +# +# set STMT [sqlite3_prepare_v2 "SELECT 1, 2, 2;" -1 DUMMY] +# check_header test1.1 {1 2 3} {"" "" ""} +# +proc check_header {STMT test names decltypes} { + + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. + set ::idxlist [list] + set ::numcols [sqlite3_column_count $STMT] + for {set i 0} {$i < $::numcols} {incr i} {lappend ::idxlist $i} + + # Column names in UTF-8 + do_test $test.1 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_name $STMT $i]} + set cnamelist + } $names + + # Column names in UTF-16 + ifcapable {utf16} { + do_test $test.2 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_name16 $STMT $i]] + } + set cnamelist + } $names + } + + # Column names in UTF-8 + do_test $test.3 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_name $STMT $i]} + set cnamelist + } $names + + # Column names in UTF-16 + ifcapable {utf16} { + do_test $test.4 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_name16 $STMT $i]] + } + set cnamelist + } $names + } + + # Column names in UTF-8 + do_test $test.5 { + set cnamelist [list] + foreach i $idxlist {lappend cnamelist [sqlite3_column_decltype $STMT $i]} + set cnamelist + } $decltypes + + # Column declaration types in UTF-16 + ifcapable {utf16} { + do_test $test.6 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_decltype16 $STMT $i]] + } + set cnamelist + } $decltypes + } + + + # Test some out of range conditions: + ifcapable {utf16} { + do_test $test.7 { + list \ + [sqlite3_column_name $STMT -1] \ + [sqlite3_column_name16 $STMT -1] \ + [sqlite3_column_decltype $STMT -1] \ + [sqlite3_column_decltype16 $STMT -1] \ + [sqlite3_column_name $STMT $numcols] \ + [sqlite3_column_name16 $STMT $numcols] \ + [sqlite3_column_decltype $STMT $numcols] \ + [sqlite3_column_decltype16 $STMT $numcols] + } {{} {} {} {} {} {} {} {}} + } +} + +# This proc is used to test the following API calls: +# +# sqlite3_column_origin_name +# sqlite3_column_origin_name16 +# sqlite3_column_table_name +# sqlite3_column_table_name16 +# sqlite3_column_database_name +# sqlite3_column_database_name16 +# +# $STMT is a compiled SQL statement. $test is a prefix +# to use for test names within this proc. $names is a list +# of the column names that should be returned by $STMT. +# $decltypes is a list of column declaration types for $STMT. 
+# +# Example: +# +# set STMT [sqlite3_prepare_v2 "SELECT 1, 2, 2;" -1 DUMMY] +# check_header test1.1 {1 2 3} {"" "" ""} +# +proc check_origin_header {STMT test dbs tables cols} { + # If sqlite3_column_origin_name() and friends are not compiled into + # this build, this proc is a no-op. +ifcapable columnmetadata { + + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. + set ::idxlist [list] + set ::numcols [sqlite3_column_count $STMT] + for {set i 0} {$i < $::numcols} {incr i} {lappend ::idxlist $i} + + # Database names in UTF-8 + do_test $test.8 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_database_name $STMT $i] + } + set cnamelist + } $dbs + + # Database names in UTF-16 + ifcapable {utf16} { + do_test $test.9 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_database_name16 $STMT $i]] + } + set cnamelist + } $dbs + } + + # Table names in UTF-8 + do_test $test.10 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_table_name $STMT $i] + } + set cnamelist + } $tables + + # Table names in UTF-16 + ifcapable {utf16} { + do_test $test.11 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_table_name16 $STMT $i]] + } + set cnamelist + } $tables + } + + # Origin names in UTF-8 + do_test $test.12 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [sqlite3_column_origin_name $STMT $i] + } + set cnamelist + } $cols + + # Origin declaration types in UTF-16 + ifcapable {utf16} { + do_test $test.13 { + set cnamelist [list] + foreach i $idxlist { + lappend cnamelist [utf8 [sqlite3_column_origin_name16 $STMT $i]] + } + set cnamelist + } $cols + } + } +} + +# This proc is used to test the following APIs: +# +# sqlite3_data_count +# sqlite3_column_type +# sqlite3_column_int +# sqlite3_column_text +# sqlite3_column_text16 +# sqlite3_column_double +# +# $STMT is a compiled SQL statement for which the previous call +# to sqlite3_step returned SQLITE_ROW. $test is a prefix to use +# for test names within this proc. $types is a list of the +# manifest types for the current row. $ints, $doubles and $strings +# are lists of the integer, real and string representations of +# the values in the current row. +# +# Example: +# +# set STMT [sqlite3_prepare_v2 "SELECT 'hello', 1.1, NULL" -1 DUMMY] +# sqlite3_step $STMT +# check_data test1.2 {TEXT REAL NULL} {0 1 0} {0 1.1 0} {hello 1.1 {}} +# +proc check_data {STMT test types ints doubles strings} { + + # Use the return value of sqlite3_column_count() to build + # a list of column indexes. i.e. If sqlite3_column_count + # is 3, build the list {0 1 2}. 
+ set ::idxlist [list] + set numcols [sqlite3_data_count $STMT] + for {set i 0} {$i < $numcols} {incr i} {lappend ::idxlist $i} + +# types +do_test $test.1 { + set types [list] + foreach i $idxlist {lappend types [sqlite3_column_type $STMT $i]} + set types +} $types + +# Integers +do_test $test.2 { + set ints [list] + foreach i $idxlist {lappend ints [sqlite3_column_int64 $STMT $i]} + set ints +} $ints + +# bytes +set lens [list] +foreach i $::idxlist { + lappend lens [string length [lindex $strings $i]] +} +do_test $test.3 { + set bytes [list] + set lens [list] + foreach i $idxlist { + lappend bytes [sqlite3_column_bytes $STMT $i] + } + set bytes +} $lens + +# bytes16 +ifcapable {utf16} { + set lens [list] + foreach i $::idxlist { + lappend lens [expr 2 * [string length [lindex $strings $i]]] + } + do_test $test.4 { + set bytes [list] + set lens [list] + foreach i $idxlist { + lappend bytes [sqlite3_column_bytes16 $STMT $i] + } + set bytes + } $lens +} + +# Blob +do_test $test.5 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_blob $STMT $i]} + set utf8 +} $strings + +# UTF-8 +do_test $test.6 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_text $STMT $i]} + set utf8 +} $strings + +# Floats +do_test $test.7 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_double $STMT $i]} + set utf8 +} $doubles + +# UTF-16 +ifcapable {utf16} { + do_test $test.8 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [utf8 [sqlite3_column_text16 $STMT $i]]} + set utf8 + } $strings +} + +# Integers +do_test $test.9 { + set ints [list] + foreach i $idxlist {lappend ints [sqlite3_column_int $STMT $i]} + set ints +} $ints + +# Floats +do_test $test.10 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_double $STMT $i]} + set utf8 +} $doubles + +# UTF-8 +do_test $test.11 { + set utf8 [list] + foreach i $idxlist {lappend utf8 [sqlite3_column_text $STMT $i]} + set utf8 +} $strings + +# Types +do_test $test.12 { + set types [list] + foreach i $idxlist {lappend types [sqlite3_column_type $STMT $i]} + set types +} $types + +# Test that an out of range request returns the equivalent of NULL +do_test $test.13 { + sqlite3_column_int $STMT -1 +} {0} +do_test $test.13 { + sqlite3_column_text $STMT -1 +} {} + +} + +ifcapable !floatingpoint { + finish_test + return +} + +do_test capi3c-5.0 { + execsql { + CREATE TABLE t1(a VARINT, b BLOB, c VARCHAR(16)); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES('one', 'two', NULL); + INSERT INTO t1 VALUES(1.2, 1.3, 1.4); + } + set sql "SELECT * FROM t1" + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_column_count $STMT +} 3 + +check_header $STMT capi3c-5.1 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3c-5.1 {main main main} {t1 t1 t1} {a b c} +do_test capi3c-5.2 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3c-5.3 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3c-5.3 {main main main} {t1 t1 t1} {a b c} +check_data $STMT capi3c-5.4 {INTEGER INTEGER TEXT} {1 2 3} {1.0 2.0 3.0} {1 2 3} + +do_test capi3c-5.5 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3c-5.6 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3c-5.6 {main main main} {t1 t1 t1} {a b c} +check_data $STMT capi3c-5.7 {TEXT TEXT NULL} {0 0 0} {0.0 0.0 0.0} {one two {}} + +do_test capi3c-5.8 { + sqlite3_step $STMT +} SQLITE_ROW + +check_header $STMT capi3c-5.9 {a b c} {VARINT BLOB VARCHAR(16)} +check_origin_header $STMT capi3c-5.9 
{main main main} {t1 t1 t1} {a b c} +check_data $STMT capi3c-5.10 {FLOAT FLOAT TEXT} {1 1 1} {1.2 1.3 1.4} {1.2 1.3 1.4} + +do_test capi3c-5.11 { + sqlite3_step $STMT +} SQLITE_DONE + +do_test capi3c-5.12 { + sqlite3_finalize $STMT +} SQLITE_OK + +do_test capi3c-5.20 { + set sql "SELECT a, sum(b), max(c) FROM t1 GROUP BY a" + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_column_count $STMT +} 3 + +check_header $STMT capi3c-5.21 {a sum(b) max(c)} {VARINT {} {}} +check_origin_header $STMT capi3c-5.22 {main {} {}} {t1 {} {}} {a {} {}} +do_test capi3c-5.23 { + sqlite3_finalize $STMT +} SQLITE_OK + + +set ::ENC [execsql {pragma encoding}] +db close + +do_test capi3c-6.0 { +btree_breakpoint + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] +btree_breakpoint + sqlite3_key $DB xyzzy + set sql {SELECT a FROM t1 order by rowid} + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + expr 0 +} {0} +do_test capi3c-6.1 { + db cache flush + sqlite3_close $DB +} {SQLITE_BUSY} +do_test capi3c-6.2 { + sqlite3_step $STMT +} {SQLITE_ROW} +check_data $STMT capi3c-6.3 {INTEGER} {1} {1.0} {1} +do_test capi3c-6.3 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3c-6.4 { + db cache flush + sqlite3_close $DB +} {SQLITE_OK} +do_test capi3c-6.99-misuse { + db close +} {} + +if {![sqlite3 -has-codec]} { + # Test what happens when the library encounters a newer file format. + # Do this by updating the file format via the btree layer. + do_test capi3c-7.1 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set meta [btree_get_meta $::bt] + lset meta 2 5 + eval [concat btree_update_meta $::bt [lrange $meta 0 end]] + btree_commit $::bt + btree_close $::bt + } {} + do_test capi3c-7.2 { + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {unsupported file format}} + db close +} + +if {![sqlite3 -has-codec]} { + # Now test that the library correctly handles bogus entries in the + # sqlite_master table (schema corruption). + do_test capi3c-8.1 { + file delete -force test.db + file delete -force test.db-journal + sqlite3 db test.db + execsql { + CREATE TABLE t1(a); + } + db close + } {} + do_test capi3c-8.2 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set ::bc [btree_cursor $::bt 1 1] + + # Build a 5-field row record consisting of 5 null records. This is + # officially black magic. + catch {unset data} + set data [binary format c6 {6 0 0 0 0 0}] + btree_insert $::bc 5 $data + + btree_close_cursor $::bc + btree_commit $::bt + btree_close $::bt + } {} + do_test capi3c-8.3 { + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {malformed database schema}} + do_test capi3c-8.4 { + set ::bt [btree_open test.db 10 0] + btree_begin_transaction $::bt + set ::bc [btree_cursor $::bt 1 1] + + # Build a 5-field row record. The first field is a string 'table', and + # subsequent fields are all NULL. Replace the other broken record with + # this one and try to read the schema again. The broken record uses + # either UTF-8 or native UTF-16 (if this file is being run by + # utf16.test). 
+ if { [string match UTF-16* $::ENC] } { + set data [binary format c6a10 {6 33 0 0 0 0} [utf16 table]] + } else { + set data [binary format c6a5 {6 23 0 0 0 0} table] + } + btree_insert $::bc 5 $data + + btree_close_cursor $::bc + btree_commit $::bt + btree_close $::bt + } {}; + do_test capi3c-8.5 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master; + } + } {1 {malformed database schema}} + db close +} +file delete -force test.db +file delete -force test.db-journal + + +# Test the english language string equivalents for sqlite error codes +set code2english [list \ +SQLITE_OK {not an error} \ +SQLITE_ERROR {SQL logic error or missing database} \ +SQLITE_PERM {access permission denied} \ +SQLITE_ABORT {callback requested query abort} \ +SQLITE_BUSY {database is locked} \ +SQLITE_LOCKED {database table is locked} \ +SQLITE_NOMEM {out of memory} \ +SQLITE_READONLY {attempt to write a readonly database} \ +SQLITE_INTERRUPT {interrupted} \ +SQLITE_IOERR {disk I/O error} \ +SQLITE_CORRUPT {database disk image is malformed} \ +SQLITE_FULL {database or disk is full} \ +SQLITE_CANTOPEN {unable to open database file} \ +SQLITE_EMPTY {table contains no data} \ +SQLITE_SCHEMA {database schema has changed} \ +SQLITE_CONSTRAINT {constraint failed} \ +SQLITE_MISMATCH {datatype mismatch} \ +SQLITE_MISUSE {library routine called out of sequence} \ +SQLITE_NOLFS {kernel lacks large file support} \ +SQLITE_AUTH {authorization denied} \ +SQLITE_FORMAT {auxiliary database format error} \ +SQLITE_RANGE {bind or column index out of range} \ +SQLITE_NOTADB {file is encrypted or is not a database} \ +unknownerror {unknown error} \ +] + +set test_number 1 +foreach {code english} $code2english { + do_test capi3c-9.$test_number "sqlite3_test_errstr $code" $english + incr test_number +} + +# Test the error message when a "real" out of memory occurs. +ifcapable memdebug { + do_test capi3c-10-1 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_memdebug_fail 0 + catchsql { + select * from sqlite_master; + } + } {1 {out of memory}} + do_test capi3c-10-2 { + sqlite3_errmsg $::DB + } {out of memory} + ifcapable {utf16} { + do_test capi3c-10-3 { + utf8 [sqlite3_errmsg16 $::DB] + } {out of memory} + } + db close + sqlite3_memdebug_fail -1 +} + +# The following tests - capi3c-11.* - test that a COMMIT or ROLLBACK +# statement issued while there are still outstanding VMs that are part of +# the transaction fails. 
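+#
+# In outline, the pattern being exercised is roughly (a condensed,
+# commented-out sketch of the numbered tests that follow, not an extra
+# test in its own right):
+#
+#   execsql {BEGIN; INSERT INTO t1 VALUES(...)}    ;# open a transaction
+#   set STMT [sqlite3_prepare_v2 $DB {SELECT * FROM t1} -1 TAIL]
+#   sqlite3_step $STMT        ;# SQLITE_ROW - the VM is now outstanding
+#   catchsql {COMMIT}         ;# fails: SQL statements in progress
+#   sqlite3_finalize $STMT    ;# the VM is no longer outstanding
+#   catchsql {COMMIT}         ;# now succeeds
+#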
+sqlite3 db test.db +set DB [sqlite3_connection_pointer db] +sqlite_register_test_function $DB func +do_test capi3c-11.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'int'); + INSERT INTO t1 VALUES(2, 'notatype'); + } +} {} +do_test capi3c-11.1.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.2 { + set STMT [sqlite3_prepare_v2 $DB "SELECT func(b, a) FROM t1" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-11.3 { + catchsql { + COMMIT; + } +} {1 {cannot commit transaction - SQL statements in progress}} +do_test capi3c-11.3.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.4 { + sqlite3_step $STMT +} {SQLITE_ERROR} +do_test capi3c-11.5 { + sqlite3_finalize $STMT +} {SQLITE_ERROR} +do_test capi3c-11.6 { + catchsql { + SELECT * FROM t1; + } +} {0 {1 int 2 notatype}} +do_test capi3c-11.6.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.7 { + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3c-11.7.1 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3c-11.8 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + BEGIN; + INSERT INTO t2 VALUES(3); + } +} {} +do_test capi3c-11.8.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.9 { + set STMT [sqlite3_prepare_v2 $DB "SELECT a FROM t2" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-11.9.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.9.2 { + catchsql { + ROLLBACK; + } +} {1 {cannot rollback transaction - SQL statements in progress}} +do_test capi3c-11.9.3 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.10 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-11.11 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-11.12 { + sqlite3_step $STMT +} {SQLITE_DONE} +do_test capi3c-11.13 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3c-11.14 { + execsql { + SELECT a FROM t2; + } +} {1 2 3} +do_test capi3c-11.14.1 { + sqlite3_get_autocommit $DB +} 0 +do_test capi3c-11.15 { + catchsql { + ROLLBACK; + } +} {0 {}} +do_test capi3c-11.15.1 { + sqlite3_get_autocommit $DB +} 1 +do_test capi3c-11.16 { + execsql { + SELECT a FROM t2; + } +} {1 2} + +# Sanity check on the definition of 'outstanding VM'. This means any VM +# that has had sqlite3_step() called more recently than sqlite3_finalize() or +# sqlite3_reset(). So a VM that has just been prepared or reset does not +# count as an active VM. 
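+# In the tests below this means a COMMIT issued straight after
+# sqlite3_prepare_v2() or straight after sqlite3_reset() succeeds, while
+# the same COMMIT fails once sqlite3_step() has returned SQLITE_ROW for
+# that statement.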
+do_test capi3c-11.17 { + execsql { + BEGIN; + } +} {} +do_test capi3c-11.18 { + set STMT [sqlite3_prepare_v2 $DB "SELECT a FROM t1" -1 TAIL] + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3c-11.19 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-11.20 { + catchsql { + BEGIN; + COMMIT; + } +} {1 {cannot commit transaction - SQL statements in progress}} +do_test capi3c-11.20 { + sqlite3_reset $STMT + catchsql { + COMMIT; + } +} {0 {}} +do_test capi3c-11.21 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +# The following tests - capi3c-12.* - check that it's Ok to start a +# transaction while other VMs are active, and that it's Ok to execute +# atomic updates in the same situation +# +do_test capi3c-12.1 { + set STMT [sqlite3_prepare_v2 $DB "SELECT a FROM t2" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-12.2 { + catchsql { + INSERT INTO t1 VALUES(3, NULL); + } +} {0 {}} +do_test capi3c-12.3 { + catchsql { + INSERT INTO t2 VALUES(4); + } +} {0 {}} +do_test capi3c-12.4 { + catchsql { + BEGIN; + INSERT INTO t1 VALUES(4, NULL); + } +} {0 {}} +do_test capi3c-12.5 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-12.5.1 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-12.6 { + sqlite3_step $STMT +} {SQLITE_DONE} +do_test capi3c-12.7 { + sqlite3_finalize $STMT +} {SQLITE_OK} +do_test capi3c-12.8 { + execsql { + COMMIT; + SELECT a FROM t1; + } +} {1 2 3 4} + +# Test cases capi3c-13.* test the sqlite3_clear_bindings() and +# sqlite3_sleep APIs. +# +if {[llength [info commands sqlite3_clear_bindings]]>0} { + do_test capi3c-13.1 { + execsql { + DELETE FROM t1; + } + set STMT [sqlite3_prepare_v2 $DB "INSERT INTO t1 VALUES(?, ?)" -1 TAIL] + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3c-13.2 { + sqlite3_reset $STMT + sqlite3_bind_text $STMT 1 hello 5 + sqlite3_bind_text $STMT 2 world 5 + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3c-13.3 { + sqlite3_reset $STMT + sqlite3_clear_bindings $STMT + sqlite3_step $STMT + } {SQLITE_DONE} + do_test capi3c-13-4 { + sqlite3_finalize $STMT + execsql { + SELECT * FROM t1; + } + } {{} {} hello world {} {}} +} +if {[llength [info commands sqlite3_sleep]]>0} { + do_test capi3c-13-5 { + set ms [sqlite3_sleep 80] + expr {$ms==80 || $ms==1000} + } {1} +} + +# Ticket #1219: Make sure binding APIs can handle a NULL pointer. +# +do_test capi3c-14.1 { + set rc [catch {sqlite3_bind_text 0 1 hello 5} msg] + lappend rc $msg +} {1 SQLITE_MISUSE} + +# Ticket #1650: Honor the nBytes parameter to sqlite3_prepare. +# +do_test capi3c-15.1 { + set sql {SELECT * FROM t2} + set nbytes [string length $sql] + append sql { WHERE a==1} + set STMT [sqlite3_prepare_v2 $DB $sql $nbytes TAIL] + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {1} +do_test capi3c-15.2 { + sqlite3_step $STMT + sqlite3_column_int $STMT 0 +} {2} +do_test capi3c-15.3 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +# Make sure code is always generated even if an IF EXISTS or +# IF NOT EXISTS clause is present that the table does not or +# does exists. That way we will always have a prepared statement +# to expire when the schema changes. 
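+# (Concretely, the tests below expect sqlite3_prepare_v2() to hand back a
+# non-empty statement handle for "DROP TABLE IF EXISTS" and
+# "CREATE TABLE IF NOT EXISTS", and an empty handle only when the SQL
+# itself is empty or just ";".)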
+# +do_test capi3c-16.1 { + set sql {DROP TABLE IF EXISTS t3} + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT!=""} +} {1} +do_test capi3c-16.2 { + set sql {CREATE TABLE IF NOT EXISTS t1(x,y)} + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT!=""} +} {1} + +# But still we do not generate code if there is no SQL +# +do_test capi3c-16.3 { + set STMT [sqlite3_prepare_v2 $DB {} -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT==""} +} {1} +do_test capi3c-16.4 { + set STMT [sqlite3_prepare_v2 $DB {;} -1 TAIL] + sqlite3_finalize $STMT + expr {$STMT==""} +} {1} + +# Ticket #2154. +# +do_test capi3c-17.1 { + set STMT [sqlite3_prepare_v2 $DB {SELECT max(a) FROM t2} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.2 { + sqlite3_column_int $STMT 0 +} 4 +do_test capi3c-17.3 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.4 { + sqlite3_reset $STMT + db eval {CREATE INDEX i2 ON t2(a)} + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.5 { + sqlite3_column_int $STMT 0 +} 4 +do_test capi3c-17.6 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.7 { + sqlite3_reset $STMT + db eval {DROP INDEX i2} + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.8 { + sqlite3_column_int $STMT 0 +} 4 +do_test capi3c-17.9 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.10 { + sqlite3_finalize $STMT + set STMT [sqlite3_prepare_v2 $DB {SELECT b FROM t1 WHERE a=?} -1 TAIL] + sqlite3_bind_int $STMT 1 2 + db eval { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,'one'); + INSERT INTO t1 VALUES(2,'two'); + INSERT INTO t1 VALUES(3,'three'); + INSERT INTO t1 VALUES(4,'four'); + } + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.11 { + sqlite3_column_text $STMT 0 +} two +do_test capi3c-17.12 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.13 { + sqlite3_reset $STMT + db eval {CREATE INDEX i1 ON t1(a)} + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.14 { + sqlite3_column_text $STMT 0 +} two +do_test capi3c-17.15 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.16 { + sqlite3_reset $STMT + db eval {DROP INDEX i1} + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-17.17 { + sqlite3_column_text $STMT 0 +} two +do_test capi3c-17.18 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-17.99 { + sqlite3_finalize $STMT +} SQLITE_OK + +# On the mailing list it has been reported that finalizing after +# an SQLITE_BUSY return leads to a segfault. Here we test that case. +# +do_test capi3c-18.1 { + sqlite3 db2 test.db + set STMT [sqlite3_prepare_v2 $DB {SELECT max(a) FROM t1} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-18.2 { + sqlite3_column_int $STMT 0 +} 4 +do_test capi3c-18.3 { + sqlite3_reset $STMT + db2 eval {BEGIN EXCLUSIVE} + sqlite3_step $STMT +} SQLITE_BUSY +do_test capi3c-18.4 { + sqlite3_finalize $STMT +} SQLITE_BUSY +do_test capi3c-18.5 { + db2 eval {COMMIT} + db2 close +} {} + +# Ticket #2158. The sqlite3_step() will still return SQLITE_SCHEMA +# if the database schema changes in a way that makes the statement +# no longer valid. 
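+# (With sqlite3_prepare_v2() a schema change is normally handled by
+# recompiling the statement transparently, as the capi3c-17.* tests above
+# demonstrate; SQLITE_SCHEMA shows up here only because the recompile
+# itself fails once table t3 has been dropped.)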
+# +do_test capi3c-19.1 { + db eval { + CREATE TABLE t3(x,y); + INSERT INTO t3 VALUES(1,2); + } + set STMT [sqlite3_prepare_v2 $DB {SELECT * FROM t3} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-19.2 { + sqlite3_column_int $STMT 0 +} 1 +do_test capi3c-19.3 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-19.4 { + sqlite3_reset $STMT + db eval {DROP TABLE t3} + sqlite3_step $STMT +} SQLITE_SCHEMA +do_test capi3c-19.4.2 { + sqlite3_errmsg $DB +} {no such table: t3} +do_test capi3c-19.5 { + sqlite3_reset $STMT + db eval { + CREATE TABLE t3(x,y); + INSERT INTO t3 VALUES(1,2); + } + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-19.6 { + sqlite3_column_int $STMT 1 +} 2 +do_test capi3c-19.99 { + sqlite3_finalize $STMT +} SQLITE_OK + +# Make sure a change in a separate database connection does not +# cause an SQLITE_SCHEMA return. +# +do_test capi3c-20.1 { + set STMT [sqlite3_prepare_v2 $DB {SELECT * FROM t3} -1 TAIL] + sqlite3 db2 test.db + db2 eval {CREATE TABLE t4(x)} + sqlite3_step $STMT +} SQLITE_ROW +do_test capi3c-20.2 { + sqlite3_column_int $STMT 1 +} 2 +do_test capi3c-20.3 { + sqlite3_step $STMT +} SQLITE_DONE +do_test capi3c-20.4 { + db2 close + sqlite3_finalize $STMT +} SQLITE_OK + +# Test that sqlite3_step() sets the database error code correctly. +# See ticket #2497. +# +ifcapable progress { + do_test capi3c-21.1 { + set STMT [sqlite3_prepare_v2 $DB {SELECT * FROM t3} -1 TAIL] + db progress 5 "expr 1" + sqlite3_step $STMT + } {SQLITE_INTERRUPT} + do_test capi3c-21.2 { + sqlite3_errcode $DB + } {SQLITE_INTERRUPT} + do_test capi3c-21.3 { + sqlite3_finalize $STMT + } {SQLITE_INTERRUPT} + do_test capi3c-21.4 { + set STMT [sqlite3_prepare $DB {SELECT * FROM t3} -1 TAIL] + db progress 5 "expr 1" + sqlite3_step $STMT + } {SQLITE_ERROR} + do_test capi3c-21.5 { + sqlite3_errcode $DB + } {SQLITE_ERROR} + do_test capi3c-21.6 { + sqlite3_finalize $STMT + } {SQLITE_INTERRUPT} + do_test capi3c-21.7 { + sqlite3_errcode $DB + } {SQLITE_INTERRUPT} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/cast.test b/libraries/sqlite/unix/sqlite-3.5.1/test/cast.test new file mode 100644 index 0000000..fc0f74b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/cast.test @@ -0,0 +1,290 @@ +# 2005 June 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CAST operator. 
+# +# $Id: cast.test,v 1.8 2007/08/13 15:18:28 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if the build includes the CAST operator +ifcapable !cast { + finish_test + return +} + +# Tests for the CAST( AS blob), CAST( AS text) and CAST( AS numeric) built-ins +# +ifcapable bloblit { + do_test cast-1.1 { + execsql {SELECT x'616263'} + } abc + do_test cast-1.2 { + execsql {SELECT typeof(x'616263')} + } blob + do_test cast-1.3 { + execsql {SELECT CAST(x'616263' AS text)} + } abc + do_test cast-1.4 { + execsql {SELECT typeof(CAST(x'616263' AS text))} + } text + do_test cast-1.5 { + execsql {SELECT CAST(x'616263' AS numeric)} + } 0 + do_test cast-1.6 { + execsql {SELECT typeof(CAST(x'616263' AS numeric))} + } integer + do_test cast-1.7 { + execsql {SELECT CAST(x'616263' AS blob)} + } abc + do_test cast-1.8 { + execsql {SELECT typeof(CAST(x'616263' AS blob))} + } blob + do_test cast-1.9 { + execsql {SELECT CAST(x'616263' AS integer)} + } 0 + do_test cast-1.10 { + execsql {SELECT typeof(CAST(x'616263' AS integer))} + } integer +} +do_test cast-1.11 { + execsql {SELECT null} +} {{}} +do_test cast-1.12 { + execsql {SELECT typeof(NULL)} +} null +do_test cast-1.13 { + execsql {SELECT CAST(NULL AS text)} +} {{}} +do_test cast-1.14 { + execsql {SELECT typeof(CAST(NULL AS text))} +} null +do_test cast-1.15 { + execsql {SELECT CAST(NULL AS numeric)} +} {{}} +do_test cast-1.16 { + execsql {SELECT typeof(CAST(NULL AS numeric))} +} null +do_test cast-1.17 { + execsql {SELECT CAST(NULL AS blob)} +} {{}} +do_test cast-1.18 { + execsql {SELECT typeof(CAST(NULL AS blob))} +} null +do_test cast-1.19 { + execsql {SELECT CAST(NULL AS integer)} +} {{}} +do_test cast-1.20 { + execsql {SELECT typeof(CAST(NULL AS integer))} +} null +do_test cast-1.21 { + execsql {SELECT 123} +} {123} +do_test cast-1.22 { + execsql {SELECT typeof(123)} +} integer +do_test cast-1.23 { + execsql {SELECT CAST(123 AS text)} +} {123} +do_test cast-1.24 { + execsql {SELECT typeof(CAST(123 AS text))} +} text +do_test cast-1.25 { + execsql {SELECT CAST(123 AS numeric)} +} 123 +do_test cast-1.26 { + execsql {SELECT typeof(CAST(123 AS numeric))} +} integer +do_test cast-1.27 { + execsql {SELECT CAST(123 AS blob)} +} {123} +do_test cast-1.28 { + execsql {SELECT typeof(CAST(123 AS blob))} +} blob +do_test cast-1.29 { + execsql {SELECT CAST(123 AS integer)} +} {123} +do_test cast-1.30 { + execsql {SELECT typeof(CAST(123 AS integer))} +} integer +do_test cast-1.31 { + execsql {SELECT 123.456} +} {123.456} +do_test cast-1.32 { + execsql {SELECT typeof(123.456)} +} real +do_test cast-1.33 { + execsql {SELECT CAST(123.456 AS text)} +} {123.456} +do_test cast-1.34 { + execsql {SELECT typeof(CAST(123.456 AS text))} +} text +do_test cast-1.35 { + execsql {SELECT CAST(123.456 AS numeric)} +} 123.456 +do_test cast-1.36 { + execsql {SELECT typeof(CAST(123.456 AS numeric))} +} real +do_test cast-1.37 { + execsql {SELECT CAST(123.456 AS blob)} +} {123.456} +do_test cast-1.38 { + execsql {SELECT typeof(CAST(123.456 AS blob))} +} blob +do_test cast-1.39 { + execsql {SELECT CAST(123.456 AS integer)} +} {123} +do_test cast-1.38 { + execsql {SELECT typeof(CAST(123.456 AS integer))} +} integer +do_test cast-1.41 { + execsql {SELECT '123abc'} +} {123abc} +do_test cast-1.42 { + execsql {SELECT typeof('123abc')} +} text +do_test cast-1.43 { + execsql {SELECT CAST('123abc' AS text)} +} {123abc} +do_test cast-1.44 { + execsql {SELECT typeof(CAST('123abc' AS text))} +} text +do_test cast-1.45 { + execsql {SELECT 
CAST('123abc' AS numeric)} +} 123 +do_test cast-1.46 { + execsql {SELECT typeof(CAST('123abc' AS numeric))} +} integer +do_test cast-1.47 { + execsql {SELECT CAST('123abc' AS blob)} +} {123abc} +do_test cast-1.48 { + execsql {SELECT typeof(CAST('123abc' AS blob))} +} blob +do_test cast-1.49 { + execsql {SELECT CAST('123abc' AS integer)} +} 123 +do_test cast-1.50 { + execsql {SELECT typeof(CAST('123abc' AS integer))} +} integer +do_test cast-1.51 { + execsql {SELECT CAST('123.5abc' AS numeric)} +} 123.5 +do_test cast-1.53 { + execsql {SELECT CAST('123.5abc' AS integer)} +} 123 + +# Ticket #1662. Ignore leading spaces in numbers when casting. +# +do_test cast-2.1 { + execsql {SELECT CAST(' 123' AS integer)} +} 123 +do_test cast-2.2 { + execsql {SELECT CAST(' -123.456' AS real)} +} -123.456 + +# ticket #2364. Use full percision integers if possible when casting +# to numeric. Do not fallback to real (and the corresponding 48-bit +# mantissa) unless absolutely necessary. +# +do_test cast-3.1 { + execsql {SELECT CAST(9223372036854774800 AS integer)} +} 9223372036854774800 +do_test cast-3.2 { + execsql {SELECT CAST(9223372036854774800 AS numeric)} +} 9223372036854774800 +do_test cast-3.3 { + execsql {SELECT CAST(9223372036854774800 AS real)} +} 9.22337203685477e+18 +do_test cast-3.4 { + execsql {SELECT CAST(CAST(9223372036854774800 AS real) AS integer)} +} 9223372036854774784 +do_test cast-3.5 { + execsql {SELECT CAST(-9223372036854774800 AS integer)} +} -9223372036854774800 +do_test cast-3.6 { + execsql {SELECT CAST(-9223372036854774800 AS numeric)} +} -9223372036854774800 +do_test cast-3.7 { + execsql {SELECT CAST(-9223372036854774800 AS real)} +} -9.22337203685477e+18 +do_test cast-3.8 { + execsql {SELECT CAST(CAST(-9223372036854774800 AS real) AS integer)} +} -9223372036854774784 +do_test cast-3.11 { + execsql {SELECT CAST('9223372036854774800' AS integer)} +} 9223372036854774800 +do_test cast-3.12 { + execsql {SELECT CAST('9223372036854774800' AS numeric)} +} 9223372036854774800 +do_test cast-3.13 { + execsql {SELECT CAST('9223372036854774800' AS real)} +} 9.22337203685477e+18 +ifcapable long_double { + do_test cast-3.14 { + execsql {SELECT CAST(CAST('9223372036854774800' AS real) AS integer)} + } 9223372036854774784 +} +do_test cast-3.15 { + execsql {SELECT CAST('-9223372036854774800' AS integer)} +} -9223372036854774800 +do_test cast-3.16 { + execsql {SELECT CAST('-9223372036854774800' AS numeric)} +} -9223372036854774800 +do_test cast-3.17 { + execsql {SELECT CAST('-9223372036854774800' AS real)} +} -9.22337203685477e+18 +ifcapable long_double { + do_test cast-3.18 { + execsql {SELECT CAST(CAST('-9223372036854774800' AS real) AS integer)} + } -9223372036854774784 +} +if {[db eval {PRAGMA encoding}]=="UTF-8"} { + do_test cast-3.21 { + execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS integer)} + } 9223372036854774800 + do_test cast-3.22 { + execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS numeric)} + } 9223372036854774800 + do_test cast-3.23 { + execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS real)} + } 9.22337203685477e+18 + ifcapable long_double { + do_test cast-3.24 { + execsql { + SELECT CAST(CAST(x'39323233333732303336383534373734383030' AS real) + AS integer) + } + } 9223372036854774784 + } +} +do_test case-3.31 { + execsql {SELECT CAST(NULL AS numeric)} +} {{}} + +# Test to see if it is possible to trick SQLite into reading past +# the end of a blob when converting it to a number. 
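+# (The test below binds a ten-character blob but passes a length of only
+# five bytes to sqlite3_bind_blob, so the CAST should see just "12345"
+# and must not read the bytes beyond the declared length.)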
+do_test cast-3.32.1 { + set blob "1234567890" + set DB [sqlite3_connection_pointer db] + set ::STMT [sqlite3_prepare $DB {SELECT CAST(? AS real)} -1 TAIL] + sqlite3_bind_blob -static $::STMT 1 $blob 5 + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test cast-3.32.2 { + sqlite3_column_int $::STMT 0 +} {12345} +do_test cast-3.32.3 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/check.test b/libraries/sqlite/unix/sqlite-3.5.1/test/check.test new file mode 100644 index 0000000..cdf2b46 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/check.test @@ -0,0 +1,372 @@ +# 2005 November 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing CHECK constraints +# +# $Id: check.test,v 1.11 2007/07/23 19:39:47 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if the build includes support for CHECK constraints +ifcapable !check { + finish_test + return +} + +do_test check-1.1 { + execsql { + CREATE TABLE t1( + x INTEGER CHECK( x<5 ), + y REAL CHECK( y>x ) + ); + } +} {} +do_test check-1.2 { + execsql { + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1; + } +} {3 4.0} +do_test check-1.3 { + catchsql { + INSERT INTO t1 VALUES(6,7); + } +} {1 {constraint failed}} +do_test check-1.4 { + execsql { + SELECT * FROM t1; + } +} {3 4.0} +do_test check-1.5 { + catchsql { + INSERT INTO t1 VALUES(4,3); + } +} {1 {constraint failed}} +do_test check-1.6 { + execsql { + SELECT * FROM t1; + } +} {3 4.0} +do_test check-1.7 { + catchsql { + INSERT INTO t1 VALUES(NULL,6); + } +} {0 {}} +do_test check-1.8 { + execsql { + SELECT * FROM t1; + } +} {3 4.0 {} 6.0} +do_test check-1.9 { + catchsql { + INSERT INTO t1 VALUES(2,NULL); + } +} {0 {}} +do_test check-1.10 { + execsql { + SELECT * FROM t1; + } +} {3 4.0 {} 6.0 2 {}} +do_test check-1.11 { + execsql { + DELETE FROM t1 WHERE x IS NULL OR x!=3; + UPDATE t1 SET x=2 WHERE x==3; + SELECT * FROM t1; + } +} {2 4.0} +do_test check-1.12 { + catchsql { + UPDATE t1 SET x=7 WHERE x==2 + } +} {1 {constraint failed}} +do_test check-1.13 { + execsql { + SELECT * FROM t1; + } +} {2 4.0} +do_test check-1.14 { + catchsql { + UPDATE t1 SET x=5 WHERE x==2 + } +} {1 {constraint failed}} +do_test check-1.15 { + execsql { + SELECT * FROM t1; + } +} {2 4.0} +do_test check-1.16 { + catchsql { + UPDATE t1 SET x=4, y=11 WHERE x==2 + } +} {0 {}} +do_test check-1.17 { + execsql { + SELECT * FROM t1; + } +} {4 11.0} + +do_test check-2.1 { + execsql { + CREATE TABLE t2( + x INTEGER CHECK( typeof(coalesce(x,0))=="integer" ), + y REAL CHECK( typeof(coalesce(y,0.1))=="real" ), + z TEXT CHECK( typeof(coalesce(z,''))=="text" ) + ); + } +} {} +do_test check-2.2 { + execsql { + INSERT INTO t2 VALUES(1,2.2,'three'); + SELECT * FROM t2; + } +} {1 2.2 three} +do_test check-2.3 { + execsql { + INSERT INTO t2 VALUES(NULL, NULL, NULL); + SELECT * FROM t2; + } +} {1 2.2 three {} {} {}} +do_test check-2.4 { + catchsql { + INSERT INTO t2 VALUES(1.1, NULL, NULL); + } +} {1 {constraint failed}} +do_test check-2.5 { + catchsql { + INSERT INTO t2 VALUES(NULL, 5, NULL); + } +} {1 {constraint failed}} +do_test check-2.6 { + 
catchsql { + INSERT INTO t2 VALUES(NULL, NULL, 3.14159); + } +} {1 {constraint failed}} + +ifcapable subquery { + do_test check-3.1 { + catchsql { + CREATE TABLE t3( + x, y, z, + CHECK( x<(SELECT min(x) FROM t1) ) + ); + } + } {1 {subqueries prohibited in CHECK constraints}} +} + +do_test check-3.2 { + execsql { + SELECT name FROM sqlite_master ORDER BY name + } +} {t1 t2} +do_test check-3.3 { + catchsql { + CREATE TABLE t3( + x, y, z, + CHECK( q $rhs} {return 1} + } + if {$lhs_ishex} { + return -1; + } + if {$rhs_ishex} { + return 1; + } + return [string compare $lhs $rhs] +} +db function hex {format 0x%X} + +# Mimic the SQLite 2 collation type NUMERIC. +db collate numeric numeric_collate +proc numeric_collate {lhs rhs} { + if {$lhs == $rhs} {return 0} + return [expr ($lhs>$rhs)?1:-1] +} + +do_test collate1-1.0 { + execsql { + CREATE TABLE collate1t1(c1, c2); + INSERT INTO collate1t1 VALUES(45, hex(45)); + INSERT INTO collate1t1 VALUES(NULL, NULL); + INSERT INTO collate1t1 VALUES(281, hex(281)); + } +} {} +do_test collate1-1.1 { + execsql { + SELECT c2 FROM collate1t1 ORDER BY 1; + } +} {{} 0x119 0x2D} +do_test collate1-1.2 { + execsql { + SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex; + } +} {{} 0x2D 0x119} +do_test collate1-1.3 { + execsql { + SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex DESC; + } +} {0x119 0x2D {}} +do_test collate1-1.4 { + execsql { + SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex ASC; + } +} {{} 0x2D 0x119} +do_test collate1-1.5 { + execsql { + SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1 + } +} {{} 0x2D 0x119} +do_test collate1-1.6 { + execsql { + SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1 ASC + } +} {{} 0x2D 0x119} +do_test collate1-1.7 { + execsql { + SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1 DESC + } +} {0x119 0x2D {}} +do_test collate1-1.99 { + execsql { + DROP TABLE collate1t1; + } +} {} + +do_test collate1-2.0 { + execsql { + CREATE TABLE collate1t1(c1, c2); + INSERT INTO collate1t1 VALUES('5', '0x11'); + INSERT INTO collate1t1 VALUES('5', '0xA'); + INSERT INTO collate1t1 VALUES(NULL, NULL); + INSERT INTO collate1t1 VALUES('7', '0xA'); + INSERT INTO collate1t1 VALUES('11', '0x11'); + INSERT INTO collate1t1 VALUES('11', '0x101'); + } +} {} +do_test collate1-2.2 { + execsql { + SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE numeric, 2 COLLATE hex; + } +} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101} +do_test collate1-2.3 { + execsql { + SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE binary, 2 COLLATE hex; + } +} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA} +do_test collate1-2.4 { + execsql { + SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex; + } +} {7 0xA 5 0xA 5 0x11 11 0x11 11 0x101 {} {}} +do_test collate1-2.5 { + execsql { + SELECT c1, c2 FROM collate1t1 + ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex DESC; + } +} {7 0xA 5 0x11 5 0xA 11 0x101 11 0x11 {} {}} +do_test collate1-2.6 { + execsql { + SELECT c1, c2 FROM collate1t1 + ORDER BY 1 COLLATE binary ASC, 2 COLLATE hex ASC; + } +} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA} +do_test collate1-2.12.1 { + execsql { + SELECT c1 COLLATE numeric, c2 FROM collate1t1 + ORDER BY 1, 2 COLLATE hex; + } +} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101} +do_test collate1-2.12.2 { + execsql { + SELECT c1 COLLATE hex, c2 FROM collate1t1 + ORDER BY 1 COLLATE numeric, 2 COLLATE hex; + } +} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101} +do_test collate1-2.12.3 { + execsql { + SELECT c1, c2 COLLATE hex FROM collate1t1 + ORDER BY 1 COLLATE numeric, 2; + } +} {{} {} 5 0xA 5 0x11 
7 0xA 11 0x11 11 0x101} +do_test collate1-2.12.4 { + execsql { + SELECT c1 COLLATE numeric, c2 COLLATE hex + FROM collate1t1 + ORDER BY 1, 2; + } +} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101} +do_test collate1-2.13 { + execsql { + SELECT c1 COLLATE binary, c2 COLLATE hex + FROM collate1t1 + ORDER BY 1, 2; + } +} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA} +do_test collate1-2.14 { + execsql { + SELECT c1, c2 + FROM collate1t1 ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex; + } +} {7 0xA 5 0xA 5 0x11 11 0x11 11 0x101 {} {}} +do_test collate1-2.15 { + execsql { + SELECT c1 COLLATE binary, c2 COLLATE hex + FROM collate1t1 + ORDER BY 1 DESC, 2 DESC; + } +} {7 0xA 5 0x11 5 0xA 11 0x101 11 0x11 {} {}} +do_test collate1-2.16 { + execsql { + SELECT c1 COLLATE hex, c2 COLLATE binary + FROM collate1t1 + ORDER BY 1 COLLATE binary ASC, 2 COLLATE hex ASC; + } +} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA} +do_test collate1-2.99 { + execsql { + DROP TABLE collate1t1; + } +} {} + +# +# These tests ensure that the default collation type for a column is used +# by an ORDER BY clause correctly. The focus is all the different ways +# the column can be referenced. i.e. a, collate2t1.a, main.collate2t1.a etc. +# +do_test collate1-3.0 { + execsql { + CREATE TABLE collate1t1(a COLLATE hex, b); + INSERT INTO collate1t1 VALUES( '0x5', 5 ); + INSERT INTO collate1t1 VALUES( '1', 1 ); + INSERT INTO collate1t1 VALUES( '0x45', 69 ); + INSERT INTO collate1t1 VALUES( NULL, NULL ); + SELECT * FROM collate1t1 ORDER BY a; + } +} {{} {} 1 1 0x5 5 0x45 69} + +do_test collate1-3.1 { + execsql { + SELECT * FROM collate1t1 ORDER BY 1; + } +} {{} {} 1 1 0x5 5 0x45 69} +do_test collate1-3.2 { + execsql { + SELECT * FROM collate1t1 ORDER BY collate1t1.a; + } +} {{} {} 1 1 0x5 5 0x45 69} +do_test collate1-3.3 { + execsql { + SELECT * FROM collate1t1 ORDER BY main.collate1t1.a; + } +} {{} {} 1 1 0x5 5 0x45 69} +do_test collate1-3.4 { + execsql { + SELECT a as c1, b as c2 FROM collate1t1 ORDER BY c1; + } +} {{} {} 1 1 0x5 5 0x45 69} +do_test collate1-3.5 { + execsql { + SELECT a as c1, b as c2 FROM collate1t1 ORDER BY c1 COLLATE binary; + } +} {{} {} 0x45 69 0x5 5 1 1} +do_test collate1-3.5.1 { + execsql { + SELECT a COLLATE binary as c1, b as c2 + FROM collate1t1 ORDER BY c1; + } +} {{} {} 0x45 69 0x5 5 1 1} +do_test collate1-3.6 { + execsql { + DROP TABLE collate1t1; + } +} {} + +# Update for SQLite version 3. The collate1-4.* test cases were written +# before manifest types were introduced. The following test cases still +# work, due to the 'affinity' mechanism, but they don't prove anything +# about collation sequences. 
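+# (Roughly: c1 is declared "numeric" and c2 "text", so NUMERIC affinity
+# makes the c1 values sort as numbers - 1, 12, 101 - while the c2 values
+# stay text and sort lexically - 1, 101, 12 - regardless of which
+# collation sequence is in effect.)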
+# +do_test collate1-4.0 { + execsql { + CREATE TABLE collate1t1(c1 numeric, c2 text); + INSERT INTO collate1t1 VALUES(1, 1); + INSERT INTO collate1t1 VALUES(12, 12); + INSERT INTO collate1t1 VALUES(NULL, NULL); + INSERT INTO collate1t1 VALUES(101, 101); + } +} {} +do_test collate1-4.1 { + execsql { + SELECT c1 FROM collate1t1 ORDER BY 1; + } +} {{} 1 12 101} +do_test collate1-4.2 { + execsql { + SELECT c2 FROM collate1t1 ORDER BY 1; + } +} {{} 1 101 12} +do_test collate1-4.3 { + execsql { + SELECT c2+0 FROM collate1t1 ORDER BY 1; + } +} {{} 1 12 101} +do_test collate1-4.4 { + execsql { + SELECT c1||'' FROM collate1t1 ORDER BY 1; + } +} {{} 1 101 12} +do_test collate1-4.4.1 { + execsql { + SELECT (c1||'') COLLATE numeric FROM collate1t1 ORDER BY 1; + } +} {{} 1 12 101} +do_test collate1-4.5 { + execsql { + DROP TABLE collate1t1; + } +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate2.test new file mode 100644 index 0000000..28f4a91 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate2.test @@ -0,0 +1,664 @@ +# +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: collate2.test,v 1.5 2007/02/01 23:02:46 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# +# Tests are organised as follows: +# +# collate2-1.* WHERE expressions (sqliteExprIfTrue). +# collate2-2.* WHERE NOT expressions (sqliteExprIfFalse). +# collate2-3.* SELECT expressions (sqliteExprCode). +# collate2-4.* Precedence of collation/data types in binary comparisons +# collate2-5.* JOIN syntax. +# + +# Create a collation type BACKWARDS for use in testing. This collation type +# is similar to the built-in TEXT collation type except the order of +# characters in each string is reversed before the comparison is performed. +db collate BACKWARDS backwards_collate +proc backwards_collate {a b} { + set ra {}; + set rb {} + foreach c [split $a {}] { set ra $c$ra } + foreach c [split $b {}] { set rb $c$rb } + return [string compare $ra $rb] +} + +# The following values are used in these tests: +# NULL aa ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB +# +# The collation orders for each of the tested collation types are: +# +# BINARY: NULL AA AB Aa Ab BA BB Ba Bb aA aB aa ab bA bB ba bb +# NOCASE: NULL aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB +# BACKWARDS: NULL AA BA aA bA AB BB aB bB Aa Ba aa ba Ab Bb ab bb +# +# These tests verify that the default collation type for a column is used +# for comparison operators (<, >, <=, >=, =) involving that column and +# an expression that is not a column with a default collation type. +# +# The collation sequences BINARY and NOCASE are built-in, the BACKWARDS +# collation sequence is implemented by the TCL proc backwards_collate +# above. 
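+# As a quick worked example of BACKWARDS: to compare 'ab' and 'ba' the
+# proc reverses both strings and compares 'ba' with 'ab', so 'ba' sorts
+# ahead of 'ab' - the opposite of the BINARY order shown above.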
+# +do_test collate2-1.0 { + execsql { + CREATE TABLE collate2t1( + a COLLATE BINARY, + b COLLATE NOCASE, + c COLLATE BACKWARDS + ); + INSERT INTO collate2t1 VALUES( NULL, NULL, NULL ); + + INSERT INTO collate2t1 VALUES( 'aa', 'aa', 'aa' ); + INSERT INTO collate2t1 VALUES( 'ab', 'ab', 'ab' ); + INSERT INTO collate2t1 VALUES( 'ba', 'ba', 'ba' ); + INSERT INTO collate2t1 VALUES( 'bb', 'bb', 'bb' ); + + INSERT INTO collate2t1 VALUES( 'aA', 'aA', 'aA' ); + INSERT INTO collate2t1 VALUES( 'aB', 'aB', 'aB' ); + INSERT INTO collate2t1 VALUES( 'bA', 'bA', 'bA' ); + INSERT INTO collate2t1 VALUES( 'bB', 'bB', 'bB' ); + + INSERT INTO collate2t1 VALUES( 'Aa', 'Aa', 'Aa' ); + INSERT INTO collate2t1 VALUES( 'Ab', 'Ab', 'Ab' ); + INSERT INTO collate2t1 VALUES( 'Ba', 'Ba', 'Ba' ); + INSERT INTO collate2t1 VALUES( 'Bb', 'Bb', 'Bb' ); + + INSERT INTO collate2t1 VALUES( 'AA', 'AA', 'AA' ); + INSERT INTO collate2t1 VALUES( 'AB', 'AB', 'AB' ); + INSERT INTO collate2t1 VALUES( 'BA', 'BA', 'BA' ); + INSERT INTO collate2t1 VALUES( 'BB', 'BB', 'BB' ); + } + if {[info exists collate_test_use_index]} { + execsql { + CREATE INDEX collate2t1_i1 ON collate2t1(a); + CREATE INDEX collate2t1_i2 ON collate2t1(b); + CREATE INDEX collate2t1_i3 ON collate2t1(c); + } + } +} {} +do_test collate2-1.1 { + execsql { + SELECT a FROM collate2t1 WHERE a > 'aa' ORDER BY 1; + } +} {ab bA bB ba bb} +do_test collate2-1.1.1 { + execsql { + SELECT a FROM collate2t1 WHERE a COLLATE binary > 'aa' ORDER BY 1; + } +} {ab bA bB ba bb} +do_test collate2-1.1.2 { + execsql { + SELECT a FROM collate2t1 WHERE b COLLATE binary > 'aa' ORDER BY 1; + } +} {ab bA bB ba bb} +do_test collate2-1.1.3 { + execsql { + SELECT a FROM collate2t1 WHERE c COLLATE binary > 'aa' ORDER BY 1; + } +} {ab bA bB ba bb} +do_test collate2-1.2 { + execsql { + SELECT b FROM collate2t1 WHERE b > 'aa' ORDER BY 1, oid; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.1 { + execsql { + SELECT b FROM collate2t1 WHERE a COLLATE nocase > 'aa' + ORDER BY 1, oid; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.2 { + execsql { + SELECT b FROM collate2t1 WHERE b COLLATE nocase > 'aa' + ORDER BY 1, oid; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.3 { + execsql { + SELECT b FROM collate2t1 WHERE c COLLATE nocase > 'aa' + ORDER BY 1, oid; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.3 { + execsql { + SELECT c FROM collate2t1 WHERE c > 'aa' ORDER BY 1; + } +} {ba Ab Bb ab bb} +do_test collate2-1.3.1 { + execsql { + SELECT c FROM collate2t1 WHERE a COLLATE backwards > 'aa' + ORDER BY 1; + } +} {ba Ab Bb ab bb} +do_test collate2-1.3.2 { + execsql { + SELECT c FROM collate2t1 WHERE b COLLATE backwards > 'aa' + ORDER BY 1; + } +} {ba Ab Bb ab bb} +do_test collate2-1.3.3 { + execsql { + SELECT c FROM collate2t1 WHERE c COLLATE backwards > 'aa' + ORDER BY 1; + } +} {ba Ab Bb ab bb} +do_test collate2-1.4 { + execsql { + SELECT a FROM collate2t1 WHERE a < 'aa' ORDER BY 1; + } +} {AA AB Aa Ab BA BB Ba Bb aA aB} +do_test collate2-1.5 { + execsql { + SELECT b FROM collate2t1 WHERE b < 'aa' ORDER BY 1, oid; + } +} {} +do_test collate2-1.6 { + execsql { + SELECT c FROM collate2t1 WHERE c < 'aa' ORDER BY 1; + } +} {AA BA aA bA AB BB aB bB Aa Ba} +do_test collate2-1.7 { + execsql { + SELECT a FROM collate2t1 WHERE a = 'aa'; + } +} {aa} +do_test collate2-1.8 { + execsql { + SELECT b FROM collate2t1 WHERE b = 'aa' ORDER BY oid; + } +} {aa aA Aa AA} +do_test collate2-1.9 { + execsql { + SELECT c FROM collate2t1 WHERE c = 'aa'; + } 
+} {aa} +do_test collate2-1.10 { + execsql { + SELECT a FROM collate2t1 WHERE a >= 'aa' ORDER BY 1; + } +} {aa ab bA bB ba bb} +do_test collate2-1.11 { + execsql { + SELECT b FROM collate2t1 WHERE b >= 'aa' ORDER BY 1, oid; + } +} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.12 { + execsql { + SELECT c FROM collate2t1 WHERE c >= 'aa' ORDER BY 1; + } +} {aa ba Ab Bb ab bb} +do_test collate2-1.13 { + execsql { + SELECT a FROM collate2t1 WHERE a <= 'aa' ORDER BY 1; + } +} {AA AB Aa Ab BA BB Ba Bb aA aB aa} +do_test collate2-1.14 { + execsql { + SELECT b FROM collate2t1 WHERE b <= 'aa' ORDER BY 1, oid; + } +} {aa aA Aa AA} +do_test collate2-1.15 { + execsql { + SELECT c FROM collate2t1 WHERE c <= 'aa' ORDER BY 1; + } +} {AA BA aA bA AB BB aB bB Aa Ba aa} +do_test collate2-1.16 { + execsql { + SELECT a FROM collate2t1 WHERE a BETWEEN 'Aa' AND 'Bb' ORDER BY 1; + } +} {Aa Ab BA BB Ba Bb} +do_test collate2-1.17 { + execsql { + SELECT b FROM collate2t1 WHERE b BETWEEN 'Aa' AND 'Bb' ORDER BY 1, oid; + } +} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.18 { + execsql { + SELECT c FROM collate2t1 WHERE c BETWEEN 'Aa' AND 'Bb' ORDER BY 1; + } +} {Aa Ba aa ba Ab Bb} +do_test collate2-1.19 { + execsql { + SELECT a FROM collate2t1 WHERE + CASE a WHEN 'aa' THEN 1 ELSE 0 END + ORDER BY 1, oid; + } +} {aa} +do_test collate2-1.20 { + execsql { + SELECT b FROM collate2t1 WHERE + CASE b WHEN 'aa' THEN 1 ELSE 0 END + ORDER BY 1, oid; + } +} {aa aA Aa AA} +do_test collate2-1.21 { + execsql { + SELECT c FROM collate2t1 WHERE + CASE c WHEN 'aa' THEN 1 ELSE 0 END + ORDER BY 1, oid; + } +} {aa} + +ifcapable subquery { + do_test collate2-1.22 { + execsql { + SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb') ORDER BY 1, oid; + } + } {aa bb} + do_test collate2-1.23 { + execsql { + SELECT b FROM collate2t1 WHERE b IN ('aa', 'bb') ORDER BY 1, oid; + } + } {aa aA Aa AA bb bB Bb BB} + do_test collate2-1.24 { + execsql { + SELECT c FROM collate2t1 WHERE c IN ('aa', 'bb') ORDER BY 1, oid; + } + } {aa bb} + do_test collate2-1.25 { + execsql { + SELECT a FROM collate2t1 + WHERE a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {aa bb} + do_test collate2-1.26 { + execsql { + SELECT b FROM collate2t1 + WHERE b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {aa bb aA bB Aa Bb AA BB} + do_test collate2-1.27 { + execsql { + SELECT c FROM collate2t1 + WHERE c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {aa bb} +} ;# ifcapable subquery + +do_test collate2-2.1 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a > 'aa' ORDER BY 1; + } +} {AA AB Aa Ab BA BB Ba Bb aA aB aa} +do_test collate2-2.2 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b > 'aa' ORDER BY 1, oid; + } +} {aa aA Aa AA} +do_test collate2-2.3 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c > 'aa' ORDER BY 1; + } +} {AA BA aA bA AB BB aB bB Aa Ba aa} +do_test collate2-2.4 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a < 'aa' ORDER BY 1; + } +} {aa ab bA bB ba bb} +do_test collate2-2.5 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b < 'aa' ORDER BY 1, oid; + } +} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-2.6 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c < 'aa' ORDER BY 1; + } +} {aa ba Ab Bb ab bb} +do_test collate2-2.7 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a = 'aa'; + } +} {ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB} +do_test collate2-2.8 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b = 'aa'; + } +} {ab 
ba bb aB bA bB Ab Ba Bb AB BA BB} +do_test collate2-2.9 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c = 'aa'; + } +} {ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB} +do_test collate2-2.10 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a >= 'aa' ORDER BY 1; + } +} {AA AB Aa Ab BA BB Ba Bb aA aB} +do_test collate2-2.11 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b >= 'aa' ORDER BY 1, oid; + } +} {} +do_test collate2-2.12 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c >= 'aa' ORDER BY 1; + } +} {AA BA aA bA AB BB aB bB Aa Ba} +do_test collate2-2.13 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a <= 'aa' ORDER BY 1; + } +} {ab bA bB ba bb} +do_test collate2-2.14 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b <= 'aa' ORDER BY 1, oid; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-2.15 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c <= 'aa' ORDER BY 1; + } +} {ba Ab Bb ab bb} +do_test collate2-2.16 { + execsql { + SELECT a FROM collate2t1 WHERE a NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1; + } +} {AA AB aA aB aa ab bA bB ba bb} +do_test collate2-2.17 { + execsql { + SELECT b FROM collate2t1 WHERE b NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1, oid; + } +} {} +do_test collate2-2.18 { + execsql { + SELECT c FROM collate2t1 WHERE c NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1; + } +} {AA BA aA bA AB BB aB bB ab bb} +do_test collate2-2.19 { + execsql { + SELECT a FROM collate2t1 WHERE NOT CASE a WHEN 'aa' THEN 1 ELSE 0 END; + } +} {{} ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB} +do_test collate2-2.20 { + execsql { + SELECT b FROM collate2t1 WHERE NOT CASE b WHEN 'aa' THEN 1 ELSE 0 END; + } +} {{} ab ba bb aB bA bB Ab Ba Bb AB BA BB} +do_test collate2-2.21 { + execsql { + SELECT c FROM collate2t1 WHERE NOT CASE c WHEN 'aa' THEN 1 ELSE 0 END; + } +} {{} ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB} + +ifcapable subquery { + do_test collate2-2.22 { + execsql { + SELECT a FROM collate2t1 WHERE NOT a IN ('aa', 'bb'); + } + } {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB} + do_test collate2-2.23 { + execsql { + SELECT b FROM collate2t1 WHERE NOT b IN ('aa', 'bb'); + } + } {ab ba aB bA Ab Ba AB BA} + do_test collate2-2.24 { + execsql { + SELECT c FROM collate2t1 WHERE NOT c IN ('aa', 'bb'); + } + } {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB} + do_test collate2-2.25 { + execsql { + SELECT a FROM collate2t1 + WHERE NOT a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB} + do_test collate2-2.26 { + execsql { + SELECT b FROM collate2t1 + WHERE NOT b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {ab ba aB bA Ab Ba AB BA} + do_test collate2-2.27 { + execsql { + SELECT c FROM collate2t1 + WHERE NOT c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')); + } + } {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB} +} + +do_test collate2-3.1 { + execsql { + SELECT a > 'aa' FROM collate2t1; + } +} {{} 0 1 1 1 0 0 1 1 0 0 0 0 0 0 0 0} +do_test collate2-3.2 { + execsql { + SELECT b > 'aa' FROM collate2t1; + } +} {{} 0 1 1 1 0 1 1 1 0 1 1 1 0 1 1 1} +do_test collate2-3.3 { + execsql { + SELECT c > 'aa' FROM collate2t1; + } +} {{} 0 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0} +do_test collate2-3.4 { + execsql { + SELECT a < 'aa' FROM collate2t1; + } +} {{} 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1} +do_test collate2-3.5 { + execsql { + SELECT b < 'aa' FROM collate2t1; + } +} {{} 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} +do_test collate2-3.6 { + execsql { + SELECT c < 'aa' FROM collate2t1; + } +} {{} 0 0 0 0 1 1 1 1 1 0 1 0 1 1 1 1} +do_test collate2-3.7 
{ + execsql { + SELECT a = 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} +do_test collate2-3.8 { + execsql { + SELECT b = 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0} +do_test collate2-3.9 { + execsql { + SELECT c = 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} +do_test collate2-3.10 { + execsql { + SELECT a <= 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1} +do_test collate2-3.11 { + execsql { + SELECT b <= 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0} +do_test collate2-3.12 { + execsql { + SELECT c <= 'aa' FROM collate2t1; + } +} {{} 1 0 0 0 1 1 1 1 1 0 1 0 1 1 1 1} +do_test collate2-3.13 { + execsql { + SELECT a >= 'aa' FROM collate2t1; + } +} {{} 1 1 1 1 0 0 1 1 0 0 0 0 0 0 0 0} +do_test collate2-3.14 { + execsql { + SELECT b >= 'aa' FROM collate2t1; + } +} {{} 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1} +do_test collate2-3.15 { + execsql { + SELECT c >= 'aa' FROM collate2t1; + } +} {{} 1 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0} +do_test collate2-3.16 { + execsql { + SELECT a BETWEEN 'Aa' AND 'Bb' FROM collate2t1; + } +} {{} 0 0 0 0 0 0 0 0 1 1 1 1 0 0 1 1} +do_test collate2-3.17 { + execsql { + SELECT b BETWEEN 'Aa' AND 'Bb' FROM collate2t1; + } +} {{} 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1} +do_test collate2-3.18 { + execsql { + SELECT c BETWEEN 'Aa' AND 'Bb' FROM collate2t1; + } +} {{} 1 0 1 0 0 0 0 0 1 1 1 1 0 0 0 0} +do_test collate2-3.19 { + execsql { + SELECT CASE a WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1; + } +} {0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} +do_test collate2-3.20 { + execsql { + SELECT CASE b WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1; + } +} {0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0} +do_test collate2-3.21 { + execsql { + SELECT CASE c WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1; + } +} {0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} + +ifcapable subquery { + do_test collate2-3.22 { + execsql { + SELECT a IN ('aa', 'bb') FROM collate2t1; + } + } {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0} + do_test collate2-3.23 { + execsql { + SELECT b IN ('aa', 'bb') FROM collate2t1; + } + } {{} 1 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1} + do_test collate2-3.24 { + execsql { + SELECT c IN ('aa', 'bb') FROM collate2t1; + } + } {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0} + do_test collate2-3.25 { + execsql { + SELECT a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')) + FROM collate2t1; + } + } {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0} + do_test collate2-3.26 { + execsql { + SELECT b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')) + FROM collate2t1; + } + } {{} 1 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1} + do_test collate2-3.27 { + execsql { + SELECT c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb')) + FROM collate2t1; + } + } {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0} +} + +do_test collate2-4.0 { + execsql { + CREATE TABLE collate2t2(b COLLATE binary); + CREATE TABLE collate2t3(b text); + INSERT INTO collate2t2 VALUES('aa'); + INSERT INTO collate2t3 VALUES('aa'); + } +} {} + +# Test that when both sides of a binary comparison operator have +# default collation types, the collate type for the leftmost term +# is used. +do_test collate2-4.1 { + execsql { + SELECT collate2t1.a FROM collate2t1, collate2t2 + WHERE collate2t1.b = collate2t2.b; + } +} {aa aA Aa AA} +do_test collate2-4.2 { + execsql { + SELECT collate2t1.a FROM collate2t1, collate2t2 + WHERE collate2t2.b = collate2t1.b; + } +} {aa} + +# Test that when one side has a default collation type and the other +# does not, the collation type is used. 
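+# (The "||''" in the next two queries is what removes the default
+# collation: the result of the concatenation is a plain expression rather
+# than a column reference, so the only implicit collation left in the
+# comparison is NOCASE from collate2t1.b, and that is what gets used.)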
+do_test collate2-4.3 { + execsql { + SELECT collate2t1.a FROM collate2t1, collate2t3 + WHERE collate2t1.b = collate2t3.b||''; + } +} {aa aA Aa AA} +do_test collate2-4.4 { + execsql { + SELECT collate2t1.a FROM collate2t1, collate2t3 + WHERE collate2t3.b||'' = collate2t1.b; + } +} {aa aA Aa AA} + +do_test collate2-4.5 { + execsql { + DROP TABLE collate2t3; + } +} {} + +# +# Test that the default collation types are used when the JOIN syntax +# is used in place of a WHERE clause. +# +# SQLite transforms the JOIN syntax into a WHERE clause internally, so +# the focus of these tests is to ensure that the table on the left-hand-side +# of the join determines the collation type used. +# +do_test collate2-5.0 { + execsql { + SELECT collate2t1.b FROM collate2t1 JOIN collate2t2 USING (b); + } +} {aa aA Aa AA} +do_test collate2-5.1 { + execsql { + SELECT collate2t1.b FROM collate2t2 JOIN collate2t1 USING (b); + } +} {aa} +do_test collate2-5.2 { + execsql { + SELECT collate2t1.b FROM collate2t1 NATURAL JOIN collate2t2; + } +} {aa aA Aa AA} +do_test collate2-5.3 { + execsql { + SELECT collate2t1.b FROM collate2t2 NATURAL JOIN collate2t1; + } +} {aa} +do_test collate2-5.4 { + execsql { + SELECT collate2t2.b FROM collate2t1 LEFT OUTER JOIN collate2t2 USING (b) order by collate2t1.oid; + } +} {{} aa {} {} {} aa {} {} {} aa {} {} {} aa {} {} {}} +do_test collate2-5.5 { + execsql { + SELECT collate2t1.b, collate2t2.b FROM collate2t2 LEFT OUTER JOIN collate2t1 USING (b); + } +} {aa aa} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate3.test new file mode 100644 index 0000000..ad52782 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate3.test @@ -0,0 +1,429 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: collate3.test,v 1.11 2005/09/08 01:58:43 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# +# Tests are organised as follows: +# +# collate3.1.* - Errors related to unknown collation sequences. +# collate3.2.* - Errors related to undefined collation sequences. +# collate3.3.* - Writing to a table that has an index with an undefined c.s. +# collate3.4.* - Misc errors. +# collate3.5.* - Collation factory. +# + +# +# These tests ensure that when a user executes a statement with an +# unknown collation sequence an error is returned. +# +do_test collate3-1.0 { + execsql { + CREATE TABLE collate3t1(c1); + } +} {} +do_test collate3-1.1 { + catchsql { + SELECT * FROM collate3t1 ORDER BY 1 collate garbage; + } +} {1 {no such collation sequence: garbage}} +do_test collate3-1.2 { + catchsql { + CREATE TABLE collate3t2(c1 collate garbage); + } +} {1 {no such collation sequence: garbage}} +do_test collate3-1.3 { + catchsql { + CREATE INDEX collate3i1 ON collate3t1(c1 COLLATE garbage); + } +} {1 {no such collation sequence: garbage}} + +execsql { + DROP TABLE collate3t1; +} + +# +# Create a table with a default collation sequence, then close +# and re-open the database without re-registering the collation +# sequence. 
Then make sure the library stops us from using +# the collation sequence in: +# * an explicitly collated ORDER BY +# * an ORDER BY that uses the default collation sequence +# * an expression (=) +# * a CREATE TABLE statement +# * a CREATE INDEX statement that uses a default collation sequence +# * a GROUP BY that uses the default collation sequence +# * a SELECT DISTINCT that uses the default collation sequence +# * Compound SELECTs that uses the default collation sequence +# * An ORDER BY on a compound SELECT with an explicit ORDER BY. +# +do_test collate3-2.0 { + db collate string_compare {string compare} + execsql { + CREATE TABLE collate3t1(c1 COLLATE string_compare, c2); + } + db close + sqlite3 db test.db + expr 0 +} 0 +do_test collate3-2.1 { + catchsql { + SELECT * FROM collate3t1 ORDER BY 1 COLLATE string_compare; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.2 { + catchsql { + SELECT * FROM collate3t1 ORDER BY c1; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.3 { + catchsql { + SELECT * FROM collate3t1 WHERE c1 = 'xxx'; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.4 { + catchsql { + CREATE TABLE collate3t2(c1 COLLATE string_compare); + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.5 { + catchsql { + CREATE INDEX collate3t1_i1 ON collate3t1(c1); + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.6 { + catchsql { + SELECT * FROM collate3t1; + } +} {0 {}} +do_test collate3-2.7.1 { + catchsql { + SELECT count(*) FROM collate3t1 GROUP BY c1; + } +} {1 {no such collation sequence: string_compare}} +# do_test collate3-2.7.2 { +# catchsql { +# SELECT * FROM collate3t1 GROUP BY c1; +# } +# } {1 {GROUP BY may only be used on aggregate queries}} +do_test collate3-2.7.2 { + catchsql { + SELECT * FROM collate3t1 GROUP BY c1; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-2.8 { + catchsql { + SELECT DISTINCT c1 FROM collate3t1; + } +} {1 {no such collation sequence: string_compare}} + +ifcapable compound { + do_test collate3-2.9 { + catchsql { + SELECT c1 FROM collate3t1 UNION SELECT c1 FROM collate3t1; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.10 { + catchsql { + SELECT c1 FROM collate3t1 EXCEPT SELECT c1 FROM collate3t1; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.11 { + catchsql { + SELECT c1 FROM collate3t1 INTERSECT SELECT c1 FROM collate3t1; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.12 { + catchsql { + SELECT c1 FROM collate3t1 UNION ALL SELECT c1 FROM collate3t1; + } + } {0 {}} + do_test collate3-2.13 { +btree_breakpoint + catchsql { + SELECT 10 UNION ALL SELECT 20 ORDER BY 1 COLLATE string_compare; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.14 { + catchsql { + SELECT 10 INTERSECT SELECT 20 ORDER BY 1 COLLATE string_compare; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.15 { + catchsql { + SELECT 10 EXCEPT SELECT 20 ORDER BY 1 COLLATE string_compare; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.16 { + catchsql { + SELECT 10 UNION SELECT 20 ORDER BY 1 COLLATE string_compare; + } + } {1 {no such collation sequence: string_compare}} + do_test collate3-2.17 { + catchsql { + SELECT c1 FROM collate3t1 UNION ALL SELECT c1 FROM collate3t1 ORDER BY 1; + } + } {1 {no such collation sequence: string_compare}} +} ;# ifcapable 
compound + +# +# Create an index that uses a collation sequence then close and +# re-open the database without re-registering the collation +# sequence. Then check that for the table with the index +# * An INSERT fails, +# * An UPDATE on the column with the index fails, +# * An UPDATE on a different column succeeds. +# * A DELETE with a WHERE clause fails +# * A DELETE without a WHERE clause succeeds +# +# Also, ensure that the restrictions tested by collate3-2.* still +# apply after the index has been created. +# +do_test collate3-3.0 { + db collate string_compare {string compare} + execsql { + CREATE INDEX collate3t1_i1 ON collate3t1(c1); + INSERT INTO collate3t1 VALUES('xxx', 'yyy'); + } + db close + sqlite3 db test.db + expr 0 +} 0 +db eval {select * from collate3t1} +do_test collate3-3.1 { + catchsql { + INSERT INTO collate3t1 VALUES('xxx', 0); + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.2 { + catchsql { + UPDATE collate3t1 SET c1 = 'xxx'; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.3 { + catchsql { + UPDATE collate3t1 SET c2 = 'xxx'; + } +} {0 {}} +do_test collate3-3.4 { + catchsql { + DELETE FROM collate3t1 WHERE 1; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.5 { + catchsql { + SELECT * FROM collate3t1; + } +} {0 {xxx xxx}} +do_test collate3-3.6 { + catchsql { + DELETE FROM collate3t1; + } +} {0 {}} +ifcapable {integrityck} { + do_test collate3-3.8 { + catchsql { + PRAGMA integrity_check + } + } {1 {no such collation sequence: string_compare}} +} +do_test collate3-3.9 { + catchsql { + SELECT * FROM collate3t1; + } +} {0 {}} +do_test collate3-3.10 { + catchsql { + SELECT * FROM collate3t1 ORDER BY 1 COLLATE string_compare; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.11 { + catchsql { + SELECT * FROM collate3t1 ORDER BY c1; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.12 { + catchsql { + SELECT * FROM collate3t1 WHERE c1 = 'xxx'; + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.13 { + catchsql { + CREATE TABLE collate3t2(c1 COLLATE string_compare); + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.14 { + catchsql { + CREATE INDEX collate3t1_i2 ON collate3t1(c1); + } +} {1 {no such collation sequence: string_compare}} +do_test collate3-3.15 { + execsql { + DROP TABLE collate3t1; + } +} {} + +# Check we can create an index that uses an explicit collation +# sequence and then close and re-open the database. +do_test collate3-4.6 { + db collate user_defined "string compare" + execsql { + CREATE TABLE collate3t1(a, b); + INSERT INTO collate3t1 VALUES('hello', NULL); + CREATE INDEX collate3i1 ON collate3t1(a COLLATE user_defined); + } +} {} +do_test collate3-4.7 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined; + } +} {1 {no such collation sequence: user_defined}} +do_test collate3-4.8 { + db collate user_defined "string compare" + catchsql { + SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined; + } +} {0 {hello {}}} +do_test collate3-4.8 { + db close + lindex [catch { + sqlite3 db test.db + }] 0 +} {0} +do_test collate3-4.8 { + execsql { + DROP TABLE collate3t1; + } +} {} + +# Compare strings as numbers. 
+proc numeric_compare {lhs rhs} { + if {$rhs > $lhs} { + set res -1 + } else { + set res [expr ($lhs > $rhs)?1:0] + } + return $res +} + +# Check we can create a view that uses an explicit collation +# sequence and then close and re-open the database. +ifcapable view { +do_test collate3-4.9 { + db collate user_defined numeric_compare + execsql { + CREATE TABLE collate3t1(a, b); + INSERT INTO collate3t1 VALUES('2', NULL); + INSERT INTO collate3t1 VALUES('101', NULL); + INSERT INTO collate3t1 VALUES('12', NULL); + CREATE VIEW collate3v1 AS SELECT * FROM collate3t1 + ORDER BY 1 COLLATE user_defined; + SELECT * FROM collate3v1; + } +} {2 {} 12 {} 101 {}} +do_test collate3-4.10 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM collate3v1; + } +} {1 {no such collation sequence: user_defined}} +do_test collate3-4.11 { + db collate user_defined numeric_compare + catchsql { + SELECT * FROM collate3v1; + } +} {0 {2 {} 12 {} 101 {}}} +do_test collate3-4.12 { + execsql { + DROP TABLE collate3t1; + } +} {} +} ;# ifcapable view + +# +# Test the collation factory. In the code, the "no such collation sequence" +# message is only generated in two places. So these tests just test that +# the collation factory can be called once from each of those points. +# +do_test collate3-5.0 { + catchsql { + CREATE TABLE collate3t1(a); + INSERT INTO collate3t1 VALUES(10); + SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk; + } +} {1 {no such collation sequence: unk}} +do_test collate3-5.1 { + set ::cfact_cnt 0 + proc cfact {nm} { + db collate $nm {string compare} + incr ::cfact_cnt + } + db collation_needed cfact +} {} +do_test collate3-5.2 { + catchsql { + SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk; + } +} {0 10} +do_test collate3-5.3 { + set ::cfact_cnt +} {1} +do_test collate3-5.4 { + catchsql { + SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk; + } +} {0 10} +do_test collate3-5.5 { + set ::cfact_cnt +} {1} +do_test collate3-5.6 { + catchsql { + SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk; + } +} {0 10} +do_test collate3-5.7 { + execsql { + DROP TABLE collate3t1; + CREATE TABLE collate3t1(a COLLATE unk); + } + db close + sqlite3 db test.db + catchsql { + SELECT a FROM collate3t1 ORDER BY 1; + } +} {1 {no such collation sequence: unk}} +do_test collate3-5.8 { + set ::cfact_cnt 0 + proc cfact {nm} { + db collate $nm {string compare} + incr ::cfact_cnt + } + db collation_needed cfact + catchsql { + SELECT a FROM collate3t1 ORDER BY 1; + } +} {0 {}} + +do_test collate3-5.9 { + execsql { + DROP TABLE collate3t1; + } +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate4.test new file mode 100644 index 0000000..7dcd32d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate4.test @@ -0,0 +1,700 @@ +# +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: collate4.test,v 1.8 2005/04/01 10:47:40 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db collate TEXT text_collate +proc text_collate {a b} { + return [string compare $a $b] +} + +# Do an SQL statement. 
Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# This procedure executes the SQL. Then it checks the generated program +# for the SQL and appends a "nosort" to the result if the program contains the +# SortCallback opcode. If the program does not contain the SortCallback +# opcode it appends "sort" +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# +# Test cases are organized roughly as follows: +# +# collate4-1.* ORDER BY. +# collate4-2.* WHERE clauses. +# collate4-3.* constraints (primary key, unique). +# collate4-4.* simple min() or max() queries. +# collate4-5.* REINDEX command +# collate4-6.* INTEGER PRIMARY KEY indices. +# + +# +# These tests - collate4-1.* - check that indices are correctly +# selected or not selected to implement ORDER BY clauses when +# user defined collation sequences are involved. +# +# Because these tests also exercise all the different ways indices +# can be created, they also serve to verify that indices are correctly +# initialised with user-defined collation sequences when they are +# created. +# +# Tests named collate4-1.1.* use indices with a single column. Tests +# collate4-1.2.* use indices with two columns. +# +do_test collate4-1.1.0 { + execsql { + CREATE TABLE collate4t1(a COLLATE NOCASE, b COLLATE TEXT); + INSERT INTO collate4t1 VALUES( 'a', 'a' ); + INSERT INTO collate4t1 VALUES( 'b', 'b' ); + INSERT INTO collate4t1 VALUES( NULL, NULL ); + INSERT INTO collate4t1 VALUES( 'B', 'B' ); + INSERT INTO collate4t1 VALUES( 'A', 'A' ); + CREATE INDEX collate4i1 ON collate4t1(a); + CREATE INDEX collate4i2 ON collate4t1(b); + } +} {} +do_test collate4-1.1.1 { + cksort {SELECT a FROM collate4t1 ORDER BY a} +} {{} a A b B nosort} +do_test collate4-1.1.2 { + cksort {SELECT a FROM collate4t1 ORDER BY a COLLATE NOCASE} +} {{} a A b B nosort} +do_test collate4-1.1.3 { + cksort {SELECT a FROM collate4t1 ORDER BY a COLLATE TEXT} +} {{} A B a b sort} +do_test collate4-1.1.4 { + cksort {SELECT b FROM collate4t1 ORDER BY b} +} {{} A B a b nosort} +do_test collate4-1.1.5 { + cksort {SELECT b FROM collate4t1 ORDER BY b COLLATE TEXT} +} {{} A B a b nosort} +do_test collate4-1.1.6 { + cksort {SELECT b FROM collate4t1 ORDER BY b COLLATE NOCASE} +} {{} a A b B sort} + +do_test collate4-1.1.7 { + execsql { + CREATE TABLE collate4t2( + a PRIMARY KEY COLLATE NOCASE, + b UNIQUE COLLATE TEXT + ); + INSERT INTO collate4t2 VALUES( 'a', 'a' ); + INSERT INTO collate4t2 VALUES( NULL, NULL ); + INSERT INTO collate4t2 VALUES( 'B', 'B' ); + } +} {} +do_test collate4-1.1.8 { + cksort {SELECT a FROM collate4t2 ORDER BY a} +} {{} a B nosort} +do_test collate4-1.1.9 { + cksort {SELECT a FROM collate4t2 ORDER BY a COLLATE NOCASE} +} {{} a B nosort} +do_test collate4-1.1.10 { + cksort {SELECT a FROM collate4t2 ORDER BY a COLLATE TEXT} +} {{} B a sort} +do_test collate4-1.1.11 { + cksort {SELECT b FROM collate4t2 ORDER BY b} +} {{} B a nosort} +do_test collate4-1.1.12 { + cksort {SELECT b FROM collate4t2 ORDER BY b COLLATE TEXT} +} {{} B a nosort} +do_test collate4-1.1.13 { + cksort {SELECT b FROM collate4t2 ORDER BY b COLLATE NOCASE} +} {{} a B sort} + +do_test collate4-1.1.14 { + execsql { + CREATE TABLE collate4t3( + b COLLATE TEXT, + a COLLATE NOCASE, + UNIQUE(a), PRIMARY KEY(b) + ); + INSERT INTO collate4t3 VALUES( 'a', 'a' ); + INSERT INTO collate4t3 
VALUES( NULL, NULL ); + INSERT INTO collate4t3 VALUES( 'B', 'B' ); + } +} {} +do_test collate4-1.1.15 { + cksort {SELECT a FROM collate4t3 ORDER BY a} +} {{} a B nosort} +do_test collate4-1.1.16 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE NOCASE} +} {{} a B nosort} +do_test collate4-1.1.17 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE TEXT} +} {{} B a sort} +do_test collate4-1.1.18 { + cksort {SELECT b FROM collate4t3 ORDER BY b} +} {{} B a nosort} +do_test collate4-1.1.19 { + cksort {SELECT b FROM collate4t3 ORDER BY b COLLATE TEXT} +} {{} B a nosort} +do_test collate4-1.1.20 { + cksort {SELECT b FROM collate4t3 ORDER BY b COLLATE NOCASE} +} {{} a B sort} + +do_test collate4-1.1.21 { + execsql { + CREATE TABLE collate4t4(a COLLATE NOCASE, b COLLATE TEXT); + INSERT INTO collate4t4 VALUES( 'a', 'a' ); + INSERT INTO collate4t4 VALUES( 'b', 'b' ); + INSERT INTO collate4t4 VALUES( NULL, NULL ); + INSERT INTO collate4t4 VALUES( 'B', 'B' ); + INSERT INTO collate4t4 VALUES( 'A', 'A' ); + CREATE INDEX collate4i3 ON collate4t4(a COLLATE TEXT); + CREATE INDEX collate4i4 ON collate4t4(b COLLATE NOCASE); + } +} {} +do_test collate4-1.1.22 { + cksort {SELECT a FROM collate4t4 ORDER BY a} +} {{} a A b B sort} +do_test collate4-1.1.23 { + cksort {SELECT a FROM collate4t4 ORDER BY a COLLATE NOCASE} +} {{} a A b B sort} +do_test collate4-1.1.24 { + cksort {SELECT a FROM collate4t4 ORDER BY a COLLATE TEXT} +} {{} A B a b nosort} +do_test collate4-1.1.25 { + cksort {SELECT b FROM collate4t4 ORDER BY b} +} {{} A B a b sort} +do_test collate4-1.1.26 { + cksort {SELECT b FROM collate4t4 ORDER BY b COLLATE TEXT} +} {{} A B a b sort} +do_test collate4-1.1.27 { + cksort {SELECT b FROM collate4t4 ORDER BY b COLLATE NOCASE} +} {{} a A b B nosort} + +do_test collate4-1.1.30 { + execsql { + DROP TABLE collate4t1; + DROP TABLE collate4t2; + DROP TABLE collate4t3; + DROP TABLE collate4t4; + } +} {} + +do_test collate4-1.2.0 { + execsql { + CREATE TABLE collate4t1(a COLLATE NOCASE, b COLLATE TEXT); + INSERT INTO collate4t1 VALUES( 'a', 'a' ); + INSERT INTO collate4t1 VALUES( 'b', 'b' ); + INSERT INTO collate4t1 VALUES( NULL, NULL ); + INSERT INTO collate4t1 VALUES( 'B', 'B' ); + INSERT INTO collate4t1 VALUES( 'A', 'A' ); + CREATE INDEX collate4i1 ON collate4t1(a, b); + } +} {} +do_test collate4-1.2.1 { + cksort {SELECT a FROM collate4t1 ORDER BY a} +} {{} A a B b nosort} +do_test collate4-1.2.2 { + cksort {SELECT a FROM collate4t1 ORDER BY a COLLATE nocase} +} {{} A a B b nosort} +do_test collate4-1.2.3 { + cksort {SELECT a FROM collate4t1 ORDER BY a COLLATE text} +} {{} A B a b sort} +do_test collate4-1.2.4 { + cksort {SELECT a FROM collate4t1 ORDER BY a, b} +} {{} A a B b nosort} +do_test collate4-1.2.5 { + cksort {SELECT a FROM collate4t1 ORDER BY a, b COLLATE nocase} +} {{} a A b B sort} +do_test collate4-1.2.6 { + cksort {SELECT a FROM collate4t1 ORDER BY a, b COLLATE text} +} {{} A a B b nosort} + +do_test collate4-1.2.7 { + execsql { + CREATE TABLE collate4t2( + a COLLATE NOCASE, + b COLLATE TEXT, + PRIMARY KEY(a, b) + ); + INSERT INTO collate4t2 VALUES( 'a', 'a' ); + INSERT INTO collate4t2 VALUES( NULL, NULL ); + INSERT INTO collate4t2 VALUES( 'B', 'B' ); + } +} {} +do_test collate4-1.2.8 { + cksort {SELECT a FROM collate4t2 ORDER BY a} +} {{} a B nosort} +do_test collate4-1.2.9 { + cksort {SELECT a FROM collate4t2 ORDER BY a COLLATE nocase} +} {{} a B nosort} +do_test collate4-1.2.10 { + cksort {SELECT a FROM collate4t2 ORDER BY a COLLATE text} +} {{} B a sort} +do_test collate4-1.2.11 { + 
cksort {SELECT a FROM collate4t2 ORDER BY a, b} +} {{} a B nosort} +do_test collate4-1.2.12 { + cksort {SELECT a FROM collate4t2 ORDER BY a, b COLLATE nocase} +} {{} a B sort} +do_test collate4-1.2.13 { + cksort {SELECT a FROM collate4t2 ORDER BY a, b COLLATE text} +} {{} a B nosort} + +do_test collate4-1.2.14 { + execsql { + CREATE TABLE collate4t3(a COLLATE NOCASE, b COLLATE TEXT); + INSERT INTO collate4t3 VALUES( 'a', 'a' ); + INSERT INTO collate4t3 VALUES( 'b', 'b' ); + INSERT INTO collate4t3 VALUES( NULL, NULL ); + INSERT INTO collate4t3 VALUES( 'B', 'B' ); + INSERT INTO collate4t3 VALUES( 'A', 'A' ); + CREATE INDEX collate4i2 ON collate4t3(a COLLATE TEXT, b COLLATE NOCASE); + } +} {} +do_test collate4-1.2.15 { + cksort {SELECT a FROM collate4t3 ORDER BY a} +} {{} a A b B sort} +do_test collate4-1.2.16 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE nocase} +} {{} a A b B sort} +do_test collate4-1.2.17 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text} +} {{} A B a b nosort} +do_test collate4-1.2.18 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text, b} +} {{} A B a b sort} +do_test collate4-1.2.19 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text, b COLLATE nocase} +} {{} A B a b nosort} +do_test collate4-1.2.20 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text, b COLLATE text} +} {{} A B a b sort} +do_test collate4-1.2.21 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text DESC} +} {b a B A {} nosort} +do_test collate4-1.2.22 { + cksort {SELECT a FROM collate4t3 ORDER BY a COLLATE text DESC, b} +} {b a B A {} sort} +do_test collate4-1.2.23 { + cksort {SELECT a FROM collate4t3 + ORDER BY a COLLATE text DESC, b COLLATE nocase} +} {b a B A {} sort} +do_test collate4-1.2.24 { + cksort {SELECT a FROM collate4t3 + ORDER BY a COLLATE text DESC, b COLLATE nocase DESC} +} {b a B A {} nosort} + +do_test collate4-1.2.25 { + execsql { + DROP TABLE collate4t1; + DROP TABLE collate4t2; + DROP TABLE collate4t3; + } +} {} + +# +# These tests - collate4-2.* - check that indices are correctly +# selected or not selected to implement WHERE clauses when user +# defined collation sequences are involved. +# +# Indices may optimise WHERE clauses using <, >, <=, >=, = or IN +# operators. 
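+#
+# As a rough sketch of the matching rule (the names "demo" and "demo_i1"
+# are hypothetical, not taken from this script): an index is only usable
+# for a comparison that resolves to the same collation sequence the index
+# was created with.
+#
+#   CREATE TABLE demo(x COLLATE NOCASE);
+#   CREATE INDEX demo_i1 ON demo(x);                    -- a NOCASE index
+#   SELECT * FROM demo WHERE x = 'abc';                 -- NOCASE: demo_i1 may be used
+#   SELECT * FROM demo WHERE x = 'abc' COLLATE BINARY;  -- BINARY: full table scan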
+# +do_test collate4-2.1.0 { + execsql { + CREATE TABLE collate4t1(a COLLATE NOCASE); + CREATE TABLE collate4t2(b COLLATE TEXT); + + INSERT INTO collate4t1 VALUES('a'); + INSERT INTO collate4t1 VALUES('A'); + INSERT INTO collate4t1 VALUES('b'); + INSERT INTO collate4t1 VALUES('B'); + INSERT INTO collate4t1 VALUES('c'); + INSERT INTO collate4t1 VALUES('C'); + INSERT INTO collate4t1 VALUES('d'); + INSERT INTO collate4t1 VALUES('D'); + INSERT INTO collate4t1 VALUES('e'); + INSERT INTO collate4t1 VALUES('D'); + + INSERT INTO collate4t2 VALUES('A'); + INSERT INTO collate4t2 VALUES('Z'); + } +} {} +do_test collate4-2.1.1 { + count { + SELECT * FROM collate4t2, collate4t1 WHERE a = b; + } +} {A a A A 19} +do_test collate4-2.1.2 { + execsql { + CREATE INDEX collate4i1 ON collate4t1(a); + } + count { + SELECT * FROM collate4t2, collate4t1 WHERE a = b; + } +} {A a A A 5} +do_test collate4-2.1.3 { + count { + SELECT * FROM collate4t2, collate4t1 WHERE b = a; + } +} {A A 19} +do_test collate4-2.1.4 { + execsql { + DROP INDEX collate4i1; + CREATE INDEX collate4i1 ON collate4t1(a COLLATE TEXT); + } + count { + SELECT * FROM collate4t2, collate4t1 WHERE a = b; + } +} {A a A A 19} +do_test collate4-2.1.5 { + count { + SELECT * FROM collate4t2, collate4t1 WHERE b = a; + } +} {A A 4} +ifcapable subquery { + do_test collate4-2.1.6 { + count { + SELECT a FROM collate4t1 WHERE a IN (SELECT * FROM collate4t2); + } + } {a A 10} + do_test collate4-2.1.7 { + execsql { + DROP INDEX collate4i1; + CREATE INDEX collate4i1 ON collate4t1(a); + } + count { + SELECT a FROM collate4t1 WHERE a IN (SELECT * FROM collate4t2); + } + } {a A 6} + do_test collate4-2.1.8 { + count { + SELECT a FROM collate4t1 WHERE a IN ('z', 'a'); + } + } {a A 5} + do_test collate4-2.1.9 { + execsql { + DROP INDEX collate4i1; + CREATE INDEX collate4i1 ON collate4t1(a COLLATE TEXT); + } + count { + SELECT a FROM collate4t1 WHERE a IN ('z', 'a'); + } + } {a A 9} +} +do_test collate4-2.1.10 { + execsql { + DROP TABLE collate4t1; + DROP TABLE collate4t2; + } +} {} + +do_test collate4-2.2.0 { + execsql { + CREATE TABLE collate4t1(a COLLATE nocase, b COLLATE text, c); + CREATE TABLE collate4t2(a COLLATE nocase, b COLLATE text, c COLLATE TEXT); + + INSERT INTO collate4t1 VALUES('0', '0', '0'); + INSERT INTO collate4t1 VALUES('0', '0', '1'); + INSERT INTO collate4t1 VALUES('0', '1', '0'); + INSERT INTO collate4t1 VALUES('0', '1', '1'); + INSERT INTO collate4t1 VALUES('1', '0', '0'); + INSERT INTO collate4t1 VALUES('1', '0', '1'); + INSERT INTO collate4t1 VALUES('1', '1', '0'); + INSERT INTO collate4t1 VALUES('1', '1', '1'); + insert into collate4t2 SELECT * FROM collate4t1; + } +} {} +do_test collate4-2.2.1 { + count { + SELECT * FROM collate4t2 NATURAL JOIN collate4t1; + } +} {0 0 0 0 0 1 0 1 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 63} +do_test collate4-2.2.1b { + execsql { + CREATE INDEX collate4i1 ON collate4t1(a, b, c); + } + count { + SELECT * FROM collate4t2 NATURAL JOIN collate4t1; + } +} {0 0 0 0 0 1 0 1 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 29} +do_test collate4-2.2.2 { + execsql { + DROP INDEX collate4i1; + CREATE INDEX collate4i1 ON collate4t1(a, b, c COLLATE text); + } + count { + SELECT * FROM collate4t2 NATURAL JOIN collate4t1; + } +} {0 0 0 0 0 1 0 1 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 22} + +do_test collate4-2.2.10 { + execsql { + DROP TABLE collate4t1; + DROP TABLE collate4t2; + } +} {} + +# +# These tests - collate4-3.* verify that indices that implement +# UNIQUE and PRIMARY KEY constraints operate correctly with user +# defined collation sequences. 
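+#
+# In outline (hypothetical table name, for illustration only): a UNIQUE
+# constraint on a NOCASE column compares keys with NOCASE, so two values
+# that differ only in case count as duplicates.
+#
+#   CREATE TABLE demo(x UNIQUE COLLATE NOCASE);
+#   INSERT INTO demo VALUES('abc');   -- succeeds
+#   INSERT INTO demo VALUES('ABC');   -- fails: same key under NOCASE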
+# +do_test collate4-3.0 { + execsql { + CREATE TABLE collate4t1(a PRIMARY KEY COLLATE NOCASE); + } +} {} +do_test collate4-3.1 { + catchsql { + INSERT INTO collate4t1 VALUES('abc'); + INSERT INTO collate4t1 VALUES('ABC'); + } +} {1 {column a is not unique}} +do_test collate4-3.2 { + execsql { + SELECT * FROM collate4t1; + } +} {abc} +do_test collate4-3.3 { + catchsql { + INSERT INTO collate4t1 SELECT upper(a) FROM collate4t1; + } +} {1 {column a is not unique}} +do_test collate4-3.4 { + catchsql { + INSERT INTO collate4t1 VALUES(1); + UPDATE collate4t1 SET a = 'abc'; + } +} {1 {column a is not unique}} +do_test collate4-3.5 { + execsql { + DROP TABLE collate4t1; + CREATE TABLE collate4t1(a COLLATE NOCASE UNIQUE); + } +} {} +do_test collate4-3.6 { + catchsql { + INSERT INTO collate4t1 VALUES('abc'); + INSERT INTO collate4t1 VALUES('ABC'); + } +} {1 {column a is not unique}} +do_test collate4-3.7 { + execsql { + SELECT * FROM collate4t1; + } +} {abc} +do_test collate4-3.8 { + catchsql { + INSERT INTO collate4t1 SELECT upper(a) FROM collate4t1; + } +} {1 {column a is not unique}} +do_test collate4-3.9 { + catchsql { + INSERT INTO collate4t1 VALUES(1); + UPDATE collate4t1 SET a = 'abc'; + } +} {1 {column a is not unique}} +do_test collate4-3.10 { + execsql { + DROP TABLE collate4t1; + CREATE TABLE collate4t1(a); + CREATE UNIQUE INDEX collate4i1 ON collate4t1(a COLLATE NOCASE); + } +} {} +do_test collate4-3.11 { + catchsql { + INSERT INTO collate4t1 VALUES('abc'); + INSERT INTO collate4t1 VALUES('ABC'); + } +} {1 {column a is not unique}} +do_test collate4-3.12 { + execsql { + SELECT * FROM collate4t1; + } +} {abc} +do_test collate4-3.13 { + catchsql { + INSERT INTO collate4t1 SELECT upper(a) FROM collate4t1; + } +} {1 {column a is not unique}} +do_test collate4-3.14 { + catchsql { + INSERT INTO collate4t1 VALUES(1); + UPDATE collate4t1 SET a = 'abc'; + } +} {1 {column a is not unique}} + +do_test collate4-3.15 { + execsql { + DROP TABLE collate4t1; + } +} {} + +# Mimic the SQLite 2 collation type NUMERIC. +db collate numeric numeric_collate +proc numeric_collate {lhs rhs} { + if {$lhs == $rhs} {return 0} + return [expr ($lhs>$rhs)?1:-1] +} + +# +# These tests - collate4-4.* check that min() and max() only ever +# use indices constructed with built-in collation type numeric. +# +# CHANGED: min() and max() now use the collation type. If there +# is an indice that can be used, it is used. +# +do_test collate4-4.0 { + execsql { + CREATE TABLE collate4t1(a COLLATE TEXT); + INSERT INTO collate4t1 VALUES('2'); + INSERT INTO collate4t1 VALUES('10'); + INSERT INTO collate4t1 VALUES('20'); + INSERT INTO collate4t1 VALUES('104'); + } +} {} +do_test collate4-4.1 { + count { + SELECT max(a) FROM collate4t1 + } +} {20 3} +do_test collate4-4.2 { + count { + SELECT min(a) FROM collate4t1 + } +} {10 3} +do_test collate4-4.3 { + # Test that the index with collation type TEXT is used. + execsql { + CREATE INDEX collate4i1 ON collate4t1(a); + } + count { + SELECT min(a) FROM collate4t1; + } +} {10 2} +do_test collate4-4.4 { + count { + SELECT max(a) FROM collate4t1; + } +} {20 1} +do_test collate4-4.5 { + # Test that the index with collation type NUMERIC is not used. 
+ execsql { + DROP INDEX collate4i1; + CREATE INDEX collate4i1 ON collate4t1(a COLLATE NUMERIC); + } + count { + SELECT min(a) FROM collate4t1; + } +} {10 3} +do_test collate4-4.6 { + count { + SELECT max(a) FROM collate4t1; + } +} {20 3} +do_test collate4-4.7 { + execsql { + DROP TABLE collate4t1; + } +} {} + +# Also test the scalar min() and max() functions. +# +do_test collate4-4.8 { + execsql { + CREATE TABLE collate4t1(a COLLATE TEXT, b COLLATE NUMERIC); + INSERT INTO collate4t1 VALUES('11', '101'); + INSERT INTO collate4t1 VALUES('101', '11') + } +} {} +do_test collate4-4.9 { + execsql { + SELECT max(a, b) FROM collate4t1; + } +} {11 11} +do_test collate4-4.10 { + execsql { + SELECT max(b, a) FROM collate4t1; + } +} {101 101} +do_test collate4-4.11 { + execsql { + SELECT max(a, '101') FROM collate4t1; + } +} {11 101} +do_test collate4-4.12 { + execsql { + SELECT max('101', a) FROM collate4t1; + } +} {11 101} +do_test collate4-4.13 { + execsql { + SELECT max(b, '101') FROM collate4t1; + } +} {101 101} +do_test collate4-4.14 { + execsql { + SELECT max('101', b) FROM collate4t1; + } +} {101 101} + +do_test collate4-4.15 { + execsql { + DROP TABLE collate4t1; + } +} {} + +# +# These tests - collate4-6.* - ensure that implicit INTEGER PRIMARY KEY +# indices do not confuse collation sequences. +# +# These indices are never used for sorting in SQLite. And you can't +# create another index on an INTEGER PRIMARY KEY column, so we don't have +# to test that. +# (Revised 2004-Nov-22): The ROWID can be used for sorting now. +# +do_test collate4-6.0 { + execsql { + CREATE TABLE collate4t1(a INTEGER PRIMARY KEY); + INSERT INTO collate4t1 VALUES(101); + INSERT INTO collate4t1 VALUES(10); + INSERT INTO collate4t1 VALUES(15); + } +} {} +do_test collate4-6.1 { + cksort { + SELECT * FROM collate4t1 ORDER BY 1; + } +} {10 15 101 nosort} +do_test collate4-6.2 { + cksort { + SELECT * FROM collate4t1 ORDER BY oid; + } +} {10 15 101 nosort} +do_test collate4-6.3 { + cksort { + SELECT * FROM collate4t1 ORDER BY oid||'' COLLATE TEXT; + } +} {10 101 15 sort} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate5.test new file mode 100644 index 0000000..5e63c1f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate5.test @@ -0,0 +1,270 @@ +# +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this file is testing DISTINCT, UNION, INTERSECT and EXCEPT +# SELECT statements that use user-defined collation sequences. Also +# GROUP BY clauses that use user-defined collation sequences. +# +# $Id: collate5.test,v 1.5 2005/09/07 22:48:16 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# +# Tests are organised as follows: +# collate5-1.* - DISTINCT +# collate5-2.* - Compound SELECT +# collate5-3.* - ORDER BY on compound SELECT +# collate5-4.* - GROUP BY + +# Create the collation sequence 'TEXT', purely for aesthetic reasons. The +# test cases in this script could just as easily use BINARY. +db collate TEXT [list string compare] + +# Mimic the SQLite 2 collation type NUMERIC.
+db collate numeric numeric_collate +proc numeric_collate {lhs rhs} { + if {$lhs == $rhs} {return 0} + return [expr ($lhs>$rhs)?1:-1] +} + +# +# These tests - collate5-1.* - focus on the DISTINCT keyword. +# +do_test collate5-1.0 { + execsql { + CREATE TABLE collate5t1(a COLLATE nocase, b COLLATE text); + + INSERT INTO collate5t1 VALUES('a', 'apple'); + INSERT INTO collate5t1 VALUES('A', 'Apple'); + INSERT INTO collate5t1 VALUES('b', 'banana'); + INSERT INTO collate5t1 VALUES('B', 'banana'); + INSERT INTO collate5t1 VALUES('n', NULL); + INSERT INTO collate5t1 VALUES('N', NULL); + } +} {} +do_test collate5-1.1 { + execsql { + SELECT DISTINCT a FROM collate5t1; + } +} {a b n} +do_test collate5-1.2 { + execsql { + SELECT DISTINCT b FROM collate5t1; + } +} {apple Apple banana {}} +do_test collate5-1.3 { + execsql { + SELECT DISTINCT a, b FROM collate5t1; + } +} {a apple A Apple b banana n {}} + +# The remainder of this file tests compound SELECT statements. +# Omit it if the library is compiled such that they are omitted. +# +ifcapable !compound { + finish_test + return +} + +# +# Tests named collate5-2.* focus on UNION, EXCEPT and INTERSECT +# queries that use user-defined collation sequences. +# +# collate5-2.1.* - UNION +# collate5-2.2.* - INTERSECT +# collate5-2.3.* - EXCEPT +# +do_test collate5-2.0 { + execsql { + CREATE TABLE collate5t2(a COLLATE text, b COLLATE nocase); + + INSERT INTO collate5t2 VALUES('a', 'apple'); + INSERT INTO collate5t2 VALUES('A', 'apple'); + INSERT INTO collate5t2 VALUES('b', 'banana'); + INSERT INTO collate5t2 VALUES('B', 'Banana'); + } +} {} + +do_test collate5-2.1.1 { + execsql { + SELECT a FROM collate5t1 UNION select a FROM collate5t2; + } +} {A B N} +do_test collate5-2.1.2 { + execsql { + SELECT a FROM collate5t2 UNION select a FROM collate5t1; + } +} {A B N a b n} +do_test collate5-2.1.3 { + execsql { + SELECT a, b FROM collate5t1 UNION select a, b FROM collate5t2; + } +} {A Apple A apple B Banana b banana N {}} +do_test collate5-2.1.4 { + execsql { + SELECT a, b FROM collate5t2 UNION select a, b FROM collate5t1; + } +} {A Apple B banana N {} a apple b banana n {}} + +do_test collate5-2.2.1 { + execsql { + SELECT a FROM collate5t1 EXCEPT select a FROM collate5t2; + } +} {N} +do_test collate5-2.2.2 { + execsql { + SELECT a FROM collate5t2 EXCEPT select a FROM collate5t1 WHERE a != 'a'; + } +} {A a} +do_test collate5-2.2.3 { + execsql { + SELECT a, b FROM collate5t1 EXCEPT select a, b FROM collate5t2; + } +} {A Apple N {}} +do_test collate5-2.2.4 { + execsql { + SELECT a, b FROM collate5t2 EXCEPT select a, b FROM collate5t1 + where a != 'a'; + } +} {A apple a apple} + +do_test collate5-2.3.1 { + execsql { + SELECT a FROM collate5t1 INTERSECT select a FROM collate5t2; + } +} {A B} +do_test collate5-2.3.2 { + execsql { + SELECT a FROM collate5t2 INTERSECT select a FROM collate5t1 WHERE a != 'a'; + } +} {B b} +do_test collate5-2.3.3 { + execsql { + SELECT a, b FROM collate5t1 INTERSECT select a, b FROM collate5t2; + } +} {a apple B banana} +do_test collate5-2.3.4 { + execsql { + SELECT a, b FROM collate5t2 INTERSECT select a, b FROM collate5t1; + } +} {A apple B Banana a apple b banana} + +# +# This test ensures performs a UNION operation with a bunch of different +# length records. The goal is to test that the logic that compares records +# for the compound SELECT operators works with record lengths that lie +# either side of the troublesome 256 and 65536 byte marks. 
+# +set ::lens [list \ + 0 1 2 3 4 5 6 7 8 9 \ + 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 \ + 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 \ + 65520 65521 65522 65523 65524 65525 65526 65527 65528 65529 65530 \ + 65531 65532 65533 65534 65535 65536 65537 65538 65539 65540 65541 \ + 65542 65543 65544 65545 65546 65547 65548 65549 65550 65551 ] +do_test collate5-2.4.0 { + execsql { + BEGIN; + CREATE TABLE collate5t3(a, b); + } + foreach ii $::lens { + execsql "INSERT INTO collate5t3 VALUES($ii, '[string repeat a $ii]');" + } + expr [llength [execsql { + COMMIT; + SELECT * FROM collate5t3 UNION SELECT * FROM collate5t3; + }]] / 2 +} [llength $::lens] +do_test collate5-2.4.1 { + execsql {DROP TABLE collate5t3;} +} {} +unset ::lens + +# +# These tests - collate5-3.* - focus on compound SELECT queries that +# feature ORDER BY clauses. +# +do_test collate5-3.0 { + execsql { + SELECT a FROM collate5t1 UNION ALL SELECT a FROM collate5t2 ORDER BY 1; + } +} {a A a A b B b B n N} +do_test collate5-3.1 { + execsql { + SELECT a FROM collate5t2 UNION ALL SELECT a FROM collate5t1 ORDER BY 1; + } +} {A A B B N a a b b n} +do_test collate5-3.2 { + execsql { + SELECT a FROM collate5t1 UNION ALL SELECT a FROM collate5t2 + ORDER BY 1 COLLATE TEXT; + } +} {A A B B N a a b b n} + +do_test collate5-3.3 { + execsql { + CREATE TABLE collate5t_cn(a COLLATE NUMERIC); + CREATE TABLE collate5t_ct(a COLLATE TEXT); + INSERT INTO collate5t_cn VALUES('1'); + INSERT INTO collate5t_cn VALUES('11'); + INSERT INTO collate5t_cn VALUES('101'); + INSERT INTO collate5t_ct SELECT * FROM collate5t_cn; + } +} {} +do_test collate5-3.4 { + execsql { + SELECT a FROM collate5t_cn INTERSECT SELECT a FROM collate5t_ct ORDER BY 1; + } +} {1 11 101} +do_test collate5-3.5 { + execsql { + SELECT a FROM collate5t_ct INTERSECT SELECT a FROM collate5t_cn ORDER BY 1; + } +} {1 101 11} + +do_test collate5-3.20 { + execsql { + DROP TABLE collate5t_cn; + DROP TABLE collate5t_ct; + DROP TABLE collate5t1; + DROP TABLE collate5t2; + } +} {} + +do_test collate5-4.0 { + execsql { + CREATE TABLE collate5t1(a COLLATE NOCASE, b COLLATE NUMERIC); + INSERT INTO collate5t1 VALUES('a', '1'); + INSERT INTO collate5t1 VALUES('A', '1.0'); + INSERT INTO collate5t1 VALUES('b', '2'); + INSERT INTO collate5t1 VALUES('B', '3'); + } +} {} +do_test collate5-4.1 { + string tolower [execsql { + SELECT a, count(*) FROM collate5t1 GROUP BY a; + }] +} {a 2 b 2} +do_test collate5-4.2 { + execsql { + SELECT a, b, count(*) FROM collate5t1 GROUP BY a, b ORDER BY a, b; + } +} {A 1.0 2 b 2 1 B 3 1} +do_test collate5-4.3 { + execsql { + DROP TABLE collate5t1; + } +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate6.test new file mode 100644 index 0000000..d238639 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate6.test @@ -0,0 +1,153 @@ +# +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is collation sequences in concert with triggers. 
+# +# $Id: collate6.test,v 1.4 2007/07/30 14:40:48 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# There are no tests in this file that will work without +# trigger support. +# +ifcapable {!trigger} { + finish_test + return +} + +# Create a case-insensitive collation type NOCASE for use in testing. +# Normally, capital letters are less than their lower-case counterparts. +db collate NOCASE nocase_collate +proc nocase_collate {a b} { + return [string compare -nocase $a $b] +} + +# +# Tests are organized as follows: +# collate6-1.* - triggers. +# + +do_test collate6-1.0 { + execsql { + CREATE TABLE collate6log(a, b); + CREATE TABLE collate6tab(a COLLATE NOCASE, b COLLATE BINARY); + } +} {} + +# Test that the default collation sequence applies to new.* references +# in WHEN clauses. +do_test collate6-1.1 { + execsql { + CREATE TRIGGER collate6trig BEFORE INSERT ON collate6tab + WHEN new.a = 'a' BEGIN + INSERT INTO collate6log VALUES(new.a, new.b); + END; + } +} {} +do_test collate6-1.2 { + execsql { + INSERT INTO collate6tab VALUES('a', 'b'); + SELECT * FROM collate6log; + } +} {a b} +do_test collate6-1.3 { + execsql { + INSERT INTO collate6tab VALUES('A', 'B'); + SELECT * FROM collate6log; + } +} {a b A B} +do_test collate6-1.4 { + execsql { + DROP TRIGGER collate6trig; + DELETE FROM collate6log; + } +} {} + +# Test that the default collation sequence applies to new.* references +# in the body of triggers. +do_test collate6-1.5 { + execsql { + CREATE TRIGGER collate6trig BEFORE INSERT ON collate6tab BEGIN + INSERT INTO collate6log VALUES(new.a='a', new.b='b'); + END; + } +} {} +do_test collate6-1.6 { + execsql { + INSERT INTO collate6tab VALUES('a', 'b'); + SELECT * FROM collate6log; + } +} {1 1} +do_test collate6-1.7 { + execsql { + INSERT INTO collate6tab VALUES('A', 'B'); + SELECT * FROM collate6log; + } +} {1 1 1 0} +do_test collate6-1.8 { + execsql { + DROP TRIGGER collate6trig; + DELETE FROM collate6log; + } +} {} + +do_test collate6-1.9 { + execsql { + DROP TABLE collate6tab; + } +} {} + +# Test that an explicit collation sequence overrides an implicit +# one attached to a 'new' reference. +# +do_test collate6-2.1 { + execsql { + CREATE TABLE abc(a COLLATE binary, b, c); + CREATE TABLE def(a, b, c); + CREATE TRIGGER abc_t1 AFTER INSERT ON abc BEGIN + INSERT INTO def SELECT * FROM abc WHERE a < new.a COLLATE nocase; + END + } +} {} +do_test collate6-2.2 { + execsql { + INSERT INTO abc VALUES('One', 'Two', 'Three'); + INSERT INTO abc VALUES('one', 'two', 'three'); + SELECT * FROM def; + } +} {} +do_test collate6-2.3 { + execsql { + UPDATE abc SET a = 'four' WHERE a = 'one'; + CREATE TRIGGER abc_t2 AFTER UPDATE ON abc BEGIN + INSERT INTO def SELECT * FROM abc WHERE a < new.a COLLATE nocase; + END; + SELECT * FROM def; + } +} {} + +# At one point the 6-3.2 (but not 6-3.1) was causing an assert() to fail. +# +do_test collate6-3.1 { + execsql { + SELECT 1 FROM sqlite_master WHERE name COLLATE nocase = 'hello'; + } +} {} +do_test collate6-3.2 { + execsql { + SELECT 1 FROM sqlite_master WHERE 'hello' = name COLLATE nocase; + } +} {} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate7.test new file mode 100644 index 0000000..0c913e2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate7.test @@ -0,0 +1,73 @@ +# +# 2007 May 7 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the experimental sqlite3_create_collation_v2() +# API. +# +# $Id: collate7.test,v 1.1 2007/05/07 14:58:53 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set ::caseless_del 0 +proc caseless_cmp {zLeft zRight} { + string compare -nocase $zLeft $zRight +} + +do_test collate7-1.1 { + set cmd [list incr ::caseless_del] + sqlite3_create_collation_v2 db CASELESS caseless_cmp $cmd + set ::caseless_del +} {0} +do_test collate7-1.2 { + sqlite_delete_collation db CASELESS + set ::caseless_del +} {1} +do_test collate7-1.3 { + catchsql { + CREATE TABLE abc(a COLLATE CASELESS, b, c); + } +} {1 {no such collation sequence: CASELESS}} +do_test collate7-1.4 { + sqlite3_create_collation_v2 db CASELESS caseless_cmp {incr ::caseless_del} + db close + set ::caseless_del +} {2} + +do_test collate7-2.1 { + file delete -force test.db test.db-journal + sqlite3 db test.db + sqlite3_create_collation_v2 db CASELESS caseless_cmp {incr ::caseless_del} + execsql { + PRAGMA encoding='utf-16'; + CREATE TABLE abc16(a COLLATE CASELESS, b, c); + } db + set ::caseless_del +} {2} +do_test collate7-2.2 { + execsql { + SELECT * FROM abc16 WHERE a < 'abc'; + } + set ::caseless_del +} {2} +do_test collate7-2.3 { + sqlite_delete_collation db CASELESS + set ::caseless_del +} {3} +do_test collate7-2.4 { + catchsql { + SELECT * FROM abc16 WHERE a < 'abc'; + } +} {1 {no such collation sequence: CASELESS}} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/collate8.test b/libraries/sqlite/unix/sqlite-3.5.1/test/collate8.test new file mode 100644 index 0000000..3e90c38 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/collate8.test @@ -0,0 +1,52 @@ +# +# 2007 June 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is making sure collations pass through the +# unary + operator. 
+# +# $Id: collate8.test,v 1.1 2007/06/20 16:13:23 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test collate8-1.1 { + execsql { + CREATE TABLE t1(a TEXT COLLATE nocase); + INSERT INTO t1 VALUES('aaa'); + INSERT INTO t1 VALUES('BBB'); + INSERT INTO t1 VALUES('ccc'); + INSERT INTO t1 VALUES('DDD'); + SELECT a FROM t1 ORDER BY a; + } +} {aaa BBB ccc DDD} +do_test collate8-1.2 { + execsql { + SELECT rowid FROM t1 WHERE a<'ccc' ORDER BY 1 + } +} {1 2} +do_test collate8-1.3 { + execsql { + SELECT rowid FROM t1 WHERE a<'ccc' COLLATE binary ORDER BY 1 + } +} {1 2 4} +do_test collate8-1.4 { + execsql { + SELECT rowid FROM t1 WHERE +a<'ccc' ORDER BY 1 + } +} {1 2} +do_test collate8-1.5 { + execsql { + SELECT a FROM t1 ORDER BY +a + } +} {aaa BBB ccc DDD} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/colmeta.test b/libraries/sqlite/unix/sqlite-3.5.1/test/colmeta.test new file mode 100644 index 0000000..c2513e6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/colmeta.test @@ -0,0 +1,103 @@ +# +# 2006 February 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the sqlite3_table_column_metadata() API. +# +# $Id: colmeta.test,v 1.3 2006/02/10 13:33:31 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !columnmetadata { + finish_test + return +} + +# Set up a schema in the main and temp test databases. 
+do_test colmeta-0 { + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE abc2(a PRIMARY KEY COLLATE NOCASE, b VARCHAR(32), c); + CREATE TABLE abc3(a NOT NULL, b INTEGER PRIMARY KEY, c); + } + ifcapable autoinc { + execsql { + CREATE TABLE abc4(a, b INTEGER PRIMARY KEY AUTOINCREMENT, c); + } + } + ifcapable view { + execsql { + CREATE VIEW v1 AS SELECT * FROM abc2; + } + } +} {} + + +# Return values are of the form: +# +# {<decl-type> <collation> <not null> <primary key> <auto increment>} +# +set tests { + 1 {main abc a} {0 {{} BINARY 0 0 0}} + 2 {{} abc a} {0 {{} BINARY 0 0 0}} + 3 {{} abc2 b} {0 {VARCHAR(32) BINARY 0 0 0}} + 4 {main abc2 b} {0 {VARCHAR(32) BINARY 0 0 0}} + 5 {{} abc2 a} {0 {{} NOCASE 0 1 0}} + 6 {{} abc3 a} {0 {{} BINARY 1 0 0}} + 7 {{} abc3 b} {0 {INTEGER BINARY 0 1 0}} + 13 {main abc rowid} {0 {INTEGER BINARY 0 1 0}} + 14 {main abc3 rowid} {0 {INTEGER BINARY 0 1 0}} + 16 {main abc d} {1 {no such table column: abc.d}} +} +ifcapable autoinc { + set tests [concat $tests { + 8 {{} abc4 b} {0 {INTEGER BINARY 0 1 1}} + 15 {main abc4 rowid} {0 {INTEGER BINARY 0 1 1}} + }] +} +ifcapable view { + set tests [concat $tests { + 9 {{} v1 a} {1 {no such table column: v1.a}} + 10 {main v1 b} {1 {no such table column: v1.b}} + 11 {main v1 badname} {1 {no such table column: v1.badname}} + 12 {main v1 rowid} {1 {no such table column: v1.rowid}} + }] +} + +foreach {tn params results} $tests { + set ::DB [sqlite3_connection_pointer db] + + set tstbody [concat sqlite3_table_column_metadata $::DB $params] + do_test colmeta-$tn.1 { + list [catch $tstbody msg] [set msg] + } $results + + db close + sqlite3 db test.db + + set ::DB [sqlite3_connection_pointer db] + set tstbody [concat sqlite3_table_column_metadata $::DB $params] + do_test colmeta-$tn.2 { + list [catch $tstbody msg] [set msg] + } $results +} + +do_test colmeta-misuse.1 { + db close + set rc [catch { + sqlite3_table_column_metadata $::DB a b c + } msg] + list $rc $msg +} {1 {library routine called out of sequence}} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/conflict.test b/libraries/sqlite/unix/sqlite-3.5.1/test/conflict.test new file mode 100644 index 0000000..6bca1e8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/conflict.test @@ -0,0 +1,763 @@ +# 2002 January 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the conflict resolution extension +# to SQLite. +# +# $Id: conflict.test,v 1.30 2007/08/21 14:27:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !conflict { + finish_test + return +} + +# Create tables for the first group of tests.
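+#
+# As a compact reminder of what the five resolution algorithms do when a
+# uniqueness constraint is violated (the table "demo" is hypothetical and
+# not used by the tests below):
+#
+#   CREATE TABLE demo(x UNIQUE);
+#   INSERT INTO demo VALUES(1);
+#   INSERT OR IGNORE   INTO demo VALUES(1);  -- conflicting row is skipped, no error
+#   INSERT OR REPLACE  INTO demo VALUES(1);  -- pre-existing row is deleted first
+#   INSERT OR FAIL     INTO demo VALUES(1);  -- error; earlier changes by the same statement remain
+#   INSERT OR ABORT    INTO demo VALUES(1);  -- error; the statement's changes are backed out (default)
+#   INSERT OR ROLLBACK INTO demo VALUES(1);  -- error; the whole transaction is rolled back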
+# +do_test conflict-1.0 { + execsql { + CREATE TABLE t1(a, b, c, UNIQUE(a,b)); + CREATE TABLE t2(x); + SELECT c FROM t1 ORDER BY c; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# t3 Number of temporary files created by this test +# +foreach {i cmd t0 t1 t2 t3} { + 1 INSERT 1 {} 1 0 + 2 {INSERT OR IGNORE} 0 3 1 0 + 3 {INSERT OR REPLACE} 0 4 1 0 + 4 REPLACE 0 4 1 0 + 5 {INSERT OR FAIL} 1 {} 1 0 + 6 {INSERT OR ABORT} 1 {} 1 0 + 7 {INSERT OR ROLLBACK} 1 {} {} 0 +} { + do_test conflict-1.$i { + set ::sqlite_opentemp_count 0 + set r0 [catch {execsql [subst { + DELETE FROM t1; + DELETE FROM t2; + INSERT INTO t1 VALUES(1,2,3); + BEGIN; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,4); + }]} r1] + catch {execsql {COMMIT}} + if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + set r3 $::sqlite_opentemp_count + list $r0 $r1 $r2 $r3 + } [list $t0 $t1 $t2 $t3] +} + +# Create tables for the first group of tests. +# +do_test conflict-2.0 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(a,b)); + CREATE TABLE t2(x); + SELECT c FROM t1 ORDER BY c; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# +foreach {i cmd t0 t1 t2} { + 1 INSERT 1 {} 1 + 2 {INSERT OR IGNORE} 0 3 1 + 3 {INSERT OR REPLACE} 0 4 1 + 4 REPLACE 0 4 1 + 5 {INSERT OR FAIL} 1 {} 1 + 6 {INSERT OR ABORT} 1 {} 1 + 7 {INSERT OR ROLLBACK} 1 {} {} +} { + do_test conflict-2.$i { + set r0 [catch {execsql [subst { + DELETE FROM t1; + DELETE FROM t2; + INSERT INTO t1 VALUES(1,2,3); + BEGIN; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,4); + }]} r1] + catch {execsql {COMMIT}} + if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +# Create tables for the first group of tests. 
+# +do_test conflict-3.0 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + CREATE TABLE t1(a, b, c INTEGER, PRIMARY KEY(c), UNIQUE(a,b)); + CREATE TABLE t2(x); + SELECT c FROM t1 ORDER BY c; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# +foreach {i cmd t0 t1 t2} { + 1 INSERT 1 {} 1 + 2 {INSERT OR IGNORE} 0 3 1 + 3 {INSERT OR REPLACE} 0 4 1 + 4 REPLACE 0 4 1 + 5 {INSERT OR FAIL} 1 {} 1 + 6 {INSERT OR ABORT} 1 {} 1 + 7 {INSERT OR ROLLBACK} 1 {} {} +} { + do_test conflict-3.$i { + set r0 [catch {execsql [subst { + DELETE FROM t1; + DELETE FROM t2; + INSERT INTO t1 VALUES(1,2,3); + BEGIN; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,4); + }]} r1] + catch {execsql {COMMIT}} + if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +do_test conflict-4.0 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(x); + SELECT x FROM t2; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# conf1 The conflict resolution algorithm on the UNIQUE constraint +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# +foreach {i conf1 cmd t0 t1 t2} { + 1 {} INSERT 1 {} 1 + 2 REPLACE INSERT 0 4 1 + 3 IGNORE INSERT 0 3 1 + 4 FAIL INSERT 1 {} 1 + 5 ABORT INSERT 1 {} 1 + 6 ROLLBACK INSERT 1 {} {} + 7 REPLACE {INSERT OR IGNORE} 0 3 1 + 8 IGNORE {INSERT OR REPLACE} 0 4 1 + 9 FAIL {INSERT OR IGNORE} 0 3 1 + 10 ABORT {INSERT OR REPLACE} 0 4 1 + 11 ROLLBACK {INSERT OR IGNORE } 0 3 1 +} { + do_test conflict-4.$i { + if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"} + set r0 [catch {execsql [subst { + DROP TABLE t1; + CREATE TABLE t1(a,b,c,UNIQUE(a,b) $conf1); + DELETE FROM t2; + INSERT INTO t1 VALUES(1,2,3); + BEGIN; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,4); + }]} r1] + catch {execsql {COMMIT}} + if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +do_test conflict-5.0 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(x); + SELECT x FROM t2; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# conf1 The conflict resolution algorithm on the NOT NULL constraint +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# +foreach {i conf1 cmd t0 t1 t2} { + 1 {} INSERT 1 {} 1 + 2 REPLACE INSERT 0 5 1 + 3 IGNORE INSERT 0 {} 1 + 4 FAIL INSERT 1 {} 1 + 5 ABORT INSERT 1 {} 1 + 6 ROLLBACK INSERT 1 {} {} + 7 REPLACE {INSERT OR IGNORE} 0 {} 1 + 8 IGNORE {INSERT OR REPLACE} 0 5 1 + 9 FAIL {INSERT OR IGNORE} 0 {} 1 + 10 ABORT {INSERT OR REPLACE} 0 5 1 + 11 ROLLBACK {INSERT OR IGNORE} 0 {} 1 + 12 {} {INSERT OR IGNORE} 0 {} 1 + 13 {} {INSERT OR REPLACE} 0 5 1 + 14 {} {INSERT OR FAIL} 1 {} 1 + 15 {} {INSERT OR ABORT} 1 {} 1 + 16 {} {INSERT OR ROLLBACK} 1 {} {} +} { + if {$t0} {set t1 {t1.c may not be NULL}} + do_test conflict-5.$i { + if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"} + set r0 [catch 
{execsql [subst { + DROP TABLE t1; + CREATE TABLE t1(a,b,c NOT NULL $conf1 DEFAULT 5); + DELETE FROM t2; + BEGIN; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,NULL); + }]} r1] + catch {execsql {COMMIT}} + if {!$r0} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +do_test conflict-6.0 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(a,b,c); + INSERT INTO t2 VALUES(1,2,1); + INSERT INTO t2 VALUES(2,3,2); + INSERT INTO t2 VALUES(3,4,1); + INSERT INTO t2 VALUES(4,5,4); + SELECT c FROM t2 ORDER BY b; + CREATE TABLE t3(x); + INSERT INTO t3 VALUES(1); + } +} {1 2 1 4} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# conf1 The conflict resolution algorithm on the UNIQUE constraint +# cmd An UPDATE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "b" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t3 +# t3 Number of temporary files for tables +# t4 Number of temporary files for statement journals +# +# Update: Since temporary table files are now opened lazily, and none +# of the following tests use large quantities of data, t3 is always 0. +# +foreach {i conf1 cmd t0 t1 t2 t3 t4} { + 1 {} UPDATE 1 {6 7 8 9} 1 0 1 + 2 REPLACE UPDATE 0 {7 6 9} 1 0 0 + 3 IGNORE UPDATE 0 {6 7 3 9} 1 0 0 + 4 FAIL UPDATE 1 {6 7 3 4} 1 0 0 + 5 ABORT UPDATE 1 {1 2 3 4} 1 0 1 + 6 ROLLBACK UPDATE 1 {1 2 3 4} 0 0 0 + 7 REPLACE {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 8 IGNORE {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 + 9 FAIL {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 10 ABORT {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 + 11 ROLLBACK {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 12 {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 13 {} {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 + 14 {} {UPDATE OR FAIL} 1 {6 7 3 4} 1 0 0 + 15 {} {UPDATE OR ABORT} 1 {1 2 3 4} 1 0 1 + 16 {} {UPDATE OR ROLLBACK} 1 {1 2 3 4} 0 0 0 +} { + if {$t0} {set t1 {column a is not unique}} + if {[info exists TEMP_STORE] && $TEMP_STORE>=2} { + set t3 $t4 + } else { + set t3 [expr {$t3+$t4}] + } + do_test conflict-6.$i { + db close + sqlite3 db test.db + if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"} + execsql {pragma temp_store=file} + set ::sqlite_opentemp_count 0 + set r0 [catch {execsql [subst { + DROP TABLE t1; + CREATE TABLE t1(a,b,c, UNIQUE(a) $conf1); + INSERT INTO t1 SELECT * FROM t2; + UPDATE t3 SET x=0; + BEGIN; + $cmd t3 SET x=1; + $cmd t1 SET b=b*2; + $cmd t1 SET a=c+5; + }]} r1] + catch {execsql {COMMIT}} + if {!$r0} {set r1 [execsql {SELECT a FROM t1 ORDER BY b}]} + set r2 [execsql {SELECT x FROM t3}] + list $r0 $r1 $r2 $::sqlite_opentemp_count + } [list $t0 $t1 $t2 $t3] +} + +# Test to make sure a lot of IGNOREs don't cause a stack overflow +# +do_test conflict-7.1 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + CREATE TABLE t1(a unique, b); + } + for {set i 1} {$i<=50} {incr i} { + execsql "INSERT into t1 values($i,[expr {$i+1}]);" + } + execsql { + SELECT count(*), min(a), max(b) FROM t1; + } +} {50 1 51} +do_test conflict-7.2 { + execsql { + PRAGMA count_changes=on; + UPDATE OR IGNORE t1 SET a=1000; + } +} {1} +do_test conflict-7.2.1 { + db changes +} {1} +do_test conflict-7.3 { + execsql { + SELECT b FROM t1 WHERE a=1000; + } +} {2} +do_test conflict-7.4 { + execsql { + SELECT count(*) FROM t1; + } +} {50} +do_test conflict-7.5 { + execsql { + PRAGMA count_changes=on; + UPDATE OR REPLACE t1 SET a=1001; + } +} {50} +do_test conflict-7.5.1 { + db changes +} {50} 
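# A stand-alone sketch of the statement-level algorithms that the
# conflict-7.* cases exercise on UPDATE.  The proc below is an illustration
# only (nothing in this file calls it) and it assumes the same [sqlite3]
# Tcl command that tester.tcl already provides.
#
proc conflict_update_demo {} {
  sqlite3 demo :memory:
  demo eval {
    CREATE TABLE d1(a UNIQUE, b);
    INSERT INTO d1 VALUES(1, 2);
    INSERT INTO d1 VALUES(2, 3);
    INSERT INTO d1 VALUES(3, 4);
  }
  # OR IGNORE: only the first row can take a=100 without a collision, so the
  # other two rows are skipped and the table keeps all three rows.
  demo eval {UPDATE OR IGNORE d1 SET a=100}
  puts "changed [demo changes], remaining [demo eval {SELECT count(*) FROM d1}]"  ;# changed 1, remaining 3
  # OR REPLACE: each collision deletes the row that already holds the value,
  # so updating every row to the same key collapses the table to one row.
  demo eval {UPDATE OR REPLACE d1 SET a=200}
  puts "remaining [demo eval {SELECT count(*) FROM d1}]"                          ;# remaining 1
  demo close
}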
+do_test conflict-7.6 { + execsql { + SELECT b FROM t1 WHERE a=1001; + } +} {51} +do_test conflict-7.7 { + execsql { + SELECT count(*) FROM t1; + } +} {1} + +# Update for version 3: A SELECT statement no longer resets the change +# counter (Test result changes from 0 to 50). +do_test conflict-7.7.1 { + db changes +} {50} + +# Make sure the row count is right for rows that are ignored on +# an insert. +# +do_test conflict-8.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2); + } + execsql { + INSERT OR IGNORE INTO t1 VALUES(2,3); + } +} {1} +do_test conflict-8.1.1 { + db changes +} {1} +do_test conflict-8.2 { + execsql { + INSERT OR IGNORE INTO t1 VALUES(2,4); + } +} {0} +do_test conflict-8.2.1 { + db changes +} {0} +do_test conflict-8.3 { + execsql { + INSERT OR REPLACE INTO t1 VALUES(2,4); + } +} {1} +do_test conflict-8.3.1 { + db changes +} {1} +do_test conflict-8.4 { + execsql { + INSERT OR IGNORE INTO t1 SELECT * FROM t1; + } +} {0} +do_test conflict-8.4.1 { + db changes +} {0} +do_test conflict-8.5 { + execsql { + INSERT OR IGNORE INTO t1 SELECT a+2,b+2 FROM t1; + } +} {2} +do_test conflict-8.5.1 { + db changes +} {2} +do_test conflict-8.6 { + execsql { + INSERT OR IGNORE INTO t1 SELECT a+3,b+3 FROM t1; + } +} {3} +do_test conflict-8.6.1 { + db changes +} {3} + +integrity_check conflict-8.99 + +do_test conflict-9.1 { + execsql { + PRAGMA count_changes=0; + CREATE TABLE t2( + a INTEGER UNIQUE ON CONFLICT IGNORE, + b INTEGER UNIQUE ON CONFLICT FAIL, + c INTEGER UNIQUE ON CONFLICT REPLACE, + d INTEGER UNIQUE ON CONFLICT ABORT, + e INTEGER UNIQUE ON CONFLICT ROLLBACK + ); + CREATE TABLE t3(x); + INSERT INTO t3 VALUES(1); + SELECT * FROM t3; + } +} {1} +do_test conflict-9.2 { + catchsql { + INSERT INTO t2 VALUES(1,1,1,1,1); + INSERT INTO t2 VALUES(2,2,2,2,2); + SELECT * FROM t2; + } +} {0 {1 1 1 1 1 2 2 2 2 2}} +do_test conflict-9.3 { + catchsql { + INSERT INTO t2 VALUES(1,3,3,3,3); + SELECT * FROM t2; + } +} {0 {1 1 1 1 1 2 2 2 2 2}} +do_test conflict-9.4 { + catchsql { + UPDATE t2 SET a=a+1 WHERE a=1; + SELECT * FROM t2; + } +} {0 {1 1 1 1 1 2 2 2 2 2}} +do_test conflict-9.5 { + catchsql { + INSERT INTO t2 VALUES(3,1,3,3,3); + SELECT * FROM t2; + } +} {1 {column b is not unique}} +do_test conflict-9.6 { + catchsql { + UPDATE t2 SET b=b+1 WHERE b=1; + SELECT * FROM t2; + } +} {1 {column b is not unique}} +do_test conflict-9.7 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + INSERT INTO t2 VALUES(3,1,3,3,3); + SELECT * FROM t2; + } +} {1 {column b is not unique}} +do_test conflict-9.8 { + execsql {COMMIT} + execsql {SELECT * FROM t3} +} {2} +do_test conflict-9.9 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + UPDATE t2 SET b=b+1 WHERE b=1; + SELECT * FROM t2; + } +} {1 {column b is not unique}} +do_test conflict-9.10 { + execsql {COMMIT} + execsql {SELECT * FROM t3} +} {3} +do_test conflict-9.11 { + catchsql { + INSERT INTO t2 VALUES(3,3,3,1,3); + SELECT * FROM t2; + } +} {1 {column d is not unique}} +do_test conflict-9.12 { + catchsql { + UPDATE t2 SET d=d+1 WHERE d=1; + SELECT * FROM t2; + } +} {1 {column d is not unique}} +do_test conflict-9.13 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + INSERT INTO t2 VALUES(3,3,3,1,3); + SELECT * FROM t2; + } +} {1 {column d is not unique}} +do_test conflict-9.14 { + execsql {COMMIT} + execsql {SELECT * FROM t3} +} {4} +do_test conflict-9.15 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + UPDATE t2 SET d=d+1 WHERE d=1; + SELECT * FROM t2; + } +} {1 {column d is not unique}} +do_test conflict-9.16 { + execsql {COMMIT} + execsql 
{SELECT * FROM t3} +} {5} +do_test conflict-9.17 { + catchsql { + INSERT INTO t2 VALUES(3,3,3,3,1); + SELECT * FROM t2; + } +} {1 {column e is not unique}} +do_test conflict-9.18 { + catchsql { + UPDATE t2 SET e=e+1 WHERE e=1; + SELECT * FROM t2; + } +} {1 {column e is not unique}} +do_test conflict-9.19 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + INSERT INTO t2 VALUES(3,3,3,3,1); + SELECT * FROM t2; + } +} {1 {column e is not unique}} +do_test conflict-9.20 { + catch {execsql {COMMIT}} + execsql {SELECT * FROM t3} +} {5} +do_test conflict-9.21 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + UPDATE t2 SET e=e+1 WHERE e=1; + SELECT * FROM t2; + } +} {1 {column e is not unique}} +do_test conflict-9.22 { + catch {execsql {COMMIT}} + execsql {SELECT * FROM t3} +} {5} +do_test conflict-9.23 { + catchsql { + INSERT INTO t2 VALUES(3,3,1,3,3); + SELECT * FROM t2; + } +} {0 {2 2 2 2 2 3 3 1 3 3}} +do_test conflict-9.24 { + catchsql { + UPDATE t2 SET c=c-1 WHERE c=2; + SELECT * FROM t2; + } +} {0 {2 2 1 2 2}} +do_test conflict-9.25 { + catchsql { + BEGIN; + UPDATE t3 SET x=x+1; + INSERT INTO t2 VALUES(3,3,1,3,3); + SELECT * FROM t2; + } +} {0 {3 3 1 3 3}} +do_test conflict-9.26 { + catch {execsql {COMMIT}} + execsql {SELECT * FROM t3} +} {6} + +do_test conflict-10.1 { + catchsql { + DELETE FROM t1; + BEGIN; + INSERT OR ROLLBACK INTO t1 VALUES(1,2); + INSERT OR ROLLBACK INTO t1 VALUES(1,3); + COMMIT; + } + execsql {SELECT * FROM t1} +} {} +do_test conflict-10.2 { + catchsql { + CREATE TABLE t4(x); + CREATE UNIQUE INDEX t4x ON t4(x); + BEGIN; + INSERT OR ROLLBACK INTO t4 VALUES(1); + INSERT OR ROLLBACK INTO t4 VALUES(1); + COMMIT; + } + execsql {SELECT * FROM t4} +} {} + +# Ticket #1171. Make sure statement rollbacks do not +# damage the database. +# +do_test conflict-11.1 { + execsql { + -- Create a database object (pages 2, 3 of the file) + BEGIN; + CREATE TABLE abc(a UNIQUE, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + INSERT INTO abc VALUES(7, 8, 9); + COMMIT; + } + + + # Set a small cache size so that changes will spill into + # the database file. + execsql { + PRAGMA cache_size = 10; + } + + # Make lots of changes. Because of the small cache, some + # (most?) of these changes will spill into the disk file. + # In other words, some of the changes will not be held in + # cache. + # + execsql { + BEGIN; + -- Make sure the pager is in EXCLUSIVE state. + CREATE TABLE def(d, e, f); + INSERT INTO def VALUES + ('xxxxxxxxxxxxxxx', 'yyyyyyyyyyyyyyyy', 'zzzzzzzzzzzzzzzz'); + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + DELETE FROM abc WHERE a = 4; + } + + # Execute a statement that does a statement rollback due to + # a constraint failure. + # + catchsql { + INSERT INTO abc SELECT 10, 20, 30 FROM def; + } + + # Rollback the database. Verify that the state of the ABC table + # is unchanged from the beginning of the transaction. In other words, + # make sure the DELETE on table ABC that occurred within the transaction + # had no effect. + # + execsql { + ROLLBACK; + SELECT * FROM abc; + } +} {1 2 3 4 5 6 7 8 9} +integrity_check conflict-11.2 + +# Repeat test conflict-11.1 but this time commit. +# +do_test conflict-11.3 { + execsql { + BEGIN; + -- Make sure the pager is in EXCLUSIVE state. 
+ UPDATE abc SET a=a+1; + CREATE TABLE def(d, e, f); + INSERT INTO def VALUES + ('xxxxxxxxxxxxxxx', 'yyyyyyyyyyyyyyyy', 'zzzzzzzzzzzzzzzz'); + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + DELETE FROM abc WHERE a = 4; + } + catchsql { + INSERT INTO abc SELECT 10, 20, 30 FROM def; + } + execsql { + ROLLBACK; + SELECT * FROM abc; + } +} {1 2 3 4 5 6 7 8 9} +# Repeat test conflict-11.1 but this time commit. +# +do_test conflict-11.5 { + execsql { + BEGIN; + -- Make sure the pager is in EXCLUSIVE state. + CREATE TABLE def(d, e, f); + INSERT INTO def VALUES + ('xxxxxxxxxxxxxxx', 'yyyyyyyyyyyyyyyy', 'zzzzzzzzzzzzzzzz'); + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + INSERT INTO def SELECT * FROM def; + DELETE FROM abc WHERE a = 4; + } + catchsql { + INSERT INTO abc SELECT 10, 20, 30 FROM def; + } + execsql { + COMMIT; + SELECT * FROM abc; + } +} {1 2 3 7 8 9} +integrity_check conflict-11.6 + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt.test b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt.test new file mode 100644 index 0000000..be69d5c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt.test @@ -0,0 +1,169 @@ +# 2004 August 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. +# +# $Id: corrupt.test,v 1.8 2005/02/19 08:18:06 danielk1977 Exp $ + +catch {file delete -force test.db} +catch {file delete -force test.db-journal} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Construct a large database for testing. +# +do_test corrupt-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(randstr(100,100)); + INSERT INTO t1 VALUES(randstr(90,90)); + INSERT INTO t1 VALUES(randstr(80,80)); + INSERT INTO t1 SELECT x || randstr(5,5) FROM t1; + INSERT INTO t1 SELECT x || randstr(6,6) FROM t1; + INSERT INTO t1 SELECT x || randstr(7,7) FROM t1; + INSERT INTO t1 SELECT x || randstr(8,8) FROM t1; + INSERT INTO t1 VALUES(randstr(3000,3000)); + INSERT INTO t1 SELECT x || randstr(9,9) FROM t1; + INSERT INTO t1 SELECT x || randstr(10,10) FROM t1; + INSERT INTO t1 SELECT x || randstr(11,11) FROM t1; + INSERT INTO t1 SELECT x || randstr(12,12) FROM t1; + CREATE INDEX t1i1 ON t1(x); + CREATE TABLE t2 AS SELECT * FROM t1; + DELETE FROM t2 WHERE rowid%5!=0; + COMMIT; + } +} {} +integrity_check corrupt-1.2 + +# Copy file $from into $to +# +proc copy_file {from to} { + set f [open $from] + fconfigure $f -translation binary + set t [open $to w] + fconfigure $t -translation binary + puts -nonewline $t [read $f [file size $from]] + close $t + close $f +} + +# Setup for the tests. Make a backup copy of the good database in test.bu. +# Create a string of garbage data that is 256 bytes long. 
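# The corruption step used by the loop below is simply "seek to a byte offset
# inside a scratch copy and overwrite it".  As a stand-alone sketch, the same
# operation can be wrapped in a small helper; this file keeps the
# open/seek/puts sequence inline instead, so the proc is illustrative only.
#
proc overwrite_at {path off data} {
  set fd [open $path r+]
  fconfigure $fd -translation binary
  seek $fd $off
  puts -nonewline $fd $data
  close $fd
}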
+# +copy_file test.db test.bu +set fsize [file size test.db] +set junk "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" +while {[string length $junk]<256} {append junk $junk} +set junk [string range $junk 0 255] + +# Go through the database and write garbage data into each 256 segment +# of the file. Then do various operations on the file to make sure that +# the database engine can recover gracefully from the corruption. +# +for {set i [expr {1*256}]} {$i<$fsize-256} {incr i 256} { + set tn [expr {$i/256}] + db close + copy_file test.bu test.db + set fd [open test.db r+] + fconfigure $fd -translation binary + seek $fd $i + puts -nonewline $fd $junk + close $fd + do_test corrupt-2.$tn.1 { + sqlite3 db test.db + catchsql {SELECT count(*) FROM sqlite_master} + set x {} + } {} + do_test corrupt-2.$tn.2 { + catchsql {SELECT count(*) FROM t1} + set x {} + } {} + do_test corrupt-2.$tn.3 { + catchsql {SELECT count(*) FROM t1 WHERE x>'abcdef'} + set x {} + } {} + do_test corrupt-2.$tn.4 { + catchsql {SELECT count(*) FROM t2} + set x {} + } {} + do_test corrupt-2.$tn.5 { + catchsql {CREATE TABLE t3 AS SELECT * FROM t1} + set x {} + } {} + do_test corrupt-2.$tn.6 { + catchsql {DROP TABLE t1} + set x {} + } {} + do_test corrupt-2.$tn.7 { + catchsql {PRAGMA integrity_check} + set x {} + } {} +} + +#------------------------------------------------------------------------ +# For these tests, swap the rootpage entries of t1 (a table) and t1i1 (an +# index on t1) in sqlite_master. Then perform a few different queries +# and make sure this is detected as corruption. +# +do_test corrupt-3.1 { + db close + copy_file test.bu test.db + sqlite3 db test.db + list +} {} +do_test corrupt-3.2 { + set t1_r [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't1i1'}] + set t1i1_r [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't1'}] + set cookie [expr [execsql {PRAGMA schema_version}] + 1] + execsql " + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET rootpage = $t1_r WHERE name = 't1'; + UPDATE sqlite_master SET rootpage = $t1i1_r WHERE name = 't1i1'; + PRAGMA writable_schema = 0; + PRAGMA schema_version = $cookie; + " +} {} + +# This one tests the case caught by code in checkin [2313]. +do_test corrupt-3.3 { + db close + sqlite3 db test.db + catchsql { + INSERT INTO t1 VALUES('abc'); + } +} {1 {database disk image is malformed}} +do_test corrupt-3.4 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM t1; + } +} {1 {database disk image is malformed}} +do_test corrupt-3.5 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM t1 WHERE oid = 10; + } +} {1 {database disk image is malformed}} +do_test corrupt-3.6 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM t1 WHERE x = 'abcde'; + } +} {1 {database disk image is malformed}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt2.test new file mode 100644 index 0000000..cba53f6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt2.test @@ -0,0 +1,135 @@ +# 2004 August 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. +# +# $Id: corrupt2.test,v 1.4 2007/03/13 16:32:25 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# The following tests - corrupt2-1.* - create some databases corrupted in +# specific ways and ensure that SQLite detects them as corrupt. +# +do_test corrupt2-1.1 { + execsql { + CREATE TABLE abc(a, b, c); + } +} {} + +do_test corrupt2-1.2 { + + # Corrupt the 16 byte magic string at the start of the file + file delete -force corrupt.db + file delete -force corrupt.db-journal + copy_file test.db corrupt.db + set f [open corrupt.db RDWR] + seek $f 8 start + puts $f blah + close $f + + sqlite3 db2 corrupt.db + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {file is encrypted or is not a database}} + +do_test corrupt2-1.3 { + db2 close + + # Corrupt the page-size (bytes 16 and 17 of page 1). + file delete -force corrupt.db + file delete -force corrupt.db-journal + copy_file test.db corrupt.db + set f [open corrupt.db RDWR] + fconfigure $f -encoding binary + seek $f 16 start + puts -nonewline $f "\x00\xFF" + close $f + + sqlite3 db2 corrupt.db + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {file is encrypted or is not a database}} + +do_test corrupt2-1.4 { + db2 close + + # Corrupt the free-block list on page 1. + file delete -force corrupt.db + file delete -force corrupt.db-journal + copy_file test.db corrupt.db + set f [open corrupt.db RDWR] + fconfigure $f -encoding binary + seek $f 101 start + puts -nonewline $f "\xFF\xFF" + close $f + + sqlite3 db2 corrupt.db + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {database disk image is malformed}} + +do_test corrupt2-1.5 { + db2 close + + # Corrupt the free-block list on page 1. + file delete -force corrupt.db + file delete -force corrupt.db-journal + copy_file test.db corrupt.db + set f [open corrupt.db RDWR] + fconfigure $f -encoding binary + seek $f 101 start + puts -nonewline $f "\x00\xC8" + seek $f 200 start + puts -nonewline $f "\x00\x00" + puts -nonewline $f "\x10\x00" + close $f + + sqlite3 db2 corrupt.db + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {database disk image is malformed}} +db2 close + +# Corrupt a database by having 2 indices of the same name: +do_test corrupt2-2.1 { + + file delete -force corrupt.db + file delete -force corrupt.db-journal + copy_file test.db corrupt.db + + sqlite3 db2 corrupt.db + execsql { + CREATE INDEX a1 ON abc(a); + CREATE INDEX a2 ON abc(b); + PRAGMA writable_schema = 1; + UPDATE sqlite_master + SET name = 'a3', sql = 'CREATE INDEX a3' || substr(sql, 16, 10000) + WHERE type = 'index'; + PRAGMA writable_schema = 0; + } db2 + + db2 close + sqlite3 db2 corrupt.db + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {malformed database schema - index a3 already exists}} + +db2 close + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt3.test new file mode 100644 index 0000000..af69f2e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt3.test @@ -0,0 +1,109 @@ +# 2007 April 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. +# +# $Id: corrupt3.test,v 1.2 2007/04/06 21:42:22 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a database with an overflow page. +# +do_test corrupt3-1.1 { + set bigstring [string repeat 0123456789 200] + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES($bigstring); + } + file size test.db +} [expr {1024*3}] + +# Verify that the file format is as we expect. The page size +# should be 1024 bytes. The only record should have a single +# overflow page. The overflow page is page 3. The pointer to +# the overflow page is on the last 4 bytes of page 2. +# +do_test corrupt3-1.2 { + hexio_get_int [hexio_read test.db 16 2] +} 1024 ;# The page size is 1024 +do_test corrupt3-1.3 { + hexio_get_int [hexio_read test.db 20 1] +} 0 ;# Unused bytes per page is 0 +do_test corrupt3-1.4 { + hexio_get_int [hexio_read test.db 2044 4] +} 3 ;# Overflow page is 3 +do_test corrupt3-1.5 { + hexio_get_int [hexio_read test.db 2048 4] +} 0 ;# First chained overflow is 0 + +integrity_check corrupt3-1.6 + +# Make the overflow chain loop back on itself. See if the +# corruption is detected. (Actually, the last pointer in +# an overflow chain is ignored, so this is not an error.) +# +do_test corrupt3-1.7 { + db close + hexio_write test.db 2048 [hexio_render_int32 3] + sqlite3 db test.db + catchsql { + SELECT x FROM t1 + } +} [list 0 $bigstring] +integrity_check corrupt3-1.8 + +# Change the pointer for the first page of the overflow +# change to be a non-existant page. +# +do_test corrupt3-1.9 { + db close + hexio_write test.db 2044 [hexio_render_int32 4] + sqlite3 db test.db + catchsql { + SELECT substr(x,1,10) FROM t1 + } +} [list 0 0123456789] +do_test corrupt3-1.10 { + catchsql { + PRAGMA integrity_check + } +} {0 {{*** in database main *** +On tree page 2 cell 0: invalid page number 4 +Page 3 is never used}}} +do_test corrupt3-1.11 { + db close + hexio_write test.db 2044 [hexio_render_int32 0] + sqlite3 db test.db + catchsql { + SELECT substr(x,1,10) FROM t1 + } +} [list 1 {database disk image is malformed}] +do_test corrupt3-1.12 { + catchsql { + PRAGMA integrity_check + } +} {0 {{*** in database main *** +On tree page 2 cell 0: 1 of 1 pages missing from overflow list starting at 0 +Page 3 is never used}}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt4.test new file mode 100644 index 0000000..952df70 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/corrupt4.test @@ -0,0 +1,74 @@ +# 2007 Sept 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. 
+# +# $Id: corrupt4.test,v 1.1 2007/09/07 14:32:07 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a database with a freelist containing at least two pages. +# +do_test corrupt4-1.1 { + set bigstring [string repeat 0123456789 200] + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES($bigstring); + CREATE TABLE t2(y); + INSERT INTO t2 VALUES(1); + DROP TABLE t1; + } + file size test.db +} [expr {1024*4}] + +# Verify that there are two pages on the freelist. +# +do_test corrupt4-1.2 { + execsql {PRAGMA freelist_count} +} {2} + +# Get the page number for the trunk of the freelist. +# +set trunkpgno [hexio_get_int [hexio_read test.db 32 4]] +set baseaddr [expr {($trunkpgno-1)*1024}] + +# Verify that the trunk of the freelist has exactly one +# leaf. +# +do_test corrupt4-1.3 { + hexio_get_int [hexio_read test.db [expr {$::baseaddr+4}] 4] +} {1} + +# Insert a negative number as the number of leaves on the trunk. +# Then try to add a new element to the freelist. +# +do_test corrupt4-1.4 { + hexio_write test.db [expr {$::baseaddr+4}] [hexio_render_int32 -100000000] + db close + sqlite3 db test.db + catchsql { + DROP TABLE t2 + } +} {1 {database disk image is malformed}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/crash.test b/libraries/sqlite/unix/sqlite-3.5.1/test/crash.test new file mode 100644 index 0000000..0d58f84 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/crash.test @@ -0,0 +1,403 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing the ability of the database to +# uses its rollback journal to recover intact (no database corruption) +# from a power failure during the middle of a COMMIT. The OS interface +# modules are overloaded using the modified I/O routines found in test6.c. +# These routines allow us to simulate the kind of file damage that +# occurs after a power failure. +# +# $Id: crash.test,v 1.25 2007/08/20 14:23:44 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +set repeats 100 +#set repeats 10 + +# The following procedure computes a "signature" for table "abc". If +# abc changes in any way, the signature should change. +proc signature {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] +} +proc signature2 {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc2}] +} + +#-------------------------------------------------------------------------- +# Simple crash test: +# +# crash-1.1: Create a database with a table with two rows. +# crash-1.2: Run a 'DELETE FROM abc WHERE a = 1' that crashes during +# the first journal-sync. +# crash-1.3: Ensure the database is in the same state as after crash-1.1. +# crash-1.4: Run a 'DELETE FROM abc WHERE a = 1' that crashes during +# the first database-sync. +# crash-1.5: Ensure the database is in the same state as after crash-1.1. 
+# crash-1.6: Run a 'DELETE FROM abc WHERE a = 1' that crashes during +# the second journal-sync. +# crash-1.7: Ensure the database is in the same state as after crash-1.1. +# +# Tests 1.8 through 1.11 test for crashes on the third journal sync and +# second database sync. Neither of these is required in such a small test +# case, so these tests are just to verify that the test infrastructure +# operates as expected. +# +do_test crash-1.1 { + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + } + set ::sig [signature] + expr 0 +} {0} +do_test crash-1.2 { + crashsql -delay 1 -file test.db-journal { + DELETE FROM abc WHERE a = 1; + } +} {1 {child process exited abnormally}} +do_test crash-1.3 { + signature +} $::sig +do_test crash-1.4 { + crashsql -delay 1 -file test.db { + DELETE FROM abc WHERE a = 1; + } +} {1 {child process exited abnormally}} +do_test crash-1.5 { + signature +} $::sig +do_test crash-1.6 { + crashsql -delay 2 -file test.db-journal { + DELETE FROM abc WHERE a = 1; + } +} {1 {child process exited abnormally}} +do_test crash-1.7 { + catchsql { + SELECT * FROM abc; + } +} {0 {1 2 3 4 5 6}} + +do_test crash-1.8 { + crashsql -delay 3 -file test.db-journal { + DELETE FROM abc WHERE a = 1; + } +} {0 {}} +do_test crash-1.9 { + catchsql { + SELECT * FROM abc; + } +} {0 {4 5 6}} +do_test crash-1.10 { + crashsql -delay 2 -file test.db { + DELETE FROM abc WHERE a = 4; + } +} {0 {}} +do_test crash-1.11 { + catchsql { + SELECT * FROM abc; + } +} {0 {}} + +#-------------------------------------------------------------------------- +# The following tests test recovery when both the database file and the the +# journal file contain corrupt data. This can happen after pages are +# written to the database file before a transaction is committed due to +# cache-pressure. +# +# crash-2.1: Insert 18 pages of data into the database. +# crash-2.2: Check the database file size looks ok. +# crash-2.3: Delete 15 or so pages (with a 10 page page-cache), then crash. +# crash-2.4: Ensure the database is in the same state as after crash-2.1. +# +# Test cases crash-2.5 and crash-2.6 check that the database is OK if the +# crash occurs during the main database file sync. But this isn't really +# different from the crash-1.* cases. +# +do_test crash-2.1 { + execsql { BEGIN } + for {set n 0} {$n < 1000} {incr n} { + execsql "INSERT INTO abc VALUES($n, [expr 2*$n], [expr 3*$n])" + } + execsql { COMMIT } + set ::sig [signature] + execsql { SELECT sum(a), sum(b), sum(c) from abc } +} {499500 999000 1498500} +do_test crash-2.2 { + expr ([file size test.db] / 1024)>16 +} {1} +do_test crash-2.3 { + crashsql -delay 2 -file test.db-journal { + DELETE FROM abc WHERE a < 800; + } +} {1 {child process exited abnormally}} +do_test crash-2.4 { + signature +} $sig +do_test crash-2.5 { + crashsql -delay 1 -file test.db { + DELETE FROM abc WHERE a<800; + } +} {1 {child process exited abnormally}} +do_test crash-2.6 { + signature +} $sig + +#-------------------------------------------------------------------------- +# The crash-3.* test cases are essentially the same test as test case +# crash-2.*, but with a more complicated data set. +# +# The test is repeated a few times with different seeds for the random +# number generator in the crashing executable. Because there is no way to +# seed the random number generator directly, some SQL is added to the test +# case to 'use up' a different quantity random numbers before the test SQL +# is executed. 
+# + +# Make sure the file is much bigger than the pager-cache (10 pages). This +# ensures that cache-spills happen regularly. +do_test crash-3.0 { + execsql { + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + expr ([file size test.db] / 1024) > 450 +} {1} +for {set i 1} {$i < $repeats} {incr i} { + set sig [signature] + do_test crash-3.$i.1 { + crashsql -delay [expr $i%5 + 1] -file test.db-journal " + BEGIN; + SELECT random() FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + do_test crash-3.$i.2 { + signature + } $sig +} + +#-------------------------------------------------------------------------- +# The following test cases - crash-4.* - test the correct recovery of the +# database when a crash occurs during a multi-file transaction. +# +# crash-4.1.*: Test recovery when crash occurs during sync() of the +# main database journal file. +# crash-4.2.*: Test recovery when crash occurs during sync() of an +# attached database journal file. +# crash-4.3.*: Test recovery when crash occurs during sync() of the master +# journal file. +# +do_test crash-4.0 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.default_cache_size = 10; + CREATE TABLE aux.abc2 AS SELECT 2*a as a, 2*b as b, 2*c as c FROM abc; + } + expr ([file size test2.db] / 1024) > 450 +} {1} + +set fin 0 +for {set i 1} {$i<$repeats} {incr i} { + set sig [signature] + set sig2 [signature2] + do_test crash-4.1.$i.1 { + set c [crashsql -delay $i -file test.db-journal " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT randstr($i,$i) FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + "] + if { $c == {0 {}} } { + set ::fin 1 + set c {1 {child process exited abnormally}} + } + set c + } {1 {child process exited abnormally}} + if {$::fin} break + do_test crash-4.1.$i.2 { + signature + } $sig + do_test crash-4.1.$i.3 { + signature2 + } $sig2 +} +set i 0 +set fin 0 +while {[incr i]} { + set sig [signature] + set sig2 [signature2] + set ::fin 0 + do_test crash-4.2.$i.1 { + set c [crashsql -delay $i -file test2.db-journal " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT randstr($i,$i) FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + "] + if { $c == {0 {}} } { + set ::fin 1 + set c {1 {child process exited abnormally}} + } + set c + } {1 {child process exited abnormally}} + if { $::fin } break + do_test crash-4.2.$i.2 { + signature + } $sig + do_test crash-4.2.$i.3 { + signature2 + } $sig2 +} +for {set i 1} {$i < 5} {incr i} { + set sig [signature] + set sig2 [signature2] + do_test crash-4.3.$i.1 { + crashsql -delay 1 -file test.db-mj* " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT random() FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + do_test crash-4.3.$i.2 { + signature + } $sig + do_test crash-4.3.$i.3 { 
+ signature2 + } $sig2 +} + +#-------------------------------------------------------------------------- +# The following test cases - crash-5.* - exposes a bug that existed in the +# sqlite3pager_movepage() API used by auto-vacuum databases. +# database when a crash occurs during a multi-file transaction. See comments +# in test crash-5.3 for details. +# +db close +file delete -force test.db +sqlite3 db test.db +do_test crash-5.1 { + execsql { + CREATE TABLE abc(a, b, c); -- Root page 3 + INSERT INTO abc VALUES(randstr(1500,1500), 0, 0); -- Overflow page 4 + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } +} {} +do_test crash-5.2 { + expr [file size test.db] / 1024 +} [expr [string match [execsql {pragma auto_vacuum}] 1] ? 11 : 10] +set sig [signature] +do_test crash-5.3 { +# The SQL below is used to expose a bug that existed in +# sqlite3pager_movepage() during development of the auto-vacuum feature. It +# functions as follows: +# +# 1: Begin a transaction. +# 2: Put page 4 on the free-list (was the overflow page for the row deleted). +# 3: Write data to page 4 (it becomes the overflow page for the row inserted). +# The old page 4 data has been written to the journal file, but the +# journal file has not been sync()hronized. +# 4: Create a table, which calls sqlite3pager_movepage() to move page 4 +# to the end of the database (page 12) to make room for the new root-page. +# 5: Put pressure on the pager-cache. This results in page 4 being written +# to the database file to make space in the cache to load a new page. The +# bug was that page 4 was written to the database file before the journal +# is sync()hronized. +# 6: Commit. A crash occurs during the sync of the journal file. +# +# End result: Before the bug was fixed, data has been written to page 4 of the +# database file and the journal file does not contain trustworthy rollback +# data for this page. +# + crashsql -delay 1 -file test.db-journal { + BEGIN; -- 1 + DELETE FROM abc WHERE oid = 1; -- 2 + INSERT INTO abc VALUES(randstr(1500,1500), 0, 0); -- 3 + CREATE TABLE abc2(a, b, c); -- 4 + SELECT * FROM abc; -- 5 + COMMIT; -- 6 + } +} {1 {child process exited abnormally}} +integrity_check crash-5.4 +do_test crash-5.5 { + signature +} $sig + +#-------------------------------------------------------------------------- +# The following test cases - crash-6.* - test that a DROP TABLE operation +# is correctly rolled back in the event of a crash while the database file +# is being written. This is mainly to test that all pages are written to the +# journal file before truncation in an auto-vacuum database. +# +do_test crash-6.1 { + crashsql -delay 1 -file test.db { + DROP TABLE abc; + } +} {1 {child process exited abnormally}} +do_test crash-6.2 { + signature +} $sig + +#-------------------------------------------------------------------------- +# These test cases test the case where the master journal file name is +# corrupted slightly so that the corruption has to be detected by the +# checksum. +do_test crash-7.1 { + crashsql -delay 1 -file test.db { + ATTACH 'test2.db' AS aux; + BEGIN; + INSERT INTO abc VALUES(randstr(1500,1500), 0, 0); + INSERT INTO abc2 VALUES(randstr(1500,1500), 0, 0); + COMMIT; + } + + # Change the checksum value for the master journal name. 
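  # (The 4-byte checksum is stored just ahead of the 8-byte journal magic at
  # the very end of the journal file, which is why the code below seeks to
  # [file size]-12 and overwrites four bytes.)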
+ set f [open test.db-journal a] + fconfigure $f -encoding binary + seek $f [expr [file size test.db-journal] - 12] + puts -nonewline $f "\00\00\00\00" + close $f +} {} +do_test crash-7.2 { + signature +} $sig + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/crash2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/crash2.test new file mode 100644 index 0000000..4320779 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/crash2.test @@ -0,0 +1,132 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing the ability of the database to +# uses its rollback journal to recover intact (no database corruption) +# from a power failure during the middle of a COMMIT. Even more +# specifically, the tests in this file verify this functionality +# for storage mediums with various sector sizes. +# +# $Id: crash2.test,v 1.5 2007/08/24 11:52:29 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +db close + +# This test is designed to check that the crash-test infrastructure +# can create files that do not consist of an integer number of +# simulated disk blocks (i.e. 3KB file using 2KB disk blocks). +# +do_test crash2-1.1 { + crashsql -delay 500 -file test.db -blocksize 2048 { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + BEGIN; + CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; + CREATE TABLE def AS SELECT 1 AS d, 2 AS e, 3 AS f; + COMMIT; + } + file size test.db +} {3072} + +for {set ii 0} {$ii < 5} {incr ii} { + + # Simple test using the database created above: Create a new + # table so that page 1 and page 4 are modified. Using a + # block-size of 2048 and page-size of 1024, this means + # pages 2 and 3 must also be saved in the journal to avoid + # risking corruption. + # + # The loop is so that this test can be run with a couple + # of different seeds for the random number generator. + # + do_test crash2-1.2.$ii { + crashsql -file test.db -blocksize 2048 [subst { + [string repeat {SELECT random();} $ii] + CREATE TABLE hij(h, i, j); + }] + sqlite3 db test.db + db eval {PRAGMA integrity_check} + } {ok} +} + +proc signature {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] +} + +# Test case for crashing during journal sync with simulated +# sector-size values from 1024 to 8192. 
+# +do_test crash2-2.0 { + execsql BEGIN + for {set n 0} {$n < 1000} {incr n} { + execsql "INSERT INTO abc VALUES($n, [expr 2*$n], [expr 3*$n])" + } + execsql { + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + execsql COMMIT + expr ([file size test.db] / 1024) > 450 +} {1} +for {set i 1} {$i < 30} {incr i} { + set sig [signature] + set sector [expr 1024 * 1<<($i%4)] + db close + do_test crash2-2.$i.1 { + crashsql -blocksize $sector -delay [expr $i%5 + 1] -file test.db-journal " + BEGIN; + SELECT random() FROM abc LIMIT $i; + INSERT INTO abc SELECT randstr(10,10), 0, 0 FROM abc WHERE random()%2==0; + DELETE FROM abc WHERE random()%2!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + do_test crash2-2.$i.2 { + sqlite3 db test.db + signature + } $sig +} + + +# Test case for crashing during database sync with simulated +# sector-size values from 1024 to 8192. +# +for {set i 1} {$i < 10} {incr i} { + set sig [signature] + set sector [expr 1024 * 1<<($i%4)] + db close + do_test crash2-3.$i.1 { + crashsql -blocksize $sector -file test.db " + BEGIN; + SELECT random() FROM abc LIMIT $i; + INSERT INTO abc SELECT randstr(10,10), 0, 0 FROM abc WHERE random()%2==0; + DELETE FROM abc WHERE random()%2!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + do_test crash2-3.$i.2 { + sqlite3 db test.db + signature + } $sig +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/crash3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/crash3.test new file mode 100644 index 0000000..5154de6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/crash3.test @@ -0,0 +1,191 @@ +# 2007 August 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests that verify that SQLite can correctly rollback +# databases after crashes when using the special IO modes triggered +# by device IOCAP flags. +# +# $Id: crash3.test,v 1.3 2007/08/24 11:52:29 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +proc do_test2 {name tcl res1 res2} { + set script [subst -nocommands { + do_test $name { + set res1 {$res1} + set res2 {$res2} + set res [eval {$tcl}] + if {[set res] eq [set res1] || [set res] eq [set res2]} { + set res "{[set res1]} or {[set res2]}" + } + set res + } {{$res1} or {$res2}} + }] + uplevel $script +} + +# This block tests crash-recovery when the IOCAP_ATOMIC flags is set. +# +# Each iteration of the following loop sets up the database to contain +# the following schema and data: +# +# CREATE TABLE abc(a, b, c); +# INSERT INTO abc VALUES(1, 2, 3); +# +# Then execute the SQL statement, scheduling a crash for part-way through +# the first sync() of either the database file or the journal file (often +# the journal file is not required - meaning no crash occurs). +# +# After the crash (or absence of a crash), open the database and +# verify that: +# +# * The integrity check passes, and +# * The contents of table abc is either {1 2 3} or the value specified +# to the right of the SQL statement below. 
+# +# The procedure is repeated 10 times for each SQL statement. Five times +# with the crash scheduled for midway through the first journal sync (if +# any), and five times with the crash midway through the database sync. +# +set tn 1 +foreach {sql res2} [list \ + {INSERT INTO abc VALUES(4, 5, 6)} {1 2 3 4 5 6} \ + {DELETE FROM abc} {} \ + {INSERT INTO abc SELECT * FROM abc} {1 2 3 1 2 3} \ + {UPDATE abc SET a = 2} {2 2 3} \ + {INSERT INTO abc VALUES(4, 5, randstr(1000,1000))} {n/a} \ + {CREATE TABLE def(d, e, f)} {n/a} \ +] { + for {set ii 0} {$ii < 10} {incr ii} { + + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + do_test crash3-1.$tn.1 { + execsql { + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + COMMIT; + } + } {} + db close + + set crashfile test.db + if {($ii%2)==0} { append crashfile -journal } + set rand "SELECT randstr($tn,$tn);" + do_test crash3-1.$tn.2 [subst { + crashsql -file $crashfile -char atomic {$rand $sql} + sqlite3 db test.db + execsql { PRAGMA integrity_check; } + }] {ok} + + do_test2 crash3-1.$tn.3 { + execsql { SELECT * FROM abc } + } {1 2 3} $res2 + + incr tn + } +} + +# This block tests both the IOCAP_SEQUENTIAL and IOCAP_SAFE_APPEND flags. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db +do_test crash3-2.0 { + execsql { + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + CREATE TABLE def(d PRIMARY KEY, e, f); + PRAGMA default_cache_size = 10; + INSERT INTO abc VALUES(randstr(10,1000),randstr(10,1000),randstr(10,1000)); + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + COMMIT; + } +} {} + +set tn 1 +foreach {::crashfile ::delay ::char} { + test.db 1 sequential + test.db 1 safe_append + test.db-journal 1 sequential + test.db-journal 1 safe_append + test.db-journal 2 safe_append + test.db-journal 2 sequential + test.db-journal 3 sequential + test.db-journal 3 safe_append +} { + for {set ii 0} {$ii < 100} {incr ii} { + set ::SQL [subst { + SELECT randstr($ii,$ii+10); + BEGIN; + DELETE FROM abc WHERE random()%5; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) + FROM abc + WHERE (random()%5)==0; + DELETE FROM def WHERE random()%5; + INSERT INTO def + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) + FROM def + WHERE (random()%5)==0; + COMMIT; + }] + + do_test crash3-2.$tn.$ii { + crashsql -file $::crashfile -delay $::delay -char $::char $::SQL + db close + sqlite3 db test.db + execsql {PRAGMA integrity_check} + } {ok} + } + incr tn +} + +# The following block tests an interaction between IOCAP_ATOMIC and +# IOCAP_SEQUENTIAL. At one point, if both flags were set, small +# journal files that contained only a single page, but were required +# for some other reason (i.e. nTrunk) were not being written to +# disk. 
+# +for {set ii 0} {$ii < 10} {incr ii} { + db close + file delete -force test.db test.db-journal + crashsql -file test.db -char {sequential atomic} { + CREATE TABLE abc(a, b, c); + } + sqlite3 db test.db + do_test crash3-3.$ii { + execsql {PRAGMA integrity_check} + } {ok} +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/createtab.test b/libraries/sqlite/unix/sqlite-3.5.1/test/createtab.test new file mode 100644 index 0000000..3f036b7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/createtab.test @@ -0,0 +1,146 @@ +# 2007 May 02 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing that it is OK to create new tables +# and indices while creating existing tables and indices. +# +# $Id: createtab.test,v 1.3 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable autovacuum { + set upperBound 2 +} else { + set upperBound 0 +} + +# Run these tests for all possible values of autovacuum. +# +for {set av 0} {$av<=$upperBound} {incr av} { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + + # Create a table that spans multiple pages. It is important + # that part of the database be in pages beyond the root page. + # + do_test createtab-$av.1 { + execsql "PRAGMA auto_vacuum=$av" + execsql { + PRAGMA page_size=1024; + CREATE TABLE t1(x INTEGER PRIMARY KEY, y); + INSERT INTO t1 VALUES(1, hex(randomblob(200))); + INSERT INTO t1 VALUES(2, hex(randomblob(200))); + INSERT INTO t1 VALUES(3, hex(randomblob(200))); + INSERT INTO t1 VALUES(4, hex(randomblob(200))); + SELECT count(*) FROM t1; + } + } {4} + + set isUtf16 0 + ifcapable utf16 { + set isUtf16 [expr {[execsql {PRAGMA encoding}] != "UTF-8"}] + } + + do_test createtab-$av.2 { + file size test.db + } [expr {1024*(4+($av!=0)+(${isUtf16}*2))}] + + # Start reading the table + # + do_test createtab-$av.3 { + set STMT [sqlite3_prepare db {SELECT x FROM t1} -1 TAIL] + sqlite3_step $STMT + } {SQLITE_ROW} + do_test createtab-$av.4 { + sqlite3_column_int $STMT 0 + } {1} + + # While still reading the table, create a new table. + # + do_test createtab-$av.5 { + execsql { + CREATE TABLE t2(a,b); + INSERT INTO t2 VALUES(1,2); + SELECT * FROM t2; + } + } {1 2} + + # Continue reading the original table. + # + do_test createtab-$av.6 { + sqlite3_column_int $STMT 0 + } {1} + do_test createtab-$av.7 { + sqlite3_step $STMT + } {SQLITE_ROW} + do_test createtab-$av.8 { + sqlite3_column_int $STMT 0 + } {2} + + # Do another cycle of creating a new database table while contining + # to read the original table. + # + do_test createtab-$av.11 { + execsql { + CREATE TABLE t3(a,b); + INSERT INTO t3 VALUES(4,5); + SELECT * FROM t3; + } + } {4 5} + do_test createtab-$av.12 { + sqlite3_column_int $STMT 0 + } {2} + do_test createtab-$av.13 { + sqlite3_step $STMT + } {SQLITE_ROW} + do_test createtab-$av.14 { + sqlite3_column_int $STMT 0 + } {3} + + # One more cycle. 
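  # (Create a fourth table while the original SELECT on t1 is still only
  # part-way through its result set, then confirm the pending statement can
  # keep stepping.)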
+ # + do_test createtab-$av.21 { + execsql { + CREATE TABLE t4(a,b); + INSERT INTO t4 VALUES('abc','xyz'); + SELECT * FROM t4; + } + } {abc xyz} + do_test createtab-$av.22 { + sqlite3_column_int $STMT 0 + } {3} + do_test createtab-$av.23 { + sqlite3_step $STMT + } {SQLITE_ROW} + do_test createtab-$av.24 { + sqlite3_column_int $STMT 0 + } {4} + + # Finish reading. Do an integrity check on the database. + # + do_test createtab-$av.30 { + sqlite3_step $STMT + } {SQLITE_DONE} + do_test createtab-$av.31 { + sqlite3_finalize $STMT + } {SQLITE_OK} + do_test createtab-$av.32 { + execsql { + SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1 + } + } {t1 t2 t3 t4} + integrity_check createtab-$av.40 + +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/date.test b/libraries/sqlite/unix/sqlite-3.5.1/test/date.test new file mode 100644 index 0000000..b26db27 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/date.test @@ -0,0 +1,354 @@ +# 2003 October 31 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing date and time functions. +# +# $Id: date.test,v 1.23 2007/08/31 17:42:48 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Skip this whole file if date and time functions are omitted +# at compile-time +# +ifcapable {!datetime} { + finish_test + return +} + +proc datetest {tnum expr result} { + do_test date-$tnum [subst { + execsql "SELECT coalesce($expr,'NULL')" + }] [list $result] +} +set tcl_precision 15 +datetest 1.1 julianday('2000-01-01') 2451544.5 +datetest 1.2 julianday('1970-01-01') 2440587.5 +datetest 1.3 julianday('1910-04-20') 2418781.5 +datetest 1.4 julianday('1986-02-09') 2446470.5 +datetest 1.5 julianday('12:00:00') 2451545.0 +datetest 1.6 {julianday('2000-01-01 12:00:00')} 2451545.0 +datetest 1.7 {julianday('2000-01-01 12:00')} 2451545.0 +datetest 1.8 julianday('bogus') NULL +datetest 1.9 julianday('1999-12-31') 2451543.5 +datetest 1.10 julianday('1999-12-32') NULL +datetest 1.11 julianday('1999-13-01') NULL +datetest 1.12 julianday('2003-02-31') 2452701.5 +datetest 1.13 julianday('2003-03-03') 2452701.5 +datetest 1.14 julianday('+2000-01-01') NULL +datetest 1.15 julianday('200-01-01') NULL +datetest 1.16 julianday('2000-1-01') NULL +datetest 1.17 julianday('2000-01-1') NULL +datetest 1.18.1 {julianday('2000-01-01 12:00:00')} 2451545.0 +datetest 1.18.2 {julianday('2000-01-01T12:00:00')} 2451545.0 +datetest 1.18.3 {julianday('2000-01-01 T12:00:00')} 2451545.0 +datetest 1.18.4 {julianday('2000-01-01T 12:00:00')} 2451545.0 +datetest 1.18.4 {julianday('2000-01-01 T 12:00:00')} 2451545.0 +datetest 1.19 {julianday('2000-01-01 12:00:00.1')} 2451545.00000116 +datetest 1.20 {julianday('2000-01-01 12:00:00.01')} 2451545.00000012 +datetest 1.21 {julianday('2000-01-01 12:00:00.001')} 2451545.00000001 +datetest 1.22 {julianday('2000-01-01 12:00:00.')} NULL +datetest 1.23 julianday(12345.6) 12345.6 +datetest 1.24 {julianday('2001-01-01 12:00:00 bogus')} NULL +datetest 1.25 {julianday('2001-01-01 bogus')} NULL +datetest 1.26 {julianday('2001-01-01 12:60:00')} NULL +datetest 1.27 {julianday('2001-01-01 12:59:60')} NULL +datetest 1.28 
{julianday('2001-00-01')} NULL +datetest 1.29 {julianday('2001-01-00')} NULL + +datetest 2.1 datetime(0,'unixepoch') {1970-01-01 00:00:00} +datetest 2.1b datetime(0,'unixepoc') NULL +datetest 2.1c datetime(0,'unixepochx') NULL +datetest 2.1d datetime('2003-10-22','unixepoch') NULL +datetest 2.2 datetime(946684800,'unixepoch') {2000-01-01 00:00:00} +datetest 2.3 {date('2003-10-22','weekday 0')} 2003-10-26 +datetest 2.4 {date('2003-10-22','weekday 1')} 2003-10-27 +datetest 2.4a {date('2003-10-22','weekday 1')} 2003-10-27 +datetest 2.4b {date('2003-10-22','weekday 1x')} 2003-10-27 +datetest 2.4c {date('2003-10-22','weekday -1')} NULL +datetest 2.4d {date('2003-10-22','weakday 1x')} NULL +datetest 2.4e {date('2003-10-22','weekday ')} NULL +datetest 2.5 {date('2003-10-22','weekday 2')} 2003-10-28 +datetest 2.6 {date('2003-10-22','weekday 3')} 2003-10-22 +datetest 2.7 {date('2003-10-22','weekday 4')} 2003-10-23 +datetest 2.8 {date('2003-10-22','weekday 5')} 2003-10-24 +datetest 2.9 {date('2003-10-22','weekday 6')} 2003-10-25 +datetest 2.10 {date('2003-10-22','weekday 7')} NULL +datetest 2.11 {date('2003-10-22','weekday 5.5')} NULL +datetest 2.12 {datetime('2003-10-22 12:34','weekday 0')} {2003-10-26 12:34:00} +datetest 2.13 {datetime('2003-10-22 12:34','start of month')} \ + {2003-10-01 00:00:00} +datetest 2.14 {datetime('2003-10-22 12:34','start of year')} \ + {2003-01-01 00:00:00} +datetest 2.15 {datetime('2003-10-22 12:34','start of day')} \ + {2003-10-22 00:00:00} +datetest 2.15a {datetime('2003-10-22 12:34','start of')} NULL +datetest 2.15b {datetime('2003-10-22 12:34','start of bogus')} NULL +datetest 2.16 time('12:34:56.43') 12:34:56 +datetest 2.17 {datetime('2003-10-22 12:34','1 day')} {2003-10-23 12:34:00} +datetest 2.18 {datetime('2003-10-22 12:34','+1 day')} {2003-10-23 12:34:00} +datetest 2.19 {datetime('2003-10-22 12:34','+1.25 day')} {2003-10-23 18:34:00} +datetest 2.20 {datetime('2003-10-22 12:34','-1.0 day')} {2003-10-21 12:34:00} +datetest 2.21 {datetime('2003-10-22 12:34','1 month')} {2003-11-22 12:34:00} +datetest 2.22 {datetime('2003-10-22 12:34','11 month')} {2004-09-22 12:34:00} +datetest 2.23 {datetime('2003-10-22 12:34','-13 month')} {2002-09-22 12:34:00} +datetest 2.24 {datetime('2003-10-22 12:34','1.5 months')} {2003-12-07 12:34:00} +datetest 2.25 {datetime('2003-10-22 12:34','-5 years')} {1998-10-22 12:34:00} +datetest 2.26 {datetime('2003-10-22 12:34','+10.5 minutes')} \ + {2003-10-22 12:44:30} +datetest 2.27 {datetime('2003-10-22 12:34','-1.25 hours')} \ + {2003-10-22 11:19:00} +datetest 2.28 {datetime('2003-10-22 12:34','11.25 seconds')} \ + {2003-10-22 12:34:11} +datetest 2.29 {datetime('2003-10-22 12:24','+5 bogus')} NULL +datetest 2.30 {datetime('2003-10-22 12:24','+++')} NULL +datetest 2.31 {datetime('2003-10-22 12:24','+12.3e4 femtoseconds')} NULL +datetest 2.32 {datetime('2003-10-22 12:24','+12.3e4 uS')} NULL +datetest 2.33 {datetime('2003-10-22 12:24','+1 abc')} NULL +datetest 2.34 {datetime('2003-10-22 12:24','+1 abcd')} NULL +datetest 2.35 {datetime('2003-10-22 12:24','+1 abcde')} NULL +datetest 2.36 {datetime('2003-10-22 12:24','+1 abcdef')} NULL +datetest 2.37 {datetime('2003-10-22 12:24','+1 abcdefg')} NULL +datetest 2.38 {datetime('2003-10-22 12:24','+1 abcdefgh')} NULL +datetest 2.39 {datetime('2003-10-22 12:24','+1 abcdefghi')} NULL +datetest 2.40 {datetime()} NULL + + +datetest 3.1 {strftime('%d','2003-10-31 12:34:56.432')} 31 +datetest 3.2 {strftime('%f','2003-10-31 12:34:56.432')} 56.432 +datetest 3.3 {strftime('%H','2003-10-31 12:34:56.432')} 12 
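# strftime() is the general form of the group-3 cases: date(), time(),
# datetime() and julianday() behave like strftime() with the formats
# '%Y-%m-%d', '%H:%M:%S', '%Y-%m-%d %H:%M:%S' and '%J' respectively.
# A one-line sketch (illustration only, reusing the open [db] handle); it
# returns the same value that datetime('2003-10-31 12:34:56.432') would,
# namely 2003-10-31 12:34:56:
db one {SELECT strftime('%Y-%m-%d %H:%M:%S','2003-10-31 12:34:56.432')}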
+datetest 3.4 {strftime('%j','2003-10-31 12:34:56.432')} 304 +datetest 3.5 {strftime('%J','2003-10-31 12:34:56.432')} 2452944.02426426 +datetest 3.6 {strftime('%m','2003-10-31 12:34:56.432')} 10 +datetest 3.7 {strftime('%M','2003-10-31 12:34:56.432')} 34 +datetest 3.8 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696 +datetest 3.9 {strftime('%S','2003-10-31 12:34:56.432')} 56 +datetest 3.10 {strftime('%w','2003-10-31 12:34:56.432')} 5 +datetest 3.11.1 {strftime('%W','2003-10-31 12:34:56.432')} 43 +datetest 3.11.2 {strftime('%W','2004-01-01')} 00 +datetest 3.11.3 {strftime('%W','2004-01-02')} 00 +datetest 3.11.4 {strftime('%W','2004-01-03')} 00 +datetest 3.11.5 {strftime('%W','2004-01-04')} 00 +datetest 3.11.6 {strftime('%W','2004-01-05')} 01 +datetest 3.11.7 {strftime('%W','2004-01-06')} 01 +datetest 3.11.8 {strftime('%W','2004-01-07')} 01 +datetest 3.11.9 {strftime('%W','2004-01-08')} 01 +datetest 3.11.10 {strftime('%W','2004-01-09')} 01 +datetest 3.11.11 {strftime('%W','2004-07-18')} 28 +datetest 3.11.12 {strftime('%W','2004-12-31')} 52 +datetest 3.11.13 {strftime('%W','2007-12-31')} 53 +datetest 3.11.14 {strftime('%W','2007-01-01')} 01 +datetest 3.11.15 {strftime('%W %j',2454109.04140970)} {02 008} +datetest 3.11.16 {strftime('%W %j',2454109.04140971)} {02 008} +datetest 3.11.17 {strftime('%W %j',2454109.04140972)} {02 008} +datetest 3.11.18 {strftime('%W %j',2454109.04140973)} {02 008} +datetest 3.11.19 {strftime('%W %j',2454109.04140974)} {02 008} +datetest 3.11.20 {strftime('%W %j',2454109.04140975)} {02 008} +datetest 3.11.21 {strftime('%W %j',2454109.04140976)} {02 008} +datetest 3.11.22 {strftime('%W %j',2454109.04140977)} {02 008} +datetest 3.11.22 {strftime('%W %j',2454109.04140978)} {02 008} +datetest 3.11.22 {strftime('%W %j',2454109.04140979)} {02 008} +datetest 3.11.22 {strftime('%W %j',2454109.04140980)} {02 008} +datetest 3.12 {strftime('%Y','2003-10-31 12:34:56.432')} 2003 +datetest 3.13 {strftime('%%','2003-10-31 12:34:56.432')} % +datetest 3.14 {strftime('%_','2003-10-31 12:34:56.432')} NULL +datetest 3.15 {strftime('%Y-%m-%d','2003-10-31')} 2003-10-31 +proc repeat {n txt} { + set x {} + while {$n>0} { + append x $txt + incr n -1 + } + return $x +} +datetest 3.16 "strftime('[repeat 200 %Y]','2003-10-31')" [repeat 200 2003] +datetest 3.17 "strftime('[repeat 200 abc%m123]','2003-10-31')" \ + [repeat 200 abc10123] + +# Ticket #2276. Make sure leading zeros are inserted where appropriate. +# +datetest 3.20 \ + {strftime('%d/%f/%H/%W/%j/%m/%M/%S/%Y','0421-01-02 03:04:05.006')} \ + 02/05.006/03/00/002/01/04/05/0421 + +set sqlite_current_time 1157124367 +datetest 4.1 {date('now')} {2006-09-01} +set sqlite_current_time 0 + +datetest 5.1 {datetime('1994-04-16 14:00:00 +05:00')} {1994-04-16 09:00:00} +datetest 5.2 {datetime('1994-04-16 14:00:00 -05:15')} {1994-04-16 19:15:00} +datetest 5.3 {datetime('1994-04-16 05:00:00 +08:30')} {1994-04-15 20:30:00} +datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55')} {1994-04-17 01:55:00} +datetest 5.5 {datetime('1994-04-16 14:00:00 -11:60')} NULL +datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55 ')} {1994-04-17 01:55:00} +datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55 x')} NULL + +# localtime->utc and utc->localtime conversions. These tests only work +# if the localtime is in the US Eastern Time (the time in Charlotte, NC +# and in New York.) 
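As a worked example of the offset computation just below (assuming the host clock is on US Eastern Daylight Time, UTC-4): the 'localtime' modifier treats its argument as UTC, so 2006-09-01 00:00:00 UTC reads as 2006-08-31 20:00:00 on the local clock; the two julianday() values therefore differ by 4/24 of a day, and the expression evaluates to CAST(24*(4/24) + 0.5 AS INT), i.e. CAST(4.5 AS INT), i.e. 4.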
+# +set tzoffset [db one { + SELECT CAST(24*(julianday('2006-09-01') - + julianday('2006-09-01','localtime'))+0.5 + AS INT) +}] +if {$tzoffset==4} { + datetest 6.1 {datetime('2000-10-29 05:59:00','localtime')}\ + {2000-10-29 01:59:00} + datetest 6.2 {datetime('2000-10-29 06:00:00','localtime')}\ + {2000-10-29 01:00:00} + datetest 6.3 {datetime('2000-04-02 06:59:00','localtime')}\ + {2000-04-02 01:59:00} + datetest 6.4 {datetime('2000-04-02 07:00:00','localtime')}\ + {2000-04-02 03:00:00} + datetest 6.5 {datetime('2000-10-29 01:59:00','utc')} {2000-10-29 05:59:00} + datetest 6.6 {datetime('2000-10-29 02:00:00','utc')} {2000-10-29 07:00:00} + datetest 6.7 {datetime('2000-04-02 01:59:00','utc')} {2000-04-02 06:59:00} + datetest 6.8 {datetime('2000-04-02 02:00:00','utc')} {2000-04-02 06:00:00} + + datetest 6.10 {datetime('2000-01-01 12:00:00','localtime')} \ + {2000-01-01 07:00:00} + datetest 6.11 {datetime('1969-01-01 12:00:00','localtime')} \ + {1969-01-01 07:00:00} + datetest 6.12 {datetime('2039-01-01 12:00:00','localtime')} \ + {2039-01-01 07:00:00} + datetest 6.13 {datetime('2000-07-01 12:00:00','localtime')} \ + {2000-07-01 08:00:00} + datetest 6.14 {datetime('1969-07-01 12:00:00','localtime')} \ + {1969-07-01 07:00:00} + datetest 6.15 {datetime('2039-07-01 12:00:00','localtime')} \ + {2039-07-01 07:00:00} + set sqlite_current_time \ + [db eval {SELECT strftime('%s','2000-07-01 12:34:56')}] + datetest 6.16 {datetime('now','localtime')} {2000-07-01 08:34:56} + datetest 6.17 {datetime('now','localtimex')} NULL + datetest 6.18 {datetime('now','localtim')} NULL + set sqlite_current_time 0 +} + +# These two are a bit of a scam. They are added to ensure that 100% of +# the date.c file is covered by testing, even when the time-zone +# is not -0400 (the condition for running of the block of tests above). +# +datetest 6.19 {datetime('2039-07-01 12:00:00','localtime',null)} NULL +datetest 6.20 {datetime('2039-07-01 12:00:00','utc',null)} NULL + +# Date-time functions that contain NULL arguments return a NULL +# result. +# +datetest 7.1 {datetime(null)} NULL +datetest 7.2 {datetime('now',null)} NULL +datetest 7.3 {datetime('now','localtime',null)} NULL +datetest 7.4 {time(null)} NULL +datetest 7.5 {time('now',null)} NULL +datetest 7.6 {time('now','localtime',null)} NULL +datetest 7.7 {date(null)} NULL +datetest 7.8 {date('now',null)} NULL +datetest 7.9 {date('now','localtime',null)} NULL +datetest 7.10 {julianday(null)} NULL +datetest 7.11 {julianday('now',null)} NULL +datetest 7.12 {julianday('now','localtime',null)} NULL +datetest 7.13 {strftime(null,'now')} NULL +datetest 7.14 {strftime('%s',null)} NULL +datetest 7.15 {strftime('%s','now',null)} NULL +datetest 7.16 {strftime('%s','now','localtime',null)} NULL + +# Test modifiers when the date begins as a julian day number - to +# make sure the HH:MM:SS is preserved. Ticket #551. 
+# +set sqlite_current_time [db eval {SELECT strftime('%s','2003-10-22 12:34:00')}] +datetest 8.1 {datetime('now','weekday 0')} {2003-10-26 12:34:00} +datetest 8.2 {datetime('now','weekday 1')} {2003-10-27 12:34:00} +datetest 8.3 {datetime('now','weekday 2')} {2003-10-28 12:34:00} +datetest 8.4 {datetime('now','weekday 3')} {2003-10-22 12:34:00} +datetest 8.5 {datetime('now','start of month')} {2003-10-01 00:00:00} +datetest 8.6 {datetime('now','start of year')} {2003-01-01 00:00:00} +datetest 8.7 {datetime('now','start of day')} {2003-10-22 00:00:00} +datetest 8.8 {datetime('now','1 day')} {2003-10-23 12:34:00} +datetest 8.9 {datetime('now','+1 day')} {2003-10-23 12:34:00} +datetest 8.10 {datetime('now','+1.25 day')} {2003-10-23 18:34:00} +datetest 8.11 {datetime('now','-1.0 day')} {2003-10-21 12:34:00} +datetest 8.12 {datetime('now','1 month')} {2003-11-22 12:34:00} +datetest 8.13 {datetime('now','11 month')} {2004-09-22 12:34:00} +datetest 8.14 {datetime('now','-13 month')} {2002-09-22 12:34:00} +datetest 8.15 {datetime('now','1.5 months')} {2003-12-07 12:34:00} +datetest 8.16 {datetime('now','-5 years')} {1998-10-22 12:34:00} +datetest 8.17 {datetime('now','+10.5 minutes')} {2003-10-22 12:44:30} +datetest 8.18 {datetime('now','-1.25 hours')} {2003-10-22 11:19:00} +datetest 8.19 {datetime('now','11.25 seconds')} {2003-10-22 12:34:11} +datetest 8.90 {datetime('now','abcdefghijklmnopqrstuvwyxzABCDEFGHIJLMNOP')} NULL +set sqlite_current_time 0 + +# Negative years work. Example: '-4713-11-26' is JD 1.5. +# +datetest 9.1 {julianday('-4713-11-24 12:00:00')} {0.0} +datetest 9.2 {julianday(datetime(5))} {5.0} +datetest 9.3 {julianday(datetime(10))} {10.0} +datetest 9.4 {julianday(datetime(100))} {100.0} +datetest 9.5 {julianday(datetime(1000))} {1000.0} +datetest 9.6 {julianday(datetime(10000))} {10000.0} +datetest 9.7 {julianday(datetime(100000))} {100000.0} + +# datetime() with just an HH:MM:SS correctly inserts the date 2000-01-01. 
+# +datetest 10.1 {datetime('01:02:03')} {2000-01-01 01:02:03} +datetest 10.2 {date('01:02:03')} {2000-01-01} +datetest 10.3 {strftime('%Y-%m-%d %H:%M','01:02:03')} {2000-01-01 01:02} + +# Test the new HH:MM:SS modifier +# +datetest 11.1 {datetime('2004-02-28 20:00:00', '-01:20:30')} \ + {2004-02-28 18:39:30} +datetest 11.2 {datetime('2004-02-28 20:00:00', '+12:30:00')} \ + {2004-02-29 08:30:00} +datetest 11.3 {datetime('2004-02-28 20:00:00', '+12:30')} \ + {2004-02-29 08:30:00} +datetest 11.4 {datetime('2004-02-28 20:00:00', '12:30')} \ + {2004-02-29 08:30:00} +datetest 11.5 {datetime('2004-02-28 20:00:00', '-12:00')} \ + {2004-02-28 08:00:00} +datetest 11.6 {datetime('2004-02-28 20:00:00', '-12:01')} \ + {2004-02-28 07:59:00} +datetest 11.7 {datetime('2004-02-28 20:00:00', '-11:59')} \ + {2004-02-28 08:01:00} +datetest 11.8 {datetime('2004-02-28 20:00:00', '11:59')} \ + {2004-02-29 07:59:00} +datetest 11.9 {datetime('2004-02-28 20:00:00', '12:01')} \ + {2004-02-29 08:01:00} +datetest 11.10 {datetime('2004-02-28 20:00:00', '12:60')} NULL + +# Ticket #1964 +datetest 12.1 {datetime('2005-09-01')} {2005-09-01 00:00:00} +datetest 12.2 {datetime('2005-09-01','+0 hours')} {2005-09-01 00:00:00} + +# Ticket #1991 +do_test date-13.1 { + execsql { + SELECT strftime('%Y-%m-%d %H:%M:%f', julianday('2006-09-24T10:50:26.047')) + } +} {{2006-09-24 10:50:26.047}} + +# Ticket #2153 +datetest 13.2 {strftime('%Y-%m-%d %H:%M:%S', '2007-01-01 12:34:59.6')} \ + {2007-01-01 12:34:59} +datetest 13.3 {strftime('%Y-%m-%d %H:%M:%f', '2007-01-01 12:34:59.6')} \ + {2007-01-01 12:34:59.600} +datetest 13.4 {strftime('%Y-%m-%d %H:%M:%S', '2007-01-01 12:59:59.6')} \ + {2007-01-01 12:59:59} +datetest 13.5 {strftime('%Y-%m-%d %H:%M:%f', '2007-01-01 12:59:59.6')} \ + {2007-01-01 12:59:59.600} +datetest 13.6 {strftime('%Y-%m-%d %H:%M:%S', '2007-01-01 23:59:59.6')} \ + {2007-01-01 23:59:59} +datetest 13.7 {strftime('%Y-%m-%d %H:%M:%f', '2007-01-01 23:59:59.6')} \ + {2007-01-01 23:59:59.600} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/default.test b/libraries/sqlite/unix/sqlite-3.5.1/test/default.test new file mode 100644 index 0000000..9a2ab1a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/default.test @@ -0,0 +1,52 @@ +# 2005 August 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this file is testing corner cases of the DEFAULT syntax +# on table definitions. 
+# +# $Id: default.test,v 1.2 2005/08/20 03:03:04 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable bloblit { + do_test default-1.1 { + execsql { + CREATE TABLE t1( + a INTEGER, + b BLOB DEFAULT x'6869' + ); + INSERT INTO t1(a) VALUES(1); + SELECT * from t1; + } + } {1 hi} +} +do_test default-1.2 { + execsql { + CREATE TABLE t2( + x INTEGER, + y INTEGER DEFAULT NULL + ); + INSERT INTO t2(x) VALUES(1); + SELECT * FROM t2; + } +} {1 {}} +do_test default-1.3 { + catchsql { + CREATE TABLE t3( + x INTEGER, + y INTEGER DEFAULT (max(x,5)) + ) + } +} {1 {default value of column [y] is not constant}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/delete.test b/libraries/sqlite/unix/sqlite-3.5.1/test/delete.test new file mode 100644 index 0000000..0409ba9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/delete.test @@ -0,0 +1,313 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the DELETE FROM statement. +# +# $Id: delete.test,v 1.21 2006/01/03 00:33:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Try to delete from a non-existant table. +# +do_test delete-1.1 { + set v [catch {execsql {DELETE FROM test1}} msg] + lappend v $msg +} {1 {no such table: test1}} + +# Try to delete from sqlite_master +# +do_test delete-2.1 { + set v [catch {execsql {DELETE FROM sqlite_master}} msg] + lappend v $msg +} {1 {table sqlite_master may not be modified}} + +# Delete selected entries from a table with and without an index. 
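Several of the tests below toggle PRAGMA count_changes. When that pragma is on, INSERT, UPDATE and DELETE statements return the number of rows they changed as a result row, which is why a DELETE whose WHERE clause matches nothing evaluates to 0 in delete-3.1.4 below. A minimal sketch with illustrative values:

execsql {PRAGMA count_changes=on}
execsql {DELETE FROM table1 WHERE f1=99}   ;# nothing matches, so the statement returns 0
execsql {PRAGMA count_changes=off}
execsql {DELETE FROM table1 WHERE f1=99}   ;# with the pragma off, no count row is returned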
+# +do_test delete-3.1.1 { + execsql {CREATE TABLE table1(f1 int, f2 int)} + execsql {INSERT INTO table1 VALUES(1,2)} + execsql {INSERT INTO table1 VALUES(2,4)} + execsql {INSERT INTO table1 VALUES(3,8)} + execsql {INSERT INTO table1 VALUES(4,16)} + execsql {SELECT * FROM table1 ORDER BY f1} +} {1 2 2 4 3 8 4 16} +do_test delete-3.1.2 { + execsql {DELETE FROM table1 WHERE f1=3} +} {} +do_test delete-3.1.3 { + execsql {SELECT * FROM table1 ORDER BY f1} +} {1 2 2 4 4 16} +do_test delete-3.1.4 { + execsql {CREATE INDEX index1 ON table1(f1)} + execsql {PRAGMA count_changes=on} + ifcapable explain { + execsql {EXPLAIN DELETE FROM table1 WHERE f1=3} + } + execsql {DELETE FROM 'table1' WHERE f1=3} +} {0} +do_test delete-3.1.5 { + execsql {SELECT * FROM table1 ORDER BY f1} +} {1 2 2 4 4 16} +do_test delete-3.1.6.1 { + execsql {DELETE FROM table1 WHERE f1=2} +} {1} +do_test delete-3.1.6.2 { + db changes +} 1 +do_test delete-3.1.7 { + execsql {SELECT * FROM table1 ORDER BY f1} +} {1 2 4 16} +integrity_check delete-3.2 + + +# Semantic errors in the WHERE clause +# +do_test delete-4.1 { + execsql {CREATE TABLE table2(f1 int, f2 int)} + set v [catch {execsql {DELETE FROM table2 WHERE f3=5}} msg] + lappend v $msg +} {1 {no such column: f3}} + +do_test delete-4.2 { + set v [catch {execsql {DELETE FROM table2 WHERE xyzzy(f1+4)}} msg] + lappend v $msg +} {1 {no such function: xyzzy}} +integrity_check delete-4.3 + +# Lots of deletes +# +do_test delete-5.1.1 { + execsql {DELETE FROM table1} +} {2} +do_test delete-5.1.2 { + execsql {SELECT count(*) FROM table1} +} {0} +do_test delete-5.2.1 { + execsql {BEGIN TRANSACTION} + for {set i 1} {$i<=200} {incr i} { + execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])" + } + execsql {COMMIT} + execsql {SELECT count(*) FROM table1} +} {200} +do_test delete-5.2.2 { + execsql {DELETE FROM table1} +} {200} +do_test delete-5.2.3 { + execsql {BEGIN TRANSACTION} + for {set i 1} {$i<=200} {incr i} { + execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])" + } + execsql {COMMIT} + execsql {SELECT count(*) FROM table1} +} {200} +do_test delete-5.2.4 { + execsql {PRAGMA count_changes=off} + execsql {DELETE FROM table1} +} {} +do_test delete-5.2.5 { + execsql {SELECT count(*) FROM table1} +} {0} +do_test delete-5.2.6 { + execsql {BEGIN TRANSACTION} + for {set i 1} {$i<=200} {incr i} { + execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])" + } + execsql {COMMIT} + execsql {SELECT count(*) FROM table1} +} {200} +do_test delete-5.3 { + for {set i 1} {$i<=200} {incr i 4} { + execsql "DELETE FROM table1 WHERE f1==$i" + } + execsql {SELECT count(*) FROM table1} +} {150} +do_test delete-5.4.1 { + execsql "DELETE FROM table1 WHERE f1>50" + db changes +} [db one {SELECT count(*) FROM table1 WHERE f1>50}] +do_test delete-5.4.2 { + execsql {SELECT count(*) FROM table1} +} {37} +do_test delete-5.5 { + for {set i 1} {$i<=70} {incr i 3} { + execsql "DELETE FROM table1 WHERE f1==$i" + } + execsql {SELECT f1 FROM table1 ORDER BY f1} +} {2 3 6 8 11 12 14 15 18 20 23 24 26 27 30 32 35 36 38 39 42 44 47 48 50} +do_test delete-5.6 { + for {set i 1} {$i<40} {incr i} { + execsql "DELETE FROM table1 WHERE f1==$i" + } + execsql {SELECT f1 FROM table1 ORDER BY f1} +} {42 44 47 48 50} +do_test delete-5.7 { + execsql "DELETE FROM table1 WHERE f1!=48" + execsql {SELECT f1 FROM table1 ORDER BY f1} +} {48} +integrity_check delete-5.8 + + +# Delete large quantities of data. We want to test the List overflow +# mechanism in the vdbe. 
+# +do_test delete-6.1 { + execsql {BEGIN; DELETE FROM table1} + for {set i 1} {$i<=3000} {incr i} { + execsql "INSERT INTO table1 VALUES($i,[expr {$i*$i}])" + } + execsql {DELETE FROM table2} + for {set i 1} {$i<=3000} {incr i} { + execsql "INSERT INTO table2 VALUES($i,[expr {$i*$i}])" + } + execsql {COMMIT} + execsql {SELECT count(*) FROM table1} +} {3000} +do_test delete-6.2 { + execsql {SELECT count(*) FROM table2} +} {3000} +do_test delete-6.3 { + execsql {SELECT f1 FROM table1 WHERE f1<10 ORDER BY f1} +} {1 2 3 4 5 6 7 8 9} +do_test delete-6.4 { + execsql {SELECT f1 FROM table2 WHERE f1<10 ORDER BY f1} +} {1 2 3 4 5 6 7 8 9} +do_test delete-6.5.1 { + execsql {DELETE FROM table1 WHERE f1>7} + db changes +} {2993} +do_test delete-6.5.2 { + execsql {SELECT f1 FROM table1 ORDER BY f1} +} {1 2 3 4 5 6 7} +do_test delete-6.6 { + execsql {DELETE FROM table2 WHERE f1>7} + execsql {SELECT f1 FROM table2 ORDER BY f1} +} {1 2 3 4 5 6 7} +do_test delete-6.7 { + execsql {DELETE FROM table1} + execsql {SELECT f1 FROM table1} +} {} +do_test delete-6.8 { + execsql {INSERT INTO table1 VALUES(2,3)} + execsql {SELECT f1 FROM table1} +} {2} +do_test delete-6.9 { + execsql {DELETE FROM table2} + execsql {SELECT f1 FROM table2} +} {} +do_test delete-6.10 { + execsql {INSERT INTO table2 VALUES(2,3)} + execsql {SELECT f1 FROM table2} +} {2} +integrity_check delete-6.11 + +do_test delete-7.1 { + execsql { + CREATE TABLE t3(a); + INSERT INTO t3 VALUES(1); + INSERT INTO t3 SELECT a+1 FROM t3; + INSERT INTO t3 SELECT a+2 FROM t3; + SELECT * FROM t3; + } +} {1 2 3 4} +ifcapable {trigger} { + do_test delete-7.2 { + execsql { + CREATE TABLE cnt(del); + INSERT INTO cnt VALUES(0); + CREATE TRIGGER r1 AFTER DELETE ON t3 FOR EACH ROW BEGIN + UPDATE cnt SET del=del+1; + END; + DELETE FROM t3 WHERE a<2; + SELECT * FROM t3; + } + } {2 3 4} + do_test delete-7.3 { + execsql { + SELECT * FROM cnt; + } + } {1} + do_test delete-7.4 { + execsql { + DELETE FROM t3; + SELECT * FROM t3; + } + } {} + do_test delete-7.5 { + execsql { + SELECT * FROM cnt; + } + } {4} + do_test delete-7.6 { + execsql { + INSERT INTO t3 VALUES(1); + INSERT INTO t3 SELECT a+1 FROM t3; + INSERT INTO t3 SELECT a+2 FROM t3; + CREATE TABLE t4 AS SELECT * FROM t3; + PRAGMA count_changes=ON; + DELETE FROM t3; + DELETE FROM t4; + } + } {4 4} +} ;# endif trigger +ifcapable {!trigger} { + execsql {DELETE FROM t3} +} +integrity_check delete-7.7 + +# Make sure error messages are consistent when attempting to delete +# from a read-only database. Ticket #304. +# +do_test delete-8.0 { + execsql { + PRAGMA count_changes=OFF; + INSERT INTO t3 VALUES(123); + SELECT * FROM t3; + } +} {123} +db close +catch {file attributes test.db -permissions 0444} +catch {file attributes test.db -readonly 1} +sqlite3 db test.db +set ::DB [sqlite3_connection_pointer db] +do_test delete-8.1 { + catchsql { + DELETE FROM t3; + } +} {1 {attempt to write a readonly database}} +do_test delete-8.2 { + execsql {SELECT * FROM t3} +} {123} +do_test delete-8.3 { + catchsql { + DELETE FROM t3 WHERE 1; + } +} {1 {attempt to write a readonly database}} +do_test delete-8.4 { + execsql {SELECT * FROM t3} +} {123} + +# Update for v3: In v2 the DELETE statement would succeed because no +# database writes actually occur. Version 3 refuses to open a transaction +# on a read-only file, so the statement fails. 
+do_test delete-8.5 { + catchsql { + DELETE FROM t3 WHERE a<100; + } +# v2 result: {0 {}} +} {1 {attempt to write a readonly database}} +do_test delete-8.6 { + execsql {SELECT * FROM t3} +} {123} +integrity_check delete-8.7 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/delete2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/delete2.test new file mode 100644 index 0000000..659cc56 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/delete2.test @@ -0,0 +1,99 @@ +# 2003 September 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is a test to replicate the bug reported by +# ticket #842. +# +# Ticket #842 was a database corruption problem caused by a DELETE that +# removed an index entry by not the main table entry. To recreate the +# problem do this: +# +# (1) Create a table with an index. Insert some data into that table. +# (2) Start a query on the table but do not complete the query. +# (3) Try to delete a single entry from the table. +# +# Step 3 will fail because there is still a read cursor on the table. +# But the database is corrupted by the DELETE. It turns out that the +# index entry was deleted first, before the table entry. And the index +# delete worked. Thus an entry was deleted from the index but not from +# the table. +# +# The solution to the problem was to detect that the table is locked +# before the index entry is deleted. +# +# $Id: delete2.test,v 1.7 2006/08/16 16:42:48 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table that has an index. +# +do_test delete2-1.1 { + set DB [sqlite3_connection_pointer db] + execsql { + CREATE TABLE q(s string, id string, constraint pk_q primary key(id)); + BEGIN; + INSERT INTO q(s,id) VALUES('hello','id.1'); + INSERT INTO q(s,id) VALUES('goodbye','id.2'); + INSERT INTO q(s,id) VALUES('again','id.3'); + END; + SELECT * FROM q; + } +} {hello id.1 goodbye id.2 again id.3} +do_test delete2-1.2 { + execsql { + SELECT * FROM q WHERE id='id.1'; + } +} {hello id.1} +integrity_check delete2-1.3 + +# Start a query on the table. The query should not use the index. +# Do not complete the query, thus leaving the table locked. +# +do_test delete2-1.4 { + set STMT [sqlite3_prepare $DB {SELECT * FROM q} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +integrity_check delete2-1.5 + +# Try to delete a row from the table while a read is in process. +# As of 2006-08-16, this is allowed. (It used to fail with SQLITE_LOCKED.) +# +do_test delete2-1.6 { + catchsql { + DELETE FROM q WHERE rowid=1 + } +} {0 {}} +integrity_check delete2-1.7 +do_test delete2-1.8 { + execsql { + SELECT * FROM q; + } +} {goodbye id.2 again id.3} + +# Finalize the query, thus clearing the lock on the table. Then +# retry the delete. The delete should work this time. 
+# +do_test delete2-1.9 { + sqlite3_finalize $STMT + catchsql { + DELETE FROM q WHERE rowid=1 + } +} {0 {}} +integrity_check delete2-1.10 +do_test delete2-1.11 { + execsql { + SELECT * FROM q; + } +} {goodbye id.2 again id.3} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/delete3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/delete3.test new file mode 100644 index 0000000..a31f6ec --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/delete3.test @@ -0,0 +1,57 @@ +# 2005 August 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is a test of the DELETE command where a +# large number of rows are deleted. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table that contains a large number of rows. +# +do_test delete3-1.1 { + execsql { + CREATE TABLE t1(x integer primary key); + BEGIN; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 SELECT x+2 FROM t1; + INSERT INTO t1 SELECT x+4 FROM t1; + INSERT INTO t1 SELECT x+8 FROM t1; + INSERT INTO t1 SELECT x+16 FROM t1; + INSERT INTO t1 SELECT x+32 FROM t1; + INSERT INTO t1 SELECT x+64 FROM t1; + INSERT INTO t1 SELECT x+128 FROM t1; + INSERT INTO t1 SELECT x+256 FROM t1; + INSERT INTO t1 SELECT x+512 FROM t1; + INSERT INTO t1 SELECT x+1024 FROM t1; + INSERT INTO t1 SELECT x+2048 FROM t1; + INSERT INTO t1 SELECT x+4096 FROM t1; + INSERT INTO t1 SELECT x+8192 FROM t1; + INSERT INTO t1 SELECT x+16384 FROM t1; + INSERT INTO t1 SELECT x+32768 FROM t1; + INSERT INTO t1 SELECT x+65536 FROM t1; + INSERT INTO t1 SELECT x+131072 FROM t1; + INSERT INTO t1 SELECT x+262144 FROM t1; + COMMIT; + SELECT count(*) FROM t1; + } +} {524288} +do_test delete3-1.2 { + execsql { + DELETE FROM t1 WHERE x%2==0; + SELECT count(*) FROM t1; + } +} {262144} +integrity_check delete3-1.3 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/descidx1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx1.test new file mode 100644 index 0000000..9c26b7f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx1.test @@ -0,0 +1,337 @@ +# 2005 December 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is descending indices. +# +# $Id: descidx1.test,v 1.7 2006/07/11 14:17:52 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db eval {PRAGMA legacy_file_format=OFF} + +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. 
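A sketch of how this helper and its get_file_format counterpart are typically driven; it mirrors the pattern descidx2.test uses later in this patch, which closes and reopens the connection so that the rewritten header is re-read:

set_file_format 3        ;# write file-format 3 into the database header
db close
sqlite3 db test.db       ;# reopen so the new header value is seen
get_file_format          ;# now reports 3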
+# +proc set_file_format {newval} { + set bt [btree_open test.db 10 0] + btree_begin_transaction $bt + set meta [btree_get_meta $bt] + lset meta 2 $newval ;# File format + lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie + eval "btree_update_meta $bt $meta" + btree_commit $bt + btree_close $bt +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + set bt [btree_open $fname 10 0] + set meta [btree_get_meta $bt] + btree_close $bt + lindex $meta 2 +} + +# Verify that the file format starts as 4. +# +do_test descidx1-1.1 { + execsql { + CREATE TABLE t1(a,b); + CREATE INDEX i1 ON t1(b ASC); + } + get_file_format +} {4} +do_test descidx1-1.2 { + execsql { + CREATE INDEX i2 ON t1(a DESC); + } + get_file_format +} {4} + +# Put some information in the table and verify that the descending +# index actually works. +# +do_test descidx1-2.1 { + execsql { + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 SELECT a+2, a+2 FROM t1; + INSERT INTO t1 SELECT a+4, a+4 FROM t1; + SELECT b FROM t1 WHERE a>3 AND a<7; + } +} {6 5 4} +do_test descidx1-2.2 { + execsql { + SELECT a FROM t1 WHERE b>3 AND b<7; + } +} {4 5 6} +do_test descidx1-2.3 { + execsql { + SELECT b FROM t1 WHERE a>=3 AND a<7; + } +} {6 5 4 3} +do_test descidx1-2.4 { + execsql { + SELECT b FROM t1 WHERE a>3 AND a<=7; + } +} {7 6 5 4} +do_test descidx1-2.5 { + execsql { + SELECT b FROM t1 WHERE a>=3 AND a<=7; + } +} {7 6 5 4 3} +do_test descidx1-2.6 { + execsql { + SELECT a FROM t1 WHERE b>=3 AND b<=7; + } +} {3 4 5 6 7} + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# Test sorting using a descending index. 
+# +do_test descidx1-3.1 { + cksort {SELECT a FROM t1 ORDER BY a} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.2 { + cksort {SELECT a FROM t1 ORDER BY a ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.3 { + cksort {SELECT a FROM t1 ORDER BY a DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx1-3.4 { + cksort {SELECT b FROM t1 ORDER BY a} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.5 { + cksort {SELECT b FROM t1 ORDER BY a ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.6 { + cksort {SELECT b FROM t1 ORDER BY a DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx1-3.7 { + cksort {SELECT a FROM t1 ORDER BY b} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.8 { + cksort {SELECT a FROM t1 ORDER BY b ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.9 { + cksort {SELECT a FROM t1 ORDER BY b DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx1-3.10 { + cksort {SELECT b FROM t1 ORDER BY b} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.11 { + cksort {SELECT b FROM t1 ORDER BY b ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx1-3.12 { + cksort {SELECT b FROM t1 ORDER BY b DESC} +} {8 7 6 5 4 3 2 1 nosort} + +do_test descidx1-3.21 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a} +} {4 5 6 7 nosort} +do_test descidx1-3.22 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a ASC} +} {4 5 6 7 nosort} +do_test descidx1-3.23 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a DESC} +} {7 6 5 4 nosort} +do_test descidx1-3.24 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a} +} {4 5 6 7 nosort} +do_test descidx1-3.25 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a ASC} +} {4 5 6 7 nosort} +do_test descidx1-3.26 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a DESC} +} {7 6 5 4 nosort} + +# Create a table with indices that are descending on some terms and +# ascending on others. 
+# +ifcapable bloblit { + do_test descidx1-4.1 { + execsql { + CREATE TABLE t2(a INT, b TEXT, c BLOB, d REAL); + CREATE INDEX i3 ON t2(a ASC, b DESC, c ASC); + CREATE INDEX i4 ON t2(b DESC, a ASC, d DESC); + INSERT INTO t2 VALUES(1,'one',x'31',1.0); + INSERT INTO t2 VALUES(2,'two',x'3232',2.0); + INSERT INTO t2 VALUES(3,'three',x'333333',3.0); + INSERT INTO t2 VALUES(4,'four',x'34343434',4.0); + INSERT INTO t2 VALUES(5,'five',x'3535353535',5.0); + INSERT INTO t2 VALUES(6,'six',x'363636363636',6.0); + INSERT INTO t2 VALUES(2,'two',x'323232',2.1); + INSERT INTO t2 VALUES(2,'zwei',x'3232',2.2); + INSERT INTO t2 VALUES(2,NULL,NULL,2.3); + SELECT count(*) FROM t2; + } + } {9} + do_test descidx1-4.2 { + execsql { + SELECT d FROM t2 ORDER BY a; + } + } {1.0 2.2 2.0 2.1 2.3 3.0 4.0 5.0 6.0} + do_test descidx1-4.3 { + execsql { + SELECT d FROM t2 WHERE a>=2; + } + } {2.2 2.0 2.1 2.3 3.0 4.0 5.0 6.0} + do_test descidx1-4.4 { + execsql { + SELECT d FROM t2 WHERE a>2; + } + } {3.0 4.0 5.0 6.0} + do_test descidx1-4.5 { + execsql { + SELECT d FROM t2 WHERE a=2 AND b>'two'; + } + } {2.2} + do_test descidx1-4.6 { + execsql { + SELECT d FROM t2 WHERE a=2 AND b>='two'; + } + } {2.2 2.0 2.1} + do_test descidx1-4.7 { + execsql { + SELECT d FROM t2 WHERE a=2 AND b<'two'; + } + } {} + do_test descidx1-4.8 { + execsql { + SELECT d FROM t2 WHERE a=2 AND b<='two'; + } + } {2.0 2.1} +} + +do_test descidx1-5.1 { + execsql { + CREATE TABLE t3(a,b,c,d); + CREATE INDEX t3i1 ON t3(a DESC, b ASC, c DESC, d ASC); + INSERT INTO t3 VALUES(0,0,0,0); + INSERT INTO t3 VALUES(0,0,0,1); + INSERT INTO t3 VALUES(0,0,1,0); + INSERT INTO t3 VALUES(0,0,1,1); + INSERT INTO t3 VALUES(0,1,0,0); + INSERT INTO t3 VALUES(0,1,0,1); + INSERT INTO t3 VALUES(0,1,1,0); + INSERT INTO t3 VALUES(0,1,1,1); + INSERT INTO t3 VALUES(1,0,0,0); + INSERT INTO t3 VALUES(1,0,0,1); + INSERT INTO t3 VALUES(1,0,1,0); + INSERT INTO t3 VALUES(1,0,1,1); + INSERT INTO t3 VALUES(1,1,0,0); + INSERT INTO t3 VALUES(1,1,0,1); + INSERT INTO t3 VALUES(1,1,1,0); + INSERT INTO t3 VALUES(1,1,1,1); + SELECT count(*) FROM t3; + } +} {16} +do_test descidx1-5.2 { + cksort { + SELECT a||b||c||d FROM t3 ORDER BY a,b,c,d; + } +} {0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111 sort} +do_test descidx1-5.3 { + cksort { + SELECT a||b||c||d FROM t3 ORDER BY a DESC, b ASC, c DESC, d ASC; + } +} {1010 1011 1000 1001 1110 1111 1100 1101 0010 0011 0000 0001 0110 0111 0100 0101 nosort} +do_test descidx1-5.4 { + cksort { + SELECT a||b||c||d FROM t3 ORDER BY a ASC, b DESC, c ASC, d DESC; + } +} {0101 0100 0111 0110 0001 0000 0011 0010 1101 1100 1111 1110 1001 1000 1011 1010 nosort} +do_test descidx1-5.5 { + cksort { + SELECT a||b||c FROM t3 WHERE d=0 ORDER BY a DESC, b ASC, c DESC + } +} {101 100 111 110 001 000 011 010 nosort} +do_test descidx1-5.6 { + cksort { + SELECT a||b||c FROM t3 WHERE d=0 ORDER BY a ASC, b DESC, c ASC + } +} {010 011 000 001 110 111 100 101 nosort} +do_test descidx1-5.7 { + cksort { + SELECT a||b||c FROM t3 WHERE d=0 ORDER BY a ASC, b DESC, c DESC + } +} {011 010 001 000 111 110 101 100 sort} +do_test descidx1-5.8 { + cksort { + SELECT a||b||c FROM t3 WHERE d=0 ORDER BY a ASC, b ASC, c ASC + } +} {000 001 010 011 100 101 110 111 sort} +do_test descidx1-5.9 { + cksort { + SELECT a||b||c FROM t3 WHERE d=0 ORDER BY a DESC, b DESC, c ASC + } +} {110 111 100 101 010 011 000 001 sort} + +# Test the legacy_file_format pragma here because we have access to +# the get_file_format command. 
+# +ifcapable legacyformat { + do_test descidx1-6.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql {PRAGMA legacy_file_format} + } {1} +} else { + do_test descidx1-6.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql {PRAGMA legacy_file_format} + } {0} +} +do_test descidx1-6.2 { + execsql {PRAGMA legacy_file_format=YES} + execsql {PRAGMA legacy_file_format} +} {1} +do_test descidx1-6.3 { + execsql { + CREATE TABLE t1(a,b,c); + } + get_file_format +} {1} +do_test descidx1-6.4 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql {PRAGMA legacy_file_format=NO} + execsql {PRAGMA legacy_file_format} +} {0} +do_test descidx1-6.5 { + execsql { + CREATE TABLE t1(a,b,c); + } + get_file_format +} {4} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/descidx2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx2.test new file mode 100644 index 0000000..3c6b392 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx2.test @@ -0,0 +1,184 @@ +# 2005 December 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is descending indices. +# +# $Id: descidx2.test,v 1.4 2006/07/11 14:17:52 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db eval {PRAGMA legacy_file_format=OFF} + +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + set bt [btree_open test.db 10 0] + btree_begin_transaction $bt + set meta [btree_get_meta $bt] + lset meta 2 $newval ;# File format + lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie + eval "btree_update_meta $bt $meta" + btree_commit $bt + btree_close $bt +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + set bt [btree_open $fname 10 0] + set meta [btree_get_meta $bt] + btree_close $bt + lindex $meta 2 +} + +# Verify that the file format starts as 4 +# +do_test descidx2-1.1 { + execsql { + CREATE TABLE t1(a,b); + CREATE INDEX i1 ON t1(b ASC); + } + get_file_format +} {4} +do_test descidx2-1.2 { + execsql { + CREATE INDEX i2 ON t1(a DESC); + } + get_file_format +} {4} + +# Before adding any information to the database, set the file format +# back to three. Then close and reopen the database. With the file +# format set to three, SQLite should ignore the DESC argument on the +# index. +# +do_test descidx2-2.0 { + set_file_format 3 + db close + sqlite3 db test.db + get_file_format +} {3} + +# Put some information in the table and verify that the DESC +# on the index is ignored. 
+# +do_test descidx2-2.1 { + execsql { + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 SELECT a+2, a+2 FROM t1; + INSERT INTO t1 SELECT a+4, a+4 FROM t1; + SELECT b FROM t1 WHERE a>3 AND a<7; + } +} {4 5 6} +do_test descidx2-2.2 { + execsql { + SELECT a FROM t1 WHERE b>3 AND b<7; + } +} {4 5 6} +do_test descidx2-2.3 { + execsql { + SELECT b FROM t1 WHERE a>=3 AND a<7; + } +} {3 4 5 6} +do_test descidx2-2.4 { + execsql { + SELECT b FROM t1 WHERE a>3 AND a<=7; + } +} {4 5 6 7} +do_test descidx2-2.5 { + execsql { + SELECT b FROM t1 WHERE a>=3 AND a<=7; + } +} {3 4 5 6 7} +do_test descidx2-2.6 { + execsql { + SELECT a FROM t1 WHERE b>=3 AND b<=7; + } +} {3 4 5 6 7} + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# Test sorting using a descending index. +# +do_test descidx2-3.1 { + cksort {SELECT a FROM t1 ORDER BY a} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.2 { + cksort {SELECT a FROM t1 ORDER BY a ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.3 { + cksort {SELECT a FROM t1 ORDER BY a DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx2-3.4 { + cksort {SELECT b FROM t1 ORDER BY a} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.5 { + cksort {SELECT b FROM t1 ORDER BY a ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.6 { + cksort {SELECT b FROM t1 ORDER BY a DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx2-3.7 { + cksort {SELECT a FROM t1 ORDER BY b} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.8 { + cksort {SELECT a FROM t1 ORDER BY b ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.9 { + cksort {SELECT a FROM t1 ORDER BY b DESC} +} {8 7 6 5 4 3 2 1 nosort} +do_test descidx2-3.10 { + cksort {SELECT b FROM t1 ORDER BY b} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.11 { + cksort {SELECT b FROM t1 ORDER BY b ASC} +} {1 2 3 4 5 6 7 8 nosort} +do_test descidx2-3.12 { + cksort {SELECT b FROM t1 ORDER BY b DESC} +} {8 7 6 5 4 3 2 1 nosort} + +do_test descidx2-3.21 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a} +} {4 5 6 7 nosort} +do_test descidx2-3.22 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a ASC} +} {4 5 6 7 nosort} +do_test descidx2-3.23 { + cksort {SELECT a FROM t1 WHERE a>3 AND a<8 ORDER BY a DESC} +} {7 6 5 4 nosort} +do_test descidx2-3.24 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a} +} {4 5 6 7 nosort} +do_test descidx2-3.25 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a ASC} +} {4 5 6 7 nosort} +do_test descidx2-3.26 { + cksort {SELECT b FROM t1 WHERE a>3 AND a<8 ORDER BY a DESC} +} {7 6 5 4 nosort} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/descidx3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx3.test new file mode 100644 index 0000000..98b60e6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/descidx3.test @@ -0,0 +1,155 @@ +# 2006 January 02 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is descending indices. +# +# $Id: descidx3.test,v 1.5 2006/07/11 14:17:52 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !bloblit { + finish_test + return +} +db eval {PRAGMA legacy_file_format=OFF} + +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + set bt [btree_open test.db 10 0] + btree_begin_transaction $bt + set meta [btree_get_meta $bt] + lset meta 2 $newval ;# File format + lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie + eval "btree_update_meta $bt $meta" + btree_commit $bt + btree_close $bt +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + set bt [btree_open $fname 10 0] + set meta [btree_get_meta $bt] + btree_close $bt + lindex $meta 2 +} + +# Verify that the file format starts as 4. +# +do_test descidx3-1.1 { + execsql { + CREATE TABLE t1(i INTEGER PRIMARY KEY,a,b,c,d); + CREATE INDEX t1i1 ON t1(a DESC, b ASC, c DESC); + CREATE INDEX t1i2 ON t1(b DESC, c ASC, d DESC); + } + get_file_format +} {4} + +# Put some information in the table and verify that the descending +# index actually works. +# +do_test descidx3-2.1 { + execsql { + INSERT INTO t1 VALUES(1, NULL, NULL, NULL, NULL); + INSERT INTO t1 VALUES(2, 2, 2, 2, 2); + INSERT INTO t1 VALUES(3, 3, 3, 3, 3); + INSERT INTO t1 VALUES(4, 2.5, 2.5, 2.5, 2.5); + INSERT INTO t1 VALUES(5, -5, -5, -5, -5); + INSERT INTO t1 VALUES(6, 'six', 'six', 'six', 'six'); + INSERT INTO t1 VALUES(7, x'77', x'77', x'77', x'77'); + INSERT INTO t1 VALUES(8, 'eight', 'eight', 'eight', 'eight'); + INSERT INTO t1 VALUES(9, x'7979', x'7979', x'7979', x'7979'); + SELECT count(*) FROM t1; + } +} 9 +do_test descidx3-2.2 { + execsql { + SELECT i FROM t1 ORDER BY a; + } +} {1 5 2 4 3 8 6 7 9} +do_test descidx3-2.3 { + execsql { + SELECT i FROM t1 ORDER BY a DESC; + } +} {9 7 6 8 3 4 2 5 1} + +# The "natural" order for the index is decreasing +do_test descidx3-2.4 { + execsql { + SELECT i FROM t1 WHERE a<=x'7979'; + } +} {9 7 6 8 3 4 2 5} +do_test descidx3-2.5 { + execsql { + SELECT i FROM t1 WHERE a>-99; + } +} {9 7 6 8 3 4 2 5} + +# Even when all values of t1.a are the same, sorting by A returns +# the rows in reverse order because this the natural order of the +# index. 
+# +do_test descidx3-3.1 { + execsql { + UPDATE t1 SET a=1; + SELECT i FROM t1 ORDER BY a; + } +} {9 7 6 8 3 4 2 5 1} +do_test descidx3-3.2 { + execsql { + SELECT i FROM t1 WHERE a=1 AND b>0 AND b<'zzz' + } +} {2 4 3 8 6} +do_test descidx3-3.3 { + execsql { + SELECT i FROM t1 WHERE b>0 AND b<'zzz' + } +} {6 8 3 4 2} +do_test descidx3-3.4 { + execsql { + SELECT i FROM t1 WHERE a=1 AND b>-9999 AND b-9999 AND b0 AND b<'zzz'; + } + } {8 6 2 4 3} + do_test descidx3-4.2 { + execsql { + UPDATE t1 SET a=1; + SELECT i FROM t1 WHERE a IN (1,2) AND b>0 AND b<'zzz'; + } + } {2 4 3 8 6} + do_test descidx3-4.3 { + execsql { + UPDATE t1 SET b=2; + SELECT i FROM t1 WHERE a IN (1,2) AND b>0 AND b<'zzz'; + } + } {9 7 6 8 3 4 2 5 1} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/diskfull.test b/libraries/sqlite/unix/sqlite-3.5.1/test/diskfull.test new file mode 100644 index 0000000..0983fec --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/diskfull.test @@ -0,0 +1,115 @@ +# 2001 October 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing for correct handling of disk full +# errors. +# +# $Id: diskfull.test,v 1.7 2007/08/24 03:51:34 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set sqlite_io_error_persist 0 +set sqlite_io_error_hit 0 +set sqlite_io_error_pending 0 +do_test diskfull-1.1 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(randstr(1000,1000)); + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + CREATE INDEX t1i1 ON t1(x); + CREATE TABLE t2 AS SELECT x AS a, x AS b FROM t1; + CREATE INDEX t2i1 ON t2(b); + } +} {} +set sqlite_diskfull_pending 0 +integrity_check diskfull-1.2 +do_test diskfull-1.3 { + set sqlite_diskfull_pending 1 + catchsql { + INSERT INTO t1 SELECT * FROM t1; + } +} {1 {database or disk is full}} +set sqlite_diskfull_pending 0 +integrity_check diskfull-1.4 +do_test diskfull-1.5 { + set sqlite_diskfull_pending 1 + catchsql { + DELETE FROM t1; + } +} {1 {database or disk is full}} +set sqlite_diskfull_pending 0 +set sqlite_io_error_hit 0 +integrity_check diskfull-1.6 + +proc do_diskfull_test {prefix sql} { + set ::go 1 + set ::sql $sql + set ::i 1 + while {$::go} { + incr ::i + do_test ${prefix}.$::i.1 { + set ::sqlite_diskfull_pending $::i + set ::sqlite_diskfull 0 + set r [catchsql $::sql] + if {!$::sqlite_diskfull} { + set r {1 {database or disk is full}} + set ::go 0 + } + if {$r=="1 {disk I/O error}"} { + set r {1 {database or disk is full}} + } + set r + } {1 {database or disk is full}} + set ::sqlite_diskfull_pending 0 + db close + sqlite3 db test.db + integrity_check ${prefix}.$::i.2 + } +} + +do_diskfull_test diskfull-2 VACUUM + +# db close +# file delete -force test.db +# file delete -force test.db-journal +# sqlite3 db test.db +# +# do_test diskfull-3.1 { +# execsql { +# PRAGMA default_cache_size = 10; +# CREATE TABLE t3(a, b, UNIQUE(a, b)); +# INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), 
randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; +# UPDATE t3 +# SET b = (SELECT a FROM t3 WHERE rowid = (SELECT max(rowid)-1 FROM t3)) +# WHERE rowid = (SELECT max(rowid) FROM t3); +# PRAGMA cache_size; +# } +# } {10} +# breakpoint +# do_diskfull_test diskfull-3.2 { +# BEGIN; +# INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); +# UPDATE t3 SET a = b; +# COMMIT; +# } + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/distinctagg.test b/libraries/sqlite/unix/sqlite-3.5.1/test/distinctagg.test new file mode 100644 index 0000000..b2191ea --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/distinctagg.test @@ -0,0 +1,57 @@ +# 2005 September 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the DISTINCT modifier on aggregate functions. +# +# $Id: distinctagg.test,v 1.2 2005/09/12 23:03:17 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test distinctagg-1.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(1,3,4); + INSERT INTO t1 VALUES(1,3,5); + SELECT count(distinct a), + count(distinct b), + count(distinct c), + count(all a) FROM t1; + } +} {1 2 3 3} +do_test distinctagg-1.2 { + execsql { + SELECT b, count(distinct c) FROM t1 GROUP BY b ORDER BY b + } +} {2 1 3 2} +do_test distinctagg-1.3 { + execsql { + INSERT INTO t1 SELECT a+1, b+3, c+5 FROM t1; + INSERT INTO t1 SELECT a+2, b+6, c+10 FROM t1; + INSERT INTO t1 SELECT a+4, b+12, c+20 FROM t1; + SELECT count(*), count(distinct a), count(distinct b) FROM t1 + } +} {24 8 16} +do_test distinctagg-1.4 { + execsql { + SELECT a, count(distinct c) FROM t1 GROUP BY a ORDER BY a + } +} {1 3 2 3 3 3 4 3 5 3 6 3 7 3 8 3} + +do_test distinctagg-2.1 { + catchsql { + SELECT count(distinct) FROM t1; + } +} {1 {DISTINCT in aggregate must be followed by an expression}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/enc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/enc.test new file mode 100644 index 0000000..5c24bbb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/enc.test @@ -0,0 +1,172 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus of +# this file is testing the SQLite routines used for converting between the +# various suported unicode encodings (UTF-8, UTF-16, UTF-16le and +# UTF-16be). 
+# +# $Id: enc.test,v 1.7 2007/05/23 16:23:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Skip this test if the build does not support multiple encodings. +# +ifcapable {!utf16} { + finish_test + return +} + +proc do_bincmp_test {testname got expect} { + binary scan $expect \c* expectvals + binary scan $got \c* gotvals + do_test $testname [list set dummy $gotvals] $expectvals +} + +# $utf16 is a UTF-16 encoded string. Swap each pair of bytes around +# to change the byte-order of the string. +proc swap_byte_order {utf16} { + binary scan $utf16 \c* ints + + foreach {a b} $ints { + lappend ints2 $b + lappend ints2 $a + } + + return [binary format \c* $ints2] +} + +# +# Test that the SQLite routines for converting between UTF encodings +# produce the same results as their TCL counterparts. +# +# $testname is the prefix to be used for the test names. +# $str is a string to use for testing (encoded in UTF-8, as normal for TCL). +# +# The test procedure is: +# 1. Convert the string from UTF-8 to UTF-16le and check that the TCL and +# SQLite routines produce the same results. +# +# 2. Convert the string from UTF-8 to UTF-16be and check that the TCL and +# SQLite routines produce the same results. +# +# 3. Use the SQLite routines to convert the native machine order UTF-16 +# representation back to the original UTF-8. Check that the result +# matches the original representation. +# +# 4. Add a byte-order mark to each of the UTF-16 representations and +# check that the SQLite routines can convert them back to UTF-8. For +# byte-order mark info, refer to section 3.10 of the unicode standard. +# +# 5. Take the byte-order marked UTF-16 strings from step 4 and ensure +# that SQLite can convert them both to native byte order UTF-16 +# strings, sans BOM. +# +# Coverage: +# +# sqlite_utf8to16be (step 2) +# sqlite_utf8to16le (step 1) +# sqlite_utf16to8 (steps 3, 4) +# sqlite_utf16to16le (step 5) +# sqlite_utf16to16be (step 5) +# +proc test_conversion {testname str} { + + # Step 1. + set utf16le_sqlite3 [test_translate $str UTF8 UTF16LE] + set utf16le_tcl [encoding convertto unicode $str] + append utf16le_tcl "\x00\x00" + if { $::tcl_platform(byteOrder)!="littleEndian" } { + set utf16le_tcl [swap_byte_order $utf16le_tcl] + } + do_bincmp_test $testname.1 $utf16le_sqlite3 $utf16le_tcl + set utf16le $utf16le_tcl + + # Step 2. + set utf16be_sqlite3 [test_translate $str UTF8 UTF16BE] + set utf16be_tcl [encoding convertto unicode $str] + append utf16be_tcl "\x00\x00" + if { $::tcl_platform(byteOrder)=="littleEndian" } { + set utf16be_tcl [swap_byte_order $utf16be_tcl] + } + do_bincmp_test $testname.2 $utf16be_sqlite3 $utf16be_tcl + set utf16be $utf16be_tcl + + # Step 3. + if { $::tcl_platform(byteOrder)=="littleEndian" } { + set utf16 $utf16le + } else { + set utf16 $utf16be + } + set utf8_sqlite3 [test_translate $utf16 UTF16 UTF8] + do_bincmp_test $testname.3 $utf8_sqlite3 [binarize $str] + + # Step 4 (little endian). + append utf16le_bom "\xFF\xFE" $utf16le + set utf8_sqlite3 [test_translate $utf16le_bom UTF16 UTF8 1] + do_bincmp_test $testname.4.le $utf8_sqlite3 [binarize $str] + + # Step 4 (big endian). + append utf16be_bom "\xFE\xFF" $utf16be + set utf8_sqlite3 [test_translate $utf16be_bom UTF16 UTF8] + do_bincmp_test $testname.4.be $utf8_sqlite3 [binarize $str] + + # Step 5 (little endian to little endian). 
+ set utf16_sqlite3 [test_translate $utf16le_bom UTF16LE UTF16LE] + do_bincmp_test $testname.5.le.le $utf16_sqlite3 $utf16le + + # Step 5 (big endian to big endian). + set utf16_sqlite3 [test_translate $utf16be_bom UTF16 UTF16BE] + do_bincmp_test $testname.5.be.be $utf16_sqlite3 $utf16be + + # Step 5 (big endian to little endian). + set utf16_sqlite3 [test_translate $utf16be_bom UTF16 UTF16LE] + do_bincmp_test $testname.5.be.le $utf16_sqlite3 $utf16le + + # Step 5 (little endian to big endian). + set utf16_sqlite3 [test_translate $utf16le_bom UTF16 UTF16BE] + do_bincmp_test $testname.5.le.be $utf16_sqlite3 $utf16be +} + +translate_selftest + +test_conversion enc-1 "hello world" +test_conversion enc-2 "sqlite" +test_conversion enc-3 "" +test_conversion enc-X "\u0100" +test_conversion enc-4 "\u1234" +test_conversion enc-5 "\u4321abc" +test_conversion enc-6 "\u4321\u1234" +test_conversion enc-7 [string repeat "abcde\u00EF\u00EE\uFFFCabc" 100] +test_conversion enc-8 [string repeat "\u007E\u007F\u0080\u0081" 100] +test_conversion enc-9 [string repeat "\u07FE\u07FF\u0800\u0801\uFFF0" 100] +test_conversion enc-10 [string repeat "\uE000" 100] + +proc test_collate {enc zLeft zRight} { + return [string compare $zLeft $zRight] +} +add_test_collate $::DB 0 0 1 +do_test enc-11.1 { + execsql { + CREATE TABLE ab(a COLLATE test_collate, b); + INSERT INTO ab VALUES(CAST (X'C388' AS TEXT), X'888800'); + INSERT INTO ab VALUES(CAST (X'C0808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808080808388' AS TEXT), X'888800'); + CREATE INDEX ab_i ON ab(a, b); + } +} {} +do_test enc-11.2 { + set cp200 "\u00C8" + execsql { + SELECT count(*) FROM ab WHERE a = $::cp200; + } +} {2} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/enc2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/enc2.test new file mode 100644 index 0000000..03d4cbd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/enc2.test @@ -0,0 +1,554 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus of +# this file is testing the SQLite routines used for converting between the +# various suported unicode encodings (UTF-8, UTF-16, UTF-16le and +# UTF-16be). +# +# $Id: enc2.test,v 1.28 2006/09/23 20:36:03 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If UTF16 support is disabled, ignore the tests in this file +# +ifcapable {!utf16} { + finish_test + return +} + +# The rough organisation of tests in this file is: +# +# enc2.1.*: Simple tests with a UTF-8 db. +# enc2.2.*: Simple tests with a UTF-16LE db. +# enc2.3.*: Simple tests with a UTF-16BE db. +# enc2.4.*: Test that attached databases must have the same text encoding +# as the main database. +# enc2.5.*: Test the behaviour of the library when a collation sequence is +# not available for the most desirable text encoding. +# enc2.6.*: Similar test for user functions. +# enc2.7.*: Test that the VerifyCookie opcode protects against assuming the +# wrong text encoding for the database. +# enc2.8.*: Test sqlite3_complete16() +# + +db close + +# Return the UTF-8 representation of the supplied UTF-16 string $str. 
+proc utf8 {str} { + # If $str ends in two 0x00 0x00 bytes, knock these off before + # converting to UTF-8 using TCL. + binary scan $str \c* vals + if {[lindex $vals end]==0 && [lindex $vals end-1]==0} { + set str [binary format \c* [lrange $vals 0 end-2]] + } + + set r [encoding convertfrom unicode $str] + return $r +} + +# +# This proc contains all the tests in this file. It is run +# three times. Each time the file 'test.db' contains a database +# with the following contents: +set dbcontents { + CREATE TABLE t1(a PRIMARY KEY, b, c); + INSERT INTO t1 VALUES('one', 'I', 1); +} +# This proc tests that we can open and manipulate the test.db +# database, and that it is possible to retreive values in +# various text encodings. +# +proc run_test_script {t enc} { + +# Open the database and pull out a (the) row. +do_test $t.1 { + sqlite3 db test.db; set DB [sqlite3_connection_pointer db] + execsql {SELECT * FROM t1} +} {one I 1} + +# Insert some data +do_test $t.2 { + execsql {INSERT INTO t1 VALUES('two', 'II', 2);} + execsql {SELECT * FROM t1} +} {one I 1 two II 2} + +# Insert some data +do_test $t.3 { + execsql { + INSERT INTO t1 VALUES('three','III',3); + INSERT INTO t1 VALUES('four','IV',4); + INSERT INTO t1 VALUES('five','V',5); + } + execsql {SELECT * FROM t1} +} {one I 1 two II 2 three III 3 four IV 4 five V 5} + +# Use the index +do_test $t.4 { + execsql { + SELECT * FROM t1 WHERE a = 'one'; + } +} {one I 1} +do_test $t.5 { + execsql { + SELECT * FROM t1 WHERE a = 'four'; + } +} {four IV 4} +ifcapable subquery { + do_test $t.6 { + execsql { + SELECT * FROM t1 WHERE a IN ('one', 'two'); + } + } {one I 1 two II 2} +} + +# Now check that we can retrieve data in both UTF-16 and UTF-8 +do_test $t.7 { + set STMT [sqlite3_prepare $DB "SELECT a FROM t1 WHERE c>3;" -1 TAIL] + sqlite3_step $STMT + sqlite3_column_text $STMT 0 +} {four} + +do_test $t.8 { + sqlite3_step $STMT + utf8 [sqlite3_column_text16 $STMT 0] +} {five} + +do_test $t.9 { + sqlite3_finalize $STMT +} SQLITE_OK + +ifcapable vacuum { + execsql VACUUM +} + +do_test $t.10 { + db eval {PRAGMA encoding} +} $enc + +} + +# The three unicode encodings understood by SQLite. +set encodings [list UTF-8 UTF-16le UTF-16be] + +set sqlite_os_trace 0 +set i 1 +foreach enc $encodings { + file delete -force test.db + sqlite3 db test.db + db eval "PRAGMA encoding = \"$enc\"" + execsql $dbcontents + do_test enc2-$i.0.1 { + db eval {PRAGMA encoding} + } $enc + do_test enc2-$i.0.2 { + db eval {PRAGMA encoding=UTF8} + db eval {PRAGMA encoding} + } $enc + do_test enc2-$i.0.3 { + db eval {PRAGMA encoding=UTF16le} + db eval {PRAGMA encoding} + } $enc + do_test enc2-$i.0.4 { + db eval {PRAGMA encoding=UTF16be} + db eval {PRAGMA encoding} + } $enc + + db close + run_test_script enc2-$i $enc + db close + incr i +} + +# Test that it is an error to try to attach a database with a different +# encoding to the main database. +do_test enc2-4.1 { + file delete -force test.db + sqlite3 db test.db + db eval "PRAGMA encoding = 'UTF-8'" + db eval "CREATE TABLE abc(a, b, c);" +} {} +do_test enc2-4.2 { + file delete -force test2.db + sqlite3 db2 test2.db + db2 eval "PRAGMA encoding = 'UTF-16'" + db2 eval "CREATE TABLE abc(a, b, c);" +} {} +do_test enc2-4.3 { + catchsql { + ATTACH 'test2.db' as aux; + } +} {1 {attached databases must use the same text encoding as main database}} + +db2 close +db close + +# The following tests - enc2-5.* - test that SQLite selects the correct +# collation sequence when more than one is available. 
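add_test_collate comes from the test harness rather than from this file; judging from the calls below, its three boolean arguments control whether the test_collate sequence is registered with a UTF-8, a UTF-16LE and a UTF-16BE implementation respectively, and ::test_collate_enc records which implementation SQLite actually invoked. A sketch of the pattern, under that reading:

add_test_collate $DB 1 0 0        ;# only the UTF-8 implementation is registered
execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}
set ::test_collate_enc            ;# UTF-8, since no other implementation is available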
+ +set ::values [list one two three four five] +set ::test_collate_enc INVALID +proc test_collate {enc lhs rhs} { + set ::test_collate_enc $enc + set l [lsearch -exact $::values $lhs] + set r [lsearch -exact $::values $rhs] + set res [expr $l - $r] + # puts "enc=$enc lhs=$lhs/$l rhs=$rhs/$r res=$res" + return $res +} + +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-5.0 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('one'); + INSERT INTO t5 VALUES('two'); + INSERT INTO t5 VALUES('five'); + INSERT INTO t5 VALUES('three'); + INSERT INTO t5 VALUES('four'); + } +} {} +do_test enc2-5.1 { + add_test_collate $DB 1 1 1 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate;}] + lappend res $::test_collate_enc +} {one two three four five UTF-8} +do_test enc2-5.2 { + add_test_collate $DB 0 1 0 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16LE} +do_test enc2-5.3 { + add_test_collate $DB 0 0 1 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16BE} + +db close +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +execsql {pragma encoding = 'UTF-16LE'} +do_test enc2-5.4 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('one'); + INSERT INTO t5 VALUES('two'); + INSERT INTO t5 VALUES('five'); + INSERT INTO t5 VALUES('three'); + INSERT INTO t5 VALUES('four'); + } +} {} +do_test enc2-5.5 { + add_test_collate $DB 1 1 1 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16LE} +do_test enc2-5.6 { + add_test_collate $DB 1 0 1 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16BE} +do_test enc2-5.7 { + add_test_collate $DB 1 0 0 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-8} + +db close +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +execsql {pragma encoding = 'UTF-16BE'} +do_test enc2-5.8 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('one'); + INSERT INTO t5 VALUES('two'); + INSERT INTO t5 VALUES('five'); + INSERT INTO t5 VALUES('three'); + INSERT INTO t5 VALUES('four'); + } +} {} +do_test enc2-5.9 { + add_test_collate $DB 1 1 1 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16BE} +do_test enc2-5.10 { + add_test_collate $DB 1 1 0 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-16LE} +do_test enc2-5.11 { + add_test_collate $DB 1 0 0 + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}] + lappend res $::test_collate_enc +} {one two three four five UTF-8} + +# Also test that a UTF-16 collation factory works. 
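+# The "collation factory" is the collation-needed callback: when a statement
+# names a collation that has not been registered, SQLite asks the callback to
+# supply one.  The harness command add_test_collate_needed used below installs
+# such a callback.  Sketch of the pattern (not executed here):
+#
+#   add_test_collate_needed $DB
+#   execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate}  ;# now succeeds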
+do_test enc2-5-12 { + add_test_collate $DB 0 0 0 + catchsql { + SELECT * FROM t5 ORDER BY 1 COLLATE test_collate + } +} {1 {no such collation sequence: test_collate}} +do_test enc2-5.13 { + add_test_collate_needed $DB + set res [execsql {SELECT * FROM t5 ORDER BY 1 COLLATE test_collate; }] + lappend res $::test_collate_enc +} {one two three four five UTF-16BE} +do_test enc2-5.14 { + set ::sqlite_last_needed_collation +} test_collate + +db close +file delete -force test.db + +do_test enc2-5.15 { + sqlite3 db test.db; set ::DB [sqlite3_connection_pointer db] + add_test_collate_needed $::DB + set ::sqlite_last_needed_collation +} {} +do_test enc2-5.16 { + execsql {CREATE TABLE t1(a varchar collate test_collate);} +} {} +do_test enc2-5.17 { + set ::sqlite_last_needed_collation +} {test_collate} + +# The following tests - enc2-6.* - test that SQLite selects the correct +# user function when more than one is available. + +proc test_function {enc arg} { + return "$enc $arg" +} + +db close +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +execsql {pragma encoding = 'UTF-8'} +do_test enc2-6.0 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('one'); + } +} {} +do_test enc2-6.1 { + add_test_function $DB 1 1 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-8 sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.2 { + add_test_function $DB 0 1 0 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16LE sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.3 { + add_test_function $DB 0 0 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16BE sqlite}} + +db close +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +execsql {pragma encoding = 'UTF-16LE'} +do_test enc2-6.3 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('sqlite'); + } +} {} +do_test enc2-6.4 { + add_test_function $DB 1 1 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16LE sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.5 { + add_test_function $DB 0 1 0 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16LE sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.6 { + add_test_function $DB 0 0 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16BE sqlite}} + +db close +file delete -force test.db +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +execsql {pragma encoding = 'UTF-16BE'} +do_test enc2-6.7 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES('sqlite'); + } +} {} +do_test enc2-6.8 { + add_test_function $DB 1 1 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16BE sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.9 { + add_test_function $DB 0 1 0 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16LE sqlite}} +db close +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +do_test enc2-6.10 { + add_test_function $DB 0 0 1 + execsql { + SELECT test_function('sqlite') + } +} {{UTF-16BE sqlite}} + + +db close +file delete -force test.db + +# The following tests - enc2-7.* - function as follows: +# +# 1: Open an empty database file assuming UTF-16 encoding. +# 2: Open the same database with a different handle assuming UTF-8. Create +# a table using this handle. +# 3: Read the sqlite_master table from the first handle. 
+# 4: Ensure the first handle recognises the database encoding is UTF-8. +# +do_test enc2-7.1 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-16'; + SELECT * FROM sqlite_master; + } +} {} +do_test enc2-7.2 { + set enc [execsql { + PRAGMA encoding; + }] + string range $enc 0 end-2 ;# Chop off the "le" or "be" +} {UTF-16} +do_test enc2-7.3 { + sqlite3 db2 test.db + execsql { + PRAGMA encoding = 'UTF-8'; + CREATE TABLE abc(a, b, c); + } db2 +} {} +do_test enc2-7.4 { + execsql { + SELECT * FROM sqlite_master; + } +} "table abc abc [expr $AUTOVACUUM?3:2] {CREATE TABLE abc(a, b, c)}" +do_test enc2-7.5 { + execsql { + PRAGMA encoding; + } +} {UTF-8} + +db close +db2 close + +proc utf16 {utf8} { + set utf16 [encoding convertto unicode $utf8] + append utf16 "\x00\x00" + return $utf16 +} +ifcapable {complete} { + do_test enc2-8.1 { + sqlite3_complete16 [utf16 "SELECT * FROM t1;"] + } {1} + do_test enc2-8.2 { + sqlite3_complete16 [utf16 "SELECT * FROM"] + } {0} +} + +# Test that the encoding of an empty database may still be set after the +# (empty) schema has been initialized. +file delete -force test.db +do_test enc2-9.1 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-8'; + PRAGMA encoding; + } +} {UTF-8} +do_test enc2-9.2 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-16le'; + PRAGMA encoding; + } +} {UTF-16le} +do_test enc2-9.3 { + sqlite3 db test.db + execsql { + SELECT * FROM sqlite_master; + PRAGMA encoding = 'UTF-8'; + PRAGMA encoding; + } +} {UTF-8} +do_test enc2-9.4 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-16le'; + CREATE TABLE abc(a, b, c); + PRAGMA encoding; + } +} {UTF-16le} +do_test enc2-9.5 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-8'; + PRAGMA encoding; + } +} {UTF-16le} + +# Ticket #1987. +# Disallow encoding changes once the encoding has been set. +# +do_test enc2-10.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval { + PRAGMA encoding=UTF16; + CREATE TABLE t1(a); + PRAGMA encoding=UTF8; + CREATE TABLE t2(b); + } + db close + sqlite3 db test.db + db eval { + SELECT name FROM sqlite_master + } +} {t1 t2} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/enc3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/enc3.test new file mode 100644 index 0000000..64f5807 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/enc3.test @@ -0,0 +1,81 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing of the proper handling of conversions +# to the native text representation. 
+# +# $Id: enc3.test,v 1.6 2007/05/10 21:14:03 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {utf16} { + do_test enc3-1.1 { + execsql { + PRAGMA encoding=utf16le; + PRAGMA encoding; + } + } {UTF-16le} +} +do_test enc3-1.2 { + execsql { + CREATE TABLE t1(x,y); + INSERT INTO t1 VALUES('abc''123',5); + SELECT * FROM t1 + } +} {abc'123 5} +do_test enc3-1.3 { + execsql { + SELECT quote(x) || ' ' || quote(y) FROM t1 + } +} {{'abc''123' 5}} +ifcapable {bloblit} { + do_test enc3-1.4 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(x'616263646566',NULL); + SELECT * FROM t1 + } + } {abcdef {}} + do_test enc3-1.5 { + execsql { + SELECT quote(x) || ' ' || quote(y) FROM t1 + } + } {{X'616263646566' NULL}} +} +ifcapable {bloblit && utf16} { + do_test enc3-2.1 { + execsql { + PRAGMA encoding + } + } {UTF-16le} + do_test enc3-2.2 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(x'61006200630064006500'); + SELECT CAST(a AS text) FROM t2 WHERE a LIKE 'abc%'; + } + } {abcde} + do_test enc3-2.3 { + execsql { + SELECT CAST(x'61006200630064006500' AS text); + } + } {abcde} + do_test enc3-2.4 { + execsql { + SELECT rowid FROM t2 WHERE a LIKE x'610062002500'; + } + } {1} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive.test b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive.test new file mode 100644 index 0000000..20e012f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive.test @@ -0,0 +1,449 @@ +# 2007 March 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of these tests is exclusive access mode (i.e. the thing activated by +# "PRAGMA locking_mode = EXCLUSIVE"). +# +# $Id: exclusive.test,v 1.6 2007/08/12 20:07:59 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +file delete -force test2.db-journal +file delete -force test2.db +file delete -force test3.db-journal +file delete -force test3.db +file delete -force test4.db-journal +file delete -force test4.db + +# The locking mode for the TEMP table is always "exclusive" for +# on-disk tables and "normal" for in-memory tables. +# +if {[info exists TEMP_STORE] && $TEMP_STORE>=2} { + set temp_mode normal +} else { + set temp_mode exclusive +} + +#---------------------------------------------------------------------- +# Test cases exclusive-1.X test the PRAGMA logic. 
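+# For reference, the pragma forms exercised below are (illustrative SQL):
+#
+#   PRAGMA locking_mode;                  -- query the default locking mode
+#   PRAGMA main.locking_mode;             -- query a specific database
+#   PRAGMA locking_mode = EXCLUSIVE;      -- change the mode
+#
+# Each form returns the mode now in effect, either "normal" or "exclusive";
+# an unrecognised value such as "invalid" leaves the mode unchanged.  The
+# temp database is the exception described above.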
+# +do_test exclusive-1.0 { + execsql { + pragma locking_mode; + pragma main.locking_mode; + pragma temp.locking_mode; + } +} [list normal normal $temp_mode] +do_test exclusive-1.1 { + execsql { + pragma locking_mode = exclusive; + } +} {exclusive} +do_test exclusive-1.2 { + execsql { + pragma locking_mode; + pragma main.locking_mode; + pragma temp.locking_mode; + } +} [list exclusive exclusive $temp_mode] +do_test exclusive-1.3 { + execsql { + pragma locking_mode = normal; + } +} {normal} +do_test exclusive-1.4 { + execsql { + pragma locking_mode; + pragma main.locking_mode; + pragma temp.locking_mode; + } +} [list normal normal $temp_mode] +do_test exclusive-1.5 { + execsql { + pragma locking_mode = invalid; + } +} {normal} +do_test exclusive-1.6 { + execsql { + pragma locking_mode; + pragma main.locking_mode; + pragma temp.locking_mode; + } +} [list normal normal $temp_mode] +do_test exclusive-1.7 { + execsql { + pragma locking_mode = exclusive; + ATTACH 'test2.db' as aux; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + } +} {exclusive exclusive} +do_test exclusive-1.8 { + execsql { + pragma main.locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + } +} [list normal $temp_mode exclusive] +do_test exclusive-1.9 { + execsql { + pragma locking_mode; + } +} {exclusive} +do_test exclusive-1.10 { + execsql { + ATTACH 'test3.db' as aux2; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } +} {normal exclusive exclusive} +do_test exclusive-1.11 { + execsql { + pragma aux.locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } +} {normal normal exclusive} +do_test exclusive-1.12 { + execsql { + pragma locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } +} [list normal $temp_mode normal normal] +do_test exclusive-1.13 { + execsql { + ATTACH 'test4.db' as aux3; + } + execsql { + pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + pragma aux3.locking_mode; + } +} [list normal $temp_mode normal normal normal] + +do_test exclusive-1.99 { + execsql { + DETACH aux; + DETACH aux2; + DETACH aux3; + } +} {} + +#---------------------------------------------------------------------- +# Test cases exclusive-2.X verify that connections in exclusive +# locking_mode do not relinquish locks. +# +do_test exclusive-2.0 { + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + PRAGMA locking_mode = exclusive; + } +} {exclusive} +do_test exclusive-2.1 { + sqlite3 db2 test.db + execsql { + INSERT INTO abc VALUES(4, 5, 6); + SELECT * FROM abc; + } db2 +} {1 2 3 4 5 6} +do_test exclusive-2.2 { + # This causes connection 'db' (in exclusive mode) to establish + # a shared-lock on the db. The other connection should now be + # locked out as a writer. + execsql { + SELECT * FROM abc; + } db +} {1 2 3 4 5 6} +do_test exclusive-2.4 { + execsql { + SELECT * FROM abc; + } db2 +} {1 2 3 4 5 6} +do_test exclusive-2.5 { + catchsql { + INSERT INTO abc VALUES(7, 8, 9); + } db2 +} {1 {database is locked}} +sqlite3_soft_heap_limit 0 +do_test exclusive-2.6 { + # Because connection 'db' only has a shared-lock, the other connection + # will be able to get a RESERVED, but will fail to upgrade to EXCLUSIVE. 
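+  # (SQLite file locks escalate SHARED -> RESERVED -> PENDING -> EXCLUSIVE;
+  # a writer can take RESERVED while other connections hold SHARED, but it
+  # cannot reach EXCLUSIVE until every SHARED lock is released, hence the
+  # COMMIT below is expected to fail.)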
+ execsql { + BEGIN; + INSERT INTO abc VALUES(7, 8, 9); + } db2 + catchsql { + COMMIT + } db2 +} {1 {database is locked}} +do_test exclusive-2.7 { + catchsql { + COMMIT + } db2 +} {1 {database is locked}} +do_test exclusive-2.8 { + execsql { + ROLLBACK; + } db2 +} {} +sqlite3_soft_heap_limit $soft_limit + +do_test exclusive-2.9 { + # Write the database to establish the exclusive lock with connection 'db. + execsql { + INSERT INTO abc VALUES(7, 8, 9); + } db + catchsql { + SELECT * FROM abc; + } db2 +} {1 {database is locked}} +do_test exclusive-2.10 { + # Changing the locking-mode does not release any locks. + execsql { + PRAGMA locking_mode = normal; + } db + catchsql { + SELECT * FROM abc; + } db2 +} {1 {database is locked}} +do_test exclusive-2.11 { + # After changing the locking mode, accessing the db releases locks. + execsql { + SELECT * FROM abc; + } db + execsql { + SELECT * FROM abc; + } db2 +} {1 2 3 4 5 6 7 8 9} +db2 close + +#---------------------------------------------------------------------- +# Tests exclusive-3.X - test that a connection in exclusive mode +# truncates instead of deletes the journal file when committing +# a transaction. +# +proc filestate {fname} { + set exists 0 + set content 0 + if {[file exists $fname]} { + set exists 1 + set content [expr {[file size $fname] > 0}] + } + list $exists $content +} +do_test exclusive-3.0 { + filestate test.db-journal +} {0 0} +do_test exclusive-3.1 { + execsql { + PRAGMA locking_mode = exclusive; + BEGIN; + DELETE FROM abc; + } + filestate test.db-journal +} {1 1} +do_test exclusive-3.2 { + execsql { + COMMIT; + } + filestate test.db-journal +} {1 0} +do_test exclusive-3.3 { + execsql { + INSERT INTO abc VALUES('A', 'B', 'C'); + SELECT * FROM abc; + } +} {A B C} +do_test exclusive-3.4 { + execsql { + BEGIN; + UPDATE abc SET a = 1, b = 2, c = 3; + ROLLBACK; + SELECT * FROM abc; + } +} {A B C} +do_test exclusive-3.5 { + filestate test.db-journal +} {1 0} +do_test exclusive-3.6 { + execsql { + PRAGMA locking_mode = normal; + SELECT * FROM abc; + } + filestate test.db-journal +} {0 0} + +#---------------------------------------------------------------------- +# Tests exclusive-4.X - test that rollback works correctly when +# in exclusive-access mode. +# + +# The following procedure computes a "signature" for table "t3". If +# T3 changes in any way, the signature should change. +# +# This is used to test ROLLBACK. We gather a signature for t3, then +# make lots of changes to t3, then rollback and take another signature. +# The two signatures should be the same. 
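+# A run of the rollback tests below is essentially (sketch, values vary):
+#
+#   set before [signature]
+#   execsql { BEGIN; DELETE FROM t3 WHERE random()%10!=0; ROLLBACK; }
+#   expr {[signature] eq $before}    ;# expected to be 1 after the rollback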
+# +proc signature {} { + return [db eval {SELECT count(*), md5sum(x) FROM t3}] +} + +do_test exclusive-4.0 { + execsql { PRAGMA locking_mode = exclusive; } + execsql { PRAGMA default_cache_size = 10; } + execsql { + BEGIN; + CREATE TABLE t3(x TEXT); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + COMMIT; + } + execsql {SELECT count(*) FROM t3;} +} {32} + +set ::X [signature] +do_test exclusive-4.1 { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + SELECT count(*) FROM t3; + ROLLBACK; + } + signature +} $::X + +do_test exclusive-4.2 { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + ROLLBACK; + } + signature +} $::X + +do_test exclusive-4.3 { + execsql { + INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0; + } +} {} + +do_test exclusive-4.4 { + catch {set ::X [signature]} +} {0} +do_test exclusive-4.5 { + execsql { + PRAGMA locking_mode = NORMAL; + DROP TABLE t3; + DROP TABLE abc; + } +} {normal} + +#---------------------------------------------------------------------- +# Tests exclusive-5.X - test that statement journals are truncated +# instead of deleted when in exclusive access mode. +# + +# Close and reopen the database so that the temp database is no +# longer active. +# +db close +sqlite db test.db + + +do_test exclusive-5.0 { + execsql { + CREATE TABLE abc(a UNIQUE, b UNIQUE, c UNIQUE); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc SELECT a+1, b+1, c+1 FROM abc; + } +} {} +do_test exclusive-5.1 { + # Three files are open: The db, journal and statement-journal. + set sqlite_open_file_count +} {3} +do_test exclusive-5.2 { + execsql { + COMMIT; + } + # One file open: the db. + set sqlite_open_file_count +} {1} +do_test exclusive-5.3 { + execsql { + PRAGMA locking_mode = exclusive; + BEGIN; + INSERT INTO abc VALUES(5, 6, 7); + } + # Two files open: the db and journal. + set sqlite_open_file_count +} {2} +do_test exclusive-5.4 { + execsql { + INSERT INTO abc SELECT a+10, b+10, c+10 FROM abc; + } + # Three files are open: The db, journal and statement-journal. + set sqlite_open_file_count +} {3} +do_test exclusive-5.5 { + execsql { + COMMIT; + } + # Three files are still open: The db, journal and statement-journal. + set sqlite_open_file_count +} {3} +do_test exclusive-5.6 { + execsql { + PRAGMA locking_mode = normal; + SELECT * FROM abc; + } +} {normal 1 2 3 2 3 4 5 6 7 11 12 13 12 13 14 15 16 17} +do_test exclusive-5.7 { + # Just the db open. + set sqlite_open_file_count +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive2.test new file mode 100644 index 0000000..d0eeb19 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive2.test @@ -0,0 +1,297 @@ +# 2007 March 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: exclusive2.test,v 1.8 2007/08/12 20:07:59 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +# This module does not work right if the cache spills at unexpected +# moments. So disable the soft-heap-limit. +# +sqlite3_soft_heap_limit 0 + +proc pagerChangeCounter {filename {new ""}} { + set fd [open $filename RDWR] + fconfigure $fd -translation binary -encoding binary + if {$new ne ""} { + seek $fd 24 + set a [expr {($new&0xFF000000)>>24}] + set b [expr {($new&0x00FF0000)>>16}] + set c [expr {($new&0x0000FF00)>>8}] + set d [expr {($new&0x000000FF)}] + puts -nonewline $fd [binary format cccc $a $b $c $d] + flush $fd + } + + seek $fd 24 + foreach {a b c d} [list 0 0 0 0] {} + binary scan [read $fd 4] cccc a b c d + set ret [expr ($a&0x000000FF)<<24] + incr ret [expr ($b&0x000000FF)<<16] + incr ret [expr ($c&0x000000FF)<<8] + incr ret [expr ($d&0x000000FF)<<0] + + close $fd + return $ret +} + +proc readPagerChangeCounter {filename} { + set fd [open $filename RDONLY] + fconfigure $fd -translation binary -encoding binary + + seek $fd 24 + foreach {a b c d} [list 0 0 0 0] {} + binary scan [read $fd 4] cccc a b c d + set ret [expr ($a&0x000000FF)<<24] + incr ret [expr ($b&0x000000FF)<<16] + incr ret [expr ($c&0x000000FF)<<8] + incr ret [expr ($d&0x000000FF)<<0] + + close $fd + return $ret +} + + +proc t1sig {{db db}} { + execsql {SELECT count(*), md5sum(a) FROM t1} $db +} +do_test exclusive2-1.0 { + readPagerChangeCounter test.db +} {0} + +#----------------------------------------------------------------------- +# The following tests - exclusive2-1.X - check that: +# +# 1-3: Build a database with connection 1, calculate a signature. +# 4-9: Modify the database using a second connection in a way that +# does not modify the freelist, then reset the pager change-counter +# to the value it had before the modifications. +# 8: Check that using the first connection, the database signature +# is still the same. This is because it uses the in-memory cache. +# It can't tell the db has changed because we reset the change-counter. +# 9: Increment the change-counter. +# 10: Ensure that the first connection now sees the updated database. It +# sees the change-counter has been incremented and discards the +# invalid in-memory cache. +# +# This will only work if the database cache is large enough to hold +# the entire database. In the case of 1024 byte pages, this means +# the cache size must be at least 17. Otherwise, some pages will be +# loaded from the database file in step 8. +# +do_test exclusive2-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1(a) VALUES(randstr(10, 400)); + INSERT INTO t1(a) VALUES(randstr(10, 400)); + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + COMMIT; + SELECT count(*) FROM t1; + } +} {64} +do_test exclusive2-1.2.1 { + # Make sure the pager cache is large enough to store the + # entire database. 
+ set nPage [expr [file size test.db]/1024] + if {$::SQLITE_DEFAULT_CACHE_SIZE < $nPage} { + execsql "PRAGMA cache_size = $nPage" + } + expr {[execsql {PRAGMA cache_size}] >= $nPage} +} {1} +do_test exclusive2-1.2 { + set ::sig [t1sig] + readPagerChangeCounter test.db +} {1} +do_test exclusive2-1.3 { + t1sig +} $::sig +do_test exclusive2-1.4 { + sqlite3 db2 test.db + t1sig db2 +} $::sig +do_test exclusive2-1.5 { + execsql { + UPDATE t1 SET b=a, a=NULL; + } db2 + expr {[t1sig db2] eq $::sig} +} 0 +do_test exclusive2-1.6 { + readPagerChangeCounter test.db +} {2} +do_test exclusive2-1.7 { + pagerChangeCounter test.db 1 +} {1} +do_test exclusive2-1.9 { + t1sig + expr {[t1sig] eq $::sig} +} {1} +do_test exclusive2-1.10 { + pagerChangeCounter test.db 2 +} {2} +do_test exclusive2-1.11 { + expr {[t1sig] eq $::sig} +} {0} + +#-------------------------------------------------------------------- +# These tests - exclusive2-2.X - are similar to exclusive2-1.X, +# except that they are run with locking_mode=EXCLUSIVE. +# +# 1-3: Build a database with exclusive-access connection 1, +# calculate a signature. +# 4: Corrupt the database by writing 10000 bytes of garbage +# starting at the beginning of page 2. Check that connection 1 +# still works. It should be accessing the in-memory cache. +# 5-6: Modify the dataase change-counter. Connection 1 still works +# entirely from in-memory cache, because it doesn't check the +# change-counter. +# 7-8 Set the locking-mode back to normal. After the db is unlocked, +# SQLite detects the modified change-counter and discards the +# in-memory cache. Then it finds the corruption caused in step 4.... +# +# As above, this test is only applicable if the pager cache is +# large enough to hold the entire database. With 1024 byte pages, +# this means 19 pages. We also need to disable the soft-heap-limit +# to prevent memory-induced cache spills. +# +do_test exclusive2-2.1 { + execsql {PRAGMA locking_mode = exclusive;} + execsql { + BEGIN; + DELETE FROM t1; + INSERT INTO t1(a) VALUES(randstr(10, 400)); + INSERT INTO t1(a) VALUES(randstr(10, 400)); + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + INSERT INTO t1(a) SELECT randstr(10, 400) FROM t1; + COMMIT; + SELECT count(*) FROM t1; + } +} {64} +do_test exclusive2-2.2.1 { + # Make sure the pager cache is large enough to store the + # entire database. + set nPage [expr [file size test.db]/1024] + if {$::SQLITE_DEFAULT_CACHE_SIZE < $nPage} { + execsql "PRAGMA cache_size = $nPage" + } + expr {[execsql {PRAGMA cache_size}] >= $nPage} +} {1} +do_test exclusive2-2.2 { + set ::sig [t1sig] + readPagerChangeCounter test.db +} {3} +do_test exclusive2-2.3 { + t1sig +} $::sig + +do_test exclusive2-2.4 { + set fd [open test.db RDWR] + seek $fd 1024 + puts -nonewline $fd [string repeat [binary format c 0] 10000] + flush $fd + close $fd + t1sig +} $::sig + +do_test exclusive2-2.5 { + pagerChangeCounter test.db 5 +} {5} +do_test exclusive2-2.6 { + t1sig +} $::sig +do_test exclusive2-2.7 { + execsql {PRAGMA locking_mode = normal} + t1sig +} $::sig + +do_test exclusive2-2.8 { + set rc [catch {t1sig} msg] + list $rc $msg +} {1 {database disk image is malformed}} + +#-------------------------------------------------------------------- +# These tests - exclusive2-3.X - verify that the pager change-counter +# is only incremented by the first change when in exclusive access +# mode. 
In normal mode, the change-counter is incremented once +# per write-transaction. +# + +db close +db2 close +file delete -force test.db +file delete -force test.db-journal + +do_test exclusive2-3.0 { + sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t1(a UNIQUE); + INSERT INTO t1 VALUES(randstr(10, 400)); + INSERT INTO t1 VALUES(randstr(10, 400)); + COMMIT; + } + readPagerChangeCounter test.db +} {1} +do_test exclusive2-3.1 { + execsql { + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {2} +do_test exclusive2-3.2 { + execsql { + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {3} +do_test exclusive2-3.3 { + execsql { + PRAGMA locking_mode = exclusive; + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {4} +do_test exclusive2-3.4 { + execsql { + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {4} +do_test exclusive2-3.5 { + execsql { + PRAGMA locking_mode = normal; + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {4} +do_test exclusive2-3.6 { + execsql { + INSERT INTO t1 VALUES(randstr(10, 400)); + } + readPagerChangeCounter test.db +} {5} +sqlite3_soft_heap_limit $soft_limit + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive3.test new file mode 100644 index 0000000..941f61b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/exclusive3.test @@ -0,0 +1,59 @@ +# 2007 March 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file runs the tests in the file ioerr.test with +# exclusive access mode enabled. +# +# $Id: exclusive3.test,v 1.3 2007/03/30 16:01:55 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +rename finish_test really_finish_test2 +proc finish_test {} {} +set ISQUICK 1 + +rename sqlite3 real_sqlite3 +proc sqlite3 {args} { + set r [eval "real_sqlite3 $args"] + if { [llength $args] == 2 } { + [lindex $args 0] eval {pragma locking_mode = exclusive} + } + set r +} + +rename do_test really_do_test +proc do_test {args} { + set sc [concat really_do_test "exclusive-[lindex $args 0]" \ + [lrange $args 1 end]] + eval $sc +} + +#source $testdir/rollback.test +#source $testdir/select1.test +#source $testdir/select2.test + +source $testdir/malloc.test +source $testdir/ioerr.test + + +rename sqlite3 "" +rename real_sqlite3 sqlite3 +rename finish_test "" +rename really_finish_test2 finish_test +rename do_test "" +rename really_do_test do_test +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/expr.test b/libraries/sqlite/unix/sqlite-3.5.1/test/expr.test new file mode 100644 index 0000000..8c2a09b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/expr.test @@ -0,0 +1,699 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing expressions. +# +# $Id: expr.test,v 1.59 2007/09/01 18:24:55 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table to work with. +# +execsql {CREATE TABLE test1(i1 int, i2 int, r1 real, r2 real, t1 text, t2 text)} +execsql {INSERT INTO test1 VALUES(1,2,1.1,2.2,'hello','world')} +proc test_expr {name settings expr result} { + do_test $name [format { + execsql {BEGIN; UPDATE test1 SET %s; SELECT %s FROM test1; ROLLBACK;} + } $settings $expr] $result +} + +test_expr expr-1.1 {i1=10, i2=20} {i1+i2} 30 +test_expr expr-1.2 {i1=10, i2=20} {i1-i2} -10 +test_expr expr-1.3 {i1=10, i2=20} {i1*i2} 200 +test_expr expr-1.4 {i1=10, i2=20} {i1/i2} 0 +test_expr expr-1.5 {i1=10, i2=20} {i2/i1} 2 +test_expr expr-1.6 {i1=10, i2=20} {i2i1} 1 +test_expr expr-1.9 {i1=10, i2=20} {i2>=i1} 1 +test_expr expr-1.10 {i1=10, i2=20} {i2!=i1} 1 +test_expr expr-1.11 {i1=10, i2=20} {i2=i1} 0 +test_expr expr-1.12 {i1=10, i2=20} {i2<>i1} 1 +test_expr expr-1.13 {i1=10, i2=20} {i2==i1} 0 +test_expr expr-1.14 {i1=20, i2=20} {i2i1} 0 +test_expr expr-1.17 {i1=20, i2=20} {i2>=i1} 1 +test_expr expr-1.18 {i1=20, i2=20} {i2!=i1} 0 +test_expr expr-1.19 {i1=20, i2=20} {i2=i1} 1 +test_expr expr-1.20 {i1=20, i2=20} {i2<>i1} 0 +test_expr expr-1.21 {i1=20, i2=20} {i2==i1} 1 +test_expr expr-1.22 {i1=1, i2=2, r1=3.0} {i1+i2*r1} {7.0} +test_expr expr-1.23 {i1=1, i2=2, r1=3.0} {(i1+i2)*r1} {9.0} +test_expr expr-1.24 {i1=1, i2=2} {min(i1,i2,i1+i2,i1-i2)} {-1} +test_expr expr-1.25 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3} +test_expr expr-1.26 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3} +test_expr expr-1.27 {i1=1, i2=2} {i1==1 AND i2=2} {1} +test_expr expr-1.28 {i1=1, i2=2} {i1=2 AND i2=1} {0} +test_expr expr-1.29 {i1=1, i2=2} {i1=1 AND i2=1} {0} +test_expr expr-1.30 {i1=1, i2=2} {i1=2 AND i2=2} {0} +test_expr expr-1.31 {i1=1, i2=2} {i1==1 OR i2=2} {1} +test_expr expr-1.32 {i1=1, i2=2} {i1=2 OR i2=1} {0} +test_expr expr-1.33 {i1=1, i2=2} {i1=1 OR i2=1} {1} +test_expr expr-1.34 {i1=1, i2=2} {i1=2 OR i2=2} {1} +test_expr expr-1.35 {i1=1, i2=2} {i1-i2=-1} {1} +test_expr expr-1.36 {i1=1, i2=0} {not i1} {0} +test_expr expr-1.37 {i1=1, i2=0} {not i2} {1} +test_expr expr-1.38 {i1=1} {-i1} {-1} +test_expr expr-1.39 {i1=1} {+i1} {1} +test_expr expr-1.40 {i1=1, i2=2} {+(i2+i1)} {3} +test_expr expr-1.41 {i1=1, i2=2} {-(i2+i1)} {-3} +test_expr expr-1.42 {i1=1, i2=2} {i1|i2} {3} +test_expr expr-1.42b {i1=1, i2=2} {4|2} {6} +test_expr expr-1.43 {i1=1, i2=2} {i1&i2} {0} +test_expr expr-1.43b {i1=1, i2=2} {4&5} {4} +test_expr expr-1.44 {i1=1} {~i1} {-2} +test_expr expr-1.45 {i1=1, i2=3} {i1<>i2} {4} +test_expr expr-1.47 {i1=9999999999, i2=8888888888} {i1i2} 1 +test_expr expr-1.50 {i1=99999999999, i2=99999999998} {i1i2} 1 +test_expr expr-1.53 {i1=099999999999, i2=99999999999} {i1i2} 0 +test_expr expr-1.56 {i1=25, i2=11} {i1%i2} 3 +test_expr expr-1.58 {i1=NULL, i2=1} {coalesce(i1+i2,99)} 99 +test_expr expr-1.59 {i1=1, i2=NULL} {coalesce(i1+i2,99)} 99 +test_expr expr-1.60 {i1=NULL, i2=NULL} {coalesce(i1+i2,99)} 99 +test_expr expr-1.61 {i1=NULL, i2=1} {coalesce(i1-i2,99)} 99 +test_expr expr-1.62 {i1=1, i2=NULL} {coalesce(i1-i2,99)} 99 +test_expr expr-1.63 {i1=NULL, i2=NULL} {coalesce(i1-i2,99)} 99 +test_expr expr-1.64 {i1=NULL, i2=1} {coalesce(i1*i2,99)} 99 +test_expr expr-1.65 {i1=1, i2=NULL} {coalesce(i1*i2,99)} 99 +test_expr expr-1.66 {i1=NULL, 
i2=NULL} {coalesce(i1*i2,99)} 99 +test_expr expr-1.67 {i1=NULL, i2=1} {coalesce(i1/i2,99)} 99 +test_expr expr-1.68 {i1=1, i2=NULL} {coalesce(i1/i2,99)} 99 +test_expr expr-1.69 {i1=NULL, i2=NULL} {coalesce(i1/i2,99)} 99 +test_expr expr-1.70 {i1=NULL, i2=1} {coalesce(i1i2,99)} 99 +test_expr expr-1.72 {i1=NULL, i2=NULL} {coalesce(i1<=i2,99)} 99 +test_expr expr-1.73 {i1=NULL, i2=1} {coalesce(i1>=i2,99)} 99 +test_expr expr-1.74 {i1=1, i2=NULL} {coalesce(i1!=i2,99)} 99 +test_expr expr-1.75 {i1=NULL, i2=NULL} {coalesce(i1==i2,99)} 99 +test_expr expr-1.76 {i1=NULL, i2=NULL} {coalesce(not i1,99)} 99 +test_expr expr-1.77 {i1=NULL, i2=NULL} {coalesce(-i1,99)} 99 +test_expr expr-1.78 {i1=NULL, i2=NULL} {coalesce(i1 IS NULL AND i2=5,99)} 99 +test_expr expr-1.79 {i1=NULL, i2=NULL} {coalesce(i1 IS NULL OR i2=5,99)} 1 +test_expr expr-1.80 {i1=NULL, i2=NULL} {coalesce(i1=5 AND i2 IS NULL,99)} 99 +test_expr expr-1.81 {i1=NULL, i2=NULL} {coalesce(i1=5 OR i2 IS NULL,99)} 1 +test_expr expr-1.82 {i1=NULL, i2=3} {coalesce(min(i1,i2,1),99)} 99 +test_expr expr-1.83 {i1=NULL, i2=3} {coalesce(max(i1,i2,1),99)} 99 +test_expr expr-1.84 {i1=3, i2=NULL} {coalesce(min(i1,i2,1),99)} 99 +test_expr expr-1.85 {i1=3, i2=NULL} {coalesce(max(i1,i2,1),99)} 99 +test_expr expr-1.86 {i1=3, i2=8} {5 between i1 and i2} 1 +test_expr expr-1.87 {i1=3, i2=8} {5 not between i1 and i2} 0 +test_expr expr-1.88 {i1=3, i2=8} {55 between i1 and i2} 0 +test_expr expr-1.89 {i1=3, i2=8} {55 not between i1 and i2} 1 +test_expr expr-1.90 {i1=3, i2=NULL} {5 between i1 and i2} {{}} +test_expr expr-1.91 {i1=3, i2=NULL} {5 not between i1 and i2} {{}} +test_expr expr-1.92 {i1=3, i2=NULL} {2 between i1 and i2} 0 +test_expr expr-1.93 {i1=3, i2=NULL} {2 not between i1 and i2} 1 +test_expr expr-1.94 {i1=NULL, i2=8} {2 between i1 and i2} {{}} +test_expr expr-1.95 {i1=NULL, i2=8} {2 not between i1 and i2} {{}} +test_expr expr-1.94 {i1=NULL, i2=8} {55 between i1 and i2} 0 +test_expr expr-1.95 {i1=NULL, i2=8} {55 not between i1 and i2} 1 +test_expr expr-1.96 {i1=NULL, i2=3} {coalesce(i1<>i2,99)} 99 +test_expr expr-1.98 {i1=NULL, i2=NULL} {coalesce(i1|i2,99)} 99 +test_expr expr-1.99 {i1=32, i2=NULL} {coalesce(i1&i2,99)} 99 +test_expr expr-1.100 {i1=1, i2=''} {i1=i2} 0 +test_expr expr-1.101 {i1=0, i2=''} {i1=i2} 0 + +# Check for proper handling of 64-bit integer values. 
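+# For reference (illustrative): the largest signed 64-bit integer is
+# 9223372036854775807, and 1<<63 wraps to the most negative value,
+# -9223372036854775808; the tests below pin down how division and modulo
+# behave at that boundary.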
+# +test_expr expr-1.102 {i1=40, i2=1} {i2<1} 1 + +test_expr expr-1.106 {i1=0} {(1<<63)/-1} -9223372036854775808 +test_expr expr-1.107 {i1=0} {(1<<63)%-1} 0 + +test_expr expr-2.1 {r1=1.23, r2=2.34} {r1+r2} 3.57 +test_expr expr-2.2 {r1=1.23, r2=2.34} {r1-r2} -1.11 +test_expr expr-2.3 {r1=1.23, r2=2.34} {r1*r2} 2.8782 +set tcl_precision 15 +test_expr expr-2.4 {r1=1.23, r2=2.34} {r1/r2} 0.525641025641026 +test_expr expr-2.5 {r1=1.23, r2=2.34} {r2/r1} 1.90243902439024 +test_expr expr-2.6 {r1=1.23, r2=2.34} {r2r1} 1 +test_expr expr-2.9 {r1=1.23, r2=2.34} {r2>=r1} 1 +test_expr expr-2.10 {r1=1.23, r2=2.34} {r2!=r1} 1 +test_expr expr-2.11 {r1=1.23, r2=2.34} {r2=r1} 0 +test_expr expr-2.12 {r1=1.23, r2=2.34} {r2<>r1} 1 +test_expr expr-2.13 {r1=1.23, r2=2.34} {r2==r1} 0 +test_expr expr-2.14 {r1=2.34, r2=2.34} {r2r1} 0 +test_expr expr-2.17 {r1=2.34, r2=2.34} {r2>=r1} 1 +test_expr expr-2.18 {r1=2.34, r2=2.34} {r2!=r1} 0 +test_expr expr-2.19 {r1=2.34, r2=2.34} {r2=r1} 1 +test_expr expr-2.20 {r1=2.34, r2=2.34} {r2<>r1} 0 +test_expr expr-2.21 {r1=2.34, r2=2.34} {r2==r1} 1 +test_expr expr-2.22 {r1=1.23, r2=2.34} {min(r1,r2,r1+r2,r1-r2)} {-1.11} +test_expr expr-2.23 {r1=1.23, r2=2.34} {max(r1,r2,r1+r2,r1-r2)} {3.57} +test_expr expr-2.24 {r1=25.0, r2=11.0} {r1%r2} 3.0 +test_expr expr-2.25 {r1=1.23, r2=NULL} {coalesce(r1+r2,99.0)} 99.0 +test_expr expr-2.26 {r1=1e300, r2=1e300} {coalesce((r1*r2)*0.0,99.0)} 99.0 + +test_expr expr-3.1 {t1='abc', t2='xyz'} {t1t2} 0 +test_expr expr-3.8 {t1='xyz', t2='abc'} {t1>t2} 1 +test_expr expr-3.9 {t1='abc', t2='abc'} {t1>t2} 0 +test_expr expr-3.10 {t1='abc', t2='xyz'} {t1>=t2} 0 +test_expr expr-3.11 {t1='xyz', t2='abc'} {t1>=t2} 1 +test_expr expr-3.12 {t1='abc', t2='abc'} {t1>=t2} 1 +test_expr expr-3.13 {t1='abc', t2='xyz'} {t1=t2} 0 +test_expr expr-3.14 {t1='xyz', t2='abc'} {t1=t2} 0 +test_expr expr-3.15 {t1='abc', t2='abc'} {t1=t2} 1 +test_expr expr-3.16 {t1='abc', t2='xyz'} {t1==t2} 0 +test_expr expr-3.17 {t1='xyz', t2='abc'} {t1==t2} 0 +test_expr expr-3.18 {t1='abc', t2='abc'} {t1==t2} 1 +test_expr expr-3.19 {t1='abc', t2='xyz'} {t1<>t2} 1 +test_expr expr-3.20 {t1='xyz', t2='abc'} {t1<>t2} 1 +test_expr expr-3.21 {t1='abc', t2='abc'} {t1<>t2} 0 +test_expr expr-3.22 {t1='abc', t2='xyz'} {t1!=t2} 1 +test_expr expr-3.23 {t1='xyz', t2='abc'} {t1!=t2} 1 +test_expr expr-3.24 {t1='abc', t2='abc'} {t1!=t2} 0 +test_expr expr-3.25 {t1=NULL, t2='hi'} {t1 isnull} 1 +test_expr expr-3.25b {t1=NULL, t2='hi'} {t1 is null} 1 +test_expr expr-3.26 {t1=NULL, t2='hi'} {t2 isnull} 0 +test_expr expr-3.27 {t1=NULL, t2='hi'} {t1 notnull} 0 +test_expr expr-3.28 {t1=NULL, t2='hi'} {t2 notnull} 1 +test_expr expr-3.28b {t1=NULL, t2='hi'} {t2 is not null} 1 +test_expr expr-3.29 {t1='xyz', t2='abc'} {t1||t2} {xyzabc} +test_expr expr-3.30 {t1=NULL, t2='abc'} {t1||t2} {{}} +test_expr expr-3.31 {t1='xyz', t2=NULL} {t1||t2} {{}} +test_expr expr-3.32 {t1='xyz', t2='abc'} {t1||' hi '||t2} {{xyz hi abc}} +test_expr epxr-3.33 {t1='abc', t2=NULL} {coalesce(t1t2,99)} 99 +test_expr epxr-3.36 {t1='abc', t2=NULL} {coalesce(t2>t1,99)} 99 +test_expr epxr-3.37 {t1='abc', t2=NULL} {coalesce(t1<=t2,99)} 99 +test_expr epxr-3.38 {t1='abc', t2=NULL} {coalesce(t2<=t1,99)} 99 +test_expr epxr-3.39 {t1='abc', t2=NULL} {coalesce(t1>=t2,99)} 99 +test_expr epxr-3.40 {t1='abc', t2=NULL} {coalesce(t2>=t1,99)} 99 +test_expr epxr-3.41 {t1='abc', t2=NULL} {coalesce(t1==t2,99)} 99 +test_expr epxr-3.42 {t1='abc', t2=NULL} {coalesce(t2==t1,99)} 99 +test_expr epxr-3.43 {t1='abc', t2=NULL} {coalesce(t1!=t2,99)} 99 +test_expr epxr-3.44 
{t1='abc', t2=NULL} {coalesce(t2!=t1,99)} 99 + +test_expr expr-4.1 {t1='abc', t2='Abc'} {t1t2} 1 +test_expr expr-4.3 {t1='abc', t2='Bbc'} {t1t2} 1 +test_expr expr-4.5 {t1='0', t2='0.0'} {t1==t2} 0 +test_expr expr-4.6 {t1='0.000', t2='0.0'} {t1==t2} 0 +test_expr expr-4.7 {t1=' 0.000', t2=' 0.0'} {t1==t2} 0 +test_expr expr-4.8 {t1='0.0', t2='abc'} {t1r2} 0 +test_expr expr-4.11 {r1='abc', r2='Abc'} {r1r2} 1 +test_expr expr-4.13 {r1='abc', r2='Bbc'} {r1r2} 1 +test_expr expr-4.15 {r1='0', r2='0.0'} {r1==r2} 1 +test_expr expr-4.16 {r1='0.000', r2='0.0'} {r1==r2} 1 +test_expr expr-4.17 {r1=' 0.000', r2=' 0.0'} {r1==r2} 0 +test_expr expr-4.18 {r1='0.0', r2='abc'} {r1r2} 0 + +# CSL is true if LIKE is case sensitive and false if not. +# NCSL is the opposite. Use these variables as the result +# on operations where case makes a difference. +set CSL $sqlite_options(casesensitivelike) +set NCSL [expr {!$CSL}] + +test_expr expr-5.1 {t1='abc', t2='xyz'} {t1 LIKE t2} 0 +test_expr expr-5.2a {t1='abc', t2='abc'} {t1 LIKE t2} 1 +test_expr expr-5.2b {t1='abc', t2='ABC'} {t1 LIKE t2} $NCSL +test_expr expr-5.3a {t1='abc', t2='a_c'} {t1 LIKE t2} 1 +test_expr expr-5.3b {t1='abc', t2='A_C'} {t1 LIKE t2} $NCSL +test_expr expr-5.4 {t1='abc', t2='abc_'} {t1 LIKE t2} 0 +test_expr expr-5.5a {t1='abc', t2='a%c'} {t1 LIKE t2} 1 +test_expr expr-5.5b {t1='abc', t2='A%C'} {t1 LIKE t2} $NCSL +test_expr expr-5.5c {t1='abdc', t2='a%c'} {t1 LIKE t2} 1 +test_expr expr-5.5d {t1='ac', t2='a%c'} {t1 LIKE t2} 1 +test_expr expr-5.5e {t1='ac', t2='A%C'} {t1 LIKE t2} $NCSL +test_expr expr-5.6a {t1='abxyzzyc', t2='a%c'} {t1 LIKE t2} 1 +test_expr expr-5.6b {t1='abxyzzyc', t2='A%C'} {t1 LIKE t2} $NCSL +test_expr expr-5.7a {t1='abxyzzy', t2='a%c'} {t1 LIKE t2} 0 +test_expr expr-5.7b {t1='abxyzzy', t2='A%C'} {t1 LIKE t2} 0 +test_expr expr-5.8a {t1='abxyzzycx', t2='a%c'} {t1 LIKE t2} 0 +test_expr expr-5.8b {t1='abxyzzycy', t2='a%cx'} {t1 LIKE t2} 0 +test_expr expr-5.8c {t1='abxyzzycx', t2='A%C'} {t1 LIKE t2} 0 +test_expr expr-5.8d {t1='abxyzzycy', t2='A%CX'} {t1 LIKE t2} 0 +test_expr expr-5.9a {t1='abc', t2='a%_c'} {t1 LIKE t2} 1 +test_expr expr-5.9b {t1='ac', t2='a%_c'} {t1 LIKE t2} 0 +test_expr expr-5.9c {t1='abc', t2='A%_C'} {t1 LIKE t2} $NCSL +test_expr expr-5.9d {t1='ac', t2='A%_C'} {t1 LIKE t2} 0 +test_expr expr-5.10a {t1='abxyzzyc', t2='a%_c'} {t1 LIKE t2} 1 +test_expr expr-5.10b {t1='abxyzzyc', t2='A%_C'} {t1 LIKE t2} $NCSL +test_expr expr-5.11 {t1='abc', t2='xyz'} {t1 NOT LIKE t2} 1 +test_expr expr-5.12a {t1='abc', t2='abc'} {t1 NOT LIKE t2} 0 +test_expr expr-5.12b {t1='abc', t2='ABC'} {t1 NOT LIKE t2} $CSL +test_expr expr-5.13 {t1='A'} {t1 LIKE 'A%_'} 0 +test_expr expr-5.14 {t1='AB'} {t1 LIKE 'A%b' ESCAPE 'b'} 0 + +# The following tests only work on versions of TCL that support Unicode +# +if {"\u1234"!="u1234"} { + test_expr expr-5.13a "t1='a\u0080c', t2='a_c'" {t1 LIKE t2} 1 + test_expr expr-5.13b "t1='a\u0080c', t2='A_C'" {t1 LIKE t2} $NCSL + test_expr expr-5.14a "t1='a\u07FFc', t2='a_c'" {t1 LIKE t2} 1 + test_expr expr-5.14b "t1='a\u07FFc', t2='A_C'" {t1 LIKE t2} $NCSL + test_expr expr-5.15a "t1='a\u0800c', t2='a_c'" {t1 LIKE t2} 1 + test_expr expr-5.15b "t1='a\u0800c', t2='A_C'" {t1 LIKE t2} $NCSL + test_expr expr-5.16a "t1='a\uFFFFc', t2='a_c'" {t1 LIKE t2} 1 + test_expr expr-5.16b "t1='a\uFFFFc', t2='A_C'" {t1 LIKE t2} $NCSL + test_expr expr-5.17 "t1='a\u0080', t2='A__'" {t1 LIKE t2} 0 + test_expr expr-5.18 "t1='a\u07FF', t2='A__'" {t1 LIKE t2} 0 + test_expr expr-5.19 "t1='a\u0800', t2='A__'" {t1 LIKE t2} 0 + test_expr 
expr-5.20 "t1='a\uFFFF', t2='A__'" {t1 LIKE t2} 0 + test_expr expr-5.21a "t1='ax\uABCD', t2='a_\uABCD'" {t1 LIKE t2} 1 + test_expr expr-5.21b "t1='ax\uABCD', t2='A_\uABCD'" {t1 LIKE t2} $NCSL + test_expr expr-5.22a "t1='ax\u1234', t2='a%\u1234'" {t1 LIKE t2} 1 + test_expr expr-5.22b "t1='ax\u1234', t2='A%\u1234'" {t1 LIKE t2} $NCSL + test_expr expr-5.23a "t1='ax\uFEDC', t2='a_%'" {t1 LIKE t2} 1 + test_expr expr-5.23b "t1='ax\uFEDC', t2='A_%'" {t1 LIKE t2} $NCSL + test_expr expr-5.24a "t1='ax\uFEDCy\uFEDC', t2='a%\uFEDC'" {t1 LIKE t2} 1 + test_expr expr-5.24b "t1='ax\uFEDCy\uFEDC', t2='A%\uFEDC'" {t1 LIKE t2} $NCSL +} + +test_expr expr-5.54 {t1='abc', t2=NULL} {t1 LIKE t2} {{}} +test_expr expr-5.55 {t1='abc', t2=NULL} {t1 NOT LIKE t2} {{}} +test_expr expr-5.56 {t1='abc', t2=NULL} {t2 LIKE t1} {{}} +test_expr expr-5.57 {t1='abc', t2=NULL} {t2 NOT LIKE t1} {{}} + +# LIKE expressions that use ESCAPE characters. +test_expr expr-5.58a {t1='abc', t2='a_c'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.58b {t1='abc', t2='A_C'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.59a {t1='a_c', t2='a7_c'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.59b {t1='a_c', t2='A7_C'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.60a {t1='abc', t2='a7_c'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.60b {t1='abc', t2='A7_C'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.61a {t1='a7Xc', t2='a7_c'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.61b {t1='a7Xc', t2='A7_C'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.62a {t1='abcde', t2='a%e'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.62b {t1='abcde', t2='A%E'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.63a {t1='abcde', t2='a7%e'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.63b {t1='abcde', t2='A7%E'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.64a {t1='a7cde', t2='a7%e'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.64b {t1='a7cde', t2='A7%E'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.65a {t1='a7cde', t2='a77%e'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.65b {t1='a7cde', t2='A77%E'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.66a {t1='abc7', t2='a%77'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.66b {t1='abc7', t2='A%77'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.67a {t1='abc_', t2='a%7_'} {t1 LIKE t2 ESCAPE '7'} 1 +test_expr expr-5.67b {t1='abc_', t2='A%7_'} {t1 LIKE t2 ESCAPE '7'} $NCSL +test_expr expr-5.68a {t1='abc7', t2='a%7_'} {t1 LIKE t2 ESCAPE '7'} 0 +test_expr expr-5.68b {t1='abc7', t2='A%7_'} {t1 LIKE t2 ESCAPE '7'} 0 + +# These are the same test as the block above, but using a multi-byte +# character as the escape character. 
+if {"\u1234"!="u1234"} { + test_expr expr-5.69a "t1='abc', t2='a_c'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.69b "t1='abc', t2='A_C'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.70a "t1='a_c', t2='a\u1234_c'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.70b "t1='a_c', t2='A\u1234_C'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.71a "t1='abc', t2='a\u1234_c'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.71b "t1='abc', t2='A\u1234_C'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.72a "t1='a\u1234Xc', t2='a\u1234_c'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.72b "t1='a\u1234Xc', t2='A\u1234_C'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.73a "t1='abcde', t2='a%e'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.73b "t1='abcde', t2='A%E'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.74a "t1='abcde', t2='a\u1234%e'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.74b "t1='abcde', t2='A\u1234%E'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.75a "t1='a\u1234cde', t2='a\u1234%e'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.75b "t1='a\u1234cde', t2='A\u1234%E'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.76a "t1='a\u1234cde', t2='a\u1234\u1234%e'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.76b "t1='a\u1234cde', t2='A\u1234\u1234%E'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.77a "t1='abc\u1234', t2='a%\u1234\u1234'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.77b "t1='abc\u1234', t2='A%\u1234\u1234'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.78a "t1='abc_', t2='a%\u1234_'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 1 + test_expr expr-5.78b "t1='abc_', t2='A%\u1234_'" \ + "t1 LIKE t2 ESCAPE '\u1234'" $NCSL + test_expr expr-5.79a "t1='abc\u1234', t2='a%\u1234_'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 + test_expr expr-5.79b "t1='abc\u1234', t2='A%\u1234_'" \ + "t1 LIKE t2 ESCAPE '\u1234'" 0 +} + +test_expr expr-6.1 {t1='abc', t2='xyz'} {t1 GLOB t2} 0 +test_expr expr-6.2 {t1='abc', t2='ABC'} {t1 GLOB t2} 0 +test_expr expr-6.3 {t1='abc', t2='A?C'} {t1 GLOB t2} 0 +test_expr expr-6.4 {t1='abc', t2='a?c'} {t1 GLOB t2} 1 +test_expr expr-6.5 {t1='abc', t2='abc?'} {t1 GLOB t2} 0 +test_expr expr-6.6 {t1='abc', t2='A*C'} {t1 GLOB t2} 0 +test_expr expr-6.7 {t1='abc', t2='a*c'} {t1 GLOB t2} 1 +test_expr expr-6.8 {t1='abxyzzyc', t2='a*c'} {t1 GLOB t2} 1 +test_expr expr-6.9 {t1='abxyzzy', t2='a*c'} {t1 GLOB t2} 0 +test_expr expr-6.10 {t1='abxyzzycx', t2='a*c'} {t1 GLOB t2} 0 +test_expr expr-6.11 {t1='abc', t2='xyz'} {t1 NOT GLOB t2} 1 +test_expr expr-6.12 {t1='abc', t2='abc'} {t1 NOT GLOB t2} 0 +test_expr expr-6.13 {t1='abc', t2='a[bx]c'} {t1 GLOB t2} 1 +test_expr expr-6.14 {t1='abc', t2='a[cx]c'} {t1 GLOB t2} 0 +test_expr expr-6.15 {t1='abc', t2='a[a-d]c'} {t1 GLOB t2} 1 +test_expr expr-6.16 {t1='abc', t2='a[^a-d]c'} {t1 GLOB t2} 0 +test_expr expr-6.17 {t1='abc', t2='a[A-Dc]c'} {t1 GLOB t2} 0 +test_expr expr-6.18 {t1='abc', t2='a[^A-Dc]c'} {t1 GLOB t2} 1 +test_expr expr-6.19 {t1='abc', t2='a[]b]c'} {t1 GLOB t2} 1 +test_expr expr-6.20 {t1='abc', t2='a[^]b]c'} {t1 GLOB t2} 0 +test_expr expr-6.21a {t1='abcdefg', t2='a*[de]g'} {t1 GLOB t2} 0 +test_expr expr-6.21b {t1='abcdefg', t2='a*[df]g'} {t1 GLOB t2} 1 +test_expr expr-6.21c {t1='abcdefg', t2='a*[d-h]g'} {t1 GLOB t2} 1 +test_expr expr-6.21d {t1='abcdefg', t2='a*[b-e]g'} {t1 GLOB t2} 0 +test_expr expr-6.22a {t1='abcdefg', t2='a*[^de]g'} {t1 GLOB t2} 1 +test_expr expr-6.22b {t1='abcdefg', 
t2='a*[^def]g'} {t1 GLOB t2} 0 +test_expr expr-6.23 {t1='abcdefg', t2='a*?g'} {t1 GLOB t2} 1 +test_expr expr-6.24 {t1='ac', t2='a*c'} {t1 GLOB t2} 1 +test_expr expr-6.25 {t1='ac', t2='a*?c'} {t1 GLOB t2} 0 +test_expr expr-6.26 {t1='a*c', t2='a[*]c'} {t1 GLOB t2} 1 +test_expr expr-6.27 {t1='a?c', t2='a[?]c'} {t1 GLOB t2} 1 +test_expr expr-6.28 {t1='a[c', t2='a[[]c'} {t1 GLOB t2} 1 + + +# These tests only work on versions of TCL that support Unicode +# +if {"\u1234"!="u1234"} { + test_expr expr-6.26 "t1='a\u0080c', t2='a?c'" {t1 GLOB t2} 1 + test_expr expr-6.27 "t1='a\u07ffc', t2='a?c'" {t1 GLOB t2} 1 + test_expr expr-6.28 "t1='a\u0800c', t2='a?c'" {t1 GLOB t2} 1 + test_expr expr-6.29 "t1='a\uffffc', t2='a?c'" {t1 GLOB t2} 1 + test_expr expr-6.30 "t1='a\u1234', t2='a?'" {t1 GLOB t2} 1 + test_expr expr-6.31 "t1='a\u1234', t2='a??'" {t1 GLOB t2} 0 + test_expr expr-6.32 "t1='ax\u1234', t2='a?\u1234'" {t1 GLOB t2} 1 + test_expr expr-6.33 "t1='ax\u1234', t2='a*\u1234'" {t1 GLOB t2} 1 + test_expr expr-6.34 "t1='ax\u1234y\u1234', t2='a*\u1234'" {t1 GLOB t2} 1 + test_expr expr-6.35 "t1='a\u1234b', t2='a\[x\u1234y\]b'" {t1 GLOB t2} 1 + test_expr expr-6.36 "t1='a\u1234b', t2='a\[\u1233-\u1235\]b'" {t1 GLOB t2} 1 + test_expr expr-6.37 "t1='a\u1234b', t2='a\[\u1234-\u124f\]b'" {t1 GLOB t2} 1 + test_expr expr-6.38 "t1='a\u1234b', t2='a\[\u1235-\u124f\]b'" {t1 GLOB t2} 0 + test_expr expr-6.39 "t1='a\u1234b', t2='a\[a-\u1235\]b'" {t1 GLOB t2} 1 + test_expr expr-6.40 "t1='a\u1234b', t2='a\[a-\u1234\]b'" {t1 GLOB t2} 1 + test_expr expr-6.41 "t1='a\u1234b', t2='a\[a-\u1233\]b'" {t1 GLOB t2} 0 +} + +test_expr expr-6.51 {t1='ABC', t2='xyz'} {t1 GLOB t2} 0 +test_expr expr-6.52 {t1='ABC', t2='abc'} {t1 GLOB t2} 0 +test_expr expr-6.53 {t1='ABC', t2='a?c'} {t1 GLOB t2} 0 +test_expr expr-6.54 {t1='ABC', t2='A?C'} {t1 GLOB t2} 1 +test_expr expr-6.55 {t1='ABC', t2='abc?'} {t1 GLOB t2} 0 +test_expr expr-6.56 {t1='ABC', t2='a*c'} {t1 GLOB t2} 0 +test_expr expr-6.57 {t1='ABC', t2='A*C'} {t1 GLOB t2} 1 +test_expr expr-6.58 {t1='ABxyzzyC', t2='A*C'} {t1 GLOB t2} 1 +test_expr expr-6.59 {t1='ABxyzzy', t2='A*C'} {t1 GLOB t2} 0 +test_expr expr-6.60 {t1='ABxyzzyCx', t2='A*C'} {t1 GLOB t2} 0 +test_expr expr-6.61 {t1='ABC', t2='xyz'} {t1 NOT GLOB t2} 1 +test_expr expr-6.62 {t1='ABC', t2='ABC'} {t1 NOT GLOB t2} 0 +test_expr expr-6.63 {t1='ABC', t2='A[Bx]C'} {t1 GLOB t2} 1 +test_expr expr-6.64 {t1='ABC', t2='A[Cx]C'} {t1 GLOB t2} 0 +test_expr expr-6.65 {t1='ABC', t2='A[A-D]C'} {t1 GLOB t2} 1 +test_expr expr-6.66 {t1='ABC', t2='A[^A-D]C'} {t1 GLOB t2} 0 +test_expr expr-6.67 {t1='ABC', t2='A[a-dC]C'} {t1 GLOB t2} 0 +test_expr expr-6.68 {t1='ABC', t2='A[^a-dC]C'} {t1 GLOB t2} 1 +test_expr expr-6.69a {t1='ABC', t2='A[]B]C'} {t1 GLOB t2} 1 +test_expr expr-6.69b {t1='A]C', t2='A[]B]C'} {t1 GLOB t2} 1 +test_expr expr-6.70a {t1='ABC', t2='A[^]B]C'} {t1 GLOB t2} 0 +test_expr expr-6.70b {t1='AxC', t2='A[^]B]C'} {t1 GLOB t2} 1 +test_expr expr-6.70c {t1='A]C', t2='A[^]B]C'} {t1 GLOB t2} 0 +test_expr expr-6.71 {t1='ABCDEFG', t2='A*[DE]G'} {t1 GLOB t2} 0 +test_expr expr-6.72 {t1='ABCDEFG', t2='A*[^DE]G'} {t1 GLOB t2} 1 +test_expr expr-6.73 {t1='ABCDEFG', t2='A*?G'} {t1 GLOB t2} 1 +test_expr expr-6.74 {t1='AC', t2='A*C'} {t1 GLOB t2} 1 +test_expr expr-6.75 {t1='AC', t2='A*?C'} {t1 GLOB t2} 0 + +test_expr expr-6.63 {t1=NULL, t2='a*?c'} {t1 GLOB t2} {{}} +test_expr expr-6.64 {t1='ac', t2=NULL} {t1 GLOB t2} {{}} +test_expr expr-6.65 {t1=NULL, t2='a*?c'} {t1 NOT GLOB t2} {{}} +test_expr expr-6.66 {t1='ac', t2=NULL} {t1 NOT GLOB t2} {{}} + +# Check 
that the affinity of a CAST expression is calculated correctly. +ifcapable cast { + test_expr expr-6.67 {t1='01', t2=1} {t1 = t2} 0 + test_expr expr-6.68 {t1='1', t2=1} {t1 = t2} 1 + test_expr expr-6.69 {t1='01', t2=1} {CAST(t1 AS INTEGER) = t2} 1 +} + +test_expr expr-case.1 {i1=1, i2=2} \ + {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne +test_expr expr-case.2 {i1=2, i2=2} \ + {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} eq +test_expr expr-case.3 {i1=NULL, i2=2} \ + {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne +test_expr expr-case.4 {i1=2, i2=NULL} \ + {CASE WHEN i1 = i2 THEN 'eq' ELSE 'ne' END} ne +test_expr expr-case.5 {i1=2} \ + {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'error' END} two +test_expr expr-case.6 {i1=1} \ + {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} one +test_expr expr-case.7 {i1=2} \ + {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} error +test_expr expr-case.8 {i1=3} \ + {CASE i1 WHEN 1 THEN 'one' WHEN NULL THEN 'two' ELSE 'error' END} error +test_expr expr-case.9 {i1=3} \ + {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'error' END} error +test_expr expr-case.10 {i1=3} \ + {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' END} {{}} +test_expr expr-case.11 {i1=null} \ + {CASE i1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 3 END} 3 +test_expr expr-case.12 {i1=1} \ + {CASE i1 WHEN 1 THEN null WHEN 2 THEN 'two' ELSE 3 END} {{}} +test_expr expr-case.13 {i1=7} \ + { CASE WHEN i1 < 5 THEN 'low' + WHEN i1 < 10 THEN 'medium' + WHEN i1 < 15 THEN 'high' ELSE 'error' END} medium + + +# The sqliteExprIfFalse and sqliteExprIfTrue routines are only +# executed as part of a WHERE clause. Create a table suitable +# for testing these functions. +# +execsql {DROP TABLE test1} +execsql {CREATE TABLE test1(a int, b int);} +for {set i 1} {$i<=20} {incr i} { + execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" +} +execsql "INSERT INTO test1 VALUES(NULL,0)" +do_test expr-7.1 { + execsql {SELECT * FROM test1 ORDER BY a} +} {{} 0 1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024 11 2048 12 4096 13 8192 14 16384 15 32768 16 65536 17 131072 18 262144 19 524288 20 1048576} + +proc test_expr2 {name expr result} { + do_test $name [format { + execsql {SELECT a FROM test1 WHERE %s ORDER BY a} + } $expr] $result +} + +test_expr2 expr-7.2 {a<10 AND a>8} {9} +test_expr2 expr-7.3 {a<=10 AND a>=8} {8 9 10} +test_expr2 expr-7.4 {a>=8 AND a<=10} {8 9 10} +test_expr2 expr-7.5 {a>=20 OR a<=1} {1 20} +test_expr2 expr-7.6 {b!=4 AND a<=3} {1 3} +test_expr2 expr-7.7 {b==8 OR b==16 OR b==32} {3 4 5} +test_expr2 expr-7.8 {NOT b<>8 OR b==1024} {3 10} +test_expr2 expr-7.9 {b LIKE '10%'} {10 20} +test_expr2 expr-7.10 {b LIKE '_4'} {6} +test_expr2 expr-7.11 {a GLOB '1?'} {10 11 12 13 14 15 16 17 18 19} +test_expr2 expr-7.12 {b GLOB '1*4'} {10 14} +test_expr2 expr-7.13 {b GLOB '*1[456]'} {4} +test_expr2 expr-7.14 {a ISNULL} {{}} +test_expr2 expr-7.15 {a NOTNULL AND a<3} {1 2} +test_expr2 expr-7.16 {a AND a<3} {1 2} +test_expr2 expr-7.17 {NOT a} {} +test_expr2 expr-7.18 {a==11 OR (b>1000 AND b<2000)} {10 11} +test_expr2 expr-7.19 {a<=1 OR a>=20} {1 20} +test_expr2 expr-7.20 {a<1 OR a>20} {} +test_expr2 expr-7.21 {a>19 OR a<1} {20} +test_expr2 expr-7.22 {a!=1 OR a=100} \ + {2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} +test_expr2 expr-7.23 {(a notnull AND a<4) OR a==8} {1 2 3 8} +test_expr2 expr-7.24 {a LIKE '2_' OR a==8} {8 20} +test_expr2 expr-7.25 {a GLOB '2?' 
OR a==8} {8 20} +test_expr2 expr-7.26 {a isnull OR a=8} {{} 8} +test_expr2 expr-7.27 {a notnull OR a=8} \ + {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} +test_expr2 expr-7.28 {a<0 OR b=0} {{}} +test_expr2 expr-7.29 {b=0 OR a<0} {{}} +test_expr2 expr-7.30 {a<0 AND b=0} {} +test_expr2 expr-7.31 {b=0 AND a<0} {} +test_expr2 expr-7.32 {a IS NULL AND (a<0 OR b=0)} {{}} +test_expr2 expr-7.33 {a IS NULL AND (b=0 OR a<0)} {{}} +test_expr2 expr-7.34 {a IS NULL AND (a<0 AND b=0)} {} +test_expr2 expr-7.35 {a IS NULL AND (b=0 AND a<0)} {} +test_expr2 expr-7.32 {(a<0 OR b=0) AND a IS NULL} {{}} +test_expr2 expr-7.33 {(b=0 OR a<0) AND a IS NULL} {{}} +test_expr2 expr-7.34 {(a<0 AND b=0) AND a IS NULL} {} +test_expr2 expr-7.35 {(b=0 AND a<0) AND a IS NULL} {} +test_expr2 expr-7.36 {a<2 OR (a<0 OR b=0)} {{} 1} +test_expr2 expr-7.37 {a<2 OR (b=0 OR a<0)} {{} 1} +test_expr2 expr-7.38 {a<2 OR (a<0 AND b=0)} {1} +test_expr2 expr-7.39 {a<2 OR (b=0 AND a<0)} {1} +test_expr2 expr-7.40 {((a<2 OR a IS NULL) AND b<3) OR b>1e10} {{} 1} +test_expr2 expr-7.41 {a BETWEEN -1 AND 1} {1} +test_expr2 expr-7.42 {a NOT BETWEEN 2 AND 100} {1} +test_expr2 expr-7.43 {(b+1234)||'this is a string that is at least 32 characters long' BETWEEN 1 AND 2} {} +test_expr2 expr-7.44 {123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a BETWEEN '123a' AND '123b'} {} +test_expr2 expr-7.45 {((123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a) BETWEEN '123a' AND '123b')<0} {} +test_expr2 expr-7.46 {((123||'xabcdefghijklmnopqrstuvwyxz01234567890'||a) BETWEEN '123a' AND '123z')>0} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + +test_expr2 expr-7.50 {((a between 1 and 2 OR 0) AND 1) OR 0} {1 2} +test_expr2 expr-7.51 {((a not between 3 and 100 OR 0) AND 1) OR 0} {1 2} + +ifcapable subquery { + test_expr2 expr-7.52 {((a in (1,2) OR 0) AND 1) OR 0} {1 2} + test_expr2 expr-7.53 \ + {((a not in (3,4,5,6,7,8,9,10) OR 0) AND a<11) OR 0} {1 2} +} +test_expr2 expr-7.54 {((a>0 OR 0) AND a<3) OR 0} {1 2} +ifcapable subquery { + test_expr2 expr-7.55 {((a in (1,2) OR 0) IS NULL AND 1) OR 0} {{}} + test_expr2 expr-7.56 \ + {((a not in (3,4,5,6,7,8,9,10) IS NULL OR 0) AND 1) OR 0} {{}} +} +test_expr2 expr-7.57 {((a>0 IS NULL OR 0) AND 1) OR 0} {{}} + +test_expr2 expr-7.58 {(a||'')<='1'} {1} + +test_expr2 expr-7.59 {LIKE('10%',b)} {10 20} +test_expr2 expr-7.60 {LIKE('_4',b)} {6} +test_expr2 expr-7.61 {GLOB('1?',a)} {10 11 12 13 14 15 16 17 18 19} +test_expr2 expr-7.62 {GLOB('1*4',b)} {10 14} +test_expr2 expr-7.63 {GLOB('*1[456]',b)} {4} + +# Test the CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP expressions. 
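# The tests below pin the clock by setting the sqlite_current_time variable
# to the fixed Unix timestamp 1157124849, which corresponds to
# 2006-09-01 15:34:09 UTC; the literal results that follow are derived from
# that value.  An illustrative check using the standard Tcl clock command
# (not part of the test script itself):
#
#   puts [clock format 1157124849 -gmt 1 -format {%Y-%m-%d %H:%M:%S}]
#   ;# prints: 2006-09-01 15:34:09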
+# +set sqlite_current_time 1157124849 +do_test expr-8.1 { + execsql {SELECT CURRENT_TIME} +} {15:34:09} +do_test expr-8.2 { + execsql {SELECT CURRENT_DATE} +} {2006-09-01} +do_test expr-8.3 { + execsql {SELECT CURRENT_TIMESTAMP} +} {{2006-09-01 15:34:09}} +ifcapable datetime { + do_test expr-8.4 { + execsql {SELECT CURRENT_TIME==time('now');} + } 1 + do_test expr-8.5 { + execsql {SELECT CURRENT_DATE==date('now');} + } 1 + do_test expr-8.6 { + execsql {SELECT CURRENT_TIMESTAMP==datetime('now');} + } 1 +} +set sqlite_current_time 0 + +do_test expr-9.1 { + execsql {SELECT round(-('-'||'123'))} +} 123.0 + +# Test an error message that can be generated by the LIKE expression +do_test expr-10.1 { + catchsql {SELECT 'abc' LIKE 'abc' ESCAPE ''} +} {1 {ESCAPE expression must be a single character}} +do_test expr-10.2 { + catchsql {SELECT 'abc' LIKE 'abc' ESCAPE 'ab'} +} {1 {ESCAPE expression must be a single character}} + +# If we specify an integer constant that is bigger than the largest +# possible integer, code the integer as a real number. +# +do_test expr-11.1 { + execsql {SELECT typeof(9223372036854775807)} +} {integer} +do_test expr-11.2 { + execsql {SELECT typeof(00000009223372036854775807)} +} {integer} +do_test expr-11.3 { + execsql {SELECT typeof(+9223372036854775807)} +} {integer} +do_test expr-11.4 { + execsql {SELECT typeof(+000000009223372036854775807)} +} {integer} +do_test expr-11.5 { + execsql {SELECT typeof(9223372036854775808)} +} {real} +do_test expr-11.6 { + execsql {SELECT typeof(00000009223372036854775808)} +} {real} +do_test expr-11.7 { + execsql {SELECT typeof(+9223372036854775808)} +} {real} +do_test expr-11.8 { + execsql {SELECT typeof(+0000009223372036854775808)} +} {real} +do_test expr-11.11 { + execsql {SELECT typeof(-9223372036854775808)} +} {integer} +do_test expr-11.12 { + execsql {SELECT typeof(-00000009223372036854775808)} +} {integer} +do_test expr-11.13 { + execsql {SELECT typeof(-9223372036854775809)} +} {real} +do_test expr-11.14 { + execsql {SELECT typeof(-00000009223372036854775809)} +} {real} + +# These two statements used to leak memory (because of missing %destructor +# directives in parse.y). +do_test expr-12.1 { + catchsql { + SELECT (CASE a>4 THEN 1 ELSE 0 END) FROM test1; + } +} {1 {near "THEN": syntax error}} +do_test expr-12.2 { + catchsql { + SELECT (CASE WHEN a>4 THEN 1 ELSE 0) FROM test1; + } +} {1 {near ")": syntax error}} + +do_test expr-13.1 { + execsql { + SELECT 12345678901234567890; + } +} {1.23456789012346e+19} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/filefmt.test b/libraries/sqlite/unix/sqlite-3.5.1/test/filefmt.test new file mode 100644 index 0000000..dc4fe5b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/filefmt.test @@ -0,0 +1,115 @@ +# 2007 April 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify database file format. +# +# $Id: filefmt.test,v 1.2 2007/04/06 21:42:22 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close +file delete -force test.db test.db-journal + +# Database begins with valid 16-byte header string. 
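# The expected hex value in the first test below is the ASCII encoding of
# the string "SQLite format 3" followed by a terminating NUL byte (00).
# An illustrative decode in plain Tcl (assumes an interactive Tcl shell,
# not part of the test script itself):
#
#   puts [binary format H* 53514C69746520666F726D61742033]
#   ;# prints: SQLite format 3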
+# +do_test filefmt-1.1 { + sqlite3 db test.db + db eval {CREATE TABLE t1(x)} + db close + hexio_read test.db 0 16 +} {53514C69746520666F726D6174203300} + +# If the 16-byte header is changed, the file will not open +# +do_test filefmt-1.2 { + hexio_write test.db 0 54 + set x [catch {sqlite3 db test.db} err] + lappend x $err +} {0 {}} +do_test filefmt-1.3 { + catchsql { + SELECT count(*) FROM sqlite_master + } +} {1 {file is encrypted or is not a database}} +do_test filefmt-1.4 { + db close + hexio_write test.db 0 53 + sqlite3 db test.db + catchsql { + SELECT count(*) FROM sqlite_master + } +} {0 1} + +# The page-size is stored at offset 16 +# +ifcapable pager_pragmas { + foreach pagesize {512 1024 2048 4096 8192 16384 32768} { + if {[info exists SQLITE_MAX_PAGE_SIZE] + && $pagesize>$SQLITE_MAX_PAGE_SIZE} continue + do_test filefmt-1.5.$pagesize.1 { + db close + file delete -force test.db + sqlite3 db test.db + db eval "PRAGMA auto_vacuum=OFF" + db eval "PRAGMA page_size=$pagesize" + db eval {CREATE TABLE t1(x)} + file size test.db + } [expr $pagesize*2] + do_test filefmt-1.5.$pagesize.2 { + hexio_get_int [hexio_read test.db 16 2] + } $pagesize + } +} + +# The page-size must be a power of 2 +# +do_test filefmt-1.6 { + db close + hexio_write test.db 16 [hexio_render_int16 1025] + sqlite3 db test.db + catchsql { + SELECT count(*) FROM sqlite_master + } +} {1 {file is encrypted or is not a database}} + + +# The page-size must be at least 512 bytes +# +do_test filefmt-1.7 { + db close + hexio_write test.db 16 [hexio_render_int16 256] + sqlite3 db test.db + catchsql { + SELECT count(*) FROM sqlite_master + } +} {1 {file is encrypted or is not a database}} + +# Usable space per page (page-size minus unused space per page) +# must be at least 500 bytes +# +ifcapable pager_pragmas { + do_test filefmt-1.8 { + db close + file delete -force test.db + sqlite3 db test.db + db eval {PRAGMA page_size=512; CREATE TABLE t1(x)} + db close + hexio_write test.db 20 10 + sqlite3 db test.db + catchsql { + SELECT count(*) FROM sqlite_master + } + } {1 {file is encrypted or is not a database}} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fkey1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fkey1.test new file mode 100644 index 0000000..52b52d3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fkey1.test @@ -0,0 +1,77 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for foreign keys. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!foreignkey} { + finish_test + return +} + +# Create a table and some data to work with. 
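# Note that in this version of SQLite, FOREIGN KEY and REFERENCES clauses
# are parsed and recorded in the schema but are not enforced at run time,
# so the tests below only confirm that the various clause forms are
# accepted and that tables carrying such constraints can be created and
# dropped cleanly.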
+# +do_test fkey1-1.0 { + execsql { + CREATE TABLE t1( + a INTEGER PRIMARY KEY, + b INTEGER + REFERENCES t1 ON DELETE CASCADE + REFERENCES t2, + c TEXT, + FOREIGN KEY (b,c) REFERENCES t2(x,y) ON UPDATE CASCADE + ); + } +} {} +do_test fkey1-1.1 { + execsql { + CREATE TABLE t2( + x INTEGER PRIMARY KEY, + y TEXT + ); + } +} {} +do_test fkey1-1.2 { + execsql { + CREATE TABLE t3( + a INTEGER REFERENCES t2 ON INSERT RESTRICT, + b INTEGER REFERENCES t1, + FOREIGN KEY (a,b) REFERENCES t2(x,y) + ); + } +} {} + +do_test fkey1-2.1 { + execsql { + CREATE TABLE t4(a integer primary key); + CREATE TABLE t5(x references t4); + CREATE TABLE t6(x references t4); + CREATE TABLE t7(x references t4); + CREATE TABLE t8(x references t4); + CREATE TABLE t9(x references t4); + CREATE TABLE t10(x references t4); + DROP TABLE t7; + DROP TABLE t9; + DROP TABLE t5; + DROP TABLE t8; + DROP TABLE t6; + DROP TABLE t10; + } +} {} + + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/format4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/format4.test new file mode 100644 index 0000000..14d7947 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/format4.test @@ -0,0 +1,65 @@ +# 2005 December 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that the new serial_type +# values of 8 (integer 0) and 9 (integer 1) work correctly. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db eval {PRAGMA legacy_file_format=OFF} + +# The size of the database depends on whether or not autovacuum +# is enabled. +# +ifcapable autovacuum { + if {[db one {PRAGMA auto_vacuum}]} { + set small 3072 + set large 5120 + } else { + set small 2048 + set large 4096 + } +} else { + set small 2048 + set large 4096 +} + +do_test format4-1.1 { + execsql { + CREATE TABLE t1(x0,x1,x2,x3,x4,x5,x6,x7,x8,x9); + INSERT INTO t1 VALUES(0,0,0,0,0,0,0,0,0,0); + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + } + file size test.db +} $small +do_test format4-1.2 { + execsql { + UPDATE t1 SET x0=1, x1=1, x2=1, x3=1, x4=1, x5=1, x6=1, x7=1, x8=1, x9=1 + } + file size test.db +} $small +do_test format4-1.3 { + execsql { + UPDATE t1 SET x0=2, x1=2, x2=2, x3=2, x4=2, x5=2, x6=2, x7=2, x8=2, x9=2 + } + file size test.db +} $large + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1a.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1a.test new file mode 100644 index 0000000..b63e79a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1a.test @@ -0,0 +1,186 @@ +# 2006 September 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS1 module. 
+# +# $Id: fts1a.test,v 1.4 2006/09/28 19:43:32 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(content); + INSERT INTO t1(content) VALUES('one'); + INSERT INTO t1(content) VALUES('two'); + INSERT INTO t1(content) VALUES('one two'); + INSERT INTO t1(content) VALUES('three'); + INSERT INTO t1(content) VALUES('one three'); + INSERT INTO t1(content) VALUES('two three'); + INSERT INTO t1(content) VALUES('one two three'); + INSERT INTO t1(content) VALUES('four'); + INSERT INTO t1(content) VALUES('one four'); + INSERT INTO t1(content) VALUES('two four'); + INSERT INTO t1(content) VALUES('one two four'); + INSERT INTO t1(content) VALUES('three four'); + INSERT INTO t1(content) VALUES('one three four'); + INSERT INTO t1(content) VALUES('two three four'); + INSERT INTO t1(content) VALUES('one two three four'); + INSERT INTO t1(content) VALUES('five'); + INSERT INTO t1(content) VALUES('one five'); + INSERT INTO t1(content) VALUES('two five'); + INSERT INTO t1(content) VALUES('one two five'); + INSERT INTO t1(content) VALUES('three five'); + INSERT INTO t1(content) VALUES('one three five'); + INSERT INTO t1(content) VALUES('two three five'); + INSERT INTO t1(content) VALUES('one two three five'); + INSERT INTO t1(content) VALUES('four five'); + INSERT INTO t1(content) VALUES('one four five'); + INSERT INTO t1(content) VALUES('two four five'); + INSERT INTO t1(content) VALUES('one two four five'); + INSERT INTO t1(content) VALUES('three four five'); + INSERT INTO t1(content) VALUES('one three four five'); + INSERT INTO t1(content) VALUES('two three four five'); + INSERT INTO t1(content) VALUES('one two three four five'); +} + +do_test fts1a-1.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts1a-1.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two'} +} {3 7 11 15 19 23 27 31} +do_test fts1a-1.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one'} +} {3 7 11 15 19 23 27 31} +do_test fts1a-1.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two three'} +} {7 15 23 31} +do_test fts1a-1.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one three two'} +} {7 15 23 31} +do_test fts1a-1.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two three one'} +} {7 15 23 31} +do_test fts1a-1.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one three'} +} {7 15 23 31} +do_test fts1a-1.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three one two'} +} {7 15 23 31} +do_test fts1a-1.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three two one'} +} {7 15 23 31} +do_test fts1a-1.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two THREE'} +} {7 15 23 31} +do_test fts1a-1.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH ' ONE Two three '} +} {7 15 23 31} + +do_test fts1a-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one"'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts1a-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two"'} +} {3 7 11 15 19 23 27 31} +do_test fts1a-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"two one"'} 
+} {} +do_test fts1a-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three"'} +} {7 15 23 31} +do_test fts1a-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two"'} +} {} +do_test fts1a-2.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three four"'} +} {15 31} +do_test fts1a-2.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two four"'} +} {} +do_test fts1a-2.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three five"'} +} {21} +do_test fts1a-2.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" five'} +} {21 29} +do_test fts1a-2.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three"'} +} {21 29} +do_test fts1a-2.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three" four'} +} {29} +do_test fts1a-2.12 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five four "one three"'} +} {29} +do_test fts1a-2.13 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" four five'} +} {29} + +do_test fts1a-3.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts1a-3.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one -two'} +} {1 5 9 13 17 21 25 29} +do_test fts1a-3.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '-two one'} +} {1 5 9 13 17 21 25 29} + +do_test fts1a-4.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one OR two'} +} {1 2 3 5 6 7 9 10 11 13 14 15 17 18 19 21 22 23 25 26 27 29 30 31} +do_test fts1a-4.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two" OR three'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts1a-4.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR "one two"'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts1a-4.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts1a-4.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR two one'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts1a-4.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three OR four'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} +do_test fts1a-4.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two OR three OR four one'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} + +# Test the ability to handle NULL content +# +do_test fts1a-5.1 { + execsql {INSERT INTO t1(content) VALUES(NULL)} +} {} +do_test fts1a-5.2 { + set rowid [db last_insert_rowid] + execsql {SELECT content FROM t1 WHERE rowid=$rowid} +} {{}} +do_test fts1a-5.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH NULL} +} {} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1b.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1b.test new file mode 100644 index 0000000..2bbe1aa --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1b.test @@ -0,0 +1,147 @@ +# 2006 September 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS1 module. 
+# +# $Id: fts1b.test,v 1.4 2006/09/18 02:12:48 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Fill the full-text index "t1" with phrases in english, spanish, +# and german. For the i-th row, fill in the names for the bits +# that are set in the value of i. The least significant bit is +# 1. For example, the value 5 is 101 in binary which will be +# converted to "one three" in english. +# +proc fill_multilanguage_fulltext_t1 {} { + set english {one two three four five} + set spanish {un dos tres cuatro cinco} + set german {eine zwei drei vier funf} + + for {set i 1} {$i<=31} {incr i} { + set cmd "INSERT INTO t1 VALUES" + set vset {} + foreach lang {english spanish german} { + set words {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend words [lindex [set $lang] $j]} + } + lappend vset "'$words'" + } + set sql "INSERT INTO t1(english,spanish,german) VALUES([join $vset ,])" + # puts $sql + db eval $sql + } +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(english,spanish,german); +} +fill_multilanguage_fulltext_t1 + +do_test fts1b-1.1 { + execsql {SELECT rowid FROM t1 WHERE english MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts1b-1.2 { + execsql {SELECT rowid FROM t1 WHERE spanish MATCH 'one'} +} {} +do_test fts1b-1.3 { + execsql {SELECT rowid FROM t1 WHERE german MATCH 'one'} +} {} +do_test fts1b-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts1b-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one dos drei'} +} {7 15 23 31} +do_test fts1b-1.6 { + execsql {SELECT english, spanish, german FROM t1 WHERE rowid=1} +} {one un eine} +do_test fts1b-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"one un"'} +} {} + +do_test fts1b-2.1 { + execsql { + CREATE VIRTUAL TABLE t2 USING fts1(from,to); + INSERT INTO t2([from],[to]) VALUES ('one two three', 'four five six'); + SELECT [from], [to] FROM t2 + } +} {{one two three} {four five six}} + + +# Compute an SQL string that contains the words one, two, three,... to +# describe bits set in the value $i. Only the lower 5 bits are examined. 
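# For example, under this scheme bit 0 maps to "one", bit 1 to "two", and
# so on, so wordset 5 (binary 00101) returns 'one three' and wordset 6
# (binary 00110) returns 'two three'.  The same bit-to-word mapping is used
# for the columns of table t4 defined below.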
+# +proc wordset {i} { + set x {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend x [lindex {one two three four five} $j]} + } + return '$x' +} + +# Create a new FTS table with three columns: +# +# norm: words for the bits of rowid +# plusone: words for the bits of rowid+1 +# invert: words for the bits of ~rowid +# +db eval { + CREATE VIRTUAL TABLE t4 USING fts1([norm],'plusone',"invert"); +} +for {set i 1} {$i<=15} {incr i} { + set vset [list [wordset $i] [wordset [expr {$i+1}]] [wordset [expr {~$i}]]] + db eval "INSERT INTO t4(norm,plusone,invert) VALUES([join $vset ,]);" +} + +do_test fts1b-4.1 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one'} +} {1 3 5 7 9 11 13 15} +do_test fts1b-4.2 { + execsql {SELECT rowid FROM t4 WHERE norm MATCH 'one'} +} {1 3 5 7 9 11 13 15} +do_test fts1b-4.3 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'one'} +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15} +do_test fts1b-4.4 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:one'} +} {2 4 6 8 10 12 14} +do_test fts1b-4.5 { + execsql {SELECT rowid FROM t4 WHERE plusone MATCH 'one'} +} {2 4 6 8 10 12 14} +do_test fts1b-4.6 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one plusone:two'} +} {1 5 9 13} +do_test fts1b-4.7 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one two'} +} {1 3 5 7 9 11 13 15} +do_test fts1b-4.8 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:two norm:one'} +} {1 5 9 13} +do_test fts1b-4.9 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'two norm:one'} +} {1 3 5 7 9 11 13 15} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1c.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1c.test new file mode 100644 index 0000000..a124695 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1c.test @@ -0,0 +1,1213 @@ +# 2006 September 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS1 module. +# +# $Id: fts1c.test,v 1.11 2006/10/04 17:35:28 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Create a table of sample email data. The data comes from email +# archives of Enron executives that was published as part of the +# litigation against that company. +# +do_test fts1c-1.1 { + db eval { + CREATE VIRTUAL TABLE email USING fts1([from],[to],subject,body); + BEGIN TRANSACTION; +INSERT INTO email([from],[to],subject,body) VALUES('savita.puthigai@enron.com', 'traders.eol@enron.com, traders.eol@enron.com', 'EnronOnline- Change to Autohedge', 'Effective Monday, October 22, 2001 the following changes will be made to the Autohedge functionality on EnronOnline. + +The volume on the hedge will now respect the minimum volume and volume increment settings on the parent product. See rules below: + +? If the transaction volume on the child is less than half of the parent''s minimum volume no hedge will occur. +? 
If the transaction volume on the child is more than half the parent''s minimum volume but less than half the volume increment on the parent, the hedge will volume will be the parent''s minimum volume. +? For all other volumes, the same rounding rules will apply based on the volume increment on the parent product. + +Please see example below: + +Parent''s Settings: +Minimum: 5000 +Increment: 1000 + +Volume on Autohedge transaction Volume Hedged +1 - 2499 0 +2500 - 5499 5000 +5500 - 6499 6000'); +INSERT INTO email([from],[to],subject,body) VALUES('dana.davis@enron.com', 'laynie.east@enron.com, lisa.king@enron.com, lisa.best@enron.com,', 'Leaving Early', 'FYI: +If it''s ok with everyone''s needs, I would like to leave @4pm. If you think +you will need my assistance past the 4 o''clock hour just let me know; I''ll +be more than willing to stay.'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'louise.kitchen@enron.com', '<> - CC02.06.02', 'The following expense report is ready for approval: + +Employee Name: Christopher F. Calger +Status last changed by: Mollie E. Gustafson Ms +Expense Report Name: CC02.06.02 +Report Total: $3,972.93 +Amount Due Employee: $3,972.93 + + +To approve this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('jeff.duff@enron.com', 'julie.johnson@enron.com', 'Work request', 'Julie, + +Could you print off the current work request report by 1:30 today? + +Gentlemen, + +I''d like to review this today at 1:30 in our office. Also, could you provide +me with your activity reports so I can have Julie enter this information. + +JD'); +INSERT INTO email([from],[to],subject,body) VALUES('v.weldon@enron.com', 'gary.l.carrier@usa.dupont.com, scott.joyce@bankofamerica.com', 'Enron News', 'This could turn into something big.... +http://biz.yahoo.com/rf/010129/n29305829.html'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.haedicke@enron.com', 'paul.simons@enron.com', 'Re: First Polish Deal!', 'Congrats! Things seem to be building rapidly now on the Continent. Mark'); +INSERT INTO email([from],[to],subject,body) VALUES('e..carter@enron.com', 't..robinson@enron.com', 'FW: Producers Newsletter 9-24-2001', ' +The producer lumber pricing sheet. + -----Original Message----- +From: Johnson, Jay +Sent: Tuesday, October 16, 2001 3:42 PM +To: Carter, Karen E. +Subject: FW: Producers Newsletter 9-24-2001 + + + + -----Original Message----- +From: Daigre, Sergai +Sent: Friday, September 21, 2001 8:33 PM +Subject: Producers Newsletter 9-24-2001 + + '); +INSERT INTO email([from],[to],subject,body) VALUES('david.delainey@enron.com', 'kenneth.lay@enron.com', 'Greater Houston Partnership', 'Ken, in response to the letter from Mr Miguel San Juan, my suggestion would +be to offer up the Falcon for their use; however, given the tight time frame +and your recent visit with Mr. Fox that it would be difficult for either you +or me to participate. + +I spoke to Max and he agrees with this approach. + +I hope this meets with your approval. + +Regards +Delainey'); +INSERT INTO email([from],[to],subject,body) VALUES('lachandra.fenceroy@enron.com', 'lindy.donoho@enron.com', 'FW: Bus Applications Meeting Follow Up', 'Lindy, + +Here is the original memo we discussed earlier. Please provide any information that you may have. + +Your cooperation is greatly appreciated. 
+ +Thanks, + +lachandra.fenceroy@enron.com +713.853.3884 +877.498.3401 Pager + + -----Original Message----- +From: Bisbee, Joanne +Sent: Wednesday, September 26, 2001 7:50 AM +To: Fenceroy, LaChandra +Subject: FW: Bus Applications Meeting Follow Up + +Lachandra, Please get with David Duff today and see what this is about. Who are our TW accounting business users? + + -----Original Message----- +From: Koh, Wendy +Sent: Tuesday, September 25, 2001 2:41 PM +To: Bisbee, Joanne +Subject: Bus Applications Meeting Follow Up + +Lisa brought up a TW change effective Nov 1. It involves eliminating a turnback surcharge. I have no other information, but you might check with the business folks for any system changes required. + +Wendy'); +INSERT INTO email([from],[to],subject,body) VALUES('danny.mccarty@enron.com', 'fran.fagan@enron.com', 'RE: worksheets', 'Fran, + If Julie''s merit needs to be lump sum, just move it over to that column. Also, send me Eric Gadd''s sheets as well. Thanks. +Dan + + -----Original Message----- +From: Fagan, Fran +Sent: Thursday, December 20, 2001 11:10 AM +To: McCarty, Danny +Subject: worksheets + +As discussed, attached are your sheets for bonus and merit. + +Thanks, + +Fran Fagan +Sr. HR Rep +713.853.5219 + + + << File: McCartyMerit.xls >> << File: mccartyBonusCommercial_UnP.xls >> + +'); +INSERT INTO email([from],[to],subject,body) VALUES('bert.meyers@enron.com', 'shift.dl-portland@enron.com', 'OCTOBER SCHEDULE', 'TEAM, + +PLEASE SEND ME ANY REQUESTS THAT YOU HAVE FOR OCTOBER. SO FAR I HAVE THEM FOR LEAF. I WOULD LIKE TO HAVE IT DONE BY THE 15TH OF THE MONTH. ANY QUESTIONS PLEASE GIVE ME A CALL. + +BERT'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'john.arnold@enron.com, bilal.bajwa@enron.com, john.griffith@enron.com,', 'TRV Notification: (NG - PROPT P/L - 09/27/2001)', 'The report named: NG - PROPT P/L , published as of 09/27/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('patrice.mims@enron.com', 'calvin.eakins@enron.com', 'Re: Small business supply assistance', 'Hi Calvin + + +I spoke with Rickey (boy, is he long-winded!!). Gave him the name of our +credit guy, Russell Diamond. + +Thank for your help!'); +INSERT INTO email([from],[to],subject,body) VALUES('legal <.hall@enron.com>', 'stephanie.panus@enron.com', 'Termination update', 'City of Vernon and Salt River Project terminated their contracts. I will fax these notices to you.'); +INSERT INTO email([from],[to],subject,body) VALUES('d..steffes@enron.com', 'richard.shapiro@enron.com', 'EES / ENA Government Affairs Staffing & Outside Services', 'Rick -- + +Here is the information on staffing and outside services. Call if you need anything else. + +Jim + + '); +INSERT INTO email([from],[to],subject,body) VALUES('gelliott@industrialinfo.com', 'pcopello@industrialinfo.com', 'ECAAR (Gavin), WSCC (Diablo Canyon), & NPCC (Seabrook)', 'Dear Power Outage Database Customer, +Attached you will find an excel document. The outages contained within are forced or rescheduled outages. Your daily delivery will still contain these outages. +In addition to the two excel documents, there is a dbf file that is formatted like your daily deliveries you receive nightly. This will enable you to load the data into your regular database. Any questions please let me know. Thanks. +Greg Elliott +IIR, Inc. 
+713-783-5147 x 3481 +outages@industrialinfo.com +THE INFORMATION CONTAINED IN THIS E-MAIL IS LEGALLY PRIVILEGED AND CONFIDENTIAL INFORMATION INTENDED ONLY FOR THE USE OF THE INDIVIDUAL OR ENTITY NAMED ABOVE. YOU ARE HEREBY NOTIFIED THAT ANY DISSEMINATION, DISTRIBUTION, OR COPY OF THIS E-MAIL TO UNAUTHORIZED ENTITIES IS STRICTLY PROHIBITED. IF YOU HAVE RECEIVED THIS +E-MAIL IN ERROR, PLEASE DELETE IT. + - OUTAGE.dbf + - 111201R.xls + - 111201.xls '); +INSERT INTO email([from],[to],subject,body) VALUES('enron.announcements@enron.com', 'all_ena_egm_eim@enron.com', 'EWS Brown Bag', 'MARK YOUR LUNCH CALENDARS NOW ! + +You are invited to attend the EWS Brown Bag Lunch Series + +Featuring: RAY BOWEN, COO + +Topic: Enron Industrial Markets + +Thursday, March 15, 2001 +11:30 am - 12:30 pm +EB 5 C2 + + +You bring your lunch, Limited Seating +We provide drinks and dessert. RSVP x 3-9610'); +INSERT INTO email([from],[to],subject,body) VALUES('chris.germany@enron.com', 'ingrid.immer@williams.com', 'Re: About St Pauls', 'Sounds good to me. I bet this is next to the Warick?? Hotel. + + + + +"Immer, Ingrid" on 12/21/2000 11:48:47 AM +To: "''chris.germany@enron.com''" +cc: +Subject: About St Pauls + + + + + <> +? +?http://www.stpaulshouston.org/about.html + +Chris, + +I like the looks of this place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. + +Let me know.?? ii + + - About St Pauls.url + +'); +INSERT INTO email([from],[to],subject,body) VALUES('nas@cpuc.ca.gov', 'skatz@sempratrading.com, kmccrea@sablaw.com, thompson@wrightlaw.com,', 'Reply Brief filed July 31, 2000', ' - CPUC01-#76371-v1-Revised_Reply_Brief__Due_today_7_31_.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('gascontrol@aglresources.com', 'dscott4@enron.com, lcampbel@enron.com', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder +As discussed in the Winter Operations Meeting on Sept.29,2000, +E-Gas(Emergency Gas) will not be offered this winter as a service from AGLC. +Marketers and Poolers can receive gas via Peaking and IBSS nominations(daisy +chain) from other marketers up to the 6 p.m. Same Day 2 nomination cycle. +'); +INSERT INTO email([from],[to],subject,body) VALUES('dutch.quigley@enron.com', 'rwolkwitz@powermerchants.com', '', ' + +Here is a goody for you'); +INSERT INTO email([from],[to],subject,body) VALUES('ryan.o''rourke@enron.com', 'k..allen@enron.com, randy.bhatia@enron.com, frank.ermis@enron.com,', 'TRV Notification: (West VaR - 11/07/2001)', 'The report named: West VaR , published as of 11/07/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('mjones7@txu.com', 'cstone1@txu.com, ggreen2@txu.com, timpowell@txu.com,', 'Enron / HPL Actuals for July 10, 2000', 'Teco Tap 10.000 / Enron ; 110.000 / HPL IFERC + +LS HPL LSK IC 30.000 / Enron +'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.pereira@enron.com', 'kkw816@aol.com', 'soccer practice', 'Kathy- + +Is it safe to assume that practice is cancelled for tonight?? + +Susan Pereira'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.whitt@enron.com', 'barry.tycholiz@enron.com', 'Huber Internal Memo', 'Please look at this. I didn''t know how deep to go with the desk. Do you think this works. 
+ + '); +INSERT INTO email([from],[to],subject,body) VALUES('m..forney@enron.com', 'george.phillips@enron.com', '', 'George, +Give me a call and we will further discuss opportunities on the 13st floor. + +Thanks, +JMForney +3-7160'); +INSERT INTO email([from],[to],subject,body) VALUES('brad.mckay@enron.com', 'angusmcka@aol.com', 'Re: (no subject)', 'not yet'); +INSERT INTO email([from],[to],subject,body) VALUES('adam.bayer@enron.com', 'jonathan.mckay@enron.com', 'FW: Curve Fetch File', 'Here is the curve fetch file sent to me. It has plenty of points in it. If you give me a list of which ones you need we may be able to construct a secondary worksheet to vlookup the values. + +adam +35227 + + + -----Original Message----- +From: Royed, Jeff +Sent: Tuesday, September 25, 2001 11:37 AM +To: Bayer, Adam +Subject: Curve Fetch File + +Let me know if it works. It may be required to have a certain version of Oracle for it to work properly. + + + +Jeff Royed +Enron +Energy Operations +Phone: 713-853-5295'); +INSERT INTO email([from],[to],subject,body) VALUES('matt.smith@enron.com', 'yan.wang@enron.com', 'Report Formats', 'Yan, + +The merged reports look great. I believe the only orientation changes are to +"unmerge" the following six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 +23 West_3 +25 CIG_WIC + +The orientation of the individual reports should be correct. Thanks. + +Mat + +PS. Just a reminder to add the "*" by the title of calculated points.'); +INSERT INTO email([from],[to],subject,body) VALUES('michelle.lokay@enron.com', 'jimboman@bigfoot.com', 'Egyptian Festival', '---------------------- Forwarded by Michelle Lokay/ET&S/Enron on 09/07/2000 +10:08 AM --------------------------- + + +"Karkour, Randa" on 09/07/2000 09:01:04 AM +To: "''Agheb (E-mail)" , "Leila Mankarious (E-mail)" +, "''Marymankarious (E-mail)" +, "Michelle lokay (E-mail)" , "Ramy +Mankarious (E-mail)" +cc: + +Subject: Egyptian Festival + + + <> + + http://www.egyptianfestival.com/ + + - Egyptian Festival.url +'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'sherry.dawson@enron.com', 'Urgent!!! --- New EAST books', 'This has to be done.................................. + +Thanks +---------------------- Forwarded by Errol McLaughlin/Corp/Enron on 12/20/2000 +08:39 AM --------------------------- + + + + From: William Kelly @ ECT 12/20/2000 08:31 AM + + +To: Kam Keiser/HOU/ECT@ECT, Darron C Giron/HOU/ECT@ECT, David +Baumbach/HOU/ECT@ECT, Errol McLaughlin/Corp/Enron@ENRON +cc: Kimat Singla/HOU/ECT@ECT, Kulvinder Fowler/NA/Enron@ENRON, Kyle R +Lilly/HOU/ECT@ECT, Jeff Royed/Corp/Enron@ENRON, Alejandra +Chavez/NA/Enron@ENRON, Crystal Hyde/HOU/ECT@ECT + +Subject: New EAST books + +We have new book names in TAGG for our intramonth portfolios and it is +extremely important that any deal booked to the East is communicated quickly +to someone on my team. I know it will take some time for the new names to +sink in and I do not want us to miss any positions or P&L. + +Thanks for your help on this. + +New: +Scott Neal : East Northeast +Dick Jenkins: East Marketeast + +WK +'); +INSERT INTO email([from],[to],subject,body) VALUES('david.forster@enron.com', 'eol.wide@enron.com', 'Change to Stack Manager', 'Effective immediately, there is a change to the Stack Manager which will +affect any Inactive Child. + +An inactive Child with links to Parent products will not have their +calculated prices updated until the Child product is Activated. 
+ +When the Child Product is activated, the price will be recalculated and +updated BEFORE it is displayed on the web. + +This means that if you are inputting a basis price on a Child product, you +will not see the final, calculated price until you Activate the product, at +which time the customer will also see it. + +If you have any questions, please contact the Help Desk on: + +Americas: 713 853 4357 +Europe: + 44 (0) 20 7783 7783 +Asia/Australia: +61 2 9229 2300 + +Dave'); +INSERT INTO email([from],[to],subject,body) VALUES('vince.kaminski@enron.com', 'jhh1@email.msn.com', 'Re: Light reading - see pieces beginning on page 7', 'John, + +I saw it. Very interesting. + +Vince + + + + + +"John H Herbert" on 07/28/2000 08:38:08 AM +To: "Vince J Kaminski" +cc: +Subject: Light reading - see pieces beginning on page 7 + + +Cheers and have a nice weekend, + + +JHHerbert + + + + + - gd000728.pdf + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('matthew.lenhart@enron.com', 'mmmarcantel@equiva.com', 'RE:', 'i will try to line up a pig for you '); +INSERT INTO email([from],[to],subject,body) VALUES('jae.black@enron.com', 'claudette.harvey@enron.com, chaun.roberts@enron.com, judy.martinez@enron.com,', 'Disaster Recovery Equipment', 'As a reminder...there are several pieces of equipment that are set up on the 30th Floor, as well as on our floor, for the Disaster Recovery Team. PLEASE DO NOT TAKE, BORROW OR USE this equipment. Should you need to use another computer system, other than yours, or make conference calls please work with your Assistant to help find or set up equipment for you to use. + +Thanks for your understanding in this matter. + +T.Jae Black +East Power Trading +Assistant to Kevin Presto +off. 713-853-5800 +fax 713-646-8272 +cell 713-539-4760'); +INSERT INTO email([from],[to],subject,body) VALUES('eric.bass@enron.com', 'dale.neuner@enron.com', '5 X 24', 'Dale, + +Have you heard anything more on the 5 X 24s? We would like to get this +product out ASAP. + + +Thanks, + +Eric'); +INSERT INTO email([from],[to],subject,body) VALUES('messenger@smartreminders.com', 'm..tholt@enron.com', '10% Coupon - PrintPal Printer Cartridges - 100% Guaranteed', '[IMAGE] +[IMAGE][IMAGE][IMAGE] +Dear SmartReminders Member, + [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] + + + + + + + + + + + + + + + + + + + + + +We respect your privacy and are a Certified Participant of the BBBOnLine + Privacy Program. To be removed from future offers,click here. +SmartReminders.com is a permission based service. To unsubscribe click here . '); +INSERT INTO email([from],[to],subject,body) VALUES('benjamin.rogers@enron.com', 'mark.bernstein@enron.com', '', 'The guy you are talking about left CIN under a "cloud of suspicion" sort of +speak. He was the one who got into several bad deals and PPA''s in California +for CIN, thus he left on a bad note. Let me know if you need more detail +than that, I felt this was the type of info you were looking for. Thanks! +Ben'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'michelle.cash@enron.com', 'Expense Report Receipts Not Received', 'Employee Name: Michelle Cash +Report Name: Houston Cellular 8-11-01 +Report Date: 12/13/01 +Report ID: 594D37C9ED2111D5B452 +Submitted On: 12/13/01 + +You are only allowed 2 reports with receipts outstanding. 
Your expense reports will not be paid until you meet this requirement.'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.mara@enron.com', 'ray.alvarez@enron.com, mark.palmer@enron.com, karen.denne@enron.com,', 'CAISO Emergency Motion -- to discontinue market-based rates for', 'FYI. the latest broadside against the generators. + +Sue Mara +Enron Corp. +Tel: (415) 782-7802 +Fax:(415) 782-7854 +----- Forwarded by Susan J Mara/NA/Enron on 06/08/2001 12:24 PM ----- + + + "Milner, Marcie" 06/08/2001 11:13 AM To: "''smara@enron.com''" cc: Subject: CAISO Emergency Motion + + +Sue, did you see this emergency motion the CAISO filed today? Apparently +they are requesting that FERC discontinue market-based rates immediately and +grant refunds plus interest on the difference between cost-based rates and +market revenues received back to May 2000. They are requesting the +commission act within 14 days. Have you heard anything about what they are +doing? + +Marcie + +http://www.caiso.com/docs/2001/06/08/200106081005526469.pdf +'); +INSERT INTO email([from],[to],subject,body) VALUES('fletcher.sturm@enron.com', 'eloy.escobar@enron.com', 'Re: General Brinks Position Meeting', 'Eloy, + +Who is General Brinks? + +Fletch'); +INSERT INTO email([from],[to],subject,body) VALUES('nailia.dindarova@enron.com', 'richard.shapiro@enron.com', 'Documents for Mark Frevert (on EU developments and lessons from', 'Rick, + +Here are the documents that Peter has prepared for Mark Frevert. + +Nailia +---------------------- Forwarded by Nailia Dindarova/LON/ECT on 25/06/2001 +16:36 --------------------------- + + +Nailia Dindarova +25/06/2001 15:36 +To: Michael Brown/Enron@EUEnronXGate +cc: Ross Sankey/Enron@EUEnronXGate, Eric Shaw/ENRON@EUEnronXGate, Peter +Styles/LON/ECT@ECT + +Subject: Documents for Mark Frevert (on EU developments and lessons from +California) + +Michael, + + +These are the documents that Peter promised to give to you for Mark Frevert. +He has now handed them to him in person but asked me to transmit them +electronically to you, as well as Eric and Ross. + +Nailia + + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('peggy.a.kostial@accenture.com', 'dave.samuels@enron.com', 'EOL-Accenture Deal Sheet', 'Dave - + +Attached are our comments and suggested changes. Please call to review. + +On the time line for completion, we have four critical steps to complete: + Finalize market analysis to refine business case, specifically + projected revenue stream + Complete counterparty surveying, including targeting 3 CPs for letters + of intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations + +Joe will come back to us with an updated time line, but it is my +expectation that we are still on the same schedule (we just begun week +three) with possibly a week or so slippage.....contract negotiations will +probably be the critical path. + +We will send our cut at the actual time line here shortly. 
Thanks, + +Peggy + +(See attached file: accenture-dealpoints v2.doc) + - accenture-dealpoints v2.doc '); +INSERT INTO email([from],[to],subject,body) VALUES('thomas.martin@enron.com', 'thomas.martin@enron.com', 'Re: Guadalupe Power Partners LP', '---------------------- Forwarded by Thomas A Martin/HOU/ECT on 03/20/2001 +03:49 PM --------------------------- + + +Thomas A Martin +10/11/2000 03:55 PM +To: Patrick Wade/HOU/ECT@ECT +cc: +Subject: Re: Guadalupe Power Partners LP + +The deal is physically served at Oasis Waha or Oasis Katy and is priced at +either HSC, Waha or Katytailgate GD at buyers option three days prior to +NYMEX close. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('judy.townsend@enron.com', 'dan.junek@enron.com, chris.germany@enron.com', 'Columbia Distribution''s Capacity Available for Release - Sum', '---------------------- Forwarded by Judy Townsend/HOU/ECT on 03/09/2001 11:04 +AM --------------------------- + + +agoddard@nisource.com on 03/08/2001 09:16:57 AM +To: " - *Koch, Kent" , " - +*Millar, Debra" , " - *Burke, Lynn" + +cc: " - *Heckathorn, Tom" +Subject: Columbia Distribution''s Capacity Available for Release - Sum + + +Attached is Columbia Distribution''s notice of capacity available for release +for +the summer of 2001 (Apr. 2001 through Oct. 2001). + +Please note that the deadline for bids is 3:00pm EST on March 20, 2001. + +If you have any questions, feel free to contact any of the representatives +listed +at the bottom of the attachment. + +Aaron Goddard + + + + + - 2001Summer.doc +'); +INSERT INTO email([from],[to],subject,body) VALUES('rhonda.denton@enron.com', 'tim.belden@enron.com, dana.davis@enron.com, genia.fitzgerald@enron.com,', 'Split Rock Energy LLC', 'We have received the executed EEI contract from this CP dated 12/12/2000. +Copies will be distributed to Legal and Credit.'); +INSERT INTO email([from],[to],subject,body) VALUES('kerrymcelroy@dwt.com', 'jack.speer@alcoa.com, crow@millernash.com, michaelearly@earthlink.net,', 'Oral Argument Request', ' - Oral Argument Request.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('mike.carson@enron.com', 'rlmichaelis@hormel.com', '', 'Did you come in town this wk end..... My new number at our house is : +713-668-3712...... my cell # is 281-381-7332 + +the kid'); +INSERT INTO email([from],[to],subject,body) VALUES('cooper.richey@enron.com', 'trycooper@hotmail.com', 'FW: Contact Info', ' + +-----Original Message----- +From: Punja, Karim +Sent: Thursday, December 13, 2001 2:35 PM +To: Richey, Cooper +Subject: Contact Info + + +Cooper, + +Its been a real pleasure working with you (even though it was for only a small amount of time) +I hope we can stay in touch. + +Home# 234-0249 +email: kpunja@hotmail.com + +Take Care, + +Karim. + '); +INSERT INTO email([from],[to],subject,body) VALUES('bjm30@earthlink.net', 'mcguinn.k@enron.com, mcguinn.ian@enron.com, mcguinn.stephen@enron.com,', 'email address change', 'Hello all. + +I haven''t talked to many of you via email recently but I do want to give you +my new address for your email file: + + bjm30@earthlink.net + +I hope all is well. + +Brian McGuinn'); +INSERT INTO email([from],[to],subject,body) VALUES('shelley.corman@enron.com', 'steve.hotte@enron.com', 'Flat Panels', 'Can you please advise what is going on with the flat panels that we had planned to distribute to our gas logistics team. It was in the budget and we had the okay, but now I''m hearing there is some hold-up & the units are stored on 44. 
+ +Shelley'); +INSERT INTO email([from],[to],subject,body) VALUES('sara.davidson@enron.com', 'john.schwartzenburg@enron.com, scott.dieball@enron.com, recipients@enron.com,', '2001 Enron Law Conference (Distribution List 2)', ' Enron Law Conference + +San Antonio, Texas May 2-4, 2001 Westin Riverwalk + + See attached memo for more details!! + + +? Registration for the law conference this year will be handled through an +Online RSVP Form on the Enron Law Conference Website at +http://lawconference.corp.enron.com. The website is still under construction +and will not be available until Thursday, March 15, 2001. + +? We will send you another e-mail to confirm when the Law Conference Website +is operational. + +? Please complete the Online RSVP Form as soon as it is available and submit +it no later than Friday, March 30th. + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('tori.kuykendall@enron.com', 'heath.b.taylor@accenture.com', 'Re:', 'hey - thats funny about john - he definitely remembers him - i''ll call pat +and let him know - we are coming on saturday - i just havent had a chance to +call you guys back -- looking forward to it -- i probably need the +directions again though'); +INSERT INTO email([from],[to],subject,body) VALUES('darron.giron@enron.com', 'bryce.baxter@enron.com', 'Re: Feedback for Audrey Cook', 'Bryce, + +I''ll get it done today. + +DG 3-9573 + + + + + + From: Bryce Baxter 06/12/2000 07:15 PM + + +To: Darron C Giron/HOU/ECT@ECT +cc: +Subject: Feedback for Audrey Cook + +You were identified as a reviewer for Audrey Cook. If possible, could you +complete her feedback by end of business Wednesday? It will really help me +in the PRC process to have your input. Thanks. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('casey.evans@enron.com', 'stephanie.sever@enron.com', 'Gas EOL ID', 'Stephanie, + +In conjunction with the recent movement of several power traders, they are changing the names of their gas books as well. The names of the new gas books and traders are as follows: + +PWR-NG-LT-SPP: Mike Carson +PWR-NG-LT-SERC: Jeff King + +If you need to know their power desk to map their ID to their gas books, those desks are as follows: + +EPMI-LT-SPP: Mike Carson +EPMI-LT-SERC: Jeff King + +I will be in training this afternoon, but will be back when class is over. Let me know if you have any questions. + +Thanks for your help! +Casey'); +INSERT INTO email([from],[to],subject,body) VALUES('darrell.schoolcraft@enron.com', 'david.roensch@enron.com, kimberly.watson@enron.com, michelle.lokay@enron.com,', 'Postings', 'Please see the attached. + + +ds + + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('mcominsky@aol.com', 'cpatman@bracepatt.com, james_derrick@enron.com', 'Jurisprudence Luncheon', 'Carrin & Jim -- + +It was an honor and a pleasure to meet both of you yesterday. I know we will +have fun working together on this very special event. + +Jeff left the jurisprudence luncheon lists for me before he left on vacation. + I wasn''t sure whether he transmitted them to you as well. Would you please +advise me if you would like them sent to you? I can email the MS Excel files +or I can fax the hard copies to you. Please advise what is most convenient. + +I plan to be in town through the holidays and can be reached by phone, email, +or cell phone at any time. My cell phone number is 713/705-4829. + +Thanks again for your interest in the ADL''s work. Martin. + +Martin B. 
Cominsky +Director, Southwest Region +Anti-Defamation League +713/627-3490, ext. 122 +713/627-2011 (fax) +MCominsky@aol.com'); +INSERT INTO email([from],[to],subject,body) VALUES('phillip.love@enron.com', 'todagost@utmb.edu, gbsonnta@utmb.edu', 'New President', 'I had a little bird put a word in my ear. Is there any possibility for Ben +Raimer to be Bush''s secretary of HHS? Just curious about that infamous UTMB +rumor mill. Hope things are well, happy holidays. +PL'); +INSERT INTO email([from],[to],subject,body) VALUES('marie.heard@enron.com', 'ehamilton@fna.com', 'ISDA Master Agreement', 'Erin: + +Pursuant to your request, attached are the Schedule to the ISDA Master Agreement, together with Paragraph 13 to the ISDA Credit Support Annex. Please let me know if you need anything else. We look forward to hearing your comments. + +Marie + +Marie Heard +Senior Legal Specialist +Enron North America Corp. +Phone: (713) 853-3907 +Fax: (713) 646-3490 +marie.heard@enron.com + + '); +INSERT INTO email([from],[to],subject,body) VALUES('andrea.ring@enron.com', 'beverly.beaty@enron.com', 'Re: Tennessee Buy - Louis Dreyfus', 'Beverly - once again thanks so much for your help on this. + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('karolyn.criado@enron.com', 'j..bonin@enron.com, felicia.case@enron.com, b..clapp@enron.com,', 'Price List week of Oct. 8-9, 2001', ' +Please contact me if you have any questions regarding last weeks prices. + +Thank you, +Karolyn Criado +3-9441 + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.presto@enron.com', 'edward.baughman@enron.com, billy.braddock@enron.com', 'Associated', 'Please begin working on filling our Associated short position in 02. I would like to take this risk off the books. + +In addition, please find out what a buy-out of VEPCO would cost us. With Rogers transitioning to run our retail risk management, I would like to clean up our customer positions. + +We also need to continue to explore a JEA buy-out. + +Thanks.'); +INSERT INTO email([from],[to],subject,body) VALUES('stacy.dickson@enron.com', 'gregg.penman@enron.com', 'RE: Constellation TC 5-7-01', 'Gregg, + +I am at home with a sick baby. (Lots of fun!) I will call you about this +tomorrow. + +Stacy'); +INSERT INTO email([from],[to],subject,body) VALUES('joe.quenet@enron.com', 'dfincher@utilicorp.com', '', 'hey big guy.....check this out..... + + w ww.gorelieberman-2000.com/'); +INSERT INTO email([from],[to],subject,body) VALUES('k..allen@enron.com', 'jacqestc@aol.com', '', 'Jacques, + +I sent you a fax of Kevin Kolb''s comments on the release. The payoff on the note would be $36,248 ($36090(principal) + $158 (accrued interest)). +This is assuming we wrap this up on Tuesday. + +Please email to confirm that their changes are ok so I can set up a meeting on Tuesday to reach closure. + +Phillip'); +INSERT INTO email([from],[to],subject,body) VALUES('kourtney.nelson@enron.com', 'mike.swerzbin@enron.com', 'Adjusted L/R Balance', 'Mike, + +I placed the adjusted L/R Balance on the Enronwest site. It is under the "Staff/Kourtney Nelson". There are two links: + +1) "Adj L_R" is the same data/format from the weekly strategy meeting. +2) "New Gen 2001_2002" link has all of the supply side info that is used to calculate the L/R balance + -Please note the Data Flag column, a value of "3" indicates the project was cancelled, on hold, etc and is not included in the calc. 
+ +Both of these sheets are interactive Excel spreadsheets and thus you can play around with the data as you please. Also, James Bruce is working to get his gen report on the web. That will help with your access to information on new gen. + +Please let me know if you have any questions or feedback, + +Kourtney + + + +Kourtney Nelson +Fundamental Analysis +Enron North America +(503) 464-8280 +kourtney.nelson@enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('d..thomas@enron.com', 'naveed.ahmed@enron.com', 'FW: Current Enron TCC Portfolio', ' + +-----Original Message----- +From: Grace, Rebecca M. +Sent: Monday, December 17, 2001 9:44 AM +To: Thomas, Paul D. +Cc: Cashion, Jim; Allen, Thresa A.; May, Tom +Subject: RE: Current Enron TCC Portfolio + + +Paul, + +I reviewed NY''s list. I agree with all of their contracts numbers and mw amounts. + +Call if you have any more questions. + +Rebecca + + + + -----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:08 AM +To: Grace, Rebecca M. +Subject: FW: Current Enron TCC Portfolio + + << File: enrontccs.xls >> +Rebecca, +Let me know if you see any differences. + +Paul +X 3-0403 +-----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:04 AM +To: Ahmed, Naveed +Subject: FW: Current Enron TCC Portfolio + + + + +-----Original Message----- +From: Thomas, Paul D. +Sent: Thursday, December 13, 2001 10:01 AM +To: Baughman, Edward D. +Subject: Current Enron TCC Portfolio + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('stephanie.panus@enron.com', 'william.bradford@enron.com, debbie.brackett@enron.com,', 'Coastal Merchant Energy/El Paso Merchant Energy', 'Coastal Merchant Energy, L.P. merged with and into El Paso Merchant Energy, +L.P., effective February 1, 2001, with the surviving entity being El Paso +Merchant Energy, L.P. We currently have ISDA Master Agreements with both +counterparties. Please see the attached memo regarding the existing Masters +and let us know which agreement should be terminated. + +Thanks, +Stephanie +'); +INSERT INTO email([from],[to],subject,body) VALUES('kam.keiser@enron.com', 'c..kenne@enron.com', 'RE: What about this too???', ' + + -----Original Message----- +From: Kenne, Dawn C. +Sent: Wednesday, February 06, 2002 11:50 AM +To: Keiser, Kam +Subject: What about this too??? + + + << File: Netco Trader Matrix.xls >> + '); +INSERT INTO email([from],[to],subject,body) VALUES('chris.meyer@enron.com', 'joe.parks@enron.com', 'Centana', 'Talked to Chip. We do need Cash Committe approval given the netting feature of your deal, which means Batch Funding Request. Please update per my previous e-mail and forward. + +Thanks + +chris +x31666'); +INSERT INTO email([from],[to],subject,body) VALUES('debra.perlingiere@enron.com', 'jworman@academyofhealth.com', '', 'Have a great weekend! Happy Fathers Day! + + +Debra Perlingiere +Enron North America Corp. +1400 Smith Street, EB 3885 +Houston, Texas 77002 +dperlin@enron.com +Phone 713-853-7658 +Fax 713-646-3490'); +INSERT INTO email([from],[to],subject,body) VALUES('outlook.team@enron.com', '', 'Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia &', 'CALENDAR ENTRY: APPOINTMENT + +Description: + Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia & Dir Rpts. 
- 4102 + +Date: 1/5/2001 +Time: 9:00 AM - 10:00 AM (Central Standard Time) + +Chairperson: Outlook Migration Team + +Detailed Description:'); +INSERT INTO email([from],[to],subject,body) VALUES('diana.seifert@enron.com', 'mark.taylor@enron.com', 'Guest access Chile', 'Hello Mark, + +Justin Boyd told me that your can help me with questions regarding Chile. +We got a request for guest access through MG. +The company is called Escondida and is a subsidiary of BHP Australia. + +Please advise if I can set up a guest account or not. +F.Y.I.: MG is planning to put a "in w/h Chile" contract for Copper on-line as +soon as Enron has done the due diligence for this country. +Thanks ! + + +Best regards + +Diana Seifert +EOL PCG'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'mark.whitt@enron.com', '<> - 121001', 'The Approval status has changed on the following report: + +Status last changed by: Barry L. Tycholiz +Expense Report Name: 121001 +Report Total: $198.98 +Amount Due Employee: $198.98 +Amount Approved: $198.98 +Amount Paid: $0.00 +Approval Status: Approved +Payment Status: Pending + + +To review this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.hyatt@enron.com', '', 'Technical Support', 'Outside the U.S., please refer to the list below: + +Australia: +1800 678-515 +support@palm-au.com + +Canada: +1905 305-6530 +support@palm.com + +New Zealand: +0800 446-398 +support@palm-nz.com + +U.K.: +0171 867 0108 +eurosupport@palm.3com.com + +Please refer to the Worldwide Customer Support card for a complete technical support contact list.'); +INSERT INTO email([from],[to],subject,body) VALUES('geoff.storey@enron.com', 'dutch.quigley@enron.com', 'RE:', 'duke contact? + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 10:14 AM +To: Storey, Geoff +Subject: RE: + +bp corp Albert LaMore 281-366-4962 + +running the reports now + + + -----Original Message----- +From: Storey, Geoff +Sent: Wednesday, October 31, 2001 10:10 AM +To: Quigley, Dutch +Subject: RE: + +give me a contact over there too +BP + + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 9:42 AM +To: Storey, Geoff +Subject: + +Coral Jeff Whitnah 713-767-5374 +Relaint Steve McGinn 713-207-4000'); +INSERT INTO email([from],[to],subject,body) VALUES('pete.davis@enron.com', 'pete.davis@enron.com', 'Start Date: 4/22/01; HourAhead hour: 3; ', 'Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING FILE -->> O:\Portland\WestDesk\California Scheduling\ISO Final +Schedules\2001042203.txt + +---- Load Schedule ---- +$$$ Variance found in table tblLoads. + Details: (Hour: 3 / Preferred: 1.92 / Final: 1.89) + TRANS_TYPE: FINAL + LOAD_ID: PGE4 + MKT_TYPE: 2 + TRANS_DATE: 4/22/01 + SC_ID: EPMI + +'); +INSERT INTO email([from],[to],subject,body) VALUES('john.postlethwaite@enron.com', 'john.zufferli@enron.com', 'Reference', 'John, hope things are going well up there for you. The big day is almost here for you and Jessica. I was wondering if I could use your name as a job reference if need be. I am just trying to get everything in order just in case something happens. + +John'); +INSERT INTO email([from],[to],subject,body) VALUES('jeffrey.shankman@enron.com', 'lschiffm@jonesday.com', 'Re:', 'I saw you called on the cell this a.m. 
Sorry I missed you. (I was in the +shower). I have had a shitty week--I suspect my silence (not only to you, +but others) after our phone call is a result of the week. I''m seeing Glen at +11:15....talk to you'); +INSERT INTO email([from],[to],subject,body) VALUES('litebytz@enron.com', '', 'Lite Bytz RSVP', ' +This week''s Lite Bytz presentation will feature the following TOOLZ speaker: + +Richard McDougall +Solaris 8 +Thursday, June 7, 2001 + +If you have not already signed up, please RSVP via email to litebytz@enron.com by the end of the day Tuesday, June 5, 2001. + +*Remember: this is now a Brown Bag Event--so bring your lunch and we will provide cookies and drinks. + +Click below for more details. + +http://home.enron.com:84/messaging/litebytztoolzprint.jpg'); + COMMIT; + } +} {} + +############################################################################### +# Everything above just builds an interesting test database. The actual +# tests come after this comment. +############################################################################### + +do_test fts1c-1.2 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark' + } +} {6 17 25 38 40 42 73 74} +do_test fts1c-1.3 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan' + } +} {24 40} +do_test fts1c-1.4 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark susan' + } +} {40} +do_test fts1c-1.5 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan mark' + } +} {40} +do_test fts1c-1.6 { + execsql { + SELECT rowid FROM email WHERE email MATCH '"mark susan"' + } +} {} +do_test fts1c-1.7 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark -susan' + } +} {6 17 25 38 42 73 74} +do_test fts1c-1.8 { + execsql { + SELECT rowid FROM email WHERE email MATCH '-mark susan' + } +} {24} +do_test fts1c-1.9 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark OR susan' + } +} {6 17 24 25 38 40 42 73 74} + +# Some simple tests of the automatic "offsets(email)" column. In the sample +# data set above, only one message, number 20, contains the words +# "gas" and "reminder" in both body and subject. +# +do_test fts1c-2.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts1c-2.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 1 54 8}} +do_test fts1c-2.3 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'body:gas reminder' + } +} {20 {2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts1c-2.4 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE subject MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8}} +do_test fts1c-2.5 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'gas reminder' + } +} {20 {3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} + +# Document 32 contains 5 instances of the world "child". But only +# 3 of them are paired with "product". Make sure only those instances +# that match the phrase appear in the offsets(email) list. 
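+#
+# (Editorial note, not part of the original file: each hit reported by
+# offsets() is a group of four integers: the index of the matching
+# column, the index of the matching query term, the byte offset of the
+# match, and the byte length of the matched term.  In the results
+# above, for example, the group "2 0 42 3" means query term 0 ('gas')
+# was found in column 2 (the subject) at byte offset 42 with length 3;
+# the same convention applies to the phrase tests below.)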
+# +do_test fts1c-3.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'child product' AND +rowid=32 + } +} {32 {3 0 94 5 3 0 114 5 3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7 3 1 493 7}} +do_test fts1c-3.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH '"child product"' + } +} {32 {3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7}} + +# Snippet generator tests +# +do_test fts1c-4.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {{Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder}} +do_test fts1c-4.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'christmas candlelight' + } +} {{... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. ...}} + +do_test fts1c-4.3 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture Deal Sheet ... intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations ...}} +do_test fts1c-4.4 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture <<>> <<>> intent + Review Enron asset base for <<>> <<>>/ licensing + Contract negotiations }} +do_test fts1c-4.5 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'first things' + } +} {{Re: <<>> Polish Deal! Congrats! <<>> seem to be building rapidly now on the }} +do_test fts1c-4.6 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'chris is here' + } +} {{chris.germany@enron.com ... Sounds good to me. I bet this is next to the Warick?? Hotel. ... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. ...}} +do_test fts1c-4.7 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH '"pursuant to"' + } +} {{Erin: + +Pursuant to your request, attached are the Schedule to ...}} +do_test fts1c-4.8 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'ancillary load davis' + } +} {{pete.davis@enron.com ... Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING ...}} + +# Combinations of AND and OR operators: +# +do_test fts1c-5.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'questar enron OR com' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} +do_test fts1c-5.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'enron OR com questar' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1d.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1d.test new file mode 100644 index 0000000..ea23034 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1d.test @@ -0,0 +1,65 @@ +# 2006 October 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS1 module, and in particular +# the Porter stemmer. +# +# $Id: fts1d.test,v 1.1 2006/10/01 18:41:21 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +do_test fts1d-1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts1(content, tokenize porter); + INSERT INTO t1(rowid, content) VALUES(1, 'running and jumping'); + SELECT rowid FROM t1 WHERE content MATCH 'run jump'; + } +} {1} +do_test fts1d-1.2 { + execsql { + SELECT snippet(t1) FROM t1 WHERE t1 MATCH 'run jump'; + } +} {{running and jumping}} +do_test fts1d-1.3 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(2, 'abcdefghijklmnopqrstuvwyxz'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts1d-1.4 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijXXXXqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts1d-1.5 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(3, 'The value is 123456789'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123789' + } +} {3 {The value is 123456789}} +do_test fts1d-1.6 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123000000789' + } +} {3 {The value is 123456789}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1e.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1e.test new file mode 100644 index 0000000..479cfac --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1e.test @@ -0,0 +1,85 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing deletions in the FTS1 module. +# +# $Id: fts1e.test,v 1.1 2006/10/19 23:28:35 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 30 INSERT and +# DELETE statements, so that we'll test both the segmentMerge() merge +# (over the first 16) and the termSelect() merge (over the level-1 +# segment and 14 level-0 segments). 
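+#
+# (Editorial sketch, not part of the original test file: the scheme
+# described above derives the content of each row from the set bits of
+# its rowid, naming bit 0 'one', bit 1 'two', and so on.  For example
+# rowid 11 is binary 1011, so bits 0, 1 and 3 are set and the content
+# is 'one two four'.  The helper below, with the hypothetical name
+# bits_to_words, is one way to compute that mapping; it is purely
+# illustrative and is never called by the tests.)
+proc bits_to_words {rowid} {
+  set names {one two three four five}
+  set words {}
+  for {set i 0} {$i < [llength $names]} {incr i} {
+    # Bit $i of the rowid selects the ordinal name for position $i.
+    if {$rowid & (1 << $i)} { lappend words [lindex $names $i] }
+  }
+  return [join $words " "]
+}
+# bits_to_words 11  => "one two four"
+# bits_to_words 5   => "one three"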
+db eval { + CREATE VIRTUAL TABLE t1 USING fts1(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + DELETE FROM t1 WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + DELETE FROM t1 WHERE rowid = 22; +} + +do_test fts1f-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {14} + +do_test fts1e-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {3 5 9 11 15 17 21} + +do_test fts1e-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 11 14 15 18} + +do_test fts1e-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {5 6 12 14 15 20 21} + +do_test fts1e-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {8 9 11 12 14 15} + +do_test fts1e-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {17 18 20 21} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1f.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1f.test new file mode 100644 index 0000000..19dea0a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1f.test @@ -0,0 +1,90 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing updates in the FTS1 module. +# +# $Id: fts1f.test,v 1.2 2007/02/23 00:14:06 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 31 INSERT, +# UPDATE, and DELETE statements, so that we'll test both the +# segmentMerge() merge (over the first 16) and the termSelect() merge +# (over the level-1 segment and 15 level-0 segments). 
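+#
+# (Editorial note, not part of the original file: the 31 statements
+# below break down as 22 INSERTs, 3 UPDATEs and 6 DELETEs, so
+# 22 - 6 = 16 rows remain, which is the count that fts1f-1.1 checks.)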
+db eval { + CREATE VIRTUAL TABLE t1 USING fts1(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + UPDATE t1 SET content = 'update one three' WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + UPDATE t1 SET content = 'update two five' WHERE rowid = 8; + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + UPDATE t1 SET content = 'update' WHERE rowid = 15; +} + +do_test fts1f-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {16} + +do_test fts1f-2.0 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'update'} +} {1 8 15} + +do_test fts1f-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 9 11 17 21} + +do_test fts1f-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 8 11 14 18 22} + +do_test fts1f-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {1 5 6 12 14 20 21 22} + +do_test fts1f-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {9 11 12 14} + +do_test fts1f-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {8 17 18 20 21 22} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1i.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1i.test new file mode 100644 index 0000000..803b93b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1i.test @@ -0,0 +1,88 @@ +# 2007 January 17 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite fts1 library. The +# focus here is testing handling of UPDATE when using UTF-16-encoded +# databases. +# +# $Id: fts1i.test,v 1.2 2007/01/24 03:43:20 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +# NOTE(shess) Copied from capi3.test. 
+proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +db eval { + PRAGMA encoding = "UTF-16le"; + CREATE VIRTUAL TABLE t1 USING fts1(content); +} + +do_test fts1i-1.0 { + execsql {PRAGMA encoding} +} {UTF-16le} + +do_test fts1i-1.1 { + execsql {INSERT INTO t1 (rowid, content) VALUES(1, 'one')} + execsql {SELECT content FROM t1 WHERE rowid = 1} +} {one} + +do_test fts1i-1.2 { + set sql "INSERT INTO t1 (rowid, content) VALUES(2, 'two')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 2} +} {two} + +do_test fts1i-1.3 { + set sql "INSERT INTO t1 (rowid, content) VALUES(3, 'three')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'trois' WHERE rowid = 3" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 3} +} {trois} + +do_test fts1i-1.4 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(4, 'four')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 4} +} {four} + +do_test fts1i-1.5 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(5, 'five')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'cinq' WHERE rowid = 5" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 5} +} {cinq} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1j.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1j.test new file mode 100644 index 0000000..5ff0d0e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1j.test @@ -0,0 +1,89 @@ +# 2007 February 6 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# tests creating fts1 tables in an attached database. +# +# $Id: fts1j.test,v 1.1 2007/02/07 01:01:18 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Clean up anything left over from a previous pass. +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db + +db eval { + CREATE VIRTUAL TABLE t3 USING fts1(content); + INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); +} + +db2 eval { + CREATE VIRTUAL TABLE t1 USING fts1(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# This has always worked because the t1_* tables used by fts1 will be +# the defaults. +do_test fts1j-1.1 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + SELECT rowid FROM t1 WHERE t1 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +# Make certain we're detached if there was an error. +catch {db eval {DETACH DATABASE two}} + +# In older code, this appears to work fine, but the t2_* tables used +# by fts1 will be created in database 'main' instead of database +# 'two'. 
It appears to work fine because the tables end up being the +# defaults, but obviously is badly broken if you hope to use things +# other than in the exact same ATTACH setup. +do_test fts1j-1.2 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + CREATE VIRTUAL TABLE two.t2 USING fts1(content); + INSERT INTO t2 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t2 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t2 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM t2 WHERE t2 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +catch {db eval {DETACH DATABASE two}} + +# In older code, this broke because the fts1 code attempted to create +# t3_* tables in database 'main', but they already existed. Normally +# this wouldn't happen without t3 itself existing, in which case the +# fts1 code would never be called in the first place. +do_test fts1j-1.3 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + + CREATE VIRTUAL TABLE two.t3 USING fts1(content); + INSERT INTO two.t3 (rowid, content) VALUES(2, "hello there"); + INSERT INTO two.t3 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM two.t3 WHERE t3 MATCH 'hello'; + + DETACH DATABASE two; + } db2 +} {2} +catch {db eval {DETACH DATABASE two}} + +catch {db2 close} +file delete -force test2.db + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1k.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1k.test new file mode 100644 index 0000000..2fffa41 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1k.test @@ -0,0 +1,69 @@ +# 2007 March 28 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing isspace/isalnum/tolower problems with the +# FTS1 module. Unfortunately, this code isn't a really principled set +# of tests, because it's impossible to know where new uses of these +# functions might appear. +# +# $Id: fts1k.test,v 1.1 2007/03/29 16:30:41 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Tests that startsWith() (calls isspace, tolower, isalnum) can handle +# hi-bit chars. parseSpec() also calls isalnum here. +do_test fts1k-1.1 { + execsql "CREATE VIRTUAL TABLE t1 USING fts1(content, \x80)" +} {} + +# Additionally tests isspace() call in getToken(), and isalnum() call +# in tokenListToIdList(). +do_test fts1k-1.2 { + catch { + execsql "CREATE VIRTUAL TABLE t2 USING fts1(content, tokenize \x80)" + } + sqlite3_errmsg $DB +} "unknown tokenizer: \x80" + +# Additionally test final isalnum() in startsWith(). +do_test fts1k-1.3 { + execsql "CREATE VIRTUAL TABLE t3 USING fts1(content, tokenize\x80)" +} {} + +# The snippet-generation code has calls to isspace() which are sort of +# hard to get to. It finds convenient breakpoints by starting ~40 +# chars before and after the matched term, and scanning ~10 chars +# around that position for isspace() characters. The long word with +# embedded hi-bit chars causes one of these isspace() calls to be +# exercised. The version with a couple extra spaces should cause the +# other isspace() call to be exercised. [Both cases have been tested +# in the debugger, but I'm hoping to continue to catch it if simple +# constant changes change things slightly. 
+# +# The trailing and leading hi-bit chars help with code which tests for +# isspace() to coalesce multiple spaces. + +set word "\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80" +set phrase1 "$word $word $word target $word $word $word" +set phrase2 "$word $word $word target $word $word $word" + +db eval {CREATE VIRTUAL TABLE t4 USING fts1(content)} +db eval "INSERT INTO t4 (content) VALUES ('$phrase1')" +db eval "INSERT INTO t4 (content) VALUES ('$phrase2')" + +do_test fts1k-1.4 { + execsql {SELECT rowid, length(snippet(t4)) FROM t4 WHERE t4 MATCH 'target'} +} {1 111 2 117} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1l.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1l.test new file mode 100644 index 0000000..924be33 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1l.test @@ -0,0 +1,65 @@ +# 2007 April 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. fts1 +# DELETE handling assumed all fields were non-null. This was not +# the intention at all. +# +# $Id: fts1l.test,v 1.1 2007/04/09 20:45:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(col_a, col_b); + + INSERT INTO t1(rowid, col_a, col_b) VALUES(1, 'testing', 'testing'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(2, 'only a', null); + INSERT INTO t1(rowid, col_a, col_b) VALUES(3, null, 'only b'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(4, null, null); +} + +do_test fts1m-1.0 { + execsql { + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {2 2 4} + +do_test fts1m-1.1 { + execsql { + DELETE FROM t1 WHERE rowid = 1; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {1 1 3} + +do_test fts1m-1.2 { + execsql { + DELETE FROM t1 WHERE rowid = 2; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 1 2} + +do_test fts1m-1.3 { + execsql { + DELETE FROM t1 WHERE rowid = 3; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 1} + +do_test fts1m-1.4 { + execsql { + DELETE FROM t1 WHERE rowid = 4; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1m.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1m.test new file mode 100644 index 0000000..c2f8f91 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1m.test @@ -0,0 +1,50 @@ +# 2007 July 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the FTS1 module, specifically snippet +# generation. Extracted from fts2o.test. +# +# $Id: fts1m.test,v 1.1 2007/07/25 00:25:20 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is not defined, omit this file. 
+ifcapable !fts1 { + finish_test + return +} + +#--------------------------------------------------------------------- +# These tests, fts1m-1.*, test that ticket #2429 is fixed. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); +} +do_test fts1m-1.1 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE c MATCH 'four'; + } +} {1 {one four two}} +do_test fts1m-1.2 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE b MATCH 'four'; + } +} {1 {one four}} +do_test fts1m-1.3 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE a MATCH 'four'; + } +} {1 {one three four}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1n.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1n.test new file mode 100644 index 0000000..2f102b4 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1n.test @@ -0,0 +1,45 @@ +# 2007 July 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the FTS1 module for errors in the handling +# of SQLITE_SCHEMA. +# +# $Id: fts1n.test,v 1.1 2007/07/25 00:38:06 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is not defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +do_test fts1m-1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts1(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two}} + +# This test was crashing at one point. +# +do_test fts1m-1.2 { + execsql { + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + CREATE TABLE t3(a, b, c); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two} {one three four} {one four} {one two}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1o.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1o.test new file mode 100644 index 0000000..92666c6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1o.test @@ -0,0 +1,138 @@ +# 2007 July 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the FTS1 module rename functionality. Mostly +# copied from fts2o.test. +# +# $Id: fts1o.test,v 1.2 2007/08/30 20:01:33 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is not defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); +} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts1 table. 
+# +do_test fts1o-1.1 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {t1 t1_content t1_term} +do_test fts1o-1.2 { + execsql { ALTER TABLE t1 RENAME to fts_t1; } +} {} +do_test fts1o-1.3 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts1o-1.4 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_term} + +# See what happens when renaming the fts1 table fails. +# +do_test fts1o-2.1 { + catchsql { + CREATE TABLE t1_term(a, b, c); + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts1o-2.2 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts1o-2.3 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_term t1_term} + +# See what happens when renaming the fts1 table fails inside a transaction. +# +do_test fts1o-3.1 { + execsql { + BEGIN; + INSERT INTO fts_t1(a, b, c) VALUES('one two three', 'one four', 'one two'); + } +} {} +do_test fts1o-3.2 { + catchsql { + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +# NOTE(shess) rowid AS rowid to defeat caching. Otherwise, this +# seg-faults, I suspect that there's something up with a stale +# virtual-table reference, but I'm not quite sure how it happens here +# but not for fts2o.test. +do_test fts1o-3.3 { + execsql { SELECT rowid AS rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts1o-3.4 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_term t1_term} +do_test fts1o-3.5 { + execsql COMMIT + execsql {SELECT a FROM fts_t1} +} {{one three four} {one two three}} +do_test fts1o-3.6 { + execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } +} {{one three four} {one four} {one four two}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts1 table in an attached +# database. 
+# +file delete -force test2.db test2.db-journal + +do_test fts1o-4.1 { + execsql { + DROP TABLE t1_term; + ALTER TABLE fts_t1 RENAME to t1; + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one four two} {one two three} {one four} {one two}} + +do_test fts1o-4.2 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.t1 USING fts1(a, b, c); + INSERT INTO aux.t1(a, b, c) VALUES( + 'neung song sahm', 'neung see', 'neung see song' + ); + } +} {} + +do_test fts1o-4.3 { + execsql { SELECT a, b, c FROM aux.t1 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts1o-4.4 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one four two} {one two three} {one four} {one two}} + +do_test fts1o-4.5 { + execsql { ALTER TABLE aux.t1 RENAME TO t2 } +} {} + +do_test fts1o-4.6 { + execsql { SELECT a, b, c FROM t2 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts1o-4.7 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one four two} {one two three} {one four} {one two}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts1porter.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1porter.test new file mode 100644 index 0000000..0ca87a0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts1porter.test @@ -0,0 +1,23590 @@ +# 2006 October 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS1 module, and in particular +# the Porter stemmer. +# +# $Id: fts1porter.test,v 1.5 2006/10/03 19:37:37 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS1 is defined, omit this file. +ifcapable !fts1 { + finish_test + return +} + +# Test data for the Porter stemmer. The first word of each line +# is the input. The second word is the desired output. +# +# This test data is taken from http://www.tartarus.org/martin/PorterStemmer/ +# There is no claim of copyright made on that page, but you should +# probably contact the author (Martin Porter - the inventor of the +# Porter Stemmer algorithm) if you want to use this test data in a +# commerical product of some kind. The stemmer code in FTS1 is a +# complete rewrite from scratch based on the algorithm specification +# and does not contain any code under copyright. 
+# +set porter_test_data { + a a + aaron aaron + abaissiez abaissiez + abandon abandon + abandoned abandon + abase abas + abash abash + abate abat + abated abat + abatement abat + abatements abat + abates abat + abbess abbess + abbey abbei + abbeys abbei + abbominable abbomin + abbot abbot + abbots abbot + abbreviated abbrevi + abed ab + abel abel + aberga aberga + abergavenny abergavenni + abet abet + abetting abet + abhominable abhomin + abhor abhor + abhorr abhorr + abhorred abhor + abhorring abhor + abhors abhor + abhorson abhorson + abide abid + abides abid + abilities abil + ability abil + abject abject + abjectly abjectli + abjects abject + abjur abjur + abjure abjur + able abl + abler abler + aboard aboard + abode abod + aboded abod + abodements abod + aboding abod + abominable abomin + abominably abomin + abominations abomin + abortive abort + abortives abort + abound abound + abounding abound + about about + above abov + abr abr + abraham abraham + abram abram + abreast abreast + abridg abridg + abridge abridg + abridged abridg + abridgment abridg + abroach abroach + abroad abroad + abrogate abrog + abrook abrook + abrupt abrupt + abruption abrupt + abruptly abruptli + absence absenc + absent absent + absey absei + absolute absolut + absolutely absolut + absolv absolv + absolver absolv + abstains abstain + abstemious abstemi + abstinence abstin + abstract abstract + absurd absurd + absyrtus absyrtu + abundance abund + abundant abund + abundantly abundantli + abus abu + abuse abus + abused abus + abuser abus + abuses abus + abusing abus + abutting abut + aby abi + abysm abysm + ac ac + academe academ + academes academ + accent accent + accents accent + accept accept + acceptable accept + acceptance accept + accepted accept + accepts accept + access access + accessary accessari + accessible access + accidence accid + accident accid + accidental accident + accidentally accident + accidents accid + accite accit + accited accit + accites accit + acclamations acclam + accommodate accommod + accommodated accommod + accommodation accommod + accommodations accommod + accommodo accommodo + accompanied accompani + accompany accompani + accompanying accompani + accomplices accomplic + accomplish accomplish + accomplished accomplish + accomplishing accomplish + accomplishment accomplish + accompt accompt + accord accord + accordant accord + accorded accord + accordeth accordeth + according accord + accordingly accordingli + accords accord + accost accost + accosted accost + account account + accountant account + accounted account + accounts account + accoutred accoutr + accoutrement accoutr + accoutrements accoutr + accrue accru + accumulate accumul + accumulated accumul + accumulation accumul + accurs accur + accursed accurs + accurst accurst + accus accu + accusation accus + accusations accus + accusative accus + accusativo accusativo + accuse accus + accused accus + accuser accus + accusers accus + accuses accus + accuseth accuseth + accusing accus + accustom accustom + accustomed accustom + ace ac + acerb acerb + ache ach + acheron acheron + aches ach + achiev achiev + achieve achiev + achieved achiev + achievement achiev + achievements achiev + achiever achiev + achieves achiev + achieving achiev + achilles achil + aching ach + achitophel achitophel + acknowledg acknowledg + acknowledge acknowledg + acknowledged acknowledg + acknowledgment acknowledg + acknown acknown + acold acold + aconitum aconitum + acordo acordo + acorn acorn + acquaint acquaint + acquaintance acquaint + acquainted 
acquaint + acquaints acquaint + acquir acquir + acquire acquir + acquisition acquisit + acquit acquit + acquittance acquitt + acquittances acquitt + acquitted acquit + acre acr + acres acr + across across + act act + actaeon actaeon + acted act + acting act + action action + actions action + actium actium + active activ + actively activ + activity activ + actor actor + actors actor + acts act + actual actual + acture actur + acute acut + acutely acut + ad ad + adage adag + adallas adalla + adam adam + adamant adam + add add + added ad + adder adder + adders adder + addeth addeth + addict addict + addicted addict + addiction addict + adding ad + addition addit + additions addit + addle addl + address address + addressing address + addrest addrest + adds add + adhere adher + adheres adher + adieu adieu + adieus adieu + adjacent adjac + adjoin adjoin + adjoining adjoin + adjourn adjourn + adjudg adjudg + adjudged adjudg + adjunct adjunct + administer administ + administration administr + admir admir + admirable admir + admiral admir + admiration admir + admire admir + admired admir + admirer admir + admiring admir + admiringly admiringli + admission admiss + admit admit + admits admit + admittance admitt + admitted admit + admitting admit + admonish admonish + admonishing admonish + admonishment admonish + admonishments admonish + admonition admonit + ado ado + adonis adoni + adopt adopt + adopted adopt + adoptedly adoptedli + adoption adopt + adoptious adopti + adopts adopt + ador ador + adoration ador + adorations ador + adore ador + adorer ador + adores ador + adorest adorest + adoreth adoreth + adoring ador + adorn adorn + adorned adorn + adornings adorn + adornment adorn + adorns adorn + adown adown + adramadio adramadio + adrian adrian + adriana adriana + adriano adriano + adriatic adriat + adsum adsum + adulation adul + adulterate adulter + adulterates adulter + adulterers adulter + adulteress adulteress + adulteries adulteri + adulterous adulter + adultery adulteri + adultress adultress + advanc advanc + advance advanc + advanced advanc + advancement advanc + advancements advanc + advances advanc + advancing advanc + advantage advantag + advantageable advantag + advantaged advantag + advantageous advantag + advantages advantag + advantaging advantag + advent advent + adventur adventur + adventure adventur + adventures adventur + adventuring adventur + adventurous adventur + adventurously adventur + adversaries adversari + adversary adversari + adverse advers + adversely advers + adversities advers + adversity advers + advertis adverti + advertise advertis + advertised advertis + advertisement advertis + advertising advertis + advice advic + advis advi + advise advis + advised advis + advisedly advisedli + advises advis + advisings advis + advocate advoc + advocation advoc + aeacida aeacida + aeacides aeacid + aedile aedil + aediles aedil + aegeon aegeon + aegion aegion + aegles aegl + aemelia aemelia + aemilia aemilia + aemilius aemiliu + aeneas aenea + aeolus aeolu + aer aer + aerial aerial + aery aeri + aesculapius aesculapiu + aeson aeson + aesop aesop + aetna aetna + afar afar + afear afear + afeard afeard + affability affabl + affable affabl + affair affair + affaire affair + affairs affair + affect affect + affectation affect + affectations affect + affected affect + affectedly affectedli + affecteth affecteth + affecting affect + affection affect + affectionate affection + affectionately affection + affections affect + affects affect + affeer affeer + affianc affianc + affiance 
affianc + affianced affianc + affied affi + affin affin + affined affin + affinity affin + affirm affirm + affirmation affirm + affirmatives affirm + afflict afflict + afflicted afflict + affliction afflict + afflictions afflict + afflicts afflict + afford afford + affordeth affordeth + affords afford + affray affrai + affright affright + affrighted affright + affrights affright + affront affront + affronted affront + affy affi + afield afield + afire afir + afloat afloat + afoot afoot + afore afor + aforehand aforehand + aforesaid aforesaid + afraid afraid + afresh afresh + afric afric + africa africa + african african + afront afront + after after + afternoon afternoon + afterward afterward + afterwards afterward + ag ag + again again + against against + agamemmon agamemmon + agamemnon agamemnon + agate agat + agaz agaz + age ag + aged ag + agenor agenor + agent agent + agents agent + ages ag + aggravate aggrav + aggrief aggrief + agile agil + agincourt agincourt + agitation agit + aglet aglet + agnize agniz + ago ago + agone agon + agony agoni + agree agre + agreed agre + agreeing agre + agreement agreement + agrees agre + agrippa agrippa + aground aground + ague agu + aguecheek aguecheek + agued agu + agueface aguefac + agues agu + ah ah + aha aha + ahungry ahungri + ai ai + aialvolio aialvolio + aiaria aiaria + aid aid + aidance aidanc + aidant aidant + aided aid + aiding aid + aidless aidless + aids aid + ail ail + aim aim + aimed aim + aimest aimest + aiming aim + aims aim + ainsi ainsi + aio aio + air air + aired air + airless airless + airs air + airy airi + ajax ajax + akilling akil + al al + alabaster alabast + alack alack + alacrity alacr + alarbus alarbu + alarm alarm + alarms alarm + alarum alarum + alarums alarum + alas ala + alb alb + alban alban + albans alban + albany albani + albeit albeit + albion albion + alchemist alchemist + alchemy alchemi + alcibiades alcibiad + alcides alcid + alder alder + alderman alderman + aldermen aldermen + ale al + alecto alecto + alehouse alehous + alehouses alehous + alencon alencon + alengon alengon + aleppo aleppo + ales al + alewife alewif + alexander alexand + alexanders alexand + alexandria alexandria + alexandrian alexandrian + alexas alexa + alias alia + alice alic + alien alien + aliena aliena + alight alight + alighted alight + alights alight + aliis alii + alike alik + alisander alisand + alive aliv + all all + alla alla + allay allai + allayed allai + allaying allai + allayment allay + allayments allay + allays allai + allegation alleg + allegations alleg + allege alleg + alleged alleg + allegiance allegi + allegiant allegi + alley allei + alleys allei + allhallowmas allhallowma + alliance allianc + allicholy allicholi + allied alli + allies alli + alligant allig + alligator allig + allons allon + allot allot + allots allot + allotted allot + allottery allotteri + allow allow + allowance allow + allowed allow + allowing allow + allows allow + allur allur + allure allur + allurement allur + alluring allur + allusion allus + ally alli + allycholly allycholli + almain almain + almanac almanac + almanack almanack + almanacs almanac + almighty almighti + almond almond + almost almost + alms alm + almsman almsman + aloes alo + aloft aloft + alone alon + along along + alonso alonso + aloof aloof + aloud aloud + alphabet alphabet + alphabetical alphabet + alphonso alphonso + alps alp + already alreadi + also also + alt alt + altar altar + altars altar + alter alter + alteration alter + altered alter + alters alter + althaea althaea + 
although although + altitude altitud + altogether altogeth + alton alton + alway alwai + always alwai + am am + amaimon amaimon + amain amain + amaking amak + amamon amamon + amaz amaz + amaze amaz + amazed amaz + amazedly amazedli + amazedness amazed + amazement amaz + amazes amaz + amazeth amazeth + amazing amaz + amazon amazon + amazonian amazonian + amazons amazon + ambassador ambassador + ambassadors ambassador + amber amber + ambiguides ambiguid + ambiguities ambigu + ambiguous ambigu + ambition ambit + ambitions ambit + ambitious ambiti + ambitiously ambiti + amble ambl + ambled ambl + ambles ambl + ambling ambl + ambo ambo + ambuscadoes ambuscado + ambush ambush + amen amen + amend amend + amended amend + amendment amend + amends amend + amerce amerc + america america + ames am + amiable amiabl + amid amid + amidst amidst + amiens amien + amis ami + amiss amiss + amities amiti + amity amiti + amnipotent amnipot + among among + amongst amongst + amorous amor + amorously amor + amort amort + amount amount + amounts amount + amour amour + amphimacus amphimacu + ample ampl + ampler ampler + amplest amplest + amplified amplifi + amplify amplifi + amply ampli + ampthill ampthil + amurath amurath + amyntas amynta + an an + anatomiz anatomiz + anatomize anatom + anatomy anatomi + ancestor ancestor + ancestors ancestor + ancestry ancestri + anchises anchis + anchor anchor + anchorage anchorag + anchored anchor + anchoring anchor + anchors anchor + anchovies anchovi + ancient ancient + ancientry ancientri + ancients ancient + ancus ancu + and and + andirons andiron + andpholus andpholu + andren andren + andrew andrew + andromache andromach + andronici andronici + andronicus andronicu + anew anew + ang ang + angel angel + angelica angelica + angelical angel + angelo angelo + angels angel + anger anger + angerly angerli + angers anger + anges ang + angiers angier + angl angl + anglais anglai + angle angl + angler angler + angleterre angleterr + angliae anglia + angling angl + anglish anglish + angrily angrili + angry angri + anguish anguish + angus angu + animal anim + animals anim + animis animi + anjou anjou + ankle ankl + anna anna + annals annal + anne ann + annex annex + annexed annex + annexions annexion + annexment annex + annothanize annothan + announces announc + annoy annoi + annoyance annoy + annoying annoi + annual annual + anoint anoint + anointed anoint + anon anon + another anoth + anselmo anselmo + answer answer + answerable answer + answered answer + answerest answerest + answering answer + answers answer + ant ant + ante ant + antenor antenor + antenorides antenorid + anteroom anteroom + anthem anthem + anthems anthem + anthony anthoni + anthropophagi anthropophagi + anthropophaginian anthropophaginian + antiates antiat + antic antic + anticipate anticip + anticipates anticip + anticipatest anticipatest + anticipating anticip + anticipation anticip + antick antick + anticly anticli + antics antic + antidote antidot + antidotes antidot + antigonus antigonu + antiopa antiopa + antipathy antipathi + antipholus antipholu + antipholuses antipholus + antipodes antipod + antiquary antiquari + antique antiqu + antiquity antiqu + antium antium + antoniad antoniad + antonio antonio + antonius antoniu + antony antoni + antres antr + anvil anvil + any ani + anybody anybodi + anyone anyon + anything anyth + anywhere anywher + ap ap + apace apac + apart apart + apartment apart + apartments apart + ape ap + apemantus apemantu + apennines apennin + apes ap + apiece apiec + apish apish + 
apollinem apollinem + apollo apollo + apollodorus apollodoru + apology apolog + apoplex apoplex + apoplexy apoplexi + apostle apostl + apostles apostl + apostrophas apostropha + apoth apoth + apothecary apothecari + appal appal + appall appal + appalled appal + appals appal + apparel apparel + apparell apparel + apparelled apparel + apparent appar + apparently appar + apparition apparit + apparitions apparit + appeach appeach + appeal appeal + appeals appeal + appear appear + appearance appear + appeared appear + appeareth appeareth + appearing appear + appears appear + appeas appea + appease appeas + appeased appeas + appelant appel + appele appel + appelee appele + appeles appel + appelez appelez + appellant appel + appellants appel + appelons appelon + appendix appendix + apperil apperil + appertain appertain + appertaining appertain + appertainings appertain + appertains appertain + appertinent appertin + appertinents appertin + appetite appetit + appetites appetit + applaud applaud + applauded applaud + applauding applaud + applause applaus + applauses applaus + apple appl + apples appl + appletart appletart + appliance applianc + appliances applianc + applications applic + applied appli + applies appli + apply appli + applying appli + appoint appoint + appointed appoint + appointment appoint + appointments appoint + appoints appoint + apprehend apprehend + apprehended apprehend + apprehends apprehend + apprehension apprehens + apprehensions apprehens + apprehensive apprehens + apprendre apprendr + apprenne apprenn + apprenticehood apprenticehood + appris appri + approach approach + approachers approach + approaches approach + approacheth approacheth + approaching approach + approbation approb + approof approof + appropriation appropri + approv approv + approve approv + approved approv + approvers approv + approves approv + appurtenance appurten + appurtenances appurten + apricocks apricock + april april + apron apron + aprons apron + apt apt + apter apter + aptest aptest + aptly aptli + aptness apt + aqua aqua + aquilon aquilon + aquitaine aquitain + arabia arabia + arabian arabian + araise arais + arbitrate arbitr + arbitrating arbitr + arbitrator arbitr + arbitrement arbitr + arbors arbor + arbour arbour + arc arc + arch arch + archbishop archbishop + archbishopric archbishopr + archdeacon archdeacon + arched arch + archelaus archelau + archer archer + archers archer + archery archeri + archibald archibald + archidamus archidamu + architect architect + arcu arcu + arde ard + arden arden + ardent ardent + ardour ardour + are ar + argal argal + argier argier + argo argo + argosies argosi + argosy argosi + argu argu + argue argu + argued argu + argues argu + arguing argu + argument argument + arguments argument + argus argu + ariachne ariachn + ariadne ariadn + ariel ariel + aries ari + aright aright + arinado arinado + arinies arini + arion arion + arise aris + arises aris + ariseth ariseth + arising aris + aristode aristod + aristotle aristotl + arithmetic arithmet + arithmetician arithmetician + ark ark + arm arm + arma arma + armado armado + armadoes armado + armagnac armagnac + arme arm + armed arm + armenia armenia + armies armi + armigero armigero + arming arm + armipotent armipot + armor armor + armour armour + armourer armour + armourers armour + armours armour + armoury armouri + arms arm + army armi + arn arn + aroint aroint + arose aros + arouse arous + aroused arous + arragon arragon + arraign arraign + arraigned arraign + arraigning arraign + arraignment arraign + arrant 
arrant + arras arra + array arrai + arrearages arrearag + arrest arrest + arrested arrest + arrests arrest + arriv arriv + arrival arriv + arrivance arriv + arrive arriv + arrived arriv + arrives arriv + arriving arriv + arrogance arrog + arrogancy arrog + arrogant arrog + arrow arrow + arrows arrow + art art + artemidorus artemidoru + arteries arteri + arthur arthur + article articl + articles articl + articulate articul + artificer artific + artificial artifici + artillery artilleri + artire artir + artist artist + artists artist + artless artless + artois artoi + arts art + artus artu + arviragus arviragu + as as + asaph asaph + ascanius ascaniu + ascend ascend + ascended ascend + ascendeth ascendeth + ascends ascend + ascension ascens + ascent ascent + ascribe ascrib + ascribes ascrib + ash ash + asham asham + ashamed asham + asher asher + ashes ash + ashford ashford + ashore ashor + ashouting ashout + ashy ashi + asia asia + aside asid + ask ask + askance askanc + asked ask + asker asker + asketh asketh + asking ask + asks ask + aslant aslant + asleep asleep + asmath asmath + asp asp + aspect aspect + aspects aspect + aspen aspen + aspersion aspers + aspic aspic + aspicious aspici + aspics aspic + aspir aspir + aspiration aspir + aspire aspir + aspiring aspir + asquint asquint + ass ass + assail assail + assailable assail + assailant assail + assailants assail + assailed assail + assaileth assaileth + assailing assail + assails assail + assassination assassin + assault assault + assaulted assault + assaults assault + assay assai + assaying assai + assays assai + assemblance assembl + assemble assembl + assembled assembl + assemblies assembl + assembly assembl + assent assent + asses ass + assez assez + assign assign + assigned assign + assigns assign + assinico assinico + assist assist + assistance assist + assistances assist + assistant assist + assistants assist + assisted assist + assisting assist + associate associ + associated associ + associates associ + assuage assuag + assubjugate assubjug + assum assum + assume assum + assumes assum + assumption assumpt + assur assur + assurance assur + assure assur + assured assur + assuredly assuredli + assures assur + assyrian assyrian + astonish astonish + astonished astonish + astraea astraea + astray astrai + astrea astrea + astronomer astronom + astronomers astronom + astronomical astronom + astronomy astronomi + asunder asund + at at + atalanta atalanta + ate at + ates at + athenian athenian + athenians athenian + athens athen + athol athol + athversary athversari + athwart athwart + atlas atla + atomies atomi + atomy atomi + atone aton + atonement aton + atonements aton + atropos atropo + attach attach + attached attach + attachment attach + attain attain + attainder attaind + attains attain + attaint attaint + attainted attaint + attainture attaintur + attempt attempt + attemptable attempt + attempted attempt + attempting attempt + attempts attempt + attend attend + attendance attend + attendant attend + attendants attend + attended attend + attendents attend + attendeth attendeth + attending attend + attends attend + attent attent + attention attent + attentive attent + attentivenes attentiven + attest attest + attested attest + attir attir + attire attir + attired attir + attires attir + attorney attornei + attorneyed attornei + attorneys attornei + attorneyship attorneyship + attract attract + attraction attract + attractive attract + attracts attract + attribute attribut + attributed attribut + attributes attribut + attribution 
attribut + attributive attribut + atwain atwain + au au + aubrey aubrei + auburn auburn + aucun aucun + audacious audaci + audaciously audaci + audacity audac + audible audibl + audience audienc + audis audi + audit audit + auditor auditor + auditors auditor + auditory auditori + audre audr + audrey audrei + aufidius aufidiu + aufidiuses aufidius + auger auger + aught aught + augment augment + augmentation augment + augmented augment + augmenting augment + augurer augur + augurers augur + augures augur + auguring augur + augurs augur + augury auguri + august august + augustus augustu + auld auld + aumerle aumerl + aunchient aunchient + aunt aunt + aunts aunt + auricular auricular + aurora aurora + auspicious auspici + aussi aussi + austere auster + austerely auster + austereness auster + austerity auster + austria austria + aut aut + authentic authent + author author + authorities author + authority author + authorized author + authorizing author + authors author + autolycus autolycu + autre autr + autumn autumn + auvergne auvergn + avail avail + avails avail + avarice avaric + avaricious avarici + avaunt avaunt + ave av + aveng aveng + avenge aveng + avenged aveng + averring aver + avert avert + aves av + avez avez + avis avi + avoid avoid + avoided avoid + avoiding avoid + avoids avoid + avoirdupois avoirdupoi + avouch avouch + avouched avouch + avouches avouch + avouchment avouch + avow avow + aw aw + await await + awaits await + awak awak + awake awak + awaked awak + awaken awaken + awakened awaken + awakens awaken + awakes awak + awaking awak + award award + awards award + awasy awasi + away awai + awe aw + aweary aweari + aweless aweless + awful aw + awhile awhil + awkward awkward + awl awl + awooing awoo + awork awork + awry awri + axe ax + axle axl + axletree axletre + ay ay + aye ay + ayez ayez + ayli ayli + azur azur + azure azur + b b + ba ba + baa baa + babbl babbl + babble babbl + babbling babbl + babe babe + babes babe + babies babi + baboon baboon + baboons baboon + baby babi + babylon babylon + bacare bacar + bacchanals bacchan + bacchus bacchu + bach bach + bachelor bachelor + bachelors bachelor + back back + backbite backbit + backbitten backbitten + backing back + backs back + backward backward + backwardly backwardli + backwards backward + bacon bacon + bacons bacon + bad bad + bade bade + badge badg + badged badg + badges badg + badly badli + badness bad + baes bae + baffl baffl + baffle baffl + baffled baffl + bag bag + baggage baggag + bagot bagot + bagpipe bagpip + bags bag + bail bail + bailiff bailiff + baillez baillez + baily baili + baisant baisant + baisees baise + baiser baiser + bait bait + baited bait + baiting bait + baitings bait + baits bait + bajazet bajazet + bak bak + bake bake + baked bake + baker baker + bakers baker + bakes bake + baking bake + bal bal + balanc balanc + balance balanc + balcony balconi + bald bald + baldrick baldrick + bale bale + baleful bale + balk balk + ball ball + ballad ballad + ballads ballad + ballast ballast + ballasting ballast + ballet ballet + ballow ballow + balls ball + balm balm + balms balm + balmy balmi + balsam balsam + balsamum balsamum + balth balth + balthasar balthasar + balthazar balthazar + bames bame + ban ban + banbury banburi + band band + bandied bandi + banding band + bandit bandit + banditti banditti + banditto banditto + bands band + bandy bandi + bandying bandi + bane bane + banes bane + bang bang + bangor bangor + banish banish + banished banish + banishers banish + banishment banish + banister 
banist + bank bank + bankrout bankrout + bankrupt bankrupt + bankrupts bankrupt + banks bank + banner banner + bannerets banneret + banners banner + banning ban + banns bann + banquet banquet + banqueted banquet + banqueting banquet + banquets banquet + banquo banquo + bans ban + baptism baptism + baptista baptista + baptiz baptiz + bar bar + barbarian barbarian + barbarians barbarian + barbarism barbar + barbarous barbar + barbary barbari + barbason barbason + barbed barb + barber barber + barbermonger barbermong + bard bard + bardolph bardolph + bards bard + bare bare + bared bare + barefac barefac + barefaced barefac + barefoot barefoot + bareheaded barehead + barely bare + bareness bare + barful bar + bargain bargain + bargains bargain + barge barg + bargulus bargulu + baring bare + bark bark + barking bark + barkloughly barkloughli + barks bark + barky barki + barley barlei + barm barm + barn barn + barnacles barnacl + barnardine barnardin + barne barn + barnes barn + barnet barnet + barns barn + baron baron + barons baron + barony baroni + barr barr + barrabas barraba + barrel barrel + barrels barrel + barren barren + barrenly barrenli + barrenness barren + barricado barricado + barricadoes barricado + barrow barrow + bars bar + barson barson + barter barter + bartholomew bartholomew + bas ba + basan basan + base base + baseless baseless + basely base + baseness base + baser baser + bases base + basest basest + bashful bash + bashfulness bash + basilisco basilisco + basilisk basilisk + basilisks basilisk + basimecu basimecu + basin basin + basingstoke basingstok + basins basin + basis basi + bask bask + basket basket + baskets basket + bass bass + bassanio bassanio + basset basset + bassianus bassianu + basta basta + bastard bastard + bastardizing bastard + bastardly bastardli + bastards bastard + bastardy bastardi + basted bast + bastes bast + bastinado bastinado + basting bast + bat bat + batailles batail + batch batch + bate bate + bated bate + bates bate + bath bath + bathe bath + bathed bath + bathing bath + baths bath + bating bate + batler batler + bats bat + batt batt + battalia battalia + battalions battalion + batten batten + batter batter + battering batter + batters batter + battery batteri + battle battl + battled battl + battlefield battlefield + battlements battlement + battles battl + batty batti + bauble baubl + baubles baubl + baubling baubl + baulk baulk + bavin bavin + bawcock bawcock + bawd bawd + bawdry bawdri + bawds bawd + bawdy bawdi + bawl bawl + bawling bawl + bay bai + baying bai + baynard baynard + bayonne bayonn + bays bai + be be + beach beach + beached beach + beachy beachi + beacon beacon + bead bead + beaded bead + beadle beadl + beadles beadl + beads bead + beadsmen beadsmen + beagle beagl + beagles beagl + beak beak + beaks beak + beam beam + beamed beam + beams beam + bean bean + beans bean + bear bear + beard beard + bearded beard + beardless beardless + beards beard + bearer bearer + bearers bearer + bearest bearest + beareth beareth + bearing bear + bears bear + beast beast + beastliest beastliest + beastliness beastli + beastly beastli + beasts beast + beat beat + beated beat + beaten beaten + beating beat + beatrice beatric + beats beat + beau beau + beaufort beaufort + beaumond beaumond + beaumont beaumont + beauteous beauteou + beautied beauti + beauties beauti + beautified beautifi + beautiful beauti + beautify beautifi + beauty beauti + beaver beaver + beavers beaver + became becam + because becaus + bechanc bechanc + bechance bechanc + 
bechanced bechanc + beck beck + beckon beckon + beckons beckon + becks beck + becom becom + become becom + becomed becom + becomes becom + becoming becom + becomings becom + bed bed + bedabbled bedabbl + bedash bedash + bedaub bedaub + bedazzled bedazzl + bedchamber bedchamb + bedclothes bedcloth + bedded bed + bedeck bedeck + bedecking bedeck + bedew bedew + bedfellow bedfellow + bedfellows bedfellow + bedford bedford + bedlam bedlam + bedrench bedrench + bedrid bedrid + beds bed + bedtime bedtim + bedward bedward + bee bee + beef beef + beefs beef + beehives beehiv + been been + beer beer + bees bee + beest beest + beetle beetl + beetles beetl + beeves beev + befall befal + befallen befallen + befalls befal + befell befel + befits befit + befitted befit + befitting befit + befor befor + before befor + beforehand beforehand + befortune befortun + befriend befriend + befriended befriend + befriends befriend + beg beg + began began + beget beget + begets beget + begetting beget + begg begg + beggar beggar + beggared beggar + beggarly beggarli + beggarman beggarman + beggars beggar + beggary beggari + begging beg + begin begin + beginners beginn + beginning begin + beginnings begin + begins begin + begnawn begnawn + begone begon + begot begot + begotten begotten + begrimed begrim + begs beg + beguil beguil + beguile beguil + beguiled beguil + beguiles beguil + beguiling beguil + begun begun + behalf behalf + behalfs behalf + behav behav + behaved behav + behavedst behavedst + behavior behavior + behaviors behavior + behaviour behaviour + behaviours behaviour + behead behead + beheaded behead + beheld beheld + behest behest + behests behest + behind behind + behold behold + beholder behold + beholders behold + beholdest beholdest + beholding behold + beholds behold + behoof behoof + behooffull behoofful + behooves behoov + behove behov + behoves behov + behowls behowl + being be + bel bel + belarius belariu + belch belch + belching belch + beldam beldam + beldame beldam + beldams beldam + belee bele + belgia belgia + belie beli + belied beli + belief belief + beliest beliest + believ believ + believe believ + believed believ + believes believ + believest believest + believing believ + belike belik + bell bell + bellario bellario + belle bell + bellied belli + bellies belli + bellman bellman + bellona bellona + bellow bellow + bellowed bellow + bellowing bellow + bellows bellow + bells bell + belly belli + bellyful belly + belman belman + belmont belmont + belock belock + belong belong + belonging belong + belongings belong + belongs belong + belov belov + beloved belov + beloving belov + below below + belt belt + belzebub belzebub + bemadding bemad + bemet bemet + bemete bemet + bemoan bemoan + bemoaned bemoan + bemock bemock + bemoil bemoil + bemonster bemonst + ben ben + bench bench + bencher bencher + benches bench + bend bend + bended bend + bending bend + bends bend + bene bene + beneath beneath + benedicite benedicit + benedick benedick + benediction benedict + benedictus benedictu + benefactors benefactor + benefice benefic + beneficial benefici + benefit benefit + benefited benefit + benefits benefit + benetted benet + benevolence benevol + benevolences benevol + benied beni + benison benison + bennet bennet + bent bent + bentii bentii + bentivolii bentivolii + bents bent + benumbed benumb + benvolio benvolio + bepaint bepaint + bepray beprai + bequeath bequeath + bequeathed bequeath + bequeathing bequeath + bequest bequest + ber ber + berard berard + berattle berattl + beray berai + 
bere bere + bereave bereav + bereaved bereav + bereaves bereav + bereft bereft + bergamo bergamo + bergomask bergomask + berhym berhym + berhyme berhym + berkeley berkelei + bermoothes bermooth + bernardo bernardo + berod berod + berowne berown + berri berri + berries berri + berrord berrord + berry berri + bertram bertram + berwick berwick + bescreen bescreen + beseech beseech + beseeched beseech + beseechers beseech + beseeching beseech + beseek beseek + beseem beseem + beseemeth beseemeth + beseeming beseem + beseems beseem + beset beset + beshrew beshrew + beside besid + besides besid + besieg besieg + besiege besieg + besieged besieg + beslubber beslubb + besmear besmear + besmeared besmear + besmirch besmirch + besom besom + besort besort + besotted besot + bespake bespak + bespeak bespeak + bespice bespic + bespoke bespok + bespotted bespot + bess bess + bessy bessi + best best + bestained bestain + bested best + bestial bestial + bestir bestir + bestirr bestirr + bestow bestow + bestowed bestow + bestowing bestow + bestows bestow + bestraught bestraught + bestrew bestrew + bestrid bestrid + bestride bestrid + bestrides bestrid + bet bet + betake betak + beteem beteem + bethink bethink + bethought bethought + bethrothed bethroth + bethump bethump + betid betid + betide betid + betideth betideth + betime betim + betimes betim + betoken betoken + betook betook + betossed betoss + betray betrai + betrayed betrai + betraying betrai + betrays betrai + betrims betrim + betroth betroth + betrothed betroth + betroths betroth + bett bett + betted bet + better better + bettered better + bettering better + betters better + betting bet + bettre bettr + between between + betwixt betwixt + bevel bevel + beverage beverag + bevis bevi + bevy bevi + bewail bewail + bewailed bewail + bewailing bewail + bewails bewail + beware bewar + bewasted bewast + beweep beweep + bewept bewept + bewet bewet + bewhored bewhor + bewitch bewitch + bewitched bewitch + bewitchment bewitch + bewray bewrai + beyond beyond + bezonian bezonian + bezonians bezonian + bianca bianca + bianco bianco + bias bia + bibble bibbl + bickerings bicker + bid bid + bidden bidden + bidding bid + biddings bid + biddy biddi + bide bide + bides bide + biding bide + bids bid + bien bien + bier bier + bifold bifold + big big + bigamy bigami + biggen biggen + bigger bigger + bigness big + bigot bigot + bilberry bilberri + bilbo bilbo + bilboes bilbo + bilbow bilbow + bill bill + billeted billet + billets billet + billiards billiard + billing bill + billow billow + billows billow + bills bill + bin bin + bind bind + bindeth bindeth + binding bind + binds bind + biondello biondello + birch birch + bird bird + birding bird + birdlime birdlim + birds bird + birnam birnam + birth birth + birthday birthdai + birthdom birthdom + birthplace birthplac + birthright birthright + birthrights birthright + births birth + bis bi + biscuit biscuit + bishop bishop + bishops bishop + bisson bisson + bit bit + bitch bitch + bite bite + biter biter + bites bite + biting bite + bits bit + bitt bitt + bitten bitten + bitter bitter + bitterest bitterest + bitterly bitterli + bitterness bitter + blab blab + blabb blabb + blabbing blab + blabs blab + black black + blackamoor blackamoor + blackamoors blackamoor + blackberries blackberri + blackberry blackberri + blacker blacker + blackest blackest + blackfriars blackfriar + blackheath blackheath + blackmere blackmer + blackness black + blacks black + bladder bladder + bladders bladder + blade blade + bladed blade + 
blades blade + blains blain + blam blam + blame blame + blamed blame + blameful blame + blameless blameless + blames blame + blanc blanc + blanca blanca + blanch blanch + blank blank + blanket blanket + blanks blank + blaspheme blasphem + blaspheming blasphem + blasphemous blasphem + blasphemy blasphemi + blast blast + blasted blast + blasting blast + blastments blastment + blasts blast + blaz blaz + blaze blaze + blazes blaze + blazing blaze + blazon blazon + blazoned blazon + blazoning blazon + bleach bleach + bleaching bleach + bleak bleak + blear blear + bleared blear + bleat bleat + bleated bleat + bleats bleat + bled bled + bleed bleed + bleedest bleedest + bleedeth bleedeth + bleeding bleed + bleeds bleed + blemish blemish + blemishes blemish + blench blench + blenches blench + blend blend + blended blend + blent blent + bless bless + blessed bless + blessedly blessedli + blessedness blessed + blesses bless + blesseth blesseth + blessing bless + blessings bless + blest blest + blew blew + blind blind + blinded blind + blindfold blindfold + blinding blind + blindly blindli + blindness blind + blinds blind + blink blink + blinking blink + bliss bliss + blist blist + blister blister + blisters blister + blithe blith + blithild blithild + bloat bloat + block block + blockish blockish + blocks block + blois bloi + blood blood + blooded blood + bloodhound bloodhound + bloodied bloodi + bloodier bloodier + bloodiest bloodiest + bloodily bloodili + bloodless bloodless + bloods blood + bloodshed bloodsh + bloodshedding bloodshed + bloodstained bloodstain + bloody bloodi + bloom bloom + blooms bloom + blossom blossom + blossoming blossom + blossoms blossom + blot blot + blots blot + blotted blot + blotting blot + blount blount + blow blow + blowed blow + blowers blower + blowest blowest + blowing blow + blown blown + blows blow + blowse blows + blubb blubb + blubber blubber + blubbering blubber + blue blue + bluecaps bluecap + bluest bluest + blunt blunt + blunted blunt + blunter blunter + bluntest bluntest + blunting blunt + bluntly bluntli + bluntness blunt + blunts blunt + blur blur + blurr blurr + blurs blur + blush blush + blushes blush + blushest blushest + blushing blush + blust blust + bluster bluster + blusterer bluster + blusters bluster + bo bo + boar boar + board board + boarded board + boarding board + boards board + boarish boarish + boars boar + boast boast + boasted boast + boastful boast + boasting boast + boasts boast + boat boat + boats boat + boatswain boatswain + bob bob + bobb bobb + boblibindo boblibindo + bobtail bobtail + bocchus bocchu + bode bode + boded bode + bodements bodement + bodes bode + bodg bodg + bodied bodi + bodies bodi + bodiless bodiless + bodily bodili + boding bode + bodkin bodkin + body bodi + bodykins bodykin + bog bog + boggle boggl + boggler boggler + bogs bog + bohemia bohemia + bohemian bohemian + bohun bohun + boil boil + boiling boil + boils boil + boist boist + boisterous boister + boisterously boister + boitier boitier + bold bold + bolden bolden + bolder bolder + boldest boldest + boldly boldli + boldness bold + bolds bold + bolingbroke bolingbrok + bolster bolster + bolt bolt + bolted bolt + bolter bolter + bolters bolter + bolting bolt + bolts bolt + bombard bombard + bombards bombard + bombast bombast + bon bon + bona bona + bond bond + bondage bondag + bonded bond + bondmaid bondmaid + bondman bondman + bondmen bondmen + bonds bond + bondslave bondslav + bone bone + boneless boneless + bones bone + bonfire bonfir + bonfires bonfir + 
bonjour bonjour + bonne bonn + bonnet bonnet + bonneted bonnet + bonny bonni + bonos bono + bonto bonto + bonville bonvil + bood bood + book book + bookish bookish + books book + boon boon + boor boor + boorish boorish + boors boor + boot boot + booted boot + booties booti + bootless bootless + boots boot + booty booti + bor bor + bora bora + borachio borachio + bordeaux bordeaux + border border + bordered border + borderers border + borders border + bore bore + boreas borea + bores bore + boring bore + born born + borne born + borough borough + boroughs borough + borrow borrow + borrowed borrow + borrower borrow + borrowing borrow + borrows borrow + bosko bosko + boskos bosko + bosky boski + bosom bosom + bosoms bosom + boson boson + boss boss + bosworth bosworth + botch botch + botcher botcher + botches botch + botchy botchi + both both + bots bot + bottle bottl + bottled bottl + bottles bottl + bottom bottom + bottomless bottomless + bottoms bottom + bouciqualt bouciqualt + bouge boug + bough bough + boughs bough + bought bought + bounce bounc + bouncing bounc + bound bound + bounded bound + bounden bounden + boundeth boundeth + bounding bound + boundless boundless + bounds bound + bounteous bounteou + bounteously bounteous + bounties bounti + bountiful bounti + bountifully bountifulli + bounty bounti + bourbier bourbier + bourbon bourbon + bourchier bourchier + bourdeaux bourdeaux + bourn bourn + bout bout + bouts bout + bove bove + bow bow + bowcase bowcas + bowed bow + bowels bowel + bower bower + bowing bow + bowl bowl + bowler bowler + bowling bowl + bowls bowl + bows bow + bowsprit bowsprit + bowstring bowstr + box box + boxes box + boy boi + boyet boyet + boyish boyish + boys boi + brabant brabant + brabantio brabantio + brabble brabbl + brabbler brabbler + brac brac + brace brace + bracelet bracelet + bracelets bracelet + brach brach + bracy braci + brag brag + bragg bragg + braggardism braggard + braggards braggard + braggart braggart + braggarts braggart + bragged brag + bragging brag + bragless bragless + brags brag + braid braid + braided braid + brain brain + brained brain + brainford brainford + brainish brainish + brainless brainless + brains brain + brainsick brainsick + brainsickly brainsickli + brake brake + brakenbury brakenburi + brakes brake + brambles brambl + bran bran + branch branch + branches branch + branchless branchless + brand brand + branded brand + brandish brandish + brandon brandon + brands brand + bras bra + brass brass + brassy brassi + brat brat + brats brat + brav brav + brave brave + braved brave + bravely brave + braver braver + bravery braveri + braves brave + bravest bravest + braving brave + brawl brawl + brawler brawler + brawling brawl + brawls brawl + brawn brawn + brawns brawn + bray brai + braying brai + braz braz + brazen brazen + brazier brazier + breach breach + breaches breach + bread bread + breadth breadth + break break + breaker breaker + breakfast breakfast + breaking break + breaks break + breast breast + breasted breast + breasting breast + breastplate breastplat + breasts breast + breath breath + breathe breath + breathed breath + breather breather + breathers breather + breathes breath + breathest breathest + breathing breath + breathless breathless + breaths breath + brecknock brecknock + bred bred + breech breech + breeches breech + breeching breech + breed breed + breeder breeder + breeders breeder + breeding breed + breeds breed + breese brees + breeze breez + breff breff + bretagne bretagn + brethen brethen + bretheren 
bretheren + brethren brethren + brevis brevi + brevity breviti + brew brew + brewage brewag + brewer brewer + brewers brewer + brewing brew + brews brew + briareus briareu + briars briar + brib brib + bribe bribe + briber briber + bribes bribe + brick brick + bricklayer bricklay + bricks brick + bridal bridal + bride bride + bridegroom bridegroom + bridegrooms bridegroom + brides bride + bridge bridg + bridgenorth bridgenorth + bridges bridg + bridget bridget + bridle bridl + bridled bridl + brief brief + briefer briefer + briefest briefest + briefly briefli + briefness brief + brier brier + briers brier + brigandine brigandin + bright bright + brighten brighten + brightest brightest + brightly brightli + brightness bright + brim brim + brimful brim + brims brim + brimstone brimston + brinded brind + brine brine + bring bring + bringer bringer + bringeth bringeth + bringing bring + bringings bring + brings bring + brinish brinish + brink brink + brisk brisk + brisky briski + bristle bristl + bristled bristl + bristly bristli + bristol bristol + bristow bristow + britain britain + britaine britain + britaines britain + british british + briton briton + britons briton + brittany brittani + brittle brittl + broach broach + broached broach + broad broad + broader broader + broadsides broadsid + brocas broca + brock brock + brogues brogu + broil broil + broiling broil + broils broil + broke broke + broken broken + brokenly brokenli + broker broker + brokers broker + brokes broke + broking broke + brooch brooch + brooches brooch + brood brood + brooded brood + brooding brood + brook brook + brooks brook + broom broom + broomstaff broomstaff + broth broth + brothel brothel + brother brother + brotherhood brotherhood + brotherhoods brotherhood + brotherly brotherli + brothers brother + broths broth + brought brought + brow brow + brown brown + browner browner + brownist brownist + browny browni + brows brow + browse brows + browsing brows + bruis brui + bruise bruis + bruised bruis + bruises bruis + bruising bruis + bruit bruit + bruited bruit + brundusium brundusium + brunt brunt + brush brush + brushes brush + brute brute + brutish brutish + brutus brutu + bubble bubbl + bubbles bubbl + bubbling bubbl + bubukles bubukl + buck buck + bucket bucket + buckets bucket + bucking buck + buckingham buckingham + buckle buckl + buckled buckl + buckler buckler + bucklers buckler + bucklersbury bucklersburi + buckles buckl + buckram buckram + bucks buck + bud bud + budded bud + budding bud + budge budg + budger budger + budget budget + buds bud + buff buff + buffet buffet + buffeting buffet + buffets buffet + bug bug + bugbear bugbear + bugle bugl + bugs bug + build build + builded build + buildeth buildeth + building build + buildings build + builds build + built built + bulk bulk + bulks bulk + bull bull + bullcalf bullcalf + bullen bullen + bullens bullen + bullet bullet + bullets bullet + bullocks bullock + bulls bull + bully bulli + bulmer bulmer + bulwark bulwark + bulwarks bulwark + bum bum + bumbast bumbast + bump bump + bumper bumper + bums bum + bunch bunch + bunches bunch + bundle bundl + bung bung + bunghole bunghol + bungle bungl + bunting bunt + buoy buoi + bur bur + burbolt burbolt + burd burd + burden burden + burdened burden + burdening burden + burdenous burden + burdens burden + burgh burgh + burgher burgher + burghers burgher + burglary burglari + burgomasters burgomast + burgonet burgonet + burgundy burgundi + burial burial + buried buri + burier burier + buriest buriest + burly burli + 
burn burn + burned burn + burnet burnet + burneth burneth + burning burn + burnish burnish + burns burn + burnt burnt + burr burr + burrows burrow + burs bur + burst burst + bursting burst + bursts burst + burthen burthen + burthens burthen + burton burton + bury buri + burying buri + bush bush + bushels bushel + bushes bush + bushy bushi + busied busi + busily busili + busines busin + business busi + businesses busi + buskin buskin + busky buski + buss buss + busses buss + bussing buss + bustle bustl + bustling bustl + busy busi + but but + butcheed butche + butcher butcher + butchered butcher + butcheries butcheri + butcherly butcherli + butchers butcher + butchery butcheri + butler butler + butt butt + butter butter + buttered butter + butterflies butterfli + butterfly butterfli + butterwoman butterwoman + buttery butteri + buttock buttock + buttocks buttock + button button + buttonhole buttonhol + buttons button + buttress buttress + buttry buttri + butts butt + buxom buxom + buy bui + buyer buyer + buying bui + buys bui + buzz buzz + buzzard buzzard + buzzards buzzard + buzzers buzzer + buzzing buzz + by by + bye bye + byzantium byzantium + c c + ca ca + cabbage cabbag + cabileros cabilero + cabin cabin + cabins cabin + cable cabl + cables cabl + cackling cackl + cacodemon cacodemon + caddis caddi + caddisses caddiss + cade cade + cadence cadenc + cadent cadent + cades cade + cadmus cadmu + caduceus caduceu + cadwal cadwal + cadwallader cadwallad + caelius caeliu + caelo caelo + caesar caesar + caesarion caesarion + caesars caesar + cage cage + caged cage + cagion cagion + cain cain + caithness caith + caitiff caitiff + caitiffs caitiff + caius caiu + cak cak + cake cake + cakes cake + calaber calab + calais calai + calamities calam + calamity calam + calchas calcha + calculate calcul + calen calen + calendar calendar + calendars calendar + calf calf + caliban caliban + calibans caliban + calipolis calipoli + cality caliti + caliver caliv + call call + callat callat + called call + callet callet + calling call + calls call + calm calm + calmest calmest + calmly calmli + calmness calm + calms calm + calpurnia calpurnia + calumniate calumni + calumniating calumni + calumnious calumni + calumny calumni + calve calv + calved calv + calves calv + calveskins calveskin + calydon calydon + cam cam + cambio cambio + cambria cambria + cambric cambric + cambrics cambric + cambridge cambridg + cambyses cambys + came came + camel camel + camelot camelot + camels camel + camest camest + camillo camillo + camlet camlet + camomile camomil + camp camp + campeius campeiu + camping camp + camps camp + can can + canakin canakin + canaries canari + canary canari + cancel cancel + cancell cancel + cancelled cancel + cancelling cancel + cancels cancel + cancer cancer + candidatus candidatu + candied candi + candle candl + candles candl + candlesticks candlestick + candy candi + canidius canidiu + cank cank + canker canker + cankerblossom cankerblossom + cankers canker + cannibally cannib + cannibals cannib + cannon cannon + cannoneer cannon + cannons cannon + cannot cannot + canon canon + canoniz canoniz + canonize canon + canonized canon + canons canon + canopied canopi + canopies canopi + canopy canopi + canst canst + canstick canstick + canterbury canterburi + cantle cantl + cantons canton + canus canu + canvas canva + canvass canvass + canzonet canzonet + cap cap + capability capabl + capable capabl + capacities capac + capacity capac + caparison caparison + capdv capdv + cape cape + capel capel + 
capels capel + caper caper + capers caper + capet capet + caphis caphi + capilet capilet + capitaine capitain + capital capit + capite capit + capitol capitol + capitulate capitul + capocchia capocchia + capon capon + capons capon + capp capp + cappadocia cappadocia + capriccio capriccio + capricious caprici + caps cap + capt capt + captain captain + captains captain + captainship captainship + captious captiou + captivate captiv + captivated captiv + captivates captiv + captive captiv + captives captiv + captivity captiv + captum captum + capucius capuciu + capulet capulet + capulets capulet + car car + carack carack + caracks carack + carat carat + caraways carawai + carbonado carbonado + carbuncle carbuncl + carbuncled carbuncl + carbuncles carbuncl + carcanet carcanet + carcase carcas + carcases carcas + carcass carcass + carcasses carcass + card card + cardecue cardecu + carded card + carders carder + cardinal cardin + cardinally cardin + cardinals cardin + cardmaker cardmak + cards card + carduus carduu + care care + cared care + career career + careers career + careful care + carefully carefulli + careless careless + carelessly carelessli + carelessness careless + cares care + caret caret + cargo cargo + carl carl + carlisle carlisl + carlot carlot + carman carman + carmen carmen + carnal carnal + carnally carnal + carnarvonshire carnarvonshir + carnation carnat + carnations carnat + carol carol + carous carou + carouse carous + caroused carous + carouses carous + carousing carous + carp carp + carpenter carpent + carper carper + carpet carpet + carpets carpet + carping carp + carriage carriag + carriages carriag + carried carri + carrier carrier + carriers carrier + carries carri + carrion carrion + carrions carrion + carry carri + carrying carri + cars car + cart cart + carters carter + carthage carthag + carts cart + carv carv + carve carv + carved carv + carver carver + carves carv + carving carv + cas ca + casa casa + casaer casaer + casca casca + case case + casement casement + casements casement + cases case + cash cash + cashier cashier + casing case + cask cask + casket casket + casketed casket + caskets casket + casque casqu + casques casqu + cassado cassado + cassandra cassandra + cassibelan cassibelan + cassio cassio + cassius cassiu + cassocks cassock + cast cast + castalion castalion + castaway castawai + castaways castawai + casted cast + caster caster + castigate castig + castigation castig + castile castil + castiliano castiliano + casting cast + castle castl + castles castl + casts cast + casual casual + casually casual + casualties casualti + casualty casualti + cat cat + cataian cataian + catalogue catalogu + cataplasm cataplasm + cataracts cataract + catarrhs catarrh + catastrophe catastroph + catch catch + catcher catcher + catches catch + catching catch + cate cate + catechising catechis + catechism catech + catechize catech + cater cater + caterpillars caterpillar + caters cater + caterwauling caterwaul + cates cate + catesby catesbi + cathedral cathedr + catlike catlik + catling catl + catlings catl + cato cato + cats cat + cattle cattl + caucasus caucasu + caudle caudl + cauf cauf + caught caught + cauldron cauldron + caus cau + cause caus + caused caus + causeless causeless + causer causer + causes caus + causest causest + causeth causeth + cautel cautel + cautelous cautel + cautels cautel + cauterizing cauter + caution caution + cautions caution + cavaleiro cavaleiro + cavalery cavaleri + cavaliers cavali + cave cave + cavern cavern + caverns cavern + 
caves cave + caveto caveto + caviary caviari + cavil cavil + cavilling cavil + cawdor cawdor + cawdron cawdron + cawing caw + ce ce + ceas cea + cease ceas + ceases ceas + ceaseth ceaseth + cedar cedar + cedars cedar + cedius cediu + celebrate celebr + celebrated celebr + celebrates celebr + celebration celebr + celerity celer + celestial celesti + celia celia + cell cell + cellar cellar + cellarage cellarag + celsa celsa + cement cement + censer censer + censor censor + censorinus censorinu + censur censur + censure censur + censured censur + censurers censur + censures censur + censuring censur + centaur centaur + centaurs centaur + centre centr + cents cent + centuries centuri + centurion centurion + centurions centurion + century centuri + cerberus cerberu + cerecloth cerecloth + cerements cerement + ceremonial ceremoni + ceremonies ceremoni + ceremonious ceremoni + ceremoniously ceremoni + ceremony ceremoni + ceres cere + cerns cern + certain certain + certainer certain + certainly certainli + certainties certainti + certainty certainti + certes cert + certificate certif + certified certifi + certifies certifi + certify certifi + ces ce + cesario cesario + cess cess + cesse cess + cestern cestern + cetera cetera + cette cett + chaces chace + chaf chaf + chafe chafe + chafed chafe + chafes chafe + chaff chaff + chaffless chaffless + chafing chafe + chain chain + chains chain + chair chair + chairs chair + chalic chalic + chalice chalic + chalices chalic + chalk chalk + chalks chalk + chalky chalki + challeng challeng + challenge challeng + challenged challeng + challenger challeng + challengers challeng + challenges challeng + cham cham + chamber chamber + chamberers chamber + chamberlain chamberlain + chamberlains chamberlain + chambermaid chambermaid + chambermaids chambermaid + chambers chamber + chameleon chameleon + champ champ + champagne champagn + champain champain + champains champain + champion champion + champions champion + chanc chanc + chance chanc + chanced chanc + chancellor chancellor + chances chanc + chandler chandler + chang chang + change chang + changeable changeabl + changed chang + changeful chang + changeling changel + changelings changel + changer changer + changes chang + changest changest + changing chang + channel channel + channels channel + chanson chanson + chant chant + chanticleer chanticl + chanting chant + chantries chantri + chantry chantri + chants chant + chaos chao + chap chap + chape chape + chapel chapel + chapeless chapeless + chapels chapel + chaplain chaplain + chaplains chaplain + chapless chapless + chaplet chaplet + chapmen chapmen + chaps chap + chapter chapter + character charact + charactered charact + characterless characterless + characters charact + charactery characteri + characts charact + charbon charbon + chare chare + chares chare + charg charg + charge charg + charged charg + chargeful charg + charges charg + chargeth chargeth + charging charg + chariest chariest + chariness chari + charing chare + chariot chariot + chariots chariot + charitable charit + charitably charit + charities chariti + charity chariti + charlemain charlemain + charles charl + charm charm + charmed charm + charmer charmer + charmeth charmeth + charmian charmian + charming charm + charmingly charmingli + charms charm + charneco charneco + charnel charnel + charolois charoloi + charon charon + charter charter + charters charter + chartreux chartreux + chary chari + charybdis charybdi + chas cha + chase chase + chased chase + chaser chaser + chaseth 
chaseth + chasing chase + chaste chast + chastely chast + chastis chasti + chastise chastis + chastised chastis + chastisement chastis + chastity chastiti + chat chat + chatham chatham + chatillon chatillon + chats chat + chatt chatt + chattels chattel + chatter chatter + chattering chatter + chattles chattl + chaud chaud + chaunted chaunt + chaw chaw + chawdron chawdron + che che + cheap cheap + cheapen cheapen + cheaper cheaper + cheapest cheapest + cheaply cheapli + cheapside cheapsid + cheat cheat + cheated cheat + cheater cheater + cheaters cheater + cheating cheat + cheats cheat + check check + checked check + checker checker + checking check + checks check + cheek cheek + cheeks cheek + cheer cheer + cheered cheer + cheerer cheerer + cheerful cheer + cheerfully cheerfulli + cheering cheer + cheerless cheerless + cheerly cheerli + cheers cheer + cheese chees + chequer chequer + cher cher + cherish cherish + cherished cherish + cherisher cherish + cherishes cherish + cherishing cherish + cherries cherri + cherry cherri + cherrypit cherrypit + chertsey chertsei + cherub cherub + cherubims cherubim + cherubin cherubin + cherubins cherubin + cheshu cheshu + chess chess + chest chest + chester chester + chestnut chestnut + chestnuts chestnut + chests chest + chetas cheta + chev chev + cheval cheval + chevalier chevali + chevaliers chevali + cheveril cheveril + chew chew + chewed chew + chewet chewet + chewing chew + chez chez + chi chi + chick chick + chicken chicken + chickens chicken + chicurmurco chicurmurco + chid chid + chidden chidden + chide chide + chiders chider + chides chide + chiding chide + chief chief + chiefest chiefest + chiefly chiefli + chien chien + child child + childed child + childeric childer + childhood childhood + childhoods childhood + childing child + childish childish + childishness childish + childlike childlik + childness child + children children + chill chill + chilling chill + chime chime + chimes chime + chimney chimnei + chimneypiece chimneypiec + chimneys chimnei + chimurcho chimurcho + chin chin + china china + chine chine + chines chine + chink chink + chinks chink + chins chin + chipp chipp + chipper chipper + chips chip + chiron chiron + chirping chirp + chirrah chirrah + chirurgeonly chirurgeonli + chisel chisel + chitopher chitoph + chivalrous chivalr + chivalry chivalri + choice choic + choicely choic + choicest choicest + choir choir + choirs choir + chok chok + choke choke + choked choke + chokes choke + choking choke + choler choler + choleric choler + cholers choler + chollors chollor + choose choos + chooser chooser + chooses choos + chooseth chooseth + choosing choos + chop chop + chopine chopin + choplogic choplog + chopp chopp + chopped chop + chopping chop + choppy choppi + chops chop + chopt chopt + chor chor + choristers chorist + chorus choru + chose chose + chosen chosen + chough chough + choughs chough + chrish chrish + christ christ + christen christen + christendom christendom + christendoms christendom + christening christen + christenings christen + christian christian + christianlike christianlik + christians christian + christmas christma + christom christom + christopher christoph + christophero christophero + chronicle chronicl + chronicled chronicl + chronicler chronicl + chroniclers chronicl + chronicles chronicl + chrysolite chrysolit + chuck chuck + chucks chuck + chud chud + chuffs chuff + church church + churches church + churchman churchman + churchmen churchmen + churchyard churchyard + churchyards churchyard + churl 
churl + churlish churlish + churlishly churlishli + churls churl + churn churn + chus chu + cicatrice cicatric + cicatrices cicatric + cicely cice + cicero cicero + ciceter cicet + ciel ciel + ciitzens ciitzen + cilicia cilicia + cimber cimber + cimmerian cimmerian + cinable cinabl + cincture cinctur + cinders cinder + cine cine + cinna cinna + cinque cinqu + cipher cipher + ciphers cipher + circa circa + circe circ + circle circl + circled circl + circlets circlet + circling circl + circuit circuit + circum circum + circumcised circumcis + circumference circumfer + circummur circummur + circumscrib circumscrib + circumscribed circumscrib + circumscription circumscript + circumspect circumspect + circumstance circumst + circumstanced circumstanc + circumstances circumst + circumstantial circumstanti + circumvent circumv + circumvention circumvent + cistern cistern + citadel citadel + cital cital + cite cite + cited cite + cites cite + cities citi + citing cite + citizen citizen + citizens citizen + cittern cittern + city citi + civet civet + civil civil + civility civil + civilly civilli + clack clack + clad clad + claim claim + claiming claim + claims claim + clamb clamb + clamber clamber + clammer clammer + clamor clamor + clamorous clamor + clamors clamor + clamour clamour + clamours clamour + clang clang + clangor clangor + clap clap + clapp clapp + clapped clap + clapper clapper + clapping clap + claps clap + clare clare + clarence clarenc + claret claret + claribel claribel + clasp clasp + clasps clasp + clatter clatter + claud claud + claudio claudio + claudius claudiu + clause claus + claw claw + clawed claw + clawing claw + claws claw + clay clai + clays clai + clean clean + cleanliest cleanliest + cleanly cleanli + cleans clean + cleanse cleans + cleansing cleans + clear clear + clearer clearer + clearest clearest + clearly clearli + clearness clear + clears clear + cleave cleav + cleaving cleav + clef clef + cleft cleft + cleitus cleitu + clemency clemenc + clement clement + cleomenes cleomen + cleopatpa cleopatpa + cleopatra cleopatra + clepeth clepeth + clept clept + clerestories clerestori + clergy clergi + clergyman clergyman + clergymen clergymen + clerk clerk + clerkly clerkli + clerks clerk + clew clew + client client + clients client + cliff cliff + clifford clifford + cliffords clifford + cliffs cliff + clifton clifton + climate climat + climature climatur + climb climb + climbed climb + climber climber + climbeth climbeth + climbing climb + climbs climb + clime clime + cling cling + clink clink + clinking clink + clinquant clinquant + clip clip + clipp clipp + clipper clipper + clippeth clippeth + clipping clip + clipt clipt + clitus clitu + clo clo + cloak cloak + cloakbag cloakbag + cloaks cloak + clock clock + clocks clock + clod clod + cloddy cloddi + clodpole clodpol + clog clog + clogging clog + clogs clog + cloister cloister + cloistress cloistress + cloquence cloquenc + clos clo + close close + closed close + closely close + closeness close + closer closer + closes close + closest closest + closet closet + closing close + closure closur + cloten cloten + clotens cloten + cloth cloth + clothair clothair + clotharius clothariu + clothe cloth + clothes cloth + clothier clothier + clothiers clothier + clothing cloth + cloths cloth + clotpoles clotpol + clotpoll clotpol + cloud cloud + clouded cloud + cloudiness cloudi + clouds cloud + cloudy cloudi + clout clout + clouted clout + clouts clout + cloven cloven + clover clover + cloves clove + clovest clovest + 
clowder clowder + clown clown + clownish clownish + clowns clown + cloy cloi + cloyed cloi + cloying cloi + cloyless cloyless + cloyment cloyment + cloys cloi + club club + clubs club + cluck cluck + clung clung + clust clust + clusters cluster + clutch clutch + clyster clyster + cneius cneiu + cnemies cnemi + co co + coach coach + coaches coach + coachmakers coachmak + coact coact + coactive coactiv + coagulate coagul + coal coal + coals coal + coarse coars + coarsely coars + coast coast + coasting coast + coasts coast + coat coat + coated coat + coats coat + cobble cobbl + cobbled cobbl + cobbler cobbler + cobham cobham + cobloaf cobloaf + cobweb cobweb + cobwebs cobweb + cock cock + cockatrice cockatric + cockatrices cockatric + cockle cockl + cockled cockl + cockney cocknei + cockpit cockpit + cocks cock + cocksure cocksur + coctus coctu + cocytus cocytu + cod cod + codding cod + codling codl + codpiece codpiec + codpieces codpiec + cods cod + coelestibus coelestibu + coesar coesar + coeur coeur + coffer coffer + coffers coffer + coffin coffin + coffins coffin + cog cog + cogging cog + cogitation cogit + cogitations cogit + cognition cognit + cognizance cogniz + cogscomb cogscomb + cohabitants cohabit + coher coher + cohere coher + coherence coher + coherent coher + cohorts cohort + coif coif + coign coign + coil coil + coin coin + coinage coinag + coiner coiner + coining coin + coins coin + col col + colbrand colbrand + colchos colcho + cold cold + colder colder + coldest coldest + coldly coldli + coldness cold + coldspur coldspur + colebrook colebrook + colic colic + collar collar + collars collar + collateral collater + colleagued colleagu + collect collect + collected collect + collection collect + college colleg + colleges colleg + collied colli + collier collier + colliers collier + collop collop + collusion collus + colme colm + colmekill colmekil + coloquintida coloquintida + color color + colors color + colossus colossu + colour colour + colourable colour + coloured colour + colouring colour + colours colour + colt colt + colted colt + colts colt + columbine columbin + columbines columbin + colville colvil + com com + comagene comagen + comart comart + comb comb + combat combat + combatant combat + combatants combat + combated combat + combating combat + combin combin + combinate combin + combination combin + combine combin + combined combin + combless combless + combustion combust + come come + comedian comedian + comedians comedian + comedy comedi + comeliness comeli + comely come + comer comer + comers comer + comes come + comest comest + comet comet + cometh cometh + comets comet + comfect comfect + comfit comfit + comfits comfit + comfort comfort + comfortable comfort + comforted comfort + comforter comfort + comforting comfort + comfortless comfortless + comforts comfort + comic comic + comical comic + coming come + comings come + cominius cominiu + comma comma + command command + commande command + commanded command + commander command + commanders command + commanding command + commandment command + commandments command + commands command + comme comm + commenc commenc + commence commenc + commenced commenc + commencement commenc + commences commenc + commencing commenc + commend commend + commendable commend + commendation commend + commendations commend + commended commend + commending commend + commends commend + comment comment + commentaries commentari + commenting comment + comments comment + commerce commerc + commingled commingl + commiseration commiser + 
commission commiss + commissioners commission + commissions commiss + commit commit + commits commit + committ committ + committed commit + committing commit + commix commix + commixed commix + commixtion commixt + commixture commixtur + commodious commodi + commodities commod + commodity commod + common common + commonalty commonalti + commoner common + commoners common + commonly commonli + commons common + commonweal commonw + commonwealth commonwealth + commotion commot + commotions commot + commune commun + communicat communicat + communicate commun + communication commun + communities commun + community commun + comonty comonti + compact compact + companies compani + companion companion + companions companion + companionship companionship + company compani + compar compar + comparative compar + compare compar + compared compar + comparing compar + comparison comparison + comparisons comparison + compartner compartn + compass compass + compasses compass + compassing compass + compassion compass + compassionate compassion + compeers compeer + compel compel + compell compel + compelled compel + compelling compel + compels compel + compensation compens + competence compet + competency compet + competent compet + competitor competitor + competitors competitor + compil compil + compile compil + compiled compil + complain complain + complainer complain + complainest complainest + complaining complain + complainings complain + complains complain + complaint complaint + complaints complaint + complement complement + complements complement + complete complet + complexion complexion + complexioned complexion + complexions complexion + complices complic + complies compli + compliment compliment + complimental compliment + compliments compliment + complot complot + complots complot + complotted complot + comply compli + compos compo + compose compos + composed compos + composition composit + compost compost + composture compostur + composure composur + compound compound + compounded compound + compounds compound + comprehend comprehend + comprehended comprehend + comprehends comprehend + compremises compremis + compris compri + comprising compris + compromis compromi + compromise compromis + compt compt + comptible comptibl + comptrollers comptrol + compulsatory compulsatori + compulsion compuls + compulsive compuls + compunctious compuncti + computation comput + comrade comrad + comrades comrad + comutual comutu + con con + concave concav + concavities concav + conceal conceal + concealed conceal + concealing conceal + concealment conceal + concealments conceal + conceals conceal + conceit conceit + conceited conceit + conceitless conceitless + conceits conceit + conceiv conceiv + conceive conceiv + conceived conceiv + conceives conceiv + conceiving conceiv + conception concept + conceptions concept + conceptious concepti + concern concern + concernancy concern + concerneth concerneth + concerning concern + concernings concern + concerns concern + conclave conclav + conclud conclud + conclude conclud + concluded conclud + concludes conclud + concluding conclud + conclusion conclus + conclusions conclus + concolinel concolinel + concord concord + concubine concubin + concupiscible concupisc + concupy concupi + concur concur + concurring concur + concurs concur + condemn condemn + condemnation condemn + condemned condemn + condemning condemn + condemns condemn + condescend condescend + condign condign + condition condit + conditionally condition + conditions condit + condole condol + condolement 
condol + condoling condol + conduce conduc + conduct conduct + conducted conduct + conducting conduct + conductor conductor + conduit conduit + conduits conduit + conected conect + coney conei + confection confect + confectionary confectionari + confections confect + confederacy confederaci + confederate confeder + confederates confeder + confer confer + conference confer + conferr conferr + conferring confer + confess confess + confessed confess + confesses confess + confesseth confesseth + confessing confess + confession confess + confessions confess + confessor confessor + confidence confid + confident confid + confidently confid + confin confin + confine confin + confined confin + confineless confineless + confiners confin + confines confin + confining confin + confirm confirm + confirmation confirm + confirmations confirm + confirmed confirm + confirmer confirm + confirmers confirm + confirming confirm + confirmities confirm + confirms confirm + confiscate confisc + confiscated confisc + confiscation confisc + confixed confix + conflict conflict + conflicting conflict + conflicts conflict + confluence confluenc + conflux conflux + conform conform + conformable conform + confound confound + confounded confound + confounding confound + confounds confound + confront confront + confronted confront + confus confu + confused confus + confusedly confusedli + confusion confus + confusions confus + confutation confut + confutes confut + congeal congeal + congealed congeal + congealment congeal + congee conge + conger conger + congest congest + congied congi + congratulate congratul + congreeing congre + congreeted congreet + congregate congreg + congregated congreg + congregation congreg + congregations congreg + congruent congruent + congruing congru + conies coni + conjectural conjectur + conjecture conjectur + conjectures conjectur + conjoin conjoin + conjoined conjoin + conjoins conjoin + conjointly conjointli + conjunct conjunct + conjunction conjunct + conjunctive conjunct + conjur conjur + conjuration conjur + conjurations conjur + conjure conjur + conjured conjur + conjurer conjur + conjurers conjur + conjures conjur + conjuring conjur + conjuro conjuro + conn conn + connected connect + connive conniv + conqu conqu + conquer conquer + conquered conquer + conquering conquer + conqueror conqueror + conquerors conqueror + conquers conquer + conquest conquest + conquests conquest + conquring conqur + conrade conrad + cons con + consanguineous consanguin + consanguinity consanguin + conscienc conscienc + conscience conscienc + consciences conscienc + conscionable conscion + consecrate consecr + consecrated consecr + consecrations consecr + consent consent + consented consent + consenting consent + consents consent + consequence consequ + consequences consequ + consequently consequ + conserve conserv + conserved conserv + conserves conserv + consider consid + considerance consider + considerate consider + consideration consider + considerations consider + considered consid + considering consid + considerings consid + considers consid + consign consign + consigning consign + consist consist + consisteth consisteth + consisting consist + consistory consistori + consists consist + consolate consol + consolation consol + consonancy conson + consonant conson + consort consort + consorted consort + consortest consortest + conspectuities conspectu + conspir conspir + conspiracy conspiraci + conspirant conspir + conspirator conspir + conspirators conspir + conspire conspir + conspired conspir + 
conspirers conspir + conspires conspir + conspiring conspir + constable constabl + constables constabl + constance constanc + constancies constanc + constancy constanc + constant constant + constantine constantin + constantinople constantinopl + constantly constantli + constellation constel + constitution constitut + constrain constrain + constrained constrain + constraineth constraineth + constrains constrain + constraint constraint + constring constr + construction construct + construe constru + consul consul + consuls consul + consulship consulship + consulships consulship + consult consult + consulting consult + consults consult + consum consum + consume consum + consumed consum + consumes consum + consuming consum + consummate consumm + consummation consumm + consumption consumpt + consumptions consumpt + contagion contagion + contagious contagi + contain contain + containing contain + contains contain + contaminate contamin + contaminated contamin + contemn contemn + contemned contemn + contemning contemn + contemns contemn + contemplate contempl + contemplation contempl + contemplative contempl + contempt contempt + contemptible contempt + contempts contempt + contemptuous contemptu + contemptuously contemptu + contend contend + contended contend + contending contend + contendon contendon + content content + contenta contenta + contented content + contenteth contenteth + contention content + contentious contenti + contentless contentless + contento contento + contents content + contest contest + contestation contest + continence contin + continency contin + continent contin + continents contin + continu continu + continual continu + continually continu + continuance continu + continuantly continuantli + continuate continu + continue continu + continued continu + continuer continu + continues continu + continuing continu + contract contract + contracted contract + contracting contract + contraction contract + contradict contradict + contradicted contradict + contradiction contradict + contradicts contradict + contraries contrari + contrarieties contrarieti + contrariety contrarieti + contrarious contrari + contrariously contrari + contrary contrari + contre contr + contribution contribut + contributors contributor + contrite contrit + contriv contriv + contrive contriv + contrived contriv + contriver contriv + contrives contriv + contriving contriv + control control + controll control + controller control + controlling control + controlment control + controls control + controversy controversi + contumelious contumeli + contumeliously contumeli + contumely contum + contusions contus + convenience conveni + conveniences conveni + conveniency conveni + convenient conveni + conveniently conveni + convented convent + conventicles conventicl + convents convent + convers conver + conversant convers + conversation convers + conversations convers + converse convers + conversed convers + converses convers + conversing convers + conversion convers + convert convert + converted convert + convertest convertest + converting convert + convertite convertit + convertites convertit + converts convert + convey convei + conveyance convey + conveyances convey + conveyers convey + conveying convei + convict convict + convicted convict + convince convinc + convinced convinc + convinces convinc + convive conviv + convocation convoc + convoy convoi + convulsions convuls + cony coni + cook cook + cookery cookeri + cooks cook + cool cool + cooled cool + cooling cool + cools cool + coop coop + coops coop + cop 
cop + copatain copatain + cope cope + cophetua cophetua + copied copi + copies copi + copious copiou + copper copper + copperspur copperspur + coppice coppic + copulation copul + copulatives copul + copy copi + cor cor + coragio coragio + coral coral + coram coram + corambus corambu + coranto coranto + corantos coranto + corbo corbo + cord cord + corded cord + cordelia cordelia + cordial cordial + cordis cordi + cords cord + core core + corin corin + corinth corinth + corinthian corinthian + coriolanus coriolanu + corioli corioli + cork cork + corky corki + cormorant cormor + corn corn + cornelia cornelia + cornelius corneliu + corner corner + corners corner + cornerstone cornerston + cornets cornet + cornish cornish + corns corn + cornuto cornuto + cornwall cornwal + corollary corollari + coronal coron + coronation coron + coronet coronet + coronets coronet + corporal corpor + corporals corpor + corporate corpor + corpse corps + corpulent corpul + correct correct + corrected correct + correcting correct + correction correct + correctioner correction + corrects correct + correspondence correspond + correspondent correspond + corresponding correspond + corresponsive correspons + corrigible corrig + corrival corriv + corrivals corriv + corroborate corrobor + corrosive corros + corrupt corrupt + corrupted corrupt + corrupter corrupt + corrupters corrupt + corruptible corrupt + corruptibly corrupt + corrupting corrupt + corruption corrupt + corruptly corruptli + corrupts corrupt + corse cors + corses cors + corslet corslet + cosmo cosmo + cost cost + costard costard + costermongers costermong + costlier costlier + costly costli + costs cost + cot cot + cote cote + coted cote + cotsall cotsal + cotsole cotsol + cotswold cotswold + cottage cottag + cottages cottag + cotus cotu + couch couch + couched couch + couching couch + couchings couch + coude coud + cough cough + coughing cough + could could + couldst couldst + coulter coulter + council council + councillor councillor + councils council + counsel counsel + counsell counsel + counsellor counsellor + counsellors counsellor + counselor counselor + counselors counselor + counsels counsel + count count + counted count + countenanc countenanc + countenance counten + countenances counten + counter counter + counterchange counterchang + countercheck countercheck + counterfeit counterfeit + counterfeited counterfeit + counterfeiting counterfeit + counterfeitly counterfeitli + counterfeits counterfeit + countermand countermand + countermands countermand + countermines countermin + counterpart counterpart + counterpoints counterpoint + counterpois counterpoi + counterpoise counterpois + counters counter + countervail countervail + countess countess + countesses countess + counties counti + counting count + countless countless + countries countri + countrv countrv + country countri + countryman countryman + countrymen countrymen + counts count + county counti + couper couper + couple coupl + coupled coupl + couplement couplement + couples coupl + couplet couplet + couplets couplet + cour cour + courage courag + courageous courag + courageously courag + courages courag + courier courier + couriers courier + couronne couronn + cours cour + course cours + coursed cours + courser courser + coursers courser + courses cours + coursing cours + court court + courted court + courteous courteou + courteously courteous + courtesan courtesan + courtesies courtesi + courtesy courtesi + courtezan courtezan + courtezans courtezan + courtier courtier + courtiers 
courtier + courtlike courtlik + courtly courtli + courtney courtnei + courts court + courtship courtship + cousin cousin + cousins cousin + couterfeit couterfeit + coutume coutum + covenant coven + covenants coven + covent covent + coventry coventri + cover cover + covered cover + covering cover + coverlet coverlet + covers cover + covert covert + covertly covertli + coverture covertur + covet covet + coveted covet + coveting covet + covetings covet + covetous covet + covetously covet + covetousness covet + covets covet + cow cow + coward coward + cowarded coward + cowardice cowardic + cowardly cowardli + cowards coward + cowardship cowardship + cowish cowish + cowl cowl + cowslip cowslip + cowslips cowslip + cox cox + coxcomb coxcomb + coxcombs coxcomb + coy coi + coystrill coystril + coz coz + cozen cozen + cozenage cozenag + cozened cozen + cozener cozen + cozeners cozen + cozening cozen + coziers cozier + crab crab + crabbed crab + crabs crab + crack crack + cracked crack + cracker cracker + crackers cracker + cracking crack + cracks crack + cradle cradl + cradled cradl + cradles cradl + craft craft + crafted craft + craftied crafti + craftier craftier + craftily craftili + crafts craft + craftsmen craftsmen + crafty crafti + cram cram + cramm cramm + cramp cramp + cramps cramp + crams cram + cranking crank + cranks crank + cranmer cranmer + crannied cranni + crannies cranni + cranny cranni + crants crant + crare crare + crash crash + crassus crassu + crav crav + crave crave + craved crave + craven craven + cravens craven + craves crave + craveth craveth + craving crave + crawl crawl + crawling crawl + crawls crawl + craz craz + crazed craze + crazy crazi + creaking creak + cream cream + create creat + created creat + creates creat + creating creat + creation creation + creator creator + creature creatur + creatures creatur + credence credenc + credent credent + credible credibl + credit credit + creditor creditor + creditors creditor + credo credo + credulity credul + credulous credul + creed creed + creek creek + creeks creek + creep creep + creeping creep + creeps creep + crept crept + crescent crescent + crescive cresciv + cressets cresset + cressid cressid + cressida cressida + cressids cressid + cressy cressi + crest crest + crested crest + crestfall crestfal + crestless crestless + crests crest + cretan cretan + crete crete + crevice crevic + crew crew + crews crew + crib crib + cribb cribb + cribs crib + cricket cricket + crickets cricket + cried cri + criedst criedst + crier crier + cries cri + criest criest + crieth crieth + crime crime + crimeful crime + crimeless crimeless + crimes crime + criminal crimin + crimson crimson + cringe cring + cripple crippl + crisp crisp + crisped crisp + crispian crispian + crispianus crispianu + crispin crispin + critic critic + critical critic + critics critic + croak croak + croaking croak + croaks croak + crocodile crocodil + cromer cromer + cromwell cromwel + crone crone + crook crook + crookback crookback + crooked crook + crooking crook + crop crop + cropp cropp + crosby crosbi + cross cross + crossed cross + crosses cross + crossest crossest + crossing cross + crossings cross + crossly crossli + crossness cross + crost crost + crotchets crotchet + crouch crouch + crouching crouch + crow crow + crowd crowd + crowded crowd + crowding crowd + crowds crowd + crowflowers crowflow + crowing crow + crowkeeper crowkeep + crown crown + crowned crown + crowner crowner + crownet crownet + crownets crownet + crowning crown + crowns crown + crows 
crow + crudy crudi + cruel cruel + cruell cruell + crueller crueller + cruelly cruelli + cruels cruel + cruelty cruelti + crum crum + crumble crumbl + crumbs crumb + crupper crupper + crusadoes crusado + crush crush + crushed crush + crushest crushest + crushing crush + crust crust + crusts crust + crusty crusti + crutch crutch + crutches crutch + cry cry + crying cry + crystal crystal + crystalline crystallin + crystals crystal + cub cub + cubbert cubbert + cubiculo cubiculo + cubit cubit + cubs cub + cuckold cuckold + cuckoldly cuckoldli + cuckolds cuckold + cuckoo cuckoo + cucullus cucullu + cudgel cudgel + cudgeled cudgel + cudgell cudgel + cudgelling cudgel + cudgels cudgel + cue cue + cues cue + cuff cuff + cuffs cuff + cuique cuiqu + cull cull + culling cull + cullion cullion + cullionly cullionli + cullions cullion + culpable culpabl + culverin culverin + cum cum + cumber cumber + cumberland cumberland + cunning cun + cunningly cunningli + cunnings cun + cuore cuor + cup cup + cupbearer cupbear + cupboarding cupboard + cupid cupid + cupids cupid + cuppele cuppel + cups cup + cur cur + curan curan + curate curat + curb curb + curbed curb + curbing curb + curbs curb + curd curd + curdied curdi + curds curd + cure cure + cured cure + cureless cureless + curer curer + cures cure + curfew curfew + curing cure + curio curio + curiosity curios + curious curiou + curiously curious + curl curl + curled curl + curling curl + curls curl + currance curranc + currants currant + current current + currents current + currish currish + curry curri + curs cur + curse curs + cursed curs + curses curs + cursies cursi + cursing curs + cursorary cursorari + curst curst + curster curster + curstest curstest + curstness curst + cursy cursi + curtail curtail + curtain curtain + curtains curtain + curtal curtal + curtis curti + curtle curtl + curtsied curtsi + curtsies curtsi + curtsy curtsi + curvet curvet + curvets curvet + cushes cush + cushion cushion + cushions cushion + custalorum custalorum + custard custard + custody custodi + custom custom + customary customari + customed custom + customer custom + customers custom + customs custom + custure custur + cut cut + cutler cutler + cutpurse cutpurs + cutpurses cutpurs + cuts cut + cutter cutter + cutting cut + cuttle cuttl + cxsar cxsar + cyclops cyclop + cydnus cydnu + cygnet cygnet + cygnets cygnet + cym cym + cymbals cymbal + cymbeline cymbelin + cyme cyme + cynic cynic + cynthia cynthia + cypress cypress + cypriot cypriot + cyprus cypru + cyrus cyru + cytherea cytherea + d d + dabbled dabbl + dace dace + dad dad + daedalus daedalu + daemon daemon + daff daff + daffed daf + daffest daffest + daffodils daffodil + dagger dagger + daggers dagger + dagonet dagonet + daily daili + daintier daintier + dainties dainti + daintiest daintiest + daintily daintili + daintiness dainti + daintry daintri + dainty dainti + daisied daisi + daisies daisi + daisy daisi + dale dale + dalliance dallianc + dallied dalli + dallies dalli + dally dalli + dallying dalli + dalmatians dalmatian + dam dam + damage damag + damascus damascu + damask damask + damasked damask + dame dame + dames dame + damm damm + damn damn + damnable damnabl + damnably damnabl + damnation damnat + damned damn + damns damn + damoiselle damoisel + damon damon + damosella damosella + damp damp + dams dam + damsel damsel + damsons damson + dan dan + danc danc + dance danc + dancer dancer + dances danc + dancing danc + dandle dandl + dandy dandi + dane dane + dang dang + danger danger + dangerous danger 
+ dangerously danger + dangers danger + dangling dangl + daniel daniel + danish danish + dank dank + dankish dankish + danskers dansker + daphne daphn + dappled dappl + dapples dappl + dar dar + dardan dardan + dardanian dardanian + dardanius dardaniu + dare dare + dared dare + dareful dare + dares dare + darest darest + daring dare + darius dariu + dark dark + darken darken + darkening darken + darkens darken + darker darker + darkest darkest + darkling darkl + darkly darkli + darkness dark + darling darl + darlings darl + darnel darnel + darraign darraign + dart dart + darted dart + darter darter + dartford dartford + darting dart + darts dart + dash dash + dashes dash + dashing dash + dastard dastard + dastards dastard + dat dat + datchet datchet + date date + dated date + dateless dateless + dates date + daub daub + daughter daughter + daughters daughter + daunt daunt + daunted daunt + dauntless dauntless + dauphin dauphin + daventry daventri + davy davi + daw daw + dawn dawn + dawning dawn + daws daw + day dai + daylight daylight + days dai + dazzle dazzl + dazzled dazzl + dazzling dazzl + de de + dead dead + deadly deadli + deaf deaf + deafing deaf + deafness deaf + deafs deaf + deal deal + dealer dealer + dealers dealer + dealest dealest + dealing deal + dealings deal + deals deal + dealt dealt + dean dean + deanery deaneri + dear dear + dearer dearer + dearest dearest + dearly dearli + dearness dear + dears dear + dearth dearth + dearths dearth + death death + deathbed deathb + deathful death + deaths death + deathsman deathsman + deathsmen deathsmen + debarred debar + debase debas + debate debat + debated debat + debatement debat + debateth debateth + debating debat + debauch debauch + debile debil + debility debil + debitor debitor + debonair debonair + deborah deborah + debosh debosh + debt debt + debted debt + debtor debtor + debtors debtor + debts debt + debuty debuti + decay decai + decayed decai + decayer decay + decaying decai + decays decai + deceas decea + decease deceas + deceased deceas + deceit deceit + deceitful deceit + deceits deceit + deceiv deceiv + deceivable deceiv + deceive deceiv + deceived deceiv + deceiver deceiv + deceivers deceiv + deceives deceiv + deceivest deceivest + deceiveth deceiveth + deceiving deceiv + december decemb + decent decent + deceptious decepti + decerns decern + decide decid + decides decid + decimation decim + decipher deciph + deciphers deciph + decision decis + decius deciu + deck deck + decking deck + decks deck + deckt deckt + declare declar + declares declar + declension declens + declensions declens + declin declin + decline declin + declined declin + declines declin + declining declin + decoct decoct + decorum decorum + decreas decrea + decrease decreas + decreasing decreas + decree decre + decreed decre + decrees decre + decrepit decrepit + dedicate dedic + dedicated dedic + dedicates dedic + dedication dedic + deed deed + deedless deedless + deeds deed + deem deem + deemed deem + deep deep + deeper deeper + deepest deepest + deeply deepli + deeps deep + deepvow deepvow + deer deer + deesse deess + defac defac + deface defac + defaced defac + defacer defac + defacers defac + defacing defac + defam defam + default default + defeat defeat + defeated defeat + defeats defeat + defeatures defeatur + defect defect + defective defect + defects defect + defence defenc + defences defenc + defend defend + defendant defend + defended defend + defender defend + defenders defend + defending defend + defends defend + defense defens + 
defensible defens + defensive defens + defer defer + deferr deferr + defiance defianc + deficient defici + defied defi + defies defi + defil defil + defile defil + defiler defil + defiles defil + defiling defil + define defin + definement defin + definite definit + definitive definit + definitively definit + deflow deflow + deflower deflow + deflowered deflow + deform deform + deformed deform + deformities deform + deformity deform + deftly deftli + defunct defunct + defunction defunct + defuse defus + defy defi + defying defi + degenerate degener + degraded degrad + degree degre + degrees degre + deified deifi + deifying deifi + deign deign + deigned deign + deiphobus deiphobu + deities deiti + deity deiti + deja deja + deject deject + dejected deject + delabreth delabreth + delay delai + delayed delai + delaying delai + delays delai + delectable delect + deliberate deliber + delicate delic + delicates delic + delicious delici + deliciousness delici + delight delight + delighted delight + delightful delight + delights delight + delinquents delinqu + deliv deliv + deliver deliv + deliverance deliver + delivered deliv + delivering deliv + delivers deliv + delivery deliveri + delphos delpho + deluded delud + deluding delud + deluge delug + delve delv + delver delver + delves delv + demand demand + demanded demand + demanding demand + demands demand + demean demean + demeanor demeanor + demeanour demeanour + demerits demerit + demesnes demesn + demetrius demetriu + demi demi + demigod demigod + demise demis + demoiselles demoisel + demon demon + demonstrable demonstr + demonstrate demonstr + demonstrated demonstr + demonstrating demonstr + demonstration demonstr + demonstrative demonstr + demure demur + demurely demur + demuring demur + den den + denay denai + deni deni + denial denial + denials denial + denied deni + denier denier + denies deni + deniest deniest + denis deni + denmark denmark + dennis denni + denny denni + denote denot + denoted denot + denotement denot + denounc denounc + denounce denounc + denouncing denounc + dens den + denunciation denunci + deny deni + denying deni + deo deo + depart depart + departed depart + departest departest + departing depart + departure departur + depeche depech + depend depend + dependant depend + dependants depend + depended depend + dependence depend + dependences depend + dependency depend + dependent depend + dependents depend + depender depend + depending depend + depends depend + deplore deplor + deploring deplor + depopulate depopul + depos depo + depose depos + deposed depos + deposing depos + depositaries depositari + deprav deprav + depravation deprav + deprave deprav + depraved deprav + depraves deprav + depress depress + depriv depriv + deprive depriv + depth depth + depths depth + deputation deput + depute deput + deputed deput + deputies deputi + deputing deput + deputy deputi + deracinate deracin + derby derbi + dercetas derceta + dere dere + derides derid + derision deris + deriv deriv + derivation deriv + derivative deriv + derive deriv + derived deriv + derives deriv + derogate derog + derogately derog + derogation derog + des de + desartless desartless + descant descant + descend descend + descended descend + descending descend + descends descend + descension descens + descent descent + descents descent + describe describ + described describ + describes describ + descried descri + description descript + descriptions descript + descry descri + desdemon desdemon + desdemona desdemona + desert desert + deserts desert + deserv 
deserv + deserve deserv + deserved deserv + deservedly deservedli + deserver deserv + deservers deserv + deserves deserv + deservest deservest + deserving deserv + deservings deserv + design design + designment design + designments design + designs design + desir desir + desire desir + desired desir + desirers desir + desires desir + desirest desirest + desiring desir + desirous desir + desist desist + desk desk + desolate desol + desolation desol + desp desp + despair despair + despairing despair + despairs despair + despatch despatch + desperate desper + desperately desper + desperation desper + despis despi + despise despis + despised despis + despiser despis + despiseth despiseth + despising despis + despite despit + despiteful despit + despoiled despoil + dest dest + destin destin + destined destin + destinies destini + destiny destini + destitute destitut + destroy destroi + destroyed destroi + destroyer destroy + destroyers destroy + destroying destroi + destroys destroi + destruction destruct + destructions destruct + det det + detain detain + detains detain + detect detect + detected detect + detecting detect + detection detect + detector detector + detects detect + detention detent + determin determin + determinate determin + determination determin + determinations determin + determine determin + determined determin + determines determin + detest detest + detestable detest + detested detest + detesting detest + detests detest + detract detract + detraction detract + detractions detract + deucalion deucalion + deuce deuc + deum deum + deux deux + devant devant + devesting devest + device devic + devices devic + devil devil + devilish devilish + devils devil + devis devi + devise devis + devised devis + devises devis + devising devis + devoid devoid + devonshire devonshir + devote devot + devoted devot + devotion devot + devour devour + devoured devour + devourers devour + devouring devour + devours devour + devout devout + devoutly devoutli + dew dew + dewberries dewberri + dewdrops dewdrop + dewlap dewlap + dewlapp dewlapp + dews dew + dewy dewi + dexter dexter + dexteriously dexteri + dexterity dexter + di di + diable diabl + diablo diablo + diadem diadem + dial dial + dialect dialect + dialogue dialogu + dialogued dialogu + dials dial + diameter diamet + diamond diamond + diamonds diamond + dian dian + diana diana + diaper diaper + dibble dibbl + dic dic + dice dice + dicers dicer + dich dich + dick dick + dickens dicken + dickon dickon + dicky dicki + dictator dictat + diction diction + dictynna dictynna + did did + diddle diddl + didest didest + dido dido + didst didst + die die + died di + diedst diedst + dies di + diest diest + diet diet + dieted diet + dieter dieter + dieu dieu + diff diff + differ differ + difference differ + differences differ + differency differ + different differ + differing differ + differs differ + difficile difficil + difficult difficult + difficulties difficulti + difficulty difficulti + diffidence diffid + diffidences diffid + diffus diffu + diffused diffus + diffusest diffusest + dig dig + digest digest + digested digest + digestion digest + digestions digest + digg digg + digging dig + dighton dighton + dignified dignifi + dignifies dignifi + dignify dignifi + dignities digniti + dignity digniti + digress digress + digressing digress + digression digress + digs dig + digt digt + dilate dilat + dilated dilat + dilations dilat + dilatory dilatori + dild dild + dildos dildo + dilemma dilemma + dilemmas dilemma + diligence dilig + diligent dilig + 
diluculo diluculo + dim dim + dimension dimens + dimensions dimens + diminish diminish + diminishing diminish + diminution diminut + diminutive diminut + diminutives diminut + dimm dimm + dimmed dim + dimming dim + dimpled dimpl + dimples dimpl + dims dim + din din + dine dine + dined dine + diner diner + dines dine + ding ding + dining dine + dinner dinner + dinners dinner + dinnertime dinnertim + dint dint + diomed diom + diomede diomed + diomedes diomed + dion dion + dip dip + dipp dipp + dipping dip + dips dip + dir dir + dire dire + direct direct + directed direct + directing direct + direction direct + directions direct + directitude directitud + directive direct + directly directli + directs direct + direful dire + direness dire + direst direst + dirge dirg + dirges dirg + dirt dirt + dirty dirti + dis di + disability disabl + disable disabl + disabled disabl + disabling disabl + disadvantage disadvantag + disagree disagre + disallow disallow + disanimates disanim + disannul disannul + disannuls disannul + disappointed disappoint + disarm disarm + disarmed disarm + disarmeth disarmeth + disarms disarm + disaster disast + disasters disast + disastrous disastr + disbench disbench + disbranch disbranch + disburdened disburden + disburs disbur + disburse disburs + disbursed disburs + discandy discandi + discandying discandi + discard discard + discarded discard + discase discas + discased discas + discern discern + discerner discern + discerning discern + discernings discern + discerns discern + discharg discharg + discharge discharg + discharged discharg + discharging discharg + discipled discipl + disciples discipl + disciplin disciplin + discipline disciplin + disciplined disciplin + disciplines disciplin + disclaim disclaim + disclaiming disclaim + disclaims disclaim + disclos disclo + disclose disclos + disclosed disclos + discloses disclos + discolour discolour + discoloured discolour + discolours discolour + discomfit discomfit + discomfited discomfit + discomfiture discomfitur + discomfort discomfort + discomfortable discomfort + discommend discommend + disconsolate disconsol + discontent discont + discontented discont + discontentedly discontentedli + discontenting discont + discontents discont + discontinue discontinu + discontinued discontinu + discord discord + discordant discord + discords discord + discourse discours + discoursed discours + discourser discours + discourses discours + discoursive discours + discourtesy discourtesi + discov discov + discover discov + discovered discov + discoverers discover + discoveries discoveri + discovering discov + discovers discov + discovery discoveri + discredit discredit + discredited discredit + discredits discredit + discreet discreet + discreetly discreetli + discretion discret + discretions discret + discuss discuss + disdain disdain + disdained disdain + disdaineth disdaineth + disdainful disdain + disdainfully disdainfulli + disdaining disdain + disdains disdain + disdnguish disdnguish + diseas disea + disease diseas + diseased diseas + diseases diseas + disedg disedg + disembark disembark + disfigure disfigur + disfigured disfigur + disfurnish disfurnish + disgorge disgorg + disgrac disgrac + disgrace disgrac + disgraced disgrac + disgraceful disgrac + disgraces disgrac + disgracing disgrac + disgracious disgraci + disguis disgui + disguise disguis + disguised disguis + disguiser disguis + disguises disguis + disguising disguis + dish dish + dishabited dishabit + dishclout dishclout + dishearten dishearten + disheartens 
dishearten + dishes dish + dishonest dishonest + dishonestly dishonestli + dishonesty dishonesti + dishonor dishonor + dishonorable dishonor + dishonors dishonor + dishonour dishonour + dishonourable dishonour + dishonoured dishonour + dishonours dishonour + disinherit disinherit + disinherited disinherit + disjoin disjoin + disjoining disjoin + disjoins disjoin + disjoint disjoint + disjunction disjunct + dislik dislik + dislike dislik + disliken disliken + dislikes dislik + dislimns dislimn + dislocate disloc + dislodg dislodg + disloyal disloy + disloyalty disloyalti + dismal dismal + dismantle dismantl + dismantled dismantl + dismask dismask + dismay dismai + dismayed dismai + dismemb dismemb + dismember dismemb + dismes dism + dismiss dismiss + dismissed dismiss + dismissing dismiss + dismission dismiss + dismount dismount + dismounted dismount + disnatur disnatur + disobedience disobedi + disobedient disobedi + disobey disobei + disobeys disobei + disorb disorb + disorder disord + disordered disord + disorderly disorderli + disorders disord + disparage disparag + disparagement disparag + disparagements disparag + dispark dispark + dispatch dispatch + dispensation dispens + dispense dispens + dispenses dispens + dispers disper + disperse dispers + dispersed dispers + dispersedly dispersedli + dispersing dispers + dispiteous dispit + displac displac + displace displac + displaced displac + displant displant + displanting displant + display displai + displayed displai + displeas displea + displease displeas + displeased displeas + displeasing displeas + displeasure displeasur + displeasures displeasur + disponge dispong + disport disport + disports disport + dispos dispo + dispose dispos + disposed dispos + disposer dispos + disposing dispos + disposition disposit + dispositions disposit + dispossess dispossess + dispossessing dispossess + disprais disprai + dispraise disprais + dispraising disprais + dispraisingly dispraisingli + dispropertied disproperti + disproportion disproport + disproportioned disproport + disprov disprov + disprove disprov + disproved disprov + dispursed dispurs + disputable disput + disputation disput + disputations disput + dispute disput + disputed disput + disputes disput + disputing disput + disquantity disquant + disquiet disquiet + disquietly disquietli + disrelish disrelish + disrobe disrob + disseat disseat + dissemble dissembl + dissembled dissembl + dissembler dissembl + dissemblers dissembl + dissembling dissembl + dissembly dissembl + dissension dissens + dissensions dissens + dissentious dissenti + dissever dissev + dissipation dissip + dissolute dissolut + dissolutely dissolut + dissolution dissolut + dissolutions dissolut + dissolv dissolv + dissolve dissolv + dissolved dissolv + dissolves dissolv + dissuade dissuad + dissuaded dissuad + distaff distaff + distaffs distaff + distain distain + distains distain + distance distanc + distant distant + distaste distast + distasted distast + distasteful distast + distemp distemp + distemper distemp + distemperature distemperatur + distemperatures distemperatur + distempered distemp + distempering distemp + distil distil + distill distil + distillation distil + distilled distil + distills distil + distilment distil + distinct distinct + distinction distinct + distinctly distinctli + distingue distingu + distinguish distinguish + distinguishes distinguish + distinguishment distinguish + distract distract + distracted distract + distractedly distractedli + distraction distract + distractions distract + 
distracts distract + distrain distrain + distraught distraught + distress distress + distressed distress + distresses distress + distressful distress + distribute distribut + distributed distribut + distribution distribut + distrust distrust + distrustful distrust + disturb disturb + disturbed disturb + disturbers disturb + disturbing disturb + disunite disunit + disvalued disvalu + disvouch disvouch + dit dit + ditch ditch + ditchers ditcher + ditches ditch + dites dite + ditties ditti + ditty ditti + diurnal diurnal + div div + dive dive + diver diver + divers diver + diversely divers + diversity divers + divert divert + diverted divert + diverts divert + dives dive + divest divest + dividable divid + dividant divid + divide divid + divided divid + divides divid + divideth divideth + divin divin + divination divin + divine divin + divinely divin + divineness divin + diviner divin + divines divin + divinest divinest + divining divin + divinity divin + division divis + divisions divis + divorc divorc + divorce divorc + divorced divorc + divorcement divorc + divorcing divorc + divulg divulg + divulge divulg + divulged divulg + divulging divulg + dizy dizi + dizzy dizzi + do do + doating doat + dobbin dobbin + dock dock + docks dock + doct doct + doctor doctor + doctors doctor + doctrine doctrin + document document + dodge dodg + doe doe + doer doer + doers doer + does doe + doest doest + doff doff + dog dog + dogberry dogberri + dogfish dogfish + dogg dogg + dogged dog + dogs dog + doigts doigt + doing do + doings do + doit doit + doits doit + dolabella dolabella + dole dole + doleful dole + doll doll + dollar dollar + dollars dollar + dolor dolor + dolorous dolor + dolour dolour + dolours dolour + dolphin dolphin + dolt dolt + dolts dolt + domestic domest + domestics domest + dominance domin + dominations domin + dominator domin + domine domin + domineer domin + domineering domin + dominical domin + dominion dominion + dominions dominion + domitius domitiu + dommelton dommelton + don don + donalbain donalbain + donation donat + donc donc + doncaster doncast + done done + dong dong + donn donn + donne donn + donner donner + donnerai donnerai + doom doom + doomsday doomsdai + door door + doorkeeper doorkeep + doors door + dorcas dorca + doreus doreu + doricles doricl + dormouse dormous + dorothy dorothi + dorset dorset + dorsetshire dorsetshir + dost dost + dotage dotag + dotant dotant + dotard dotard + dotards dotard + dote dote + doted dote + doters doter + dotes dote + doteth doteth + doth doth + doting dote + double doubl + doubled doubl + doubleness doubl + doubler doubler + doublet doublet + doublets doublet + doubling doubl + doubly doubli + doubt doubt + doubted doubt + doubtful doubt + doubtfully doubtfulli + doubting doubt + doubtless doubtless + doubts doubt + doug doug + dough dough + doughty doughti + doughy doughi + douglas dougla + dout dout + doute dout + douts dout + dove dove + dovehouse dovehous + dover dover + doves dove + dow dow + dowager dowag + dowdy dowdi + dower dower + dowerless dowerless + dowers dower + dowlas dowla + dowle dowl + down down + downfall downfal + downright downright + downs down + downstairs downstair + downtrod downtrod + downward downward + downwards downward + downy downi + dowries dowri + dowry dowri + dowsabel dowsabel + doxy doxi + dozed doze + dozen dozen + dozens dozen + dozy dozi + drab drab + drabbing drab + drabs drab + drachma drachma + drachmas drachma + draff draff + drag drag + dragg dragg + dragged drag + dragging drag + dragon 
dragon + dragonish dragonish + dragons dragon + drain drain + drained drain + drains drain + drake drake + dram dram + dramatis dramati + drank drank + draught draught + draughts draught + drave drave + draw draw + drawbridge drawbridg + drawer drawer + drawers drawer + draweth draweth + drawing draw + drawling drawl + drawn drawn + draws draw + drayman drayman + draymen draymen + dread dread + dreaded dread + dreadful dread + dreadfully dreadfulli + dreading dread + dreads dread + dream dream + dreamer dreamer + dreamers dreamer + dreaming dream + dreams dream + dreamt dreamt + drearning drearn + dreary dreari + dreg dreg + dregs dreg + drench drench + drenched drench + dress dress + dressed dress + dresser dresser + dressing dress + dressings dress + drest drest + drew drew + dribbling dribbl + dried dri + drier drier + dries dri + drift drift + drily drili + drink drink + drinketh drinketh + drinking drink + drinkings drink + drinks drink + driv driv + drive drive + drivelling drivel + driven driven + drives drive + driveth driveth + driving drive + drizzle drizzl + drizzled drizzl + drizzles drizzl + droit droit + drollery drolleri + dromio dromio + dromios dromio + drone drone + drones drone + droop droop + droopeth droopeth + drooping droop + droops droop + drop drop + dropheir dropheir + droplets droplet + dropp dropp + dropper dropper + droppeth droppeth + dropping drop + droppings drop + drops drop + dropsied dropsi + dropsies dropsi + dropsy dropsi + dropt dropt + dross dross + drossy drossi + drought drought + drove drove + droven droven + drovier drovier + drown drown + drowned drown + drowning drown + drowns drown + drows drow + drowse drows + drowsily drowsili + drowsiness drowsi + drowsy drowsi + drudge drudg + drudgery drudgeri + drudges drudg + drug drug + drugg drugg + drugs drug + drum drum + drumble drumbl + drummer drummer + drumming drum + drums drum + drunk drunk + drunkard drunkard + drunkards drunkard + drunken drunken + drunkenly drunkenli + drunkenness drunken + dry dry + dryness dryness + dst dst + du du + dub dub + dubb dubb + ducat ducat + ducats ducat + ducdame ducdam + duchess duchess + duchies duchi + duchy duchi + duck duck + ducking duck + ducks duck + dudgeon dudgeon + due due + duellist duellist + duello duello + duer duer + dues due + duff duff + dug dug + dugs dug + duke duke + dukedom dukedom + dukedoms dukedom + dukes duke + dulcet dulcet + dulche dulch + dull dull + dullard dullard + duller duller + dullest dullest + dulling dull + dullness dull + dulls dull + dully dulli + dulness dul + duly duli + dumain dumain + dumb dumb + dumbe dumb + dumbly dumbl + dumbness dumb + dump dump + dumps dump + dun dun + duncan duncan + dung dung + dungeon dungeon + dungeons dungeon + dunghill dunghil + dunghills dunghil + dungy dungi + dunnest dunnest + dunsinane dunsinan + dunsmore dunsmor + dunstable dunstabl + dupp dupp + durance duranc + during dure + durst durst + dusky duski + dust dust + dusted dust + dusty dusti + dutch dutch + dutchman dutchman + duteous duteou + duties duti + dutiful duti + duty duti + dwarf dwarf + dwarfish dwarfish + dwell dwell + dwellers dweller + dwelling dwell + dwells dwell + dwelt dwelt + dwindle dwindl + dy dy + dye dye + dyed dy + dyer dyer + dying dy + e e + each each + eager eager + eagerly eagerli + eagerness eager + eagle eagl + eagles eagl + eaning ean + eanlings eanl + ear ear + earing ear + earl earl + earldom earldom + earlier earlier + earliest earliest + earliness earli + earls earl + early earli + earn earn + earned 
earn + earnest earnest + earnestly earnestli + earnestness earnest + earns earn + ears ear + earth earth + earthen earthen + earthlier earthlier + earthly earthli + earthquake earthquak + earthquakes earthquak + earthy earthi + eas ea + ease eas + eased eas + easeful eas + eases eas + easier easier + easiest easiest + easiliest easiliest + easily easili + easiness easi + easing eas + east east + eastcheap eastcheap + easter easter + eastern eastern + eastward eastward + easy easi + eat eat + eaten eaten + eater eater + eaters eater + eating eat + eats eat + eaux eaux + eaves eav + ebb ebb + ebbing eb + ebbs ebb + ebon ebon + ebony eboni + ebrew ebrew + ecce ecc + echapper echapp + echo echo + echoes echo + eclips eclip + eclipse eclips + eclipses eclips + ecolier ecoli + ecoutez ecoutez + ecstacy ecstaci + ecstasies ecstasi + ecstasy ecstasi + ecus ecu + eden eden + edg edg + edgar edgar + edge edg + edged edg + edgeless edgeless + edges edg + edict edict + edicts edict + edifice edific + edifices edific + edified edifi + edifies edifi + edition edit + edm edm + edmund edmund + edmunds edmund + edmundsbury edmundsburi + educate educ + educated educ + education educ + edward edward + eel eel + eels eel + effect effect + effected effect + effectless effectless + effects effect + effectual effectu + effectually effectu + effeminate effemin + effigies effigi + effus effu + effuse effus + effusion effus + eftest eftest + egal egal + egally egal + eget eget + egeus egeu + egg egg + eggs egg + eggshell eggshel + eglamour eglamour + eglantine eglantin + egma egma + ego ego + egregious egregi + egregiously egregi + egress egress + egypt egypt + egyptian egyptian + egyptians egyptian + eie eie + eight eight + eighteen eighteen + eighth eighth + eightpenny eightpenni + eighty eighti + eisel eisel + either either + eject eject + eke ek + el el + elbe elb + elbow elbow + elbows elbow + eld eld + elder elder + elders elder + eldest eldest + eleanor eleanor + elect elect + elected elect + election elect + elegancy eleg + elegies elegi + element element + elements element + elephant eleph + elephants eleph + elevated elev + eleven eleven + eleventh eleventh + elf elf + elflocks elflock + eliads eliad + elinor elinor + elizabeth elizabeth + ell ell + elle ell + ellen ellen + elm elm + eloquence eloqu + eloquent eloqu + else els + elsewhere elsewher + elsinore elsinor + eltham eltham + elves elv + elvish elvish + ely eli + elysium elysium + em em + emballing embal + embalm embalm + embalms embalm + embark embark + embarked embark + embarquements embarqu + embassade embassad + embassage embassag + embassies embassi + embassy embassi + embattailed embattail + embattl embattl + embattle embattl + embay embai + embellished embellish + embers ember + emblaze emblaz + emblem emblem + emblems emblem + embodied embodi + embold embold + emboldens embolden + emboss emboss + embossed emboss + embounded embound + embowel embowel + embowell embowel + embrac embrac + embrace embrac + embraced embrac + embracement embrac + embracements embrac + embraces embrac + embracing embrac + embrasures embrasur + embroider embroid + embroidery embroideri + emhracing emhrac + emilia emilia + eminence emin + eminent emin + eminently emin + emmanuel emmanuel + emnity emniti + empale empal + emperal emper + emperess emperess + emperial emperi + emperor emperor + empery emperi + emphasis emphasi + empire empir + empirics empir + empiricutic empiricut + empleached empleach + employ emploi + employed emploi + employer employ + employment 
employ + employments employ + empoison empoison + empress empress + emptied empti + emptier emptier + empties empti + emptiness empti + empty empti + emptying empti + emulate emul + emulation emul + emulations emul + emulator emul + emulous emul + en en + enact enact + enacted enact + enacts enact + enactures enactur + enamell enamel + enamelled enamel + enamour enamour + enamoured enamour + enanmour enanmour + encamp encamp + encamped encamp + encave encav + enceladus enceladu + enchaf enchaf + enchafed enchaf + enchant enchant + enchanted enchant + enchanting enchant + enchantingly enchantingli + enchantment enchant + enchantress enchantress + enchants enchant + enchas encha + encircle encircl + encircled encircl + enclos enclo + enclose enclos + enclosed enclos + encloses enclos + encloseth encloseth + enclosing enclos + enclouded encloud + encompass encompass + encompassed encompass + encompasseth encompasseth + encompassment encompass + encore encor + encorporal encorpor + encount encount + encounter encount + encountered encount + encounters encount + encourage encourag + encouraged encourag + encouragement encourag + encrimsoned encrimson + encroaching encroach + encumb encumb + end end + endamage endamag + endamagement endamag + endanger endang + endart endart + endear endear + endeared endear + endeavour endeavour + endeavours endeavour + ended end + ender ender + ending end + endings end + endite endit + endless endless + endow endow + endowed endow + endowments endow + endows endow + ends end + endu endu + endue endu + endur endur + endurance endur + endure endur + endured endur + endures endur + enduring endur + endymion endymion + eneas enea + enemies enemi + enemy enemi + enernies enerni + enew enew + enfeebled enfeebl + enfeebles enfeebl + enfeoff enfeoff + enfetter enfett + enfoldings enfold + enforc enforc + enforce enforc + enforced enforc + enforcedly enforcedli + enforcement enforc + enforces enforc + enforcest enforcest + enfranched enfranch + enfranchis enfranchi + enfranchise enfranchis + enfranchised enfranchis + enfranchisement enfranchis + enfreed enfre + enfreedoming enfreedom + engag engag + engage engag + engaged engag + engagements engag + engaging engag + engaol engaol + engend engend + engender engend + engenders engend + engilds engild + engine engin + engineer engin + enginer engin + engines engin + engirt engirt + england england + english english + englishman englishman + englishmen englishmen + engluts englut + englutted englut + engraffed engraf + engraft engraft + engrafted engraft + engrav engrav + engrave engrav + engross engross + engrossed engross + engrossest engrossest + engrossing engross + engrossments engross + enguard enguard + enigma enigma + enigmatical enigmat + enjoin enjoin + enjoined enjoin + enjoy enjoi + enjoyed enjoi + enjoyer enjoy + enjoying enjoi + enjoys enjoi + enkindle enkindl + enkindled enkindl + enlard enlard + enlarg enlarg + enlarge enlarg + enlarged enlarg + enlargement enlarg + enlargeth enlargeth + enlighten enlighten + enlink enlink + enmesh enmesh + enmities enmiti + enmity enmiti + ennoble ennobl + ennobled ennobl + enobarb enobarb + enobarbus enobarbu + enon enon + enormity enorm + enormous enorm + enough enough + enow enow + enpatron enpatron + enpierced enpierc + enquir enquir + enquire enquir + enquired enquir + enrag enrag + enrage enrag + enraged enrag + enrages enrag + enrank enrank + enrapt enrapt + enrich enrich + enriched enrich + enriches enrich + enridged enridg + enrings enr + enrob enrob + enrobe enrob 
+ enroll enrol + enrolled enrol + enrooted enroot + enrounded enround + enschedul enschedul + ensconce ensconc + ensconcing ensconc + enseamed enseam + ensear ensear + enseigne enseign + enseignez enseignez + ensemble ensembl + enshelter enshelt + enshielded enshield + enshrines enshrin + ensign ensign + ensigns ensign + enskied enski + ensman ensman + ensnare ensnar + ensnared ensnar + ensnareth ensnareth + ensteep ensteep + ensu ensu + ensue ensu + ensued ensu + ensues ensu + ensuing ensu + enswathed enswath + ent ent + entail entail + entame entam + entangled entangl + entangles entangl + entendre entendr + enter enter + entered enter + entering enter + enterprise enterpris + enterprises enterpris + enters enter + entertain entertain + entertained entertain + entertainer entertain + entertaining entertain + entertainment entertain + entertainments entertain + enthrall enthral + enthralled enthral + enthron enthron + enthroned enthron + entice entic + enticements entic + enticing entic + entire entir + entirely entir + entitle entitl + entitled entitl + entitling entitl + entomb entomb + entombed entomb + entrails entrail + entrance entranc + entrances entranc + entrap entrap + entrapp entrapp + entre entr + entreat entreat + entreated entreat + entreaties entreati + entreating entreat + entreatments entreat + entreats entreat + entreaty entreati + entrench entrench + entry entri + entwist entwist + envelop envelop + envenom envenom + envenomed envenom + envenoms envenom + envied envi + envies envi + envious enviou + enviously envious + environ environ + environed environ + envoy envoi + envy envi + envying envi + enwheel enwheel + enwombed enwomb + enwraps enwrap + ephesian ephesian + ephesians ephesian + ephesus ephesu + epicure epicur + epicurean epicurean + epicures epicur + epicurism epicur + epicurus epicuru + epidamnum epidamnum + epidaurus epidauru + epigram epigram + epilepsy epilepsi + epileptic epilept + epilogue epilogu + epilogues epilogu + epistles epistl + epistrophus epistrophu + epitaph epitaph + epitaphs epitaph + epithet epithet + epitheton epitheton + epithets epithet + epitome epitom + equal equal + equalities equal + equality equal + equall equal + equally equal + equalness equal + equals equal + equinoctial equinocti + equinox equinox + equipage equipag + equity equiti + equivocal equivoc + equivocate equivoc + equivocates equivoc + equivocation equivoc + equivocator equivoc + er er + erbear erbear + erbearing erbear + erbears erbear + erbeat erbeat + erblows erblow + erboard erboard + erborne erborn + ercame ercam + ercast ercast + ercharg ercharg + ercharged ercharg + ercharging ercharg + ercles ercl + ercome ercom + ercover ercov + ercrows ercrow + erdoing erdo + ere er + erebus erebu + erect erect + erected erect + erecting erect + erection erect + erects erect + erewhile erewhil + erflourish erflourish + erflow erflow + erflowing erflow + erflows erflow + erfraught erfraught + erga erga + ergalled ergal + erglanced erglanc + ergo ergo + ergone ergon + ergrow ergrow + ergrown ergrown + ergrowth ergrowth + erhang erhang + erhanging erhang + erhasty erhasti + erhear erhear + erheard erheard + eringoes eringo + erjoy erjoi + erleap erleap + erleaps erleap + erleavens erleaven + erlook erlook + erlooking erlook + ermaster ermast + ermengare ermengar + ermount ermount + ern ern + ernight ernight + eros ero + erpaid erpaid + erparted erpart + erpast erpast + erpays erpai + erpeer erpeer + erperch erperch + erpicturing erpictur + erpingham erpingham + erposting erpost 
+ erpow erpow + erpress erpress + erpressed erpress + err err + errand errand + errands errand + errant errant + errate errat + erraught erraught + erreaches erreach + erred er + errest errest + erring er + erroneous erron + error error + errors error + errs err + errule errul + errun errun + erset erset + ershade ershad + ershades ershad + ershine ershin + ershot ershot + ersized ersiz + erskip erskip + erslips erslip + erspreads erspread + erst erst + erstare erstar + erstep erstep + erstunk erstunk + ersway erswai + ersways erswai + erswell erswel + erta erta + ertake ertak + erteemed erteem + erthrow erthrow + erthrown erthrown + erthrows erthrow + ertook ertook + ertop ertop + ertopping ertop + ertrip ertrip + erturn erturn + erudition erudit + eruption erupt + eruptions erupt + ervalues ervalu + erwalk erwalk + erwatch erwatch + erween erween + erweens erween + erweigh erweigh + erweighs erweigh + erwhelm erwhelm + erwhelmed erwhelm + erworn erworn + es es + escalus escalu + escap escap + escape escap + escaped escap + escapes escap + eschew eschew + escoted escot + esill esil + especial especi + especially especi + esperance esper + espials espial + espied espi + espies espi + espous espou + espouse espous + espy espi + esquire esquir + esquires esquir + essay essai + essays essai + essence essenc + essential essenti + essentially essenti + esses ess + essex essex + est est + establish establish + established establish + estate estat + estates estat + esteem esteem + esteemed esteem + esteemeth esteemeth + esteeming esteem + esteems esteem + estimable estim + estimate estim + estimation estim + estimations estim + estime estim + estranged estrang + estridge estridg + estridges estridg + et et + etc etc + etceteras etcetera + ete et + eternal etern + eternally etern + eterne etern + eternity etern + eterniz eterniz + etes et + ethiop ethiop + ethiope ethiop + ethiopes ethiop + ethiopian ethiopian + etna etna + eton eton + etre etr + eunuch eunuch + eunuchs eunuch + euphrates euphrat + euphronius euphroniu + euriphile euriphil + europa europa + europe europ + ev ev + evade evad + evades evad + evans evan + evasion evas + evasions evas + eve ev + even even + evening even + evenly evenli + event event + eventful event + events event + ever ever + everlasting everlast + everlastingly everlastingli + evermore evermor + every everi + everyone everyon + everything everyth + everywhere everywher + evidence evid + evidences evid + evident evid + evil evil + evilly evilli + evils evil + evitate evit + ewe ew + ewer ewer + ewers ewer + ewes ew + exact exact + exacted exact + exactest exactest + exacting exact + exaction exact + exactions exact + exactly exactli + exacts exact + exalt exalt + exalted exalt + examin examin + examination examin + examinations examin + examine examin + examined examin + examines examin + exampl exampl + example exampl + exampled exampl + examples exampl + exasperate exasper + exasperates exasper + exceed exce + exceeded exceed + exceedeth exceedeth + exceeding exceed + exceedingly exceedingli + exceeds exce + excel excel + excelled excel + excellence excel + excellencies excel + excellency excel + excellent excel + excellently excel + excelling excel + excels excel + except except + excepted except + excepting except + exception except + exceptions except + exceptless exceptless + excess excess + excessive excess + exchang exchang + exchange exchang + exchanged exchang + exchequer exchequ + exchequers exchequ + excite excit + excited excit + excitements excit + 
excites excit + exclaim exclaim + exclaims exclaim + exclamation exclam + exclamations exclam + excludes exclud + excommunicate excommun + excommunication excommun + excrement excrement + excrements excrement + excursion excurs + excursions excurs + excus excu + excusable excus + excuse excus + excused excus + excuses excus + excusez excusez + excusing excus + execrable execr + execrations execr + execute execut + executed execut + executing execut + execution execut + executioner execution + executioners execution + executor executor + executors executor + exempt exempt + exempted exempt + exequies exequi + exercise exercis + exercises exercis + exeter exet + exeunt exeunt + exhal exhal + exhalation exhal + exhalations exhal + exhale exhal + exhales exhal + exhaust exhaust + exhibit exhibit + exhibiters exhibit + exhibition exhibit + exhort exhort + exhortation exhort + exigent exig + exil exil + exile exil + exiled exil + exion exion + exist exist + exists exist + exit exit + exits exit + exorciser exorcis + exorcisms exorc + exorcist exorcist + expect expect + expectance expect + expectancy expect + expectation expect + expectations expect + expected expect + expecters expect + expecting expect + expects expect + expedience expedi + expedient expedi + expediently expedi + expedition expedit + expeditious expediti + expel expel + expell expel + expelling expel + expels expel + expend expend + expense expens + expenses expens + experienc experienc + experience experi + experiences experi + experiment experi + experimental experiment + experiments experi + expert expert + expertness expert + expiate expiat + expiation expiat + expir expir + expiration expir + expire expir + expired expir + expires expir + expiring expir + explication explic + exploit exploit + exploits exploit + expos expo + expose expos + exposing expos + exposition exposit + expositor expositor + expostulate expostul + expostulation expostul + exposture expostur + exposure exposur + expound expound + expounded expound + express express + expressed express + expresseth expresseth + expressing express + expressive express + expressly expressli + expressure expressur + expuls expul + expulsion expuls + exquisite exquisit + exsufflicate exsuffl + extant extant + extemporal extempor + extemporally extempor + extempore extempor + extend extend + extended extend + extends extend + extent extent + extenuate extenu + extenuated extenu + extenuates extenu + extenuation extenu + exterior exterior + exteriorly exteriorli + exteriors exterior + extermin extermin + extern extern + external extern + extinct extinct + extincted extinct + extincture extinctur + extinguish extinguish + extirp extirp + extirpate extirp + extirped extirp + extol extol + extoll extol + extolment extol + exton exton + extort extort + extorted extort + extortion extort + extortions extort + extra extra + extract extract + extracted extract + extracting extract + extraordinarily extraordinarili + extraordinary extraordinari + extraught extraught + extravagancy extravag + extravagant extravag + extreme extrem + extremely extrem + extremes extrem + extremest extremest + extremities extrem + extremity extrem + exuent exuent + exult exult + exultation exult + ey ey + eyas eya + eyases eyas + eye ey + eyeball eyebal + eyeballs eyebal + eyebrow eyebrow + eyebrows eyebrow + eyed ei + eyeless eyeless + eyelid eyelid + eyelids eyelid + eyes ey + eyesight eyesight + eyestrings eyestr + eying ei + eyne eyn + eyrie eyri + fa fa + fabian fabian + fable fabl + fables fabl + 
fabric fabric + fabulous fabul + fac fac + face face + faced face + facere facer + faces face + faciant faciant + facile facil + facility facil + facinerious facineri + facing face + facit facit + fact fact + faction faction + factionary factionari + factions faction + factious factiou + factor factor + factors factor + faculties faculti + faculty faculti + fade fade + faded fade + fadeth fadeth + fadge fadg + fading fade + fadings fade + fadom fadom + fadoms fadom + fagot fagot + fagots fagot + fail fail + failing fail + fails fail + fain fain + faint faint + fainted faint + fainter fainter + fainting faint + faintly faintli + faintness faint + faints faint + fair fair + fairer fairer + fairest fairest + fairies fairi + fairing fair + fairings fair + fairly fairli + fairness fair + fairs fair + fairwell fairwel + fairy fairi + fais fai + fait fait + faites fait + faith faith + faithful faith + faithfull faithful + faithfully faithfulli + faithless faithless + faiths faith + faitors faitor + fal fal + falchion falchion + falcon falcon + falconbridge falconbridg + falconer falcon + falconers falcon + fall fall + fallacy fallaci + fallen fallen + falleth falleth + falliable falliabl + fallible fallibl + falling fall + fallow fallow + fallows fallow + falls fall + fally falli + falorous falor + false fals + falsehood falsehood + falsely fals + falseness fals + falser falser + falsify falsifi + falsing fals + falstaff falstaff + falstaffs falstaff + falter falter + fam fam + fame fame + famed fame + familiar familiar + familiarity familiar + familiarly familiarli + familiars familiar + family famili + famine famin + famish famish + famished famish + famous famou + famoused famous + famously famous + fan fan + fanatical fanat + fancies fanci + fancy fanci + fane fane + fanes fane + fang fang + fangled fangl + fangless fangless + fangs fang + fann fann + fanning fan + fans fan + fantasied fantasi + fantasies fantasi + fantastic fantast + fantastical fantast + fantastically fantast + fantasticoes fantastico + fantasy fantasi + fap fap + far far + farborough farborough + farced farc + fardel fardel + fardels fardel + fare fare + fares fare + farewell farewel + farewells farewel + fariner farin + faring fare + farm farm + farmer farmer + farmhouse farmhous + farms farm + farre farr + farrow farrow + farther farther + farthest farthest + farthing farth + farthingale farthingal + farthingales farthingal + farthings farth + fartuous fartuou + fas fa + fashion fashion + fashionable fashion + fashioning fashion + fashions fashion + fast fast + fasted fast + fasten fasten + fastened fasten + faster faster + fastest fastest + fasting fast + fastly fastli + fastolfe fastolf + fasts fast + fat fat + fatal fatal + fatally fatal + fate fate + fated fate + fates fate + father father + fathered father + fatherless fatherless + fatherly fatherli + fathers father + fathom fathom + fathomless fathomless + fathoms fathom + fatigate fatig + fatness fat + fats fat + fatted fat + fatter fatter + fattest fattest + fatting fat + fatuus fatuu + fauconbridge fauconbridg + faulconbridge faulconbridg + fault fault + faultiness faulti + faultless faultless + faults fault + faulty faulti + fausse fauss + fauste faust + faustuses faustus + faut faut + favor favor + favorable favor + favorably favor + favors favor + favour favour + favourable favour + favoured favour + favouredly favouredli + favourer favour + favourers favour + favouring favour + favourite favourit + favourites favourit + favours favour + favout favout + fawn 
fawn + fawneth fawneth + fawning fawn + fawns fawn + fay fai + fe fe + fealty fealti + fear fear + feared fear + fearest fearest + fearful fear + fearfull fearful + fearfully fearfulli + fearfulness fear + fearing fear + fearless fearless + fears fear + feast feast + feasted feast + feasting feast + feasts feast + feat feat + feated feat + feater feater + feather feather + feathered feather + feathers feather + featly featli + feats feat + featur featur + feature featur + featured featur + featureless featureless + features featur + february februari + fecks feck + fed fed + fedary fedari + federary federari + fee fee + feeble feebl + feebled feebl + feebleness feebl + feebling feebl + feebly feebli + feed feed + feeder feeder + feeders feeder + feedeth feedeth + feeding feed + feeds feed + feel feel + feeler feeler + feeling feel + feelingly feelingli + feels feel + fees fee + feet feet + fehemently fehement + feign feign + feigned feign + feigning feign + feil feil + feith feith + felicitate felicit + felicity felic + fell fell + fellest fellest + fellies felli + fellow fellow + fellowly fellowli + fellows fellow + fellowship fellowship + fellowships fellowship + fells fell + felon felon + felonious feloni + felony feloni + felt felt + female femal + females femal + feminine feminin + fen fen + fenc fenc + fence fenc + fencer fencer + fencing fenc + fends fend + fennel fennel + fenny fenni + fens fen + fenton fenton + fer fer + ferdinand ferdinand + fere fere + fernseed fernse + ferrara ferrara + ferrers ferrer + ferret ferret + ferry ferri + ferryman ferryman + fertile fertil + fertility fertil + fervency fervenc + fervour fervour + fery feri + fest fest + feste fest + fester fester + festinate festin + festinately festin + festival festiv + festivals festiv + fet fet + fetch fetch + fetches fetch + fetching fetch + fetlock fetlock + fetlocks fetlock + fett fett + fetter fetter + fettering fetter + fetters fetter + fettle fettl + feu feu + feud feud + fever fever + feverous fever + fevers fever + few few + fewer fewer + fewest fewest + fewness few + fickle fickl + fickleness fickl + fico fico + fiction fiction + fiddle fiddl + fiddler fiddler + fiddlestick fiddlestick + fidele fidel + fidelicet fidelicet + fidelity fidel + fidius fidiu + fie fie + field field + fielded field + fields field + fiend fiend + fiends fiend + fierce fierc + fiercely fierc + fierceness fierc + fiery fieri + fife fife + fifes fife + fifteen fifteen + fifteens fifteen + fifteenth fifteenth + fifth fifth + fifty fifti + fiftyfold fiftyfold + fig fig + fight fight + fighter fighter + fightest fightest + fighteth fighteth + fighting fight + fights fight + figo figo + figs fig + figur figur + figure figur + figured figur + figures figur + figuring figur + fike fike + fil fil + filberts filbert + filch filch + filches filch + filching filch + file file + filed file + files file + filial filial + filius filiu + fill fill + filled fill + fillet fillet + filling fill + fillip fillip + fills fill + filly filli + film film + fils fil + filth filth + filths filth + filthy filthi + fin fin + finally final + finch finch + find find + finder finder + findeth findeth + finding find + findings find + finds find + fine fine + fineless fineless + finely fine + finem finem + fineness fine + finer finer + fines fine + finest finest + fing fing + finger finger + fingering finger + fingers finger + fingre fingr + fingres fingr + finical finic + finish finish + finished finish + finisher finish + finless finless + finn finn + fins fin 
+ finsbury finsburi + fir fir + firago firago + fire fire + firebrand firebrand + firebrands firebrand + fired fire + fires fire + firework firework + fireworks firework + firing fire + firk firk + firm firm + firmament firmament + firmly firmli + firmness firm + first first + firstlings firstl + fish fish + fisher fisher + fishermen fishermen + fishers fisher + fishes fish + fishified fishifi + fishmonger fishmong + fishpond fishpond + fisnomy fisnomi + fist fist + fisting fist + fists fist + fistula fistula + fit fit + fitchew fitchew + fitful fit + fitly fitli + fitment fitment + fitness fit + fits fit + fitted fit + fitter fitter + fittest fittest + fitteth fitteth + fitting fit + fitzwater fitzwat + five five + fivepence fivep + fives five + fix fix + fixed fix + fixes fix + fixeth fixeth + fixing fix + fixture fixtur + fl fl + flag flag + flagging flag + flagon flagon + flagons flagon + flags flag + flail flail + flakes flake + flaky flaki + flam flam + flame flame + flamen flamen + flamens flamen + flames flame + flaming flame + flaminius flaminiu + flanders flander + flannel flannel + flap flap + flaring flare + flash flash + flashes flash + flashing flash + flask flask + flat flat + flatly flatli + flatness flat + flats flat + flatt flatt + flatter flatter + flattered flatter + flatterer flatter + flatterers flatter + flatterest flatterest + flatteries flatteri + flattering flatter + flatters flatter + flattery flatteri + flaunts flaunt + flavio flavio + flavius flaviu + flaw flaw + flaws flaw + flax flax + flaxen flaxen + flay flai + flaying flai + flea flea + fleance fleanc + fleas flea + flecked fleck + fled fled + fledge fledg + flee flee + fleec fleec + fleece fleec + fleeces fleec + fleer fleer + fleering fleer + fleers fleer + fleet fleet + fleeter fleeter + fleeting fleet + fleming fleme + flemish flemish + flesh flesh + fleshes flesh + fleshly fleshli + fleshment fleshment + fleshmonger fleshmong + flew flew + flexible flexibl + flexure flexur + flibbertigibbet flibbertigibbet + flickering flicker + flidge flidg + fliers flier + flies fli + flieth flieth + flight flight + flights flight + flighty flighti + flinch flinch + fling fling + flint flint + flints flint + flinty flinti + flirt flirt + float float + floated float + floating float + flock flock + flocks flock + flood flood + floodgates floodgat + floods flood + floor floor + flora flora + florence florenc + florentine florentin + florentines florentin + florentius florentiu + florizel florizel + flote flote + floulish floulish + flour flour + flourish flourish + flourishes flourish + flourisheth flourisheth + flourishing flourish + flout flout + flouted flout + flouting flout + flouts flout + flow flow + flowed flow + flower flower + flowerets floweret + flowers flower + flowing flow + flown flown + flows flow + fluellen fluellen + fluent fluent + flung flung + flush flush + flushing flush + fluster fluster + flute flute + flutes flute + flutter flutter + flux flux + fluxive fluxiv + fly fly + flying fly + fo fo + foal foal + foals foal + foam foam + foamed foam + foaming foam + foams foam + foamy foami + fob fob + focative foc + fodder fodder + foe foe + foeman foeman + foemen foemen + foes foe + fog fog + foggy foggi + fogs fog + foh foh + foi foi + foil foil + foiled foil + foils foil + foin foin + foining foin + foins foin + fois foi + foison foison + foisons foison + foist foist + foix foix + fold fold + folded fold + folds fold + folio folio + folk folk + folks folk + follies folli + follow follow + followed 
follow + follower follow + followers follow + followest followest + following follow + follows follow + folly folli + fond fond + fonder fonder + fondly fondli + fondness fond + font font + fontibell fontibel + food food + fool fool + fooleries fooleri + foolery fooleri + foolhardy foolhardi + fooling fool + foolish foolish + foolishly foolishli + foolishness foolish + fools fool + foot foot + football footbal + footboy footboi + footboys footboi + footed foot + footfall footfal + footing foot + footman footman + footmen footmen + footpath footpath + footsteps footstep + footstool footstool + fopp fopp + fopped fop + foppery fopperi + foppish foppish + fops fop + for for + forage forag + foragers forag + forbade forbad + forbear forbear + forbearance forbear + forbears forbear + forbid forbid + forbidden forbidden + forbiddenly forbiddenli + forbids forbid + forbod forbod + forborne forborn + forc forc + force forc + forced forc + forceful forc + forceless forceless + forces forc + forcible forcibl + forcibly forcibl + forcing forc + ford ford + fordid fordid + fordo fordo + fordoes fordo + fordone fordon + fore fore + forecast forecast + forefather forefath + forefathers forefath + forefinger forefing + forego forego + foregone foregon + forehand forehand + forehead forehead + foreheads forehead + forehorse forehors + foreign foreign + foreigner foreign + foreigners foreign + foreknowing foreknow + foreknowledge foreknowledg + foremost foremost + forenamed forenam + forenoon forenoon + forerun forerun + forerunner forerunn + forerunning forerun + foreruns forerun + foresaid foresaid + foresaw foresaw + foresay foresai + foresee forese + foreseeing forese + foresees forese + foreshow foreshow + foreskirt foreskirt + forespent foresp + forest forest + forestall forestal + forestalled forestal + forester forest + foresters forest + forests forest + foretell foretel + foretelling foretel + foretells foretel + forethink forethink + forethought forethought + foretold foretold + forever forev + foreward foreward + forewarn forewarn + forewarned forewarn + forewarning forewarn + forfeit forfeit + forfeited forfeit + forfeiters forfeit + forfeiting forfeit + forfeits forfeit + forfeiture forfeitur + forfeitures forfeitur + forfend forfend + forfended forfend + forg forg + forgave forgav + forge forg + forged forg + forgeries forgeri + forgery forgeri + forges forg + forget forget + forgetful forget + forgetfulness forget + forgetive forget + forgets forget + forgetting forget + forgive forgiv + forgiven forgiven + forgiveness forgiv + forgo forgo + forgoing forgo + forgone forgon + forgot forgot + forgotten forgotten + fork fork + forked fork + forks fork + forlorn forlorn + form form + formal formal + formally formal + formed form + former former + formerly formerli + formless formless + forms form + fornication fornic + fornications fornic + fornicatress fornicatress + forres forr + forrest forrest + forsake forsak + forsaken forsaken + forsaketh forsaketh + forslow forslow + forsook forsook + forsooth forsooth + forspent forspent + forspoke forspok + forswear forswear + forswearing forswear + forswore forswor + forsworn forsworn + fort fort + forted fort + forth forth + forthcoming forthcom + forthlight forthlight + forthright forthright + forthwith forthwith + fortification fortif + fortifications fortif + fortified fortifi + fortifies fortifi + fortify fortifi + fortinbras fortinbra + fortitude fortitud + fortnight fortnight + fortress fortress + fortresses fortress + forts fort + fortun 
fortun + fortuna fortuna + fortunate fortun + fortunately fortun + fortune fortun + fortuned fortun + fortunes fortun + fortward fortward + forty forti + forum forum + forward forward + forwarding forward + forwardness forward + forwards forward + forwearied forweari + fosset fosset + fost fost + foster foster + fostered foster + fought fought + foughten foughten + foul foul + fouler fouler + foulest foulest + foully foulli + foulness foul + found found + foundation foundat + foundations foundat + founded found + founder founder + fount fount + fountain fountain + fountains fountain + founts fount + four four + fourscore fourscor + fourteen fourteen + fourth fourth + foutra foutra + fowl fowl + fowler fowler + fowling fowl + fowls fowl + fox fox + foxes fox + foxship foxship + fracted fract + fraction fraction + fractions fraction + fragile fragil + fragment fragment + fragments fragment + fragrant fragrant + frail frail + frailer frailer + frailties frailti + frailty frailti + fram fram + frame frame + framed frame + frames frame + frampold frampold + fran fran + francais francai + france franc + frances franc + franchise franchis + franchised franchis + franchisement franchis + franchises franchis + franciae francia + francis franci + francisca francisca + franciscan franciscan + francisco francisco + frank frank + franker franker + frankfort frankfort + franklin franklin + franklins franklin + frankly frankli + frankness frank + frantic frantic + franticly franticli + frateretto frateretto + fratrum fratrum + fraud fraud + fraudful fraud + fraught fraught + fraughtage fraughtag + fraughting fraught + fray frai + frays frai + freckl freckl + freckled freckl + freckles freckl + frederick frederick + free free + freed freed + freedom freedom + freedoms freedom + freehearted freeheart + freelier freelier + freely freeli + freeman freeman + freemen freemen + freeness freeness + freer freer + frees free + freestone freeston + freetown freetown + freeze freez + freezes freez + freezing freez + freezings freez + french french + frenchman frenchman + frenchmen frenchmen + frenchwoman frenchwoman + frenzy frenzi + frequent frequent + frequents frequent + fresh fresh + fresher fresher + freshes fresh + freshest freshest + freshly freshli + freshness fresh + fret fret + fretful fret + frets fret + fretted fret + fretten fretten + fretting fret + friar friar + friars friar + friday fridai + fridays fridai + friend friend + friended friend + friending friend + friendless friendless + friendliness friendli + friendly friendli + friends friend + friendship friendship + friendships friendship + frieze friez + fright fright + frighted fright + frightened frighten + frightful fright + frighting fright + frights fright + fringe fring + fringed fring + frippery fripperi + frisk frisk + fritters fritter + frivolous frivol + fro fro + frock frock + frog frog + frogmore frogmor + froissart froissart + frolic frolic + from from + front front + fronted front + frontier frontier + frontiers frontier + fronting front + frontlet frontlet + fronts front + frost frost + frosts frost + frosty frosti + froth froth + froward froward + frown frown + frowning frown + frowningly frowningli + frowns frown + froze froze + frozen frozen + fructify fructifi + frugal frugal + fruit fruit + fruiterer fruiter + fruitful fruit + fruitfully fruitfulli + fruitfulness fruit + fruition fruition + fruitless fruitless + fruits fruit + frush frush + frustrate frustrat + frutify frutifi + fry fry + fubb fubb + fuel fuel + fugitive fugit + 
fulfil fulfil + fulfill fulfil + fulfilling fulfil + fulfils fulfil + full full + fullam fullam + fuller fuller + fullers fuller + fullest fullest + fullness full + fully fulli + fulness ful + fulsome fulsom + fulvia fulvia + fum fum + fumble fumbl + fumbles fumbl + fumblest fumblest + fumbling fumbl + fume fume + fumes fume + fuming fume + fumiter fumit + fumitory fumitori + fun fun + function function + functions function + fundamental fundament + funeral funer + funerals funer + fur fur + furbish furbish + furies furi + furious furiou + furlongs furlong + furnace furnac + furnaces furnac + furnish furnish + furnished furnish + furnishings furnish + furniture furnitur + furnival furniv + furor furor + furr furr + furrow furrow + furrowed furrow + furrows furrow + furth furth + further further + furtherance further + furtherer further + furthermore furthermor + furthest furthest + fury furi + furze furz + furzes furz + fust fust + fustian fustian + fustilarian fustilarian + fusty fusti + fut fut + future futur + futurity futur + g g + gabble gabbl + gaberdine gaberdin + gabriel gabriel + gad gad + gadding gad + gads gad + gadshill gadshil + gag gag + gage gage + gaged gage + gagg gagg + gaging gage + gagne gagn + gain gain + gained gain + gainer gainer + gaingiving gaingiv + gains gain + gainsaid gainsaid + gainsay gainsai + gainsaying gainsai + gainsays gainsai + gainst gainst + gait gait + gaited gait + galathe galath + gale gale + galen galen + gales gale + gall gall + gallant gallant + gallantly gallantli + gallantry gallantri + gallants gallant + galled gall + gallery galleri + galley gallei + galleys gallei + gallia gallia + gallian gallian + galliard galliard + galliasses galliass + gallimaufry gallimaufri + galling gall + gallons gallon + gallop gallop + galloping gallop + gallops gallop + gallow gallow + galloway gallowai + gallowglasses gallowglass + gallows gallow + gallowses gallows + galls gall + gallus gallu + gam gam + gambol gambol + gambold gambold + gambols gambol + gamboys gamboi + game game + gamers gamer + games game + gamesome gamesom + gamester gamest + gaming game + gammon gammon + gamut gamut + gan gan + gangren gangren + ganymede ganymed + gaol gaol + gaoler gaoler + gaolers gaoler + gaols gaol + gap gap + gape gape + gapes gape + gaping gape + gar gar + garb garb + garbage garbag + garboils garboil + garcon garcon + gard gard + garde gard + garden garden + gardener garden + gardeners garden + gardens garden + gardez gardez + gardiner gardin + gardon gardon + gargantua gargantua + gargrave gargrav + garish garish + garland garland + garlands garland + garlic garlic + garment garment + garments garment + garmet garmet + garner garner + garners garner + garnish garnish + garnished garnish + garret garret + garrison garrison + garrisons garrison + gart gart + garter garter + garterd garterd + gartering garter + garters garter + gascony gasconi + gash gash + gashes gash + gaskins gaskin + gasp gasp + gasping gasp + gasted gast + gastness gast + gat gat + gate gate + gated gate + gates gate + gath gath + gather gather + gathered gather + gathering gather + gathers gather + gatories gatori + gatory gatori + gaud gaud + gaudeo gaudeo + gaudy gaudi + gauge gaug + gaul gaul + gaultree gaultre + gaunt gaunt + gauntlet gauntlet + gauntlets gauntlet + gav gav + gave gave + gavest gavest + gawded gawd + gawds gawd + gawsey gawsei + gay gai + gayness gay + gaz gaz + gaze gaze + gazed gaze + gazer gazer + gazers gazer + gazes gaze + gazeth gazeth + gazing gaze + gear gear + 
geck geck + geese gees + geffrey geffrei + geld geld + gelded geld + gelding geld + gelida gelida + gelidus gelidu + gelt gelt + gem gem + geminy gemini + gems gem + gen gen + gender gender + genders gender + general gener + generally gener + generals gener + generation gener + generations gener + generative gener + generosity generos + generous gener + genitive genit + genitivo genitivo + genius geniu + gennets gennet + genoa genoa + genoux genoux + gens gen + gent gent + gentilhomme gentilhomm + gentility gentil + gentle gentl + gentlefolks gentlefolk + gentleman gentleman + gentlemanlike gentlemanlik + gentlemen gentlemen + gentleness gentl + gentler gentler + gentles gentl + gentlest gentlest + gentlewoman gentlewoman + gentlewomen gentlewomen + gently gentli + gentry gentri + george georg + gerard gerard + germaines germain + germains germain + german german + germane german + germans german + germany germani + gertrude gertrud + gest gest + gests gest + gesture gestur + gestures gestur + get get + getrude getrud + gets get + getter getter + getting get + ghastly ghastli + ghost ghost + ghosted ghost + ghostly ghostli + ghosts ghost + gi gi + giant giant + giantess giantess + giantlike giantlik + giants giant + gib gib + gibber gibber + gibbet gibbet + gibbets gibbet + gibe gibe + giber giber + gibes gibe + gibing gibe + gibingly gibingli + giddily giddili + giddiness giddi + giddy giddi + gift gift + gifts gift + gig gig + giglets giglet + giglot giglot + gilbert gilbert + gild gild + gilded gild + gilding gild + gilliams gilliam + gillian gillian + gills gill + gillyvors gillyvor + gilt gilt + gimmal gimmal + gimmers gimmer + gin gin + ging ging + ginger ginger + gingerbread gingerbread + gingerly gingerli + ginn ginn + gins gin + gioucestershire gioucestershir + gipes gipe + gipsies gipsi + gipsy gipsi + gird gird + girded gird + girdle girdl + girdled girdl + girdles girdl + girdling girdl + girl girl + girls girl + girt girt + girth girth + gis gi + giv giv + give give + given given + giver giver + givers giver + gives give + givest givest + giveth giveth + giving give + givings give + glad glad + gladded glad + gladding glad + gladly gladli + gladness glad + glamis glami + glanc glanc + glance glanc + glanced glanc + glances glanc + glancing glanc + glanders glander + glansdale glansdal + glare glare + glares glare + glass glass + glasses glass + glassy glassi + glaz glaz + glazed glaze + gleams gleam + glean glean + gleaned glean + gleaning glean + gleeful gleeful + gleek gleek + gleeking gleek + gleeks gleek + glend glend + glendower glendow + glib glib + glide glide + glided glide + glides glide + glideth glideth + gliding glide + glimmer glimmer + glimmering glimmer + glimmers glimmer + glimpse glimps + glimpses glimps + glist glist + glistening glisten + glister glister + glistering glister + glisters glister + glitt glitt + glittering glitter + globe globe + globes globe + glooming gloom + gloomy gloomi + glories glori + glorified glorifi + glorify glorifi + glorious gloriou + gloriously glorious + glory glori + glose glose + gloss gloss + glosses gloss + glou glou + glouceste gloucest + gloucester gloucest + gloucestershire gloucestershir + glove glove + glover glover + gloves glove + glow glow + glowed glow + glowing glow + glowworm glowworm + gloz gloz + gloze gloze + glozes gloze + glu glu + glue glue + glued glu + glues glue + glut glut + glutt glutt + glutted glut + glutton glutton + gluttoning glutton + gluttony gluttoni + gnarled gnarl + gnarling gnarl + gnat gnat 
+ gnats gnat + gnaw gnaw + gnawing gnaw + gnawn gnawn + gnaws gnaw + go go + goad goad + goaded goad + goads goad + goal goal + goat goat + goatish goatish + goats goat + gobbets gobbet + gobbo gobbo + goblet goblet + goblets goblet + goblin goblin + goblins goblin + god god + godded god + godden godden + goddess goddess + goddesses goddess + goddild goddild + godfather godfath + godfathers godfath + godhead godhead + godlike godlik + godliness godli + godly godli + godmother godmoth + gods god + godson godson + goer goer + goers goer + goes goe + goest goest + goeth goeth + goffe goff + gogs gog + going go + gold gold + golden golden + goldenly goldenli + goldsmith goldsmith + goldsmiths goldsmith + golgotha golgotha + goliases golias + goliath goliath + gon gon + gondola gondola + gondolier gondoli + gone gone + goneril goneril + gong gong + gonzago gonzago + gonzalo gonzalo + good good + goodfellow goodfellow + goodlier goodlier + goodliest goodliest + goodly goodli + goodman goodman + goodness good + goodnight goodnight + goodrig goodrig + goods good + goodwife goodwif + goodwill goodwil + goodwin goodwin + goodwins goodwin + goodyear goodyear + goodyears goodyear + goose goos + gooseberry gooseberri + goosequills goosequil + goot goot + gor gor + gorbellied gorbelli + gorboduc gorboduc + gordian gordian + gore gore + gored gore + gorg gorg + gorge gorg + gorgeous gorgeou + gorget gorget + gorging gorg + gorgon gorgon + gormandize gormand + gormandizing gormand + gory gori + gosling gosl + gospel gospel + gospels gospel + goss goss + gossamer gossam + gossip gossip + gossiping gossip + gossiplike gossiplik + gossips gossip + got got + goth goth + goths goth + gotten gotten + gourd gourd + gout gout + gouts gout + gouty gouti + govern govern + governance govern + governed govern + governess gover + government govern + governor governor + governors governor + governs govern + gower gower + gown gown + gowns gown + grac grac + grace grace + graced grace + graceful grace + gracefully gracefulli + graceless graceless + graces grace + gracing grace + gracious graciou + graciously gracious + gradation gradat + graff graff + graffing graf + graft graft + grafted graft + grafters grafter + grain grain + grained grain + grains grain + gramercies gramerci + gramercy gramerci + grammar grammar + grand grand + grandam grandam + grandame grandam + grandchild grandchild + grande grand + grandeur grandeur + grandfather grandfath + grandjurors grandjuror + grandmother grandmoth + grandpre grandpr + grandsir grandsir + grandsire grandsir + grandsires grandsir + grange grang + grant grant + granted grant + granting grant + grants grant + grape grape + grapes grape + grapple grappl + grapples grappl + grappling grappl + grasp grasp + grasped grasp + grasps grasp + grass grass + grasshoppers grasshopp + grassy grassi + grate grate + grated grate + grateful grate + grates grate + gratiano gratiano + gratify gratifi + gratii gratii + gratillity gratil + grating grate + gratis grati + gratitude gratitud + gratulate gratul + grav grav + grave grave + gravediggers gravedigg + gravel gravel + graveless graveless + gravell gravel + gravely grave + graven graven + graveness grave + graver graver + graves grave + gravest gravest + gravestone graveston + gravities graviti + gravity graviti + gravy gravi + gray grai + graymalkin graymalkin + graz graz + graze graze + grazed graze + grazing graze + grease greas + greases greas + greasily greasili + greasy greasi + great great + greater greater + greatest greatest + 
greatly greatli + greatness great + grecian grecian + grecians grecian + gree gree + greece greec + greed greed + greedily greedili + greediness greedi + greedy greedi + greeing gree + greek greek + greekish greekish + greeks greek + green green + greener greener + greenly greenli + greens green + greensleeves greensleev + greenwich greenwich + greenwood greenwood + greet greet + greeted greet + greeting greet + greetings greet + greets greet + greg greg + gregory gregori + gremio gremio + grew grew + grey grei + greybeard greybeard + greybeards greybeard + greyhound greyhound + greyhounds greyhound + grief grief + griefs grief + griev griev + grievance grievanc + grievances grievanc + grieve griev + grieved griev + grieves griev + grievest grievest + grieving griev + grievingly grievingli + grievous grievou + grievously grievous + griffin griffin + griffith griffith + grim grim + grime grime + grimly grimli + grin grin + grind grind + grinding grind + grindstone grindston + grinning grin + grip grip + gripe gripe + gripes gripe + griping gripe + grise grise + grisly grisli + grissel grissel + grize grize + grizzle grizzl + grizzled grizzl + groan groan + groaning groan + groans groan + groat groat + groats groat + groin groin + groom groom + grooms groom + grop grop + groping grope + gros gro + gross gross + grosser grosser + grossly grossli + grossness gross + ground ground + grounded ground + groundlings groundl + grounds ground + grove grove + grovel grovel + grovelling grovel + groves grove + grow grow + groweth groweth + growing grow + grown grown + grows grow + growth growth + grub grub + grubb grubb + grubs grub + grudge grudg + grudged grudg + grudges grudg + grudging grudg + gruel gruel + grumble grumbl + grumblest grumblest + grumbling grumbl + grumblings grumbl + grumio grumio + grund grund + grunt grunt + gualtier gualtier + guard guard + guardage guardag + guardant guardant + guarded guard + guardian guardian + guardians guardian + guards guard + guardsman guardsman + gud gud + gudgeon gudgeon + guerdon guerdon + guerra guerra + guess guess + guesses guess + guessingly guessingli + guest guest + guests guest + guiana guiana + guichard guichard + guide guid + guided guid + guider guider + guiderius guideriu + guides guid + guiding guid + guidon guidon + guienne guienn + guil guil + guildenstern guildenstern + guilders guilder + guildford guildford + guildhall guildhal + guile guil + guiled guil + guileful guil + guilfords guilford + guilt guilt + guiltian guiltian + guiltier guiltier + guiltily guiltili + guiltiness guilti + guiltless guiltless + guilts guilt + guilty guilti + guinea guinea + guinever guinev + guise guis + gul gul + gules gule + gulf gulf + gulfs gulf + gull gull + gulls gull + gum gum + gumm gumm + gums gum + gun gun + gunner gunner + gunpowder gunpowd + guns gun + gurnet gurnet + gurney gurnei + gust gust + gusts gust + gusty gusti + guts gut + gutter gutter + guy gui + guynes guyn + guysors guysor + gypsy gypsi + gyve gyve + gyved gyve + gyves gyve + h h + ha ha + haberdasher haberdash + habiliment habili + habiliments habili + habit habit + habitation habit + habited habit + habits habit + habitude habitud + hack hack + hacket hacket + hackney hacknei + hacks hack + had had + hadst hadst + haec haec + haeres haer + hag hag + hagar hagar + haggard haggard + haggards haggard + haggish haggish + haggled haggl + hags hag + hail hail + hailed hail + hailstone hailston + hailstones hailston + hair hair + hairless hairless + hairs hair + hairy hairi + hal hal + 
halberd halberd + halberds halberd + halcyon halcyon + hale hale + haled hale + hales hale + half half + halfcan halfcan + halfpence halfpenc + halfpenny halfpenni + halfpennyworth halfpennyworth + halfway halfwai + halidom halidom + hall hall + halloa halloa + halloing hallo + hallond hallond + halloo halloo + hallooing halloo + hallow hallow + hallowed hallow + hallowmas hallowma + hallown hallown + hals hal + halt halt + halter halter + halters halter + halting halt + halts halt + halves halv + ham ham + hames hame + hamlet hamlet + hammer hammer + hammered hammer + hammering hammer + hammers hammer + hamper hamper + hampton hampton + hams ham + hamstring hamstr + hand hand + handed hand + handful hand + handicraft handicraft + handicraftsmen handicraftsmen + handing hand + handiwork handiwork + handkercher handkerch + handkerchers handkerch + handkerchief handkerchief + handle handl + handled handl + handles handl + handless handless + handlest handlest + handling handl + handmaid handmaid + handmaids handmaid + hands hand + handsaw handsaw + handsome handsom + handsomely handsom + handsomeness handsom + handwriting handwrit + handy handi + hang hang + hanged hang + hangers hanger + hangeth hangeth + hanging hang + hangings hang + hangman hangman + hangmen hangmen + hangs hang + hannibal hannib + hap hap + hapless hapless + haply hapli + happ happ + happen happen + happened happen + happier happier + happies happi + happiest happiest + happily happili + happiness happi + happy happi + haps hap + harbinger harbing + harbingers harbing + harbor harbor + harbour harbour + harbourage harbourag + harbouring harbour + harbours harbour + harcourt harcourt + hard hard + harder harder + hardest hardest + hardiest hardiest + hardiment hardiment + hardiness hardi + hardly hardli + hardness hard + hardocks hardock + hardy hardi + hare hare + harelip harelip + hares hare + harfleur harfleur + hark hark + harlot harlot + harlotry harlotri + harlots harlot + harm harm + harmed harm + harmful harm + harming harm + harmless harmless + harmonious harmoni + harmony harmoni + harms harm + harness har + harp harp + harper harper + harpier harpier + harping harp + harpy harpi + harried harri + harrow harrow + harrows harrow + harry harri + harsh harsh + harshly harshli + harshness harsh + hart hart + harts hart + harum harum + harvest harvest + has ha + hast hast + haste hast + hasted hast + hasten hasten + hastes hast + hastily hastili + hasting hast + hastings hast + hasty hasti + hat hat + hatch hatch + hatches hatch + hatchet hatchet + hatching hatch + hatchment hatchment + hate hate + hated hate + hateful hate + hater hater + haters hater + hates hate + hateth hateth + hatfield hatfield + hath hath + hating hate + hatred hatr + hats hat + haud haud + hauf hauf + haught haught + haughtiness haughti + haughty haughti + haunch haunch + haunches haunch + haunt haunt + haunted haunt + haunting haunt + haunts haunt + hautboy hautboi + hautboys hautboi + have have + haven haven + havens haven + haver haver + having have + havings have + havior havior + haviour haviour + havoc havoc + hawk hawk + hawking hawk + hawks hawk + hawthorn hawthorn + hawthorns hawthorn + hay hai + hazard hazard + hazarded hazard + hazards hazard + hazel hazel + hazelnut hazelnut + he he + head head + headborough headborough + headed head + headier headier + heading head + headland headland + headless headless + headlong headlong + heads head + headsman headsman + headstrong headstrong + heady headi + heal heal + healed heal + healing 
heal + heals heal + health health + healthful health + healths health + healthsome healthsom + healthy healthi + heap heap + heaping heap + heaps heap + hear hear + heard heard + hearer hearer + hearers hearer + hearest hearest + heareth heareth + hearing hear + hearings hear + heark heark + hearken hearken + hearkens hearken + hears hear + hearsay hearsai + hearse hears + hearsed hears + hearst hearst + heart heart + heartache heartach + heartbreak heartbreak + heartbreaking heartbreak + hearted heart + hearten hearten + hearth hearth + hearths hearth + heartily heartili + heartiness hearti + heartless heartless + heartlings heartl + heartly heartli + hearts heart + heartsick heartsick + heartstrings heartstr + hearty hearti + heat heat + heated heat + heath heath + heathen heathen + heathenish heathenish + heating heat + heats heat + heauties heauti + heav heav + heave heav + heaved heav + heaven heaven + heavenly heavenli + heavens heaven + heaves heav + heavier heavier + heaviest heaviest + heavily heavili + heaviness heavi + heaving heav + heavings heav + heavy heavi + hebona hebona + hebrew hebrew + hecate hecat + hectic hectic + hector hector + hectors hector + hecuba hecuba + hedg hedg + hedge hedg + hedgehog hedgehog + hedgehogs hedgehog + hedges hedg + heed heed + heeded heed + heedful heed + heedfull heedful + heedfully heedfulli + heedless heedless + heel heel + heels heel + hefted heft + hefts heft + heifer heifer + heifers heifer + heigh heigh + height height + heighten heighten + heinous heinou + heinously heinous + heir heir + heiress heiress + heirless heirless + heirs heir + held held + helen helen + helena helena + helenus helenu + helias helia + helicons helicon + hell hell + hellespont hellespont + hellfire hellfir + hellish hellish + helm helm + helmed helm + helmet helmet + helmets helmet + helms helm + help help + helper helper + helpers helper + helpful help + helping help + helpless helpless + helps help + helter helter + hem hem + heme heme + hemlock hemlock + hemm hemm + hemp hemp + hempen hempen + hems hem + hen hen + hence henc + henceforth henceforth + henceforward henceforward + henchman henchman + henri henri + henricus henricu + henry henri + hens hen + hent hent + henton henton + her her + herald herald + heraldry heraldri + heralds herald + herb herb + herbert herbert + herblets herblet + herbs herb + herculean herculean + hercules hercul + herd herd + herds herd + herdsman herdsman + herdsmen herdsmen + here here + hereabout hereabout + hereabouts hereabout + hereafter hereaft + hereby herebi + hereditary hereditari + hereford hereford + herefordshire herefordshir + herein herein + hereof hereof + heresies heresi + heresy heresi + heretic heret + heretics heret + hereto hereto + hereupon hereupon + heritage heritag + heritier heriti + hermes herm + hermia hermia + hermione hermion + hermit hermit + hermitage hermitag + hermits hermit + herne hern + hero hero + herod herod + herods herod + heroes hero + heroic heroic + heroical heroic + herring her + herrings her + hers her + herself herself + hesperides hesperid + hesperus hesperu + hest hest + hests hest + heure heur + heureux heureux + hew hew + hewgh hewgh + hewing hew + hewn hewn + hews hew + hey hei + heyday heydai + hibocrates hibocr + hic hic + hiccups hiccup + hick hick + hid hid + hidden hidden + hide hide + hideous hideou + hideously hideous + hideousness hideous + hides hide + hidest hidest + hiding hide + hie hie + hied hi + hiems hiem + hies hi + hig hig + high high + higher higher + 
highest highest + highly highli + highmost highmost + highness high + hight hight + highway highwai + highways highwai + hilding hild + hildings hild + hill hill + hillo hillo + hilloa hilloa + hills hill + hilt hilt + hilts hilt + hily hili + him him + himself himself + hinc hinc + hinckley hincklei + hind hind + hinder hinder + hindered hinder + hinders hinder + hindmost hindmost + hinds hind + hing hing + hinge hing + hinges hing + hint hint + hip hip + hipp hipp + hipparchus hipparchu + hippolyta hippolyta + hips hip + hir hir + hire hire + hired hire + hiren hiren + hirtius hirtiu + his hi + hisperia hisperia + hiss hiss + hisses hiss + hissing hiss + hist hist + historical histor + history histori + hit hit + hither hither + hitherto hitherto + hitherward hitherward + hitherwards hitherward + hits hit + hitting hit + hive hive + hives hive + hizzing hizz + ho ho + hoa hoa + hoar hoar + hoard hoard + hoarded hoard + hoarding hoard + hoars hoar + hoarse hoars + hoary hoari + hob hob + hobbididence hobbidid + hobby hobbi + hobbyhorse hobbyhors + hobgoblin hobgoblin + hobnails hobnail + hoc hoc + hod hod + hodge hodg + hog hog + hogs hog + hogshead hogshead + hogsheads hogshead + hois hoi + hoise hois + hoist hoist + hoisted hoist + hoists hoist + holborn holborn + hold hold + holden holden + holder holder + holdeth holdeth + holdfast holdfast + holding hold + holds hold + hole hole + holes hole + holidam holidam + holidame holidam + holiday holidai + holidays holidai + holier holier + holiest holiest + holily holili + holiness holi + holla holla + holland holland + hollander holland + hollanders holland + holloa holloa + holloaing holloa + hollow hollow + hollowly hollowli + hollowness hollow + holly holli + holmedon holmedon + holofernes holofern + holp holp + holy holi + homage homag + homager homag + home home + homely home + homes home + homespuns homespun + homeward homeward + homewards homeward + homicide homicid + homicides homicid + homily homili + hominem hominem + hommes homm + homo homo + honest honest + honester honest + honestest honestest + honestly honestli + honesty honesti + honey honei + honeycomb honeycomb + honeying honei + honeyless honeyless + honeysuckle honeysuckl + honeysuckles honeysuckl + honi honi + honneur honneur + honor honor + honorable honor + honorably honor + honorato honorato + honors honor + honour honour + honourable honour + honourably honour + honoured honour + honourest honourest + honourible honour + honouring honour + honours honour + hoo hoo + hood hood + hooded hood + hoodman hoodman + hoods hood + hoodwink hoodwink + hoof hoof + hoofs hoof + hook hook + hooking hook + hooks hook + hoop hoop + hoops hoop + hoot hoot + hooted hoot + hooting hoot + hoots hoot + hop hop + hope hope + hopeful hope + hopeless hopeless + hopes hope + hopest hopest + hoping hope + hopkins hopkin + hoppedance hopped + hor hor + horace horac + horatio horatio + horizon horizon + horn horn + hornbook hornbook + horned horn + horner horner + horning horn + hornpipes hornpip + horns horn + horologe horolog + horrible horribl + horribly horribl + horrid horrid + horrider horrid + horridly horridli + horror horror + horrors horror + hors hor + horse hors + horseback horseback + horsed hors + horsehairs horsehair + horseman horseman + horsemanship horsemanship + horsemen horsemen + horses hors + horseway horsewai + horsing hors + hortensio hortensio + hortensius hortensiu + horum horum + hose hose + hospitable hospit + hospital hospit + hospitality hospit + host host + 
hostage hostag + hostages hostag + hostess hostess + hostile hostil + hostility hostil + hostilius hostiliu + hosts host + hot hot + hotly hotli + hotspur hotspur + hotter hotter + hottest hottest + hound hound + hounds hound + hour hour + hourly hourli + hours hour + hous hou + house hous + household household + householder household + householders household + households household + housekeeper housekeep + housekeepers housekeep + housekeeping housekeep + houseless houseless + houses hous + housewife housewif + housewifery housewiferi + housewives housew + hovel hovel + hover hover + hovered hover + hovering hover + hovers hover + how how + howbeit howbeit + howe how + howeer howeer + however howev + howl howl + howled howl + howlet howlet + howling howl + howls howl + howsoe howso + howsoever howsoev + howsome howsom + hoxes hox + hoy hoi + hoyday hoydai + hubert hubert + huddled huddl + huddling huddl + hue hue + hued hu + hues hue + hug hug + huge huge + hugely huge + hugeness huge + hugg hugg + hugger hugger + hugh hugh + hugs hug + hujus huju + hulk hulk + hulks hulk + hull hull + hulling hull + hullo hullo + hum hum + human human + humane human + humanely human + humanity human + humble humbl + humbled humbl + humbleness humbl + humbler humbler + humbles humbl + humblest humblest + humbling humbl + humbly humbl + hume hume + humh humh + humidity humid + humility humil + humming hum + humor humor + humorous humor + humors humor + humour humour + humourists humourist + humours humour + humphrey humphrei + humphry humphri + hums hum + hundred hundr + hundreds hundr + hundredth hundredth + hung hung + hungarian hungarian + hungary hungari + hunger hunger + hungerford hungerford + hungerly hungerli + hungry hungri + hunt hunt + hunted hunt + hunter hunter + hunters hunter + hunteth hunteth + hunting hunt + huntington huntington + huntress huntress + hunts hunt + huntsman huntsman + huntsmen huntsmen + hurdle hurdl + hurl hurl + hurling hurl + hurls hurl + hurly hurli + hurlyburly hurlyburli + hurricano hurricano + hurricanoes hurricano + hurried hurri + hurries hurri + hurry hurri + hurt hurt + hurting hurt + hurtled hurtl + hurtless hurtless + hurtling hurtl + hurts hurt + husband husband + husbanded husband + husbandless husbandless + husbandry husbandri + husbands husband + hush hush + hushes hush + husht husht + husks husk + huswife huswif + huswifes huswif + hutch hutch + hybla hybla + hydra hydra + hyen hyen + hymen hymen + hymenaeus hymenaeu + hymn hymn + hymns hymn + hyperboles hyperbol + hyperbolical hyperbol + hyperion hyperion + hypocrisy hypocrisi + hypocrite hypocrit + hypocrites hypocrit + hyrcan hyrcan + hyrcania hyrcania + hyrcanian hyrcanian + hyssop hyssop + hysterica hysterica + i i + iachimo iachimo + iaculis iaculi + iago iago + iament iament + ibat ibat + icarus icaru + ice ic + iceland iceland + ici ici + icicle icicl + icicles icicl + icy ici + idea idea + ideas idea + idem idem + iden iden + ides id + idiot idiot + idiots idiot + idle idl + idleness idl + idles idl + idly idli + idol idol + idolatrous idolatr + idolatry idolatri + ield ield + if if + ifs if + ignis igni + ignoble ignobl + ignobly ignobl + ignominious ignomini + ignominy ignomini + ignomy ignomi + ignorance ignor + ignorant ignor + ii ii + iii iii + iiii iiii + il il + ilbow ilbow + ild ild + ilion ilion + ilium ilium + ill ill + illegitimate illegitim + illiterate illiter + illness ill + illo illo + ills ill + illume illum + illumin illumin + illuminate illumin + illumineth illumineth + illusion 
illus + illusions illus + illustrate illustr + illustrated illustr + illustrious illustri + illyria illyria + illyrian illyrian + ils il + im im + image imag + imagery imageri + images imag + imagin imagin + imaginary imaginari + imagination imagin + imaginations imagin + imagine imagin + imagining imagin + imaginings imagin + imbar imbar + imbecility imbecil + imbrue imbru + imitari imitari + imitate imit + imitated imit + imitation imit + imitations imit + immaculate immacul + immanity imman + immask immask + immaterial immateri + immediacy immediaci + immediate immedi + immediately immedi + imminence immin + imminent immin + immoderate immoder + immoderately immoder + immodest immodest + immoment immoment + immortal immort + immortaliz immortaliz + immortally immort + immur immur + immured immur + immures immur + imogen imogen + imp imp + impaint impaint + impair impair + impairing impair + impale impal + impaled impal + impanelled impanel + impart impart + imparted impart + impartial imparti + impartment impart + imparts impart + impasted impast + impatience impati + impatient impati + impatiently impati + impawn impawn + impeach impeach + impeached impeach + impeachment impeach + impeachments impeach + impedes imped + impediment impedi + impediments impedi + impenetrable impenetr + imperator imper + imperceiverant imperceiver + imperfect imperfect + imperfection imperfect + imperfections imperfect + imperfectly imperfectli + imperial imperi + imperious imperi + imperiously imperi + impertinency impertin + impertinent impertin + impeticos impetico + impetuosity impetuos + impetuous impetu + impieties impieti + impiety impieti + impious impiou + implacable implac + implements implement + implies impli + implor implor + implorators implor + implore implor + implored implor + imploring implor + impon impon + import import + importance import + importancy import + important import + importantly importantli + imported import + importeth importeth + importing import + importless importless + imports import + importun importun + importunacy importunaci + importunate importun + importune importun + importunes importun + importunity importun + impos impo + impose impos + imposed impos + imposition imposit + impositions imposit + impossibilities imposs + impossibility imposs + impossible imposs + imposthume imposthum + impostor impostor + impostors impostor + impotence impot + impotent impot + impounded impound + impregnable impregn + imprese impres + impress impress + impressed impress + impressest impressest + impression impress + impressure impressur + imprimendum imprimendum + imprimis imprimi + imprint imprint + imprinted imprint + imprison imprison + imprisoned imprison + imprisoning imprison + imprisonment imprison + improbable improb + improper improp + improve improv + improvident improvid + impudence impud + impudency impud + impudent impud + impudently impud + impudique impudiqu + impugn impugn + impugns impugn + impure impur + imputation imput + impute imput + in in + inaccessible inaccess + inaidable inaid + inaudible inaud + inauspicious inauspici + incaged incag + incantations incant + incapable incap + incardinate incardin + incarnadine incarnadin + incarnate incarn + incarnation incarn + incens incen + incense incens + incensed incens + incensement incens + incenses incens + incensing incens + incertain incertain + incertainties incertainti + incertainty incertainti + incessant incess + incessantly incessantli + incest incest + incestuous incestu + inch inch + incharitable 
incharit + inches inch + incidency incid + incident incid + incision incis + incite incit + incites incit + incivil incivil + incivility incivil + inclin inclin + inclinable inclin + inclination inclin + incline inclin + inclined inclin + inclines inclin + inclining inclin + inclips inclip + include includ + included includ + includes includ + inclusive inclus + incomparable incompar + incomprehensible incomprehens + inconsiderate inconsider + inconstancy inconst + inconstant inconst + incontinency incontin + incontinent incontin + incontinently incontin + inconvenience inconveni + inconveniences inconveni + inconvenient inconveni + incony inconi + incorporate incorpor + incorps incorp + incorrect incorrect + increas increa + increase increas + increases increas + increaseth increaseth + increasing increas + incredible incred + incredulous incredul + incur incur + incurable incur + incurr incurr + incurred incur + incursions incurs + ind ind + inde ind + indebted indebt + indeed inde + indent indent + indented indent + indenture indentur + indentures indentur + index index + indexes index + india india + indian indian + indict indict + indicted indict + indictment indict + indies indi + indifferency indiffer + indifferent indiffer + indifferently indiffer + indigent indig + indigest indigest + indigested indigest + indign indign + indignation indign + indignations indign + indigne indign + indignities indign + indignity indign + indirect indirect + indirection indirect + indirections indirect + indirectly indirectli + indiscreet indiscreet + indiscretion indiscret + indispos indispo + indisposition indisposit + indissoluble indissolubl + indistinct indistinct + indistinguish indistinguish + indistinguishable indistinguish + indited indit + individable individ + indrench indrench + indu indu + indubitate indubit + induc induc + induce induc + induced induc + inducement induc + induction induct + inductions induct + indue indu + indued indu + indues indu + indulgence indulg + indulgences indulg + indulgent indulg + indurance indur + industrious industri + industriously industri + industry industri + inequality inequ + inestimable inestim + inevitable inevit + inexecrable inexecr + inexorable inexor + inexplicable inexplic + infallible infal + infallibly infal + infamonize infamon + infamous infam + infamy infami + infancy infanc + infant infant + infants infant + infect infect + infected infect + infecting infect + infection infect + infections infect + infectious infecti + infectiously infecti + infects infect + infer infer + inference infer + inferior inferior + inferiors inferior + infernal infern + inferr inferr + inferreth inferreth + inferring infer + infest infest + infidel infidel + infidels infidel + infinite infinit + infinitely infinit + infinitive infinit + infirm infirm + infirmities infirm + infirmity infirm + infixed infix + infixing infix + inflam inflam + inflame inflam + inflaming inflam + inflammation inflamm + inflict inflict + infliction inflict + influence influenc + influences influenc + infold infold + inform inform + informal inform + information inform + informations inform + informed inform + informer inform + informs inform + infortunate infortun + infring infr + infringe infring + infringed infring + infus infu + infuse infus + infused infus + infusing infus + infusion infus + ingener ingen + ingenious ingeni + ingeniously ingeni + inglorious inglori + ingots ingot + ingraffed ingraf + ingraft ingraft + ingrate ingrat + ingrated ingrat + ingrateful ingrat + 
ingratitude ingratitud + ingratitudes ingratitud + ingredient ingredi + ingredients ingredi + ingross ingross + inhabit inhabit + inhabitable inhabit + inhabitants inhabit + inhabited inhabit + inhabits inhabit + inhearse inhears + inhearsed inhears + inherent inher + inherit inherit + inheritance inherit + inherited inherit + inheriting inherit + inheritor inheritor + inheritors inheritor + inheritrix inheritrix + inherits inherit + inhibited inhibit + inhibition inhibit + inhoop inhoop + inhuman inhuman + iniquities iniqu + iniquity iniqu + initiate initi + injointed injoint + injunction injunct + injunctions injunct + injur injur + injure injur + injurer injur + injuries injuri + injurious injuri + injury injuri + injustice injustic + ink ink + inkhorn inkhorn + inkle inkl + inkles inkl + inkling inkl + inky inki + inlaid inlaid + inland inland + inlay inlai + inly inli + inmost inmost + inn inn + inner inner + innkeeper innkeep + innocence innoc + innocency innoc + innocent innoc + innocents innoc + innovation innov + innovator innov + inns inn + innumerable innumer + inoculate inocul + inordinate inordin + inprimis inprimi + inquir inquir + inquire inquir + inquiry inquiri + inquisition inquisit + inquisitive inquisit + inroads inroad + insane insan + insanie insani + insatiate insati + insconce insconc + inscrib inscrib + inscription inscript + inscriptions inscript + inscroll inscrol + inscrutable inscrut + insculp insculp + insculpture insculptur + insensible insens + inseparable insepar + inseparate insepar + insert insert + inserted insert + inset inset + inshell inshel + inshipp inshipp + inside insid + insinewed insinew + insinuate insinu + insinuateth insinuateth + insinuating insinu + insinuation insinu + insisted insist + insisting insist + insisture insistur + insociable insoci + insolence insol + insolent insol + insomuch insomuch + inspir inspir + inspiration inspir + inspirations inspir + inspire inspir + inspired inspir + install instal + installed instal + instalment instal + instance instanc + instances instanc + instant instant + instantly instantli + instate instat + instead instead + insteeped insteep + instigate instig + instigated instig + instigation instig + instigations instig + instigator instig + instinct instinct + instinctively instinct + institute institut + institutions institut + instruct instruct + instructed instruct + instruction instruct + instructions instruct + instructs instruct + instrument instrument + instrumental instrument + instruments instrument + insubstantial insubstanti + insufficience insuffici + insufficiency insuffici + insult insult + insulted insult + insulting insult + insultment insult + insults insult + insupportable insupport + insuppressive insuppress + insurrection insurrect + insurrections insurrect + int int + integer integ + integritas integrita + integrity integr + intellect intellect + intellects intellect + intellectual intellectu + intelligence intellig + intelligencer intelligenc + intelligencing intelligenc + intelligent intellig + intelligis intelligi + intelligo intelligo + intemperance intemper + intemperate intemper + intend intend + intended intend + intendeth intendeth + intending intend + intendment intend + intends intend + intenible inten + intent intent + intention intent + intentively intent + intents intent + inter inter + intercept intercept + intercepted intercept + intercepter intercept + interception intercept + intercepts intercept + intercession intercess + intercessors intercessor + interchained 
interchain + interchang interchang + interchange interchang + interchangeably interchang + interchangement interchang + interchanging interchang + interdiction interdict + interest interest + interim interim + interims interim + interior interior + interjections interject + interjoin interjoin + interlude interlud + intermingle intermingl + intermission intermiss + intermissive intermiss + intermit intermit + intermix intermix + intermixed intermix + interpose interpos + interposer interpos + interposes interpos + interpret interpret + interpretation interpret + interpreted interpret + interpreter interpret + interpreters interpret + interprets interpret + interr interr + interred inter + interrogatories interrogatori + interrupt interrupt + interrupted interrupt + interrupter interrupt + interruptest interruptest + interruption interrupt + interrupts interrupt + intertissued intertissu + intervallums intervallum + interview interview + intestate intest + intestine intestin + intil intil + intimate intim + intimation intim + intitled intitl + intituled intitul + into into + intolerable intoler + intoxicates intox + intreasured intreasur + intreat intreat + intrench intrench + intrenchant intrench + intricate intric + intrinse intrins + intrinsicate intrins + intrude intrud + intruder intrud + intruding intrud + intrusion intrus + inundation inund + inure inur + inurn inurn + invade invad + invades invad + invasion invas + invasive invas + invectively invect + invectives invect + inveigled inveigl + invent invent + invented invent + invention invent + inventions invent + inventor inventor + inventorially inventori + inventoried inventori + inventors inventor + inventory inventori + inverness inver + invert invert + invest invest + invested invest + investing invest + investments invest + inveterate inveter + invincible invinc + inviolable inviol + invised invis + invisible invis + invitation invit + invite invit + invited invit + invites invit + inviting invit + invitis inviti + invocate invoc + invocation invoc + invoke invok + invoked invok + invulnerable invulner + inward inward + inwardly inwardli + inwardness inward + inwards inward + ionia ionia + ionian ionian + ipse ips + ipswich ipswich + ira ira + irae ira + iras ira + ire ir + ireful ir + ireland ireland + iris iri + irish irish + irishman irishman + irishmen irishmen + irks irk + irksome irksom + iron iron + irons iron + irreconcil irreconcil + irrecoverable irrecover + irregular irregular + irregulous irregul + irreligious irreligi + irremovable irremov + irreparable irrepar + irresolute irresolut + irrevocable irrevoc + is is + isabel isabel + isabella isabella + isbel isbel + isbels isbel + iscariot iscariot + ise is + ish ish + isidore isidor + isis isi + island island + islander island + islanders island + islands island + isle isl + isles isl + israel israel + issu issu + issue issu + issued issu + issueless issueless + issues issu + issuing issu + ist ist + ista ista + it it + italian italian + italy itali + itch itch + itches itch + itching itch + item item + items item + iteration iter + ithaca ithaca + its it + itself itself + itshall itshal + iv iv + ivory ivori + ivy ivi + iwis iwi + ix ix + j j + jacet jacet + jack jack + jackanapes jackanap + jacks jack + jacksauce jacksauc + jackslave jackslav + jacob jacob + jade jade + jaded jade + jades jade + jail jail + jakes jake + jamany jamani + james jame + jamy jami + jane jane + jangled jangl + jangling jangl + january januari + janus janu + japhet japhet + jaquenetta 
jaquenetta + jaques jaqu + jar jar + jarring jar + jars jar + jarteer jarteer + jasons jason + jaunce jaunc + jauncing jaunc + jaundice jaundic + jaundies jaundi + jaw jaw + jawbone jawbon + jaws jaw + jay jai + jays jai + jc jc + je je + jealous jealou + jealousies jealousi + jealousy jealousi + jeer jeer + jeering jeer + jelly jelli + jenny jenni + jeopardy jeopardi + jephtha jephtha + jephthah jephthah + jerkin jerkin + jerkins jerkin + jerks jerk + jeronimy jeronimi + jerusalem jerusalem + jeshu jeshu + jesses jess + jessica jessica + jest jest + jested jest + jester jester + jesters jester + jesting jest + jests jest + jesu jesu + jesus jesu + jet jet + jets jet + jew jew + jewel jewel + jeweller jewel + jewels jewel + jewess jewess + jewish jewish + jewry jewri + jews jew + jezebel jezebel + jig jig + jigging jig + jill jill + jills jill + jingling jingl + joan joan + job job + jockey jockei + jocund jocund + jog jog + jogging jog + john john + johns john + join join + joinder joinder + joined join + joiner joiner + joineth joineth + joins join + joint joint + jointed joint + jointing joint + jointly jointli + jointress jointress + joints joint + jointure jointur + jollity jolliti + jolly jolli + jolt jolt + joltheads jolthead + jordan jordan + joseph joseph + joshua joshua + jot jot + jour jour + jourdain jourdain + journal journal + journey journei + journeying journei + journeyman journeyman + journeymen journeymen + journeys journei + jove jove + jovem jovem + jovial jovial + jowl jowl + jowls jowl + joy joi + joyed joi + joyful joy + joyfully joyfulli + joyless joyless + joyous joyou + joys joi + juan juan + jud jud + judas juda + judases judas + jude jude + judg judg + judge judg + judged judg + judgement judgement + judges judg + judgest judgest + judging judg + judgment judgment + judgments judgment + judicious judici + jug jug + juggle juggl + juggled juggl + juggler juggler + jugglers juggler + juggling juggl + jugs jug + juice juic + juiced juic + jul jul + jule jule + julia julia + juliet juliet + julietta julietta + julio julio + julius juliu + july juli + jump jump + jumpeth jumpeth + jumping jump + jumps jump + june june + junes june + junior junior + junius juniu + junkets junket + juno juno + jupiter jupit + jure jure + jurement jurement + jurisdiction jurisdict + juror juror + jurors juror + jury juri + jurymen jurymen + just just + justeius justeiu + justest justest + justice justic + justicer justic + justicers justic + justices justic + justification justif + justified justifi + justify justifi + justle justl + justled justl + justles justl + justling justl + justly justli + justness just + justs just + jutting jut + jutty jutti + juvenal juven + kam kam + kate kate + kated kate + kates kate + katharine katharin + katherina katherina + katherine katherin + kecksies kecksi + keech keech + keel keel + keels keel + keen keen + keenness keen + keep keep + keepdown keepdown + keeper keeper + keepers keeper + keepest keepest + keeping keep + keeps keep + keiser keiser + ken ken + kendal kendal + kennel kennel + kent kent + kentish kentish + kentishman kentishman + kentishmen kentishmen + kept kept + kerchief kerchief + kerely kere + kern kern + kernal kernal + kernel kernel + kernels kernel + kerns kern + kersey kersei + kettle kettl + kettledrum kettledrum + kettledrums kettledrum + key kei + keys kei + kibe kibe + kibes kibe + kick kick + kicked kick + kickshaws kickshaw + kickshawses kickshaws + kicky kicki + kid kid + kidney kidnei + kikely kike + kildare kildar + 
kill kill + killed kill + killer killer + killeth killeth + killing kill + killingworth killingworth + kills kill + kiln kiln + kimbolton kimbolton + kin kin + kind kind + kinder kinder + kindest kindest + kindle kindl + kindled kindl + kindless kindless + kindlier kindlier + kindling kindl + kindly kindli + kindness kind + kindnesses kind + kindred kindr + kindreds kindr + kinds kind + kine kine + king king + kingdom kingdom + kingdoms kingdom + kingly kingli + kings king + kinred kinr + kins kin + kinsman kinsman + kinsmen kinsmen + kinswoman kinswoman + kirtle kirtl + kirtles kirtl + kiss kiss + kissed kiss + kisses kiss + kissing kiss + kitchen kitchen + kitchens kitchen + kite kite + kites kite + kitten kitten + kj kj + kl kl + klll klll + knack knack + knacks knack + knapp knapp + knav knav + knave knave + knaveries knaveri + knavery knaveri + knaves knave + knavish knavish + knead knead + kneaded knead + kneading knead + knee knee + kneel kneel + kneeling kneel + kneels kneel + knees knee + knell knell + knew knew + knewest knewest + knife knife + knight knight + knighted knight + knighthood knighthood + knighthoods knighthood + knightly knightli + knights knight + knit knit + knits knit + knitters knitter + knitteth knitteth + knives knive + knobs knob + knock knock + knocking knock + knocks knock + knog knog + knoll knoll + knot knot + knots knot + knotted knot + knotty knotti + know know + knower knower + knowest knowest + knowing know + knowingly knowingli + knowings know + knowledge knowledg + known known + knows know + l l + la la + laban laban + label label + labell label + labienus labienu + labio labio + labor labor + laboring labor + labors labor + labour labour + laboured labour + labourer labour + labourers labour + labouring labour + labours labour + laboursome laboursom + labras labra + labyrinth labyrinth + lac lac + lace lace + laced lace + lacedaemon lacedaemon + laces lace + lacies laci + lack lack + lackbeard lackbeard + lacked lack + lackey lackei + lackeying lackei + lackeys lackei + lacking lack + lacks lack + lad lad + ladder ladder + ladders ladder + lade lade + laden laden + ladies ladi + lading lade + lads lad + lady ladi + ladybird ladybird + ladyship ladyship + ladyships ladyship + laer laer + laertes laert + lafeu lafeu + lag lag + lagging lag + laid laid + lain lain + laissez laissez + lake lake + lakes lake + lakin lakin + lam lam + lamb lamb + lambert lambert + lambkin lambkin + lambkins lambkin + lambs lamb + lame lame + lamely lame + lameness lame + lament lament + lamentable lament + lamentably lament + lamentation lament + lamentations lament + lamented lament + lamenting lament + lamentings lament + laments lament + lames lame + laming lame + lammas lamma + lammastide lammastid + lamound lamound + lamp lamp + lampass lampass + lamps lamp + lanc lanc + lancaster lancast + lance lanc + lances lanc + lanceth lanceth + lanch lanch + land land + landed land + landing land + landless landless + landlord landlord + landmen landmen + lands land + lane lane + lanes lane + langage langag + langley langlei + langton langton + language languag + languageless languageless + languages languag + langues langu + languish languish + languished languish + languishes languish + languishing languish + languishings languish + languishment languish + languor languor + lank lank + lantern lantern + lanterns lantern + lanthorn lanthorn + lap lap + lapis lapi + lapland lapland + lapp lapp + laps lap + lapse laps + lapsed laps + lapsing laps + lapwing lapw + laquais 
laquai + larded lard + larder larder + larding lard + lards lard + large larg + largely larg + largeness larg + larger larger + largess largess + largest largest + lark lark + larks lark + larron larron + lartius lartiu + larum larum + larums larum + las la + lascivious lascivi + lash lash + lass lass + lasses lass + last last + lasted last + lasting last + lastly lastli + lasts last + latch latch + latches latch + late late + lated late + lately late + later later + latest latest + lath lath + latin latin + latten latten + latter latter + lattice lattic + laud laud + laudable laudabl + laudis laudi + laugh laugh + laughable laughabl + laughed laugh + laugher laugher + laughest laughest + laughing laugh + laughs laugh + laughter laughter + launce launc + launcelot launcelot + launces launc + launch launch + laund laund + laundress laundress + laundry laundri + laur laur + laura laura + laurel laurel + laurels laurel + laurence laurenc + laus lau + lavache lavach + lave lave + lavee lave + lavender lavend + lavina lavina + lavinia lavinia + lavish lavish + lavishly lavishli + lavolt lavolt + lavoltas lavolta + law law + lawful law + lawfully lawfulli + lawless lawless + lawlessly lawlessli + lawn lawn + lawns lawn + lawrence lawrenc + laws law + lawyer lawyer + lawyers lawyer + lay lai + layer layer + layest layest + laying lai + lays lai + lazar lazar + lazars lazar + lazarus lazaru + lazy lazi + lc lc + ld ld + ldst ldst + le le + lead lead + leaden leaden + leader leader + leaders leader + leadest leadest + leading lead + leads lead + leaf leaf + leagu leagu + league leagu + leagued leagu + leaguer leaguer + leagues leagu + leah leah + leak leak + leaky leaki + lean lean + leander leander + leaner leaner + leaning lean + leanness lean + leans lean + leap leap + leaped leap + leaping leap + leaps leap + leapt leapt + lear lear + learn learn + learned learn + learnedly learnedli + learning learn + learnings learn + learns learn + learnt learnt + leas lea + lease leas + leases leas + leash leash + leasing leas + least least + leather leather + leathern leathern + leav leav + leave leav + leaven leaven + leavening leaven + leaver leaver + leaves leav + leaving leav + leavy leavi + lecher lecher + lecherous lecher + lechers lecher + lechery lecheri + lecon lecon + lecture lectur + lectures lectur + led led + leda leda + leech leech + leeches leech + leek leek + leeks leek + leer leer + leers leer + lees lee + leese lees + leet leet + leets leet + left left + leg leg + legacies legaci + legacy legaci + legate legat + legatine legatin + lege lege + legerity leger + leges lege + legg legg + legion legion + legions legion + legitimate legitim + legitimation legitim + legs leg + leicester leicest + leicestershire leicestershir + leiger leiger + leigers leiger + leisure leisur + leisurely leisur + leisures leisur + leman leman + lemon lemon + lena lena + lend lend + lender lender + lending lend + lendings lend + lends lend + length length + lengthen lengthen + lengthens lengthen + lengths length + lenity leniti + lennox lennox + lent lent + lenten lenten + lentus lentu + leo leo + leon leon + leonardo leonardo + leonati leonati + leonato leonato + leonatus leonatu + leontes leont + leopard leopard + leopards leopard + leper leper + leperous leper + lepidus lepidu + leprosy leprosi + lequel lequel + lers ler + les le + less less + lessen lessen + lessens lessen + lesser lesser + lesson lesson + lessoned lesson + lessons lesson + lest lest + lestrake lestrak + let let + lethargied lethargi + 
lethargies lethargi + lethargy lethargi + lethe leth + lets let + lett lett + letter letter + letters letter + letting let + lettuce lettuc + leur leur + leve leve + level level + levell level + levelled level + levels level + leven leven + levers lever + leviathan leviathan + leviathans leviathan + levied levi + levies levi + levity leviti + levy levi + levying levi + lewd lewd + lewdly lewdli + lewdness lewd + lewdsters lewdster + lewis lewi + liable liabl + liar liar + liars liar + libbard libbard + libelling libel + libels libel + liberal liber + liberality liber + liberte libert + liberties liberti + libertine libertin + libertines libertin + liberty liberti + library librari + libya libya + licence licenc + licens licen + license licens + licentious licenti + lichas licha + licio licio + lick lick + licked lick + licker licker + lictors lictor + lid lid + lids lid + lie lie + lied li + lief lief + liefest liefest + liege lieg + liegeman liegeman + liegemen liegemen + lien lien + lies li + liest liest + lieth lieth + lieu lieu + lieutenant lieuten + lieutenantry lieutenantri + lieutenants lieuten + lieve liev + life life + lifeblood lifeblood + lifeless lifeless + lifelings lifel + lift lift + lifted lift + lifter lifter + lifteth lifteth + lifting lift + lifts lift + lig lig + ligarius ligariu + liggens liggen + light light + lighted light + lighten lighten + lightens lighten + lighter lighter + lightest lightest + lightly lightli + lightness light + lightning lightn + lightnings lightn + lights light + lik lik + like like + liked like + likeliest likeliest + likelihood likelihood + likelihoods likelihood + likely like + likeness like + liker liker + likes like + likest likest + likewise likewis + liking like + likings like + lilies lili + lily lili + lim lim + limander limand + limb limb + limbeck limbeck + limbecks limbeck + limber limber + limbo limbo + limbs limb + lime lime + limed lime + limehouse limehous + limekilns limekiln + limit limit + limitation limit + limited limit + limits limit + limn limn + limp limp + limping limp + limps limp + lin lin + lincoln lincoln + lincolnshire lincolnshir + line line + lineal lineal + lineally lineal + lineament lineament + lineaments lineament + lined line + linen linen + linens linen + lines line + ling ling + lingare lingar + linger linger + lingered linger + lingers linger + linguist linguist + lining line + link link + links link + linsey linsei + linstock linstock + linta linta + lion lion + lionel lionel + lioness lioness + lions lion + lip lip + lipp lipp + lips lip + lipsbury lipsburi + liquid liquid + liquor liquor + liquorish liquorish + liquors liquor + lirra lirra + lisbon lisbon + lisp lisp + lisping lisp + list list + listen listen + listening listen + lists list + literatured literatur + lither lither + litter litter + little littl + littlest littlest + liv liv + live live + lived live + livelier liveli + livelihood livelihood + livelong livelong + lively live + liver liver + liveries liveri + livers liver + livery liveri + lives live + livest livest + liveth liveth + livia livia + living live + livings live + lizard lizard + lizards lizard + ll ll + lll lll + llous llou + lnd lnd + lo lo + loa loa + loach loach + load load + loaden loaden + loading load + loads load + loaf loaf + loam loam + loan loan + loath loath + loathe loath + loathed loath + loather loather + loathes loath + loathing loath + loathly loathli + loathness loath + loathsome loathsom + loathsomeness loathsom + loathsomest loathsomest + loaves loav + lob 
lob + lobbies lobbi + lobby lobbi + local local + lochaber lochab + lock lock + locked lock + locking lock + lockram lockram + locks lock + locusts locust + lode lode + lodg lodg + lodge lodg + lodged lodg + lodgers lodger + lodges lodg + lodging lodg + lodgings lodg + lodovico lodovico + lodowick lodowick + lofty lofti + log log + logger logger + loggerhead loggerhead + loggerheads loggerhead + loggets logget + logic logic + logs log + loins loin + loiter loiter + loiterer loiter + loiterers loiter + loitering loiter + lolling loll + lolls loll + lombardy lombardi + london london + londoners london + lone lone + loneliness loneli + lonely lone + long long + longaville longavil + longboat longboat + longed long + longer longer + longest longest + longeth longeth + longing long + longings long + longly longli + longs long + longtail longtail + loo loo + loof loof + look look + looked look + looker looker + lookers looker + lookest lookest + looking look + looks look + loon loon + loop loop + loos loo + loose loos + loosed loos + loosely loos + loosen loosen + loosing loos + lop lop + lopp lopp + loquitur loquitur + lord lord + lorded lord + lording lord + lordings lord + lordliness lordli + lordly lordli + lords lord + lordship lordship + lordships lordship + lorenzo lorenzo + lorn lorn + lorraine lorrain + lorship lorship + los lo + lose lose + loser loser + losers loser + loses lose + losest losest + loseth loseth + losing lose + loss loss + losses loss + lost lost + lot lot + lots lot + lott lott + lottery lotteri + loud loud + louder louder + loudly loudli + lour lour + loureth loureth + louring lour + louse lous + louses lous + lousy lousi + lout lout + louted lout + louts lout + louvre louvr + lov lov + love love + loved love + lovedst lovedst + lovel lovel + lovelier loveli + loveliness loveli + lovell lovel + lovely love + lover lover + lovered lover + lovers lover + loves love + lovest lovest + loveth loveth + loving love + lovingly lovingli + low low + lowe low + lower lower + lowest lowest + lowing low + lowliness lowli + lowly lowli + lown lown + lowness low + loyal loyal + loyally loyal + loyalties loyalti + loyalty loyalti + lozel lozel + lt lt + lubber lubber + lubberly lubberli + luc luc + luccicos luccico + luce luce + lucentio lucentio + luces luce + lucetta lucetta + luciana luciana + lucianus lucianu + lucifer lucif + lucifier lucifi + lucilius luciliu + lucina lucina + lucio lucio + lucius luciu + luck luck + luckier luckier + luckiest luckiest + luckily luckili + luckless luckless + lucky lucki + lucre lucr + lucrece lucrec + lucretia lucretia + lucullius luculliu + lucullus lucullu + lucy luci + lud lud + ludlow ludlow + lug lug + lugg lugg + luggage luggag + luke luke + lukewarm lukewarm + lull lull + lulla lulla + lullaby lullabi + lulls lull + lumbert lumbert + lump lump + lumpish lumpish + luna luna + lunacies lunaci + lunacy lunaci + lunatic lunat + lunatics lunat + lunes lune + lungs lung + lupercal luperc + lurch lurch + lure lure + lurk lurk + lurketh lurketh + lurking lurk + lurks lurk + luscious lusciou + lush lush + lust lust + lusted lust + luster luster + lustful lust + lustier lustier + lustiest lustiest + lustig lustig + lustihood lustihood + lustily lustili + lustre lustr + lustrous lustrou + lusts lust + lusty lusti + lute lute + lutes lute + lutestring lutestr + lutheran lutheran + luxurious luxuri + luxuriously luxuri + luxury luxuri + ly ly + lycaonia lycaonia + lycurguses lycurgus + lydia lydia + lye lye + lyen lyen + lying ly + lym lym + lymoges 
lymog + lynn lynn + lysander lysand + m m + ma ma + maan maan + mab mab + macbeth macbeth + maccabaeus maccabaeu + macdonwald macdonwald + macduff macduff + mace mace + macedon macedon + maces mace + machiavel machiavel + machination machin + machinations machin + machine machin + mack mack + macmorris macmorri + maculate macul + maculation macul + mad mad + madam madam + madame madam + madams madam + madcap madcap + madded mad + madding mad + made made + madeira madeira + madly madli + madman madman + madmen madmen + madness mad + madonna madonna + madrigals madrig + mads mad + maecenas maecena + maggot maggot + maggots maggot + magic magic + magical magic + magician magician + magistrate magistr + magistrates magistr + magnanimity magnanim + magnanimous magnanim + magni magni + magnifi magnifi + magnificence magnific + magnificent magnific + magnifico magnifico + magnificoes magnifico + magnus magnu + mahomet mahomet + mahu mahu + maid maid + maiden maiden + maidenhead maidenhead + maidenheads maidenhead + maidenhood maidenhood + maidenhoods maidenhood + maidenliest maidenliest + maidenly maidenli + maidens maiden + maidhood maidhood + maids maid + mail mail + mailed mail + mails mail + maim maim + maimed maim + maims maim + main main + maincourse maincours + maine main + mainly mainli + mainmast mainmast + mains main + maintain maintain + maintained maintain + maintains maintain + maintenance mainten + mais mai + maison maison + majestas majesta + majestee majeste + majestic majest + majestical majest + majestically majest + majesties majesti + majesty majesti + major major + majority major + mak mak + make make + makeless makeless + maker maker + makers maker + makes make + makest makest + maketh maketh + making make + makings make + mal mal + mala mala + maladies maladi + malady maladi + malapert malapert + malcolm malcolm + malcontent malcont + malcontents malcont + male male + maledictions maledict + malefactions malefact + malefactor malefactor + malefactors malefactor + males male + malevolence malevol + malevolent malevol + malhecho malhecho + malice malic + malicious malici + maliciously malici + malign malign + malignancy malign + malignant malign + malignantly malignantli + malkin malkin + mall mall + mallard mallard + mallet mallet + mallows mallow + malmsey malmsei + malt malt + maltworms maltworm + malvolio malvolio + mamillius mamilliu + mammering mammer + mammet mammet + mammets mammet + mammock mammock + man man + manacle manacl + manacles manacl + manage manag + managed manag + manager manag + managing manag + manakin manakin + manchus manchu + mandate mandat + mandragora mandragora + mandrake mandrak + mandrakes mandrak + mane mane + manent manent + manes mane + manet manet + manfully manfulli + mangle mangl + mangled mangl + mangles mangl + mangling mangl + mangy mangi + manhood manhood + manhoods manhood + manifest manifest + manifested manifest + manifests manifest + manifold manifold + manifoldly manifoldli + manka manka + mankind mankind + manlike manlik + manly manli + mann mann + manna manna + manner manner + mannerly mannerli + manners manner + manningtree manningtre + mannish mannish + manor manor + manors manor + mans man + mansion mansion + mansionry mansionri + mansions mansion + manslaughter manslaught + mantle mantl + mantled mantl + mantles mantl + mantua mantua + mantuan mantuan + manual manual + manure manur + manured manur + manus manu + many mani + map map + mapp mapp + maps map + mar mar + marble marbl + marbled marbl + marcade marcad + marcellus 
marcellu + march march + marches march + marcheth marcheth + marching march + marchioness marchio + marchpane marchpan + marcians marcian + marcius marciu + marcus marcu + mardian mardian + mare mare + mares mare + marg marg + margarelon margarelon + margaret margaret + marge marg + margent margent + margery margeri + maria maria + marian marian + mariana mariana + maries mari + marigold marigold + mariner marin + mariners marin + maritime maritim + marjoram marjoram + mark mark + marked mark + market market + marketable market + marketplace marketplac + markets market + marking mark + markman markman + marks mark + marl marl + marle marl + marmoset marmoset + marquess marquess + marquis marqui + marr marr + marriage marriag + marriages marriag + married marri + marries marri + marring mar + marrow marrow + marrowless marrowless + marrows marrow + marry marri + marrying marri + mars mar + marseilles marseil + marsh marsh + marshal marshal + marshalsea marshalsea + marshalship marshalship + mart mart + marted mart + martem martem + martext martext + martial martial + martin martin + martino martino + martius martiu + martlemas martlema + martlet martlet + marts mart + martyr martyr + martyrs martyr + marullus marullu + marv marv + marvel marvel + marvell marvel + marvellous marvel + marvellously marvel + marvels marvel + mary mari + mas ma + masculine masculin + masham masham + mask mask + masked mask + masker masker + maskers masker + masking mask + masks mask + mason mason + masonry masonri + masons mason + masque masqu + masquers masquer + masques masqu + masquing masqu + mass mass + massacre massacr + massacres massacr + masses mass + massy massi + mast mast + mastcr mastcr + master master + masterdom masterdom + masterest masterest + masterless masterless + masterly masterli + masterpiece masterpiec + masters master + mastership mastership + mastic mastic + mastiff mastiff + mastiffs mastiff + masts mast + match match + matches match + matcheth matcheth + matching match + matchless matchless + mate mate + mated mate + mater mater + material materi + mates mate + mathematics mathemat + matin matin + matron matron + matrons matron + matter matter + matters matter + matthew matthew + mattock mattock + mattress mattress + mature matur + maturity matur + maud maud + maudlin maudlin + maugre maugr + maul maul + maund maund + mauri mauri + mauritania mauritania + mauvais mauvai + maw maw + maws maw + maxim maxim + may mai + mayday maydai + mayest mayest + mayor mayor + maypole maypol + mayst mayst + maz maz + maze maze + mazed maze + mazes maze + mazzard mazzard + me me + meacock meacock + mead mead + meadow meadow + meadows meadow + meads mead + meagre meagr + meal meal + meals meal + mealy meali + mean mean + meanders meander + meaner meaner + meanest meanest + meaneth meaneth + meaning mean + meanings mean + meanly meanli + means mean + meant meant + meantime meantim + meanwhile meanwhil + measles measl + measur measur + measurable measur + measure measur + measured measur + measureless measureless + measures measur + measuring measur + meat meat + meats meat + mechanic mechan + mechanical mechan + mechanicals mechan + mechanics mechan + mechante mechant + med med + medal medal + meddle meddl + meddler meddler + meddling meddl + mede mede + medea medea + media media + mediation mediat + mediators mediat + medice medic + medicinal medicin + medicine medicin + medicines medicin + meditate medit + meditates medit + meditating medit + meditation medit + meditations medit + mediterranean 
mediterranean + mediterraneum mediterraneum + medlar medlar + medlars medlar + meed meed + meeds meed + meek meek + meekly meekli + meekness meek + meet meet + meeter meeter + meetest meetest + meeting meet + meetings meet + meetly meetli + meetness meet + meets meet + meg meg + mehercle mehercl + meilleur meilleur + meiny meini + meisen meisen + melancholies melancholi + melancholy melancholi + melford melford + mell mell + mellifluous melliflu + mellow mellow + mellowing mellow + melodious melodi + melody melodi + melt melt + melted melt + melteth melteth + melting melt + melts melt + melun melun + member member + members member + memento memento + memorable memor + memorandums memorandum + memorial memori + memorials memori + memories memori + memoriz memoriz + memorize memor + memory memori + memphis memphi + men men + menac menac + menace menac + menaces menac + menaphon menaphon + menas mena + mend mend + mended mend + mender mender + mending mend + mends mend + menecrates menecr + menelaus menelau + menenius meneniu + mental mental + menteith menteith + mention mention + mentis menti + menton menton + mephostophilus mephostophilu + mer mer + mercatante mercatant + mercatio mercatio + mercenaries mercenari + mercenary mercenari + mercer mercer + merchandise merchandis + merchandized merchand + merchant merchant + merchants merchant + mercies merci + merciful merci + mercifully mercifulli + merciless merciless + mercurial mercuri + mercuries mercuri + mercury mercuri + mercutio mercutio + mercy merci + mere mere + mered mere + merely mere + merest merest + meridian meridian + merit merit + merited merit + meritorious meritori + merits merit + merlin merlin + mermaid mermaid + mermaids mermaid + merops merop + merrier merrier + merriest merriest + merrily merrili + merriman merriman + merriment merriment + merriments merriment + merriness merri + merry merri + mervailous mervail + mes me + mesh mesh + meshes mesh + mesopotamia mesopotamia + mess mess + message messag + messages messag + messala messala + messaline messalin + messenger messeng + messengers messeng + messes mess + messina messina + met met + metal metal + metals metal + metamorphis metamorphi + metamorphoses metamorphos + metaphor metaphor + metaphysical metaphys + metaphysics metaphys + mete mete + metellus metellu + meteor meteor + meteors meteor + meteyard meteyard + metheglin metheglin + metheglins metheglin + methink methink + methinks methink + method method + methods method + methought methought + methoughts methought + metre metr + metres metr + metropolis metropoli + mette mett + mettle mettl + mettled mettl + meus meu + mew mew + mewed mew + mewling mewl + mexico mexico + mi mi + mice mice + michael michael + michaelmas michaelma + micher micher + miching mich + mickle mickl + microcosm microcosm + mid mid + midas mida + middest middest + middle middl + middleham middleham + midnight midnight + midriff midriff + midst midst + midsummer midsumm + midway midwai + midwife midwif + midwives midwiv + mienne mienn + might might + mightful might + mightier mightier + mightiest mightiest + mightily mightili + mightiness mighti + mightst mightst + mighty mighti + milan milan + milch milch + mild mild + milder milder + mildest mildest + mildew mildew + mildews mildew + mildly mildli + mildness mild + mile mile + miles mile + milford milford + militarist militarist + military militari + milk milk + milking milk + milkmaid milkmaid + milks milk + milksops milksop + milky milki + mill mill + mille mill + miller miller + 
milliner millin + million million + millioned million + millions million + mills mill + millstones millston + milo milo + mimic mimic + minc minc + mince minc + minces minc + mincing minc + mind mind + minded mind + minding mind + mindless mindless + minds mind + mine mine + mineral miner + minerals miner + minerva minerva + mines mine + mingle mingl + mingled mingl + mingling mingl + minikin minikin + minim minim + minime minim + minimo minimo + minimus minimu + mining mine + minion minion + minions minion + minist minist + minister minist + ministers minist + ministration ministr + minnow minnow + minnows minnow + minola minola + minority minor + minos mino + minotaurs minotaur + minstrel minstrel + minstrels minstrel + minstrelsy minstrelsi + mint mint + mints mint + minute minut + minutely minut + minutes minut + minx minx + mio mio + mir mir + mirable mirabl + miracle miracl + miracles miracl + miraculous miracul + miranda miranda + mire mire + mirror mirror + mirrors mirror + mirth mirth + mirthful mirth + miry miri + mis mi + misadventur misadventur + misadventure misadventur + misanthropos misanthropo + misapplied misappli + misbecame misbecam + misbecom misbecom + misbecome misbecom + misbegot misbegot + misbegotten misbegotten + misbeliever misbeliev + misbelieving misbeliev + misbhav misbhav + miscall miscal + miscalled miscal + miscarried miscarri + miscarries miscarri + miscarry miscarri + miscarrying miscarri + mischance mischanc + mischances mischanc + mischief mischief + mischiefs mischief + mischievous mischiev + misconceived misconceiv + misconst misconst + misconster misconst + misconstruction misconstruct + misconstrued misconstru + misconstrues misconstru + miscreant miscreant + miscreate miscreat + misdeed misde + misdeeds misde + misdemean misdemean + misdemeanours misdemeanour + misdoubt misdoubt + misdoubteth misdoubteth + misdoubts misdoubt + misenum misenum + miser miser + miserable miser + miserably miser + misericorde misericord + miseries miseri + misers miser + misery miseri + misfortune misfortun + misfortunes misfortun + misgive misgiv + misgives misgiv + misgiving misgiv + misgoverned misgovern + misgovernment misgovern + misgraffed misgraf + misguide misguid + mishap mishap + mishaps mishap + misheard misheard + misinterpret misinterpret + mislead mislead + misleader mislead + misleaders mislead + misleading mislead + misled misl + mislike mislik + misord misord + misplac misplac + misplaced misplac + misplaces misplac + mispris mispri + misprised mispris + misprision mispris + misprizing mispriz + misproud misproud + misquote misquot + misreport misreport + miss miss + missed miss + misses miss + misshap misshap + misshapen misshapen + missheathed missheath + missing miss + missingly missingli + missions mission + missive missiv + missives missiv + misspoke misspok + mist mist + mista mista + mistak mistak + mistake mistak + mistaken mistaken + mistakes mistak + mistaketh mistaketh + mistaking mistak + mistakings mistak + mistemp mistemp + mistempered mistemp + misterm misterm + mistful mist + misthink misthink + misthought misthought + mistletoe mistleto + mistook mistook + mistreadings mistread + mistress mistress + mistresses mistress + mistresss mistresss + mistriship mistriship + mistrust mistrust + mistrusted mistrust + mistrustful mistrust + mistrusting mistrust + mists mist + misty misti + misus misu + misuse misus + misused misus + misuses misus + mites mite + mithridates mithrid + mitigate mitig + mitigation mitig + mix mix + mixed mix + 
mixture mixtur + mixtures mixtur + mm mm + mnd mnd + moan moan + moans moan + moat moat + moated moat + mobled mobl + mock mock + mockable mockabl + mocker mocker + mockeries mockeri + mockers mocker + mockery mockeri + mocking mock + mocks mock + mockvater mockvat + mockwater mockwat + model model + modena modena + moderate moder + moderately moder + moderation moder + modern modern + modest modest + modesties modesti + modestly modestli + modesty modesti + modicums modicum + modo modo + module modul + moe moe + moi moi + moiety moieti + moist moist + moisten moisten + moisture moistur + moldwarp moldwarp + mole mole + molehill molehil + moles mole + molest molest + molestation molest + mollification mollif + mollis molli + molten molten + molto molto + mome mome + moment moment + momentary momentari + moming mome + mon mon + monachum monachum + monarch monarch + monarchies monarchi + monarchize monarch + monarcho monarcho + monarchs monarch + monarchy monarchi + monast monast + monastery monasteri + monastic monast + monday mondai + monde mond + money monei + moneys monei + mong mong + monger monger + mongers monger + monging mong + mongrel mongrel + mongrels mongrel + mongst mongst + monk monk + monkey monkei + monkeys monkei + monks monk + monmouth monmouth + monopoly monopoli + mons mon + monsieur monsieur + monsieurs monsieur + monster monster + monsters monster + monstrous monstrou + monstrously monstrous + monstrousness monstrous + monstruosity monstruos + montacute montacut + montage montag + montague montagu + montagues montagu + montano montano + montant montant + montez montez + montferrat montferrat + montgomery montgomeri + month month + monthly monthli + months month + montjoy montjoi + monument monument + monumental monument + monuments monument + mood mood + moods mood + moody moodi + moon moon + moonbeams moonbeam + moonish moonish + moonlight moonlight + moons moon + moonshine moonshin + moonshines moonshin + moor moor + moorfields moorfield + moors moor + moorship moorship + mop mop + mope mope + moping mope + mopping mop + mopsa mopsa + moral moral + moraler moral + morality moral + moralize moral + mordake mordak + more more + moreover moreov + mores more + morgan morgan + mori mori + morisco morisco + morn morn + morning morn + mornings morn + morocco morocco + morris morri + morrow morrow + morrows morrow + morsel morsel + morsels morsel + mort mort + mortal mortal + mortality mortal + mortally mortal + mortals mortal + mortar mortar + mortgaged mortgag + mortified mortifi + mortifying mortifi + mortimer mortim + mortimers mortim + mortis morti + mortise mortis + morton morton + mose mose + moss moss + mossgrown mossgrown + most most + mote mote + moth moth + mother mother + mothers mother + moths moth + motion motion + motionless motionless + motions motion + motive motiv + motives motiv + motley motlei + mots mot + mought mought + mould mould + moulded mould + mouldeth mouldeth + moulds mould + mouldy mouldi + moult moult + moulten moulten + mounch mounch + mounseur mounseur + mounsieur mounsieur + mount mount + mountain mountain + mountaineer mountain + mountaineers mountain + mountainous mountain + mountains mountain + mountant mountant + mountanto mountanto + mountebank mountebank + mountebanks mountebank + mounted mount + mounteth mounteth + mounting mount + mounts mount + mourn mourn + mourned mourn + mourner mourner + mourners mourner + mournful mourn + mournfully mournfulli + mourning mourn + mourningly mourningli + mournings mourn + mourns mourn + mous mou 
+ mouse mous + mousetrap mousetrap + mousing mous + mouth mouth + mouthed mouth + mouths mouth + mov mov + movables movabl + move move + moveable moveabl + moveables moveabl + moved move + mover mover + movers mover + moves move + moveth moveth + moving move + movingly movingli + movousus movousu + mow mow + mowbray mowbrai + mower mower + mowing mow + mows mow + moy moi + moys moi + moyses moys + mrs mr + much much + muck muck + mud mud + mudded mud + muddied muddi + muddy muddi + muffins muffin + muffl muffl + muffle muffl + muffled muffl + muffler muffler + muffling muffl + mugger mugger + mugs mug + mulberries mulberri + mulberry mulberri + mule mule + mules mule + muleteers mulet + mulier mulier + mulieres mulier + muliteus muliteu + mull mull + mulmutius mulmutiu + multiplied multipli + multiply multipli + multiplying multipli + multipotent multipot + multitude multitud + multitudes multitud + multitudinous multitudin + mum mum + mumble mumbl + mumbling mumbl + mummers mummer + mummy mummi + mun mun + munch munch + muniments muniment + munition munit + murd murd + murder murder + murdered murder + murderer murder + murderers murder + murdering murder + murderous murder + murders murder + mure mure + murk murk + murkiest murkiest + murky murki + murmur murmur + murmurers murmur + murmuring murmur + murrain murrain + murray murrai + murrion murrion + murther murther + murtherer murther + murtherers murther + murthering murther + murtherous murther + murthers murther + mus mu + muscadel muscadel + muscovites muscovit + muscovits muscovit + muscovy muscovi + muse muse + muses muse + mush mush + mushrooms mushroom + music music + musical music + musician musician + musicians musician + musics music + musing muse + musings muse + musk musk + musket musket + muskets musket + muskos musko + muss muss + mussel mussel + mussels mussel + must must + mustachio mustachio + mustard mustard + mustardseed mustardse + muster muster + mustering muster + musters muster + musty musti + mutability mutabl + mutable mutabl + mutation mutat + mutations mutat + mute mute + mutes mute + mutest mutest + mutine mutin + mutineer mutin + mutineers mutin + mutines mutin + mutinies mutini + mutinous mutin + mutiny mutini + mutius mutiu + mutter mutter + muttered mutter + mutton mutton + muttons mutton + mutual mutual + mutualities mutual + mutually mutual + muzzl muzzl + muzzle muzzl + muzzled muzzl + mv mv + mww mww + my my + mynheers mynheer + myrmidon myrmidon + myrmidons myrmidon + myrtle myrtl + myself myself + myst myst + mysteries mysteri + mystery mysteri + n n + nag nag + nage nage + nags nag + naiads naiad + nail nail + nails nail + nak nak + naked nake + nakedness naked + nal nal + nam nam + name name + named name + nameless nameless + namely name + names name + namest namest + naming name + nan nan + nance nanc + nap nap + nape nape + napes nape + napkin napkin + napkins napkin + naples napl + napless napless + napping nap + naps nap + narbon narbon + narcissus narcissu + narines narin + narrow narrow + narrowly narrowli + naso naso + nasty nasti + nathaniel nathaniel + natifs natif + nation nation + nations nation + native nativ + nativity nativ + natur natur + natural natur + naturalize natur + naturally natur + nature natur + natured natur + natures natur + natus natu + naught naught + naughtily naughtili + naughty naughti + navarre navarr + nave nave + navel navel + navigation navig + navy navi + nay nai + nayward nayward + nayword nayword + nazarite nazarit + ne ne + neaf neaf + neamnoins neamnoin 
+ neanmoins neanmoin + neapolitan neapolitan + neapolitans neapolitan + near near + nearer nearer + nearest nearest + nearly nearli + nearness near + neat neat + neatly neatli + neb neb + nebour nebour + nebuchadnezzar nebuchadnezzar + nec nec + necessaries necessari + necessarily necessarili + necessary necessari + necessitied necess + necessities necess + necessity necess + neck neck + necklace necklac + necks neck + nectar nectar + ned ned + nedar nedar + need need + needed need + needer needer + needful need + needfull needful + needing need + needle needl + needles needl + needless needless + needly needli + needs need + needy needi + neer neer + neeze neez + nefas nefa + negation negat + negative neg + negatives neg + neglect neglect + neglected neglect + neglecting neglect + neglectingly neglectingli + neglection neglect + negligence neglig + negligent neglig + negotiate negoti + negotiations negoti + negro negro + neigh neigh + neighbors neighbor + neighbour neighbour + neighbourhood neighbourhood + neighbouring neighbour + neighbourly neighbourli + neighbours neighbour + neighing neigh + neighs neigh + neither neither + nell nell + nemean nemean + nemesis nemesi + neoptolemus neoptolemu + nephew nephew + nephews nephew + neptune neptun + ner ner + nereides nereid + nerissa nerissa + nero nero + neroes nero + ners ner + nerve nerv + nerves nerv + nervii nervii + nervy nervi + nessus nessu + nest nest + nestor nestor + nests nest + net net + nether nether + netherlands netherland + nets net + nettle nettl + nettled nettl + nettles nettl + neuter neuter + neutral neutral + nev nev + never never + nevil nevil + nevils nevil + new new + newborn newborn + newer newer + newest newest + newgate newgat + newly newli + newness new + news new + newsmongers newsmong + newt newt + newts newt + next next + nibbling nibbl + nicanor nicanor + nice nice + nicely nice + niceness nice + nicer nicer + nicety niceti + nicholas nichola + nick nick + nickname nicknam + nicks nick + niece niec + nieces niec + niggard niggard + niggarding niggard + niggardly niggardli + nigh nigh + night night + nightcap nightcap + nightcaps nightcap + nighted night + nightgown nightgown + nightingale nightingal + nightingales nightingal + nightly nightli + nightmare nightmar + nights night + nightwork nightwork + nihil nihil + nile nile + nill nill + nilus nilu + nimble nimbl + nimbleness nimbl + nimbler nimbler + nimbly nimbl + nine nine + nineteen nineteen + ning ning + ningly ningli + ninny ninni + ninth ninth + ninus ninu + niobe niob + niobes niob + nip nip + nipp nipp + nipping nip + nipple nippl + nips nip + nit nit + nly nly + nnight nnight + nnights nnight + no no + noah noah + nob nob + nobility nobil + nobis nobi + noble nobl + nobleman nobleman + noblemen noblemen + nobleness nobl + nobler nobler + nobles nobl + noblesse nobless + noblest noblest + nobly nobli + nobody nobodi + noces noce + nod nod + nodded nod + nodding nod + noddle noddl + noddles noddl + noddy noddi + nods nod + noes noe + nointed noint + nois noi + noise nois + noiseless noiseless + noisemaker noisemak + noises nois + noisome noisom + nole nole + nominate nomin + nominated nomin + nomination nomin + nominativo nominativo + non non + nonage nonag + nonce nonc + none none + nonino nonino + nonny nonni + nonpareil nonpareil + nonsuits nonsuit + nony noni + nook nook + nooks nook + noon noon + noonday noondai + noontide noontid + nor nor + norbery norberi + norfolk norfolk + norman norman + normandy normandi + normans norman + north north + 
northampton northampton + northamptonshire northamptonshir + northerly northerli + northern northern + northgate northgat + northumberland northumberland + northumberlands northumberland + northward northward + norway norwai + norways norwai + norwegian norwegian + norweyan norweyan + nos no + nose nose + nosegays nosegai + noseless noseless + noses nose + noster noster + nostra nostra + nostril nostril + nostrils nostril + not not + notable notabl + notably notabl + notary notari + notch notch + note note + notebook notebook + noted note + notedly notedli + notes note + notest notest + noteworthy noteworthi + nothing noth + nothings noth + notice notic + notify notifi + noting note + notion notion + notorious notori + notoriously notori + notre notr + notwithstanding notwithstand + nought nought + noun noun + nouns noun + nourish nourish + nourished nourish + nourisher nourish + nourishes nourish + nourisheth nourisheth + nourishing nourish + nourishment nourish + nous nou + novel novel + novelties novelti + novelty novelti + noverbs noverb + novi novi + novice novic + novices novic + novum novum + now now + nowhere nowher + noyance noyanc + ns ns + nt nt + nubibus nubibu + numa numa + numb numb + number number + numbered number + numbering number + numberless numberless + numbers number + numbness numb + nun nun + nuncio nuncio + nuncle nuncl + nunnery nunneri + nuns nun + nuntius nuntiu + nuptial nuptial + nurs nur + nurse nurs + nursed nurs + nurser nurser + nursery nurseri + nurses nurs + nurseth nurseth + nursh nursh + nursing nurs + nurtur nurtur + nurture nurtur + nut nut + nuthook nuthook + nutmeg nutmeg + nutmegs nutmeg + nutriment nutriment + nuts nut + nutshell nutshel + ny ny + nym nym + nymph nymph + nymphs nymph + o o + oak oak + oaken oaken + oaks oak + oared oar + oars oar + oatcake oatcak + oaten oaten + oath oath + oathable oathabl + oaths oath + oats oat + ob ob + obduracy obduraci + obdurate obdur + obedience obedi + obedient obedi + obeisance obeis + oberon oberon + obey obei + obeyed obei + obeying obei + obeys obei + obidicut obidicut + object object + objected object + objections object + objects object + oblation oblat + oblations oblat + obligation oblig + obligations oblig + obliged oblig + oblique obliqu + oblivion oblivion + oblivious oblivi + obloquy obloqui + obscene obscen + obscenely obscen + obscur obscur + obscure obscur + obscured obscur + obscurely obscur + obscures obscur + obscuring obscur + obscurity obscur + obsequies obsequi + obsequious obsequi + obsequiously obsequi + observ observ + observance observ + observances observ + observancy observ + observant observ + observants observ + observation observ + observe observ + observed observ + observer observ + observers observ + observing observ + observingly observingli + obsque obsqu + obstacle obstacl + obstacles obstacl + obstinacy obstinaci + obstinate obstin + obstinately obstin + obstruct obstruct + obstruction obstruct + obstructions obstruct + obtain obtain + obtained obtain + obtaining obtain + occasion occas + occasions occas + occident occid + occidental occident + occulted occult + occupat occupat + occupation occup + occupations occup + occupied occupi + occupies occupi + occupy occupi + occurrence occurr + occurrences occurr + occurrents occurr + ocean ocean + oceans ocean + octavia octavia + octavius octaviu + ocular ocular + od od + odd odd + oddest oddest + oddly oddli + odds odd + ode od + odes od + odious odiou + odoriferous odorifer + odorous odor + odour odour + odours odour + 
ods od + oeillades oeillad + oes oe + oeuvres oeuvr + of of + ofephesus ofephesu + off off + offal offal + offence offenc + offenceful offenc + offences offenc + offend offend + offended offend + offendendo offendendo + offender offend + offenders offend + offendeth offendeth + offending offend + offendress offendress + offends offend + offense offens + offenseless offenseless + offenses offens + offensive offens + offer offer + offered offer + offering offer + offerings offer + offers offer + offert offert + offic offic + office offic + officed offic + officer offic + officers offic + offices offic + official offici + officious offici + offspring offspr + oft oft + often often + oftener often + oftentimes oftentim + oh oh + oil oil + oils oil + oily oili + old old + oldcastle oldcastl + olden olden + older older + oldest oldest + oldness old + olive oliv + oliver oliv + olivers oliv + olives oliv + olivia olivia + olympian olympian + olympus olympu + oman oman + omans oman + omen omen + ominous omin + omission omiss + omit omit + omittance omitt + omitted omit + omitting omit + omne omn + omnes omn + omnipotent omnipot + on on + once onc + one on + ones on + oneyers oney + ongles ongl + onion onion + onions onion + only onli + onset onset + onward onward + onwards onward + oo oo + ooze ooz + oozes ooz + oozy oozi + op op + opal opal + ope op + open open + opener open + opening open + openly openli + openness open + opens open + operant oper + operate oper + operation oper + operations oper + operative oper + opes op + oph oph + ophelia ophelia + opinion opinion + opinions opinion + opportune opportun + opportunities opportun + opportunity opportun + oppos oppo + oppose oppos + opposed oppos + opposeless opposeless + opposer oppos + opposers oppos + opposes oppos + opposing oppos + opposite opposit + opposites opposit + opposition opposit + oppositions opposit + oppress oppress + oppressed oppress + oppresses oppress + oppresseth oppresseth + oppressing oppress + oppression oppress + oppressor oppressor + opprest opprest + opprobriously opprobri + oppugnancy oppugn + opulency opul + opulent opul + or or + oracle oracl + oracles oracl + orange orang + oration orat + orator orat + orators orat + oratory oratori + orb orb + orbed orb + orbs orb + orchard orchard + orchards orchard + ord ord + ordain ordain + ordained ordain + ordaining ordain + order order + ordered order + ordering order + orderless orderless + orderly orderli + orders order + ordinance ordin + ordinant ordin + ordinaries ordinari + ordinary ordinari + ordnance ordnanc + ords ord + ordure ordur + ore or + organ organ + organs organ + orgillous orgil + orient orient + orifex orifex + origin origin + original origin + orisons orison + ork ork + orlando orlando + orld orld + orleans orlean + ornament ornament + ornaments ornament + orodes orod + orphan orphan + orphans orphan + orpheus orpheu + orsino orsino + ort ort + orthography orthographi + orts ort + oscorbidulchos oscorbidulcho + osier osier + osiers osier + osprey osprei + osr osr + osric osric + ossa ossa + ost ost + ostent ostent + ostentare ostentar + ostentation ostent + ostents ostent + ostler ostler + ostlers ostler + ostrich ostrich + osw osw + oswald oswald + othello othello + other other + othergates otherg + others other + otherwhere otherwher + otherwhiles otherwhil + otherwise otherwis + otter otter + ottoman ottoman + ottomites ottomit + oublie oubli + ouches ouch + ought ought + oui oui + ounce ounc + ounces ounc + ouphes ouph + our our + ours our + 
ourself ourself + ourselves ourselv + ousel ousel + out out + outbids outbid + outbrave outbrav + outbraves outbrav + outbreak outbreak + outcast outcast + outcries outcri + outcry outcri + outdar outdar + outdare outdar + outdares outdar + outdone outdon + outfac outfac + outface outfac + outfaced outfac + outfacing outfac + outfly outfli + outfrown outfrown + outgo outgo + outgoes outgo + outgrown outgrown + outjest outjest + outlaw outlaw + outlawry outlawri + outlaws outlaw + outliv outliv + outlive outliv + outlives outliv + outliving outliv + outlook outlook + outlustres outlustr + outpriz outpriz + outrage outrag + outrageous outrag + outrages outrag + outran outran + outright outright + outroar outroar + outrun outrun + outrunning outrun + outruns outrun + outscold outscold + outscorn outscorn + outsell outsel + outsells outsel + outside outsid + outsides outsid + outspeaks outspeak + outsport outsport + outstare outstar + outstay outstai + outstood outstood + outstretch outstretch + outstretched outstretch + outstrike outstrik + outstrip outstrip + outstripped outstrip + outswear outswear + outvenoms outvenom + outward outward + outwardly outwardli + outwards outward + outwear outwear + outweighs outweigh + outwent outwent + outworn outworn + outworths outworth + oven oven + over over + overawe overaw + overbear overbear + overblown overblown + overboard overboard + overbold overbold + overborne overborn + overbulk overbulk + overbuys overbui + overcame overcam + overcast overcast + overcharg overcharg + overcharged overcharg + overcome overcom + overcomes overcom + overdone overdon + overearnest overearnest + overfar overfar + overflow overflow + overflown overflown + overglance overgl + overgo overgo + overgone overgon + overgorg overgorg + overgrown overgrown + overhead overhead + overhear overhear + overheard overheard + overhold overhold + overjoyed overjoi + overkind overkind + overland overland + overleather overleath + overlive overl + overlook overlook + overlooking overlook + overlooks overlook + overmaster overmast + overmounting overmount + overmuch overmuch + overpass overpass + overpeer overp + overpeering overp + overplus overplu + overrul overrul + overrun overrun + overscutch overscutch + overset overset + overshades overshad + overshine overshin + overshines overshin + overshot overshot + oversights oversight + overspread overspread + overstain overstain + overswear overswear + overt overt + overta overta + overtake overtak + overtaketh overtaketh + overthrow overthrow + overthrown overthrown + overthrows overthrow + overtook overtook + overtopp overtopp + overture overtur + overturn overturn + overwatch overwatch + overween overween + overweening overween + overweigh overweigh + overwhelm overwhelm + overwhelming overwhelm + overworn overworn + ovid ovid + ovidius ovidiu + ow ow + owe ow + owed ow + owedst owedst + owen owen + owes ow + owest owest + oweth oweth + owing ow + owl owl + owls owl + own own + owner owner + owners owner + owning own + owns own + owy owi + ox ox + oxen oxen + oxford oxford + oxfordshire oxfordshir + oxlips oxlip + oyes oy + oyster oyster + p p + pabble pabbl + pabylon pabylon + pac pac + pace pace + paced pace + paces pace + pacified pacifi + pacify pacifi + pacing pace + pack pack + packet packet + packets packet + packhorses packhors + packing pack + packings pack + packs pack + packthread packthread + pacorus pacoru + paction paction + pad pad + paddle paddl + paddling paddl + paddock paddock + padua padua + pagan pagan + pagans 
pagan + page page + pageant pageant + pageants pageant + pages page + pah pah + paid paid + pail pail + pailfuls pail + pails pail + pain pain + pained pain + painful pain + painfully painfulli + pains pain + paint paint + painted paint + painter painter + painting paint + paintings paint + paints paint + pair pair + paired pair + pairs pair + pajock pajock + pal pal + palabras palabra + palace palac + palaces palac + palamedes palamed + palate palat + palates palat + palatine palatin + palating palat + pale pale + paled pale + paleness pale + paler paler + pales pale + palestine palestin + palfrey palfrei + palfreys palfrei + palisadoes palisado + pall pall + pallabris pallabri + pallas palla + pallets pallet + palm palm + palmer palmer + palmers palmer + palms palm + palmy palmi + palpable palpabl + palsied palsi + palsies palsi + palsy palsi + palt palt + palter palter + paltry paltri + paly pali + pamp pamp + pamper pamper + pamphlets pamphlet + pan pan + pancackes pancack + pancake pancak + pancakes pancak + pandar pandar + pandars pandar + pandarus pandaru + pander pander + panderly panderli + panders pander + pandulph pandulph + panel panel + pang pang + panging pang + pangs pang + pannier pannier + pannonians pannonian + pansa pansa + pansies pansi + pant pant + pantaloon pantaloon + panted pant + pantheon pantheon + panther panther + panthino panthino + panting pant + pantingly pantingli + pantler pantler + pantry pantri + pants pant + pap pap + papal papal + paper paper + papers paper + paphlagonia paphlagonia + paphos papho + papist papist + paps pap + par par + parable parabl + paracelsus paracelsu + paradise paradis + paradox paradox + paradoxes paradox + paragon paragon + paragons paragon + parallel parallel + parallels parallel + paramour paramour + paramours paramour + parapets parapet + paraquito paraquito + parasite parasit + parasites parasit + parca parca + parcel parcel + parcell parcel + parcels parcel + parch parch + parched parch + parching parch + parchment parchment + pard pard + pardon pardon + pardona pardona + pardoned pardon + pardoner pardon + pardoning pardon + pardonne pardonn + pardonner pardonn + pardonnez pardonnez + pardons pardon + pare pare + pared pare + parel parel + parent parent + parentage parentag + parents parent + parfect parfect + paring pare + parings pare + paris pari + parish parish + parishioners parishion + parisians parisian + paritors paritor + park park + parks park + parle parl + parler parler + parles parl + parley parlei + parlez parlez + parliament parliament + parlors parlor + parlour parlour + parlous parlou + parmacity parmac + parolles parol + parricide parricid + parricides parricid + parrot parrot + parrots parrot + parsley parslei + parson parson + part part + partake partak + partaken partaken + partaker partak + partakers partak + parted part + parthia parthia + parthian parthian + parthians parthian + parti parti + partial partial + partialize partial + partially partial + participate particip + participation particip + particle particl + particular particular + particularities particular + particularize particular + particularly particularli + particulars particular + parties parti + parting part + partisan partisan + partisans partisan + partition partit + partizan partizan + partlet partlet + partly partli + partner partner + partners partner + partridge partridg + parts part + party parti + pas pa + pash pash + pashed pash + pashful pash + pass pass + passable passabl + passado passado + passage passag + passages 
passag + passant passant + passed pass + passenger passeng + passengers passeng + passes pass + passeth passeth + passing pass + passio passio + passion passion + passionate passion + passioning passion + passions passion + passive passiv + passport passport + passy passi + past past + paste past + pasterns pastern + pasties pasti + pastime pastim + pastimes pastim + pastoral pastor + pastorals pastor + pastors pastor + pastry pastri + pasture pastur + pastures pastur + pasty pasti + pat pat + patay patai + patch patch + patchery patcheri + patches patch + pate pate + pated pate + patent patent + patents patent + paternal patern + pates pate + path path + pathetical pathet + paths path + pathway pathwai + pathways pathwai + patience patienc + patient patient + patiently patient + patients patient + patines patin + patrician patrician + patricians patrician + patrick patrick + patrimony patrimoni + patroclus patroclu + patron patron + patronage patronag + patroness patro + patrons patron + patrum patrum + patter patter + pattern pattern + patterns pattern + pattle pattl + pauca pauca + paucas pauca + paul paul + paulina paulina + paunch paunch + paunches paunch + pause paus + pauser pauser + pauses paus + pausingly pausingli + pauvres pauvr + pav pav + paved pave + pavement pavement + pavilion pavilion + pavilions pavilion + pavin pavin + paw paw + pawn pawn + pawns pawn + paws paw + pax pax + pay pai + payest payest + paying pai + payment payment + payments payment + pays pai + paysan paysan + paysans paysan + pe pe + peace peac + peaceable peaceabl + peaceably peaceabl + peaceful peac + peacemakers peacemak + peaces peac + peach peach + peaches peach + peacock peacock + peacocks peacock + peak peak + peaking peak + peal peal + peals peal + pear pear + peard peard + pearl pearl + pearls pearl + pears pear + peas pea + peasant peasant + peasantry peasantri + peasants peasant + peascod peascod + pease peas + peaseblossom peaseblossom + peat peat + peaten peaten + peating peat + pebble pebbl + pebbled pebbl + pebbles pebbl + peck peck + pecks peck + peculiar peculiar + pecus pecu + pedant pedant + pedantical pedant + pedascule pedascul + pede pede + pedestal pedest + pedigree pedigre + pedlar pedlar + pedlars pedlar + pedro pedro + peds ped + peel peel + peep peep + peeped peep + peeping peep + peeps peep + peer peer + peereth peereth + peering peer + peerless peerless + peers peer + peesel peesel + peevish peevish + peevishly peevishli + peflur peflur + peg peg + pegasus pegasu + pegs peg + peise peis + peised peis + peize peiz + pelf pelf + pelican pelican + pelion pelion + pell pell + pella pella + pelleted pellet + peloponnesus peloponnesu + pelt pelt + pelting pelt + pembroke pembrok + pen pen + penalties penalti + penalty penalti + penance penanc + pence penc + pencil pencil + pencill pencil + pencils pencil + pendant pendant + pendent pendent + pendragon pendragon + pendulous pendul + penelope penelop + penetrable penetr + penetrate penetr + penetrative penetr + penitence penit + penitent penit + penitential penitenti + penitently penit + penitents penit + penker penker + penknife penknif + penn penn + penned pen + penning pen + pennons pennon + penny penni + pennyworth pennyworth + pennyworths pennyworth + pens pen + pense pens + pension pension + pensioners pension + pensive pensiv + pensived pensiv + pensively pensiv + pent pent + pentecost pentecost + penthesilea penthesilea + penthouse penthous + penurious penuri + penury penuri + peopl peopl + people peopl + peopled peopl + 
peoples peopl + pepin pepin + pepper pepper + peppercorn peppercorn + peppered pepper + per per + peradventure peradventur + peradventures peradventur + perceiv perceiv + perceive perceiv + perceived perceiv + perceives perceiv + perceiveth perceiveth + perch perch + perchance perchanc + percies perci + percussion percuss + percy perci + perdie perdi + perdita perdita + perdition perdit + perdonato perdonato + perdu perdu + perdurable perdur + perdurably perdur + perdy perdi + pere pere + peregrinate peregrin + peremptorily peremptorili + peremptory peremptori + perfect perfect + perfected perfect + perfecter perfect + perfectest perfectest + perfection perfect + perfections perfect + perfectly perfectli + perfectness perfect + perfidious perfidi + perfidiously perfidi + perforce perforc + perform perform + performance perform + performances perform + performed perform + performer perform + performers perform + performing perform + performs perform + perfum perfum + perfume perfum + perfumed perfum + perfumer perfum + perfumes perfum + perge perg + perhaps perhap + periapts periapt + perigort perigort + perigouna perigouna + peril peril + perilous peril + perils peril + period period + periods period + perish perish + perished perish + perishest perishest + perisheth perisheth + perishing perish + periwig periwig + perjur perjur + perjure perjur + perjured perjur + perjuries perjuri + perjury perjuri + perk perk + perkes perk + permafoy permafoi + permanent perman + permission permiss + permissive permiss + permit permit + permitted permit + pernicious pernici + perniciously pernici + peroration peror + perpend perpend + perpendicular perpendicular + perpendicularly perpendicularli + perpetual perpetu + perpetually perpetu + perpetuity perpetu + perplex perplex + perplexed perplex + perplexity perplex + pers per + persecuted persecut + persecutions persecut + persecutor persecutor + perseus perseu + persever persev + perseverance persever + persevers persev + persia persia + persian persian + persist persist + persisted persist + persistency persist + persistive persist + persists persist + person person + personae persona + personage personag + personages personag + personal person + personally person + personate person + personated person + personates person + personating person + persons person + perspective perspect + perspectively perspect + perspectives perspect + perspicuous perspicu + persuade persuad + persuaded persuad + persuades persuad + persuading persuad + persuasion persuas + persuasions persuas + pert pert + pertain pertain + pertaining pertain + pertains pertain + pertaunt pertaunt + pertinent pertin + pertly pertli + perturb perturb + perturbation perturb + perturbations perturb + perturbed perturb + perus peru + perusal perus + peruse perus + perused perus + perusing perus + perverse pervers + perversely pervers + perverseness pervers + pervert pervert + perverted pervert + peseech peseech + pest pest + pester pester + pestiferous pestifer + pestilence pestil + pestilent pestil + pet pet + petar petar + peter peter + petit petit + petition petit + petitionary petitionari + petitioner petition + petitioners petition + petitions petit + peto peto + petrarch petrarch + petruchio petruchio + petter petter + petticoat petticoat + petticoats petticoat + pettiness petti + pettish pettish + pettitoes pettito + petty petti + peu peu + pew pew + pewter pewter + pewterer pewter + phaethon phaethon + phaeton phaeton + phantasime phantasim + phantasimes phantasim + phantasma 
phantasma + pharamond pharamond + pharaoh pharaoh + pharsalia pharsalia + pheasant pheasant + pheazar pheazar + phebe phebe + phebes phebe + pheebus pheebu + pheeze pheez + phibbus phibbu + philadelphos philadelpho + philario philario + philarmonus philarmonu + philemon philemon + philip philip + philippan philippan + philippe philipp + philippi philippi + phillida phillida + philo philo + philomel philomel + philomela philomela + philosopher philosoph + philosophers philosoph + philosophical philosoph + philosophy philosophi + philostrate philostr + philotus philotu + phlegmatic phlegmat + phoebe phoeb + phoebus phoebu + phoenicia phoenicia + phoenicians phoenician + phoenix phoenix + phorbus phorbu + photinus photinu + phrase phrase + phraseless phraseless + phrases phrase + phrygia phrygia + phrygian phrygian + phrynia phrynia + physic physic + physical physic + physician physician + physicians physician + physics physic + pia pia + pibble pibbl + pible pibl + picardy picardi + pick pick + pickaxe pickax + pickaxes pickax + pickbone pickbon + picked pick + pickers picker + picking pick + pickle pickl + picklock picklock + pickpurse pickpurs + picks pick + pickt pickt + pickthanks pickthank + pictur pictur + picture pictur + pictured pictur + pictures pictur + pid pid + pie pie + piec piec + piece piec + pieces piec + piecing piec + pied pi + piedness pied + pier pier + pierc pierc + pierce pierc + pierced pierc + pierces pierc + pierceth pierceth + piercing pierc + piercy pierci + piers pier + pies pi + piety pieti + pig pig + pigeon pigeon + pigeons pigeon + pight pight + pigmy pigmi + pigrogromitus pigrogromitu + pike pike + pikes pike + pil pil + pilate pilat + pilates pilat + pilchers pilcher + pile pile + piles pile + pilf pilf + pilfering pilfer + pilgrim pilgrim + pilgrimage pilgrimag + pilgrims pilgrim + pill pill + pillage pillag + pillagers pillag + pillar pillar + pillars pillar + pillicock pillicock + pillory pillori + pillow pillow + pillows pillow + pills pill + pilot pilot + pilots pilot + pimpernell pimpernel + pin pin + pinch pinch + pinched pinch + pinches pinch + pinching pinch + pindarus pindaru + pine pine + pined pine + pines pine + pinfold pinfold + pining pine + pinion pinion + pink pink + pinn pinn + pinnace pinnac + pins pin + pinse pins + pint pint + pintpot pintpot + pioned pion + pioneers pioneer + pioner pioner + pioners pioner + pious piou + pip pip + pipe pipe + piper piper + pipers piper + pipes pipe + piping pipe + pippin pippin + pippins pippin + pirate pirat + pirates pirat + pisa pisa + pisanio pisanio + pish pish + pismires pismir + piss piss + pissing piss + pistol pistol + pistols pistol + pit pit + pitch pitch + pitched pitch + pitcher pitcher + pitchers pitcher + pitchy pitchi + piteous piteou + piteously piteous + pitfall pitfal + pith pith + pithless pithless + pithy pithi + pitie piti + pitied piti + pities piti + pitiful piti + pitifully pitifulli + pitiless pitiless + pits pit + pittance pittanc + pittie pitti + pittikins pittikin + pity piti + pitying piti + pius piu + plac plac + place place + placed place + placentio placentio + places place + placeth placeth + placid placid + placing place + plack plack + placket placket + plackets placket + plagu plagu + plague plagu + plagued plagu + plagues plagu + plaguing plagu + plaguy plagui + plain plain + plainer plainer + plainest plainest + plaining plain + plainings plain + plainly plainli + plainness plain + plains plain + plainsong plainsong + plaintful plaint + plaintiff plaintiff + 
plaintiffs plaintiff + plaints plaint + planched planch + planet planet + planetary planetari + planets planet + planks plank + plant plant + plantage plantag + plantagenet plantagenet + plantagenets plantagenet + plantain plantain + plantation plantat + planted plant + planteth planteth + plants plant + plash plash + plashy plashi + plast plast + plaster plaster + plasterer plaster + plat plat + plate plate + plated plate + plates plate + platform platform + platforms platform + plats plat + platted plat + plausible plausibl + plausive plausiv + plautus plautu + play plai + played plai + player player + players player + playeth playeth + playfellow playfellow + playfellows playfellow + playhouse playhous + playing plai + plays plai + plea plea + pleach pleach + pleached pleach + plead plead + pleaded plead + pleader pleader + pleaders pleader + pleading plead + pleads plead + pleas plea + pleasance pleasanc + pleasant pleasant + pleasantly pleasantli + please pleas + pleased pleas + pleaser pleaser + pleasers pleaser + pleases pleas + pleasest pleasest + pleaseth pleaseth + pleasing pleas + pleasure pleasur + pleasures pleasur + plebeians plebeian + plebeii plebeii + plebs pleb + pledge pledg + pledges pledg + pleines plein + plenitude plenitud + plenteous plenteou + plenteously plenteous + plenties plenti + plentiful plenti + plentifully plentifulli + plenty plenti + pless pless + plessed pless + plessing pless + pliant pliant + plied pli + plies pli + plight plight + plighted plight + plighter plighter + plod plod + plodded plod + plodders plodder + plodding plod + plods plod + plood plood + ploody ploodi + plot plot + plots plot + plotted plot + plotter plotter + plough plough + ploughed plough + ploughman ploughman + ploughmen ploughmen + plow plow + plows plow + pluck pluck + plucked pluck + plucker plucker + plucking pluck + plucks pluck + plue plue + plum plum + plume plume + plumed plume + plumes plume + plummet plummet + plump plump + plumpy plumpi + plums plum + plung plung + plunge plung + plunged plung + plural plural + plurisy plurisi + plus plu + pluto pluto + plutus plutu + ply ply + po po + pocket pocket + pocketing pocket + pockets pocket + pocky pocki + pody podi + poem poem + poesy poesi + poet poet + poetical poetic + poetry poetri + poets poet + poictiers poictier + poinards poinard + poins poin + point point + pointblank pointblank + pointed point + pointing point + points point + pois poi + poise pois + poising pois + poison poison + poisoned poison + poisoner poison + poisoning poison + poisonous poison + poisons poison + poke poke + poking poke + pol pol + polack polack + polacks polack + poland poland + pold pold + pole pole + poleaxe poleax + polecat polecat + polecats polecat + polemon polemon + poles pole + poli poli + policies polici + policy polici + polish polish + polished polish + politic polit + politician politician + politicians politician + politicly politicli + polixenes polixen + poll poll + polluted pollut + pollution pollut + polonius poloniu + poltroons poltroon + polusion polus + polydamus polydamu + polydore polydor + polyxena polyxena + pomander pomand + pomegranate pomegran + pomewater pomewat + pomfret pomfret + pomgarnet pomgarnet + pommel pommel + pomp pomp + pompeius pompeiu + pompey pompei + pompion pompion + pompous pompou + pomps pomp + pond pond + ponder ponder + ponderous ponder + ponds pond + poniard poniard + poniards poniard + pont pont + pontic pontic + pontifical pontif + ponton ponton + pooh pooh + pool pool + poole pool + poop 
poop + poor poor + poorer poorer + poorest poorest + poorly poorli + pop pop + pope pope + popedom popedom + popilius popiliu + popingay popingai + popish popish + popp popp + poppy poppi + pops pop + popular popular + popularity popular + populous popul + porch porch + porches porch + pore pore + poring pore + pork pork + porn porn + porpentine porpentin + porridge porridg + porringer porring + port port + portable portabl + portage portag + portal portal + portance portanc + portcullis portculli + portend portend + portends portend + portent portent + portentous portent + portents portent + porter porter + porters porter + portia portia + portion portion + portly portli + portotartarossa portotartarossa + portrait portrait + portraiture portraitur + ports port + portugal portug + pose pose + posied posi + posies posi + position posit + positive posit + positively posit + posse poss + possess possess + possessed possess + possesses possess + possesseth possesseth + possessing possess + possession possess + possessions possess + possessor possessor + posset posset + possets posset + possibilities possibl + possibility possibl + possible possibl + possibly possibl + possitable possit + post post + poste post + posted post + posterior posterior + posteriors posterior + posterity poster + postern postern + posterns postern + posters poster + posthorse posthors + posthorses posthors + posthumus posthumu + posting post + postmaster postmast + posts post + postscript postscript + posture postur + postures postur + posy posi + pot pot + potable potabl + potations potat + potato potato + potatoes potato + potch potch + potency potenc + potent potent + potentates potent + potential potenti + potently potent + potents potent + pothecary pothecari + pother pother + potion potion + potions potion + potpan potpan + pots pot + potter potter + potting pot + pottle pottl + pouch pouch + poulter poulter + poultice poultic + poultney poultnei + pouncet pouncet + pound pound + pounds pound + pour pour + pourest pourest + pouring pour + pourquoi pourquoi + pours pour + pout pout + poverty poverti + pow pow + powd powd + powder powder + power power + powerful power + powerfully powerfulli + powerless powerless + powers power + pox pox + poys poi + poysam poysam + prabbles prabbl + practic practic + practice practic + practiced practic + practicer practic + practices practic + practicing practic + practis practi + practisants practis + practise practis + practiser practis + practisers practis + practises practis + practising practis + praeclarissimus praeclarissimu + praemunire praemunir + praetor praetor + praetors praetor + pragging prag + prague pragu + prain prain + prains prain + prais prai + praise prais + praised prais + praises prais + praisest praisest + praiseworthy praiseworthi + praising prais + prancing pranc + prank prank + pranks prank + prat prat + prate prate + prated prate + prater prater + prating prate + prattle prattl + prattler prattler + prattling prattl + prave prave + prawls prawl + prawns prawn + pray prai + prayer prayer + prayers prayer + praying prai + prays prai + pre pre + preach preach + preached preach + preachers preacher + preaches preach + preaching preach + preachment preachment + pread pread + preambulate preambul + precedence preced + precedent preced + preceding preced + precept precept + preceptial precepti + precepts precept + precinct precinct + precious preciou + preciously precious + precipice precipic + precipitating precipit + precipitation precipit + precise 
precis + precisely precis + preciseness precis + precisian precisian + precor precor + precurse precurs + precursors precursor + predeceased predeceas + predecessor predecessor + predecessors predecessor + predestinate predestin + predicament predica + predict predict + prediction predict + predictions predict + predominance predomin + predominant predomin + predominate predomin + preeches preech + preeminence preemin + preface prefac + prefer prefer + preferment prefer + preferments prefer + preferr preferr + preferreth preferreth + preferring prefer + prefers prefer + prefiguring prefigur + prefix prefix + prefixed prefix + preformed preform + pregnancy pregnanc + pregnant pregnant + pregnantly pregnantli + prejudicates prejud + prejudice prejudic + prejudicial prejudici + prelate prelat + premeditated premedit + premeditation premedit + premised premis + premises premis + prenez prenez + prenominate prenomin + prentice prentic + prentices prentic + preordinance preordin + prepar prepar + preparation prepar + preparations prepar + prepare prepar + prepared prepar + preparedly preparedli + prepares prepar + preparing prepar + prepost prepost + preposterous preposter + preposterously preposter + prerogatifes prerogatif + prerogative prerog + prerogatived prerogativ + presage presag + presagers presag + presages presag + presageth presageth + presaging presag + prescience prescienc + prescribe prescrib + prescript prescript + prescription prescript + prescriptions prescript + prescripts prescript + presence presenc + presences presenc + present present + presentation present + presented present + presenter present + presenters present + presenteth presenteth + presenting present + presently present + presentment present + presents present + preserv preserv + preservation preserv + preservative preserv + preserve preserv + preserved preserv + preserver preserv + preservers preserv + preserving preserv + president presid + press press + pressed press + presser presser + presses press + pressing press + pressure pressur + pressures pressur + prest prest + prester prester + presume presum + presumes presum + presuming presum + presumption presumpt + presumptuous presumptu + presuppos presuppo + pret pret + pretence pretenc + pretences pretenc + pretend pretend + pretended pretend + pretending pretend + pretense pretens + pretext pretext + pretia pretia + prettier prettier + prettiest prettiest + prettily prettili + prettiness pretti + pretty pretti + prevail prevail + prevailed prevail + prevaileth prevaileth + prevailing prevail + prevailment prevail + prevails prevail + prevent prevent + prevented prevent + prevention prevent + preventions prevent + prevents prevent + prey prei + preyful prey + preys prei + priam priam + priami priami + priamus priamu + pribbles pribbl + price price + prick prick + pricked prick + pricket pricket + pricking prick + pricks prick + pricksong pricksong + pride pride + prides pride + pridge pridg + prie prie + pried pri + prief prief + pries pri + priest priest + priesthood priesthood + priests priest + prig prig + primal primal + prime prime + primer primer + primero primero + primest primest + primitive primit + primo primo + primogenity primogen + primrose primros + primroses primros + primy primi + prince princ + princely princ + princes princ + princess princess + principal princip + principalities princip + principality princip + principle principl + principles principl + princox princox + prings pring + print print + printed print + printing print + 
printless printless + prints print + prioress prioress + priories priori + priority prioriti + priory priori + priscian priscian + prison prison + prisoner prison + prisoners prison + prisonment prison + prisonnier prisonni + prisons prison + pristine pristin + prithe prith + prithee prithe + privacy privaci + private privat + privately privat + privates privat + privilage privilag + privileg privileg + privilege privileg + privileged privileg + privileges privileg + privilegio privilegio + privily privili + privity priviti + privy privi + priz priz + prize prize + prized prize + prizer prizer + prizes prize + prizest prizest + prizing prize + pro pro + probable probabl + probal probal + probation probat + proceed proce + proceeded proceed + proceeders proceed + proceeding proceed + proceedings proceed + proceeds proce + process process + procession process + proclaim proclaim + proclaimed proclaim + proclaimeth proclaimeth + proclaims proclaim + proclamation proclam + proclamations proclam + proconsul proconsul + procrastinate procrastin + procreant procreant + procreants procreant + procreation procreat + procrus procru + proculeius proculeiu + procur procur + procurator procur + procure procur + procured procur + procures procur + procuring procur + prodigal prodig + prodigality prodig + prodigally prodig + prodigals prodig + prodigies prodigi + prodigious prodigi + prodigiously prodigi + prodigy prodigi + proditor proditor + produc produc + produce produc + produced produc + produces produc + producing produc + proface profac + profan profan + profanation profan + profane profan + profaned profan + profanely profan + profaneness profan + profaners profan + profaning profan + profess profess + professed profess + professes profess + profession profess + professions profess + professors professor + proffer proffer + proffered proffer + profferer proffer + proffers proffer + proficient profici + profit profit + profitable profit + profitably profit + profited profit + profiting profit + profitless profitless + profits profit + profound profound + profoundest profoundest + profoundly profoundli + progenitors progenitor + progeny progeni + progne progn + prognosticate prognost + prognostication prognost + progress progress + progression progress + prohibit prohibit + prohibition prohibit + project project + projection project + projects project + prolixious prolixi + prolixity prolix + prologue prologu + prologues prologu + prolong prolong + prolongs prolong + promethean promethean + prometheus prometheu + promis promi + promise promis + promised promis + promises promis + promiseth promiseth + promising promis + promontory promontori + promotion promot + promotions promot + prompt prompt + prompted prompt + promptement promptement + prompter prompter + prompting prompt + prompts prompt + prompture promptur + promulgate promulg + prone prone + prononcer prononc + prononcez prononcez + pronoun pronoun + pronounc pronounc + pronounce pronounc + pronounced pronounc + pronouncing pronounc + pronouns pronoun + proof proof + proofs proof + prop prop + propagate propag + propagation propag + propend propend + propension propens + proper proper + properer proper + properly properli + propertied properti + properties properti + property properti + prophecies propheci + prophecy propheci + prophesied prophesi + prophesier prophesi + prophesy prophesi + prophesying prophesi + prophet prophet + prophetess prophetess + prophetic prophet + prophetically prophet + prophets prophet + propinquity propinqu + 
propontic propont + proportion proport + proportionable proportion + proportions proport + propos propo + propose propos + proposed propos + proposer propos + proposes propos + proposing propos + proposition proposit + propositions proposit + propounded propound + propp propp + propre propr + propriety proprieti + props prop + propugnation propugn + prorogue prorogu + prorogued prorogu + proscription proscript + proscriptions proscript + prose prose + prosecute prosecut + prosecution prosecut + proselytes proselyt + proserpina proserpina + prosp prosp + prospect prospect + prosper prosper + prosperity prosper + prospero prospero + prosperous prosper + prosperously prosper + prospers prosper + prostitute prostitut + prostrate prostrat + protect protect + protected protect + protection protect + protector protector + protectors protector + protectorship protectorship + protectress protectress + protects protect + protest protest + protestation protest + protestations protest + protested protest + protester protest + protesting protest + protests protest + proteus proteu + protheus protheu + protract protract + protractive protract + proud proud + prouder prouder + proudest proudest + proudlier proudlier + proudly proudli + prouds proud + prov prov + provand provand + prove prove + proved prove + provender provend + proverb proverb + proverbs proverb + proves prove + proveth proveth + provide provid + provided provid + providence provid + provident provid + providently provid + provider provid + provides provid + province provinc + provinces provinc + provincial provinci + proving prove + provision provis + proviso proviso + provocation provoc + provok provok + provoke provok + provoked provok + provoker provok + provokes provok + provoketh provoketh + provoking provok + provost provost + prowess prowess + prudence prudenc + prudent prudent + prun prun + prune prune + prunes prune + pruning prune + pry pry + prying pry + psalm psalm + psalmist psalmist + psalms psalm + psalteries psalteri + ptolemies ptolemi + ptolemy ptolemi + public public + publican publican + publication public + publicly publicli + publicola publicola + publish publish + published publish + publisher publish + publishing publish + publius publiu + pucelle pucel + puck puck + pudder pudder + pudding pud + puddings pud + puddle puddl + puddled puddl + pudency pudenc + pueritia pueritia + puff puff + puffing puf + puffs puff + pugging pug + puis pui + puissance puissanc + puissant puissant + puke puke + puking puke + pulcher pulcher + puling pule + pull pull + puller puller + pullet pullet + pulling pull + pulls pull + pulpit pulpit + pulpiter pulpit + pulpits pulpit + pulse puls + pulsidge pulsidg + pump pump + pumpion pumpion + pumps pump + pun pun + punched punch + punish punish + punished punish + punishes punish + punishment punish + punishments punish + punk punk + punto punto + puny puni + pupil pupil + pupils pupil + puppet puppet + puppets puppet + puppies puppi + puppy puppi + pur pur + purblind purblind + purchas purcha + purchase purchas + purchased purchas + purchases purchas + purchaseth purchaseth + purchasing purchas + pure pure + purely pure + purer purer + purest purest + purg purg + purgation purgat + purgative purg + purgatory purgatori + purge purg + purged purg + purgers purger + purging purg + purifies purifi + purifying purifi + puritan puritan + purity puriti + purlieus purlieu + purple purpl + purpled purpl + purples purpl + purport purport + purpos purpo + purpose purpos + purposed purpos + 
purposely purpos + purposes purpos + purposeth purposeth + purposing purpos + purr purr + purs pur + purse purs + pursents pursent + purses purs + pursu pursu + pursue pursu + pursued pursu + pursuers pursuer + pursues pursu + pursuest pursuest + pursueth pursueth + pursuing pursu + pursuit pursuit + pursuivant pursuiv + pursuivants pursuiv + pursy pursi + purus puru + purveyor purveyor + push push + pushes push + pusillanimity pusillanim + put put + putrefy putrefi + putrified putrifi + puts put + putter putter + putting put + puttock puttock + puzzel puzzel + puzzle puzzl + puzzled puzzl + puzzles puzzl + py py + pygmalion pygmalion + pygmies pygmi + pygmy pygmi + pyramid pyramid + pyramides pyramid + pyramids pyramid + pyramis pyrami + pyramises pyramis + pyramus pyramu + pyrenean pyrenean + pyrrhus pyrrhu + pythagoras pythagora + qu qu + quadrangle quadrangl + quae quae + quaff quaff + quaffing quaf + quagmire quagmir + quail quail + quailing quail + quails quail + quaint quaint + quaintly quaintli + quak quak + quake quak + quakes quak + qualification qualif + qualified qualifi + qualifies qualifi + qualify qualifi + qualifying qualifi + qualite qualit + qualities qualiti + quality qualiti + qualm qualm + qualmish qualmish + quam quam + quand quand + quando quando + quantities quantiti + quantity quantiti + quare quar + quarrel quarrel + quarrell quarrel + quarreller quarrel + quarrelling quarrel + quarrelous quarrel + quarrels quarrel + quarrelsome quarrelsom + quarries quarri + quarry quarri + quart quart + quarter quarter + quartered quarter + quartering quarter + quarters quarter + quarts quart + quasi quasi + quat quat + quatch quatch + quay quai + que que + quean quean + queas quea + queasiness queasi + queasy queasi + queen queen + queens queen + quell quell + queller queller + quench quench + quenched quench + quenching quench + quenchless quenchless + quern quern + quest quest + questant questant + question question + questionable question + questioned question + questioning question + questionless questionless + questions question + questrists questrist + quests quest + queubus queubu + qui qui + quick quick + quicken quicken + quickens quicken + quicker quicker + quicklier quicklier + quickly quickli + quickness quick + quicksand quicksand + quicksands quicksand + quicksilverr quicksilverr + quid quid + quiddities quidditi + quiddits quiddit + quier quier + quiet quiet + quieter quieter + quietly quietli + quietness quiet + quietus quietu + quill quill + quillets quillet + quills quill + quilt quilt + quinapalus quinapalu + quince quinc + quinces quinc + quintain quintain + quintessence quintess + quintus quintu + quip quip + quips quip + quire quir + quiring quir + quirk quirk + quirks quirk + quis qui + quit quit + quite quit + quits quit + quittance quittanc + quitted quit + quitting quit + quiver quiver + quivering quiver + quivers quiver + quo quo + quod quod + quoifs quoif + quoint quoint + quoit quoit + quoits quoit + quondam quondam + quoniam quoniam + quote quot + quoted quot + quotes quot + quoth quoth + quotidian quotidian + r r + rabbit rabbit + rabble rabbl + rabblement rabblement + race race + rack rack + rackers racker + racket racket + rackets racket + racking rack + racks rack + radiance radianc + radiant radiant + radish radish + rafe rafe + raft raft + rag rag + rage rage + rages rage + rageth rageth + ragg ragg + ragged rag + raggedness ragged + raging rage + ragozine ragozin + rags rag + rah rah + rail rail + railed rail + railer railer + railest 
railest + raileth raileth + railing rail + rails rail + raiment raiment + rain rain + rainbow rainbow + raineth raineth + raining rain + rainold rainold + rains rain + rainy raini + rais rai + raise rais + raised rais + raises rais + raising rais + raisins raisin + rak rak + rake rake + rakers raker + rakes rake + ral ral + rald rald + ralph ralph + ram ram + rambures rambur + ramm ramm + rampallian rampallian + rampant rampant + ramping ramp + rampir rampir + ramps ramp + rams ram + ramsey ramsei + ramston ramston + ran ran + rance ranc + rancorous rancor + rancors rancor + rancour rancour + random random + rang rang + range rang + ranged rang + rangers ranger + ranges rang + ranging rang + rank rank + ranker ranker + rankest rankest + ranking rank + rankle rankl + rankly rankli + rankness rank + ranks rank + ransack ransack + ransacking ransack + ransom ransom + ransomed ransom + ransoming ransom + ransomless ransomless + ransoms ransom + rant rant + ranting rant + rap rap + rape rape + rapes rape + rapier rapier + rapiers rapier + rapine rapin + raps rap + rapt rapt + rapture raptur + raptures raptur + rar rar + rare rare + rarely rare + rareness rare + rarer rarer + rarest rarest + rarities rariti + rarity rariti + rascal rascal + rascalliest rascalliest + rascally rascal + rascals rascal + rased rase + rash rash + rasher rasher + rashly rashli + rashness rash + rat rat + ratcatcher ratcatch + ratcliff ratcliff + rate rate + rated rate + rately rate + rates rate + rather rather + ratherest ratherest + ratified ratifi + ratifiers ratifi + ratify ratifi + rating rate + rational ration + ratolorum ratolorum + rats rat + ratsbane ratsban + rattle rattl + rattles rattl + rattling rattl + rature ratur + raught raught + rav rav + rave rave + ravel ravel + raven raven + ravening raven + ravenous raven + ravens raven + ravenspurgh ravenspurgh + raves rave + ravin ravin + raving rave + ravish ravish + ravished ravish + ravisher ravish + ravishing ravish + ravishments ravish + raw raw + rawer rawer + rawly rawli + rawness raw + ray rai + rayed rai + rays rai + raz raz + raze raze + razed raze + razes raze + razeth razeth + razing raze + razor razor + razorable razor + razors razor + razure razur + re re + reach reach + reaches reach + reacheth reacheth + reaching reach + read read + reader reader + readiest readiest + readily readili + readiness readi + reading read + readins readin + reads read + ready readi + real real + really realli + realm realm + realms realm + reap reap + reapers reaper + reaping reap + reaps reap + rear rear + rears rear + rearward rearward + reason reason + reasonable reason + reasonably reason + reasoned reason + reasoning reason + reasonless reasonless + reasons reason + reave reav + rebate rebat + rebato rebato + rebeck rebeck + rebel rebel + rebell rebel + rebelling rebel + rebellion rebellion + rebellious rebelli + rebels rebel + rebound rebound + rebuk rebuk + rebuke rebuk + rebukeable rebuk + rebuked rebuk + rebukes rebuk + rebus rebu + recall recal + recant recant + recantation recant + recanter recant + recanting recant + receipt receipt + receipts receipt + receiv receiv + receive receiv + received receiv + receiver receiv + receives receiv + receivest receivest + receiveth receiveth + receiving receiv + receptacle receptacl + rechate rechat + reciprocal reciproc + reciprocally reciproc + recite recit + recited recit + reciterai reciterai + reck reck + recking reck + reckless reckless + reckon reckon + reckoned reckon + reckoning reckon + reckonings reckon + 
recks reck + reclaim reclaim + reclaims reclaim + reclusive reclus + recognizance recogniz + recognizances recogniz + recoil recoil + recoiling recoil + recollected recollect + recomforted recomfort + recomforture recomfortur + recommend recommend + recommended recommend + recommends recommend + recompens recompen + recompense recompens + reconcil reconcil + reconcile reconcil + reconciled reconcil + reconcilement reconcil + reconciler reconcil + reconciles reconcil + reconciliation reconcili + record record + recordation record + recorded record + recorder record + recorders record + records record + recount recount + recounted recount + recounting recount + recountments recount + recounts recount + recourse recours + recov recov + recover recov + recoverable recover + recovered recov + recoveries recoveri + recovers recov + recovery recoveri + recreant recreant + recreants recreant + recreate recreat + recreation recreat + rectify rectifi + rector rector + rectorship rectorship + recure recur + recured recur + red red + redbreast redbreast + redder redder + reddest reddest + rede rede + redeem redeem + redeemed redeem + redeemer redeem + redeeming redeem + redeems redeem + redeliver redeliv + redemption redempt + redime redim + redness red + redoubled redoubl + redoubted redoubt + redound redound + redress redress + redressed redress + redresses redress + reduce reduc + reechy reechi + reed reed + reeds reed + reek reek + reeking reek + reeks reek + reeky reeki + reel reel + reeleth reeleth + reeling reel + reels reel + refell refel + refer refer + reference refer + referr referr + referred refer + refigured refigur + refin refin + refined refin + reflect reflect + reflecting reflect + reflection reflect + reflex reflex + reform reform + reformation reform + reformed reform + refractory refractori + refrain refrain + refresh refresh + refreshing refresh + reft reft + refts reft + refuge refug + refus refu + refusal refus + refuse refus + refused refus + refusest refusest + refusing refus + reg reg + regal regal + regalia regalia + regan regan + regard regard + regardance regard + regarded regard + regardfully regardfulli + regarding regard + regards regard + regenerate regener + regent regent + regentship regentship + regia regia + regiment regiment + regiments regiment + regina regina + region region + regions region + regist regist + register regist + registers regist + regreet regreet + regreets regreet + regress regress + reguerdon reguerdon + regular regular + rehears rehear + rehearsal rehears + rehearse rehears + reign reign + reigned reign + reignier reignier + reigning reign + reigns reign + rein rein + reinforc reinforc + reinforce reinforc + reinforcement reinforc + reins rein + reiterate reiter + reject reject + rejected reject + rejoic rejoic + rejoice rejoic + rejoices rejoic + rejoiceth rejoiceth + rejoicing rejoic + rejoicingly rejoicingli + rejoindure rejoindur + rejourn rejourn + rel rel + relapse relaps + relate relat + relates relat + relation relat + relations relat + relative rel + releas relea + release releas + released releas + releasing releas + relent relent + relenting relent + relents relent + reliances relianc + relics relic + relief relief + reliev reliev + relieve reliev + relieved reliev + relieves reliev + relieving reliev + religion religion + religions religion + religious religi + religiously religi + relinquish relinquish + reliques reliqu + reliquit reliquit + relish relish + relume relum + rely reli + relying reli + remain remain + remainder 
remaind + remainders remaind + remained remain + remaineth remaineth + remaining remain + remains remain + remark remark + remarkable remark + remediate remedi + remedied remedi + remedies remedi + remedy remedi + rememb rememb + remember rememb + remembered rememb + remembers rememb + remembrance remembr + remembrancer remembranc + remembrances remembr + remercimens remercimen + remiss remiss + remission remiss + remissness remiss + remit remit + remnant remnant + remnants remnant + remonstrance remonstr + remorse remors + remorseful remors + remorseless remorseless + remote remot + remotion remot + remov remov + remove remov + removed remov + removedness removed + remover remov + removes remov + removing remov + remunerate remuner + remuneration remuner + rence renc + rend rend + render render + rendered render + renders render + rendezvous rendezv + renegado renegado + renege reneg + reneges reneg + renew renew + renewed renew + renewest renewest + renounce renounc + renouncement renounc + renouncing renounc + renowmed renowm + renown renown + renowned renown + rent rent + rents rent + repaid repaid + repair repair + repaired repair + repairing repair + repairs repair + repass repass + repast repast + repasture repastur + repay repai + repaying repai + repays repai + repeal repeal + repealing repeal + repeals repeal + repeat repeat + repeated repeat + repeating repeat + repeats repeat + repel repel + repent repent + repentance repent + repentant repent + repented repent + repenting repent + repents repent + repetition repetit + repetitions repetit + repin repin + repine repin + repining repin + replant replant + replenish replenish + replenished replenish + replete replet + replication replic + replied repli + replies repli + repliest repliest + reply repli + replying repli + report report + reported report + reporter report + reportest reportest + reporting report + reportingly reportingli + reports report + reposal repos + repose repos + reposeth reposeth + reposing repos + repossess repossess + reprehend reprehend + reprehended reprehend + reprehending reprehend + represent repres + representing repres + reprieve repriev + reprieves repriev + reprisal repris + reproach reproach + reproaches reproach + reproachful reproach + reproachfully reproachfulli + reprobate reprob + reprobation reprob + reproof reproof + reprov reprov + reprove reprov + reproveable reprov + reproves reprov + reproving reprov + repugn repugn + repugnancy repugn + repugnant repugn + repulse repuls + repulsed repuls + repurchas repurcha + repured repur + reputation reput + repute reput + reputed reput + reputeless reputeless + reputes reput + reputing reput + request request + requested request + requesting request + requests request + requiem requiem + requir requir + require requir + required requir + requires requir + requireth requireth + requiring requir + requisite requisit + requisites requisit + requit requit + requital requit + requite requit + requited requit + requites requit + rer rer + rere rere + rers rer + rescu rescu + rescue rescu + rescued rescu + rescues rescu + rescuing rescu + resemblance resembl + resemble resembl + resembled resembl + resembles resembl + resembleth resembleth + resembling resembl + reserv reserv + reservation reserv + reserve reserv + reserved reserv + reserves reserv + reside resid + residence resid + resident resid + resides resid + residing resid + residue residu + resign resign + resignation resign + resist resist + resistance resist + resisted resist + resisting resist 
+ resists resist + resolute resolut + resolutely resolut + resolutes resolut + resolution resolut + resolv resolv + resolve resolv + resolved resolv + resolvedly resolvedli + resolves resolv + resolveth resolveth + resort resort + resorted resort + resounding resound + resounds resound + respeaking respeak + respect respect + respected respect + respecting respect + respective respect + respectively respect + respects respect + respice respic + respite respit + respites respit + responsive respons + respose respos + ress ress + rest rest + rested rest + resteth resteth + restful rest + resting rest + restitution restitut + restless restless + restor restor + restoration restor + restorative restor + restore restor + restored restor + restores restor + restoring restor + restrain restrain + restrained restrain + restraining restrain + restrains restrain + restraint restraint + rests rest + resty resti + resum resum + resume resum + resumes resum + resurrections resurrect + retail retail + retails retail + retain retain + retainers retain + retaining retain + retell retel + retention retent + retentive retent + retinue retinu + retir retir + retire retir + retired retir + retirement retir + retires retir + retiring retir + retold retold + retort retort + retorts retort + retourne retourn + retract retract + retreat retreat + retrograde retrograd + rets ret + return return + returned return + returnest returnest + returneth returneth + returning return + returns return + revania revania + reveal reveal + reveals reveal + revel revel + reveler revel + revell revel + reveller revel + revellers revel + revelling revel + revelry revelri + revels revel + reveng reveng + revenge reveng + revenged reveng + revengeful reveng + revengement reveng + revenger reveng + revengers reveng + revenges reveng + revenging reveng + revengingly revengingli + revenue revenu + revenues revenu + reverb reverb + reverberate reverber + reverbs reverb + reverenc reverenc + reverence rever + reverend reverend + reverent rever + reverently rever + revers rever + reverse revers + reversion revers + reverted revert + review review + reviewest reviewest + revil revil + revile revil + revisits revisit + reviv reviv + revive reviv + revives reviv + reviving reviv + revok revok + revoke revok + revokement revok + revolt revolt + revolted revolt + revolting revolt + revolts revolt + revolution revolut + revolutions revolut + revolve revolv + revolving revolv + reward reward + rewarded reward + rewarder reward + rewarding reward + rewards reward + reword reword + reworded reword + rex rex + rey rei + reynaldo reynaldo + rford rford + rful rful + rfull rfull + rhapsody rhapsodi + rheims rheim + rhenish rhenish + rhesus rhesu + rhetoric rhetor + rheum rheum + rheumatic rheumat + rheums rheum + rheumy rheumi + rhinoceros rhinocero + rhodes rhode + rhodope rhodop + rhubarb rhubarb + rhym rhym + rhyme rhyme + rhymers rhymer + rhymes rhyme + rhyming rhyme + rialto rialto + rib rib + ribald ribald + riband riband + ribands riband + ribaudred ribaudr + ribb ribb + ribbed rib + ribbon ribbon + ribbons ribbon + ribs rib + rice rice + rich rich + richard richard + richer richer + riches rich + richest richest + richly richli + richmond richmond + richmonds richmond + rid rid + riddance riddanc + ridden ridden + riddle riddl + riddles riddl + riddling riddl + ride ride + rider rider + riders rider + rides ride + ridest ridest + rideth rideth + ridge ridg + ridges ridg + ridiculous ridicul + riding ride + rids rid + rien rien + ries ri + 
rifle rifl + rift rift + rifted rift + rig rig + rigg rigg + riggish riggish + right right + righteous righteou + righteously righteous + rightful right + rightfully rightfulli + rightly rightli + rights right + rigol rigol + rigorous rigor + rigorously rigor + rigour rigour + ril ril + rim rim + rin rin + rinaldo rinaldo + rind rind + ring ring + ringing ring + ringleader ringlead + ringlets ringlet + rings ring + ringwood ringwood + riot riot + rioter rioter + rioting riot + riotous riotou + riots riot + rip rip + ripe ripe + ripely ripe + ripen ripen + ripened ripen + ripeness ripe + ripening ripen + ripens ripen + riper riper + ripest ripest + riping ripe + ripp ripp + ripping rip + rise rise + risen risen + rises rise + riseth riseth + rish rish + rising rise + rite rite + rites rite + rivage rivag + rival rival + rivality rival + rivall rival + rivals rival + rive rive + rived rive + rivelled rivel + river river + rivers river + rivet rivet + riveted rivet + rivets rivet + rivo rivo + rj rj + rless rless + road road + roads road + roam roam + roaming roam + roan roan + roar roar + roared roar + roarers roarer + roaring roar + roars roar + roast roast + roasted roast + rob rob + roba roba + robas roba + robb robb + robbed rob + robber robber + robbers robber + robbery robberi + robbing rob + robe robe + robed robe + robert robert + robes robe + robin robin + robs rob + robustious robusti + rochester rochest + rochford rochford + rock rock + rocks rock + rocky rocki + rod rod + rode rode + roderigo roderigo + rods rod + roe roe + roes roe + roger roger + rogero rogero + rogue rogu + roguery rogueri + rogues rogu + roguish roguish + roi roi + roisting roist + roll roll + rolled roll + rolling roll + rolls roll + rom rom + romage romag + roman roman + romano romano + romanos romano + romans roman + rome rome + romeo romeo + romish romish + rondure rondur + ronyon ronyon + rood rood + roof roof + roofs roof + rook rook + rooks rook + rooky rooki + room room + rooms room + root root + rooted root + rootedly rootedli + rooteth rooteth + rooting root + roots root + rope rope + ropery roperi + ropes rope + roping rope + ros ro + rosalind rosalind + rosalinda rosalinda + rosalinde rosalind + rosaline rosalin + roscius rosciu + rose rose + rosed rose + rosemary rosemari + rosencrantz rosencrantz + roses rose + ross ross + rosy rosi + rot rot + rote rote + roted rote + rother rother + rotherham rotherham + rots rot + rotted rot + rotten rotten + rottenness rotten + rotting rot + rotundity rotund + rouen rouen + rough rough + rougher rougher + roughest roughest + roughly roughli + roughness rough + round round + rounded round + roundel roundel + rounder rounder + roundest roundest + rounding round + roundly roundli + rounds round + roundure roundur + rous rou + rouse rous + roused rous + rousillon rousillon + rously rousli + roussi roussi + rout rout + routed rout + routs rout + rove rove + rover rover + row row + rowel rowel + rowland rowland + rowlands rowland + roy roi + royal royal + royalize royal + royally royal + royalties royalti + royalty royalti + roynish roynish + rs rs + rt rt + rub rub + rubb rubb + rubbing rub + rubbish rubbish + rubies rubi + rubious rubiou + rubs rub + ruby rubi + rud rud + rudand rudand + rudder rudder + ruddiness ruddi + ruddock ruddock + ruddy ruddi + rude rude + rudely rude + rudeness rude + ruder ruder + rudesby rudesbi + rudest rudest + rudiments rudiment + rue rue + rued ru + ruff ruff + ruffian ruffian + ruffians ruffian + ruffle ruffl + ruffling ruffl + 
ruffs ruff + rug rug + rugby rugbi + rugemount rugemount + rugged rug + ruin ruin + ruinate ruinat + ruined ruin + ruining ruin + ruinous ruinou + ruins ruin + rul rul + rule rule + ruled rule + ruler ruler + rulers ruler + rules rule + ruling rule + rumble rumbl + ruminaies ruminai + ruminat ruminat + ruminate rumin + ruminated rumin + ruminates rumin + rumination rumin + rumor rumor + rumour rumour + rumourer rumour + rumours rumour + rump rump + run run + runagate runag + runagates runag + runaway runawai + runaways runawai + rung rung + runn runn + runner runner + runners runner + running run + runs run + rupture ruptur + ruptures ruptur + rural rural + rush rush + rushes rush + rushing rush + rushling rushl + rushy rushi + russet russet + russia russia + russian russian + russians russian + rust rust + rusted rust + rustic rustic + rustically rustic + rustics rustic + rustle rustl + rustling rustl + rusts rust + rusty rusti + rut rut + ruth ruth + ruthful ruth + ruthless ruthless + rutland rutland + ruttish ruttish + ry ry + rye rye + rything ryth + s s + sa sa + saba saba + sabbath sabbath + sable sabl + sables sabl + sack sack + sackbuts sackbut + sackcloth sackcloth + sacked sack + sackerson sackerson + sacks sack + sacrament sacrament + sacred sacr + sacrific sacrif + sacrifice sacrific + sacrificers sacrific + sacrifices sacrific + sacrificial sacrifici + sacrificing sacrif + sacrilegious sacrilegi + sacring sacr + sad sad + sadder sadder + saddest saddest + saddle saddl + saddler saddler + saddles saddl + sadly sadli + sadness sad + saf saf + safe safe + safeguard safeguard + safely safe + safer safer + safest safest + safeties safeti + safety safeti + saffron saffron + sag sag + sage sage + sagittary sagittari + said said + saidst saidst + sail sail + sailing sail + sailmaker sailmak + sailor sailor + sailors sailor + sails sail + sain sain + saint saint + sainted saint + saintlike saintlik + saints saint + saith saith + sake sake + sakes sake + sala sala + salad salad + salamander salamand + salary salari + sale sale + salerio salerio + salicam salicam + salique saliqu + salisbury salisburi + sall sall + sallet sallet + sallets sallet + sallies salli + sallow sallow + sally salli + salmon salmon + salmons salmon + salt salt + salter salter + saltiers saltier + saltness salt + saltpetre saltpetr + salutation salut + salutations salut + salute salut + saluted salut + salutes salut + saluteth saluteth + salv salv + salvation salvat + salve salv + salving salv + same same + samingo samingo + samp samp + sampire sampir + sample sampl + sampler sampler + sampson sampson + samson samson + samsons samson + sancta sancta + sanctified sanctifi + sanctifies sanctifi + sanctify sanctifi + sanctimonies sanctimoni + sanctimonious sanctimoni + sanctimony sanctimoni + sanctities sanctiti + sanctity sanctiti + sanctuarize sanctuar + sanctuary sanctuari + sand sand + sandal sandal + sandbag sandbag + sanded sand + sands sand + sandy sandi + sandys sandi + sang sang + sanguine sanguin + sanguis sangui + sanity saniti + sans san + santrailles santrail + sap sap + sapient sapient + sapit sapit + sapless sapless + sapling sapl + sapphire sapphir + sapphires sapphir + saracens saracen + sarcenet sarcenet + sard sard + sardians sardian + sardinia sardinia + sardis sardi + sarum sarum + sat sat + satan satan + satchel satchel + sate sate + sated sate + satiate satiat + satiety satieti + satin satin + satire satir + satirical satir + satis sati + satisfaction satisfact + satisfied satisfi + satisfies 
satisfi + satisfy satisfi + satisfying satisfi + saturday saturdai + saturdays saturdai + saturn saturn + saturnine saturnin + saturninus saturninu + satyr satyr + satyrs satyr + sauc sauc + sauce sauc + sauced sauc + saucers saucer + sauces sauc + saucily saucili + sauciness sauci + saucy sauci + sauf sauf + saunder saunder + sav sav + savage savag + savagely savag + savageness savag + savagery savageri + savages savag + save save + saved save + saves save + saving save + saviour saviour + savory savori + savour savour + savouring savour + savours savour + savoury savouri + savoy savoi + saw saw + sawed saw + sawest sawest + sawn sawn + sawpit sawpit + saws saw + sawyer sawyer + saxons saxon + saxony saxoni + saxton saxton + say sai + sayest sayest + saying sai + sayings sai + says sai + sayst sayst + sblood sblood + sc sc + scab scab + scabbard scabbard + scabs scab + scaffold scaffold + scaffoldage scaffoldag + scal scal + scald scald + scalded scald + scalding scald + scale scale + scaled scale + scales scale + scaling scale + scall scall + scalp scalp + scalps scalp + scaly scali + scamble scambl + scambling scambl + scamels scamel + scan scan + scandal scandal + scandaliz scandaliz + scandalous scandal + scandy scandi + scann scann + scant scant + scanted scant + scanter scanter + scanting scant + scantling scantl + scants scant + scap scap + scape scape + scaped scape + scapes scape + scapeth scapeth + scar scar + scarce scarc + scarcely scarc + scarcity scarciti + scare scare + scarecrow scarecrow + scarecrows scarecrow + scarf scarf + scarfed scarf + scarfs scarf + scaring scare + scarlet scarlet + scarr scarr + scarre scarr + scars scar + scarus scaru + scath scath + scathe scath + scathful scath + scatt scatt + scatter scatter + scattered scatter + scattering scatter + scatters scatter + scelera scelera + scelerisque scelerisqu + scene scene + scenes scene + scent scent + scented scent + scept scept + scepter scepter + sceptre sceptr + sceptred sceptr + sceptres sceptr + schedule schedul + schedules schedul + scholar scholar + scholarly scholarli + scholars scholar + school school + schoolboy schoolboi + schoolboys schoolboi + schoolfellows schoolfellow + schooling school + schoolmaster schoolmast + schoolmasters schoolmast + schools school + sciatica sciatica + sciaticas sciatica + science scienc + sciences scienc + scimitar scimitar + scion scion + scions scion + scissors scissor + scoff scoff + scoffer scoffer + scoffing scof + scoffs scoff + scoggin scoggin + scold scold + scolding scold + scolds scold + sconce sconc + scone scone + scope scope + scopes scope + scorch scorch + scorched scorch + score score + scored score + scores score + scoring score + scorn scorn + scorned scorn + scornful scorn + scornfully scornfulli + scorning scorn + scorns scorn + scorpion scorpion + scorpions scorpion + scot scot + scotch scotch + scotches scotch + scotland scotland + scots scot + scottish scottish + scoundrels scoundrel + scour scour + scoured scour + scourg scourg + scourge scourg + scouring scour + scout scout + scouts scout + scowl scowl + scrap scrap + scrape scrape + scraping scrape + scraps scrap + scratch scratch + scratches scratch + scratching scratch + scream scream + screams scream + screech screech + screeching screech + screen screen + screens screen + screw screw + screws screw + scribbl scribbl + scribbled scribbl + scribe scribe + scribes scribe + scrimers scrimer + scrip scrip + scrippage scrippag + scripture scriptur + scriptures scriptur + scrivener scriven + 
scroll scroll + scrolls scroll + scroop scroop + scrowl scrowl + scroyles scroyl + scrubbed scrub + scruple scrupl + scruples scrupl + scrupulous scrupul + scuffles scuffl + scuffling scuffl + scullion scullion + sculls scull + scum scum + scurril scurril + scurrility scurril + scurrilous scurril + scurvy scurvi + scuse scuse + scut scut + scutcheon scutcheon + scutcheons scutcheon + scylla scylla + scythe scyth + scythed scyth + scythia scythia + scythian scythian + sdeath sdeath + se se + sea sea + seacoal seacoal + seafaring seafar + seal seal + sealed seal + sealing seal + seals seal + seam seam + seamen seamen + seamy seami + seaport seaport + sear sear + searce searc + search search + searchers searcher + searches search + searcheth searcheth + searching search + seared sear + seas sea + seasick seasick + seaside seasid + season season + seasoned season + seasons season + seat seat + seated seat + seats seat + sebastian sebastian + second second + secondarily secondarili + secondary secondari + seconded second + seconds second + secrecy secreci + secret secret + secretaries secretari + secretary secretari + secretly secretli + secrets secret + sect sect + sectary sectari + sects sect + secundo secundo + secure secur + securely secur + securing secur + security secur + sedg sedg + sedge sedg + sedges sedg + sedgy sedgi + sedition sedit + seditious sediti + seduc seduc + seduce seduc + seduced seduc + seducer seduc + seducing seduc + see see + seed seed + seeded seed + seedness seed + seeds seed + seedsman seedsman + seein seein + seeing see + seek seek + seeking seek + seeks seek + seel seel + seeling seel + seely seeli + seem seem + seemed seem + seemers seemer + seemest seemest + seemeth seemeth + seeming seem + seemingly seemingli + seemly seemli + seems seem + seen seen + seer seer + sees see + seese sees + seest seest + seethe seeth + seethes seeth + seething seeth + seeting seet + segregation segreg + seigneur seigneur + seigneurs seigneur + seiz seiz + seize seiz + seized seiz + seizes seiz + seizeth seizeth + seizing seiz + seizure seizur + seld seld + seldom seldom + select select + seleucus seleucu + self self + selfsame selfsam + sell sell + seller seller + selling sell + sells sell + selves selv + semblable semblabl + semblably semblabl + semblance semblanc + semblances semblanc + semblative sembl + semi semi + semicircle semicircl + semiramis semirami + semper semper + sempronius semproniu + senate senat + senator senat + senators senat + send send + sender sender + sendeth sendeth + sending send + sends send + seneca seneca + senior senior + seniory seniori + senis seni + sennet sennet + senoys senoi + sense sens + senseless senseless + senses sens + sensible sensibl + sensibly sensibl + sensual sensual + sensuality sensual + sent sent + sentenc sentenc + sentence sentenc + sentences sentenc + sententious sententi + sentinel sentinel + sentinels sentinel + separable separ + separate separ + separated separ + separates separ + separation separ + septentrion septentrion + sepulchre sepulchr + sepulchres sepulchr + sepulchring sepulchr + sequel sequel + sequence sequenc + sequent sequent + sequest sequest + sequester sequest + sequestration sequestr + sere sere + serenis sereni + serge serg + sergeant sergeant + serious seriou + seriously serious + sermon sermon + sermons sermon + serpent serpent + serpentine serpentin + serpents serpent + serpigo serpigo + serv serv + servant servant + servanted servant + servants servant + serve serv + served serv + server server + serves 
serv + serveth serveth + service servic + serviceable servic + services servic + servile servil + servility servil + servilius serviliu + serving serv + servingman servingman + servingmen servingmen + serviteur serviteur + servitor servitor + servitors servitor + servitude servitud + sessa sessa + session session + sessions session + sestos sesto + set set + setebos setebo + sets set + setter setter + setting set + settle settl + settled settl + settlest settlest + settling settl + sev sev + seven seven + sevenfold sevenfold + sevennight sevennight + seventeen seventeen + seventh seventh + seventy seventi + sever sever + several sever + severally sever + severals sever + severe sever + severed sever + severely sever + severest severest + severing sever + severity sever + severn severn + severs sever + sew sew + seward seward + sewer sewer + sewing sew + sex sex + sexes sex + sexton sexton + sextus sextu + seymour seymour + seyton seyton + sfoot sfoot + sh sh + shackle shackl + shackles shackl + shade shade + shades shade + shadow shadow + shadowed shadow + shadowing shadow + shadows shadow + shadowy shadowi + shady shadi + shafalus shafalu + shaft shaft + shafts shaft + shag shag + shak shak + shake shake + shaked shake + shaken shaken + shakes shake + shaking shake + shales shale + shall shall + shallenge shalleng + shallow shallow + shallowest shallowest + shallowly shallowli + shallows shallow + shalt shalt + sham sham + shambles shambl + shame shame + shamed shame + shameful shame + shamefully shamefulli + shameless shameless + shames shame + shamest shamest + shaming shame + shank shank + shanks shank + shap shap + shape shape + shaped shape + shapeless shapeless + shapen shapen + shapes shape + shaping shape + shar shar + shard shard + sharded shard + shards shard + share share + shared share + sharers sharer + shares share + sharing share + shark shark + sharp sharp + sharpen sharpen + sharpened sharpen + sharpens sharpen + sharper sharper + sharpest sharpest + sharply sharpli + sharpness sharp + sharps sharp + shatter shatter + shav shav + shave shave + shaven shaven + shaw shaw + she she + sheaf sheaf + sheal sheal + shear shear + shearers shearer + shearing shear + shearman shearman + shears shear + sheath sheath + sheathe sheath + sheathed sheath + sheathes sheath + sheathing sheath + sheaved sheav + sheaves sheav + shed shed + shedding shed + sheds shed + sheen sheen + sheep sheep + sheepcote sheepcot + sheepcotes sheepcot + sheeps sheep + sheepskins sheepskin + sheer sheer + sheet sheet + sheeted sheet + sheets sheet + sheffield sheffield + shelf shelf + shell shell + shells shell + shelt shelt + shelter shelter + shelters shelter + shelves shelv + shelving shelv + shelvy shelvi + shent shent + shepherd shepherd + shepherdes shepherd + shepherdess shepherdess + shepherdesses shepherdess + shepherds shepherd + sher sher + sheriff sheriff + sherris sherri + shes she + sheweth sheweth + shield shield + shielded shield + shields shield + shift shift + shifted shift + shifting shift + shifts shift + shilling shill + shillings shill + shin shin + shine shine + shines shine + shineth shineth + shining shine + shins shin + shiny shini + ship ship + shipboard shipboard + shipman shipman + shipmaster shipmast + shipmen shipmen + shipp shipp + shipped ship + shipping ship + ships ship + shipt shipt + shipwreck shipwreck + shipwrecking shipwreck + shipwright shipwright + shipwrights shipwright + shire shire + shirley shirlei + shirt shirt + shirts shirt + shive shive + shiver shiver + 
shivering shiver + shivers shiver + shoal shoal + shoals shoal + shock shock + shocks shock + shod shod + shoe shoe + shoeing shoe + shoemaker shoemak + shoes shoe + shog shog + shone shone + shook shook + shoon shoon + shoot shoot + shooter shooter + shootie shooti + shooting shoot + shoots shoot + shop shop + shops shop + shore shore + shores shore + shorn shorn + short short + shortcake shortcak + shorten shorten + shortened shorten + shortens shorten + shorter shorter + shortly shortli + shortness short + shot shot + shotten shotten + shoughs shough + should should + shoulder shoulder + shouldering shoulder + shoulders shoulder + shouldst shouldst + shout shout + shouted shout + shouting shout + shouts shout + shov shov + shove shove + shovel shovel + shovels shovel + show show + showed show + shower shower + showers shower + showest showest + showing show + shown shown + shows show + shreds shred + shrew shrew + shrewd shrewd + shrewdly shrewdli + shrewdness shrewd + shrewish shrewish + shrewishly shrewishli + shrewishness shrewish + shrews shrew + shrewsbury shrewsburi + shriek shriek + shrieking shriek + shrieks shriek + shrieve shriev + shrift shrift + shrill shrill + shriller shriller + shrills shrill + shrilly shrilli + shrimp shrimp + shrine shrine + shrink shrink + shrinking shrink + shrinks shrink + shriv shriv + shrive shrive + shriver shriver + shrives shrive + shriving shrive + shroud shroud + shrouded shroud + shrouding shroud + shrouds shroud + shrove shrove + shrow shrow + shrows shrow + shrub shrub + shrubs shrub + shrug shrug + shrugs shrug + shrunk shrunk + shudd shudd + shudders shudder + shuffl shuffl + shuffle shuffl + shuffled shuffl + shuffling shuffl + shun shun + shunless shunless + shunn shunn + shunned shun + shunning shun + shuns shun + shut shut + shuts shut + shuttle shuttl + shy shy + shylock shylock + si si + sibyl sibyl + sibylla sibylla + sibyls sibyl + sicil sicil + sicilia sicilia + sicilian sicilian + sicilius siciliu + sicils sicil + sicily sicili + sicinius siciniu + sick sick + sicken sicken + sickens sicken + sicker sicker + sickle sickl + sicklemen sicklemen + sicklied sickli + sickliness sickli + sickly sickli + sickness sick + sicles sicl + sicyon sicyon + side side + sided side + sides side + siege sieg + sieges sieg + sienna sienna + sies si + sieve siev + sift sift + sifted sift + sigeia sigeia + sigh sigh + sighed sigh + sighing sigh + sighs sigh + sight sight + sighted sight + sightless sightless + sightly sightli + sights sight + sign sign + signal signal + signet signet + signieur signieur + significant signific + significants signific + signified signifi + signifies signifi + signify signifi + signifying signifi + signior signior + signiories signiori + signiors signior + signiory signiori + signor signor + signories signori + signs sign + signum signum + silenc silenc + silence silenc + silenced silenc + silencing silenc + silent silent + silently silent + silius siliu + silk silk + silken silken + silkman silkman + silks silk + silliest silliest + silliness silli + silling sill + silly silli + silva silva + silver silver + silvered silver + silverly silverli + silvia silvia + silvius silviu + sima sima + simile simil + similes simil + simois simoi + simon simon + simony simoni + simp simp + simpcox simpcox + simple simpl + simpleness simpl + simpler simpler + simples simpl + simplicity simplic + simply simpli + simular simular + simulation simul + sin sin + since sinc + sincere sincer + sincerely sincer + sincerity sincer + sinel 
sinel + sinew sinew + sinewed sinew + sinews sinew + sinewy sinewi + sinful sin + sinfully sinfulli + sing sing + singe sing + singeing sing + singer singer + singes sing + singeth singeth + singing sing + single singl + singled singl + singleness singl + singly singli + sings sing + singular singular + singulariter singularit + singularities singular + singularity singular + singuled singul + sinister sinist + sink sink + sinking sink + sinks sink + sinn sinn + sinner sinner + sinners sinner + sinning sin + sinon sinon + sins sin + sip sip + sipping sip + sir sir + sire sire + siren siren + sirrah sirrah + sirs sir + sist sist + sister sister + sisterhood sisterhood + sisterly sisterli + sisters sister + sit sit + sith sith + sithence sithenc + sits sit + sitting sit + situate situat + situation situat + situations situat + siward siward + six six + sixpence sixpenc + sixpences sixpenc + sixpenny sixpenni + sixteen sixteen + sixth sixth + sixty sixti + siz siz + size size + sizes size + sizzle sizzl + skains skain + skamble skambl + skein skein + skelter skelter + skies ski + skilful skil + skilfully skilfulli + skill skill + skilless skilless + skillet skillet + skillful skill + skills skill + skim skim + skimble skimbl + skin skin + skinker skinker + skinny skinni + skins skin + skip skip + skipp skipp + skipper skipper + skipping skip + skirmish skirmish + skirmishes skirmish + skirr skirr + skirted skirt + skirts skirt + skittish skittish + skulking skulk + skull skull + skulls skull + sky sky + skyey skyei + skyish skyish + slab slab + slack slack + slackly slackli + slackness slack + slain slain + slake slake + sland sland + slander slander + slandered slander + slanderer slander + slanderers slander + slandering slander + slanderous slander + slanders slander + slash slash + slaught slaught + slaughter slaughter + slaughtered slaughter + slaughterer slaughter + slaughterman slaughterman + slaughtermen slaughtermen + slaughterous slaughter + slaughters slaughter + slave slave + slaver slaver + slavery slaveri + slaves slave + slavish slavish + slay slai + slayeth slayeth + slaying slai + slays slai + sleave sleav + sledded sled + sleek sleek + sleekly sleekli + sleep sleep + sleeper sleeper + sleepers sleeper + sleepest sleepest + sleeping sleep + sleeps sleep + sleepy sleepi + sleeve sleev + sleeves sleev + sleid sleid + sleided sleid + sleight sleight + sleights sleight + slender slender + slenderer slender + slenderly slenderli + slept slept + slew slew + slewest slewest + slice slice + slid slid + slide slide + slides slide + sliding slide + slight slight + slighted slight + slightest slightest + slightly slightli + slightness slight + slights slight + slily slili + slime slime + slimy slimi + slings sling + slink slink + slip slip + slipp slipp + slipper slipper + slippers slipper + slippery slipperi + slips slip + slish slish + slit slit + sliver sliver + slobb slobb + slomber slomber + slop slop + slope slope + slops slop + sloth sloth + slothful sloth + slough slough + slovenly slovenli + slovenry slovenri + slow slow + slower slower + slowly slowli + slowness slow + slubber slubber + slug slug + sluggard sluggard + sluggardiz sluggardiz + sluggish sluggish + sluic sluic + slumb slumb + slumber slumber + slumbers slumber + slumbery slumberi + slunk slunk + slut slut + sluts slut + sluttery slutteri + sluttish sluttish + sluttishness sluttish + sly sly + slys sly + smack smack + smacking smack + smacks smack + small small + smaller smaller + smallest smallest + smallness 
small + smalus smalu + smart smart + smarting smart + smartly smartli + smatch smatch + smatter smatter + smear smear + smell smell + smelling smell + smells smell + smelt smelt + smil smil + smile smile + smiled smile + smiles smile + smilest smilest + smilets smilet + smiling smile + smilingly smilingli + smirch smirch + smirched smirch + smit smit + smite smite + smites smite + smith smith + smithfield smithfield + smock smock + smocks smock + smok smok + smoke smoke + smoked smoke + smokes smoke + smoking smoke + smoky smoki + smooth smooth + smoothed smooth + smoothing smooth + smoothly smoothli + smoothness smooth + smooths smooth + smote smote + smoth smoth + smother smother + smothered smother + smothering smother + smug smug + smulkin smulkin + smutch smutch + snaffle snaffl + snail snail + snails snail + snake snake + snakes snake + snaky snaki + snap snap + snapp snapp + snapper snapper + snar snar + snare snare + snares snare + snarl snarl + snarleth snarleth + snarling snarl + snatch snatch + snatchers snatcher + snatches snatch + snatching snatch + sneak sneak + sneaking sneak + sneap sneap + sneaping sneap + sneck sneck + snip snip + snipe snipe + snipt snipt + snore snore + snores snore + snoring snore + snorting snort + snout snout + snow snow + snowballs snowbal + snowed snow + snowy snowi + snuff snuff + snuffs snuff + snug snug + so so + soak soak + soaking soak + soaks soak + soar soar + soaring soar + soars soar + sob sob + sobbing sob + sober sober + soberly soberli + sobriety sobrieti + sobs sob + sociable sociabl + societies societi + society societi + socks sock + socrates socrat + sod sod + sodden sodden + soe soe + soever soever + soft soft + soften soften + softens soften + softer softer + softest softest + softly softli + softness soft + soil soil + soiled soil + soilure soilur + soit soit + sojourn sojourn + sol sol + sola sola + solace solac + solanio solanio + sold sold + soldat soldat + solder solder + soldest soldest + soldier soldier + soldiers soldier + soldiership soldiership + sole sole + solely sole + solem solem + solemn solemn + solemness solem + solemnities solemn + solemnity solemn + solemniz solemniz + solemnize solemn + solemnized solemn + solemnly solemnli + soles sole + solicit solicit + solicitation solicit + solicited solicit + soliciting solicit + solicitings solicit + solicitor solicitor + solicits solicit + solid solid + solidares solidar + solidity solid + solinus solinu + solitary solitari + solomon solomon + solon solon + solum solum + solus solu + solyman solyman + some some + somebody somebodi + someone someon + somerset somerset + somerville somervil + something someth + sometime sometim + sometimes sometim + somever somev + somewhat somewhat + somewhere somewher + somewhither somewhith + somme somm + son son + sonance sonanc + song song + songs song + sonnet sonnet + sonneting sonnet + sonnets sonnet + sons son + sont sont + sonties sonti + soon soon + sooner sooner + soonest soonest + sooth sooth + soothe sooth + soothers soother + soothing sooth + soothsay soothsai + soothsayer soothsay + sooty sooti + sop sop + sophister sophist + sophisticated sophist + sophy sophi + sops sop + sorcerer sorcer + sorcerers sorcer + sorceress sorceress + sorceries sorceri + sorcery sorceri + sore sore + sorel sorel + sorely sore + sorer sorer + sores sore + sorrier sorrier + sorriest sorriest + sorrow sorrow + sorrowed sorrow + sorrowest sorrowest + sorrowful sorrow + sorrowing sorrow + sorrows sorrow + sorry sorri + sort sort + sortance sortanc 
+ sorted sort + sorting sort + sorts sort + sossius sossiu + sot sot + soto soto + sots sot + sottish sottish + soud soud + sought sought + soul soul + sould sould + soulless soulless + souls soul + sound sound + sounded sound + sounder sounder + soundest soundest + sounding sound + soundless soundless + soundly soundli + soundness sound + soundpost soundpost + sounds sound + sour sour + source sourc + sources sourc + sourest sourest + sourly sourli + sours sour + sous sou + souse sous + south south + southam southam + southampton southampton + southerly southerli + southern southern + southward southward + southwark southwark + southwell southwel + souviendrai souviendrai + sov sov + sovereign sovereign + sovereignest sovereignest + sovereignly sovereignli + sovereignty sovereignti + sovereignvours sovereignvour + sow sow + sowing sow + sowl sowl + sowter sowter + space space + spaces space + spacious spaciou + spade spade + spades spade + spain spain + spak spak + spake spake + spakest spakest + span span + spangle spangl + spangled spangl + spaniard spaniard + spaniel spaniel + spaniels spaniel + spanish spanish + spann spann + spans span + spar spar + spare spare + spares spare + sparing spare + sparingly sparingli + spark spark + sparkle sparkl + sparkles sparkl + sparkling sparkl + sparks spark + sparrow sparrow + sparrows sparrow + sparta sparta + spartan spartan + spavin spavin + spavins spavin + spawn spawn + speak speak + speaker speaker + speakers speaker + speakest speakest + speaketh speaketh + speaking speak + speaks speak + spear spear + speargrass speargrass + spears spear + special special + specialities special + specially special + specialties specialti + specialty specialti + specify specifi + speciously specious + spectacle spectacl + spectacled spectacl + spectacles spectacl + spectators spectat + spectatorship spectatorship + speculation specul + speculations specul + speculative specul + sped sped + speech speech + speeches speech + speechless speechless + speed speed + speeded speed + speedier speedier + speediest speediest + speedily speedili + speediness speedi + speeding speed + speeds speed + speedy speedi + speens speen + spell spell + spelling spell + spells spell + spelt spelt + spencer spencer + spend spend + spendest spendest + spending spend + spends spend + spendthrift spendthrift + spent spent + sperato sperato + sperm sperm + spero spero + sperr sperr + spher spher + sphere sphere + sphered sphere + spheres sphere + spherical spheric + sphery spheri + sphinx sphinx + spice spice + spiced spice + spicery spiceri + spices spice + spider spider + spiders spider + spied spi + spies spi + spieth spieth + spightfully spightfulli + spigot spigot + spill spill + spilling spill + spills spill + spilt spilt + spilth spilth + spin spin + spinii spinii + spinners spinner + spinster spinster + spinsters spinster + spire spire + spirit spirit + spirited spirit + spiritless spiritless + spirits spirit + spiritual spiritu + spiritualty spiritualti + spirt spirt + spit spit + spital spital + spite spite + spited spite + spiteful spite + spites spite + spits spit + spitted spit + spitting spit + splay splai + spleen spleen + spleenful spleen + spleens spleen + spleeny spleeni + splendour splendour + splenitive splenit + splinter splinter + splinters splinter + split split + splits split + splitted split + splitting split + spoil spoil + spoils spoil + spok spok + spoke spoke + spoken spoken + spokes spoke + spokesman spokesman + sponge spong + spongy spongi + spoon 
spoon + spoons spoon + sport sport + sportful sport + sporting sport + sportive sportiv + sports sport + spot spot + spotless spotless + spots spot + spotted spot + spousal spousal + spouse spous + spout spout + spouting spout + spouts spout + sprag sprag + sprang sprang + sprat sprat + sprawl sprawl + spray sprai + sprays sprai + spread spread + spreading spread + spreads spread + sprighted spright + sprightful spright + sprightly sprightli + sprigs sprig + spring spring + springe spring + springes spring + springeth springeth + springhalt springhalt + springing spring + springs spring + springtime springtim + sprinkle sprinkl + sprinkles sprinkl + sprite sprite + sprited sprite + spritely sprite + sprites sprite + spriting sprite + sprout sprout + spruce spruce + sprung sprung + spun spun + spur spur + spurio spurio + spurn spurn + spurns spurn + spurr spurr + spurrer spurrer + spurring spur + spurs spur + spy spy + spying spy + squabble squabbl + squadron squadron + squadrons squadron + squand squand + squar squar + square squar + squarer squarer + squares squar + squash squash + squeak squeak + squeaking squeak + squeal squeal + squealing squeal + squeezes squeez + squeezing squeez + squele squel + squier squier + squints squint + squiny squini + squire squir + squires squir + squirrel squirrel + st st + stab stab + stabb stabb + stabbed stab + stabbing stab + stable stabl + stableness stabl + stables stabl + stablish stablish + stablishment stablish + stabs stab + stacks stack + staff staff + stafford stafford + staffords stafford + staffordshire staffordshir + stag stag + stage stage + stages stage + stagger stagger + staggering stagger + staggers stagger + stags stag + staid staid + staider staider + stain stain + stained stain + staines stain + staineth staineth + staining stain + stainless stainless + stains stain + stair stair + stairs stair + stake stake + stakes stake + stale stale + staled stale + stalk stalk + stalking stalk + stalks stalk + stall stall + stalling stall + stalls stall + stamford stamford + stammer stammer + stamp stamp + stamped stamp + stamps stamp + stanch stanch + stanchless stanchless + stand stand + standard standard + standards standard + stander stander + standers stander + standest standest + standeth standeth + standing stand + stands stand + staniel staniel + stanley stanlei + stanze stanz + stanzo stanzo + stanzos stanzo + staple stapl + staples stapl + star star + stare stare + stared stare + stares stare + staring stare + starings stare + stark stark + starkly starkli + starlight starlight + starling starl + starr starr + starry starri + stars star + start start + started start + starting start + startingly startingli + startle startl + startles startl + starts start + starv starv + starve starv + starved starv + starvelackey starvelackei + starveling starvel + starveth starveth + starving starv + state state + statelier stateli + stately state + states state + statesman statesman + statesmen statesmen + statilius statiliu + station station + statist statist + statists statist + statue statu + statues statu + stature statur + statures statur + statute statut + statutes statut + stave stave + staves stave + stay stai + stayed stai + stayest stayest + staying stai + stays stai + stead stead + steaded stead + steadfast steadfast + steadier steadier + steads stead + steal steal + stealer stealer + stealers stealer + stealing steal + steals steal + stealth stealth + stealthy stealthi + steed steed + steeds steed + steel steel + steeled steel + steely 
steeli + steep steep + steeped steep + steeple steepl + steeples steepl + steeps steep + steepy steepi + steer steer + steerage steerag + steering steer + steers steer + stelled stell + stem stem + stemming stem + stench stench + step step + stepdame stepdam + stephano stephano + stephen stephen + stepmothers stepmoth + stepp stepp + stepping step + steps step + sterile steril + sterility steril + sterling sterl + stern stern + sternage sternag + sterner sterner + sternest sternest + sternness stern + steterat steterat + stew stew + steward steward + stewards steward + stewardship stewardship + stewed stew + stews stew + stick stick + sticking stick + stickler stickler + sticks stick + stiff stiff + stiffen stiffen + stiffly stiffli + stifle stifl + stifled stifl + stifles stifl + stigmatic stigmat + stigmatical stigmat + stile stile + still still + stiller stiller + stillest stillest + stillness still + stilly stilli + sting sting + stinging sting + stingless stingless + stings sting + stink stink + stinking stink + stinkingly stinkingli + stinks stink + stint stint + stinted stint + stints stint + stir stir + stirr stirr + stirred stir + stirrer stirrer + stirrers stirrer + stirreth stirreth + stirring stir + stirrup stirrup + stirrups stirrup + stirs stir + stitchery stitcheri + stitches stitch + stithied stithi + stithy stithi + stoccadoes stoccado + stoccata stoccata + stock stock + stockfish stockfish + stocking stock + stockings stock + stockish stockish + stocks stock + stog stog + stogs stog + stoics stoic + stokesly stokesli + stol stol + stole stole + stolen stolen + stolest stolest + stomach stomach + stomachers stomach + stomaching stomach + stomachs stomach + ston ston + stone stone + stonecutter stonecutt + stones stone + stonish stonish + stony stoni + stood stood + stool stool + stools stool + stoop stoop + stooping stoop + stoops stoop + stop stop + stope stope + stopp stopp + stopped stop + stopping stop + stops stop + stor stor + store store + storehouse storehous + storehouses storehous + stores store + stories stori + storm storm + stormed storm + storming storm + storms storm + stormy stormi + story stori + stoup stoup + stoups stoup + stout stout + stouter stouter + stoutly stoutli + stoutness stout + stover stover + stow stow + stowage stowag + stowed stow + strachy strachi + stragglers straggler + straggling straggl + straight straight + straightest straightest + straightway straightwai + strain strain + strained strain + straining strain + strains strain + strait strait + straited strait + straiter straiter + straitly straitli + straitness strait + straits strait + strand strand + strange strang + strangely strang + strangeness strang + stranger stranger + strangers stranger + strangest strangest + strangle strangl + strangled strangl + strangler strangler + strangles strangl + strangling strangl + strappado strappado + straps strap + stratagem stratagem + stratagems stratagem + stratford stratford + strato strato + straw straw + strawberries strawberri + strawberry strawberri + straws straw + strawy strawi + stray strai + straying strai + strays strai + streak streak + streaks streak + stream stream + streamers streamer + streaming stream + streams stream + streching strech + street street + streets street + strength strength + strengthen strengthen + strengthened strengthen + strengthless strengthless + strengths strength + stretch stretch + stretched stretch + stretches stretch + stretching stretch + strew strew + strewing strew + strewings strew + strewments 
strewment + stricken stricken + strict strict + stricter stricter + strictest strictest + strictly strictli + stricture strictur + stride stride + strides stride + striding stride + strife strife + strifes strife + strik strik + strike strike + strikers striker + strikes strike + strikest strikest + striking strike + string string + stringless stringless + strings string + strip strip + stripes stripe + stripling stripl + striplings stripl + stripp stripp + stripping strip + striv striv + strive strive + strives strive + striving strive + strok strok + stroke stroke + strokes stroke + strond strond + stronds strond + strong strong + stronger stronger + strongest strongest + strongly strongli + strooke strook + strossers strosser + strove strove + strown strown + stroy stroi + struck struck + strucken strucken + struggle struggl + struggles struggl + struggling struggl + strumpet strumpet + strumpeted strumpet + strumpets strumpet + strung strung + strut strut + struts strut + strutted strut + strutting strut + stubble stubbl + stubborn stubborn + stubbornest stubbornest + stubbornly stubbornli + stubbornness stubborn + stuck stuck + studded stud + student student + students student + studied studi + studies studi + studious studiou + studiously studious + studs stud + study studi + studying studi + stuff stuff + stuffing stuf + stuffs stuff + stumble stumbl + stumbled stumbl + stumblest stumblest + stumbling stumbl + stump stump + stumps stump + stung stung + stupefy stupefi + stupid stupid + stupified stupifi + stuprum stuprum + sturdy sturdi + sty sty + styga styga + stygian stygian + styl styl + style style + styx styx + su su + sub sub + subcontracted subcontract + subdu subdu + subdue subdu + subdued subdu + subduements subduement + subdues subdu + subduing subdu + subject subject + subjected subject + subjection subject + subjects subject + submerg submerg + submission submiss + submissive submiss + submit submit + submits submit + submitting submit + suborn suborn + subornation suborn + suborned suborn + subscrib subscrib + subscribe subscrib + subscribed subscrib + subscribes subscrib + subscription subscript + subsequent subsequ + subsidies subsidi + subsidy subsidi + subsist subsist + subsisting subsist + substance substanc + substances substanc + substantial substanti + substitute substitut + substituted substitut + substitutes substitut + substitution substitut + subtile subtil + subtilly subtilli + subtle subtl + subtleties subtleti + subtlety subtleti + subtly subtli + subtractors subtractor + suburbs suburb + subversion subvers + subverts subvert + succedant succed + succeed succe + succeeded succeed + succeeders succeed + succeeding succeed + succeeds succe + success success + successantly successantli + successes success + successful success + successfully successfulli + succession success + successive success + successively success + successor successor + successors successor + succour succour + succours succour + such such + suck suck + sucker sucker + suckers sucker + sucking suck + suckle suckl + sucks suck + sudden sudden + suddenly suddenli + sue sue + sued su + suerly suerli + sues sue + sueth sueth + suff suff + suffer suffer + sufferance suffer + sufferances suffer + suffered suffer + suffering suffer + suffers suffer + suffic suffic + suffice suffic + sufficed suffic + suffices suffic + sufficeth sufficeth + sufficiency suffici + sufficient suffici + sufficiently suffici + sufficing suffic + sufficit sufficit + suffigance suffig + suffocate suffoc + suffocating 
suffoc + suffocation suffoc + suffolk suffolk + suffrage suffrag + suffrages suffrag + sug sug + sugar sugar + sugarsop sugarsop + suggest suggest + suggested suggest + suggesting suggest + suggestion suggest + suggestions suggest + suggests suggest + suis sui + suit suit + suitable suitabl + suited suit + suiting suit + suitor suitor + suitors suitor + suits suit + suivez suivez + sullen sullen + sullens sullen + sullied sulli + sullies sulli + sully sulli + sulph sulph + sulpherous sulpher + sulphur sulphur + sulphurous sulphur + sultan sultan + sultry sultri + sum sum + sumless sumless + summ summ + summa summa + summary summari + summer summer + summers summer + summit summit + summon summon + summoners summon + summons summon + sumpter sumpter + sumptuous sumptuou + sumptuously sumptuous + sums sum + sun sun + sunbeams sunbeam + sunburning sunburn + sunburnt sunburnt + sund sund + sunday sundai + sundays sundai + sunder sunder + sunders sunder + sundry sundri + sung sung + sunk sunk + sunken sunken + sunny sunni + sunrising sunris + suns sun + sunset sunset + sunshine sunshin + sup sup + super super + superficial superfici + superficially superfici + superfluity superflu + superfluous superflu + superfluously superflu + superflux superflux + superior superior + supernal supern + supernatural supernatur + superpraise superprais + superscript superscript + superscription superscript + superserviceable superservic + superstition superstit + superstitious superstiti + superstitiously superstiti + supersubtle supersubtl + supervise supervis + supervisor supervisor + supp supp + supper supper + suppers supper + suppertime suppertim + supping sup + supplant supplant + supple suppl + suppler suppler + suppliance supplianc + suppliant suppliant + suppliants suppliant + supplicant supplic + supplication supplic + supplications supplic + supplie suppli + supplied suppli + supplies suppli + suppliest suppliest + supply suppli + supplyant supplyant + supplying suppli + supplyment supplyment + support support + supportable support + supportance support + supported support + supporter support + supporters support + supporting support + supportor supportor + suppos suppo + supposal suppos + suppose suppos + supposed suppos + supposes suppos + supposest supposest + supposing suppos + supposition supposit + suppress suppress + suppressed suppress + suppresseth suppresseth + supremacy supremaci + supreme suprem + sups sup + sur sur + surance suranc + surcease surceas + surd surd + sure sure + surecard surecard + surely sure + surer surer + surest surest + sureties sureti + surety sureti + surfeit surfeit + surfeited surfeit + surfeiter surfeit + surfeiting surfeit + surfeits surfeit + surge surg + surgeon surgeon + surgeons surgeon + surgere surger + surgery surgeri + surges surg + surly surli + surmis surmi + surmise surmis + surmised surmis + surmises surmis + surmount surmount + surmounted surmount + surmounts surmount + surnam surnam + surname surnam + surnamed surnam + surpasseth surpasseth + surpassing surpass + surplice surplic + surplus surplu + surpris surpri + surprise surpris + surprised surpris + surrender surrend + surrey surrei + surreys surrei + survey survei + surveyest surveyest + surveying survei + surveyor surveyor + surveyors surveyor + surveys survei + survive surviv + survives surviv + survivor survivor + susan susan + suspect suspect + suspected suspect + suspecting suspect + suspects suspect + suspend suspend + suspense suspens + suspicion suspicion + suspicions suspicion + 
suspicious suspici + suspiration suspir + suspire suspir + sust sust + sustain sustain + sustaining sustain + sutler sutler + sutton sutton + suum suum + swabber swabber + swaddling swaddl + swag swag + swagg swagg + swagger swagger + swaggerer swagger + swaggerers swagger + swaggering swagger + swain swain + swains swain + swallow swallow + swallowed swallow + swallowing swallow + swallows swallow + swam swam + swan swan + swans swan + sward sward + sware sware + swarm swarm + swarming swarm + swart swart + swarth swarth + swarths swarth + swarthy swarthi + swashers swasher + swashing swash + swath swath + swathing swath + swathling swathl + sway swai + swaying swai + sways swai + swear swear + swearer swearer + swearers swearer + swearest swearest + swearing swear + swearings swear + swears swear + sweat sweat + sweaten sweaten + sweating sweat + sweats sweat + sweaty sweati + sweep sweep + sweepers sweeper + sweeps sweep + sweet sweet + sweeten sweeten + sweetens sweeten + sweeter sweeter + sweetest sweetest + sweetheart sweetheart + sweeting sweet + sweetly sweetli + sweetmeats sweetmeat + sweetness sweet + sweets sweet + swell swell + swelling swell + swellings swell + swells swell + swelter swelter + sweno sweno + swept swept + swerve swerv + swerver swerver + swerving swerv + swift swift + swifter swifter + swiftest swiftest + swiftly swiftli + swiftness swift + swill swill + swills swill + swim swim + swimmer swimmer + swimmers swimmer + swimming swim + swims swim + swine swine + swineherds swineherd + swing swing + swinge swing + swinish swinish + swinstead swinstead + switches switch + swits swit + switzers switzer + swol swol + swoll swoll + swoln swoln + swoon swoon + swooned swoon + swooning swoon + swoons swoon + swoop swoop + swoopstake swoopstak + swor swor + sword sword + sworder sworder + swords sword + swore swore + sworn sworn + swounded swound + swounds swound + swum swum + swung swung + sy sy + sycamore sycamor + sycorax sycorax + sylla sylla + syllable syllabl + syllables syllabl + syllogism syllog + symbols symbol + sympathise sympathis + sympathiz sympathiz + sympathize sympath + sympathized sympath + sympathy sympathi + synagogue synagogu + synod synod + synods synod + syracuse syracus + syracusian syracusian + syracusians syracusian + syria syria + syrups syrup + t t + ta ta + taber taber + table tabl + tabled tabl + tables tabl + tablet tablet + tabor tabor + taborer tabor + tabors tabor + tabourines tabourin + taciturnity taciturn + tack tack + tackle tackl + tackled tackl + tackles tackl + tackling tackl + tacklings tackl + taddle taddl + tadpole tadpol + taffeta taffeta + taffety taffeti + tag tag + tagrag tagrag + tah tah + tail tail + tailor tailor + tailors tailor + tails tail + taint taint + tainted taint + tainting taint + taints taint + tainture taintur + tak tak + take take + taken taken + taker taker + takes take + takest takest + taketh taketh + taking take + tal tal + talbot talbot + talbotites talbotit + talbots talbot + tale tale + talent talent + talents talent + taleporter taleport + tales tale + talk talk + talked talk + talker talker + talkers talker + talkest talkest + talking talk + talks talk + tall tall + taller taller + tallest tallest + tallies talli + tallow tallow + tally talli + talons talon + tam tam + tambourines tambourin + tame tame + tamed tame + tamely tame + tameness tame + tamer tamer + tames tame + taming tame + tamora tamora + tamworth tamworth + tan tan + tang tang + tangle tangl + tangled tangl + tank tank + tanlings tanl 
+ tann tann + tanned tan + tanner tanner + tanquam tanquam + tanta tanta + tantaene tantaen + tap tap + tape tape + taper taper + tapers taper + tapestries tapestri + tapestry tapestri + taphouse taphous + tapp tapp + tapster tapster + tapsters tapster + tar tar + tardied tardi + tardily tardili + tardiness tardi + tardy tardi + tarentum tarentum + targe targ + targes targ + target target + targets target + tarpeian tarpeian + tarquin tarquin + tarquins tarquin + tarr tarr + tarre tarr + tarriance tarrianc + tarried tarri + tarries tarri + tarry tarri + tarrying tarri + tart tart + tartar tartar + tartars tartar + tartly tartli + tartness tart + task task + tasker tasker + tasking task + tasks task + tassel tassel + taste tast + tasted tast + tastes tast + tasting tast + tatt tatt + tatter tatter + tattered tatter + tatters tatter + tattle tattl + tattling tattl + tattlings tattl + taught taught + taunt taunt + taunted taunt + taunting taunt + tauntingly tauntingli + taunts taunt + taurus tauru + tavern tavern + taverns tavern + tavy tavi + tawdry tawdri + tawny tawni + tax tax + taxation taxat + taxations taxat + taxes tax + taxing tax + tc tc + te te + teach teach + teacher teacher + teachers teacher + teaches teach + teachest teachest + teacheth teacheth + teaching teach + team team + tear tear + tearful tear + tearing tear + tears tear + tearsheet tearsheet + teat teat + tedious tediou + tediously tedious + tediousness tedious + teem teem + teeming teem + teems teem + teen teen + teeth teeth + teipsum teipsum + telamon telamon + telamonius telamoniu + tell tell + teller teller + telling tell + tells tell + tellus tellu + temp temp + temper temper + temperality temper + temperance temper + temperate temper + temperately temper + tempers temper + tempest tempest + tempests tempest + tempestuous tempestu + temple templ + temples templ + temporal tempor + temporary temporari + temporiz temporiz + temporize tempor + temporizer tempor + temps temp + tempt tempt + temptation temptat + temptations temptat + tempted tempt + tempter tempter + tempters tempter + tempteth tempteth + tempting tempt + tempts tempt + ten ten + tenable tenabl + tenant tenant + tenantius tenantiu + tenantless tenantless + tenants tenant + tench tench + tend tend + tendance tendanc + tended tend + tender tender + tendered tender + tenderly tenderli + tenderness tender + tenders tender + tending tend + tends tend + tenedos tenedo + tenement tenement + tenements tenement + tenfold tenfold + tennis tenni + tenour tenour + tenours tenour + tens ten + tent tent + tented tent + tenth tenth + tenths tenth + tents tent + tenure tenur + tenures tenur + tercel tercel + tereus tereu + term term + termagant termag + termed term + terminations termin + termless termless + terms term + terra terra + terrace terrac + terram terram + terras terra + terre terr + terrene terren + terrestrial terrestri + terrible terribl + terribly terribl + territories territori + territory territori + terror terror + terrors terror + tertian tertian + tertio tertio + test test + testament testament + tested test + tester tester + testern testern + testify testifi + testimonied testimoni + testimonies testimoni + testimony testimoni + testiness testi + testril testril + testy testi + tetchy tetchi + tether tether + tetter tetter + tevil tevil + tewksbury tewksburi + text text + tgv tgv + th th + thaes thae + thames thame + than than + thane thane + thanes thane + thank thank + thanked thank + thankful thank + thankfully thankfulli + thankfulness thank + 
thanking thank + thankings thank + thankless thankless + thanks thank + thanksgiving thanksgiv + thasos thaso + that that + thatch thatch + thaw thaw + thawing thaw + thaws thaw + the the + theatre theatr + theban theban + thebes thebe + thee thee + theft theft + thefts theft + thein thein + their their + theirs their + theise theis + them them + theme theme + themes theme + themselves themselv + then then + thence thenc + thenceforth thenceforth + theoric theoric + there there + thereabout thereabout + thereabouts thereabout + thereafter thereaft + thereat thereat + thereby therebi + therefore therefor + therein therein + thereof thereof + thereon thereon + thereto thereto + thereunto thereunto + thereupon thereupon + therewith therewith + therewithal therewith + thersites thersit + these these + theseus theseu + thessalian thessalian + thessaly thessali + thetis theti + thews thew + they thei + thick thick + thicken thicken + thickens thicken + thicker thicker + thickest thickest + thicket thicket + thickskin thickskin + thief thief + thievery thieveri + thieves thiev + thievish thievish + thigh thigh + thighs thigh + thimble thimbl + thimbles thimbl + thin thin + thine thine + thing thing + things thing + think think + thinkest thinkest + thinking think + thinkings think + thinks think + thinkst thinkst + thinly thinli + third third + thirdly thirdli + thirds third + thirst thirst + thirsting thirst + thirsts thirst + thirsty thirsti + thirteen thirteen + thirties thirti + thirtieth thirtieth + thirty thirti + this thi + thisby thisbi + thisne thisn + thistle thistl + thistles thistl + thither thither + thitherward thitherward + thoas thoa + thomas thoma + thorn thorn + thorns thorn + thorny thorni + thorough thorough + thoroughly thoroughli + those those + thou thou + though though + thought thought + thoughtful thought + thoughts thought + thousand thousand + thousands thousand + thracian thracian + thraldom thraldom + thrall thrall + thralled thrall + thralls thrall + thrash thrash + thrasonical thrason + thread thread + threadbare threadbar + threaden threaden + threading thread + threat threat + threaten threaten + threatening threaten + threatens threaten + threatest threatest + threats threat + three three + threefold threefold + threepence threepenc + threepile threepil + threes three + threescore threescor + thresher thresher + threshold threshold + threw threw + thrice thrice + thrift thrift + thriftless thriftless + thrifts thrift + thrifty thrifti + thrill thrill + thrilling thrill + thrills thrill + thrive thrive + thrived thrive + thrivers thriver + thrives thrive + thriving thrive + throat throat + throats throat + throbbing throb + throbs throb + throca throca + throe throe + throes throe + thromuldo thromuldo + thron thron + throne throne + throned throne + thrones throne + throng throng + thronging throng + throngs throng + throstle throstl + throttle throttl + through through + throughfare throughfar + throughfares throughfar + throughly throughli + throughout throughout + throw throw + thrower thrower + throwest throwest + throwing throw + thrown thrown + throws throw + thrum thrum + thrumm thrumm + thrush thrush + thrust thrust + thrusteth thrusteth + thrusting thrust + thrusts thrust + thumb thumb + thumbs thumb + thump thump + thund thund + thunder thunder + thunderbolt thunderbolt + thunderbolts thunderbolt + thunderer thunder + thunders thunder + thunderstone thunderston + thunderstroke thunderstrok + thurio thurio + thursday thursdai + thus thu + thwack thwack 
+ thwart thwart + thwarted thwart + thwarting thwart + thwartings thwart + thy thy + thyme thyme + thymus thymu + thyreus thyreu + thyself thyself + ti ti + tib tib + tiber tiber + tiberio tiberio + tibey tibei + ticed tice + tick tick + tickl tickl + tickle tickl + tickled tickl + tickles tickl + tickling tickl + ticklish ticklish + tiddle tiddl + tide tide + tides tide + tidings tide + tidy tidi + tie tie + tied ti + ties ti + tiff tiff + tiger tiger + tigers tiger + tight tight + tightly tightli + tike tike + til til + tile tile + till till + tillage tillag + tilly tilli + tilt tilt + tilter tilter + tilth tilth + tilting tilt + tilts tilt + tiltyard tiltyard + tim tim + timandra timandra + timber timber + time time + timeless timeless + timelier timeli + timely time + times time + timon timon + timor timor + timorous timor + timorously timor + tinct tinct + tincture tinctur + tinctures tinctur + tinder tinder + tingling tingl + tinker tinker + tinkers tinker + tinsel tinsel + tiny tini + tip tip + tipp tipp + tippling tippl + tips tip + tipsy tipsi + tiptoe tipto + tir tir + tire tire + tired tire + tires tire + tirest tirest + tiring tire + tirra tirra + tirrits tirrit + tis ti + tish tish + tisick tisick + tissue tissu + titan titan + titania titania + tithe tith + tithed tith + tithing tith + titinius titiniu + title titl + titled titl + titleless titleless + titles titl + tittle tittl + tittles tittl + titular titular + titus titu + tn tn + to to + toad toad + toads toad + toadstool toadstool + toast toast + toasted toast + toasting toast + toasts toast + toaze toaz + toby tobi + tock tock + tod tod + today todai + todpole todpol + tods tod + toe toe + toes toe + tofore tofor + toge toge + toged toge + together togeth + toil toil + toiled toil + toiling toil + toils toil + token token + tokens token + told told + toledo toledo + tolerable toler + toll toll + tolling toll + tom tom + tomb tomb + tombe tomb + tombed tomb + tombless tombless + tomboys tomboi + tombs tomb + tomorrow tomorrow + tomyris tomyri + ton ton + tongs tong + tongu tongu + tongue tongu + tongued tongu + tongueless tongueless + tongues tongu + tonight tonight + too too + took took + tool tool + tools tool + tooth tooth + toothache toothach + toothpick toothpick + toothpicker toothpick + top top + topas topa + topful top + topgallant topgal + topless topless + topmast topmast + topp topp + topping top + topple toppl + topples toppl + tops top + topsail topsail + topsy topsi + torch torch + torchbearer torchbear + torchbearers torchbear + torcher torcher + torches torch + torchlight torchlight + tore tore + torment torment + tormenta tormenta + tormente torment + tormented torment + tormenting torment + tormentors tormentor + torments torment + torn torn + torrent torrent + tortive tortiv + tortoise tortois + tortur tortur + torture tortur + tortured tortur + torturer tortur + torturers tortur + tortures tortur + torturest torturest + torturing tortur + toryne toryn + toss toss + tossed toss + tosseth tosseth + tossing toss + tot tot + total total + totally total + tott tott + tottered totter + totters totter + tou tou + touch touch + touched touch + touches touch + toucheth toucheth + touching touch + touchstone touchston + tough tough + tougher tougher + toughness tough + touraine tourain + tournaments tournament + tours tour + tous tou + tout tout + touze touz + tow tow + toward toward + towardly towardli + towards toward + tower tower + towering tower + towers tower + town town + towns town + township township 
+ townsman townsman + townsmen townsmen + towton towton + toy toi + toys toi + trace trace + traces trace + track track + tract tract + tractable tractabl + trade trade + traded trade + traders trader + trades trade + tradesman tradesman + tradesmen tradesmen + trading trade + tradition tradit + traditional tradit + traduc traduc + traduced traduc + traducement traduc + traffic traffic + traffickers traffick + traffics traffic + tragedian tragedian + tragedians tragedian + tragedies tragedi + tragedy tragedi + tragic tragic + tragical tragic + trail trail + train train + trained train + training train + trains train + trait trait + traitor traitor + traitorly traitorli + traitorous traitor + traitorously traitor + traitors traitor + traitress traitress + traject traject + trammel trammel + trample trampl + trampled trampl + trampling trampl + tranc tranc + trance tranc + tranio tranio + tranquil tranquil + tranquillity tranquil + transcendence transcend + transcends transcend + transferred transfer + transfigur transfigur + transfix transfix + transform transform + transformation transform + transformations transform + transformed transform + transgress transgress + transgresses transgress + transgressing transgress + transgression transgress + translate translat + translated translat + translates translat + translation translat + transmigrates transmigr + transmutation transmut + transparent transpar + transport transport + transportance transport + transported transport + transporting transport + transports transport + transpose transpos + transshape transshap + trap trap + trapp trapp + trappings trap + traps trap + trash trash + travail travail + travails travail + travel travel + traveler travel + traveling travel + travell travel + travelled travel + traveller travel + travellers travel + travellest travellest + travelling travel + travels travel + travers traver + traverse travers + tray trai + treacherous treacher + treacherously treacher + treachers treacher + treachery treacheri + tread tread + treading tread + treads tread + treason treason + treasonable treason + treasonous treason + treasons treason + treasure treasur + treasurer treasur + treasures treasur + treasuries treasuri + treasury treasuri + treat treat + treaties treati + treatise treatis + treats treat + treaty treati + treble trebl + trebled trebl + trebles trebl + trebonius treboniu + tree tree + trees tree + tremble trembl + trembled trembl + trembles trembl + tremblest tremblest + trembling trembl + tremblingly tremblingli + tremor tremor + trempling trempl + trench trench + trenchant trenchant + trenched trench + trencher trencher + trenchering trencher + trencherman trencherman + trenchers trencher + trenches trench + trenching trench + trent trent + tres tre + trespass trespass + trespasses trespass + tressel tressel + tresses tress + treys trei + trial trial + trials trial + trib trib + tribe tribe + tribes tribe + tribulation tribul + tribunal tribun + tribune tribun + tribunes tribun + tributaries tributari + tributary tributari + tribute tribut + tributes tribut + trice trice + trick trick + tricking trick + trickling trickl + tricks trick + tricksy tricksi + trident trident + tried tri + trier trier + trifle trifl + trifled trifl + trifler trifler + trifles trifl + trifling trifl + trigon trigon + trill trill + trim trim + trimly trimli + trimm trimm + trimmed trim + trimming trim + trims trim + trinculo trinculo + trinculos trinculo + trinkets trinket + trip trip + tripartite tripartit + tripe tripe + 
triple tripl + triplex triplex + tripoli tripoli + tripolis tripoli + tripp tripp + tripping trip + trippingly trippingli + trips trip + tristful trist + triton triton + triumph triumph + triumphant triumphant + triumphantly triumphantli + triumpher triumpher + triumphers triumpher + triumphing triumph + triumphs triumph + triumvir triumvir + triumvirate triumvir + triumvirs triumvir + triumviry triumviri + trivial trivial + troat troat + trod trod + trodden trodden + troiant troiant + troien troien + troilus troilu + troiluses troilus + trojan trojan + trojans trojan + troll troll + tromperies tromperi + trompet trompet + troop troop + trooping troop + troops troop + trop trop + trophies trophi + trophy trophi + tropically tropic + trot trot + troth troth + trothed troth + troths troth + trots trot + trotting trot + trouble troubl + troubled troubl + troubler troubler + troubles troubl + troublesome troublesom + troublest troublest + troublous troublou + trough trough + trout trout + trouts trout + trovato trovato + trow trow + trowel trowel + trowest trowest + troy troi + troyan troyan + troyans troyan + truant truant + truce truce + truckle truckl + trudge trudg + true true + trueborn trueborn + truepenny truepenni + truer truer + truest truest + truie truie + trull trull + trulls trull + truly truli + trump trump + trumpery trumperi + trumpet trumpet + trumpeter trumpet + trumpeters trumpet + trumpets trumpet + truncheon truncheon + truncheoners truncheon + trundle trundl + trunk trunk + trunks trunk + trust trust + trusted trust + truster truster + trusters truster + trusting trust + trusts trust + trusty trusti + truth truth + truths truth + try try + ts ts + tu tu + tuae tuae + tub tub + tubal tubal + tubs tub + tuck tuck + tucket tucket + tuesday tuesdai + tuft tuft + tufts tuft + tug tug + tugg tugg + tugging tug + tuition tuition + tullus tullu + tully tulli + tumble tumbl + tumbled tumbl + tumbler tumbler + tumbling tumbl + tumult tumult + tumultuous tumultu + tun tun + tune tune + tuneable tuneabl + tuned tune + tuners tuner + tunes tune + tunis tuni + tuns tun + tupping tup + turban turban + turbans turban + turbulence turbul + turbulent turbul + turd turd + turf turf + turfy turfi + turk turk + turkey turkei + turkeys turkei + turkish turkish + turks turk + turlygod turlygod + turmoil turmoil + turmoiled turmoil + turn turn + turnbull turnbul + turncoat turncoat + turncoats turncoat + turned turn + turneth turneth + turning turn + turnips turnip + turns turn + turph turph + turpitude turpitud + turquoise turquois + turret turret + turrets turret + turtle turtl + turtles turtl + turvy turvi + tuscan tuscan + tush tush + tut tut + tutor tutor + tutored tutor + tutors tutor + tutto tutto + twain twain + twang twang + twangling twangl + twas twa + tway twai + tweaks tweak + tween tween + twelfth twelfth + twelve twelv + twelvemonth twelvemonth + twentieth twentieth + twenty twenti + twere twere + twice twice + twig twig + twiggen twiggen + twigs twig + twilight twilight + twill twill + twilled twill + twin twin + twine twine + twink twink + twinkle twinkl + twinkled twinkl + twinkling twinkl + twinn twinn + twins twin + twire twire + twist twist + twisted twist + twit twit + twits twit + twitting twit + twixt twixt + two two + twofold twofold + twopence twopenc + twopences twopenc + twos two + twould twould + tyb tyb + tybalt tybalt + tybalts tybalt + tyburn tyburn + tying ty + tyke tyke + tymbria tymbria + type type + types type + typhon typhon + tyrannical tyrann + tyrannically 
tyrann + tyrannize tyrann + tyrannous tyrann + tyranny tyranni + tyrant tyrant + tyrants tyrant + tyrian tyrian + tyrrel tyrrel + u u + ubique ubiqu + udders udder + udge udg + uds ud + uglier uglier + ugliest ugliest + ugly ugli + ulcer ulcer + ulcerous ulcer + ulysses ulyss + um um + umber umber + umbra umbra + umbrage umbrag + umfrevile umfrevil + umpire umpir + umpires umpir + un un + unable unabl + unaccommodated unaccommod + unaccompanied unaccompani + unaccustom unaccustom + unaching unach + unacquainted unacquaint + unactive unact + unadvis unadvi + unadvised unadvis + unadvisedly unadvisedli + unagreeable unagre + unanel unanel + unanswer unansw + unappeas unappea + unapproved unapprov + unapt unapt + unaptness unapt + unarm unarm + unarmed unarm + unarms unarm + unassail unassail + unassailable unassail + unattainted unattaint + unattempted unattempt + unattended unattend + unauspicious unauspici + unauthorized unauthor + unavoided unavoid + unawares unawar + unback unback + unbak unbak + unbanded unband + unbar unbar + unbarb unbarb + unbashful unbash + unbated unbat + unbatter unbatt + unbecoming unbecom + unbefitting unbefit + unbegot unbegot + unbegotten unbegotten + unbelieved unbeliev + unbend unbend + unbent unbent + unbewail unbewail + unbid unbid + unbidden unbidden + unbind unbind + unbinds unbind + unbitted unbit + unbless unbless + unblest unblest + unbloodied unbloodi + unblown unblown + unbodied unbodi + unbolt unbolt + unbolted unbolt + unbonneted unbonnet + unbookish unbookish + unborn unborn + unbosom unbosom + unbound unbound + unbounded unbound + unbow unbow + unbowed unbow + unbrac unbrac + unbraced unbrac + unbraided unbraid + unbreathed unbreath + unbred unbr + unbreech unbreech + unbridled unbridl + unbroke unbrok + unbruis unbrui + unbruised unbruis + unbuckle unbuckl + unbuckles unbuckl + unbuckling unbuckl + unbuild unbuild + unburden unburden + unburdens unburden + unburied unburi + unburnt unburnt + unburthen unburthen + unbutton unbutton + unbuttoning unbutton + uncapable uncap + uncape uncap + uncase uncas + uncasing uncas + uncaught uncaught + uncertain uncertain + uncertainty uncertainti + unchain unchain + unchanging unchang + uncharge uncharg + uncharged uncharg + uncharitably uncharit + unchary unchari + unchaste unchast + uncheck uncheck + unchilded unchild + uncivil uncivil + unclaim unclaim + unclasp unclasp + uncle uncl + unclean unclean + uncleanliness uncleanli + uncleanly uncleanli + uncleanness unclean + uncles uncl + unclew unclew + unclog unclog + uncoined uncoin + uncolted uncolt + uncomeliness uncomeli + uncomfortable uncomfort + uncompassionate uncompassion + uncomprehensive uncomprehens + unconfinable unconfin + unconfirm unconfirm + unconfirmed unconfirm + unconquer unconqu + unconquered unconqu + unconsidered unconsid + unconstant unconst + unconstrain unconstrain + unconstrained unconstrain + uncontemn uncontemn + uncontroll uncontrol + uncorrected uncorrect + uncounted uncount + uncouple uncoupl + uncourteous uncourt + uncouth uncouth + uncover uncov + uncovered uncov + uncropped uncrop + uncross uncross + uncrown uncrown + unction unction + unctuous unctuou + uncuckolded uncuckold + uncurable uncur + uncurbable uncurb + uncurbed uncurb + uncurls uncurl + uncurrent uncurr + uncurse uncurs + undaunted undaunt + undeaf undeaf + undeck undeck + undeeded undeed + under under + underbearing underbear + underborne underborn + undercrest undercrest + underfoot underfoot + undergo undergo + undergoes undergo + undergoing undergo + 
undergone undergon + underground underground + underhand underhand + underlings underl + undermine undermin + underminers undermin + underneath underneath + underprizing underpr + underprop underprop + understand understand + understandeth understandeth + understanding understand + understandings understand + understands understand + understood understood + underta underta + undertake undertak + undertakeing undertak + undertaker undertak + undertakes undertak + undertaking undertak + undertakings undertak + undertook undertook + undervalu undervalu + undervalued undervalu + underwent underw + underwrit underwrit + underwrite underwrit + undescried undescri + undeserved undeserv + undeserver undeserv + undeservers undeserv + undeserving undeserv + undetermin undetermin + undid undid + undinted undint + undiscernible undiscern + undiscover undiscov + undishonoured undishonour + undispos undispo + undistinguishable undistinguish + undistinguished undistinguish + undividable undivid + undivided undivid + undivulged undivulg + undo undo + undoes undo + undoing undo + undone undon + undoubted undoubt + undoubtedly undoubtedli + undream undream + undress undress + undressed undress + undrown undrown + unduteous undut + undutiful unduti + une un + uneared unear + unearned unearn + unearthly unearthli + uneasines uneasin + uneasy uneasi + uneath uneath + uneducated uneduc + uneffectual uneffectu + unelected unelect + unequal unequ + uneven uneven + unexamin unexamin + unexecuted unexecut + unexpected unexpect + unexperienc unexperienc + unexperient unexperi + unexpressive unexpress + unfair unfair + unfaithful unfaith + unfallible unfal + unfam unfam + unfashionable unfashion + unfasten unfasten + unfather unfath + unfathered unfath + unfed unf + unfeed unfe + unfeeling unfeel + unfeigned unfeign + unfeignedly unfeignedli + unfellowed unfellow + unfelt unfelt + unfenced unfenc + unfilial unfili + unfill unfil + unfinish unfinish + unfirm unfirm + unfit unfit + unfitness unfit + unfix unfix + unfledg unfledg + unfold unfold + unfolded unfold + unfoldeth unfoldeth + unfolding unfold + unfolds unfold + unfool unfool + unforc unforc + unforced unforc + unforfeited unforfeit + unfortified unfortifi + unfortunate unfortun + unfought unfought + unfrequented unfrequ + unfriended unfriend + unfurnish unfurnish + ungain ungain + ungalled ungal + ungart ungart + ungarter ungart + ungenitur ungenitur + ungentle ungentl + ungentleness ungentl + ungently ungent + ungird ungird + ungodly ungodli + ungor ungor + ungot ungot + ungotten ungotten + ungovern ungovern + ungracious ungraci + ungrateful ungrat + ungravely ungrav + ungrown ungrown + unguarded unguard + unguem unguem + unguided unguid + unhack unhack + unhair unhair + unhallow unhallow + unhallowed unhallow + unhand unhand + unhandled unhandl + unhandsome unhandsom + unhang unhang + unhappied unhappi + unhappily unhappili + unhappiness unhappi + unhappy unhappi + unhardened unharden + unharm unharm + unhatch unhatch + unheard unheard + unhearts unheart + unheedful unheed + unheedfully unheedfulli + unheedy unheedi + unhelpful unhelp + unhidden unhidden + unholy unholi + unhop unhop + unhopefullest unhopefullest + unhorse unhors + unhospitable unhospit + unhous unhou + unhoused unhous + unhurtful unhurt + unicorn unicorn + unicorns unicorn + unimproved unimprov + uninhabitable uninhabit + uninhabited uninhabit + unintelligent unintellig + union union + unions union + unite unit + united unit + unity uniti + universal univers + universe univers + 
universities univers + university univers + unjointed unjoint + unjust unjust + unjustice unjustic + unjustly unjustli + unkennel unkennel + unkept unkept + unkind unkind + unkindest unkindest + unkindly unkindli + unkindness unkind + unking unk + unkinglike unkinglik + unkiss unkiss + unknit unknit + unknowing unknow + unknown unknown + unlace unlac + unlaid unlaid + unlawful unlaw + unlawfully unlawfulli + unlearn unlearn + unlearned unlearn + unless unless + unlesson unlesson + unletter unlett + unlettered unlett + unlick unlick + unlike unlik + unlikely unlik + unlimited unlimit + unlineal unlin + unlink unlink + unload unload + unloaded unload + unloading unload + unloads unload + unlock unlock + unlocks unlock + unlook unlook + unlooked unlook + unloos unloo + unloose unloos + unlov unlov + unloving unlov + unluckily unluckili + unlucky unlucki + unmade unmad + unmake unmak + unmanly unmanli + unmann unmann + unmanner unmann + unmannerd unmannerd + unmannerly unmannerli + unmarried unmarri + unmask unmask + unmasked unmask + unmasking unmask + unmasks unmask + unmast unmast + unmatch unmatch + unmatchable unmatch + unmatched unmatch + unmeasurable unmeasur + unmeet unmeet + unmellowed unmellow + unmerciful unmerci + unmeritable unmerit + unmeriting unmerit + unminded unmind + unmindfull unmindful + unmingled unmingl + unmitigable unmitig + unmitigated unmitig + unmix unmix + unmoan unmoan + unmov unmov + unmoved unmov + unmoving unmov + unmuffles unmuffl + unmuffling unmuffl + unmusical unmus + unmuzzle unmuzzl + unmuzzled unmuzzl + unnatural unnatur + unnaturally unnatur + unnaturalness unnatur + unnecessarily unnecessarili + unnecessary unnecessari + unneighbourly unneighbourli + unnerved unnerv + unnoble unnobl + unnoted unnot + unnumb unnumb + unnumber unnumb + unowed unow + unpack unpack + unpaid unpaid + unparagon unparagon + unparallel unparallel + unpartial unparti + unpath unpath + unpaved unpav + unpay unpai + unpeaceable unpeac + unpeg unpeg + unpeople unpeopl + unpeopled unpeopl + unperfect unperfect + unperfectness unperfect + unpick unpick + unpin unpin + unpink unpink + unpitied unpiti + unpitifully unpitifulli + unplagu unplagu + unplausive unplaus + unpleas unplea + unpleasant unpleas + unpleasing unpleas + unpolicied unpolici + unpolish unpolish + unpolished unpolish + unpolluted unpollut + unpossess unpossess + unpossessing unpossess + unpossible unposs + unpractis unpracti + unpregnant unpregn + unpremeditated unpremedit + unprepar unprepar + unprepared unprepar + unpress unpress + unprevailing unprevail + unprevented unprev + unpriz unpriz + unprizable unpriz + unprofitable unprofit + unprofited unprofit + unproper unprop + unproperly unproperli + unproportion unproport + unprovide unprovid + unprovided unprovid + unprovident unprovid + unprovokes unprovok + unprun unprun + unpruned unprun + unpublish unpublish + unpurged unpurg + unpurpos unpurpo + unqualitied unqual + unqueen unqueen + unquestion unquest + unquestionable unquestion + unquiet unquiet + unquietly unquietli + unquietness unquiet + unraised unrais + unrak unrak + unread unread + unready unreadi + unreal unreal + unreasonable unreason + unreasonably unreason + unreclaimed unreclaim + unreconciled unreconcil + unreconciliable unreconcili + unrecounted unrecount + unrecuring unrecur + unregarded unregard + unregist unregist + unrelenting unrel + unremovable unremov + unremovably unremov + unreprievable unrepriev + unresolv unresolv + unrespected unrespect + unrespective unrespect + unrest unrest + 
unrestor unrestor + unrestrained unrestrain + unreveng unreveng + unreverend unreverend + unreverent unrever + unrevers unrev + unrewarded unreward + unrighteous unright + unrightful unright + unripe unrip + unripp unripp + unrivall unrival + unroll unrol + unroof unroof + unroosted unroost + unroot unroot + unrough unrough + unruly unruli + unsafe unsaf + unsaluted unsalut + unsanctified unsanctifi + unsatisfied unsatisfi + unsavoury unsavouri + unsay unsai + unscalable unscal + unscann unscann + unscarr unscarr + unschool unschool + unscorch unscorch + unscour unscour + unscratch unscratch + unseal unseal + unseam unseam + unsearch unsearch + unseason unseason + unseasonable unseason + unseasonably unseason + unseasoned unseason + unseconded unsecond + unsecret unsecret + unseduc unseduc + unseeing unse + unseeming unseem + unseemly unseemli + unseen unseen + unseminar unseminar + unseparable unsepar + unserviceable unservic + unset unset + unsettle unsettl + unsettled unsettl + unsever unsev + unsex unsex + unshak unshak + unshaked unshak + unshaken unshaken + unshaped unshap + unshapes unshap + unsheath unsheath + unsheathe unsheath + unshorn unshorn + unshout unshout + unshown unshown + unshrinking unshrink + unshrubb unshrubb + unshunn unshunn + unshunnable unshunn + unsifted unsift + unsightly unsightli + unsinew unsinew + unsisting unsist + unskilful unskil + unskilfully unskilfulli + unskillful unskil + unslipping unslip + unsmirched unsmirch + unsoil unsoil + unsolicited unsolicit + unsorted unsort + unsought unsought + unsound unsound + unsounded unsound + unspeak unspeak + unspeakable unspeak + unspeaking unspeak + unsphere unspher + unspoke unspok + unspoken unspoken + unspotted unspot + unsquar unsquar + unstable unstabl + unstaid unstaid + unstain unstain + unstained unstain + unstanched unstanch + unstate unstat + unsteadfast unsteadfast + unstooping unstoop + unstringed unstring + unstuff unstuff + unsubstantial unsubstanti + unsuitable unsuit + unsuiting unsuit + unsullied unsulli + unsunn unsunn + unsur unsur + unsure unsur + unsuspected unsuspect + unsway unswai + unswayable unsway + unswayed unswai + unswear unswear + unswept unswept + unsworn unsworn + untainted untaint + untalk untalk + untangle untangl + untangled untangl + untasted untast + untaught untaught + untempering untemp + untender untend + untent untent + untented untent + unthankful unthank + unthankfulness unthank + unthink unthink + unthought unthought + unthread unthread + unthrift unthrift + unthrifts unthrift + unthrifty unthrifti + untie unti + untied unti + until until + untimber untimb + untimely untim + untir untir + untirable untir + untired untir + untitled untitl + unto unto + untold untold + untouch untouch + untoward untoward + untowardly untowardli + untraded untrad + untrain untrain + untrained untrain + untread untread + untreasur untreasur + untried untri + untrimmed untrim + untrod untrod + untrodden untrodden + untroubled untroubl + untrue untru + untrussing untruss + untruth untruth + untruths untruth + untucked untuck + untun untun + untune untun + untuneable untun + untutor untutor + untutored untutor + untwine untwin + unurg unurg + unus unu + unused unus + unusual unusu + unvalued unvalu + unvanquish unvanquish + unvarnish unvarnish + unveil unveil + unveiling unveil + unvenerable unvener + unvex unvex + unviolated unviol + unvirtuous unvirtu + unvisited unvisit + unvulnerable unvulner + unwares unwar + unwarily unwarili + unwash unwash + unwatch unwatch + unwearied unweari + 
unwed unw + unwedgeable unwedg + unweeded unweed + unweighed unweigh + unweighing unweigh + unwelcome unwelcom + unwept unwept + unwhipp unwhipp + unwholesome unwholesom + unwieldy unwieldi + unwilling unwil + unwillingly unwillingli + unwillingness unwilling + unwind unwind + unwiped unwip + unwise unwis + unwisely unwis + unwish unwish + unwished unwish + unwitted unwit + unwittingly unwittingli + unwonted unwont + unwooed unwoo + unworthier unworthi + unworthiest unworthiest + unworthily unworthili + unworthiness unworthi + unworthy unworthi + unwrung unwrung + unyok unyok + unyoke unyok + up up + upbraid upbraid + upbraided upbraid + upbraidings upbraid + upbraids upbraid + uphoarded uphoard + uphold uphold + upholdeth upholdeth + upholding uphold + upholds uphold + uplift uplift + uplifted uplift + upmost upmost + upon upon + upper upper + uprear uprear + upreared uprear + upright upright + uprighteously upright + uprightness upright + uprise upris + uprising upris + uproar uproar + uproars uproar + uprous uprou + upshoot upshoot + upshot upshot + upside upsid + upspring upspr + upstairs upstair + upstart upstart + upturned upturn + upward upward + upwards upward + urchin urchin + urchinfield urchinfield + urchins urchin + urg urg + urge urg + urged urg + urgent urgent + urges urg + urgest urgest + urging urg + urinal urin + urinals urin + urine urin + urn urn + urns urn + urs ur + ursa ursa + ursley urslei + ursula ursula + urswick urswick + us us + usage usag + usance usanc + usances usanc + use us + used us + useful us + useless useless + user user + uses us + usest usest + useth useth + usher usher + ushered usher + ushering usher + ushers usher + using us + usual usual + usually usual + usurer usur + usurers usur + usuries usuri + usuring usur + usurp usurp + usurpation usurp + usurped usurp + usurper usurp + usurpers usurp + usurping usurp + usurpingly usurpingli + usurps usurp + usury usuri + ut ut + utensil utensil + utensils utensil + utility util + utmost utmost + utt utt + utter utter + utterance utter + uttered utter + uttereth uttereth + uttering utter + utterly utterli + uttermost uttermost + utters utter + uy uy + v v + va va + vacancy vacanc + vacant vacant + vacation vacat + vade vade + vagabond vagabond + vagabonds vagabond + vagram vagram + vagrom vagrom + vail vail + vailed vail + vailing vail + vaillant vaillant + vain vain + vainer vainer + vainglory vainglori + vainly vainli + vainness vain + vais vai + valanc valanc + valance valanc + vale vale + valence valenc + valentine valentin + valentinus valentinu + valentio valentio + valeria valeria + valerius valeriu + vales vale + valiant valiant + valiantly valiantli + valiantness valiant + validity valid + vallant vallant + valley vallei + valleys vallei + vally valli + valor valor + valorous valor + valorously valor + valour valour + valu valu + valuation valuat + value valu + valued valu + valueless valueless + values valu + valuing valu + vane vane + vanish vanish + vanished vanish + vanishes vanish + vanishest vanishest + vanishing vanish + vanities vaniti + vanity vaniti + vanquish vanquish + vanquished vanquish + vanquisher vanquish + vanquishest vanquishest + vanquisheth vanquisheth + vant vant + vantage vantag + vantages vantag + vantbrace vantbrac + vapians vapian + vapor vapor + vaporous vapor + vapour vapour + vapours vapour + vara vara + variable variabl + variance varianc + variation variat + variations variat + varied vari + variest variest + variety varieti + varld varld + varlet varlet + varletry 
varletri + varlets varlet + varletto varletto + varnish varnish + varrius varriu + varro varro + vary vari + varying vari + vassal vassal + vassalage vassalag + vassals vassal + vast vast + vastidity vastid + vasty vasti + vat vat + vater vater + vaudemont vaudemont + vaughan vaughan + vault vault + vaultages vaultag + vaulted vault + vaulting vault + vaults vault + vaulty vaulti + vaumond vaumond + vaunt vaunt + vaunted vaunt + vaunter vaunter + vaunting vaunt + vauntingly vauntingli + vaunts vaunt + vauvado vauvado + vaux vaux + vaward vaward + ve ve + veal veal + vede vede + vehemence vehem + vehemency vehem + vehement vehement + vehor vehor + veil veil + veiled veil + veiling veil + vein vein + veins vein + vell vell + velure velur + velutus velutu + velvet velvet + vendible vendibl + venerable vener + venereal vener + venetia venetia + venetian venetian + venetians venetian + veneys venei + venge veng + vengeance vengeanc + vengeances vengeanc + vengeful veng + veni veni + venial venial + venice venic + venison venison + venit venit + venom venom + venomous venom + venomously venom + vent vent + ventages ventag + vented vent + ventidius ventidiu + ventricle ventricl + vents vent + ventur ventur + venture ventur + ventured ventur + ventures ventur + venturing ventur + venturous ventur + venue venu + venus venu + venuto venuto + ver ver + verb verb + verba verba + verbal verbal + verbatim verbatim + verbosity verbos + verdict verdict + verdun verdun + verdure verdur + vere vere + verefore verefor + verg verg + verge verg + vergers verger + verges verg + verier verier + veriest veriest + verified verifi + verify verifi + verily verili + veritable verit + verite verit + verities veriti + verity veriti + vermilion vermilion + vermin vermin + vernon vernon + verona verona + veronesa veronesa + versal versal + verse vers + verses vers + versing vers + vert vert + very veri + vesper vesper + vessel vessel + vessels vessel + vestal vestal + vestments vestment + vesture vestur + vetch vetch + vetches vetch + veux veux + vex vex + vexation vexat + vexations vexat + vexed vex + vexes vex + vexest vexest + vexeth vexeth + vexing vex + vi vi + via via + vial vial + vials vial + viand viand + viands viand + vic vic + vicar vicar + vice vice + vicegerent viceger + vicentio vicentio + viceroy viceroi + viceroys viceroi + vices vice + vici vici + vicious viciou + viciousness vicious + vict vict + victims victim + victor victor + victoress victoress + victories victori + victorious victori + victors victor + victory victori + victual victual + victuall victual + victuals victual + videlicet videlicet + video video + vides vide + videsne videsn + vidi vidi + vie vie + vied vi + vienna vienna + view view + viewest viewest + vieweth vieweth + viewing view + viewless viewless + views view + vigil vigil + vigilance vigil + vigilant vigil + vigitant vigit + vigour vigour + vii vii + viii viii + vile vile + vilely vile + vileness vile + viler viler + vilest vilest + vill vill + village villag + villager villag + villagery villageri + villages villag + villain villain + villainies villaini + villainous villain + villainously villain + villains villain + villainy villaini + villanies villani + villanous villan + villany villani + villiago villiago + villian villian + villianda villianda + villians villian + vinaigre vinaigr + vincentio vincentio + vincere vincer + vindicative vindic + vine vine + vinegar vinegar + vines vine + vineyard vineyard + vineyards vineyard + vint vint + vintner vintner + viol viol + 
viola viola + violate violat + violated violat + violates violat + violation violat + violator violat + violence violenc + violent violent + violenta violenta + violenteth violenteth + violently violent + violet violet + violets violet + viper viper + viperous viper + vipers viper + vir vir + virgilia virgilia + virgin virgin + virginal virgin + virginalling virginal + virginity virgin + virginius virginiu + virgins virgin + virgo virgo + virtue virtu + virtues virtu + virtuous virtuou + virtuously virtuous + visag visag + visage visag + visages visag + visard visard + viscount viscount + visible visibl + visibly visibl + vision vision + visions vision + visit visit + visitation visit + visitations visit + visited visit + visiting visit + visitings visit + visitor visitor + visitors visitor + visits visit + visor visor + vita vita + vitae vita + vital vital + vitement vitement + vitruvio vitruvio + vitx vitx + viva viva + vivant vivant + vive vive + vixen vixen + viz viz + vizaments vizament + vizard vizard + vizarded vizard + vizards vizard + vizor vizor + vlouting vlout + vocation vocat + vocativo vocativo + vocatur vocatur + voce voce + voic voic + voice voic + voices voic + void void + voided void + voiding void + voke voke + volable volabl + volant volant + volivorco volivorco + volley vollei + volquessen volquessen + volsce volsc + volsces volsc + volscian volscian + volscians volscian + volt volt + voltemand voltemand + volubility volubl + voluble volubl + volume volum + volumes volum + volumnia volumnia + volumnius volumniu + voluntaries voluntari + voluntary voluntari + voluptuously voluptu + voluptuousness voluptu + vomissement vomiss + vomit vomit + vomits vomit + vor vor + vore vore + vortnight vortnight + vot vot + votaries votari + votarist votarist + votarists votarist + votary votari + votre votr + vouch vouch + voucher voucher + vouchers voucher + vouches vouch + vouching vouch + vouchsaf vouchsaf + vouchsafe vouchsaf + vouchsafed vouchsaf + vouchsafes vouchsaf + vouchsafing vouchsaf + voudrais voudrai + vour vour + vous vou + voutsafe voutsaf + vow vow + vowed vow + vowel vowel + vowels vowel + vowing vow + vows vow + vox vox + voyage voyag + voyages voyag + vraiment vraiment + vulcan vulcan + vulgar vulgar + vulgarly vulgarli + vulgars vulgar + vulgo vulgo + vulnerable vulner + vulture vultur + vultures vultur + vurther vurther + w w + wad wad + waddled waddl + wade wade + waded wade + wafer wafer + waft waft + waftage waftag + wafting waft + wafts waft + wag wag + wage wage + wager wager + wagers wager + wages wage + wagging wag + waggish waggish + waggling waggl + waggon waggon + waggoner waggon + wagon wagon + wagoner wagon + wags wag + wagtail wagtail + wail wail + wailful wail + wailing wail + wails wail + wain wain + wainropes wainrop + wainscot wainscot + waist waist + wait wait + waited wait + waiter waiter + waiteth waiteth + waiting wait + waits wait + wak wak + wake wake + waked wake + wakefield wakefield + waken waken + wakened waken + wakes wake + wakest wakest + waking wake + wales wale + walk walk + walked walk + walking walk + walks walk + wall wall + walled wall + wallet wallet + wallets wallet + wallon wallon + walloon walloon + wallow wallow + walls wall + walnut walnut + walter walter + wan wan + wand wand + wander wander + wanderer wander + wanderers wander + wandering wander + wanders wander + wands wand + wane wane + waned wane + wanes wane + waning wane + wann wann + want want + wanted want + wanteth wanteth + wanting want + wanton wanton + 
wantonly wantonli + wantonness wanton + wantons wanton + wants want + wappen wappen + war war + warble warbl + warbling warbl + ward ward + warded ward + warden warden + warder warder + warders warder + wardrobe wardrob + wardrop wardrop + wards ward + ware ware + wares ware + warily warili + warkworth warkworth + warlike warlik + warm warm + warmed warm + warmer warmer + warming warm + warms warm + warmth warmth + warn warn + warned warn + warning warn + warnings warn + warns warn + warp warp + warped warp + warr warr + warrant warrant + warranted warrant + warranteth warranteth + warrantise warrantis + warrantize warrant + warrants warrant + warranty warranti + warren warren + warrener warren + warring war + warrior warrior + warriors warrior + wars war + wart wart + warwick warwick + warwickshire warwickshir + wary wari + was wa + wash wash + washed wash + washer washer + washes wash + washford washford + washing wash + wasp wasp + waspish waspish + wasps wasp + wassail wassail + wassails wassail + wast wast + waste wast + wasted wast + wasteful wast + wasters waster + wastes wast + wasting wast + wat wat + watch watch + watched watch + watchers watcher + watches watch + watchful watch + watching watch + watchings watch + watchman watchman + watchmen watchmen + watchword watchword + water water + waterdrops waterdrop + watered water + waterfly waterfli + waterford waterford + watering water + waterish waterish + waterpots waterpot + waterrugs waterrug + waters water + waterton waterton + watery wateri + wav wav + wave wave + waved wave + waver waver + waverer waver + wavering waver + waves wave + waving wave + waw waw + wawl wawl + wax wax + waxed wax + waxen waxen + waxes wax + waxing wax + way wai + waylaid waylaid + waylay waylai + ways wai + wayward wayward + waywarder wayward + waywardness wayward + we we + weak weak + weaken weaken + weakens weaken + weaker weaker + weakest weakest + weakling weakl + weakly weakli + weakness weak + weal weal + wealsmen wealsmen + wealth wealth + wealthiest wealthiest + wealthily wealthili + wealthy wealthi + wealtlly wealtlli + wean wean + weapon weapon + weapons weapon + wear wear + wearer wearer + wearers wearer + wearied weari + wearies weari + weariest weariest + wearily wearili + weariness weari + wearing wear + wearisome wearisom + wears wear + weary weari + weasel weasel + weather weather + weathercock weathercock + weathers weather + weav weav + weave weav + weaver weaver + weavers weaver + weaves weav + weaving weav + web web + wed wed + wedded wed + wedding wed + wedg wedg + wedged wedg + wedges wedg + wedlock wedlock + wednesday wednesdai + weed weed + weeded weed + weeder weeder + weeding weed + weeds weed + weedy weedi + week week + weeke week + weekly weekli + weeks week + ween ween + weening ween + weep weep + weeper weeper + weeping weep + weepingly weepingli + weepings weep + weeps weep + weet weet + weigh weigh + weighed weigh + weighing weigh + weighs weigh + weight weight + weightier weightier + weightless weightless + weights weight + weighty weighti + weird weird + welcom welcom + welcome welcom + welcomer welcom + welcomes welcom + welcomest welcomest + welfare welfar + welkin welkin + well well + wells well + welsh welsh + welshman welshman + welshmen welshmen + welshwomen welshwomen + wench wench + wenches wench + wenching wench + wend wend + went went + wept wept + weraday weradai + were were + wert wert + west west + western western + westminster westminst + westmoreland westmoreland + westward westward + wet wet + 
wether wether + wetting wet + wezand wezand + whale whale + whales whale + wharf wharf + wharfs wharf + what what + whate whate + whatever whatev + whatsoe whatso + whatsoever whatsoev + whatsome whatsom + whe whe + wheat wheat + wheaten wheaten + wheel wheel + wheeling wheel + wheels wheel + wheer wheer + wheeson wheeson + wheezing wheez + whelk whelk + whelks whelk + whelm whelm + whelp whelp + whelped whelp + whelps whelp + when when + whenas whena + whence whenc + whencesoever whencesoev + whene whene + whenever whenev + whensoever whensoev + where where + whereabout whereabout + whereas wherea + whereat whereat + whereby wherebi + wherefore wherefor + wherein wherein + whereinto whereinto + whereof whereof + whereon whereon + whereout whereout + whereso whereso + wheresoe whereso + wheresoever wheresoev + wheresome wheresom + whereto whereto + whereuntil whereuntil + whereunto whereunto + whereupon whereupon + wherever wherev + wherewith wherewith + wherewithal wherewith + whet whet + whether whether + whetstone whetston + whetted whet + whew whew + whey whei + which which + whiff whiff + whiffler whiffler + while while + whiles while + whilst whilst + whin whin + whine whine + whined whine + whinid whinid + whining whine + whip whip + whipp whipp + whippers whipper + whipping whip + whips whip + whipster whipster + whipstock whipstock + whipt whipt + whirl whirl + whirled whirl + whirligig whirligig + whirling whirl + whirlpool whirlpool + whirls whirl + whirlwind whirlwind + whirlwinds whirlwind + whisp whisp + whisper whisper + whispering whisper + whisperings whisper + whispers whisper + whist whist + whistle whistl + whistles whistl + whistling whistl + whit whit + white white + whitehall whitehal + whitely white + whiteness white + whiter whiter + whites white + whitest whitest + whither whither + whiting white + whitmore whitmor + whitsters whitster + whitsun whitsun + whittle whittl + whizzing whizz + who who + whoa whoa + whoe whoe + whoever whoever + whole whole + wholesom wholesom + wholesome wholesom + wholly wholli + whom whom + whoobub whoobub + whoop whoop + whooping whoop + whor whor + whore whore + whoremaster whoremast + whoremasterly whoremasterli + whoremonger whoremong + whores whore + whoreson whoreson + whoresons whoreson + whoring whore + whorish whorish + whose whose + whoso whoso + whosoe whoso + whosoever whosoev + why why + wi wi + wick wick + wicked wick + wickednes wickedn + wickedness wicked + wicket wicket + wicky wicki + wid wid + wide wide + widens widen + wider wider + widow widow + widowed widow + widower widow + widowhood widowhood + widows widow + wield wield + wife wife + wight wight + wights wight + wild wild + wildcats wildcat + wilder wilder + wilderness wilder + wildest wildest + wildfire wildfir + wildly wildli + wildness wild + wilds wild + wiles wile + wilful wil + wilfull wilful + wilfully wilfulli + wilfulnes wilfuln + wilfulness wil + will will + willed will + willers willer + willeth willeth + william william + williams william + willing will + willingly willingli + willingness willing + willoughby willoughbi + willow willow + wills will + wilt wilt + wiltshire wiltshir + wimpled wimpl + win win + wince winc + winch winch + winchester winchest + wincot wincot + wind wind + winded wind + windgalls windgal + winding wind + windlasses windlass + windmill windmil + window window + windows window + windpipe windpip + winds wind + windsor windsor + windy windi + wine wine + wing wing + winged wing + wingfield wingfield + wingham wingham + 
wings wing + wink wink + winking wink + winks wink + winner winner + winners winner + winning win + winnow winnow + winnowed winnow + winnows winnow + wins win + winter winter + winterly winterli + winters winter + wip wip + wipe wipe + wiped wipe + wipes wipe + wiping wipe + wire wire + wires wire + wiry wiri + wisdom wisdom + wisdoms wisdom + wise wise + wiselier wiseli + wisely wise + wiser wiser + wisest wisest + wish wish + wished wish + wisher wisher + wishers wisher + wishes wish + wishest wishest + wisheth wisheth + wishful wish + wishing wish + wishtly wishtli + wisp wisp + wist wist + wit wit + witb witb + witch witch + witchcraft witchcraft + witches witch + witching witch + with with + withal withal + withdraw withdraw + withdrawing withdraw + withdrawn withdrawn + withdrew withdrew + wither wither + withered wither + withering wither + withers wither + withheld withheld + withhold withhold + withholds withhold + within within + withold withold + without without + withstand withstand + withstanding withstand + withstood withstood + witless witless + witness wit + witnesses wit + witnesseth witnesseth + witnessing wit + wits wit + witted wit + wittenberg wittenberg + wittiest wittiest + wittily wittili + witting wit + wittingly wittingli + wittol wittol + wittolly wittolli + witty witti + wiv wiv + wive wive + wived wive + wives wive + wiving wive + wizard wizard + wizards wizard + wo wo + woe woe + woeful woeful + woefull woeful + woefullest woefullest + woes woe + woful woful + wolf wolf + wolfish wolfish + wolsey wolsei + wolves wolv + wolvish wolvish + woman woman + womanhood womanhood + womanish womanish + womankind womankind + womanly womanli + womb womb + wombs womb + womby wombi + women women + won won + woncot woncot + wond wond + wonder wonder + wondered wonder + wonderful wonder + wonderfully wonderfulli + wondering wonder + wonders wonder + wondrous wondrou + wondrously wondrous + wont wont + wonted wont + woo woo + wood wood + woodbine woodbin + woodcock woodcock + woodcocks woodcock + wooden wooden + woodland woodland + woodman woodman + woodmonger woodmong + woods wood + woodstock woodstock + woodville woodvil + wooed woo + wooer wooer + wooers wooer + wooes wooe + woof woof + wooing woo + wooingly wooingli + wool wool + woollen woollen + woolly woolli + woolsack woolsack + woolsey woolsei + woolward woolward + woos woo + wor wor + worcester worcest + word word + words word + wore wore + worins worin + work work + workers worker + working work + workings work + workman workman + workmanly workmanli + workmanship workmanship + workmen workmen + works work + worky worki + world world + worldlings worldl + worldly worldli + worlds world + worm worm + worms worm + wormwood wormwood + wormy wormi + worn worn + worried worri + worries worri + worry worri + worrying worri + worse wors + worser worser + worship worship + worshipful worship + worshipfully worshipfulli + worshipp worshipp + worshipper worshipp + worshippers worshipp + worshippest worshippest + worships worship + worst worst + worsted worst + wort wort + worth worth + worthied worthi + worthier worthier + worthies worthi + worthiest worthiest + worthily worthili + worthiness worthi + worthless worthless + worths worth + worthy worthi + worts wort + wot wot + wots wot + wotting wot + wouid wouid + would would + wouldest wouldest + wouldst wouldst + wound wound + wounded wound + wounding wound + woundings wound + woundless woundless + wounds wound + wouns woun + woven woven + wow wow + wrack wrack + wrackful 
wrack + wrangle wrangl + wrangler wrangler + wranglers wrangler + wrangling wrangl + wrap wrap + wrapp wrapp + wraps wrap + wrapt wrapt + wrath wrath + wrathful wrath + wrathfully wrathfulli + wraths wrath + wreak wreak + wreakful wreak + wreaks wreak + wreath wreath + wreathed wreath + wreathen wreathen + wreaths wreath + wreck wreck + wrecked wreck + wrecks wreck + wren wren + wrench wrench + wrenching wrench + wrens wren + wrest wrest + wrested wrest + wresting wrest + wrestle wrestl + wrestled wrestl + wrestler wrestler + wrestling wrestl + wretch wretch + wretchcd wretchcd + wretched wretch + wretchedness wretched + wretches wretch + wring wring + wringer wringer + wringing wring + wrings wring + wrinkle wrinkl + wrinkled wrinkl + wrinkles wrinkl + wrist wrist + wrists wrist + writ writ + write write + writer writer + writers writer + writes write + writhled writhl + writing write + writings write + writs writ + written written + wrong wrong + wronged wrong + wronger wronger + wrongful wrong + wrongfully wrongfulli + wronging wrong + wrongly wrongli + wrongs wrong + wronk wronk + wrote wrote + wroth wroth + wrought wrought + wrung wrung + wry wry + wrying wry + wt wt + wul wul + wye wye + x x + xanthippe xanthipp + xi xi + xii xii + xiii xiii + xiv xiv + xv xv + y y + yard yard + yards yard + yare yare + yarely yare + yarn yarn + yaughan yaughan + yaw yaw + yawn yawn + yawning yawn + ycleped yclepe + ycliped yclipe + ye ye + yea yea + yead yead + year year + yearly yearli + yearn yearn + yearns yearn + years year + yeas yea + yeast yeast + yedward yedward + yell yell + yellow yellow + yellowed yellow + yellowing yellow + yellowness yellow + yellows yellow + yells yell + yelping yelp + yeoman yeoman + yeomen yeomen + yerk yerk + yes ye + yesterday yesterdai + yesterdays yesterdai + yesternight yesternight + yesty yesti + yet yet + yew yew + yicld yicld + yield yield + yielded yield + yielder yielder + yielders yielder + yielding yield + yields yield + yok yok + yoke yoke + yoked yoke + yokefellow yokefellow + yokes yoke + yoketh yoketh + yon yon + yond yond + yonder yonder + yongrey yongrei + yore yore + yorick yorick + york york + yorkists yorkist + yorks york + yorkshire yorkshir + you you + young young + younger younger + youngest youngest + youngling youngl + younglings youngl + youngly youngli + younker younker + your your + yours your + yourself yourself + yourselves yourselv + youth youth + youthful youth + youths youth + youtli youtli + zanies zani + zany zani + zeal zeal + zealous zealou + zeals zeal + zed zed + zenelophon zenelophon + zenith zenith + zephyrs zephyr + zir zir + zo zo + zodiac zodiac + zodiacs zodiac + zone zone + zounds zound + zwagger zwagger +} + +# Create a full-text index to use for testing the stemmer. +# +db close +sqlite3 db :memory: +db eval { + CREATE VIRTUAL TABLE t1 USING fts1(word, tokenize Porter); +} + +foreach {pfrom pto} $porter_test_data { + do_test fts1porter-$pfrom { + execsql { + DELETE FROM t1_term; + DELETE FROM t1_content; + INSERT INTO t1(word) VALUES($pfrom); + SELECT term FROM t1_term; + } + } $pto +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2a.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2a.test new file mode 100644 index 0000000..2d1566f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2a.test @@ -0,0 +1,202 @@ +# 2006 September 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS2 module. +# +# $Id: fts2a.test,v 1.2 2007/05/21 21:59:18 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1(content) VALUES('one'); + INSERT INTO t1(content) VALUES('two'); + INSERT INTO t1(content) VALUES('one two'); + INSERT INTO t1(content) VALUES('three'); + INSERT INTO t1(content) VALUES('one three'); + INSERT INTO t1(content) VALUES('two three'); + INSERT INTO t1(content) VALUES('one two three'); + INSERT INTO t1(content) VALUES('four'); + INSERT INTO t1(content) VALUES('one four'); + INSERT INTO t1(content) VALUES('two four'); + INSERT INTO t1(content) VALUES('one two four'); + INSERT INTO t1(content) VALUES('three four'); + INSERT INTO t1(content) VALUES('one three four'); + INSERT INTO t1(content) VALUES('two three four'); + INSERT INTO t1(content) VALUES('one two three four'); + INSERT INTO t1(content) VALUES('five'); + INSERT INTO t1(content) VALUES('one five'); + INSERT INTO t1(content) VALUES('two five'); + INSERT INTO t1(content) VALUES('one two five'); + INSERT INTO t1(content) VALUES('three five'); + INSERT INTO t1(content) VALUES('one three five'); + INSERT INTO t1(content) VALUES('two three five'); + INSERT INTO t1(content) VALUES('one two three five'); + INSERT INTO t1(content) VALUES('four five'); + INSERT INTO t1(content) VALUES('one four five'); + INSERT INTO t1(content) VALUES('two four five'); + INSERT INTO t1(content) VALUES('one two four five'); + INSERT INTO t1(content) VALUES('three four five'); + INSERT INTO t1(content) VALUES('one three four five'); + INSERT INTO t1(content) VALUES('two three four five'); + INSERT INTO t1(content) VALUES('one two three four five'); +} + +do_test fts2a-1.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts2a-1.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two'} +} {3 7 11 15 19 23 27 31} +do_test fts2a-1.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one'} +} {3 7 11 15 19 23 27 31} +do_test fts2a-1.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two three'} +} {7 15 23 31} +do_test fts2a-1.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one three two'} +} {7 15 23 31} +do_test fts2a-1.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two three one'} +} {7 15 23 31} +do_test fts2a-1.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one three'} +} {7 15 23 31} +do_test fts2a-1.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three one two'} +} {7 15 23 31} +do_test fts2a-1.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three two one'} +} {7 15 23 31} +do_test fts2a-1.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two THREE'} +} {7 15 23 31} +do_test fts2a-1.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH ' ONE Two three '} +} {7 15 23 31} + +do_test 
fts2a-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one"'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts2a-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two"'} +} {3 7 11 15 19 23 27 31} +do_test fts2a-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"two one"'} +} {} +do_test fts2a-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three"'} +} {7 15 23 31} +do_test fts2a-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two"'} +} {} +do_test fts2a-2.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three four"'} +} {15 31} +do_test fts2a-2.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two four"'} +} {} +do_test fts2a-2.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three five"'} +} {21} +do_test fts2a-2.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" five'} +} {21 29} +do_test fts2a-2.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three"'} +} {21 29} +do_test fts2a-2.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three" four'} +} {29} +do_test fts2a-2.12 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five four "one three"'} +} {29} +do_test fts2a-2.13 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" four five'} +} {29} + +do_test fts2a-3.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts2a-3.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one -two'} +} {1 5 9 13 17 21 25 29} +do_test fts2a-3.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '-two one'} +} {1 5 9 13 17 21 25 29} + +do_test fts2a-4.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one OR two'} +} {1 2 3 5 6 7 9 10 11 13 14 15 17 18 19 21 22 23 25 26 27 29 30 31} +do_test fts2a-4.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two" OR three'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts2a-4.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR "one two"'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts2a-4.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts2a-4.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR two one'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts2a-4.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three OR four'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} +do_test fts2a-4.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two OR three OR four one'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} + +# Test the ability to handle NULL content +# +do_test fts2a-5.1 { + execsql {INSERT INTO t1(content) VALUES(NULL)} +} {} +do_test fts2a-5.2 { + set rowid [db last_insert_rowid] + execsql {SELECT content FROM t1 WHERE rowid=$rowid} +} {{}} +do_test fts2a-5.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH NULL} +} {} + +# Test the ability to handle non-positive rowids +# +do_test fts2a-6.0 { + execsql {INSERT INTO t1(rowid, content) VALUES(0, 'four five')} +} {} +do_test fts2a-6.1 { + execsql {SELECT content FROM t1 WHERE rowid = 0} +} {{four five}} +do_test fts2a-6.2 { + execsql {INSERT INTO t1(rowid, content) VALUES(-1, 'three four')} +} {} +do_test fts2a-6.3 { + execsql {SELECT content FROM t1 WHERE rowid = -1} +} {{three four}} +do_test fts2a-6.4 { + execsql {SELECT rowid FROM t1 WHERE 
t1 MATCH 'four'} +} {-1 0 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2b.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2b.test new file mode 100644 index 0000000..169cd8a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2b.test @@ -0,0 +1,147 @@ +# 2006 September 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS2 module. +# +# $Id: fts2b.test,v 1.1 2006/10/19 23:36:26 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Fill the full-text index "t1" with phrases in english, spanish, +# and german. For the i-th row, fill in the names for the bits +# that are set in the value of i. The least significant bit is +# 1. For example, the value 5 is 101 in binary which will be +# converted to "one three" in english. +# +proc fill_multilanguage_fulltext_t1 {} { + set english {one two three four five} + set spanish {un dos tres cuatro cinco} + set german {eine zwei drei vier funf} + + for {set i 1} {$i<=31} {incr i} { + set cmd "INSERT INTO t1 VALUES" + set vset {} + foreach lang {english spanish german} { + set words {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend words [lindex [set $lang] $j]} + } + lappend vset "'$words'" + } + set sql "INSERT INTO t1(english,spanish,german) VALUES([join $vset ,])" + # puts $sql + db eval $sql + } +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(english,spanish,german); +} +fill_multilanguage_fulltext_t1 + +do_test fts2b-1.1 { + execsql {SELECT rowid FROM t1 WHERE english MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts2b-1.2 { + execsql {SELECT rowid FROM t1 WHERE spanish MATCH 'one'} +} {} +do_test fts2b-1.3 { + execsql {SELECT rowid FROM t1 WHERE german MATCH 'one'} +} {} +do_test fts2b-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts2b-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one dos drei'} +} {7 15 23 31} +do_test fts2b-1.6 { + execsql {SELECT english, spanish, german FROM t1 WHERE rowid=1} +} {one un eine} +do_test fts2b-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"one un"'} +} {} + +do_test fts2b-2.1 { + execsql { + CREATE VIRTUAL TABLE t2 USING fts2(from,to); + INSERT INTO t2([from],[to]) VALUES ('one two three', 'four five six'); + SELECT [from], [to] FROM t2 + } +} {{one two three} {four five six}} + + +# Compute an SQL string that contains the words one, two, three,... to +# describe bits set in the value $i. Only the lower 5 bits are examined. 
+# +proc wordset {i} { + set x {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend x [lindex {one two three four five} $j]} + } + return '$x' +} + +# Create a new FTS table with three columns: +# +# norm: words for the bits of rowid +# plusone: words for the bits of rowid+1 +# invert: words for the bits of ~rowid +# +db eval { + CREATE VIRTUAL TABLE t4 USING fts2([norm],'plusone',"invert"); +} +for {set i 1} {$i<=15} {incr i} { + set vset [list [wordset $i] [wordset [expr {$i+1}]] [wordset [expr {~$i}]]] + db eval "INSERT INTO t4(norm,plusone,invert) VALUES([join $vset ,]);" +} + +do_test fts2b-4.1 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one'} +} {1 3 5 7 9 11 13 15} +do_test fts2b-4.2 { + execsql {SELECT rowid FROM t4 WHERE norm MATCH 'one'} +} {1 3 5 7 9 11 13 15} +do_test fts2b-4.3 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'one'} +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15} +do_test fts2b-4.4 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:one'} +} {2 4 6 8 10 12 14} +do_test fts2b-4.5 { + execsql {SELECT rowid FROM t4 WHERE plusone MATCH 'one'} +} {2 4 6 8 10 12 14} +do_test fts2b-4.6 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one plusone:two'} +} {1 5 9 13} +do_test fts2b-4.7 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one two'} +} {1 3 5 7 9 11 13 15} +do_test fts2b-4.8 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:two norm:one'} +} {1 5 9 13} +do_test fts2b-4.9 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'two norm:one'} +} {1 3 5 7 9 11 13 15} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2c.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2c.test new file mode 100644 index 0000000..cc6c9bb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2c.test @@ -0,0 +1,1213 @@ +# 2006 September 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS2 module. +# +# $Id: fts2c.test,v 1.1 2006/10/19 23:36:26 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Create a table of sample email data. The data comes from email +# archives of Enron executives that was published as part of the +# litigation against that company. +# +do_test fts2c-1.1 { + db eval { + CREATE VIRTUAL TABLE email USING fts2([from],[to],subject,body); + BEGIN TRANSACTION; +INSERT INTO email([from],[to],subject,body) VALUES('savita.puthigai@enron.com', 'traders.eol@enron.com, traders.eol@enron.com', 'EnronOnline- Change to Autohedge', 'Effective Monday, October 22, 2001 the following changes will be made to the Autohedge functionality on EnronOnline. + +The volume on the hedge will now respect the minimum volume and volume increment settings on the parent product. See rules below: + +? If the transaction volume on the child is less than half of the parent''s minimum volume no hedge will occur. +? 
If the transaction volume on the child is more than half the parent''s minimum volume but less than half the volume increment on the parent, the hedge will volume will be the parent''s minimum volume. +? For all other volumes, the same rounding rules will apply based on the volume increment on the parent product. + +Please see example below: + +Parent''s Settings: +Minimum: 5000 +Increment: 1000 + +Volume on Autohedge transaction Volume Hedged +1 - 2499 0 +2500 - 5499 5000 +5500 - 6499 6000'); +INSERT INTO email([from],[to],subject,body) VALUES('dana.davis@enron.com', 'laynie.east@enron.com, lisa.king@enron.com, lisa.best@enron.com,', 'Leaving Early', 'FYI: +If it''s ok with everyone''s needs, I would like to leave @4pm. If you think +you will need my assistance past the 4 o''clock hour just let me know; I''ll +be more than willing to stay.'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'louise.kitchen@enron.com', '<> - CC02.06.02', 'The following expense report is ready for approval: + +Employee Name: Christopher F. Calger +Status last changed by: Mollie E. Gustafson Ms +Expense Report Name: CC02.06.02 +Report Total: $3,972.93 +Amount Due Employee: $3,972.93 + + +To approve this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('jeff.duff@enron.com', 'julie.johnson@enron.com', 'Work request', 'Julie, + +Could you print off the current work request report by 1:30 today? + +Gentlemen, + +I''d like to review this today at 1:30 in our office. Also, could you provide +me with your activity reports so I can have Julie enter this information. + +JD'); +INSERT INTO email([from],[to],subject,body) VALUES('v.weldon@enron.com', 'gary.l.carrier@usa.dupont.com, scott.joyce@bankofamerica.com', 'Enron News', 'This could turn into something big.... +http://biz.yahoo.com/rf/010129/n29305829.html'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.haedicke@enron.com', 'paul.simons@enron.com', 'Re: First Polish Deal!', 'Congrats! Things seem to be building rapidly now on the Continent. Mark'); +INSERT INTO email([from],[to],subject,body) VALUES('e..carter@enron.com', 't..robinson@enron.com', 'FW: Producers Newsletter 9-24-2001', ' +The producer lumber pricing sheet. + -----Original Message----- +From: Johnson, Jay +Sent: Tuesday, October 16, 2001 3:42 PM +To: Carter, Karen E. +Subject: FW: Producers Newsletter 9-24-2001 + + + + -----Original Message----- +From: Daigre, Sergai +Sent: Friday, September 21, 2001 8:33 PM +Subject: Producers Newsletter 9-24-2001 + + '); +INSERT INTO email([from],[to],subject,body) VALUES('david.delainey@enron.com', 'kenneth.lay@enron.com', 'Greater Houston Partnership', 'Ken, in response to the letter from Mr Miguel San Juan, my suggestion would +be to offer up the Falcon for their use; however, given the tight time frame +and your recent visit with Mr. Fox that it would be difficult for either you +or me to participate. + +I spoke to Max and he agrees with this approach. + +I hope this meets with your approval. + +Regards +Delainey'); +INSERT INTO email([from],[to],subject,body) VALUES('lachandra.fenceroy@enron.com', 'lindy.donoho@enron.com', 'FW: Bus Applications Meeting Follow Up', 'Lindy, + +Here is the original memo we discussed earlier. Please provide any information that you may have. + +Your cooperation is greatly appreciated. 
+ +Thanks, + +lachandra.fenceroy@enron.com +713.853.3884 +877.498.3401 Pager + + -----Original Message----- +From: Bisbee, Joanne +Sent: Wednesday, September 26, 2001 7:50 AM +To: Fenceroy, LaChandra +Subject: FW: Bus Applications Meeting Follow Up + +Lachandra, Please get with David Duff today and see what this is about. Who are our TW accounting business users? + + -----Original Message----- +From: Koh, Wendy +Sent: Tuesday, September 25, 2001 2:41 PM +To: Bisbee, Joanne +Subject: Bus Applications Meeting Follow Up + +Lisa brought up a TW change effective Nov 1. It involves eliminating a turnback surcharge. I have no other information, but you might check with the business folks for any system changes required. + +Wendy'); +INSERT INTO email([from],[to],subject,body) VALUES('danny.mccarty@enron.com', 'fran.fagan@enron.com', 'RE: worksheets', 'Fran, + If Julie''s merit needs to be lump sum, just move it over to that column. Also, send me Eric Gadd''s sheets as well. Thanks. +Dan + + -----Original Message----- +From: Fagan, Fran +Sent: Thursday, December 20, 2001 11:10 AM +To: McCarty, Danny +Subject: worksheets + +As discussed, attached are your sheets for bonus and merit. + +Thanks, + +Fran Fagan +Sr. HR Rep +713.853.5219 + + + << File: McCartyMerit.xls >> << File: mccartyBonusCommercial_UnP.xls >> + +'); +INSERT INTO email([from],[to],subject,body) VALUES('bert.meyers@enron.com', 'shift.dl-portland@enron.com', 'OCTOBER SCHEDULE', 'TEAM, + +PLEASE SEND ME ANY REQUESTS THAT YOU HAVE FOR OCTOBER. SO FAR I HAVE THEM FOR LEAF. I WOULD LIKE TO HAVE IT DONE BY THE 15TH OF THE MONTH. ANY QUESTIONS PLEASE GIVE ME A CALL. + +BERT'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'john.arnold@enron.com, bilal.bajwa@enron.com, john.griffith@enron.com,', 'TRV Notification: (NG - PROPT P/L - 09/27/2001)', 'The report named: NG - PROPT P/L , published as of 09/27/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('patrice.mims@enron.com', 'calvin.eakins@enron.com', 'Re: Small business supply assistance', 'Hi Calvin + + +I spoke with Rickey (boy, is he long-winded!!). Gave him the name of our +credit guy, Russell Diamond. + +Thank for your help!'); +INSERT INTO email([from],[to],subject,body) VALUES('legal <.hall@enron.com>', 'stephanie.panus@enron.com', 'Termination update', 'City of Vernon and Salt River Project terminated their contracts. I will fax these notices to you.'); +INSERT INTO email([from],[to],subject,body) VALUES('d..steffes@enron.com', 'richard.shapiro@enron.com', 'EES / ENA Government Affairs Staffing & Outside Services', 'Rick -- + +Here is the information on staffing and outside services. Call if you need anything else. + +Jim + + '); +INSERT INTO email([from],[to],subject,body) VALUES('gelliott@industrialinfo.com', 'pcopello@industrialinfo.com', 'ECAAR (Gavin), WSCC (Diablo Canyon), & NPCC (Seabrook)', 'Dear Power Outage Database Customer, +Attached you will find an excel document. The outages contained within are forced or rescheduled outages. Your daily delivery will still contain these outages. +In addition to the two excel documents, there is a dbf file that is formatted like your daily deliveries you receive nightly. This will enable you to load the data into your regular database. Any questions please let me know. Thanks. +Greg Elliott +IIR, Inc. 
+713-783-5147 x 3481 +outages@industrialinfo.com +THE INFORMATION CONTAINED IN THIS E-MAIL IS LEGALLY PRIVILEGED AND CONFIDENTIAL INFORMATION INTENDED ONLY FOR THE USE OF THE INDIVIDUAL OR ENTITY NAMED ABOVE. YOU ARE HEREBY NOTIFIED THAT ANY DISSEMINATION, DISTRIBUTION, OR COPY OF THIS E-MAIL TO UNAUTHORIZED ENTITIES IS STRICTLY PROHIBITED. IF YOU HAVE RECEIVED THIS +E-MAIL IN ERROR, PLEASE DELETE IT. + - OUTAGE.dbf + - 111201R.xls + - 111201.xls '); +INSERT INTO email([from],[to],subject,body) VALUES('enron.announcements@enron.com', 'all_ena_egm_eim@enron.com', 'EWS Brown Bag', 'MARK YOUR LUNCH CALENDARS NOW ! + +You are invited to attend the EWS Brown Bag Lunch Series + +Featuring: RAY BOWEN, COO + +Topic: Enron Industrial Markets + +Thursday, March 15, 2001 +11:30 am - 12:30 pm +EB 5 C2 + + +You bring your lunch, Limited Seating +We provide drinks and dessert. RSVP x 3-9610'); +INSERT INTO email([from],[to],subject,body) VALUES('chris.germany@enron.com', 'ingrid.immer@williams.com', 'Re: About St Pauls', 'Sounds good to me. I bet this is next to the Warick?? Hotel. + + + + +"Immer, Ingrid" on 12/21/2000 11:48:47 AM +To: "''chris.germany@enron.com''" +cc: +Subject: About St Pauls + + + + + <> +? +?http://www.stpaulshouston.org/about.html + +Chris, + +I like the looks of this place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. + +Let me know.?? ii + + - About St Pauls.url + +'); +INSERT INTO email([from],[to],subject,body) VALUES('nas@cpuc.ca.gov', 'skatz@sempratrading.com, kmccrea@sablaw.com, thompson@wrightlaw.com,', 'Reply Brief filed July 31, 2000', ' - CPUC01-#76371-v1-Revised_Reply_Brief__Due_today_7_31_.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('gascontrol@aglresources.com', 'dscott4@enron.com, lcampbel@enron.com', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder +As discussed in the Winter Operations Meeting on Sept.29,2000, +E-Gas(Emergency Gas) will not be offered this winter as a service from AGLC. +Marketers and Poolers can receive gas via Peaking and IBSS nominations(daisy +chain) from other marketers up to the 6 p.m. Same Day 2 nomination cycle. +'); +INSERT INTO email([from],[to],subject,body) VALUES('dutch.quigley@enron.com', 'rwolkwitz@powermerchants.com', '', ' + +Here is a goody for you'); +INSERT INTO email([from],[to],subject,body) VALUES('ryan.o''rourke@enron.com', 'k..allen@enron.com, randy.bhatia@enron.com, frank.ermis@enron.com,', 'TRV Notification: (West VaR - 11/07/2001)', 'The report named: West VaR , published as of 11/07/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('mjones7@txu.com', 'cstone1@txu.com, ggreen2@txu.com, timpowell@txu.com,', 'Enron / HPL Actuals for July 10, 2000', 'Teco Tap 10.000 / Enron ; 110.000 / HPL IFERC + +LS HPL LSK IC 30.000 / Enron +'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.pereira@enron.com', 'kkw816@aol.com', 'soccer practice', 'Kathy- + +Is it safe to assume that practice is cancelled for tonight?? + +Susan Pereira'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.whitt@enron.com', 'barry.tycholiz@enron.com', 'Huber Internal Memo', 'Please look at this. I didn''t know how deep to go with the desk. Do you think this works. 
+ + '); +INSERT INTO email([from],[to],subject,body) VALUES('m..forney@enron.com', 'george.phillips@enron.com', '', 'George, +Give me a call and we will further discuss opportunities on the 13st floor. + +Thanks, +JMForney +3-7160'); +INSERT INTO email([from],[to],subject,body) VALUES('brad.mckay@enron.com', 'angusmcka@aol.com', 'Re: (no subject)', 'not yet'); +INSERT INTO email([from],[to],subject,body) VALUES('adam.bayer@enron.com', 'jonathan.mckay@enron.com', 'FW: Curve Fetch File', 'Here is the curve fetch file sent to me. It has plenty of points in it. If you give me a list of which ones you need we may be able to construct a secondary worksheet to vlookup the values. + +adam +35227 + + + -----Original Message----- +From: Royed, Jeff +Sent: Tuesday, September 25, 2001 11:37 AM +To: Bayer, Adam +Subject: Curve Fetch File + +Let me know if it works. It may be required to have a certain version of Oracle for it to work properly. + + + +Jeff Royed +Enron +Energy Operations +Phone: 713-853-5295'); +INSERT INTO email([from],[to],subject,body) VALUES('matt.smith@enron.com', 'yan.wang@enron.com', 'Report Formats', 'Yan, + +The merged reports look great. I believe the only orientation changes are to +"unmerge" the following six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 +23 West_3 +25 CIG_WIC + +The orientation of the individual reports should be correct. Thanks. + +Mat + +PS. Just a reminder to add the "*" by the title of calculated points.'); +INSERT INTO email([from],[to],subject,body) VALUES('michelle.lokay@enron.com', 'jimboman@bigfoot.com', 'Egyptian Festival', '---------------------- Forwarded by Michelle Lokay/ET&S/Enron on 09/07/2000 +10:08 AM --------------------------- + + +"Karkour, Randa" on 09/07/2000 09:01:04 AM +To: "''Agheb (E-mail)" , "Leila Mankarious (E-mail)" +, "''Marymankarious (E-mail)" +, "Michelle lokay (E-mail)" , "Ramy +Mankarious (E-mail)" +cc: + +Subject: Egyptian Festival + + + <> + + http://www.egyptianfestival.com/ + + - Egyptian Festival.url +'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'sherry.dawson@enron.com', 'Urgent!!! --- New EAST books', 'This has to be done.................................. + +Thanks +---------------------- Forwarded by Errol McLaughlin/Corp/Enron on 12/20/2000 +08:39 AM --------------------------- + + + + From: William Kelly @ ECT 12/20/2000 08:31 AM + + +To: Kam Keiser/HOU/ECT@ECT, Darron C Giron/HOU/ECT@ECT, David +Baumbach/HOU/ECT@ECT, Errol McLaughlin/Corp/Enron@ENRON +cc: Kimat Singla/HOU/ECT@ECT, Kulvinder Fowler/NA/Enron@ENRON, Kyle R +Lilly/HOU/ECT@ECT, Jeff Royed/Corp/Enron@ENRON, Alejandra +Chavez/NA/Enron@ENRON, Crystal Hyde/HOU/ECT@ECT + +Subject: New EAST books + +We have new book names in TAGG for our intramonth portfolios and it is +extremely important that any deal booked to the East is communicated quickly +to someone on my team. I know it will take some time for the new names to +sink in and I do not want us to miss any positions or P&L. + +Thanks for your help on this. + +New: +Scott Neal : East Northeast +Dick Jenkins: East Marketeast + +WK +'); +INSERT INTO email([from],[to],subject,body) VALUES('david.forster@enron.com', 'eol.wide@enron.com', 'Change to Stack Manager', 'Effective immediately, there is a change to the Stack Manager which will +affect any Inactive Child. + +An inactive Child with links to Parent products will not have their +calculated prices updated until the Child product is Activated. 
+ +When the Child Product is activated, the price will be recalculated and +updated BEFORE it is displayed on the web. + +This means that if you are inputting a basis price on a Child product, you +will not see the final, calculated price until you Activate the product, at +which time the customer will also see it. + +If you have any questions, please contact the Help Desk on: + +Americas: 713 853 4357 +Europe: + 44 (0) 20 7783 7783 +Asia/Australia: +61 2 9229 2300 + +Dave'); +INSERT INTO email([from],[to],subject,body) VALUES('vince.kaminski@enron.com', 'jhh1@email.msn.com', 'Re: Light reading - see pieces beginning on page 7', 'John, + +I saw it. Very interesting. + +Vince + + + + + +"John H Herbert" on 07/28/2000 08:38:08 AM +To: "Vince J Kaminski" +cc: +Subject: Light reading - see pieces beginning on page 7 + + +Cheers and have a nice weekend, + + +JHHerbert + + + + + - gd000728.pdf + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('matthew.lenhart@enron.com', 'mmmarcantel@equiva.com', 'RE:', 'i will try to line up a pig for you '); +INSERT INTO email([from],[to],subject,body) VALUES('jae.black@enron.com', 'claudette.harvey@enron.com, chaun.roberts@enron.com, judy.martinez@enron.com,', 'Disaster Recovery Equipment', 'As a reminder...there are several pieces of equipment that are set up on the 30th Floor, as well as on our floor, for the Disaster Recovery Team. PLEASE DO NOT TAKE, BORROW OR USE this equipment. Should you need to use another computer system, other than yours, or make conference calls please work with your Assistant to help find or set up equipment for you to use. + +Thanks for your understanding in this matter. + +T.Jae Black +East Power Trading +Assistant to Kevin Presto +off. 713-853-5800 +fax 713-646-8272 +cell 713-539-4760'); +INSERT INTO email([from],[to],subject,body) VALUES('eric.bass@enron.com', 'dale.neuner@enron.com', '5 X 24', 'Dale, + +Have you heard anything more on the 5 X 24s? We would like to get this +product out ASAP. + + +Thanks, + +Eric'); +INSERT INTO email([from],[to],subject,body) VALUES('messenger@smartreminders.com', 'm..tholt@enron.com', '10% Coupon - PrintPal Printer Cartridges - 100% Guaranteed', '[IMAGE] +[IMAGE][IMAGE][IMAGE] +Dear SmartReminders Member, + [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] + + + + + + + + + + + + + + + + + + + + + +We respect your privacy and are a Certified Participant of the BBBOnLine + Privacy Program. To be removed from future offers,click here. +SmartReminders.com is a permission based service. To unsubscribe click here . '); +INSERT INTO email([from],[to],subject,body) VALUES('benjamin.rogers@enron.com', 'mark.bernstein@enron.com', '', 'The guy you are talking about left CIN under a "cloud of suspicion" sort of +speak. He was the one who got into several bad deals and PPA''s in California +for CIN, thus he left on a bad note. Let me know if you need more detail +than that, I felt this was the type of info you were looking for. Thanks! +Ben'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'michelle.cash@enron.com', 'Expense Report Receipts Not Received', 'Employee Name: Michelle Cash +Report Name: Houston Cellular 8-11-01 +Report Date: 12/13/01 +Report ID: 594D37C9ED2111D5B452 +Submitted On: 12/13/01 + +You are only allowed 2 reports with receipts outstanding. 
Your expense reports will not be paid until you meet this requirement.'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.mara@enron.com', 'ray.alvarez@enron.com, mark.palmer@enron.com, karen.denne@enron.com,', 'CAISO Emergency Motion -- to discontinue market-based rates for', 'FYI. the latest broadside against the generators. + +Sue Mara +Enron Corp. +Tel: (415) 782-7802 +Fax:(415) 782-7854 +----- Forwarded by Susan J Mara/NA/Enron on 06/08/2001 12:24 PM ----- + + + "Milner, Marcie" 06/08/2001 11:13 AM To: "''smara@enron.com''" cc: Subject: CAISO Emergency Motion + + +Sue, did you see this emergency motion the CAISO filed today? Apparently +they are requesting that FERC discontinue market-based rates immediately and +grant refunds plus interest on the difference between cost-based rates and +market revenues received back to May 2000. They are requesting the +commission act within 14 days. Have you heard anything about what they are +doing? + +Marcie + +http://www.caiso.com/docs/2001/06/08/200106081005526469.pdf +'); +INSERT INTO email([from],[to],subject,body) VALUES('fletcher.sturm@enron.com', 'eloy.escobar@enron.com', 'Re: General Brinks Position Meeting', 'Eloy, + +Who is General Brinks? + +Fletch'); +INSERT INTO email([from],[to],subject,body) VALUES('nailia.dindarova@enron.com', 'richard.shapiro@enron.com', 'Documents for Mark Frevert (on EU developments and lessons from', 'Rick, + +Here are the documents that Peter has prepared for Mark Frevert. + +Nailia +---------------------- Forwarded by Nailia Dindarova/LON/ECT on 25/06/2001 +16:36 --------------------------- + + +Nailia Dindarova +25/06/2001 15:36 +To: Michael Brown/Enron@EUEnronXGate +cc: Ross Sankey/Enron@EUEnronXGate, Eric Shaw/ENRON@EUEnronXGate, Peter +Styles/LON/ECT@ECT + +Subject: Documents for Mark Frevert (on EU developments and lessons from +California) + +Michael, + + +These are the documents that Peter promised to give to you for Mark Frevert. +He has now handed them to him in person but asked me to transmit them +electronically to you, as well as Eric and Ross. + +Nailia + + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('peggy.a.kostial@accenture.com', 'dave.samuels@enron.com', 'EOL-Accenture Deal Sheet', 'Dave - + +Attached are our comments and suggested changes. Please call to review. + +On the time line for completion, we have four critical steps to complete: + Finalize market analysis to refine business case, specifically + projected revenue stream + Complete counterparty surveying, including targeting 3 CPs for letters + of intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations + +Joe will come back to us with an updated time line, but it is my +expectation that we are still on the same schedule (we just begun week +three) with possibly a week or so slippage.....contract negotiations will +probably be the critical path. + +We will send our cut at the actual time line here shortly. 
Thanks, + +Peggy + +(See attached file: accenture-dealpoints v2.doc) + - accenture-dealpoints v2.doc '); +INSERT INTO email([from],[to],subject,body) VALUES('thomas.martin@enron.com', 'thomas.martin@enron.com', 'Re: Guadalupe Power Partners LP', '---------------------- Forwarded by Thomas A Martin/HOU/ECT on 03/20/2001 +03:49 PM --------------------------- + + +Thomas A Martin +10/11/2000 03:55 PM +To: Patrick Wade/HOU/ECT@ECT +cc: +Subject: Re: Guadalupe Power Partners LP + +The deal is physically served at Oasis Waha or Oasis Katy and is priced at +either HSC, Waha or Katytailgate GD at buyers option three days prior to +NYMEX close. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('judy.townsend@enron.com', 'dan.junek@enron.com, chris.germany@enron.com', 'Columbia Distribution''s Capacity Available for Release - Sum', '---------------------- Forwarded by Judy Townsend/HOU/ECT on 03/09/2001 11:04 +AM --------------------------- + + +agoddard@nisource.com on 03/08/2001 09:16:57 AM +To: " - *Koch, Kent" , " - +*Millar, Debra" , " - *Burke, Lynn" + +cc: " - *Heckathorn, Tom" +Subject: Columbia Distribution''s Capacity Available for Release - Sum + + +Attached is Columbia Distribution''s notice of capacity available for release +for +the summer of 2001 (Apr. 2001 through Oct. 2001). + +Please note that the deadline for bids is 3:00pm EST on March 20, 2001. + +If you have any questions, feel free to contact any of the representatives +listed +at the bottom of the attachment. + +Aaron Goddard + + + + + - 2001Summer.doc +'); +INSERT INTO email([from],[to],subject,body) VALUES('rhonda.denton@enron.com', 'tim.belden@enron.com, dana.davis@enron.com, genia.fitzgerald@enron.com,', 'Split Rock Energy LLC', 'We have received the executed EEI contract from this CP dated 12/12/2000. +Copies will be distributed to Legal and Credit.'); +INSERT INTO email([from],[to],subject,body) VALUES('kerrymcelroy@dwt.com', 'jack.speer@alcoa.com, crow@millernash.com, michaelearly@earthlink.net,', 'Oral Argument Request', ' - Oral Argument Request.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('mike.carson@enron.com', 'rlmichaelis@hormel.com', '', 'Did you come in town this wk end..... My new number at our house is : +713-668-3712...... my cell # is 281-381-7332 + +the kid'); +INSERT INTO email([from],[to],subject,body) VALUES('cooper.richey@enron.com', 'trycooper@hotmail.com', 'FW: Contact Info', ' + +-----Original Message----- +From: Punja, Karim +Sent: Thursday, December 13, 2001 2:35 PM +To: Richey, Cooper +Subject: Contact Info + + +Cooper, + +Its been a real pleasure working with you (even though it was for only a small amount of time) +I hope we can stay in touch. + +Home# 234-0249 +email: kpunja@hotmail.com + +Take Care, + +Karim. + '); +INSERT INTO email([from],[to],subject,body) VALUES('bjm30@earthlink.net', 'mcguinn.k@enron.com, mcguinn.ian@enron.com, mcguinn.stephen@enron.com,', 'email address change', 'Hello all. + +I haven''t talked to many of you via email recently but I do want to give you +my new address for your email file: + + bjm30@earthlink.net + +I hope all is well. + +Brian McGuinn'); +INSERT INTO email([from],[to],subject,body) VALUES('shelley.corman@enron.com', 'steve.hotte@enron.com', 'Flat Panels', 'Can you please advise what is going on with the flat panels that we had planned to distribute to our gas logistics team. It was in the budget and we had the okay, but now I''m hearing there is some hold-up & the units are stored on 44. 
+ +Shelley'); +INSERT INTO email([from],[to],subject,body) VALUES('sara.davidson@enron.com', 'john.schwartzenburg@enron.com, scott.dieball@enron.com, recipients@enron.com,', '2001 Enron Law Conference (Distribution List 2)', ' Enron Law Conference + +San Antonio, Texas May 2-4, 2001 Westin Riverwalk + + See attached memo for more details!! + + +? Registration for the law conference this year will be handled through an +Online RSVP Form on the Enron Law Conference Website at +http://lawconference.corp.enron.com. The website is still under construction +and will not be available until Thursday, March 15, 2001. + +? We will send you another e-mail to confirm when the Law Conference Website +is operational. + +? Please complete the Online RSVP Form as soon as it is available and submit +it no later than Friday, March 30th. + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('tori.kuykendall@enron.com', 'heath.b.taylor@accenture.com', 'Re:', 'hey - thats funny about john - he definitely remembers him - i''ll call pat +and let him know - we are coming on saturday - i just havent had a chance to +call you guys back -- looking forward to it -- i probably need the +directions again though'); +INSERT INTO email([from],[to],subject,body) VALUES('darron.giron@enron.com', 'bryce.baxter@enron.com', 'Re: Feedback for Audrey Cook', 'Bryce, + +I''ll get it done today. + +DG 3-9573 + + + + + + From: Bryce Baxter 06/12/2000 07:15 PM + + +To: Darron C Giron/HOU/ECT@ECT +cc: +Subject: Feedback for Audrey Cook + +You were identified as a reviewer for Audrey Cook. If possible, could you +complete her feedback by end of business Wednesday? It will really help me +in the PRC process to have your input. Thanks. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('casey.evans@enron.com', 'stephanie.sever@enron.com', 'Gas EOL ID', 'Stephanie, + +In conjunction with the recent movement of several power traders, they are changing the names of their gas books as well. The names of the new gas books and traders are as follows: + +PWR-NG-LT-SPP: Mike Carson +PWR-NG-LT-SERC: Jeff King + +If you need to know their power desk to map their ID to their gas books, those desks are as follows: + +EPMI-LT-SPP: Mike Carson +EPMI-LT-SERC: Jeff King + +I will be in training this afternoon, but will be back when class is over. Let me know if you have any questions. + +Thanks for your help! +Casey'); +INSERT INTO email([from],[to],subject,body) VALUES('darrell.schoolcraft@enron.com', 'david.roensch@enron.com, kimberly.watson@enron.com, michelle.lokay@enron.com,', 'Postings', 'Please see the attached. + + +ds + + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('mcominsky@aol.com', 'cpatman@bracepatt.com, james_derrick@enron.com', 'Jurisprudence Luncheon', 'Carrin & Jim -- + +It was an honor and a pleasure to meet both of you yesterday. I know we will +have fun working together on this very special event. + +Jeff left the jurisprudence luncheon lists for me before he left on vacation. + I wasn''t sure whether he transmitted them to you as well. Would you please +advise me if you would like them sent to you? I can email the MS Excel files +or I can fax the hard copies to you. Please advise what is most convenient. + +I plan to be in town through the holidays and can be reached by phone, email, +or cell phone at any time. My cell phone number is 713/705-4829. + +Thanks again for your interest in the ADL''s work. Martin. + +Martin B. 
Cominsky +Director, Southwest Region +Anti-Defamation League +713/627-3490, ext. 122 +713/627-2011 (fax) +MCominsky@aol.com'); +INSERT INTO email([from],[to],subject,body) VALUES('phillip.love@enron.com', 'todagost@utmb.edu, gbsonnta@utmb.edu', 'New President', 'I had a little bird put a word in my ear. Is there any possibility for Ben +Raimer to be Bush''s secretary of HHS? Just curious about that infamous UTMB +rumor mill. Hope things are well, happy holidays. +PL'); +INSERT INTO email([from],[to],subject,body) VALUES('marie.heard@enron.com', 'ehamilton@fna.com', 'ISDA Master Agreement', 'Erin: + +Pursuant to your request, attached are the Schedule to the ISDA Master Agreement, together with Paragraph 13 to the ISDA Credit Support Annex. Please let me know if you need anything else. We look forward to hearing your comments. + +Marie + +Marie Heard +Senior Legal Specialist +Enron North America Corp. +Phone: (713) 853-3907 +Fax: (713) 646-3490 +marie.heard@enron.com + + '); +INSERT INTO email([from],[to],subject,body) VALUES('andrea.ring@enron.com', 'beverly.beaty@enron.com', 'Re: Tennessee Buy - Louis Dreyfus', 'Beverly - once again thanks so much for your help on this. + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('karolyn.criado@enron.com', 'j..bonin@enron.com, felicia.case@enron.com, b..clapp@enron.com,', 'Price List week of Oct. 8-9, 2001', ' +Please contact me if you have any questions regarding last weeks prices. + +Thank you, +Karolyn Criado +3-9441 + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.presto@enron.com', 'edward.baughman@enron.com, billy.braddock@enron.com', 'Associated', 'Please begin working on filling our Associated short position in 02. I would like to take this risk off the books. + +In addition, please find out what a buy-out of VEPCO would cost us. With Rogers transitioning to run our retail risk management, I would like to clean up our customer positions. + +We also need to continue to explore a JEA buy-out. + +Thanks.'); +INSERT INTO email([from],[to],subject,body) VALUES('stacy.dickson@enron.com', 'gregg.penman@enron.com', 'RE: Constellation TC 5-7-01', 'Gregg, + +I am at home with a sick baby. (Lots of fun!) I will call you about this +tomorrow. + +Stacy'); +INSERT INTO email([from],[to],subject,body) VALUES('joe.quenet@enron.com', 'dfincher@utilicorp.com', '', 'hey big guy.....check this out..... + + w ww.gorelieberman-2000.com/'); +INSERT INTO email([from],[to],subject,body) VALUES('k..allen@enron.com', 'jacqestc@aol.com', '', 'Jacques, + +I sent you a fax of Kevin Kolb''s comments on the release. The payoff on the note would be $36,248 ($36090(principal) + $158 (accrued interest)). +This is assuming we wrap this up on Tuesday. + +Please email to confirm that their changes are ok so I can set up a meeting on Tuesday to reach closure. + +Phillip'); +INSERT INTO email([from],[to],subject,body) VALUES('kourtney.nelson@enron.com', 'mike.swerzbin@enron.com', 'Adjusted L/R Balance', 'Mike, + +I placed the adjusted L/R Balance on the Enronwest site. It is under the "Staff/Kourtney Nelson". There are two links: + +1) "Adj L_R" is the same data/format from the weekly strategy meeting. +2) "New Gen 2001_2002" link has all of the supply side info that is used to calculate the L/R balance + -Please note the Data Flag column, a value of "3" indicates the project was cancelled, on hold, etc and is not included in the calc. 
+ +Both of these sheets are interactive Excel spreadsheets and thus you can play around with the data as you please. Also, James Bruce is working to get his gen report on the web. That will help with your access to information on new gen. + +Please let me know if you have any questions or feedback, + +Kourtney + + + +Kourtney Nelson +Fundamental Analysis +Enron North America +(503) 464-8280 +kourtney.nelson@enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('d..thomas@enron.com', 'naveed.ahmed@enron.com', 'FW: Current Enron TCC Portfolio', ' + +-----Original Message----- +From: Grace, Rebecca M. +Sent: Monday, December 17, 2001 9:44 AM +To: Thomas, Paul D. +Cc: Cashion, Jim; Allen, Thresa A.; May, Tom +Subject: RE: Current Enron TCC Portfolio + + +Paul, + +I reviewed NY''s list. I agree with all of their contracts numbers and mw amounts. + +Call if you have any more questions. + +Rebecca + + + + -----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:08 AM +To: Grace, Rebecca M. +Subject: FW: Current Enron TCC Portfolio + + << File: enrontccs.xls >> +Rebecca, +Let me know if you see any differences. + +Paul +X 3-0403 +-----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:04 AM +To: Ahmed, Naveed +Subject: FW: Current Enron TCC Portfolio + + + + +-----Original Message----- +From: Thomas, Paul D. +Sent: Thursday, December 13, 2001 10:01 AM +To: Baughman, Edward D. +Subject: Current Enron TCC Portfolio + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('stephanie.panus@enron.com', 'william.bradford@enron.com, debbie.brackett@enron.com,', 'Coastal Merchant Energy/El Paso Merchant Energy', 'Coastal Merchant Energy, L.P. merged with and into El Paso Merchant Energy, +L.P., effective February 1, 2001, with the surviving entity being El Paso +Merchant Energy, L.P. We currently have ISDA Master Agreements with both +counterparties. Please see the attached memo regarding the existing Masters +and let us know which agreement should be terminated. + +Thanks, +Stephanie +'); +INSERT INTO email([from],[to],subject,body) VALUES('kam.keiser@enron.com', 'c..kenne@enron.com', 'RE: What about this too???', ' + + -----Original Message----- +From: Kenne, Dawn C. +Sent: Wednesday, February 06, 2002 11:50 AM +To: Keiser, Kam +Subject: What about this too??? + + + << File: Netco Trader Matrix.xls >> + '); +INSERT INTO email([from],[to],subject,body) VALUES('chris.meyer@enron.com', 'joe.parks@enron.com', 'Centana', 'Talked to Chip. We do need Cash Committe approval given the netting feature of your deal, which means Batch Funding Request. Please update per my previous e-mail and forward. + +Thanks + +chris +x31666'); +INSERT INTO email([from],[to],subject,body) VALUES('debra.perlingiere@enron.com', 'jworman@academyofhealth.com', '', 'Have a great weekend! Happy Fathers Day! + + +Debra Perlingiere +Enron North America Corp. +1400 Smith Street, EB 3885 +Houston, Texas 77002 +dperlin@enron.com +Phone 713-853-7658 +Fax 713-646-3490'); +INSERT INTO email([from],[to],subject,body) VALUES('outlook.team@enron.com', '', 'Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia &', 'CALENDAR ENTRY: APPOINTMENT + +Description: + Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia & Dir Rpts. 
- 4102 + +Date: 1/5/2001 +Time: 9:00 AM - 10:00 AM (Central Standard Time) + +Chairperson: Outlook Migration Team + +Detailed Description:'); +INSERT INTO email([from],[to],subject,body) VALUES('diana.seifert@enron.com', 'mark.taylor@enron.com', 'Guest access Chile', 'Hello Mark, + +Justin Boyd told me that your can help me with questions regarding Chile. +We got a request for guest access through MG. +The company is called Escondida and is a subsidiary of BHP Australia. + +Please advise if I can set up a guest account or not. +F.Y.I.: MG is planning to put a "in w/h Chile" contract for Copper on-line as +soon as Enron has done the due diligence for this country. +Thanks ! + + +Best regards + +Diana Seifert +EOL PCG'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'mark.whitt@enron.com', '<> - 121001', 'The Approval status has changed on the following report: + +Status last changed by: Barry L. Tycholiz +Expense Report Name: 121001 +Report Total: $198.98 +Amount Due Employee: $198.98 +Amount Approved: $198.98 +Amount Paid: $0.00 +Approval Status: Approved +Payment Status: Pending + + +To review this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.hyatt@enron.com', '', 'Technical Support', 'Outside the U.S., please refer to the list below: + +Australia: +1800 678-515 +support@palm-au.com + +Canada: +1905 305-6530 +support@palm.com + +New Zealand: +0800 446-398 +support@palm-nz.com + +U.K.: +0171 867 0108 +eurosupport@palm.3com.com + +Please refer to the Worldwide Customer Support card for a complete technical support contact list.'); +INSERT INTO email([from],[to],subject,body) VALUES('geoff.storey@enron.com', 'dutch.quigley@enron.com', 'RE:', 'duke contact? + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 10:14 AM +To: Storey, Geoff +Subject: RE: + +bp corp Albert LaMore 281-366-4962 + +running the reports now + + + -----Original Message----- +From: Storey, Geoff +Sent: Wednesday, October 31, 2001 10:10 AM +To: Quigley, Dutch +Subject: RE: + +give me a contact over there too +BP + + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 9:42 AM +To: Storey, Geoff +Subject: + +Coral Jeff Whitnah 713-767-5374 +Relaint Steve McGinn 713-207-4000'); +INSERT INTO email([from],[to],subject,body) VALUES('pete.davis@enron.com', 'pete.davis@enron.com', 'Start Date: 4/22/01; HourAhead hour: 3; ', 'Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING FILE -->> O:\Portland\WestDesk\California Scheduling\ISO Final +Schedules\2001042203.txt + +---- Load Schedule ---- +$$$ Variance found in table tblLoads. + Details: (Hour: 3 / Preferred: 1.92 / Final: 1.89) + TRANS_TYPE: FINAL + LOAD_ID: PGE4 + MKT_TYPE: 2 + TRANS_DATE: 4/22/01 + SC_ID: EPMI + +'); +INSERT INTO email([from],[to],subject,body) VALUES('john.postlethwaite@enron.com', 'john.zufferli@enron.com', 'Reference', 'John, hope things are going well up there for you. The big day is almost here for you and Jessica. I was wondering if I could use your name as a job reference if need be. I am just trying to get everything in order just in case something happens. + +John'); +INSERT INTO email([from],[to],subject,body) VALUES('jeffrey.shankman@enron.com', 'lschiffm@jonesday.com', 'Re:', 'I saw you called on the cell this a.m. 
Sorry I missed you. (I was in the +shower). I have had a shitty week--I suspect my silence (not only to you, +but others) after our phone call is a result of the week. I''m seeing Glen at +11:15....talk to you'); +INSERT INTO email([from],[to],subject,body) VALUES('litebytz@enron.com', '', 'Lite Bytz RSVP', ' +This week''s Lite Bytz presentation will feature the following TOOLZ speaker: + +Richard McDougall +Solaris 8 +Thursday, June 7, 2001 + +If you have not already signed up, please RSVP via email to litebytz@enron.com by the end of the day Tuesday, June 5, 2001. + +*Remember: this is now a Brown Bag Event--so bring your lunch and we will provide cookies and drinks. + +Click below for more details. + +http://home.enron.com:84/messaging/litebytztoolzprint.jpg'); + COMMIT; + } +} {} + +############################################################################### +# Everything above just builds an interesting test database. The actual +# tests come after this comment. +############################################################################### + +do_test fts2c-1.2 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark' + } +} {6 17 25 38 40 42 73 74} +do_test fts2c-1.3 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan' + } +} {24 40} +do_test fts2c-1.4 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark susan' + } +} {40} +do_test fts2c-1.5 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan mark' + } +} {40} +do_test fts2c-1.6 { + execsql { + SELECT rowid FROM email WHERE email MATCH '"mark susan"' + } +} {} +do_test fts2c-1.7 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark -susan' + } +} {6 17 25 38 42 73 74} +do_test fts2c-1.8 { + execsql { + SELECT rowid FROM email WHERE email MATCH '-mark susan' + } +} {24} +do_test fts2c-1.9 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark OR susan' + } +} {6 17 24 25 38 40 42 73 74} + +# Some simple tests of the automatic "offsets(email)" column. In the sample +# data set above, only one message, number 20, contains the words +# "gas" and "reminder" in both body and subject. +# +do_test fts2c-2.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts2c-2.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 1 54 8}} +do_test fts2c-2.3 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'body:gas reminder' + } +} {20 {2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts2c-2.4 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE subject MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8}} +do_test fts2c-2.5 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'gas reminder' + } +} {20 {3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} + +# Document 32 contains 5 instances of the world "child". But only +# 3 of them are paired with "product". Make sure only those instances +# that match the phrase appear in the offsets(email) list. 
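# Each hit in an offsets() value is a group of four integers: column number,
# query-term number, byte offset, and byte length. In the fts2c-2.2 result
# above, for example, "2 0 42 3" is a 3-byte hit for term 0 ('gas') at byte 42
# of column 2 (the subject column), and "2 1 54 8" is the matching 'reminder'
# hit. A throwaway helper (a sketch only, not used by this test file) can make
# the longer lists below easier to read:
proc decode_offsets {offsets} {
  set hits {}
  foreach {col term off len} $offsets {
    lappend hits [list $col $term $off $len]
  }
  return $hits
}
# e.g. [decode_offsets {2 0 42 3 2 1 54 8}] => {2 0 42 3} {2 1 54 8}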
+# +do_test fts2c-3.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'child product' AND +rowid=32 + } +} {32 {3 0 94 5 3 0 114 5 3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7 3 1 493 7}} +do_test fts2c-3.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH '"child product"' + } +} {32 {3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7}} + +# Snippet generator tests +# +do_test fts2c-4.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {{Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder}} +do_test fts2c-4.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'christmas candlelight' + } +} {{... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. ...}} + +do_test fts2c-4.3 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture Deal Sheet ... intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations ...}} +do_test fts2c-4.4 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture <<>> <<>> intent + Review Enron asset base for <<>> <<>>/ licensing + Contract negotiations }} +do_test fts2c-4.5 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'first things' + } +} {{Re: <<>> Polish Deal! Congrats! <<>> seem to be building rapidly now on the }} +do_test fts2c-4.6 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'chris is here' + } +} {{chris.germany@enron.com ... Sounds good to me. I bet this is next to the Warick?? Hotel. ... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. ...}} +do_test fts2c-4.7 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH '"pursuant to"' + } +} {{Erin: + +Pursuant to your request, attached are the Schedule to ...}} +do_test fts2c-4.8 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'ancillary load davis' + } +} {{pete.davis@enron.com ... Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING ...}} + +# Combinations of AND and OR operators: +# +do_test fts2c-5.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'questar enron OR com' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} +do_test fts2c-5.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'enron OR com questar' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2d.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2d.test new file mode 100644 index 0000000..d8090d8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2d.test @@ -0,0 +1,65 @@ +# 2006 October 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS2 module, and in particular +# the Porter stemmer. +# +# $Id: fts2d.test,v 1.1 2006/10/19 23:36:26 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +do_test fts2d-1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts2(content, tokenize porter); + INSERT INTO t1(rowid, content) VALUES(1, 'running and jumping'); + SELECT rowid FROM t1 WHERE content MATCH 'run jump'; + } +} {1} +do_test fts2d-1.2 { + execsql { + SELECT snippet(t1) FROM t1 WHERE t1 MATCH 'run jump'; + } +} {{running and jumping}} +do_test fts2d-1.3 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(2, 'abcdefghijklmnopqrstuvwyxz'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts2d-1.4 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijXXXXqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts2d-1.5 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(3, 'The value is 123456789'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123789' + } +} {3 {The value is 123456789}} +do_test fts2d-1.6 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123000000789' + } +} {3 {The value is 123456789}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2e.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2e.test new file mode 100644 index 0000000..71845ac --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2e.test @@ -0,0 +1,85 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing deletions in the FTS2 module. +# +# $Id: fts2e.test,v 1.1 2006/10/19 23:36:26 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 30 INSERT and +# DELETE statements, so that we'll test both the segmentMerge() merge +# (over the first 16) and the termSelect() merge (over the level-1 +# segment and 14 level-0 segments). 
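# In other words, word i of {one two three four five} appears in a row exactly
# when bit i of that row's rowid is set. A small sketch of the encoding (for
# illustration only; rowid_to_content is not used by the tests):
proc rowid_to_content {rowid} {
  set words {one two three four five}
  set content {}
  for {set i 0} {$i < [llength $words]} {incr i} {
    if {$rowid & (1 << $i)} {lappend content [lindex $words $i]}
  }
  return [join $content " "]
}
# e.g. [rowid_to_content 11] => "one two four", matching the INSERT for rowid 11.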
+db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + DELETE FROM t1 WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + DELETE FROM t1 WHERE rowid = 22; +} + +do_test fts2f-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {14} + +do_test fts2e-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {3 5 9 11 15 17 21} + +do_test fts2e-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 11 14 15 18} + +do_test fts2e-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {5 6 12 14 15 20 21} + +do_test fts2e-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {8 9 11 12 14 15} + +do_test fts2e-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {17 18 20 21} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2f.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2f.test new file mode 100644 index 0000000..49cff14 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2f.test @@ -0,0 +1,90 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing updates in the FTS2 module. +# +# $Id: fts2f.test,v 1.2 2007/02/23 00:14:06 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 31 INSERT, +# UPDATE, and DELETE statements, so that we'll test both the +# segmentMerge() merge (over the first 16) and the termSelect() merge +# (over the level-1 segment and 15 level-0 segments). 
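# Unlike the previous file, the contents here are not a pure function of the
# rowid bitmask: the UPDATE statements below rewrite rows 1, 8 and 15 to
# 'update one three', 'update two five' and 'update' respectively. That is why
# fts2f-2.0 expects exactly {1 8 15} for MATCH 'update', and why rows 8 and 15
# are missing from the 'four' result in fts2f-2.4.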
+db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + UPDATE t1 SET content = 'update one three' WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + UPDATE t1 SET content = 'update two five' WHERE rowid = 8; + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + UPDATE t1 SET content = 'update' WHERE rowid = 15; +} + +do_test fts2f-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {16} + +do_test fts2f-2.0 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'update'} +} {1 8 15} + +do_test fts2f-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 9 11 17 21} + +do_test fts2f-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 8 11 14 18 22} + +do_test fts2f-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {1 5 6 12 14 20 21 22} + +do_test fts2f-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {9 11 12 14} + +do_test fts2f-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {8 17 18 20 21 22} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2g.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2g.test new file mode 100644 index 0000000..e2caca2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2g.test @@ -0,0 +1,87 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing handling of edge cases for various doclist +# merging functions in the FTS2 module query logic. +# +# $Id: fts2g.test,v 1.2 2007/04/19 18:36:32 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'this is a test'); + INSERT INTO t1 (rowid, content) VALUES(2, 'also a test'); +} + +# No hits at all. Returns empty doclists from termSelect(). 
+do_test fts2g-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +# Empty left in docListExceptMerge(). +do_test fts2g-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this something'} +} {} + +# Empty right in docListExceptMerge(). +do_test fts2g-1.3 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this -something'} +} {1} + +# Empty left in docListPhraseMerge(). +do_test fts2g-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"this something"'} +} {} + +# Empty right in docListPhraseMerge(). +do_test fts2g-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"something is"'} +} {} + +# Empty left in docListOrMerge(). +do_test fts2g-1.6 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something OR this'} +} {1} + +# Empty right in docListOrMerge(). +do_test fts2g-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR something'} +} {1} + +# Empty left in docListAndMerge(). +do_test fts2g-1.8 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something this'} +} {} + +# Empty right in docListAndMerge(). +do_test fts2g-1.9 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this something'} +} {} + +# No support for all-except queries. +do_test fts2g-1.10 { + catchsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this -something'} +} {1 {SQL logic error or missing database}} + +# Test that docListOrMerge() correctly handles reaching the end of one +# doclist before it reaches the end of the other. +do_test fts2g-1.11 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR also'} +} {1 2} +do_test fts2g-1.12 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'also OR this'} +} {1 2} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2h.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2h.test new file mode 100644 index 0000000..72561d8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2h.test @@ -0,0 +1,76 @@ +# 2006 October 31 (scaaarey) +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# here is testing correct handling of excessively long terms. +# +# $Id: fts2h.test,v 1.1 2006/11/29 21:03:01 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Generate a term of len copies of char. +proc bigterm {char len} { + for {set term ""} {$len>0} {incr len -1} { + append term $char + } + return $term +} + +# Generate a document of bigterms based on characters from the list +# chars. +proc bigtermdoc {chars len} { + set doc "" + foreach char $chars { + append doc " " [bigterm $char $len] + } + return $doc +} + +set len 5000 +set doc1 [bigtermdoc {a b c d} $len] +set doc2 [bigtermdoc {b d e f} $len] +set doc3 [bigtermdoc {a c e} $len] + +set aterm [bigterm a $len] +set bterm [bigterm b $len] +set xterm [bigterm x $len] + +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, $doc1); + INSERT INTO t1 (rowid, content) VALUES(2, $doc2); + INSERT INTO t1 (rowid, content) VALUES(3, $doc3); +} + +# No hits at all. Returns empty doclists from termSelect(). 
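# (For the big-term queries that follow: $doc1 holds the a/b/c/d terms, $doc2
# the b/d/e/f terms and $doc3 the a/c/e terms, so $aterm should match rows 1
# and 3, $xterm should match nothing, and the phrase "$aterm $bterm" should
# match only row 1.)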
+do_test fts2h-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +do_test fts2h-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $aterm} +} {1 3} + +do_test fts2h-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $xterm} +} {} + +do_test fts2h-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '$aterm -$xterm'" +} {1 3} + +do_test fts2h-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"$aterm $bterm\"'" +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2i.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2i.test new file mode 100644 index 0000000..e732e6a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2i.test @@ -0,0 +1,87 @@ +# 2007 January 17 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite fts2 library. The +# focus here is testing handling of UPDATE when using UTF-16-encoded +# databases. +# +# $Id: fts2i.test,v 1.2 2007/01/24 03:46:35 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +# NOTE(shess) Copied from capi3.test. +proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +db eval { + PRAGMA encoding = "UTF-16le"; + CREATE VIRTUAL TABLE t1 USING fts2(content); +} + +do_test fts2i-1.0 { + execsql {PRAGMA encoding} +} {UTF-16le} + +do_test fts2i-1.1 { + execsql {INSERT INTO t1 (rowid, content) VALUES(1, 'one')} + execsql {SELECT content FROM t1 WHERE rowid = 1} +} {one} + +do_test fts2i-1.2 { + set sql "INSERT INTO t1 (rowid, content) VALUES(2, 'two')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 2} +} {two} + +do_test fts2i-1.3 { + set sql "INSERT INTO t1 (rowid, content) VALUES(3, 'three')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'trois' WHERE rowid = 3" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 3} +} {trois} + +do_test fts2i-1.4 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(4, 'four')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 4} +} {four} + +do_test fts2i-1.5 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(5, 'five')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'cinq' WHERE rowid = 5" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 5} +} {cinq} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2j.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2j.test new file mode 100644 index 0000000..b8a89b2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2j.test @@ -0,0 +1,89 @@ +# 2007 February 6 +# +# The author disclaims copyright to this source code. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# tests creating fts2 tables in an attached database. +# +# $Id: fts2j.test,v 1.1 2007/02/07 01:01:18 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Clean up anything left over from a previous pass. +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db + +db eval { + CREATE VIRTUAL TABLE t3 USING fts2(content); + INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); +} + +db2 eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# This has always worked because the t1_* tables used by fts2 will be +# the defaults. +do_test fts2j-1.1 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + SELECT rowid FROM t1 WHERE t1 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +# Make certain we're detached if there was an error. +catch {db eval {DETACH DATABASE two}} + +# In older code, this appears to work fine, but the t2_* tables used +# by fts2 will be created in database 'main' instead of database +# 'two'. It appears to work fine because the tables end up being the +# defaults, but obviously is badly broken if you hope to use things +# other than in the exact same ATTACH setup. +do_test fts2j-1.2 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + CREATE VIRTUAL TABLE two.t2 USING fts2(content); + INSERT INTO t2 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t2 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t2 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM t2 WHERE t2 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +catch {db eval {DETACH DATABASE two}} + +# In older code, this broke because the fts2 code attempted to create +# t3_* tables in database 'main', but they already existed. Normally +# this wouldn't happen without t3 itself existing, in which case the +# fts2 code would never be called in the first place. +do_test fts2j-1.3 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + + CREATE VIRTUAL TABLE two.t3 USING fts2(content); + INSERT INTO two.t3 (rowid, content) VALUES(2, "hello there"); + INSERT INTO two.t3 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM two.t3 WHERE t3 MATCH 'hello'; + + DETACH DATABASE two; + } db2 +} {2} +catch {db eval {DETACH DATABASE two}} + +catch {db2 close} +file delete -force test2.db + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2k.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2k.test new file mode 100644 index 0000000..e7d5f0d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2k.test @@ -0,0 +1,105 @@ +# 2007 March 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. These +# make sure that fts2 insertion buffering is fully transparent when +# using transactions. +# +# $Id: fts2k.test,v 1.2 2007/08/10 23:47:04 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. 
+ifcapable !fts2 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# Test that possibly-buffered inserts went through after commit. +do_test fts2k-1.1 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(4, "false world"); + INSERT INTO t1 (rowid, content) VALUES(5, "false door"); + COMMIT TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4} + +# Test that buffered inserts are seen by selects in the same +# transaction. +do_test fts2k-1.2 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(6, "another world"); + INSERT INTO t1 (rowid, content) VALUES(7, "another test"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + COMMIT TRANSACTION; + } +} {1 3 4 6} + +# Test that buffered inserts are seen within a transaction. This is +# really the same test as 1.2. +do_test fts2k-1.3 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(8, "second world"); + INSERT INTO t1 (rowid, content) VALUES(9, "second sight"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + ROLLBACK TRANSACTION; + } +} {1 3 4 6 8} + +# Double-check that the previous result doesn't persist past the +# rollback! +do_test fts2k-1.4 { + execsql { + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test it all together. +do_test fts2k-1.5 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(10, "second world"); + INSERT INTO t1 (rowid, content) VALUES(11, "second sight"); + ROLLBACK TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test that the obvious case works. +do_test fts2k-1.6 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(12, "third world"); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'third'; + } +} {12} + +# This is exactly the same as the previous test, except that older +# code loses the INSERT due to an SQLITE_SCHEMA error. +do_test fts2k-1.7 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(13, "third dimension"); + CREATE TABLE x (c); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'dimension'; + } +} {13} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2l.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2l.test new file mode 100644 index 0000000..739eb50 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2l.test @@ -0,0 +1,69 @@ +# 2007 March 28 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing isspace/isalnum/tolower problems with the +# FTS2 module. Unfortunately, this code isn't a really principled set +# of tests, because it's impossible to know where new uses of these +# functions might appear. +# +# $Id: fts2l.test,v 1.1 2007/03/29 16:30:41 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# Tests that startsWith() (calls isspace, tolower, isalnum) can handle +# hi-bit chars. parseSpec() also calls isalnum here. 
+do_test fts2l-1.1 { + execsql "CREATE VIRTUAL TABLE t1 USING fts2(content, \x80)" +} {} + +# Additionally tests isspace() call in getToken(), and isalnum() call +# in tokenListToIdList(). +do_test fts2l-1.2 { + catch { + execsql "CREATE VIRTUAL TABLE t2 USING fts2(content, tokenize \x80)" + } + sqlite3_errmsg $DB +} "unknown tokenizer: \x80" + +# Additionally test final isalnum() in startsWith(). +do_test fts2l-1.3 { + execsql "CREATE VIRTUAL TABLE t3 USING fts2(content, tokenize\x80)" +} {} + +# The snippet-generation code has calls to isspace() which are sort of +# hard to get to. It finds convenient breakpoints by starting ~40 +# chars before and after the matched term, and scanning ~10 chars +# around that position for isspace() characters. The long word with +# embedded hi-bit chars causes one of these isspace() calls to be +# exercised. The version with a couple extra spaces should cause the +# other isspace() call to be exercised. [Both cases have been tested +# in the debugger, but I'm hoping to continue to catch it if simple +# constant changes change things slightly. +# +# The trailing and leading hi-bit chars help with code which tests for +# isspace() to coalesce multiple spaces. + +set word "\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80" +set phrase1 "$word $word $word target $word $word $word" +set phrase2 "$word $word $word target $word $word $word" + +db eval {CREATE VIRTUAL TABLE t4 USING fts2(content)} +db eval "INSERT INTO t4 (content) VALUES ('$phrase1')" +db eval "INSERT INTO t4 (content) VALUES ('$phrase2')" + +do_test fts2l-1.4 { + execsql {SELECT rowid, length(snippet(t4)) FROM t4 WHERE t4 MATCH 'target'} +} {1 111 2 117} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2m.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2m.test new file mode 100644 index 0000000..6552637 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2m.test @@ -0,0 +1,65 @@ +# 2007 April 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. fts2 +# DELETE handling assumed all fields were non-null. This was not +# the intention at all. +# +# $Id: fts2m.test,v 1.1 2007/04/09 20:45:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. 
+ifcapable !fts2 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(col_a, col_b); + + INSERT INTO t1(rowid, col_a, col_b) VALUES(1, 'testing', 'testing'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(2, 'only a', null); + INSERT INTO t1(rowid, col_a, col_b) VALUES(3, null, 'only b'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(4, null, null); +} + +do_test fts2m-1.0 { + execsql { + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {2 2 4} + +do_test fts2m-1.1 { + execsql { + DELETE FROM t1 WHERE rowid = 1; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {1 1 3} + +do_test fts2m-1.2 { + execsql { + DELETE FROM t1 WHERE rowid = 2; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 1 2} + +do_test fts2m-1.3 { + execsql { + DELETE FROM t1 WHERE rowid = 3; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 1} + +do_test fts2m-1.4 { + execsql { + DELETE FROM t1 WHERE rowid = 4; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2n.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2n.test new file mode 100644 index 0000000..4bcbeef --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2n.test @@ -0,0 +1,196 @@ +# 2007 April 26 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements tests for prefix-searching in the fts2 +# component of the SQLite library. +# +# $Id: fts2n.test,v 1.1 2007/05/01 18:25:53 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +# A large string to prime the pump with. +set text { + Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas + iaculis mollis ipsum. Praesent rhoncus placerat justo. Duis non quam + sed turpis posuere placerat. Curabitur et lorem in lorem porttitor + aliquet. Pellentesque bibendum tincidunt diam. Vestibulum blandit + ante nec elit. In sapien diam, facilisis eget, dictum sed, viverra + at, felis. Vestibulum magna. Sed magna dolor, vestibulum rhoncus, + ornare vel, vulputate sit amet, felis. Integer malesuada, tellus at + luctus gravida, diam nunc porta nibh, nec imperdiet massa metus eu + lectus. Aliquam nisi. Nunc fringilla nulla at lectus. Suspendisse + potenti. Cum sociis natoque penatibus et magnis dis parturient + montes, nascetur ridiculus mus. Pellentesque odio nulla, feugiat eu, + suscipit nec, consequat quis, risus. +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(c); + + INSERT INTO t1(rowid, c) VALUES(1, $text); + INSERT INTO t1(rowid, c) VALUES(2, 'Another lovely row'); +} + +# Exact match +do_test fts2n-1.1 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem'" +} {1} + +# And a prefix +do_test fts2n-1.2 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore*'" +} {1} + +# Prefix includes exact match +do_test fts2n-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem*'" +} {1} + +# Make certain everything isn't considered a prefix! +do_test fts2n-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore'" +} {} + +# Prefix across multiple rows. +do_test fts2n-1.5 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo*'" +} {1 2} + +# Likewise, with multiple hits in one document. 
+do_test fts2n-1.6 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts2n-1.7 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lov*'" +} {2} + +# * not at end is dropped. +do_test fts2n-1.8 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo *'" +} {} + +# Stand-alone * is dropped. +do_test fts2n-1.9 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '*'" +} {} + +# Phrase-query prefix. +do_test fts2n-1.10 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r*\"'" +} {2} +do_test fts2n-1.11 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r\"'" +} {} + +# Phrase query with multiple prefix matches. +do_test fts2n-1.12 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l*\"'" +} {1 2} + +# Phrase query with multiple prefix matches. +do_test fts2n-1.13 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l* row\"'" +} {2} + + + + +# Test across updates (and, by implication, deletes). + +# Version of text without "lorem". +regsub -all {[Ll]orem} $text '' ntext + +db eval { + CREATE VIRTUAL TABLE t2 USING fts2(c); + + INSERT INTO t2(rowid, c) VALUES(1, $text); + INSERT INTO t2(rowid, c) VALUES(2, 'Another lovely row'); + UPDATE t2 SET c = $ntext WHERE rowid = 1; +} + +# Can't see lorem as an exact match. +do_test fts2n-2.1 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lorem'" +} {} + +# Can't see a prefix of lorem, either. +do_test fts2n-2.2 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lore*'" +} {} + +# Can see lovely in the other document. +do_test fts2n-2.3 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lo*'" +} {2} + +# Can still see other hits. +do_test fts2n-2.4 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts2n-2.5 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lov*'" +} {2} + + + +# Test with a segment which will have multiple levels in the tree. + +# Build a big document with lots of unique terms. +set bigtext $text +foreach c {a b c d e} { + regsub -all {[A-Za-z]+} $bigtext "&$c" t + append bigtext $t +} + +# Populate a table with many copies of the big document, so that we +# can test the number of hits found. Populate $ret with the expected +# hit counts for each row. offsets() returns 4 elements for every +# hit. We'll have 6 hits for row 1, 1 for row 2, and 6*(2^5)==192 for +# $bigtext. +set ret {6 1} +db eval { + BEGIN; + CREATE VIRTUAL TABLE t3 USING fts2(c); + + INSERT INTO t3(rowid, c) VALUES(1, $text); + INSERT INTO t3(rowid, c) VALUES(2, 'Another lovely row'); +} +for {set i 0} {$i<100} {incr i} { + db eval {INSERT INTO t3(rowid, c) VALUES(3+$i, $bigtext)} + lappend ret 192 +} +db eval {COMMIT;} + +# Test that we get the expected number of hits. +do_test fts2n-3.1 { + set t {} + db eval {SELECT offsets(t3) as o FROM t3 WHERE t3 MATCH 'l*'} { + set l [llength $o] + lappend t [expr {$l/4}] + } + set t +} $ret + +# TODO(shess) It would be useful to test a couple edge cases, but I +# don't know if we have the precision to manage it from here at this +# time. Prefix hits can cross leaves, which the code above _should_ +# hit by virtue of size. There are two variations on this. If the +# tree is 2 levels high, the code will find the leaf-node extent +# directly, but if it's higher, the code will have to follow two +# separate interior branches down the tree. Both should be tested. 
+ +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2o.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2o.test new file mode 100644 index 0000000..5a33c45 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2o.test @@ -0,0 +1,169 @@ +# 2007 June 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS2 module. +# +# $Id: fts2o.test,v 1.4 2007/07/02 10:16:50 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is not defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +#--------------------------------------------------------------------- +# These tests, fts2o-1.*, test that ticket #2429 is fixed. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts2(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); +} +do_test fts2o-1.1 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE c MATCH 'four'; + } +} {1 {one four two}} +do_test fts2o-1.2 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE b MATCH 'four'; + } +} {1 {one four}} +do_test fts2o-1.3 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE a MATCH 'four'; + } +} {1 {one three four}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts2 table. +# +do_test fts2o-2.1 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {t1 t1_content t1_segments t1_segdir} +do_test fts2o-2.2 { + execsql { ALTER TABLE t1 RENAME to fts_t1; } +} {} +do_test fts2o-2.3 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts2o-2.4 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir} + +# See what happens when renaming the fts2 table fails. +# +do_test fts2o-2.5 { + catchsql { + CREATE TABLE t1_segdir(a, b, c); + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts2o-2.6 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts2o-2.7 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} + +# See what happens when renaming the fts2 table fails inside a transaction. 
+# +do_test fts2o-2.8 { + execsql { + BEGIN; + INSERT INTO fts_t1(a, b, c) VALUES('one two three', 'one four', 'one two'); + } +} {} +do_test fts2o-2.9 { + catchsql { + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts2o-2.10 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts2o-2.11 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} +do_test fts2o-2.12 { + execsql COMMIT + execsql {SELECT a FROM fts_t1} +} {{one three four} {one two three}} +do_test fts2o-2.12 { + execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } +} {{one three four} {one four} {one four two}} + +#------------------------------------------------------------------- +# Close, delete and reopen the database. The following test should +# be run on an initially empty db. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test fts2o-3.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts2(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two}} + +# This test was crashing at one point. +# +do_test fts2o-3.2 { + execsql { + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + CREATE TABLE t3(a, b, c); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two} {one three four} {one four} {one two}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts2 table in an attached +# database. +# +file delete -force test2.db test2.db-journal + +do_test fts2o-3.1 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.t1 USING fts2(a, b, c); + INSERT INTO aux.t1(a, b, c) VALUES( + 'neung song sahm', 'neung see', 'neung see song' + ); + } +} {} + +do_test fts2o-3.2 { + execsql { SELECT a, b, c FROM aux.t1 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts2o-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +do_test fts2o-3.4 { + execsql { ALTER TABLE aux.t1 RENAME TO t2 } +} {} + +do_test fts2o-3.2 { + execsql { SELECT a, b, c FROM t2 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts2o-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts2token.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2token.test new file mode 100644 index 0000000..de5f94d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts2token.test @@ -0,0 +1,174 @@ +# 2007 June 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the pluggable tokeniser feature of the +# FTS2 module. 
+# +# $Id: fts2token.test,v 1.3 2007/06/25 12:05:40 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +proc escape_string {str} { + set out "" + foreach char [split $str ""] { + scan $char %c i + if {$i<=127} { + append out $char + } else { + append out [format {\x%.4x} $i] + } + } + set out +} + +#-------------------------------------------------------------------------- +# Test cases fts2token-1.* are the warm-body test for the SQL scalar +# function fts2_tokenizer(). The procedure is as follows: +# +# 1: Verify that there is no such fts2 tokenizer as 'blah'. +# +# 2: Query for the built-in tokenizer 'simple'. Insert a copy of the +# retrieved value as tokenizer 'blah'. +# +# 3: Test that the value returned for tokenizer 'blah' is now the +# same as that retrieved for 'simple'. +# +# 4: Test that it is now possible to create an fts2 table using +# tokenizer 'blah' (it was not possible in step 1). +# +# 5: Test that the table created to use tokenizer 'blah' is usable. +# +do_test fts2token-1.1 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts2(content, tokenize blah); + } +} {1 {unknown tokenizer: blah}} +do_test fts2token-1.2 { + execsql { + SELECT fts2_tokenizer('blah', fts2_tokenizer('simple')) IS NULL; + } +} {0} +do_test fts2token-1.3 { + execsql { + SELECT fts2_tokenizer('blah') == fts2_tokenizer('simple'); + } +} {1} +do_test fts2token-1.4 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts2(content, tokenize blah); + } +} {0 {}} +do_test fts2token-1.5 { + execsql { + INSERT INTO t1(content) VALUES('There was movement at the station'); + INSERT INTO t1(content) VALUES('For the word has passed around'); + INSERT INTO t1(content) VALUES('That the colt from ol regret had got away'); + SELECT content FROM t1 WHERE content MATCH 'movement' + } +} {{There was movement at the station}} + +#-------------------------------------------------------------------------- +# Test cases fts2token-2.* test error cases in the scalar function based +# API for getting and setting tokenizers. +# +do_test fts2token-2.1 { + catchsql { + SELECT fts2_tokenizer('nosuchtokenizer'); + } +} {1 {unknown tokenizer: nosuchtokenizer}} + +#-------------------------------------------------------------------------- +# Test cases fts2token-3.* test the three built-in tokenizers with a +# simple input string via the built-in test function. This is as much +# to test the test function as the tokenizer implementations. +# +do_test fts2token-3.1 { + execsql { + SELECT fts2_tokenizer_test('simple', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +do_test fts2token-3.2 { + execsql { + SELECT fts2_tokenizer_test('porter', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +ifcapable icu { + do_test fts2token-3.3 { + execsql { + SELECT fts2_tokenizer_test('icu', 'I don''t see how'); + } + } {{0 i I 1 don't don't 2 see see 3 how how}} +} + +#-------------------------------------------------------------------------- +# Test cases fts2token-4.* test the ICU tokenizer. In practice, this +# tokenizer only has two modes - "thai" and "everybody else". Some other +# Asian languages (Lao, Khmer etc.) require the same special treatment as +# Thai, but ICU doesn't support them yet. 
+# +ifcapable icu { + + proc do_icu_test {name locale input output} { + set ::out [db eval { SELECT fts2_tokenizer_test('icu', $locale, $input) }] + do_test $name { + lindex $::out 0 + } $output + } + + do_icu_test fts2token-4.1 en_US {} {} + do_icu_test fts2token-4.2 en_US {Test cases fts2} [list \ + 0 test Test 1 cases cases 2 fts2 fts2 + ] + + # The following test shows that ICU is smart enough to recognise + # Thai chararacters, even when the locale is set to English/United + # States. + # + set input "\u0e2d\u0e30\u0e44\u0e23\u0e19\u0e30\u0e04\u0e23\u0e31\u0e1a" + set output "0 \u0e2d\u0e30\u0e44\u0e23 \u0e2d\u0e30\u0e44\u0e23 " + append output "1 \u0e19\u0e30 \u0e19\u0e30 " + append output "2 \u0e04\u0e23\u0e31\u0e1a \u0e04\u0e23\u0e31\u0e1a" + + do_icu_test fts2token-4.3 th_TH $input $output + do_icu_test fts2token-4.4 en_US $input $output + + # ICU handles an unknown locale by falling back to the default. + # So this is not an error. + do_icu_test fts2token-4.5 MiddleOfTheOcean $input $output + + set longtoken "AReallyReallyLongTokenOneThatWillSurelyRequire" + append longtoken "AReallocInTheIcuTokenizerCode" + + set input "short tokens then " + append input $longtoken + set output "0 short short " + append output "1 tokens tokens " + append output "2 then then " + append output "3 [string tolower $longtoken] $longtoken" + + do_icu_test fts2token-4.6 MiddleOfTheOcean $input $output + do_icu_test fts2token-4.7 th_TH $input $output + do_icu_test fts2token-4.8 en_US $input $output +} + +do_test fts2token-internal { + execsql { SELECT fts2_tokenizer_internal_test() } +} {ok} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aa.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aa.test new file mode 100644 index 0000000..46304fb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aa.test @@ -0,0 +1,202 @@ +# 2006 September 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3aa.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. 
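+#
+# For example (explanatory note added for this document): bit 0 of the
+# rowid stands for 'one', bit 1 for 'two', bit 2 for 'three', bit 3 for
+# 'four' and bit 4 for 'five'.  So rowid 5 (binary 00101) is the row
+# 'one three', and rowid 31 (binary 11111) is 'one two three four five'.
+# That is why a query such as
+#
+#   SELECT rowid FROM t1 WHERE content MATCH 'one';
+#
+# is expected to return exactly the odd rowids 1 3 5 ... 31 below.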
+# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1(content) VALUES('one'); + INSERT INTO t1(content) VALUES('two'); + INSERT INTO t1(content) VALUES('one two'); + INSERT INTO t1(content) VALUES('three'); + INSERT INTO t1(content) VALUES('one three'); + INSERT INTO t1(content) VALUES('two three'); + INSERT INTO t1(content) VALUES('one two three'); + INSERT INTO t1(content) VALUES('four'); + INSERT INTO t1(content) VALUES('one four'); + INSERT INTO t1(content) VALUES('two four'); + INSERT INTO t1(content) VALUES('one two four'); + INSERT INTO t1(content) VALUES('three four'); + INSERT INTO t1(content) VALUES('one three four'); + INSERT INTO t1(content) VALUES('two three four'); + INSERT INTO t1(content) VALUES('one two three four'); + INSERT INTO t1(content) VALUES('five'); + INSERT INTO t1(content) VALUES('one five'); + INSERT INTO t1(content) VALUES('two five'); + INSERT INTO t1(content) VALUES('one two five'); + INSERT INTO t1(content) VALUES('three five'); + INSERT INTO t1(content) VALUES('one three five'); + INSERT INTO t1(content) VALUES('two three five'); + INSERT INTO t1(content) VALUES('one two three five'); + INSERT INTO t1(content) VALUES('four five'); + INSERT INTO t1(content) VALUES('one four five'); + INSERT INTO t1(content) VALUES('two four five'); + INSERT INTO t1(content) VALUES('one two four five'); + INSERT INTO t1(content) VALUES('three four five'); + INSERT INTO t1(content) VALUES('one three four five'); + INSERT INTO t1(content) VALUES('two three four five'); + INSERT INTO t1(content) VALUES('one two three four five'); +} + +do_test fts3aa-1.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-1.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-1.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-1.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two three'} +} {7 15 23 31} +do_test fts3aa-1.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one three two'} +} {7 15 23 31} +do_test fts3aa-1.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two three one'} +} {7 15 23 31} +do_test fts3aa-1.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one three'} +} {7 15 23 31} +do_test fts3aa-1.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three one two'} +} {7 15 23 31} +do_test fts3aa-1.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three two one'} +} {7 15 23 31} +do_test fts3aa-1.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two THREE'} +} {7 15 23 31} +do_test fts3aa-1.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH ' ONE Two three '} +} {7 15 23 31} + +do_test fts3aa-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one"'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two"'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"two one"'} +} {} +do_test fts3aa-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three"'} +} {7 15 23 31} +do_test fts3aa-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two"'} +} {} +do_test fts3aa-2.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three four"'} +} {15 31} +do_test fts3aa-2.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three 
two four"'} +} {} +do_test fts3aa-2.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three five"'} +} {21} +do_test fts3aa-2.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" five'} +} {21 29} +do_test fts3aa-2.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three"'} +} {21 29} +do_test fts3aa-2.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three" four'} +} {29} +do_test fts3aa-2.12 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five four "one three"'} +} {29} +do_test fts3aa-2.13 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" four five'} +} {29} + +do_test fts3aa-3.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-3.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one -two'} +} {1 5 9 13 17 21 25 29} +do_test fts3aa-3.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '-two one'} +} {1 5 9 13 17 21 25 29} + +do_test fts3aa-4.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one OR two'} +} {1 2 3 5 6 7 9 10 11 13 14 15 17 18 19 21 22 23 25 26 27 29 30 31} +do_test fts3aa-4.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two" OR three'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts3aa-4.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR "one two"'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts3aa-4.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts3aa-4.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR two one'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts3aa-4.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three OR four'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} +do_test fts3aa-4.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two OR three OR four one'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} + +# Test the ability to handle NULL content +# +do_test fts3aa-5.1 { + execsql {INSERT INTO t1(content) VALUES(NULL)} +} {} +do_test fts3aa-5.2 { + set rowid [db last_insert_rowid] + execsql {SELECT content FROM t1 WHERE rowid=$rowid} +} {{}} +do_test fts3aa-5.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH NULL} +} {} + +# Test the ability to handle non-positive rowids +# +do_test fts3aa-6.0 { + execsql {INSERT INTO t1(rowid, content) VALUES(0, 'four five')} +} {} +do_test fts3aa-6.1 { + execsql {SELECT content FROM t1 WHERE rowid = 0} +} {{four five}} +do_test fts3aa-6.2 { + execsql {INSERT INTO t1(rowid, content) VALUES(-1, 'three four')} +} {} +do_test fts3aa-6.3 { + execsql {SELECT content FROM t1 WHERE rowid = -1} +} {{three four}} +do_test fts3aa-6.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'four'} +} {-1 0 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ab.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ab.test new file mode 100644 index 0000000..86124f7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ab.test @@ -0,0 +1,147 @@ +# 2006 September 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ab.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Fill the full-text index "t1" with phrases in english, spanish, +# and german. For the i-th row, fill in the names for the bits +# that are set in the value of i. The least significant bit is +# 1. For example, the value 5 is 101 in binary which will be +# converted to "one three" in english. +# +proc fill_multilanguage_fulltext_t1 {} { + set english {one two three four five} + set spanish {un dos tres cuatro cinco} + set german {eine zwei drei vier funf} + + for {set i 1} {$i<=31} {incr i} { + set cmd "INSERT INTO t1 VALUES" + set vset {} + foreach lang {english spanish german} { + set words {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend words [lindex [set $lang] $j]} + } + lappend vset "'$words'" + } + set sql "INSERT INTO t1(english,spanish,german) VALUES([join $vset ,])" + # puts $sql + db eval $sql + } +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(english,spanish,german); +} +fill_multilanguage_fulltext_t1 + +do_test fts3ab-1.1 { + execsql {SELECT rowid FROM t1 WHERE english MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3ab-1.2 { + execsql {SELECT rowid FROM t1 WHERE spanish MATCH 'one'} +} {} +do_test fts3ab-1.3 { + execsql {SELECT rowid FROM t1 WHERE german MATCH 'one'} +} {} +do_test fts3ab-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3ab-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one dos drei'} +} {7 15 23 31} +do_test fts3ab-1.6 { + execsql {SELECT english, spanish, german FROM t1 WHERE rowid=1} +} {one un eine} +do_test fts3ab-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"one un"'} +} {} + +do_test fts3ab-2.1 { + execsql { + CREATE VIRTUAL TABLE t2 USING fts3(from,to); + INSERT INTO t2([from],[to]) VALUES ('one two three', 'four five six'); + SELECT [from], [to] FROM t2 + } +} {{one two three} {four five six}} + + +# Compute an SQL string that contains the words one, two, three,... to +# describe bits set in the value $i. Only the lower 5 bits are examined. 
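+#
+# For example (explanatory note added for this document):
+#
+#   [wordset 5]   returns 'one three'            (bits 0 and 2 set)
+#   [wordset 6]   returns 'two three'            (bits 1 and 2 set)
+#   [wordset 30]  returns 'two three four five'  (bits 1 through 4 set)
+#
+# The single quotes are part of the return value, so each result can be
+# substituted directly into the INSERT statements built below.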
+# +proc wordset {i} { + set x {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend x [lindex {one two three four five} $j]} + } + return '$x' +} + +# Create a new FTS table with three columns: +# +# norm: words for the bits of rowid +# plusone: words for the bits of rowid+1 +# invert: words for the bits of ~rowid +# +db eval { + CREATE VIRTUAL TABLE t4 USING fts3([norm],'plusone',"invert"); +} +for {set i 1} {$i<=15} {incr i} { + set vset [list [wordset $i] [wordset [expr {$i+1}]] [wordset [expr {~$i}]]] + db eval "INSERT INTO t4(norm,plusone,invert) VALUES([join $vset ,]);" +} + +do_test fts3ab-4.1 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.2 { + execsql {SELECT rowid FROM t4 WHERE norm MATCH 'one'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.3 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'one'} +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15} +do_test fts3ab-4.4 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:one'} +} {2 4 6 8 10 12 14} +do_test fts3ab-4.5 { + execsql {SELECT rowid FROM t4 WHERE plusone MATCH 'one'} +} {2 4 6 8 10 12 14} +do_test fts3ab-4.6 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one plusone:two'} +} {1 5 9 13} +do_test fts3ab-4.7 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one two'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.8 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:two norm:one'} +} {1 5 9 13} +do_test fts3ab-4.9 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'two norm:one'} +} {1 3 5 7 9 11 13 15} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ac.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ac.test new file mode 100644 index 0000000..72e5410 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ac.test @@ -0,0 +1,1213 @@ +# 2006 September 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ac.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Create a table of sample email data. The data comes from email +# archives of Enron executives that was published as part of the +# litigation against that company. +# +do_test fts3ac-1.1 { + db eval { + CREATE VIRTUAL TABLE email USING fts3([from],[to],subject,body); + BEGIN TRANSACTION; +INSERT INTO email([from],[to],subject,body) VALUES('savita.puthigai@enron.com', 'traders.eol@enron.com, traders.eol@enron.com', 'EnronOnline- Change to Autohedge', 'Effective Monday, October 22, 2001 the following changes will be made to the Autohedge functionality on EnronOnline. + +The volume on the hedge will now respect the minimum volume and volume increment settings on the parent product. See rules below: + +? If the transaction volume on the child is less than half of the parent''s minimum volume no hedge will occur. +? 
If the transaction volume on the child is more than half the parent''s minimum volume but less than half the volume increment on the parent, the hedge will volume will be the parent''s minimum volume. +? For all other volumes, the same rounding rules will apply based on the volume increment on the parent product. + +Please see example below: + +Parent''s Settings: +Minimum: 5000 +Increment: 1000 + +Volume on Autohedge transaction Volume Hedged +1 - 2499 0 +2500 - 5499 5000 +5500 - 6499 6000'); +INSERT INTO email([from],[to],subject,body) VALUES('dana.davis@enron.com', 'laynie.east@enron.com, lisa.king@enron.com, lisa.best@enron.com,', 'Leaving Early', 'FYI: +If it''s ok with everyone''s needs, I would like to leave @4pm. If you think +you will need my assistance past the 4 o''clock hour just let me know; I''ll +be more than willing to stay.'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'louise.kitchen@enron.com', '<> - CC02.06.02', 'The following expense report is ready for approval: + +Employee Name: Christopher F. Calger +Status last changed by: Mollie E. Gustafson Ms +Expense Report Name: CC02.06.02 +Report Total: $3,972.93 +Amount Due Employee: $3,972.93 + + +To approve this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('jeff.duff@enron.com', 'julie.johnson@enron.com', 'Work request', 'Julie, + +Could you print off the current work request report by 1:30 today? + +Gentlemen, + +I''d like to review this today at 1:30 in our office. Also, could you provide +me with your activity reports so I can have Julie enter this information. + +JD'); +INSERT INTO email([from],[to],subject,body) VALUES('v.weldon@enron.com', 'gary.l.carrier@usa.dupont.com, scott.joyce@bankofamerica.com', 'Enron News', 'This could turn into something big.... +http://biz.yahoo.com/rf/010129/n29305829.html'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.haedicke@enron.com', 'paul.simons@enron.com', 'Re: First Polish Deal!', 'Congrats! Things seem to be building rapidly now on the Continent. Mark'); +INSERT INTO email([from],[to],subject,body) VALUES('e..carter@enron.com', 't..robinson@enron.com', 'FW: Producers Newsletter 9-24-2001', ' +The producer lumber pricing sheet. + -----Original Message----- +From: Johnson, Jay +Sent: Tuesday, October 16, 2001 3:42 PM +To: Carter, Karen E. +Subject: FW: Producers Newsletter 9-24-2001 + + + + -----Original Message----- +From: Daigre, Sergai +Sent: Friday, September 21, 2001 8:33 PM +Subject: Producers Newsletter 9-24-2001 + + '); +INSERT INTO email([from],[to],subject,body) VALUES('david.delainey@enron.com', 'kenneth.lay@enron.com', 'Greater Houston Partnership', 'Ken, in response to the letter from Mr Miguel San Juan, my suggestion would +be to offer up the Falcon for their use; however, given the tight time frame +and your recent visit with Mr. Fox that it would be difficult for either you +or me to participate. + +I spoke to Max and he agrees with this approach. + +I hope this meets with your approval. + +Regards +Delainey'); +INSERT INTO email([from],[to],subject,body) VALUES('lachandra.fenceroy@enron.com', 'lindy.donoho@enron.com', 'FW: Bus Applications Meeting Follow Up', 'Lindy, + +Here is the original memo we discussed earlier. Please provide any information that you may have. + +Your cooperation is greatly appreciated. 
+ +Thanks, + +lachandra.fenceroy@enron.com +713.853.3884 +877.498.3401 Pager + + -----Original Message----- +From: Bisbee, Joanne +Sent: Wednesday, September 26, 2001 7:50 AM +To: Fenceroy, LaChandra +Subject: FW: Bus Applications Meeting Follow Up + +Lachandra, Please get with David Duff today and see what this is about. Who are our TW accounting business users? + + -----Original Message----- +From: Koh, Wendy +Sent: Tuesday, September 25, 2001 2:41 PM +To: Bisbee, Joanne +Subject: Bus Applications Meeting Follow Up + +Lisa brought up a TW change effective Nov 1. It involves eliminating a turnback surcharge. I have no other information, but you might check with the business folks for any system changes required. + +Wendy'); +INSERT INTO email([from],[to],subject,body) VALUES('danny.mccarty@enron.com', 'fran.fagan@enron.com', 'RE: worksheets', 'Fran, + If Julie''s merit needs to be lump sum, just move it over to that column. Also, send me Eric Gadd''s sheets as well. Thanks. +Dan + + -----Original Message----- +From: Fagan, Fran +Sent: Thursday, December 20, 2001 11:10 AM +To: McCarty, Danny +Subject: worksheets + +As discussed, attached are your sheets for bonus and merit. + +Thanks, + +Fran Fagan +Sr. HR Rep +713.853.5219 + + + << File: McCartyMerit.xls >> << File: mccartyBonusCommercial_UnP.xls >> + +'); +INSERT INTO email([from],[to],subject,body) VALUES('bert.meyers@enron.com', 'shift.dl-portland@enron.com', 'OCTOBER SCHEDULE', 'TEAM, + +PLEASE SEND ME ANY REQUESTS THAT YOU HAVE FOR OCTOBER. SO FAR I HAVE THEM FOR LEAF. I WOULD LIKE TO HAVE IT DONE BY THE 15TH OF THE MONTH. ANY QUESTIONS PLEASE GIVE ME A CALL. + +BERT'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'john.arnold@enron.com, bilal.bajwa@enron.com, john.griffith@enron.com,', 'TRV Notification: (NG - PROPT P/L - 09/27/2001)', 'The report named: NG - PROPT P/L , published as of 09/27/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('patrice.mims@enron.com', 'calvin.eakins@enron.com', 'Re: Small business supply assistance', 'Hi Calvin + + +I spoke with Rickey (boy, is he long-winded!!). Gave him the name of our +credit guy, Russell Diamond. + +Thank for your help!'); +INSERT INTO email([from],[to],subject,body) VALUES('legal <.hall@enron.com>', 'stephanie.panus@enron.com', 'Termination update', 'City of Vernon and Salt River Project terminated their contracts. I will fax these notices to you.'); +INSERT INTO email([from],[to],subject,body) VALUES('d..steffes@enron.com', 'richard.shapiro@enron.com', 'EES / ENA Government Affairs Staffing & Outside Services', 'Rick -- + +Here is the information on staffing and outside services. Call if you need anything else. + +Jim + + '); +INSERT INTO email([from],[to],subject,body) VALUES('gelliott@industrialinfo.com', 'pcopello@industrialinfo.com', 'ECAAR (Gavin), WSCC (Diablo Canyon), & NPCC (Seabrook)', 'Dear Power Outage Database Customer, +Attached you will find an excel document. The outages contained within are forced or rescheduled outages. Your daily delivery will still contain these outages. +In addition to the two excel documents, there is a dbf file that is formatted like your daily deliveries you receive nightly. This will enable you to load the data into your regular database. Any questions please let me know. Thanks. +Greg Elliott +IIR, Inc. 
+713-783-5147 x 3481 +outages@industrialinfo.com +THE INFORMATION CONTAINED IN THIS E-MAIL IS LEGALLY PRIVILEGED AND CONFIDENTIAL INFORMATION INTENDED ONLY FOR THE USE OF THE INDIVIDUAL OR ENTITY NAMED ABOVE. YOU ARE HEREBY NOTIFIED THAT ANY DISSEMINATION, DISTRIBUTION, OR COPY OF THIS E-MAIL TO UNAUTHORIZED ENTITIES IS STRICTLY PROHIBITED. IF YOU HAVE RECEIVED THIS +E-MAIL IN ERROR, PLEASE DELETE IT. + - OUTAGE.dbf + - 111201R.xls + - 111201.xls '); +INSERT INTO email([from],[to],subject,body) VALUES('enron.announcements@enron.com', 'all_ena_egm_eim@enron.com', 'EWS Brown Bag', 'MARK YOUR LUNCH CALENDARS NOW ! + +You are invited to attend the EWS Brown Bag Lunch Series + +Featuring: RAY BOWEN, COO + +Topic: Enron Industrial Markets + +Thursday, March 15, 2001 +11:30 am - 12:30 pm +EB 5 C2 + + +You bring your lunch, Limited Seating +We provide drinks and dessert. RSVP x 3-9610'); +INSERT INTO email([from],[to],subject,body) VALUES('chris.germany@enron.com', 'ingrid.immer@williams.com', 'Re: About St Pauls', 'Sounds good to me. I bet this is next to the Warick?? Hotel. + + + + +"Immer, Ingrid" on 12/21/2000 11:48:47 AM +To: "''chris.germany@enron.com''" +cc: +Subject: About St Pauls + + + + + <> +? +?http://www.stpaulshouston.org/about.html + +Chris, + +I like the looks of this place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. + +Let me know.?? ii + + - About St Pauls.url + +'); +INSERT INTO email([from],[to],subject,body) VALUES('nas@cpuc.ca.gov', 'skatz@sempratrading.com, kmccrea@sablaw.com, thompson@wrightlaw.com,', 'Reply Brief filed July 31, 2000', ' - CPUC01-#76371-v1-Revised_Reply_Brief__Due_today_7_31_.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('gascontrol@aglresources.com', 'dscott4@enron.com, lcampbel@enron.com', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder +As discussed in the Winter Operations Meeting on Sept.29,2000, +E-Gas(Emergency Gas) will not be offered this winter as a service from AGLC. +Marketers and Poolers can receive gas via Peaking and IBSS nominations(daisy +chain) from other marketers up to the 6 p.m. Same Day 2 nomination cycle. +'); +INSERT INTO email([from],[to],subject,body) VALUES('dutch.quigley@enron.com', 'rwolkwitz@powermerchants.com', '', ' + +Here is a goody for you'); +INSERT INTO email([from],[to],subject,body) VALUES('ryan.o''rourke@enron.com', 'k..allen@enron.com, randy.bhatia@enron.com, frank.ermis@enron.com,', 'TRV Notification: (West VaR - 11/07/2001)', 'The report named: West VaR , published as of 11/07/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('mjones7@txu.com', 'cstone1@txu.com, ggreen2@txu.com, timpowell@txu.com,', 'Enron / HPL Actuals for July 10, 2000', 'Teco Tap 10.000 / Enron ; 110.000 / HPL IFERC + +LS HPL LSK IC 30.000 / Enron +'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.pereira@enron.com', 'kkw816@aol.com', 'soccer practice', 'Kathy- + +Is it safe to assume that practice is cancelled for tonight?? + +Susan Pereira'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.whitt@enron.com', 'barry.tycholiz@enron.com', 'Huber Internal Memo', 'Please look at this. I didn''t know how deep to go with the desk. Do you think this works. 
+ + '); +INSERT INTO email([from],[to],subject,body) VALUES('m..forney@enron.com', 'george.phillips@enron.com', '', 'George, +Give me a call and we will further discuss opportunities on the 13st floor. + +Thanks, +JMForney +3-7160'); +INSERT INTO email([from],[to],subject,body) VALUES('brad.mckay@enron.com', 'angusmcka@aol.com', 'Re: (no subject)', 'not yet'); +INSERT INTO email([from],[to],subject,body) VALUES('adam.bayer@enron.com', 'jonathan.mckay@enron.com', 'FW: Curve Fetch File', 'Here is the curve fetch file sent to me. It has plenty of points in it. If you give me a list of which ones you need we may be able to construct a secondary worksheet to vlookup the values. + +adam +35227 + + + -----Original Message----- +From: Royed, Jeff +Sent: Tuesday, September 25, 2001 11:37 AM +To: Bayer, Adam +Subject: Curve Fetch File + +Let me know if it works. It may be required to have a certain version of Oracle for it to work properly. + + + +Jeff Royed +Enron +Energy Operations +Phone: 713-853-5295'); +INSERT INTO email([from],[to],subject,body) VALUES('matt.smith@enron.com', 'yan.wang@enron.com', 'Report Formats', 'Yan, + +The merged reports look great. I believe the only orientation changes are to +"unmerge" the following six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 +23 West_3 +25 CIG_WIC + +The orientation of the individual reports should be correct. Thanks. + +Mat + +PS. Just a reminder to add the "*" by the title of calculated points.'); +INSERT INTO email([from],[to],subject,body) VALUES('michelle.lokay@enron.com', 'jimboman@bigfoot.com', 'Egyptian Festival', '---------------------- Forwarded by Michelle Lokay/ET&S/Enron on 09/07/2000 +10:08 AM --------------------------- + + +"Karkour, Randa" on 09/07/2000 09:01:04 AM +To: "''Agheb (E-mail)" , "Leila Mankarious (E-mail)" +, "''Marymankarious (E-mail)" +, "Michelle lokay (E-mail)" , "Ramy +Mankarious (E-mail)" +cc: + +Subject: Egyptian Festival + + + <> + + http://www.egyptianfestival.com/ + + - Egyptian Festival.url +'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'sherry.dawson@enron.com', 'Urgent!!! --- New EAST books', 'This has to be done.................................. + +Thanks +---------------------- Forwarded by Errol McLaughlin/Corp/Enron on 12/20/2000 +08:39 AM --------------------------- + + + + From: William Kelly @ ECT 12/20/2000 08:31 AM + + +To: Kam Keiser/HOU/ECT@ECT, Darron C Giron/HOU/ECT@ECT, David +Baumbach/HOU/ECT@ECT, Errol McLaughlin/Corp/Enron@ENRON +cc: Kimat Singla/HOU/ECT@ECT, Kulvinder Fowler/NA/Enron@ENRON, Kyle R +Lilly/HOU/ECT@ECT, Jeff Royed/Corp/Enron@ENRON, Alejandra +Chavez/NA/Enron@ENRON, Crystal Hyde/HOU/ECT@ECT + +Subject: New EAST books + +We have new book names in TAGG for our intramonth portfolios and it is +extremely important that any deal booked to the East is communicated quickly +to someone on my team. I know it will take some time for the new names to +sink in and I do not want us to miss any positions or P&L. + +Thanks for your help on this. + +New: +Scott Neal : East Northeast +Dick Jenkins: East Marketeast + +WK +'); +INSERT INTO email([from],[to],subject,body) VALUES('david.forster@enron.com', 'eol.wide@enron.com', 'Change to Stack Manager', 'Effective immediately, there is a change to the Stack Manager which will +affect any Inactive Child. + +An inactive Child with links to Parent products will not have their +calculated prices updated until the Child product is Activated. 
+ +When the Child Product is activated, the price will be recalculated and +updated BEFORE it is displayed on the web. + +This means that if you are inputting a basis price on a Child product, you +will not see the final, calculated price until you Activate the product, at +which time the customer will also see it. + +If you have any questions, please contact the Help Desk on: + +Americas: 713 853 4357 +Europe: + 44 (0) 20 7783 7783 +Asia/Australia: +61 2 9229 2300 + +Dave'); +INSERT INTO email([from],[to],subject,body) VALUES('vince.kaminski@enron.com', 'jhh1@email.msn.com', 'Re: Light reading - see pieces beginning on page 7', 'John, + +I saw it. Very interesting. + +Vince + + + + + +"John H Herbert" on 07/28/2000 08:38:08 AM +To: "Vince J Kaminski" +cc: +Subject: Light reading - see pieces beginning on page 7 + + +Cheers and have a nice weekend, + + +JHHerbert + + + + + - gd000728.pdf + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('matthew.lenhart@enron.com', 'mmmarcantel@equiva.com', 'RE:', 'i will try to line up a pig for you '); +INSERT INTO email([from],[to],subject,body) VALUES('jae.black@enron.com', 'claudette.harvey@enron.com, chaun.roberts@enron.com, judy.martinez@enron.com,', 'Disaster Recovery Equipment', 'As a reminder...there are several pieces of equipment that are set up on the 30th Floor, as well as on our floor, for the Disaster Recovery Team. PLEASE DO NOT TAKE, BORROW OR USE this equipment. Should you need to use another computer system, other than yours, or make conference calls please work with your Assistant to help find or set up equipment for you to use. + +Thanks for your understanding in this matter. + +T.Jae Black +East Power Trading +Assistant to Kevin Presto +off. 713-853-5800 +fax 713-646-8272 +cell 713-539-4760'); +INSERT INTO email([from],[to],subject,body) VALUES('eric.bass@enron.com', 'dale.neuner@enron.com', '5 X 24', 'Dale, + +Have you heard anything more on the 5 X 24s? We would like to get this +product out ASAP. + + +Thanks, + +Eric'); +INSERT INTO email([from],[to],subject,body) VALUES('messenger@smartreminders.com', 'm..tholt@enron.com', '10% Coupon - PrintPal Printer Cartridges - 100% Guaranteed', '[IMAGE] +[IMAGE][IMAGE][IMAGE] +Dear SmartReminders Member, + [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] + + + + + + + + + + + + + + + + + + + + + +We respect your privacy and are a Certified Participant of the BBBOnLine + Privacy Program. To be removed from future offers,click here. +SmartReminders.com is a permission based service. To unsubscribe click here . '); +INSERT INTO email([from],[to],subject,body) VALUES('benjamin.rogers@enron.com', 'mark.bernstein@enron.com', '', 'The guy you are talking about left CIN under a "cloud of suspicion" sort of +speak. He was the one who got into several bad deals and PPA''s in California +for CIN, thus he left on a bad note. Let me know if you need more detail +than that, I felt this was the type of info you were looking for. Thanks! +Ben'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'michelle.cash@enron.com', 'Expense Report Receipts Not Received', 'Employee Name: Michelle Cash +Report Name: Houston Cellular 8-11-01 +Report Date: 12/13/01 +Report ID: 594D37C9ED2111D5B452 +Submitted On: 12/13/01 + +You are only allowed 2 reports with receipts outstanding. 
Your expense reports will not be paid until you meet this requirement.'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.mara@enron.com', 'ray.alvarez@enron.com, mark.palmer@enron.com, karen.denne@enron.com,', 'CAISO Emergency Motion -- to discontinue market-based rates for', 'FYI. the latest broadside against the generators. + +Sue Mara +Enron Corp. +Tel: (415) 782-7802 +Fax:(415) 782-7854 +----- Forwarded by Susan J Mara/NA/Enron on 06/08/2001 12:24 PM ----- + + + "Milner, Marcie" 06/08/2001 11:13 AM To: "''smara@enron.com''" cc: Subject: CAISO Emergency Motion + + +Sue, did you see this emergency motion the CAISO filed today? Apparently +they are requesting that FERC discontinue market-based rates immediately and +grant refunds plus interest on the difference between cost-based rates and +market revenues received back to May 2000. They are requesting the +commission act within 14 days. Have you heard anything about what they are +doing? + +Marcie + +http://www.caiso.com/docs/2001/06/08/200106081005526469.pdf +'); +INSERT INTO email([from],[to],subject,body) VALUES('fletcher.sturm@enron.com', 'eloy.escobar@enron.com', 'Re: General Brinks Position Meeting', 'Eloy, + +Who is General Brinks? + +Fletch'); +INSERT INTO email([from],[to],subject,body) VALUES('nailia.dindarova@enron.com', 'richard.shapiro@enron.com', 'Documents for Mark Frevert (on EU developments and lessons from', 'Rick, + +Here are the documents that Peter has prepared for Mark Frevert. + +Nailia +---------------------- Forwarded by Nailia Dindarova/LON/ECT on 25/06/2001 +16:36 --------------------------- + + +Nailia Dindarova +25/06/2001 15:36 +To: Michael Brown/Enron@EUEnronXGate +cc: Ross Sankey/Enron@EUEnronXGate, Eric Shaw/ENRON@EUEnronXGate, Peter +Styles/LON/ECT@ECT + +Subject: Documents for Mark Frevert (on EU developments and lessons from +California) + +Michael, + + +These are the documents that Peter promised to give to you for Mark Frevert. +He has now handed them to him in person but asked me to transmit them +electronically to you, as well as Eric and Ross. + +Nailia + + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('peggy.a.kostial@accenture.com', 'dave.samuels@enron.com', 'EOL-Accenture Deal Sheet', 'Dave - + +Attached are our comments and suggested changes. Please call to review. + +On the time line for completion, we have four critical steps to complete: + Finalize market analysis to refine business case, specifically + projected revenue stream + Complete counterparty surveying, including targeting 3 CPs for letters + of intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations + +Joe will come back to us with an updated time line, but it is my +expectation that we are still on the same schedule (we just begun week +three) with possibly a week or so slippage.....contract negotiations will +probably be the critical path. + +We will send our cut at the actual time line here shortly. 
Thanks, + +Peggy + +(See attached file: accenture-dealpoints v2.doc) + - accenture-dealpoints v2.doc '); +INSERT INTO email([from],[to],subject,body) VALUES('thomas.martin@enron.com', 'thomas.martin@enron.com', 'Re: Guadalupe Power Partners LP', '---------------------- Forwarded by Thomas A Martin/HOU/ECT on 03/20/2001 +03:49 PM --------------------------- + + +Thomas A Martin +10/11/2000 03:55 PM +To: Patrick Wade/HOU/ECT@ECT +cc: +Subject: Re: Guadalupe Power Partners LP + +The deal is physically served at Oasis Waha or Oasis Katy and is priced at +either HSC, Waha or Katytailgate GD at buyers option three days prior to +NYMEX close. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('judy.townsend@enron.com', 'dan.junek@enron.com, chris.germany@enron.com', 'Columbia Distribution''s Capacity Available for Release - Sum', '---------------------- Forwarded by Judy Townsend/HOU/ECT on 03/09/2001 11:04 +AM --------------------------- + + +agoddard@nisource.com on 03/08/2001 09:16:57 AM +To: " - *Koch, Kent" , " - +*Millar, Debra" , " - *Burke, Lynn" + +cc: " - *Heckathorn, Tom" +Subject: Columbia Distribution''s Capacity Available for Release - Sum + + +Attached is Columbia Distribution''s notice of capacity available for release +for +the summer of 2001 (Apr. 2001 through Oct. 2001). + +Please note that the deadline for bids is 3:00pm EST on March 20, 2001. + +If you have any questions, feel free to contact any of the representatives +listed +at the bottom of the attachment. + +Aaron Goddard + + + + + - 2001Summer.doc +'); +INSERT INTO email([from],[to],subject,body) VALUES('rhonda.denton@enron.com', 'tim.belden@enron.com, dana.davis@enron.com, genia.fitzgerald@enron.com,', 'Split Rock Energy LLC', 'We have received the executed EEI contract from this CP dated 12/12/2000. +Copies will be distributed to Legal and Credit.'); +INSERT INTO email([from],[to],subject,body) VALUES('kerrymcelroy@dwt.com', 'jack.speer@alcoa.com, crow@millernash.com, michaelearly@earthlink.net,', 'Oral Argument Request', ' - Oral Argument Request.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('mike.carson@enron.com', 'rlmichaelis@hormel.com', '', 'Did you come in town this wk end..... My new number at our house is : +713-668-3712...... my cell # is 281-381-7332 + +the kid'); +INSERT INTO email([from],[to],subject,body) VALUES('cooper.richey@enron.com', 'trycooper@hotmail.com', 'FW: Contact Info', ' + +-----Original Message----- +From: Punja, Karim +Sent: Thursday, December 13, 2001 2:35 PM +To: Richey, Cooper +Subject: Contact Info + + +Cooper, + +Its been a real pleasure working with you (even though it was for only a small amount of time) +I hope we can stay in touch. + +Home# 234-0249 +email: kpunja@hotmail.com + +Take Care, + +Karim. + '); +INSERT INTO email([from],[to],subject,body) VALUES('bjm30@earthlink.net', 'mcguinn.k@enron.com, mcguinn.ian@enron.com, mcguinn.stephen@enron.com,', 'email address change', 'Hello all. + +I haven''t talked to many of you via email recently but I do want to give you +my new address for your email file: + + bjm30@earthlink.net + +I hope all is well. + +Brian McGuinn'); +INSERT INTO email([from],[to],subject,body) VALUES('shelley.corman@enron.com', 'steve.hotte@enron.com', 'Flat Panels', 'Can you please advise what is going on with the flat panels that we had planned to distribute to our gas logistics team. It was in the budget and we had the okay, but now I''m hearing there is some hold-up & the units are stored on 44. 
+ +Shelley'); +INSERT INTO email([from],[to],subject,body) VALUES('sara.davidson@enron.com', 'john.schwartzenburg@enron.com, scott.dieball@enron.com, recipients@enron.com,', '2001 Enron Law Conference (Distribution List 2)', ' Enron Law Conference + +San Antonio, Texas May 2-4, 2001 Westin Riverwalk + + See attached memo for more details!! + + +? Registration for the law conference this year will be handled through an +Online RSVP Form on the Enron Law Conference Website at +http://lawconference.corp.enron.com. The website is still under construction +and will not be available until Thursday, March 15, 2001. + +? We will send you another e-mail to confirm when the Law Conference Website +is operational. + +? Please complete the Online RSVP Form as soon as it is available and submit +it no later than Friday, March 30th. + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('tori.kuykendall@enron.com', 'heath.b.taylor@accenture.com', 'Re:', 'hey - thats funny about john - he definitely remembers him - i''ll call pat +and let him know - we are coming on saturday - i just havent had a chance to +call you guys back -- looking forward to it -- i probably need the +directions again though'); +INSERT INTO email([from],[to],subject,body) VALUES('darron.giron@enron.com', 'bryce.baxter@enron.com', 'Re: Feedback for Audrey Cook', 'Bryce, + +I''ll get it done today. + +DG 3-9573 + + + + + + From: Bryce Baxter 06/12/2000 07:15 PM + + +To: Darron C Giron/HOU/ECT@ECT +cc: +Subject: Feedback for Audrey Cook + +You were identified as a reviewer for Audrey Cook. If possible, could you +complete her feedback by end of business Wednesday? It will really help me +in the PRC process to have your input. Thanks. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('casey.evans@enron.com', 'stephanie.sever@enron.com', 'Gas EOL ID', 'Stephanie, + +In conjunction with the recent movement of several power traders, they are changing the names of their gas books as well. The names of the new gas books and traders are as follows: + +PWR-NG-LT-SPP: Mike Carson +PWR-NG-LT-SERC: Jeff King + +If you need to know their power desk to map their ID to their gas books, those desks are as follows: + +EPMI-LT-SPP: Mike Carson +EPMI-LT-SERC: Jeff King + +I will be in training this afternoon, but will be back when class is over. Let me know if you have any questions. + +Thanks for your help! +Casey'); +INSERT INTO email([from],[to],subject,body) VALUES('darrell.schoolcraft@enron.com', 'david.roensch@enron.com, kimberly.watson@enron.com, michelle.lokay@enron.com,', 'Postings', 'Please see the attached. + + +ds + + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('mcominsky@aol.com', 'cpatman@bracepatt.com, james_derrick@enron.com', 'Jurisprudence Luncheon', 'Carrin & Jim -- + +It was an honor and a pleasure to meet both of you yesterday. I know we will +have fun working together on this very special event. + +Jeff left the jurisprudence luncheon lists for me before he left on vacation. + I wasn''t sure whether he transmitted them to you as well. Would you please +advise me if you would like them sent to you? I can email the MS Excel files +or I can fax the hard copies to you. Please advise what is most convenient. + +I plan to be in town through the holidays and can be reached by phone, email, +or cell phone at any time. My cell phone number is 713/705-4829. + +Thanks again for your interest in the ADL''s work. Martin. + +Martin B. 
Cominsky +Director, Southwest Region +Anti-Defamation League +713/627-3490, ext. 122 +713/627-2011 (fax) +MCominsky@aol.com'); +INSERT INTO email([from],[to],subject,body) VALUES('phillip.love@enron.com', 'todagost@utmb.edu, gbsonnta@utmb.edu', 'New President', 'I had a little bird put a word in my ear. Is there any possibility for Ben +Raimer to be Bush''s secretary of HHS? Just curious about that infamous UTMB +rumor mill. Hope things are well, happy holidays. +PL'); +INSERT INTO email([from],[to],subject,body) VALUES('marie.heard@enron.com', 'ehamilton@fna.com', 'ISDA Master Agreement', 'Erin: + +Pursuant to your request, attached are the Schedule to the ISDA Master Agreement, together with Paragraph 13 to the ISDA Credit Support Annex. Please let me know if you need anything else. We look forward to hearing your comments. + +Marie + +Marie Heard +Senior Legal Specialist +Enron North America Corp. +Phone: (713) 853-3907 +Fax: (713) 646-3490 +marie.heard@enron.com + + '); +INSERT INTO email([from],[to],subject,body) VALUES('andrea.ring@enron.com', 'beverly.beaty@enron.com', 'Re: Tennessee Buy - Louis Dreyfus', 'Beverly - once again thanks so much for your help on this. + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('karolyn.criado@enron.com', 'j..bonin@enron.com, felicia.case@enron.com, b..clapp@enron.com,', 'Price List week of Oct. 8-9, 2001', ' +Please contact me if you have any questions regarding last weeks prices. + +Thank you, +Karolyn Criado +3-9441 + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.presto@enron.com', 'edward.baughman@enron.com, billy.braddock@enron.com', 'Associated', 'Please begin working on filling our Associated short position in 02. I would like to take this risk off the books. + +In addition, please find out what a buy-out of VEPCO would cost us. With Rogers transitioning to run our retail risk management, I would like to clean up our customer positions. + +We also need to continue to explore a JEA buy-out. + +Thanks.'); +INSERT INTO email([from],[to],subject,body) VALUES('stacy.dickson@enron.com', 'gregg.penman@enron.com', 'RE: Constellation TC 5-7-01', 'Gregg, + +I am at home with a sick baby. (Lots of fun!) I will call you about this +tomorrow. + +Stacy'); +INSERT INTO email([from],[to],subject,body) VALUES('joe.quenet@enron.com', 'dfincher@utilicorp.com', '', 'hey big guy.....check this out..... + + w ww.gorelieberman-2000.com/'); +INSERT INTO email([from],[to],subject,body) VALUES('k..allen@enron.com', 'jacqestc@aol.com', '', 'Jacques, + +I sent you a fax of Kevin Kolb''s comments on the release. The payoff on the note would be $36,248 ($36090(principal) + $158 (accrued interest)). +This is assuming we wrap this up on Tuesday. + +Please email to confirm that their changes are ok so I can set up a meeting on Tuesday to reach closure. + +Phillip'); +INSERT INTO email([from],[to],subject,body) VALUES('kourtney.nelson@enron.com', 'mike.swerzbin@enron.com', 'Adjusted L/R Balance', 'Mike, + +I placed the adjusted L/R Balance on the Enronwest site. It is under the "Staff/Kourtney Nelson". There are two links: + +1) "Adj L_R" is the same data/format from the weekly strategy meeting. +2) "New Gen 2001_2002" link has all of the supply side info that is used to calculate the L/R balance + -Please note the Data Flag column, a value of "3" indicates the project was cancelled, on hold, etc and is not included in the calc. 
+ +Both of these sheets are interactive Excel spreadsheets and thus you can play around with the data as you please. Also, James Bruce is working to get his gen report on the web. That will help with your access to information on new gen. + +Please let me know if you have any questions or feedback, + +Kourtney + + + +Kourtney Nelson +Fundamental Analysis +Enron North America +(503) 464-8280 +kourtney.nelson@enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('d..thomas@enron.com', 'naveed.ahmed@enron.com', 'FW: Current Enron TCC Portfolio', ' + +-----Original Message----- +From: Grace, Rebecca M. +Sent: Monday, December 17, 2001 9:44 AM +To: Thomas, Paul D. +Cc: Cashion, Jim; Allen, Thresa A.; May, Tom +Subject: RE: Current Enron TCC Portfolio + + +Paul, + +I reviewed NY''s list. I agree with all of their contracts numbers and mw amounts. + +Call if you have any more questions. + +Rebecca + + + + -----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:08 AM +To: Grace, Rebecca M. +Subject: FW: Current Enron TCC Portfolio + + << File: enrontccs.xls >> +Rebecca, +Let me know if you see any differences. + +Paul +X 3-0403 +-----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:04 AM +To: Ahmed, Naveed +Subject: FW: Current Enron TCC Portfolio + + + + +-----Original Message----- +From: Thomas, Paul D. +Sent: Thursday, December 13, 2001 10:01 AM +To: Baughman, Edward D. +Subject: Current Enron TCC Portfolio + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('stephanie.panus@enron.com', 'william.bradford@enron.com, debbie.brackett@enron.com,', 'Coastal Merchant Energy/El Paso Merchant Energy', 'Coastal Merchant Energy, L.P. merged with and into El Paso Merchant Energy, +L.P., effective February 1, 2001, with the surviving entity being El Paso +Merchant Energy, L.P. We currently have ISDA Master Agreements with both +counterparties. Please see the attached memo regarding the existing Masters +and let us know which agreement should be terminated. + +Thanks, +Stephanie +'); +INSERT INTO email([from],[to],subject,body) VALUES('kam.keiser@enron.com', 'c..kenne@enron.com', 'RE: What about this too???', ' + + -----Original Message----- +From: Kenne, Dawn C. +Sent: Wednesday, February 06, 2002 11:50 AM +To: Keiser, Kam +Subject: What about this too??? + + + << File: Netco Trader Matrix.xls >> + '); +INSERT INTO email([from],[to],subject,body) VALUES('chris.meyer@enron.com', 'joe.parks@enron.com', 'Centana', 'Talked to Chip. We do need Cash Committe approval given the netting feature of your deal, which means Batch Funding Request. Please update per my previous e-mail and forward. + +Thanks + +chris +x31666'); +INSERT INTO email([from],[to],subject,body) VALUES('debra.perlingiere@enron.com', 'jworman@academyofhealth.com', '', 'Have a great weekend! Happy Fathers Day! + + +Debra Perlingiere +Enron North America Corp. +1400 Smith Street, EB 3885 +Houston, Texas 77002 +dperlin@enron.com +Phone 713-853-7658 +Fax 713-646-3490'); +INSERT INTO email([from],[to],subject,body) VALUES('outlook.team@enron.com', '', 'Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia &', 'CALENDAR ENTRY: APPOINTMENT + +Description: + Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia & Dir Rpts. 
- 4102 + +Date: 1/5/2001 +Time: 9:00 AM - 10:00 AM (Central Standard Time) + +Chairperson: Outlook Migration Team + +Detailed Description:'); +INSERT INTO email([from],[to],subject,body) VALUES('diana.seifert@enron.com', 'mark.taylor@enron.com', 'Guest access Chile', 'Hello Mark, + +Justin Boyd told me that your can help me with questions regarding Chile. +We got a request for guest access through MG. +The company is called Escondida and is a subsidiary of BHP Australia. + +Please advise if I can set up a guest account or not. +F.Y.I.: MG is planning to put a "in w/h Chile" contract for Copper on-line as +soon as Enron has done the due diligence for this country. +Thanks ! + + +Best regards + +Diana Seifert +EOL PCG'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'mark.whitt@enron.com', '<> - 121001', 'The Approval status has changed on the following report: + +Status last changed by: Barry L. Tycholiz +Expense Report Name: 121001 +Report Total: $198.98 +Amount Due Employee: $198.98 +Amount Approved: $198.98 +Amount Paid: $0.00 +Approval Status: Approved +Payment Status: Pending + + +To review this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.hyatt@enron.com', '', 'Technical Support', 'Outside the U.S., please refer to the list below: + +Australia: +1800 678-515 +support@palm-au.com + +Canada: +1905 305-6530 +support@palm.com + +New Zealand: +0800 446-398 +support@palm-nz.com + +U.K.: +0171 867 0108 +eurosupport@palm.3com.com + +Please refer to the Worldwide Customer Support card for a complete technical support contact list.'); +INSERT INTO email([from],[to],subject,body) VALUES('geoff.storey@enron.com', 'dutch.quigley@enron.com', 'RE:', 'duke contact? + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 10:14 AM +To: Storey, Geoff +Subject: RE: + +bp corp Albert LaMore 281-366-4962 + +running the reports now + + + -----Original Message----- +From: Storey, Geoff +Sent: Wednesday, October 31, 2001 10:10 AM +To: Quigley, Dutch +Subject: RE: + +give me a contact over there too +BP + + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 9:42 AM +To: Storey, Geoff +Subject: + +Coral Jeff Whitnah 713-767-5374 +Relaint Steve McGinn 713-207-4000'); +INSERT INTO email([from],[to],subject,body) VALUES('pete.davis@enron.com', 'pete.davis@enron.com', 'Start Date: 4/22/01; HourAhead hour: 3; ', 'Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING FILE -->> O:\Portland\WestDesk\California Scheduling\ISO Final +Schedules\2001042203.txt + +---- Load Schedule ---- +$$$ Variance found in table tblLoads. + Details: (Hour: 3 / Preferred: 1.92 / Final: 1.89) + TRANS_TYPE: FINAL + LOAD_ID: PGE4 + MKT_TYPE: 2 + TRANS_DATE: 4/22/01 + SC_ID: EPMI + +'); +INSERT INTO email([from],[to],subject,body) VALUES('john.postlethwaite@enron.com', 'john.zufferli@enron.com', 'Reference', 'John, hope things are going well up there for you. The big day is almost here for you and Jessica. I was wondering if I could use your name as a job reference if need be. I am just trying to get everything in order just in case something happens. + +John'); +INSERT INTO email([from],[to],subject,body) VALUES('jeffrey.shankman@enron.com', 'lschiffm@jonesday.com', 'Re:', 'I saw you called on the cell this a.m. 
Sorry I missed you. (I was in the +shower). I have had a shitty week--I suspect my silence (not only to you, +but others) after our phone call is a result of the week. I''m seeing Glen at +11:15....talk to you'); +INSERT INTO email([from],[to],subject,body) VALUES('litebytz@enron.com', '', 'Lite Bytz RSVP', ' +This week''s Lite Bytz presentation will feature the following TOOLZ speaker: + +Richard McDougall +Solaris 8 +Thursday, June 7, 2001 + +If you have not already signed up, please RSVP via email to litebytz@enron.com by the end of the day Tuesday, June 5, 2001. + +*Remember: this is now a Brown Bag Event--so bring your lunch and we will provide cookies and drinks. + +Click below for more details. + +http://home.enron.com:84/messaging/litebytztoolzprint.jpg'); + COMMIT; + } +} {} + +############################################################################### +# Everything above just builds an interesting test database. The actual +# tests come after this comment. +############################################################################### + +do_test fts3ac-1.2 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark' + } +} {6 17 25 38 40 42 73 74} +do_test fts3ac-1.3 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan' + } +} {24 40} +do_test fts3ac-1.4 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark susan' + } +} {40} +do_test fts3ac-1.5 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan mark' + } +} {40} +do_test fts3ac-1.6 { + execsql { + SELECT rowid FROM email WHERE email MATCH '"mark susan"' + } +} {} +do_test fts3ac-1.7 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark -susan' + } +} {6 17 25 38 42 73 74} +do_test fts3ac-1.8 { + execsql { + SELECT rowid FROM email WHERE email MATCH '-mark susan' + } +} {24} +do_test fts3ac-1.9 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark OR susan' + } +} {6 17 24 25 38 40 42 73 74} + +# Some simple tests of the automatic "offsets(email)" column. In the sample +# data set above, only one message, number 20, contains the words +# "gas" and "reminder" in both body and subject. +# +do_test fts3ac-2.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts3ac-2.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 1 54 8}} +do_test fts3ac-2.3 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'body:gas reminder' + } +} {20 {2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts3ac-2.4 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE subject MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8}} +do_test fts3ac-2.5 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'gas reminder' + } +} {20 {3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} + +# Document 32 contains 5 instances of the world "child". But only +# 3 of them are paired with "product". Make sure only those instances +# that match the phrase appear in the offsets(email) list. 
+#
+do_test fts3ac-3.1 {
+  execsql {
+    SELECT rowid, offsets(email) FROM email
+     WHERE body MATCH 'child product' AND +rowid=32
+  }
+} {32 {3 0 94 5 3 0 114 5 3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7 3 1 493 7}}
+do_test fts3ac-3.2 {
+  execsql {
+    SELECT rowid, offsets(email) FROM email
+     WHERE body MATCH '"child product"'
+  }
+} {32 {3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7}}
+
+# Snippet generator tests
+#
+do_test fts3ac-4.1 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'subject:gas reminder'
+  }
+} {{Alert Posted 10:00 AM November 20,2000: E-<b>GAS</b> Request <b>Reminder</b>}}
+do_test fts3ac-4.2 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'christmas candlelight'
+  }
+} {{... place.? What do you think about going here <b>Christmas</b>
+eve?? They have an 11:00 a.m. service and a <b>candlelight</b> service at 5:00 p.m.,
+among others. ...}}
+
+do_test fts3ac-4.3 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'deal sheet potential reuse'
+  }
+} {{EOL-Accenture <b>Deal</b> <b>Sheet</b> ... intent
+ Review Enron asset base for <b>potential</b> <b>reuse</b>/ licensing
+ Contract negotiations ...}}
+do_test fts3ac-4.4 {
+  execsql {
+    SELECT snippet(email,'<<<','>>>',' ') FROM email
+     WHERE email MATCH 'deal sheet potential reuse'
+  }
+} {{EOL-Accenture <<<Deal>>> <<<Sheet>>> intent
+ Review Enron asset base for <<<potential>>> <<<reuse>>>/ licensing
+ Contract negotiations }}
+do_test fts3ac-4.5 {
+  execsql {
+    SELECT snippet(email,'<<<','>>>',' ') FROM email
+     WHERE email MATCH 'first things'
+  }
+} {{Re: <<<First>>> Polish Deal! Congrats! <<<Things>>> seem to be building rapidly now on the }}
+do_test fts3ac-4.6 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'chris is here'
+  }
+} {{<b>chris</b>.germany@enron.com ... Sounds good to me. I bet this <b>is</b> next to the Warick?? Hotel. ... place.? What do you think about going <b>here</b> Christmas
+eve?? They have an 11:00 a.m. ...}}
+do_test fts3ac-4.7 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH '"pursuant to"'
+  }
+} {{Erin:
+
+<b>Pursuant</b> <b>to</b> your request, attached are the Schedule to ...}}
+do_test fts3ac-4.8 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'ancillary load davis'
+  }
+} {{pete.<b>davis</b>@enron.com ... Start Date: 4/22/01; HourAhead hour: 3; No <b>ancillary</b> schedules awarded.
+Variances detected.
+Variances detected in <b>Load</b> schedule.
+
+ LOG MESSAGES:
+
+PARSING ...}}
+
+# Combinations of AND and OR operators:
+#
+do_test fts3ac-5.1 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'questar enron OR com'
+  }
+} {{matt.smith@<b>enron</b>.<b>com</b> ... six reports:
+
+31 Keystone Receipts
+15 <b>Questar</b> Pipeline
+40 Rockies Production
+22 West_2 ...}}
+do_test fts3ac-5.2 {
+  execsql {
+    SELECT snippet(email) FROM email
+     WHERE email MATCH 'enron OR com questar'
+  }
+} {{matt.smith@<b>enron</b>.<b>com</b> ... six reports:
+
+31 Keystone Receipts
+15 <b>Questar</b> Pipeline
+40 Rockies Production
+22 West_2 ...}}
+
+finish_test
diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ad.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ad.test
new file mode 100644
index 0000000..420b5b2
--- /dev/null
+++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ad.test
@@ -0,0 +1,65 @@
+# 2006 October 1
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module, and in particular +# the Porter stemmer. +# +# $Id: fts3ad.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +do_test fts3ad-1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize porter); + INSERT INTO t1(rowid, content) VALUES(1, 'running and jumping'); + SELECT rowid FROM t1 WHERE content MATCH 'run jump'; + } +} {1} +do_test fts3ad-1.2 { + execsql { + SELECT snippet(t1) FROM t1 WHERE t1 MATCH 'run jump'; + } +} {{running and jumping}} +do_test fts3ad-1.3 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(2, 'abcdefghijklmnopqrstuvwyxz'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts3ad-1.4 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijXXXXqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts3ad-1.5 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(3, 'The value is 123456789'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123789' + } +} {3 {The value is 123456789}} +do_test fts3ad-1.6 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123000000789' + } +} {3 {The value is 123456789}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ae.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ae.test new file mode 100644 index 0000000..949a72b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ae.test @@ -0,0 +1,85 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing deletions in the FTS3 module. +# +# $Id: fts3ae.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 30 INSERT and +# DELETE statements, so that we'll test both the segmentMerge() merge +# (over the first 16) and the termSelect() merge (over the level-1 +# segment and 14 level-0 segments). 
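A compact illustration of the porter tokenizer behaviour exercised by fts3ad.test above: different surface forms of a word reduce to the same stem, so a query for one form matches the other. This is only a sketch, assuming an SQLITE_ENABLE_FTS3 build; the table name stemmed and command name edb are illustrative.

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE stemmed USING fts3(content, tokenize porter);
  INSERT INTO stemmed(rowid, content) VALUES(1, 'running and jumping');
}
# 'runs' and 'running' share the stem 'run', so this matches rowid 1.
puts [edb eval { SELECT rowid FROM stemmed WHERE content MATCH 'runs' }]
edb close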
+db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + DELETE FROM t1 WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + DELETE FROM t1 WHERE rowid = 22; +} + +do_test fts3af-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {14} + +do_test fts3ae-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {3 5 9 11 15 17 21} + +do_test fts3ae-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 11 14 15 18} + +do_test fts3ae-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {5 6 12 14 15 20 21} + +do_test fts3ae-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {8 9 11 12 14 15} + +do_test fts3ae-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {17 18 20 21} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3af.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3af.test new file mode 100644 index 0000000..221ac4c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3af.test @@ -0,0 +1,90 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing updates in the FTS3 module. +# +# $Id: fts3af.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 31 INSERT, +# UPDATE, and DELETE statements, so that we'll test both the +# segmentMerge() merge (over the first 16) and the termSelect() merge +# (over the level-1 segment and 15 level-0 segments). 
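The deletions exercised by fts3ae.test above take effect immediately for full-text queries; once a row is deleted its terms no longer appear in MATCH results. A minimal sketch under the same assumptions (FTS3-enabled build, illustrative names):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE notes USING fts3(content);
  INSERT INTO notes(rowid, content) VALUES(1, 'one two');
  INSERT INTO notes(rowid, content) VALUES(2, 'two three');
}
puts [edb eval { SELECT rowid FROM notes WHERE content MATCH 'two' }] ;# -> 1 2
edb eval { DELETE FROM notes WHERE rowid = 1 }
puts [edb eval { SELECT rowid FROM notes WHERE content MATCH 'two' }] ;# -> 2
edb close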
+db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + UPDATE t1 SET content = 'update one three' WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + UPDATE t1 SET content = 'update two five' WHERE rowid = 8; + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + UPDATE t1 SET content = 'update' WHERE rowid = 15; +} + +do_test fts3af-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {16} + +do_test fts3af-2.0 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'update'} +} {1 8 15} + +do_test fts3af-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 9 11 17 21} + +do_test fts3af-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 8 11 14 18 22} + +do_test fts3af-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {1 5 6 12 14 20 21 22} + +do_test fts3af-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {9 11 12 14} + +do_test fts3af-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {8 17 18 20 21 22} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ag.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ag.test new file mode 100644 index 0000000..1e1171c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ag.test @@ -0,0 +1,87 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing handling of edge cases for various doclist +# merging functions in the FTS3 module query logic. +# +# $Id: fts3ag.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'this is a test'); + INSERT INTO t1 (rowid, content) VALUES(2, 'also a test'); +} + +# No hits at all. Returns empty doclists from termSelect(). 
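Similarly, the updates exercised by fts3af.test above replace a row's old index entries with entries for its new content. A short sketch (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE notes USING fts3(content);
  INSERT INTO notes(rowid, content) VALUES(1, 'one three');
  UPDATE notes SET content = 'update one three' WHERE rowid = 1;
}
puts [edb eval { SELECT rowid FROM notes WHERE content MATCH 'update' }] ;# -> 1
puts [edb eval { SELECT rowid FROM notes WHERE content MATCH 'one' }]    ;# -> 1
edb close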
+do_test fts3ag-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +# Empty left in docListExceptMerge(). +do_test fts3ag-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this something'} +} {} + +# Empty right in docListExceptMerge(). +do_test fts3ag-1.3 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this -something'} +} {1} + +# Empty left in docListPhraseMerge(). +do_test fts3ag-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"this something"'} +} {} + +# Empty right in docListPhraseMerge(). +do_test fts3ag-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"something is"'} +} {} + +# Empty left in docListOrMerge(). +do_test fts3ag-1.6 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something OR this'} +} {1} + +# Empty right in docListOrMerge(). +do_test fts3ag-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR something'} +} {1} + +# Empty left in docListAndMerge(). +do_test fts3ag-1.8 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something this'} +} {} + +# Empty right in docListAndMerge(). +do_test fts3ag-1.9 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this something'} +} {} + +# No support for all-except queries. +do_test fts3ag-1.10 { + catchsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this -something'} +} {1 {SQL logic error or missing database}} + +# Test that docListOrMerge() correctly handles reaching the end of one +# doclist before it reaches the end of the other. +do_test fts3ag-1.11 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR also'} +} {1 2} +do_test fts3ag-1.12 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'also OR this'} +} {1 2} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ah.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ah.test new file mode 100644 index 0000000..1a58e49 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ah.test @@ -0,0 +1,76 @@ +# 2006 October 31 (scaaarey) +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# here is testing correct handling of excessively long terms. +# +# $Id: fts3ah.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Generate a term of len copies of char. +proc bigterm {char len} { + for {set term ""} {$len>0} {incr len -1} { + append term $char + } + return $term +} + +# Generate a document of bigterms based on characters from the list +# chars. +proc bigtermdoc {chars len} { + set doc "" + foreach char $chars { + append doc " " [bigterm $char $len] + } + return $doc +} + +set len 5000 +set doc1 [bigtermdoc {a b c d} $len] +set doc2 [bigtermdoc {b d e f} $len] +set doc3 [bigtermdoc {a c e} $len] + +set aterm [bigterm a $len] +set bterm [bigterm b $len] +set xterm [bigterm x $len] + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, $doc1); + INSERT INTO t1 (rowid, content) VALUES(2, $doc2); + INSERT INTO t1 (rowid, content) VALUES(3, $doc3); +} + +# No hits at all. Returns empty doclists from termSelect(). 
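The fts3ag cases above all rely on the same query grammar used throughout these scripts: whitespace-separated terms are implicitly ANDed, OR is written explicitly, a leading '-' excludes a term, and double quotes form a phrase. A small sketch of each form (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE docs USING fts3(content);
  INSERT INTO docs(rowid, content) VALUES(1, 'this is a test');
  INSERT INTO docs(rowid, content) VALUES(2, 'also a test');
}
puts [edb eval { SELECT rowid FROM docs WHERE docs MATCH 'this test' }]    ;# implicit AND -> 1
puts [edb eval { SELECT rowid FROM docs WHERE docs MATCH 'this OR also' }] ;# OR -> 1 2
puts [edb eval { SELECT rowid FROM docs WHERE docs MATCH 'test -this' }]   ;# exclusion -> 2
puts [edb eval { SELECT rowid FROM docs WHERE docs MATCH '"a test"' }]     ;# phrase -> 1 2
edb close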
+do_test fts3ah-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +do_test fts3ah-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $aterm} +} {1 3} + +do_test fts3ah-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $xterm} +} {} + +do_test fts3ah-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '$aterm -$xterm'" +} {1 3} + +do_test fts3ah-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"$aterm $bterm\"'" +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ai.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ai.test new file mode 100644 index 0000000..144b4c3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ai.test @@ -0,0 +1,87 @@ +# 2007 January 17 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite fts3 library. The +# focus here is testing handling of UPDATE when using UTF-16-encoded +# databases. +# +# $Id: fts3ai.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +# NOTE(shess) Copied from capi3.test. +proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +db eval { + PRAGMA encoding = "UTF-16le"; + CREATE VIRTUAL TABLE t1 USING fts3(content); +} + +do_test fts3ai-1.0 { + execsql {PRAGMA encoding} +} {UTF-16le} + +do_test fts3ai-1.1 { + execsql {INSERT INTO t1 (rowid, content) VALUES(1, 'one')} + execsql {SELECT content FROM t1 WHERE rowid = 1} +} {one} + +do_test fts3ai-1.2 { + set sql "INSERT INTO t1 (rowid, content) VALUES(2, 'two')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 2} +} {two} + +do_test fts3ai-1.3 { + set sql "INSERT INTO t1 (rowid, content) VALUES(3, 'three')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'trois' WHERE rowid = 3" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 3} +} {trois} + +do_test fts3ai-1.4 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(4, 'four')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 4} +} {four} + +do_test fts3ai-1.5 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(5, 'five')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'cinq' WHERE rowid = 5" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 5} +} {cinq} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aj.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aj.test new file mode 100644 index 0000000..60d26c0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3aj.test @@ -0,0 +1,89 @@ +# 2007 February 6 +# +# The author disclaims copyright to this source code. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# tests creating fts3 tables in an attached database. +# +# $Id: fts3aj.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Clean up anything left over from a previous pass. +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db + +db eval { + CREATE VIRTUAL TABLE t3 USING fts3(content); + INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); +} + +db2 eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# This has always worked because the t1_* tables used by fts3 will be +# the defaults. +do_test fts3aj-1.1 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + SELECT rowid FROM t1 WHERE t1 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +# Make certain we're detached if there was an error. +catch {db eval {DETACH DATABASE two}} + +# In older code, this appears to work fine, but the t2_* tables used +# by fts3 will be created in database 'main' instead of database +# 'two'. It appears to work fine because the tables end up being the +# defaults, but obviously is badly broken if you hope to use things +# other than in the exact same ATTACH setup. +do_test fts3aj-1.2 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + CREATE VIRTUAL TABLE two.t2 USING fts3(content); + INSERT INTO t2 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t2 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t2 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM t2 WHERE t2 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +catch {db eval {DETACH DATABASE two}} + +# In older code, this broke because the fts3 code attempted to create +# t3_* tables in database 'main', but they already existed. Normally +# this wouldn't happen without t3 itself existing, in which case the +# fts3 code would never be called in the first place. +do_test fts3aj-1.3 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + + CREATE VIRTUAL TABLE two.t3 USING fts3(content); + INSERT INTO two.t3 (rowid, content) VALUES(2, "hello there"); + INSERT INTO two.t3 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM two.t3 WHERE t3 MATCH 'hello'; + + DETACH DATABASE two; + } db2 +} {2} +catch {db eval {DETACH DATABASE two}} + +catch {db2 close} +file delete -force test2.db + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ak.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ak.test new file mode 100644 index 0000000..a263f0b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ak.test @@ -0,0 +1,105 @@ +# 2007 March 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. These +# make sure that fts3 insertion buffering is fully transparent when +# using transactions. +# +# $Id: fts3ak.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. 
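The point of fts3aj.test above is that the shadow tables backing an fts3 table (the _content, _segments and _segdir tables) must live in the same database as the virtual table itself, including when that database is attached. A sketch of the attached-database case; an FTS3-enabled build is assumed and the file and object names are illustrative:

package require sqlite3
file delete -force aux_example.db
sqlite3 edb :memory:
edb eval {
  ATTACH 'aux_example.db' AS two;
  CREATE VIRTUAL TABLE two.msgs USING fts3(content);
  INSERT INTO two.msgs(rowid, content) VALUES(1, 'hello world');
}
# The shadow tables are created in 'two', alongside the virtual table.
puts [edb eval { SELECT name FROM two.sqlite_master WHERE name LIKE 'msgs%' ORDER BY name }]
puts [edb eval { SELECT rowid FROM two.msgs WHERE msgs MATCH 'hello' }]
edb eval { DETACH two }
edb close
file delete -force aux_example.db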
+ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# Test that possibly-buffered inserts went through after commit. +do_test fts3ak-1.1 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(4, "false world"); + INSERT INTO t1 (rowid, content) VALUES(5, "false door"); + COMMIT TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4} + +# Test that buffered inserts are seen by selects in the same +# transaction. +do_test fts3ak-1.2 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(6, "another world"); + INSERT INTO t1 (rowid, content) VALUES(7, "another test"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + COMMIT TRANSACTION; + } +} {1 3 4 6} + +# Test that buffered inserts are seen within a transaction. This is +# really the same test as 1.2. +do_test fts3ak-1.3 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(8, "second world"); + INSERT INTO t1 (rowid, content) VALUES(9, "second sight"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + ROLLBACK TRANSACTION; + } +} {1 3 4 6 8} + +# Double-check that the previous result doesn't persist past the +# rollback! +do_test fts3ak-1.4 { + execsql { + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test it all together. +do_test fts3ak-1.5 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(10, "second world"); + INSERT INTO t1 (rowid, content) VALUES(11, "second sight"); + ROLLBACK TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test that the obvious case works. +do_test fts3ak-1.6 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(12, "third world"); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'third'; + } +} {12} + +# This is exactly the same as the previous test, except that older +# code loses the INSERT due to an SQLITE_SCHEMA error. +do_test fts3ak-1.7 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(13, "third dimension"); + CREATE TABLE x (c); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'dimension'; + } +} {13} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3al.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3al.test new file mode 100644 index 0000000..2556d8c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3al.test @@ -0,0 +1,69 @@ +# 2007 March 28 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing isspace/isalnum/tolower problems with the +# FTS3 module. Unfortunately, this code isn't a really principled set +# of tests, because it's impossible to know where new uses of these +# functions might appear. +# +# $Id: fts3al.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Tests that startsWith() (calls isspace, tolower, isalnum) can handle +# hi-bit chars. parseSpec() also calls isalnum here. 
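The transactional guarantees checked by fts3ak.test above can also be exercised through the Tcl interface's transaction method: buffered full-text inserts are visible to queries inside the open transaction and are discarded if it rolls back. A sketch only (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE t USING fts3(content);
  INSERT INTO t(rowid, content) VALUES(1, 'hello world');
}
catch {
  edb transaction {
    edb eval { INSERT INTO t(rowid, content) VALUES(2, 'cruel world') }
    puts [edb eval { SELECT rowid FROM t WHERE t MATCH 'world' }] ;# inside txn -> 1 2
    error "force a rollback"
  }
}
puts [edb eval { SELECT rowid FROM t WHERE t MATCH 'world' }]     ;# after rollback -> 1
edb close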
+do_test fts3al-1.1 { + execsql "CREATE VIRTUAL TABLE t1 USING fts3(content, \x80)" +} {} + +# Additionally tests isspace() call in getToken(), and isalnum() call +# in tokenListToIdList(). +do_test fts3al-1.2 { + catch { + execsql "CREATE VIRTUAL TABLE t2 USING fts3(content, tokenize \x80)" + } + sqlite3_errmsg $DB +} "unknown tokenizer: \x80" + +# Additionally test final isalnum() in startsWith(). +do_test fts3al-1.3 { + execsql "CREATE VIRTUAL TABLE t3 USING fts3(content, tokenize\x80)" +} {} + +# The snippet-generation code has calls to isspace() which are sort of +# hard to get to. It finds convenient breakpoints by starting ~40 +# chars before and after the matched term, and scanning ~10 chars +# around that position for isspace() characters. The long word with +# embedded hi-bit chars causes one of these isspace() calls to be +# exercised. The version with a couple extra spaces should cause the +# other isspace() call to be exercised. [Both cases have been tested +# in the debugger, but I'm hoping to continue to catch it if simple +# constant changes change things slightly. +# +# The trailing and leading hi-bit chars help with code which tests for +# isspace() to coalesce multiple spaces. + +set word "\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80" +set phrase1 "$word $word $word target $word $word $word" +set phrase2 "$word $word $word target $word $word $word" + +db eval {CREATE VIRTUAL TABLE t4 USING fts3(content)} +db eval "INSERT INTO t4 (content) VALUES ('$phrase1')" +db eval "INSERT INTO t4 (content) VALUES ('$phrase2')" + +do_test fts3al-1.4 { + execsql {SELECT rowid, length(snippet(t4)) FROM t4 WHERE t4 MATCH 'target'} +} {1 111 2 117} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3am.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3am.test new file mode 100644 index 0000000..4a09cd8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3am.test @@ -0,0 +1,65 @@ +# 2007 April 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. fts3 +# DELETE handling assumed all fields were non-null. This was not +# the intention at all. +# +# $Id: fts3am.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. 
+ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(col_a, col_b); + + INSERT INTO t1(rowid, col_a, col_b) VALUES(1, 'testing', 'testing'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(2, 'only a', null); + INSERT INTO t1(rowid, col_a, col_b) VALUES(3, null, 'only b'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(4, null, null); +} + +do_test fts3am-1.0 { + execsql { + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {2 2 4} + +do_test fts3am-1.1 { + execsql { + DELETE FROM t1 WHERE rowid = 1; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {1 1 3} + +do_test fts3am-1.2 { + execsql { + DELETE FROM t1 WHERE rowid = 2; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 1 2} + +do_test fts3am-1.3 { + execsql { + DELETE FROM t1 WHERE rowid = 3; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 1} + +do_test fts3am-1.4 { + execsql { + DELETE FROM t1 WHERE rowid = 4; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3an.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3an.test new file mode 100644 index 0000000..63a5353 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3an.test @@ -0,0 +1,196 @@ +# 2007 April 26 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements tests for prefix-searching in the fts3 +# component of the SQLite library. +# +# $Id: fts3an.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# A large string to prime the pump with. +set text { + Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas + iaculis mollis ipsum. Praesent rhoncus placerat justo. Duis non quam + sed turpis posuere placerat. Curabitur et lorem in lorem porttitor + aliquet. Pellentesque bibendum tincidunt diam. Vestibulum blandit + ante nec elit. In sapien diam, facilisis eget, dictum sed, viverra + at, felis. Vestibulum magna. Sed magna dolor, vestibulum rhoncus, + ornare vel, vulputate sit amet, felis. Integer malesuada, tellus at + luctus gravida, diam nunc porta nibh, nec imperdiet massa metus eu + lectus. Aliquam nisi. Nunc fringilla nulla at lectus. Suspendisse + potenti. Cum sociis natoque penatibus et magnis dis parturient + montes, nascetur ridiculus mus. Pellentesque odio nulla, feugiat eu, + suscipit nec, consequat quis, risus. +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(c); + + INSERT INTO t1(rowid, c) VALUES(1, $text); + INSERT INTO t1(rowid, c) VALUES(2, 'Another lovely row'); +} + +# Exact match +do_test fts3an-1.1 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem'" +} {1} + +# And a prefix +do_test fts3an-1.2 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore*'" +} {1} + +# Prefix includes exact match +do_test fts3an-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem*'" +} {1} + +# Make certain everything isn't considered a prefix! +do_test fts3an-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore'" +} {} + +# Prefix across multiple rows. +do_test fts3an-1.5 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo*'" +} {1 2} + +# Likewise, with multiple hits in one document. 
+do_test fts3an-1.6 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts3an-1.7 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lov*'" +} {2} + +# * not at end is dropped. +do_test fts3an-1.8 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo *'" +} {} + +# Stand-alone * is dropped. +do_test fts3an-1.9 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '*'" +} {} + +# Phrase-query prefix. +do_test fts3an-1.10 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r*\"'" +} {2} +do_test fts3an-1.11 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r\"'" +} {} + +# Phrase query with multiple prefix matches. +do_test fts3an-1.12 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l*\"'" +} {1 2} + +# Phrase query with multiple prefix matches. +do_test fts3an-1.13 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l* row\"'" +} {2} + + + + +# Test across updates (and, by implication, deletes). + +# Version of text without "lorem". +regsub -all {[Ll]orem} $text '' ntext + +db eval { + CREATE VIRTUAL TABLE t2 USING fts3(c); + + INSERT INTO t2(rowid, c) VALUES(1, $text); + INSERT INTO t2(rowid, c) VALUES(2, 'Another lovely row'); + UPDATE t2 SET c = $ntext WHERE rowid = 1; +} + +# Can't see lorem as an exact match. +do_test fts3an-2.1 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lorem'" +} {} + +# Can't see a prefix of lorem, either. +do_test fts3an-2.2 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lore*'" +} {} + +# Can see lovely in the other document. +do_test fts3an-2.3 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lo*'" +} {2} + +# Can still see other hits. +do_test fts3an-2.4 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts3an-2.5 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lov*'" +} {2} + + + +# Test with a segment which will have multiple levels in the tree. + +# Build a big document with lots of unique terms. +set bigtext $text +foreach c {a b c d e} { + regsub -all {[A-Za-z]+} $bigtext "&$c" t + append bigtext $t +} + +# Populate a table with many copies of the big document, so that we +# can test the number of hits found. Populate $ret with the expected +# hit counts for each row. offsets() returns 4 elements for every +# hit. We'll have 6 hits for row 1, 1 for row 2, and 6*(2^5)==192 for +# $bigtext. +set ret {6 1} +db eval { + BEGIN; + CREATE VIRTUAL TABLE t3 USING fts3(c); + + INSERT INTO t3(rowid, c) VALUES(1, $text); + INSERT INTO t3(rowid, c) VALUES(2, 'Another lovely row'); +} +for {set i 0} {$i<100} {incr i} { + db eval {INSERT INTO t3(rowid, c) VALUES(3+$i, $bigtext)} + lappend ret 192 +} +db eval {COMMIT;} + +# Test that we get the expected number of hits. +do_test fts3an-3.1 { + set t {} + db eval {SELECT offsets(t3) as o FROM t3 WHERE t3 MATCH 'l*'} { + set l [llength $o] + lappend t [expr {$l/4}] + } + set t +} $ret + +# TODO(shess) It would be useful to test a couple edge cases, but I +# don't know if we have the precision to manage it from here at this +# time. Prefix hits can cross leaves, which the code above _should_ +# hit by virtue of size. There are two variations on this. If the +# tree is 2 levels high, the code will find the leaf-node extent +# directly, but if it's higher, the code will have to follow two +# separate interior branches down the tree. Both should be tested. 
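For completeness, the prefix operator exercised above is simply a trailing '*' on a term, or on the final token of a quoted phrase. A minimal sketch (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE poems USING fts3(c);
  INSERT INTO poems(rowid, c) VALUES(1, 'Lorem ipsum dolor');
  INSERT INTO poems(rowid, c) VALUES(2, 'Another lovely row');
}
puts [edb eval { SELECT rowid FROM poems WHERE poems MATCH 'lo*' }]         ;# -> 1 2
puts [edb eval { SELECT rowid FROM poems WHERE poems MATCH 'lov*' }]        ;# -> 2
puts [edb eval { SELECT rowid FROM poems WHERE poems MATCH '"lovely r*"' }] ;# -> 2
edb close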
+ +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ao.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ao.test new file mode 100644 index 0000000..c3d356e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3ao.test @@ -0,0 +1,169 @@ +# 2007 June 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ao.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +#--------------------------------------------------------------------- +# These tests, fts3ao-1.*, test that ticket #2429 is fixed. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); +} +do_test fts3ao-1.1 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE c MATCH 'four'; + } +} {1 {one four two}} +do_test fts3ao-1.2 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE b MATCH 'four'; + } +} {1 {one four}} +do_test fts3ao-1.3 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE a MATCH 'four'; + } +} {1 {one three four}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts3 table. +# +do_test fts3ao-2.1 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {t1 t1_content t1_segments t1_segdir} +do_test fts3ao-2.2 { + execsql { ALTER TABLE t1 RENAME to fts_t1; } +} {} +do_test fts3ao-2.3 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.4 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir} + +# See what happens when renaming the fts3 table fails. +# +do_test fts3ao-2.5 { + catchsql { + CREATE TABLE t1_segdir(a, b, c); + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts3ao-2.6 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.7 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} + +# See what happens when renaming the fts3 table fails inside a transaction. 
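The rename tests above depend on ALTER TABLE ... RENAME also renaming the fts3 shadow tables, and failing cleanly when one of the target names is already taken. A sketch of the successful case (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE t1 USING fts3(a, b, c);
  INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two');
  ALTER TABLE t1 RENAME TO fts_t1;
}
# All of fts_t1, fts_t1_content, fts_t1_segments and fts_t1_segdir now exist.
puts [edb eval { SELECT tbl_name FROM sqlite_master WHERE type = 'table' }]
puts [edb eval { SELECT rowid FROM fts_t1 WHERE a MATCH 'four' }] ;# -> 1
edb close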
+# +do_test fts3ao-2.8 { + execsql { + BEGIN; + INSERT INTO fts_t1(a, b, c) VALUES('one two three', 'one four', 'one two'); + } +} {} +do_test fts3ao-2.9 { + catchsql { + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts3ao-2.10 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.11 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} +do_test fts3ao-2.12 { + execsql COMMIT + execsql {SELECT a FROM fts_t1} +} {{one three four} {one two three}} +do_test fts3ao-2.12 { + execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } +} {{one three four} {one four} {one four two}} + +#------------------------------------------------------------------- +# Close, delete and reopen the database. The following test should +# be run on an initially empty db. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test fts3ao-3.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts3(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two}} + +# This test was crashing at one point. +# +do_test fts3ao-3.2 { + execsql { + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + CREATE TABLE t3(a, b, c); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two} {one three four} {one four} {one two}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts3 table in an attached +# database. +# +file delete -force test2.db test2.db-journal + +do_test fts3ao-3.1 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.t1 USING fts3(a, b, c); + INSERT INTO aux.t1(a, b, c) VALUES( + 'neung song sahm', 'neung see', 'neung see song' + ); + } +} {} + +do_test fts3ao-3.2 { + execsql { SELECT a, b, c FROM aux.t1 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts3ao-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +do_test fts3ao-3.4 { + execsql { ALTER TABLE aux.t1 RENAME TO t2 } +} {} + +do_test fts3ao-3.2 { + execsql { SELECT a, b, c FROM t2 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts3ao-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3atoken.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3atoken.test new file mode 100644 index 0000000..cf9574e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3atoken.test @@ -0,0 +1,174 @@ +# 2007 June 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the pluggable tokeniser feature of the +# FTS3 module. 
+# +# $Id: fts3atoken.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +proc escape_string {str} { + set out "" + foreach char [split $str ""] { + scan $char %c i + if {$i<=127} { + append out $char + } else { + append out [format {\x%.4x} $i] + } + } + set out +} + +#-------------------------------------------------------------------------- +# Test cases fts3token-1.* are the warm-body test for the SQL scalar +# function fts3_tokenizer(). The procedure is as follows: +# +# 1: Verify that there is no such fts3 tokenizer as 'blah'. +# +# 2: Query for the built-in tokenizer 'simple'. Insert a copy of the +# retrieved value as tokenizer 'blah'. +# +# 3: Test that the value returned for tokenizer 'blah' is now the +# same as that retrieved for 'simple'. +# +# 4: Test that it is now possible to create an fts3 table using +# tokenizer 'blah' (it was not possible in step 1). +# +# 5: Test that the table created to use tokenizer 'blah' is usable. +# +do_test fts3token-1.1 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize blah); + } +} {1 {unknown tokenizer: blah}} +do_test fts3token-1.2 { + execsql { + SELECT fts3_tokenizer('blah', fts3_tokenizer('simple')) IS NULL; + } +} {0} +do_test fts3token-1.3 { + execsql { + SELECT fts3_tokenizer('blah') == fts3_tokenizer('simple'); + } +} {1} +do_test fts3token-1.4 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize blah); + } +} {0 {}} +do_test fts3token-1.5 { + execsql { + INSERT INTO t1(content) VALUES('There was movement at the station'); + INSERT INTO t1(content) VALUES('For the word has passed around'); + INSERT INTO t1(content) VALUES('That the colt from ol regret had got away'); + SELECT content FROM t1 WHERE content MATCH 'movement' + } +} {{There was movement at the station}} + +#-------------------------------------------------------------------------- +# Test cases fts3token-2.* test error cases in the scalar function based +# API for getting and setting tokenizers. +# +do_test fts3token-2.1 { + catchsql { + SELECT fts3_tokenizer('nosuchtokenizer'); + } +} {1 {unknown tokenizer: nosuchtokenizer}} + +#-------------------------------------------------------------------------- +# Test cases fts3token-3.* test the three built-in tokenizers with a +# simple input string via the built-in test function. This is as much +# to test the test function as the tokenizer implementations. +# +do_test fts3token-3.1 { + execsql { + SELECT fts3_tokenizer_test('simple', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +do_test fts3token-3.2 { + execsql { + SELECT fts3_tokenizer_test('porter', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +ifcapable icu { + do_test fts3token-3.3 { + execsql { + SELECT fts3_tokenizer_test('icu', 'I don''t see how'); + } + } {{0 i I 1 don't don't 2 see see 3 how how}} +} + +#-------------------------------------------------------------------------- +# Test cases fts3token-4.* test the ICU tokenizer. In practice, this +# tokenizer only has two modes - "thai" and "everybody else". Some other +# Asian languages (Lao, Khmer etc.) require the same special treatment as +# Thai, but ICU doesn't support them yet. 
+# +ifcapable icu { + + proc do_icu_test {name locale input output} { + set ::out [db eval { SELECT fts3_tokenizer_test('icu', $locale, $input) }] + do_test $name { + lindex $::out 0 + } $output + } + + do_icu_test fts3token-4.1 en_US {} {} + do_icu_test fts3token-4.2 en_US {Test cases fts3} [list \ + 0 test Test 1 cases cases 2 fts3 fts3 + ] + + # The following test shows that ICU is smart enough to recognise + # Thai chararacters, even when the locale is set to English/United + # States. + # + set input "\u0e2d\u0e30\u0e44\u0e23\u0e19\u0e30\u0e04\u0e23\u0e31\u0e1a" + set output "0 \u0e2d\u0e30\u0e44\u0e23 \u0e2d\u0e30\u0e44\u0e23 " + append output "1 \u0e19\u0e30 \u0e19\u0e30 " + append output "2 \u0e04\u0e23\u0e31\u0e1a \u0e04\u0e23\u0e31\u0e1a" + + do_icu_test fts3token-4.3 th_TH $input $output + do_icu_test fts3token-4.4 en_US $input $output + + # ICU handles an unknown locale by falling back to the default. + # So this is not an error. + do_icu_test fts3token-4.5 MiddleOfTheOcean $input $output + + set longtoken "AReallyReallyLongTokenOneThatWillSurelyRequire" + append longtoken "AReallocInTheIcuTokenizerCode" + + set input "short tokens then " + append input $longtoken + set output "0 short short " + append output "1 tokens tokens " + append output "2 then then " + append output "3 [string tolower $longtoken] $longtoken" + + do_icu_test fts3token-4.6 MiddleOfTheOcean $input $output + do_icu_test fts3token-4.7 th_TH $input $output + do_icu_test fts3token-4.8 en_US $input $output +} + +do_test fts3token-internal { + execsql { SELECT fts3_tokenizer_internal_test() } +} {ok} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fts3b.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3b.test new file mode 100644 index 0000000..17ee0da --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fts3b.test @@ -0,0 +1,218 @@ +# 2007 August 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# script tests for the fts2 rowid-versus-vacuum problem (ticket #2566). +# +# $Id: fts3b.test,v 1.3 2007/09/13 18:14:49 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (c) VALUES('this is a test'); + INSERT INTO t1 (c) VALUES('that was a test'); + INSERT INTO t1 (c) VALUES('this is fun'); + DELETE FROM t1 WHERE c = 'that was a test'; +} + +# Baseline test. +do_test fts3b-1.1 { + execsql { + SELECT rowid FROM t1 WHERE c MATCH 'this'; + } +} {1 3} + +db eval {VACUUM} + +# The VACUUM renumbered the t1_content table in fts2, which breaks +# this. +do_test fts3b-1.2 { + execsql { + SELECT rowid FROM t1 WHERE c MATCH 'this'; + } +} {1 3} + +# The t2 table is unfortunately pretty contrived. We need documents +# that are bigger than ROOT_MAX (1024) to force segments out of the +# segdir and into %_segments. We also need to force segment merging +# to generate a hole in the %_segments table, which needs more than 16 +# docs. 
Beyond that, to test correct operation of BLOCK_SELECT_STMT, +# we need to merge a mult-level tree, which is where the 10,000 comes +# from. Which is slow, thus the set of transactions, with the 500 +# being a number such that 10,000/500 > 16. +set text { + Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas + iaculis mollis ipsum. Praesent rhoncus placerat justo. Duis non quam + sed turpis posuere placerat. Curabitur et lorem in lorem porttitor + aliquet. Pellentesque bibendum tincidunt diam. Vestibulum blandit + ante nec elit. In sapien diam, facilisis eget, dictum sed, viverra + at, felis. Vestibulum magna. Sed magna dolor, vestibulum rhoncus, + ornare vel, vulputate sit amet, felis. Integer malesuada, tellus at + luctus gravida, diam nunc porta nibh, nec imperdiet massa metus eu + lectus. Aliquam nisi. Nunc fringilla nulla at lectus. Suspendisse + potenti. Cum sociis natoque penatibus et magnis dis parturient + montes, nascetur ridiculus mus. Pellentesque odio nulla, feugiat eu, + suscipit nec, consequat quis, risus. +} +append text $text + +db eval {CREATE VIRTUAL TABLE t2 USING fts3(c)} +set res {} +db eval {BEGIN} +for {set ii 0} {$ii<10000} {incr ii} { + db eval {INSERT INTO t2 (c) VALUES ($text)} + lappend res [expr {$ii+1}] + if {($ii%500)==0} { + db eval { + COMMIT; + BEGIN; + } + } +} +db eval {COMMIT} + +do_test fts3b-2.1 { + execsql { + SELECT rowid FROM t2 WHERE c MATCH 'lorem'; + } +} $res + +db eval {VACUUM} + +# The VACUUM renumbered the t2_segment table in fts2, which would +# break the following. +do_test fts3b-2.2 { + execsql { + SELECT rowid FROM t2 WHERE c MATCH 'lorem'; + } +} $res + +# Since fts3 is already an API break, I've marked the table-named +# column HIDDEN. + +db eval { + CREATE VIRTUAL TABLE t3 USING fts3(c); + INSERT INTO t3 (c) VALUES('this is a test'); + INSERT INTO t3 (c) VALUES('that was a test'); + INSERT INTO t3 (c) VALUES('this is fun'); + DELETE FROM t3 WHERE c = 'that was a test'; +} + +# Test that the table-named column still works. +do_test fts3b-3.1 { + execsql { + SELECT snippet(t3) FROM t3 WHERE t3 MATCH 'test'; + } +} {{this is a test}} + +# Test that the column doesn't appear when selecting all columns. +do_test fts3b-3.2 { + execsql { + SELECT * FROM t3 WHERE rowid = 1; + } +} {{this is a test}} + +# Test that the column doesn't conflict with inserts that don't name +# columns. +do_test fts3b-3.3 { + execsql { + INSERT INTO t3 VALUES ('another test'); + } +} {} + +# fts3 adds a new implicit column, docid, which acts as an alias for +# rowid. + +db eval { + CREATE VIRTUAL TABLE t4 USING fts3(c); + INSERT INTO t4 (c) VALUES('this is a test'); + INSERT INTO t4 (c) VALUES('that was a test'); + INSERT INTO t4 (c) VALUES('this is fun'); + DELETE FROM t4 WHERE c = 'that was a test'; +} + +# Test that docid is present and identical to rowid. +do_test fts3b-4.1 { + execsql { + SELECT rowid FROM t4 WHERE rowid <> docid; + } +} {} + +# Test that docid is hidden. +do_test fts3b-4.2 { + execsql { + SELECT * FROM t4 WHERE rowid = 1; + } +} {{this is a test}} + +# Test that docid can be selected. +do_test fts3b-4.3 { + execsql { + SELECT docid, * FROM t4 WHERE rowid = 1; + } +} {1 {this is a test}} + +# Test that docid can be used in WHERE. +do_test fts3b-4.4 { + execsql { + SELECT docid, * FROM t4 WHERE docid = 1; + } +} {1 {this is a test}} + +# Test that the column doesn't conflict with inserts that don't name +# columns. [Yes, this is the same as fts3b-3.3, here just in case the +# goals of that test change.] 
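The docid column exercised above is an alias for rowid: it can be selected explicitly, used in WHERE clauses and supplied on INSERT, but it stays hidden from SELECT *. A sketch (FTS3-enabled build assumed, names illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE VIRTUAL TABLE t USING fts3(c);
  INSERT INTO t(docid, c) VALUES(10, 'yet another test');
}
puts [edb eval { SELECT docid, rowid, * FROM t WHERE docid = 10 }] ;# -> 10 10 {yet another test}
puts [edb eval { SELECT * FROM t WHERE t MATCH 'test' }]           ;# docid stays hidden
edb close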
+do_test fts3b-4.5 { + execsql { + INSERT INTO t4 VALUES ('another test'); + } +} {} + +# Test that the docid can be forced on insert. +do_test fts3b-4.6 { + execsql { + INSERT INTO t4 (docid, c) VALUES (10, 'yet another test'); + SELECT * FROM t4 WHERE docid = 10; + } +} {{yet another test}} + +# Test that rowid can also be forced. +do_test fts3b-4.7 { + execsql { + INSERT INTO t4 (docid, c) VALUES (12, 'still testing'); + SELECT * FROM t4 WHERE docid = 12; + } +} {{still testing}} + +# If an insert tries to set both docid and rowid, require an error. +do_test fts3b-4.8 { + catchsql { + INSERT INTO t4 (rowid, docid, c) VALUES (14, 15, 'bad test'); + SELECT * FROM t4 WHERE docid = 14; + } +} {1 {SQL logic error or missing database}} + +# Don't allow update of docid, to match rowid behaviour. +do_test fts3b-4.9 { + catchsql { + UPDATE t4 SET docid = 14 WHERE docid = 12; + } +} {1 {SQL logic error or missing database}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/func.test b/libraries/sqlite/unix/sqlite-3.5.1/test/func.test new file mode 100644 index 0000000..3149c54 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/func.test @@ -0,0 +1,886 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing built-in functions. +# +# $Id: func.test,v 1.69 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table to work with. 
+# +do_test func-0.0 { + execsql {CREATE TABLE tbl1(t1 text)} + foreach word {this program is free software} { + execsql "INSERT INTO tbl1 VALUES('$word')" + } + execsql {SELECT t1 FROM tbl1 ORDER BY t1} +} {free is program software this} +do_test func-0.1 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(NULL); + INSERT INTO t2 VALUES(345); + INSERT INTO t2 VALUES(NULL); + INSERT INTO t2 VALUES(67890); + SELECT * FROM t2; + } +} {1 {} 345 {} 67890} + +# Check out the length() function +# +do_test func-1.0 { + execsql {SELECT length(t1) FROM tbl1 ORDER BY t1} +} {4 2 7 8 4} +do_test func-1.1 { + set r [catch {execsql {SELECT length(*) FROM tbl1 ORDER BY t1}} msg] + lappend r $msg +} {1 {wrong number of arguments to function length()}} +do_test func-1.2 { + set r [catch {execsql {SELECT length(t1,5) FROM tbl1 ORDER BY t1}} msg] + lappend r $msg +} {1 {wrong number of arguments to function length()}} +do_test func-1.3 { + execsql {SELECT length(t1), count(*) FROM tbl1 GROUP BY length(t1) + ORDER BY length(t1)} +} {2 1 4 2 7 1 8 1} +do_test func-1.4 { + execsql {SELECT coalesce(length(a),-1) FROM t2} +} {1 -1 3 -1 5} + +# Check out the substr() function +# +do_test func-2.0 { + execsql {SELECT substr(t1,1,2) FROM tbl1 ORDER BY t1} +} {fr is pr so th} +do_test func-2.1 { + execsql {SELECT substr(t1,2,1) FROM tbl1 ORDER BY t1} +} {r s r o h} +do_test func-2.2 { + execsql {SELECT substr(t1,3,3) FROM tbl1 ORDER BY t1} +} {ee {} ogr ftw is} +do_test func-2.3 { + execsql {SELECT substr(t1,-1,1) FROM tbl1 ORDER BY t1} +} {e s m e s} +do_test func-2.4 { + execsql {SELECT substr(t1,-1,2) FROM tbl1 ORDER BY t1} +} {e s m e s} +do_test func-2.5 { + execsql {SELECT substr(t1,-2,1) FROM tbl1 ORDER BY t1} +} {e i a r i} +do_test func-2.6 { + execsql {SELECT substr(t1,-2,2) FROM tbl1 ORDER BY t1} +} {ee is am re is} +do_test func-2.7 { + execsql {SELECT substr(t1,-4,2) FROM tbl1 ORDER BY t1} +} {fr {} gr wa th} +do_test func-2.8 { + execsql {SELECT t1 FROM tbl1 ORDER BY substr(t1,2,20)} +} {this software free program is} +do_test func-2.9 { + execsql {SELECT substr(a,1,1) FROM t2} +} {1 {} 3 {} 6} +do_test func-2.10 { + execsql {SELECT substr(a,2,2) FROM t2} +} {{} {} 45 {} 78} + +# Only do the following tests if TCL has UTF-8 capabilities +# +if {"\u1234"!="u1234"} { + +# Put some UTF-8 characters in the database +# +do_test func-3.0 { + execsql {DELETE FROM tbl1} + foreach word "contains UTF-8 characters hi\u1234ho" { + execsql "INSERT INTO tbl1 VALUES('$word')" + } + execsql {SELECT t1 FROM tbl1 ORDER BY t1} +} "UTF-8 characters contains hi\u1234ho" +do_test func-3.1 { + execsql {SELECT length(t1) FROM tbl1 ORDER BY t1} +} {5 10 8 5} +do_test func-3.2 { + execsql {SELECT substr(t1,1,2) FROM tbl1 ORDER BY t1} +} {UT ch co hi} +do_test func-3.3 { + execsql {SELECT substr(t1,1,3) FROM tbl1 ORDER BY t1} +} "UTF cha con hi\u1234" +do_test func-3.4 { + execsql {SELECT substr(t1,2,2) FROM tbl1 ORDER BY t1} +} "TF ha on i\u1234" +do_test func-3.5 { + execsql {SELECT substr(t1,2,3) FROM tbl1 ORDER BY t1} +} "TF- har ont i\u1234h" +do_test func-3.6 { + execsql {SELECT substr(t1,3,2) FROM tbl1 ORDER BY t1} +} "F- ar nt \u1234h" +do_test func-3.7 { + execsql {SELECT substr(t1,4,2) FROM tbl1 ORDER BY t1} +} "-8 ra ta ho" +do_test func-3.8 { + execsql {SELECT substr(t1,-1,1) FROM tbl1 ORDER BY t1} +} "8 s s o" +do_test func-3.9 { + execsql {SELECT substr(t1,-3,2) FROM tbl1 ORDER BY t1} +} "F- er in \u1234h" +do_test func-3.10 { + execsql {SELECT substr(t1,-4,3) FROM tbl1 ORDER BY 
t1} +} "TF- ter ain i\u1234h" +do_test func-3.99 { + execsql {DELETE FROM tbl1} + foreach word {this program is free software} { + execsql "INSERT INTO tbl1 VALUES('$word')" + } + execsql {SELECT t1 FROM tbl1} +} {this program is free software} + +} ;# End \u1234!=u1234 + +# Test the abs() and round() functions. +# +do_test func-4.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,1.2345678901234,-12345.67890); + INSERT INTO t1 VALUES(3,-2,-5); + } + catchsql {SELECT abs(a,b) FROM t1} +} {1 {wrong number of arguments to function abs()}} +do_test func-4.2 { + catchsql {SELECT abs() FROM t1} +} {1 {wrong number of arguments to function abs()}} +do_test func-4.3 { + catchsql {SELECT abs(b) FROM t1 ORDER BY a} +} {0 {2 1.2345678901234 2}} +do_test func-4.4 { + catchsql {SELECT abs(c) FROM t1 ORDER BY a} +} {0 {3 12345.6789 5}} +do_test func-4.4.1 { + execsql {SELECT abs(a) FROM t2} +} {1 {} 345 {} 67890} +do_test func-4.4.2 { + execsql {SELECT abs(t1) FROM tbl1} +} {0.0 0.0 0.0 0.0 0.0} + +do_test func-4.5 { + catchsql {SELECT round(a,b,c) FROM t1} +} {1 {wrong number of arguments to function round()}} +do_test func-4.6 { + catchsql {SELECT round(b,2) FROM t1 ORDER BY b} +} {0 {-2.0 1.23 2.0}} +do_test func-4.7 { + catchsql {SELECT round(b,0) FROM t1 ORDER BY a} +} {0 {2.0 1.0 -2.0}} +do_test func-4.8 { + catchsql {SELECT round(c) FROM t1 ORDER BY a} +} {0 {3.0 -12346.0 -5.0}} +do_test func-4.9 { + catchsql {SELECT round(c,a) FROM t1 ORDER BY a} +} {0 {3.0 -12345.68 -5.0}} +do_test func-4.10 { + catchsql {SELECT 'x' || round(c,a) || 'y' FROM t1 ORDER BY a} +} {0 {x3.0y x-12345.68y x-5.0y}} +do_test func-4.11 { + catchsql {SELECT round() FROM t1 ORDER BY a} +} {1 {wrong number of arguments to function round()}} +do_test func-4.12 { + execsql {SELECT coalesce(round(a,2),'nil') FROM t2} +} {1.0 nil 345.0 nil 67890.0} +do_test func-4.13 { + execsql {SELECT round(t1,2) FROM tbl1} +} {0.0 0.0 0.0 0.0 0.0} +do_test func-4.14 { + execsql {SELECT typeof(round(5.1,1));} +} {real} +do_test func-4.15 { + execsql {SELECT typeof(round(5.1));} +} {real} + + +# Test the upper() and lower() functions +# +do_test func-5.1 { + execsql {SELECT upper(t1) FROM tbl1} +} {THIS PROGRAM IS FREE SOFTWARE} +do_test func-5.2 { + execsql {SELECT lower(upper(t1)) FROM tbl1} +} {this program is free software} +do_test func-5.3 { + execsql {SELECT upper(a), lower(a) FROM t2} +} {1 1 {} {} 345 345 {} {} 67890 67890} +ifcapable !icu { + do_test func-5.4 { + catchsql {SELECT upper(a,5) FROM t2} + } {1 {wrong number of arguments to function upper()}} +} +do_test func-5.5 { + catchsql {SELECT upper(*) FROM t2} +} {1 {wrong number of arguments to function upper()}} + +# Test the coalesce() and nullif() functions +# +do_test func-6.1 { + execsql {SELECT coalesce(a,'xyz') FROM t2} +} {1 xyz 345 xyz 67890} +do_test func-6.2 { + execsql {SELECT coalesce(upper(a),'nil') FROM t2} +} {1 nil 345 nil 67890} +do_test func-6.3 { + execsql {SELECT coalesce(nullif(1,1),'nil')} +} {nil} +do_test func-6.4 { + execsql {SELECT coalesce(nullif(1,2),'nil')} +} {1} +do_test func-6.5 { + execsql {SELECT coalesce(nullif(1,NULL),'nil')} +} {1} + + +# Test the last_insert_rowid() function +# +do_test func-7.1 { + execsql {SELECT last_insert_rowid()} +} [db last_insert_rowid] + +# Tests for aggregate functions and how they handle NULLs. 
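As the comment above notes, the aggregates treat NULL specially: count(column) and avg() skip NULL values, while count(*) counts every row. A small sketch of the distinction, mirroring the t2 fixture used in this file (plain SQL, no FTS needed; the command name edb is illustrative):

package require sqlite3
sqlite3 edb :memory:
edb eval {
  CREATE TABLE t2(a);
  INSERT INTO t2 VALUES(1);
  INSERT INTO t2 VALUES(NULL);
  INSERT INTO t2 VALUES(345);
}
# count(a) skips the NULL row, count(*) does not, avg(a) = (1+345)/2.
puts [edb eval { SELECT count(a), count(*), avg(a) FROM t2 }] ;# -> 2 3 173.0
edb close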
+# +do_test func-8.1 { + ifcapable explain { + execsql {EXPLAIN SELECT sum(a) FROM t2;} + } + execsql { + SELECT sum(a), count(a), round(avg(a),2), min(a), max(a), count(*) FROM t2; + } +} {68236 3 22745.33 1 67890 5} +do_test func-8.2 { + execsql { + SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t2; + } +} {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} + +ifcapable tempdb { + do_test func-8.3 { + execsql { + CREATE TEMP TABLE t3 AS SELECT a FROM t2 ORDER BY a DESC; + SELECT min('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; + } + } {z+1abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} +} else { + do_test func-8.3 { + execsql { + CREATE TABLE t3 AS SELECT a FROM t2 ORDER BY a DESC; + SELECT min('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; + } + } {z+1abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} +} +do_test func-8.4 { + execsql { + SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; + } +} {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} + +# How do you test the random() function in a meaningful, deterministic way? +# +do_test func-9.1 { + execsql { + SELECT random() is not null; + } +} {1} +do_test func-9.2 { + execsql { + SELECT typeof(random()); + } +} {integer} +do_test func-9.3 { + execsql { + SELECT randomblob(32) is not null; + } +} {1} +do_test func-9.4 { + execsql { + SELECT typeof(randomblob(32)); + } +} {blob} +do_test func-9.5 { + execsql { + SELECT length(randomblob(32)), length(randomblob(-5)), + length(randomblob(2000)) + } +} {32 1 2000} + +# The "hex()" function was added in order to be able to render blobs +# generated by randomblob(). So this seems like a good place to test +# hex(). +# +ifcapable bloblit { + do_test func-9.10 { + execsql {SELECT hex(x'00112233445566778899aAbBcCdDeEfF')} + } {00112233445566778899AABBCCDDEEFF} +} +set encoding [db one {PRAGMA encoding}] +if {$encoding=="UTF-16le"} { + do_test func-9.11-utf16le { + execsql {SELECT hex(replace('abcdefg','ef','12'))} + } {6100620063006400310032006700} + do_test func-9.12-utf16le { + execsql {SELECT hex(replace('abcdefg','','12'))} + } {{}} + breakpoint + do_test func-9.13-utf16le { + execsql {SELECT hex(replace('aabcdefg','a','aaa'))} + } {610061006100610061006100620063006400650066006700} +} elseif {$encoding=="UTF-8"} { + do_test func-9.11-utf8 { + execsql {SELECT hex(replace('abcdefg','ef','12'))} + } {61626364313267} + do_test func-9.12-utf8 { + execsql {SELECT hex(replace('abcdefg','','12'))} + } {{}} + breakpoint + do_test func-9.13-utf8 { + execsql {SELECT hex(replace('aabcdefg','a','aaa'))} + } {616161616161626364656667} +} + +# Use the "sqlite_register_test_function" TCL command which is part of +# the text fixture in order to verify correct operation of some of +# the user-defined SQL function APIs that are not used by the built-in +# functions. 
+# +set ::DB [sqlite3_connection_pointer db] +sqlite_register_test_function $::DB testfunc +do_test func-10.1 { + catchsql { + SELECT testfunc(NULL,NULL); + } +} {1 {first argument should be one of: int int64 string double null value}} +do_test func-10.2 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'int', 1234 + ); + } +} {1234} +do_test func-10.3 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'string', NULL + ); + } +} {{}} +do_test func-10.4 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234 + ); + } +} {1.234} +do_test func-10.5 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'int', 1234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'string', NULL, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'int', 1234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'string', NULL, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234 + ); + } +} {1.234} + +# Test the built-in sqlite_version(*) SQL function. +# +do_test func-11.1 { + execsql { + SELECT sqlite_version(*); + } +} [sqlite3 -version] + +# Test that destructors passed to sqlite3 by calls to sqlite3_result_text() +# etc. are called. These tests use two special user-defined functions +# (implemented in func.c) only available in test builds. +# +# Function test_destructor() takes one argument and returns a copy of the +# text form of that argument. A destructor is associated with the return +# value. Function test_destructor_count() returns the number of outstanding +# destructor calls for values returned by test_destructor(). +# +do_test func-12.1 { + execsql { + SELECT test_destructor('hello world'), test_destructor_count(); + } +} {{hello world} 1} +do_test func-12.2 { + execsql { + SELECT test_destructor_count(); + } +} {0} +do_test func-12.3 { + execsql { + SELECT test_destructor('hello')||' world', test_destructor_count(); + } +} {{hello world} 0} +do_test func-12.4 { + execsql { + SELECT test_destructor_count(); + } +} {0} +do_test func-12.5 { + execsql { + CREATE TABLE t4(x); + INSERT INTO t4 VALUES(test_destructor('hello')); + INSERT INTO t4 VALUES(test_destructor('world')); + SELECT min(test_destructor(x)), max(test_destructor(x)) FROM t4; + } +} {hello world} +do_test func-12.6 { + execsql { + SELECT test_destructor_count(); + } +} {0} +do_test func-12.7 { + execsql { + DROP TABLE t4; + } +} {} + +# Test that the auxdata API for scalar functions works. This test uses +# a special user-defined function only available in test builds, +# test_auxdata(). Function test_auxdata() takes any number of arguments. 
+do_test func-13.1 { + execsql { + SELECT test_auxdata('hello world'); + } +} {0} + +do_test func-13.2 { + execsql { + CREATE TABLE t4(a, b); + INSERT INTO t4 VALUES('abc', 'def'); + INSERT INTO t4 VALUES('ghi', 'jkl'); + } +} {} +do_test func-13.3 { + execsql { + SELECT test_auxdata('hello world') FROM t4; + } +} {0 1} +do_test func-13.4 { + execsql { + SELECT test_auxdata('hello world', 123) FROM t4; + } +} {{0 0} {1 1}} +do_test func-13.5 { + execsql { + SELECT test_auxdata('hello world', a) FROM t4; + } +} {{0 0} {1 0}} +do_test func-13.6 { + execsql { + SELECT test_auxdata('hello'||'world', a) FROM t4; + } +} {{0 0} {1 0}} + +# Test that auxilary data is preserved between calls for SQL variables. +do_test func-13.7 { + set DB [sqlite3_connection_pointer db] + set sql "SELECT test_auxdata( ? , a ) FROM t4;" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_bind_text $STMT 1 hello -1 + set res [list] + while { "SQLITE_ROW"==[sqlite3_step $STMT] } { + lappend res [sqlite3_column_text $STMT 0] + } + lappend res [sqlite3_finalize $STMT] +} {{0 0} {1 0} SQLITE_OK} + +# Make sure that a function with a very long name is rejected +do_test func-14.1 { + catch { + db function [string repeat X 254] {return "hello"} + } +} {0} +do_test func-14.2 { + catch { + db function [string repeat X 256] {return "hello"} + } +} {1} + +do_test func-15.1 { + catchsql { + select test_error(NULL); + } +} {1 {}} + +# Test the quote function for BLOB and NULL values. +do_test func-16.1 { + execsql { + CREATE TABLE tbl2(a, b); + } + set STMT [sqlite3_prepare $::DB "INSERT INTO tbl2 VALUES(?, ?)" -1 TAIL] + sqlite3_bind_blob $::STMT 1 abc 3 + sqlite3_step $::STMT + sqlite3_finalize $::STMT + execsql { + SELECT quote(a), quote(b) FROM tbl2; + } +} {X'616263' NULL} + +# Correctly handle function error messages that include %. Ticket #1354 +# +do_test func-17.1 { + proc testfunc1 args {error "Error %d with %s percents %p"} + db function testfunc1 ::testfunc1 + catchsql { + SELECT testfunc1(1,2,3); + } +} {1 {Error %d with %s percents %p}} + +# The SUM function should return integer results when all inputs are integer. +# +do_test func-18.1 { + execsql { + CREATE TABLE t5(x); + INSERT INTO t5 VALUES(1); + INSERT INTO t5 VALUES(-99); + INSERT INTO t5 VALUES(10000); + SELECT sum(x) FROM t5; + } +} {9902} +do_test func-18.2 { + execsql { + INSERT INTO t5 VALUES(0.0); + SELECT sum(x) FROM t5; + } +} {9902.0} + +# The sum of nothing is NULL. But the sum of all NULLs is NULL. +# +# The TOTAL of nothing is 0.0. +# +do_test func-18.3 { + execsql { + DELETE FROM t5; + SELECT sum(x), total(x) FROM t5; + } +} {{} 0.0} +do_test func-18.4 { + execsql { + INSERT INTO t5 VALUES(NULL); + SELECT sum(x), total(x) FROM t5 + } +} {{} 0.0} +do_test func-18.5 { + execsql { + INSERT INTO t5 VALUES(NULL); + SELECT sum(x), total(x) FROM t5 + } +} {{} 0.0} +do_test func-18.6 { + execsql { + INSERT INTO t5 VALUES(123); + SELECT sum(x), total(x) FROM t5 + } +} {123 123.0} + +# Ticket #1664, #1669, #1670, #1674: An integer overflow on SUM causes +# an error. The non-standard TOTAL() function continues to give a helpful +# result. 
+# +do_test func-18.10 { + execsql { + CREATE TABLE t6(x INTEGER); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 VALUES(1<<62); + SELECT sum(x) - ((1<<62)+1) from t6; + } +} 0 +do_test func-18.11 { + execsql { + SELECT typeof(sum(x)) FROM t6 + } +} integer +do_test func-18.12 { + catchsql { + INSERT INTO t6 VALUES(1<<62); + SELECT sum(x) - ((1<<62)*2.0+1) from t6; + } +} {1 {integer overflow}} +do_test func-18.13 { + execsql { + SELECT total(x) - ((1<<62)*2.0+1) FROM t6 + } +} 0.0 +do_test func-18.14 { + execsql { + SELECT sum(-9223372036854775805); + } +} -9223372036854775805 + +ifcapable compound&&subquery { + +do_test func-18.15 { + catchsql { + SELECT sum(x) FROM + (SELECT 9223372036854775807 AS x UNION ALL + SELECT 10 AS x); + } +} {1 {integer overflow}} +do_test func-18.16 { + catchsql { + SELECT sum(x) FROM + (SELECT 9223372036854775807 AS x UNION ALL + SELECT -10 AS x); + } +} {0 9223372036854775797} +do_test func-18.17 { + catchsql { + SELECT sum(x) FROM + (SELECT -9223372036854775807 AS x UNION ALL + SELECT 10 AS x); + } +} {0 -9223372036854775797} +do_test func-18.18 { + catchsql { + SELECT sum(x) FROM + (SELECT -9223372036854775807 AS x UNION ALL + SELECT -10 AS x); + } +} {1 {integer overflow}} +do_test func-18.19 { + catchsql { + SELECT sum(x) FROM (SELECT 9 AS x UNION ALL SELECT -10 AS x); + } +} {0 -1} +do_test func-18.20 { + catchsql { + SELECT sum(x) FROM (SELECT -9 AS x UNION ALL SELECT 10 AS x); + } +} {0 1} +do_test func-18.21 { + catchsql { + SELECT sum(x) FROM (SELECT -10 AS x UNION ALL SELECT 9 AS x); + } +} {0 -1} +do_test func-18.22 { + catchsql { + SELECT sum(x) FROM (SELECT 10 AS x UNION ALL SELECT -9 AS x); + } +} {0 1} + +} ;# ifcapable compound&&subquery + +# Integer overflow on abs() +# +do_test func-18.31 { + catchsql { + SELECT abs(-9223372036854775807); + } +} {0 9223372036854775807} +do_test func-18.32 { + catchsql { + SELECT abs(-9223372036854775807-1); + } +} {1 {integer overflow}} + +# The MATCH function exists but is only a stub and always throws an error. +# +do_test func-19.1 { + execsql { + SELECT match(a,b) FROM t1 WHERE 0; + } +} {} +do_test func-19.2 { + catchsql { + SELECT 'abc' MATCH 'xyz'; + } +} {1 {unable to use function MATCH in the requested context}} +do_test func-19.3 { + catchsql { + SELECT 'abc' NOT MATCH 'xyz'; + } +} {1 {unable to use function MATCH in the requested context}} +do_test func-19.4 { + catchsql { + SELECT match(1,2,3); + } +} {1 {wrong number of arguments to function match()}} + +# Soundex tests. +# +if {![catch {db eval {SELECT soundex('hello')}}]} { + set i 0 + foreach {name sdx} { + euler E460 + EULER E460 + Euler E460 + ellery E460 + gauss G200 + ghosh G200 + hilbert H416 + Heilbronn H416 + knuth K530 + kant K530 + Lloyd L300 + LADD L300 + Lukasiewicz L222 + Lissajous L222 + A A000 + 12345 ?000 + } { + incr i + do_test func-20.$i { + execsql {SELECT soundex($name)} + } $sdx + } +} + +# Tests of the REPLACE function. 
+# +do_test func-21.1 { + catchsql { + SELECT replace(1,2); + } +} {1 {wrong number of arguments to function replace()}} +do_test func-21.2 { + catchsql { + SELECT replace(1,2,3,4); + } +} {1 {wrong number of arguments to function replace()}} +do_test func-21.3 { + execsql { + SELECT typeof(replace("This is the main test string", NULL, "ALT")); + } +} {null} +do_test func-21.4 { + execsql { + SELECT typeof(replace(NULL, "main", "ALT")); + } +} {null} +do_test func-21.5 { + execsql { + SELECT typeof(replace("This is the main test string", "main", NULL)); + } +} {null} +do_test func-21.6 { + execsql { + SELECT replace("This is the main test string", "main", "ALT"); + } +} {{This is the ALT test string}} +do_test func-21.7 { + execsql { + SELECT replace("This is the main test string", "main", "larger-main"); + } +} {{This is the larger-main test string}} +do_test func-21.8 { + execsql { + SELECT replace("aaaaaaa", "a", "0123456789"); + } +} {0123456789012345678901234567890123456789012345678901234567890123456789} + +ifcapable tclvar { + do_test func-21.9 { + # Attempt to exploit a buffer-overflow that at one time existed + # in the REPLACE function. + set ::str "[string repeat A 29998]CC[string repeat A 35537]" + set ::rep [string repeat B 65536] + execsql { + SELECT LENGTH(REPLACE($::str, 'C', $::rep)); + } + } [expr 29998 + 2*65536 + 35537] +} + +# Tests for the TRIM, LTRIM and RTRIM functions. +# +do_test func-22.1 { + catchsql {SELECT trim(1,2,3)} +} {1 {wrong number of arguments to function trim()}} +do_test func-22.2 { + catchsql {SELECT ltrim(1,2,3)} +} {1 {wrong number of arguments to function ltrim()}} +do_test func-22.3 { + catchsql {SELECT rtrim(1,2,3)} +} {1 {wrong number of arguments to function rtrim()}} +do_test func-22.4 { + execsql {SELECT trim(' hi ');} +} {hi} +do_test func-22.5 { + execsql {SELECT ltrim(' hi ');} +} {{hi }} +do_test func-22.6 { + execsql {SELECT rtrim(' hi ');} +} {{ hi}} +do_test func-22.7 { + execsql {SELECT trim(' hi ','xyz');} +} {{ hi }} +do_test func-22.8 { + execsql {SELECT ltrim(' hi ','xyz');} +} {{ hi }} +do_test func-22.9 { + execsql {SELECT rtrim(' hi ','xyz');} +} {{ hi }} +do_test func-22.10 { + execsql {SELECT trim('xyxzy hi zzzy','xyz');} +} {{ hi }} +do_test func-22.11 { + execsql {SELECT ltrim('xyxzy hi zzzy','xyz');} +} {{ hi zzzy}} +do_test func-22.12 { + execsql {SELECT rtrim('xyxzy hi zzzy','xyz');} +} {{xyxzy hi }} +do_test func-22.13 { + execsql {SELECT trim(' hi ','');} +} {{ hi }} +if {[db one {PRAGMA encoding}]=="UTF-8"} { + do_test func-22.14 { + execsql {SELECT hex(trim(x'c280e1bfbff48fbfbf6869',x'6162e1bfbfc280'))} + } {F48FBFBF6869} + do_test func-22.15 { + execsql {SELECT hex(trim(x'6869c280e1bfbff48fbfbf61', + x'6162e1bfbfc280f48fbfbf'))} + } {6869} + do_test func-22.16 { + execsql {SELECT hex(trim(x'ceb1ceb2ceb3',x'ceb1'));} + } {CEB2CEB3} +} +do_test func-22.20 { + execsql {SELECT typeof(trim(NULL));} +} {null} +do_test func-22.21 { + execsql {SELECT typeof(trim(NULL,'xyz'));} +} {null} +do_test func-22.22 { + execsql {SELECT typeof(trim('hello',NULL));} +} {null} + +# This is to test the deprecated sqlite3_aggregate_count() API. 
+# +do_test func-23.1 { + sqlite3_create_aggregate db + execsql { + SELECT legacy_count() FROM t6; + } +} {3} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz.test new file mode 100644 index 0000000..76e3aad --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz.test @@ -0,0 +1,251 @@ +# 2007 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is generating semi-random strings of SQL +# (a.k.a. "fuzz") and sending it into the parser to try to +# generate errors. +# +# The tests in this file are really about testing fuzzily generated +# SQL parse-trees. The majority of the fuzzily generated SQL is +# valid as far as the parser is concerned. +# +# The most complicated trees are for SELECT statements. +# +# $Id: fuzz.test,v 1.14 2007/05/30 10:36:47 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set ::REPEATS 5000 + +# If running quick.test, don't do so many iterations. +if {[info exists ::ISQUICK]} { + if {$::ISQUICK} { set ::REPEATS 20 } +} + +source $testdir/fuzz_common.tcl + +#---------------------------------------------------------------- +# These tests caused errors that were first caught by the tests +# in this file. They are still here. +do_test fuzz-1.1 { + execsql { + SELECT 'abc' LIKE X'ABCD'; + } +} {0} +do_test fuzz-1.2 { + execsql { + SELECT 'abc' LIKE zeroblob(10); + } +} {0} +do_test fuzz-1.3 { + execsql { + SELECT zeroblob(10) LIKE 'abc'; + } +} {0} +do_test fuzz-1.4 { + execsql { + SELECT (- -21) % NOT (456 LIKE zeroblob(10)); + } +} {0} +do_test fuzz-1.5 { + execsql { + SELECT (SELECT ( + SELECT (SELECT -2147483648) FROM (SELECT 1) ORDER BY 1 + )) + } +} {-2147483648} +do_test fuzz-1.6 { + execsql { + SELECT 'abc', zeroblob(1) FROM (SELECT 1) ORDER BY 1 + } +} [execsql {SELECT 'abc', zeroblob(1)}] + +do_test fuzz-1.7 { + execsql { + SELECT ( SELECT zeroblob(1000) FROM ( + SELECT * FROM (SELECT 'first') ORDER BY NOT 'in') + ) + } +} [execsql {SELECT zeroblob(1000)}] + +do_test fuzz-1.8 { + # Problems with opcode OP_ToText (did not account for MEM_Zero). + # Also MemExpandBlob() was marking expanded blobs as nul-terminated. + # They are not. + execsql { + SELECT CAST(zeroblob(1000) AS text); + } +} {{}} + +do_test fuzz-1.9 { + # This was causing a NULL pointer dereference of Expr.pList. + execsql { + SELECT 1 FROM (SELECT * FROM sqlite_master WHERE random()) + } +} {} + +do_test fuzz-1.10 { + # Bug in calculation of Parse.ckOffset causing an assert() + # to fail. Probably harmless. + execsql { + SELECT coalesce(1, substr( 1, 2, length('in' IN (SELECT 1)))) + } +} {1} + +do_test fuzz-1.11 { + # The literals (A, B, C, D) are not important, they are just used + # to make the EXPLAIN output easier to read. + # + # The problem here is that the EXISTS(...) expression leaves an + # extra value on the VDBE stack. This is confusing the parent and + # leads to an assert() failure when OP_Insert encounters an integer + # when it expects a record blob. + # + # Update: Any query with (LIMIT 0) was leaking stack. 
+ # + execsql { + SELECT 'A' FROM (SELECT 'B') ORDER BY EXISTS ( + SELECT 'C' FROM (SELECT 'D' LIMIT 0) + ) + } +} {A} + +do_test fuzz-1.12.1 { + # Create a table with a single row. + execsql { + CREATE TABLE abc(b); + INSERT INTO abc VALUES('ABCDE'); + } + + # The following query was crashing. The later subquery (in the FROM) + # clause was flattened into the parent, but the code was not repairng + # the "b" reference in the other sub-query. When the query was executed, + # that "b" refered to a non-existant vdbe table-cursor. + # + execsql { + SELECT 1 IN ( SELECT b UNION SELECT 1 ) FROM (SELECT b FROM abc); + } +} {1} +do_test fuzz-1.12.2 { + # Clean up after the previous query. + execsql { + DROP TABLE abc; + } +} {} + + +do_test fuzz-1.13 { + # The problem here was that when there were more expressions in + # the ORDER BY list than the result-set list. The temporary b-tree + # used for sorting was being misconfigured in this case. + # + execsql { + SELECT 'abcd' UNION SELECT 'efgh' ORDER BY 1 ASC, 1 ASC; + } +} {abcd efgh} + +do_test fuzz-1.14.1 { + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(123, 456, 789); + } + + # The [a] reference in the sub-select was causing a problem. Because + # the internal walkSelectExpr() function was not considering compound + # SELECT operators. + execsql { + SELECT 1 FROM abc + GROUP BY c HAVING EXISTS (SELECT a UNION SELECT 123); + } +} {1} +do_test fuzz-1.14.2 { + execsql { + DROP TABLE abc; + } +} {} + +#---------------------------------------------------------------- +# Test some fuzzily generated expressions. +# +do_fuzzy_test fuzz-2 -template { SELECT [Expr] } + +do_test fuzz-3.1 { + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE def(a, b, c); + CREATE TABLE ghi(a, b, c); + } +} {} +set ::TableList [list abc def ghi] + +#---------------------------------------------------------------- +# Test some fuzzily generated SELECT statements. +# +do_fuzzy_test fuzz-3.2 -template {[Select]} + +#---------------------------------------------------------------- +# Insert a small amount of data into the database and then run +# some more generated SELECT statements. +# +do_test fuzz-4.1 { + execsql { + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + INSERT INTO abc VALUES(7, 8, 9); + INSERT INTO def VALUES(1, 2, 3); + INSERT INTO def VALUES(4, 5, 6); + INSERT INTO def VALUES(7, 8, 9); + INSERT INTO ghi VALUES(1, 2, 3); + INSERT INTO ghi VALUES(4, 5, 6); + INSERT INTO ghi VALUES(7, 8, 9); + CREATE INDEX abc_i ON abc(a, b, c); + CREATE INDEX def_i ON def(c, a, b); + CREATE INDEX ghi_i ON ghi(b, c, a); + } +} {} +do_fuzzy_test fuzz-4.2 -template {[Select]} + +#---------------------------------------------------------------- +# Test some fuzzy INSERT statements: +# +do_test fuzz-5.1 {execsql BEGIN} {} +do_fuzzy_test fuzz-5.2 -template {[Insert]} -errorlist table +integrity_check fuzz-5.2.integrity +do_test fuzz-5.3 {execsql COMMIT} {} +integrity_check fuzz-5.4.integrity + +#---------------------------------------------------------------- +# Now that there is data in the database, run some more SELECT +# statements +# +set ::ColumnList [list a b c] +set E {{no such col} {ambiguous column name}} +do_fuzzy_test fuzz-6.1 -template {[Select]} -errorlist $E + +#---------------------------------------------------------------- +# Run some SELECTs, INSERTs, UPDATEs and DELETEs in a transaction. 
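A note on the do_fuzzy_test options used here: judging from the implementation in fuzz_common.tcl further down, each -errorlist entry is matched as a prefix of any error message raised, so the $E list defined below merely whitelists the "no such col", "ambiguous column name" and "table ..." errors that randomly generated statements can legitimately hit. A hypothetical extra run with a reduced iteration count (test name and count chosen only for illustration) might look like:

    do_fuzzy_test fuzz-7.0 -template {[Statement]} -errorlist $E -repeats 50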
+# +set E {{no such col} {ambiguous column name} {table}} +do_test fuzz-7.1 {execsql BEGIN} {} +do_fuzzy_test fuzz-7.2 -template {[Statement]} -errorlist $E +integrity_check fuzz-7.3.integrity +do_test fuzz-7.4 {execsql COMMIT} {} +integrity_check fuzz-7.5.integrity + +#---------------------------------------------------------------- +# Many CREATE and DROP TABLE statements: +# +set E [list table duplicate {no such col} {ambiguous column name} {use DROP}] +do_fuzzy_test fuzz-8.1 -template {[CreateOrDropTableOrView]} -errorlist $E + +close $::log +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz2.test new file mode 100644 index 0000000..eb5eb83 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz2.test @@ -0,0 +1,102 @@ +# 2007 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file checks error recovery from malformed SQL strings. +# +# $Id: fuzz2.test,v 1.3 2007/05/15 16:51:37 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +proc fuzzcatch {sql} { + return [lindex [catchsql $sql] 0] +} + +do_test fuzz2-1.1 { + fuzzcatch {SELECT ALL "AAAAAA" . * GROUP BY LIMIT round(1), #12} +} {1} +do_test fuzz2-2.0 { + fuzzcatch {SELECT + #100} +} {1} +do_test fuzz2-2.1 { + fuzzcatch {SELECT 1 WHERE ( #61 NOT MATCH ROUND( 1 ) )} +} {1} +do_test fuzz2-2.2 { + fuzzcatch {SELECT 1 LIMIT NOT #59 COLLATE AAAAAA NOT IN + ( "AAAAAA" NOTNULL <= x'414141414141' IS NULL , ( ROUND ( 1.0 ) ) )} +} {1} +do_test fuzz2-2.3 { + fuzzcatch {INSERT OR REPLACE INTO AAAAAA . "AAAAAA" ( "AAAAAA" ) SELECT DISTINCT * , ( SELECT #252 IN ( SELECT DISTINCT AAAAAA . * ) )} +} {1} +do_test fuzz2-2.4 { + fuzzcatch {SELECT 1 LIMIT NOT #59 COLLATE AAAAAA NOT IN round(1.0)} +} {1} +do_test fuzz2-2.5 { + fuzzcatch {SELECT( #239 )} +} {1} +do_test fuzz2-2.6 { + fuzzcatch {DELETE FROM AAAAAA WHERE #65 NOT NULL} +} {1} +do_test fuzz2-2.7 { + fuzzcatch {ATTACH ROUND( 1.0 ) in AAAAAA . "AAAAAA" AS #122 ISNULL} +} {1} +do_test fuzz2-2.8 { + fuzzcatch {SELECT 1 LIMIT #122 ISNULL} +} {1} +do_test fuzz2-2.9 { + fuzzcatch {CREATE VIEW AAAAAA . "AAAAAA" AS SELECT DISTINCT #162 IS NULL "AAAAAA"} +} {1} +do_test fuzz2-2.10 { + fuzzcatch {DELETE FROM AAAAAA WHERE #202 IS NOT NULL ISNULL} +} {1} +do_test fuzz2-2.11 { + fuzzcatch {UPDATE OR IGNORE "AAAAAA" . "AAAAAA" SET "AAAAAA" = NOT #96} +} {1} +do_test fuzz2-2.12 { + fuzzcatch {SELECT - #196} +} {1} +do_test fuzz2-3.0 { + fuzzcatch {CREATE TRIGGER "AAAAAA" . "AAAAAA" AFTER UPDATE OF "AAAAAA" , "AAAAAA" ON "AAAAAA" . "AAAAAA" FOR EACH ROW BEGIN UPDATE AAAAAA SET "AAAAAA" = #162; END} +} {1} +do_test fuzz2-3.1 { + fuzzcatch {CREATE TRIGGER IF NOT EXISTS "AAAAAA" UPDATE ON "AAAAAA" . AAAAAA FOR EACH ROW BEGIN DELETE FROM "AAAAAA" ; INSERT INTO AAAAAA ( "AAAAAA" ) SELECT DISTINCT "AAAAAA" "AAAAAA" , #167 AAAAAA , "AAAAAA" . * ORDER BY "AAAAAA" ASC , x'414141414141' BETWEEN RAISE ( FAIL , "AAAAAA" ) AND AAAAAA ( * ) NOT NULL DESC LIMIT AAAAAA ; REPLACE INTO AAAAAA ( AAAAAA ) VALUES ( AAAAAA ( * ) ) ; END} +} {1} +do_test fuzz2-3.2 { + fuzzcatch {CREATE TEMP TRIGGER IF NOT EXISTS AAAAAA . 
"AAAAAA" BEFORE UPDATE OF "AAAAAA" ON AAAAAA . "AAAAAA" BEGIN SELECT ALL * , #175 "AAAAAA" FROM "AAAAAA" . AAAAAA; END} +} {1} +do_test fuzz2-4.0 { + fuzzcatch {ATTACH DATABASE #168 AS whatever} +} {1} +do_test fuzz2-4.1 { + fuzzcatch {DETACH #133} +} {1} +do_test fuzz2-5.0 { + fuzzcatch {SELECT 1 LIMIT ( SELECT DISTINCT * , AAAAAA , * , AAAAAA , "AAAAAA" . * FROM "AAAAAA" ON ROUND( 1 ) COLLATE AAAAAA OR "AAAAAA" USING ( AAAAAA , "AAAAAA" ) WHERE ROUND( 1 ) GROUP BY ORDER BY #84 ASC , #44 DESC , ( SELECT "AAAAAA" . * , "AAAAAA" . * FROM , ( ) "AAAAAA" USING ( )} +} {1} +do_test fuzz2-5.1 { + fuzzcatch {SELECT 1 WHERE 1 == AAAAAA ( * ) BETWEEN + - ~ + "AAAAAA" . AAAAAA | RAISE ( IGNORE ) COLLATE AAAAAA NOT IN ( SELECT DISTINCT "AAAAAA" . * , * , * WHERE ( SELECT ALL AAAAAA AS "AAAAAA" HAVING CAST ( "AAAAAA" . "AAAAAA" . "AAAAAA" AS AAAAAA ) ORDER BY , , IS NULL ASC , ~ AND DESC LIMIT ( ( "AAAAAA" ) NOT BETWEEN ( ) NOT IN ( ) AND AAAAAA ( ) IS NOT NULL ) OFFSET AAAAAA ( ALL , , ) ) GROUP BY ORDER BY "AAAAAA" . AAAAAA ASC , NULL IN ( SELECT UNION ALL SELECT ALL WHERE HAVING ORDER BY LIMIT UNION SELECT DISTINCT FROM ( ) WHERE + HAVING >> ORDER BY LIMIT . . , "AAAAAA" ) , CAST ( ~ "AAAAAA" . AAAAAA AS "AAAAAA" AAAAAA "AAAAAA" ( + 4294967295 , - 4294967296.0 ) ) ASC LIMIT AAAAAA INTERSECT SELECT ALL * GROUP BY , AAAAAA ( DISTINCT , ) != #241 NOT IN ( , , ) , , CTIME_KW HAVING AAAAAA ORDER BY #103 DESC , #81 ASC LIMIT AAAAAA OFFSET ~ AAAAAA ( ALL AAAAAA . AAAAAA >= AAAAAA . "AAAAAA" . "AAAAAA" ) ) NOTNULL NOT NULL} +} {1} +do_test fuzz2-5.2 { + fuzzcatch {SELECT 1 WHERE 1 == AAAAAA ( * ) BETWEEN + - ~ + "AAAAAA" . AAAAAA | RAISE ( IGNORE ) COLLATE AAAAAA NOT IN ( SELECT DISTINCT "AAAAAA" . * , * , * WHERE ( SELECT ALL AAAAAA AS "AAAAAA" HAVING CAST ( "AAAAAA" . "AAAAAA" . "AAAAAA" AS AAAAAA ) ORDER BY , , IS NULL ASC , ~ AND DESC LIMIT ( ( "AAAAAA" ) NOT BETWEEN ( ) NOT IN ( ) AND AAAAAA ( ) IS NOT NULL ) OFFSET AAAAAA ( ALL , , ) ) GROUP BY ORDER BY "AAAAAA" . AAAAAA ASC , NULL IN ( SELECT UNION ALL SELECT ALL WHERE HAVING ORDER BY LIMIT UNION SELECT DISTINCT FROM ( ) WHERE + HAVING >> ORDER BY LIMIT . . , "AAAAAA" ) , CAST ( ~ "AAAAAA" . AAAAAA AS "AAAAAA" AAAAAA "AAAAAA" ( + 4294967295 , - 4294967296.0 ) ) ASC LIMIT AAAAAA INTERSECT SELECT ALL * GROUP BY , AAAAAA ( DISTINCT , ) != #241 NOT IN ( , , ) , , CTIME_KW HAVING AAAAAA ORDER BY #103 DESC , #81 ASC LIMIT AAAAAA OFFSET ~ AAAAAA ( ALL AAAAAA . AAAAAA >= AAAAAA . "AAAAAA" . "AAAAAA" ) ) NOTNULL NOT NULL} +} {1} +do_test fuzz2-5.3 { + fuzzcatch {UPDATE "AAAAAA" SET "AAAAAA" = - EXISTS ( SELECT DISTINCT * , * ORDER BY #202 ASC , #147 , ~ AAAAAA . "AAAAAA" ASC LIMIT AAAAAA . "AAAAAA" , RAISE ( ABORT , AAAAAA ) UNION ALL SELECT DISTINCT AAAAAA . * , * FROM ( SELECT DISTINCT} +} {1} +do_test fuzz2-5.4 { + fuzzcatch {REPLACE INTO AAAAAA SELECT DISTINCT "AAAAAA" . * WHERE AAAAAA ( AAAAAA ( ) ) GROUP BY AAAAAA . AAAAAA . "AAAAAA" IN "AAAAAA" | AAAAAA ( ALL , ) ORDER BY #238, #92 DESC LIMIT 0 OFFSET - RAISE ( IGNORE ) NOT NULL > RAISE ( IGNORE ) IS NULL} +} {1} +do_test fuzz2-5.5 { + fuzzcatch {SELECT ALL * GROUP BY EXISTS ( SELECT "AAAAAA" . * , AAAAAA ( * ) AS AAAAAA FROM "AAAAAA" . "AAAAAA" AS "AAAAAA" USING ( AAAAAA , "AAAAAA" , "AAAAAA" ) WHERE AAAAAA ( DISTINCT ) - RAISE ( FAIL , "AAAAAA" ) HAVING "AAAAAA" . "AAAAAA" . 
AAAAAA ORDER BY #182 , #55 ) BETWEEN EXISTS ( SELECT ALL * FROM ( ( } +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_common.tcl b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_common.tcl new file mode 100644 index 0000000..b552129 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_common.tcl @@ -0,0 +1,392 @@ +# 2007 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: fuzz_common.tcl,v 1.1 2007/05/30 10:36:47 danielk1977 Exp $ + +proc fuzz {TemplateList} { + set n [llength $TemplateList] + set i [expr {int(rand()*$n)}] + set r [uplevel 1 subst -novar [list [lindex $TemplateList $i]]] + + string map {"\n" " "} $r +} + +# Fuzzy generation primitives: +# +# Literal +# UnaryOp +# BinaryOp +# Expr +# Table +# Select +# Insert +# + +# Returns a string representing an SQL literal. +# +proc Literal {} { + set TemplateList { + 456 0 -456 1 -1 + 2147483648 2147483647 2147483649 -2147483647 -2147483648 -2147483649 + 'The' 'first' 'experiments' 'in' 'hardware' 'fault' 'injection' + zeroblob(1000) + NULL + 56.1 -56.1 + 123456789.1234567899 + } + fuzz $TemplateList +} + +# Returns a string containing an SQL unary operator (e.g. "+" or "NOT"). +# +proc UnaryOp {} { + set TemplateList {+ - NOT ~} + fuzz $TemplateList +} + +# Returns a string containing an SQL binary operator (e.g. "*" or "/"). +# +proc BinaryOp {} { + set TemplateList { + || * / % + - << >> & | < <= > >= = == != <> AND OR + LIKE GLOB {NOT LIKE} + } + fuzz $TemplateList +} + +# Return the complete text of an SQL expression. +# +set ::ExprDepth 0 +proc Expr { {c {}} } { + incr ::ExprDepth + + set TemplateList [concat $c $c $c {[Literal]}] + if {$::ExprDepth < 3} { + lappend TemplateList \ + {[Expr $c] [BinaryOp] [Expr $c]} \ + {[UnaryOp] [Expr $c]} \ + {[Expr $c] ISNULL} \ + {[Expr $c] NOTNULL} \ + {CAST([Expr $c] AS blob)} \ + {CAST([Expr $c] AS text)} \ + {CAST([Expr $c] AS integer)} \ + {CAST([Expr $c] AS real)} \ + {abs([Expr])} \ + {coalesce([Expr], [Expr])} \ + {hex([Expr])} \ + {length([Expr])} \ + {lower([Expr])} \ + {upper([Expr])} \ + {quote([Expr])} \ + {random()} \ + {randomblob(min(max([Expr],1), 500))} \ + {typeof([Expr])} \ + {substr([Expr],[Expr],[Expr])} \ + {CASE WHEN [Expr $c] THEN [Expr $c] ELSE [Expr $c] END} \ + {[Literal]} {[Literal]} {[Literal]} \ + {[Literal]} {[Literal]} {[Literal]} \ + {[Literal]} {[Literal]} {[Literal]} \ + {[Literal]} {[Literal]} {[Literal]} + } + if {$::SelectDepth < 4} { + lappend TemplateList \ + {([Select 1])} \ + {[Expr $c] IN ([Select 1])} \ + {[Expr $c] NOT IN ([Select 1])} \ + {EXISTS ([Select 1])} \ + } + set res [fuzz $TemplateList] + incr ::ExprDepth -1 + return $res +} + +# Return a valid table name. +# +set ::TableList [list] +proc Table {} { + set TemplateList [concat sqlite_master $::TableList] + fuzz $TemplateList +} + +# Return one of: +# +# "SELECT DISTINCT", "SELECT ALL" or "SELECT" +# +proc SelectKw {} { + set TemplateList { + "SELECT DISTINCT" + "SELECT ALL" + "SELECT" + } + fuzz $TemplateList +} + +# Return a result set for a SELECT statement. 
+# +proc ResultSet {{nRes 0} {c ""}} { + if {$nRes == 0} { + set nRes [expr {rand()*2 + 1}] + } + + set aRes [list] + for {set ii 0} {$ii < $nRes} {incr ii} { + lappend aRes [Expr $c] + } + + join $aRes ", " +} + +set ::SelectDepth 0 +set ::ColumnList [list] +proc SimpleSelect {{nRes 0}} { + + set TemplateList { + {[SelectKw] [ResultSet $nRes]} + } + + # The ::SelectDepth variable contains the number of ancestor SELECT + # statements (i.e. for a top level SELECT it is set to 0, for a + # sub-select 1, for a sub-select of a sub-select 2 etc.). + # + # If this is already greater than 3, do not generate a complicated + # SELECT statement. This tends to cause parser stack overflow (too + # boring to bother with). + # + if {$::SelectDepth < 4} { + lappend TemplateList \ + {[SelectKw] [ResultSet $nRes $::ColumnList] FROM ([Select])} \ + {[SelectKw] [ResultSet $nRes] FROM ([Select])} \ + {[SelectKw] [ResultSet $nRes $::ColumnList] FROM [Table]} \ + { + [SelectKw] [ResultSet $nRes $::ColumnList] + FROM ([Select]) + GROUP BY [Expr] + HAVING [Expr] + } \ + + if {0 == $nRes} { + lappend TemplateList \ + {[SelectKw] * FROM ([Select])} \ + {[SelectKw] * FROM [Table]} \ + {[SelectKw] * FROM [Table] WHERE [Expr $::ColumnList]} \ + { + [SelectKw] * + FROM [Table],[Table] AS t2 + WHERE [Expr $::ColumnList] + } { + [SelectKw] * + FROM [Table] LEFT OUTER JOIN [Table] AS t2 + ON [Expr $::ColumnList] + WHERE [Expr $::ColumnList] + } + } + } + + fuzz $TemplateList +} + +# Return a SELECT statement. +# +# If boolean parameter $isExpr is set to true, make sure the +# returned SELECT statement returns a single column of data. +# +proc Select {{nMulti 0}} { + set TemplateList { + {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} + {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} + {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} + {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} {[SimpleSelect $nMulti]} + {[SimpleSelect $nMulti] ORDER BY [Expr] DESC} + {[SimpleSelect $nMulti] ORDER BY [Expr] ASC} + {[SimpleSelect $nMulti] ORDER BY [Expr] ASC, [Expr] DESC} + {[SimpleSelect $nMulti] ORDER BY [Expr] LIMIT [Expr] OFFSET [Expr]} + } + + if {$::SelectDepth < 4} { + if {$nMulti == 0} { + set nMulti [expr {(rand()*2)+1}] + } + lappend TemplateList \ + {[SimpleSelect $nMulti] UNION [Select $nMulti]} \ + {[SimpleSelect $nMulti] UNION ALL [Select $nMulti]} \ + {[SimpleSelect $nMulti] EXCEPT [Select $nMulti]} \ + {[SimpleSelect $nMulti] INTERSECT [Select $nMulti]} + } + + incr ::SelectDepth + set res [fuzz $TemplateList] + incr ::SelectDepth -1 + set res +} + +# Generate and return a fuzzy INSERT statement. +# +proc Insert {} { + set TemplateList { + {INSERT INTO [Table] VALUES([Expr], [Expr], [Expr]);} + {INSERT INTO [Table] VALUES([Expr], [Expr], [Expr], [Expr]);} + {INSERT INTO [Table] VALUES([Expr], [Expr]);} + } + fuzz $TemplateList +} + +proc Column {} { + fuzz $::ColumnList +} + +# Generate and return a fuzzy UPDATE statement. +# +proc Update {} { + set TemplateList { + {UPDATE [Table] + SET [Column] = [Expr $::ColumnList] + WHERE [Expr $::ColumnList]} + } + fuzz $TemplateList +} + +proc Delete {} { + set TemplateList { + {DELETE FROM [Table] WHERE [Expr $::ColumnList]} + } + fuzz $TemplateList +} + +proc Statement {} { + set TemplateList { + {[Update]} + {[Insert]} + {[Select]} + {[Delete]} + } + fuzz $TemplateList +} + +# Return an identifier. This just chooses randomly from a fixed set +# of strings. 
+proc Identifier {} { + set TemplateList { + This just chooses randomly a fixed + We would also thank the developers + for their analysis Samba + } + fuzz $TemplateList +} + +proc Check {} { + # Use a large value for $::SelectDepth, because sub-selects are + # not allowed in expressions used by CHECK constraints. + # + set sd $::SelectDepth + set ::SelectDepth 500 + set TemplateList { + {} + {CHECK ([Expr])} + } + set res [fuzz $TemplateList] + set ::SelectDepth $sd + set res +} + +proc Coltype {} { + set TemplateList { + {INTEGER PRIMARY KEY} + {VARCHAR [Check]} + {PRIMARY KEY} + } + fuzz $TemplateList +} + +proc DropTable {} { + set TemplateList { + {DROP TABLE IF EXISTS [Identifier]} + } + fuzz $TemplateList +} + +proc CreateView {} { + set TemplateList { + {CREATE VIEW [Identifier] AS [Select]} + } + fuzz $TemplateList +} +proc DropView {} { + set TemplateList { + {DROP VIEW IF EXISTS [Identifier]} + } + fuzz $TemplateList +} + +proc CreateTable {} { + set TemplateList { + {CREATE TABLE [Identifier]([Identifier] [Coltype], [Identifier] [Coltype])} + {CREATE TEMP TABLE [Identifier]([Identifier] [Coltype])} + } + fuzz $TemplateList +} + +proc CreateOrDropTableOrView {} { + set TemplateList { + {[CreateTable]} + {[DropTable]} + {[CreateView]} + {[DropView]} + } + fuzz $TemplateList +} + +######################################################################## + +set ::log [open fuzzy.log w] + +# +# Usage: do_fuzzy_test ?? +# +# -template +# -errorlist +# -repeats +# +proc do_fuzzy_test {testname args} { + set ::fuzzyopts(-errorlist) [list] + set ::fuzzyopts(-repeats) $::REPEATS + array set ::fuzzyopts $args + + lappend ::fuzzyopts(-errorlist) {parser stack overflow} + lappend ::fuzzyopts(-errorlist) {ORDER BY} + lappend ::fuzzyopts(-errorlist) {GROUP BY} + lappend ::fuzzyopts(-errorlist) {datatype mismatch} + + for {set ii 0} {$ii < $::fuzzyopts(-repeats)} {incr ii} { + do_test ${testname}.$ii { + set ::sql [subst $::fuzzyopts(-template)] + puts $::log $::sql + flush $::log + set rc [catch {execsql $::sql} msg] + set e 1 + if {$rc} { + set e 0 + foreach error $::fuzzyopts(-errorlist) { + if {0 == [string first $error $msg]} { + set e 1 + break + } + } + } + if {$e == 0} { + puts "" + puts $::sql + puts $msg + } + set e + } {1} + } +} + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_malloc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_malloc.test new file mode 100644 index 0000000..2d78807 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/fuzz_malloc.test @@ -0,0 +1,93 @@ +# +# 2007 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests malloc failures in concert with fuzzy SQL generation. +# +# $Id: fuzz_malloc.test,v 1.9 2007/09/03 15:42:48 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !memdebug { + finish_test + return +} + +source $testdir/malloc_common.tcl +source $testdir/fuzz_common.tcl + +if {[info exists ISQUICK]} { + set ::REPEATS 20 +} elseif {[info exists SOAKTEST]} { + set ::REPEATS 100 +} else { + set ::REPEATS 40 +} + +# +# Usage: do_fuzzy_malloc_test ?? 
+# +# -template +# -sqlprep +# -repeats +# +proc do_fuzzy_malloc_test {testname args} { + set ::fuzzyopts(-repeats) $::REPEATS + set ::fuzzyopts(-sqlprep) {} + array set ::fuzzyopts $args + + sqlite3_memdebug_fail -1 + db close + file delete test.db test.db-journal + sqlite3 db test.db + set ::prep $::fuzzyopts(-sqlprep) + execsql $::prep + set jj 0 + for {set ii 0} {$ii < $::fuzzyopts(-repeats)} {incr ii} { + expr srand($jj) + incr jj + set ::sql [subst $::fuzzyopts(-template)] + foreach {rc res} [catchsql "$::sql"] {} + if {$rc==0} { + do_malloc_test $testname-$ii -sqlbody $::sql -sqlprep $::prep + } else { + incr ii -1 + } + } +} + +#---------------------------------------------------------------- +# Test malloc failure during parsing (and execution) of a fuzzily +# generated expressions. +# +do_fuzzy_malloc_test fuzzy_malloc-1 -template {Select [Expr]} +do_fuzzy_malloc_test fuzzy_malloc-2 -template {[Select]} + +set ::SQLPREP { + BEGIN; + CREATE TABLE abc(a, b, c); + CREATE TABLE def(a, b, c); + CREATE TABLE ghi(a, b, c); + INSERT INTO abc VALUES(1.5, 3, 'a short string'); + INSERT INTO def VALUES(NULL, X'ABCDEF', + 'a longer string. Long enough that it doesn''t fit in Mem.zShort'); + INSERT INTO ghi VALUES(zeroblob(1000), 'hello world', -1257900987654321); + COMMIT; +} +set ::TableList [list abc def ghi] +set ::ColumnList [list a b c] + +do_fuzzy_malloc_test fuzzy_malloc-3 \ + -template {[Select]} \ + -sqlprep $::SQLPREP + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/hook.test b/libraries/sqlite/unix/sqlite-3.5.1/test/hook.test new file mode 100644 index 0000000..1bbd6fb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/hook.test @@ -0,0 +1,297 @@ +# 2004 Jan 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for TCL interface to the +# SQLite library. 
+# +# The focus of the tests in this file is the following interface: +# +# sqlite_commit_hook (tests hook-1..hook-3 inclusive) +# sqlite_update_hook (tests hook-4-*) +# sqlite_rollback_hook (tests hook-5.*) +# +# $Id: hook.test,v 1.11 2006/01/17 09:35:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test hook-1.2 { + db commit_hook +} {} + + +do_test hook-3.1 { + set commit_cnt 0 + proc commit_hook {} { + incr ::commit_cnt + return 0 + } + db commit_hook ::commit_hook + db commit_hook +} {::commit_hook} +do_test hook-3.2 { + set commit_cnt +} {0} +do_test hook-3.3 { + execsql { + CREATE TABLE t2(a,b); + } + set commit_cnt +} {1} +do_test hook-3.4 { + execsql { + INSERT INTO t2 VALUES(1,2); + INSERT INTO t2 SELECT a+1, b+1 FROM t2; + INSERT INTO t2 SELECT a+2, b+2 FROM t2; + } + set commit_cnt +} {4} +do_test hook-3.5 { + set commit_cnt {} + proc commit_hook {} { + set ::commit_cnt [execsql {SELECT * FROM t2}] + return 0 + } + execsql { + INSERT INTO t2 VALUES(5,6); + } + set commit_cnt +} {1 2 2 3 3 4 4 5 5 6} +do_test hook-3.6 { + set commit_cnt {} + proc commit_hook {} { + set ::commit_cnt [execsql {SELECT * FROM t2}] + return 1 + } + catchsql { + INSERT INTO t2 VALUES(6,7); + } +} {1 {constraint failed}} +do_test hook-3.7 { + set ::commit_cnt +} {1 2 2 3 3 4 4 5 5 6 6 7} +do_test hook-3.8 { + execsql {SELECT * FROM t2} +} {1 2 2 3 3 4 4 5 5 6} + +# Test turnning off the commit hook +# +do_test hook-3.9 { + db commit_hook {} + set ::commit_cnt {} + execsql { + INSERT INTO t2 VALUES(7,8); + } + set ::commit_cnt +} {} + +#---------------------------------------------------------------------------- +# Tests for the update-hook. +# +# 4.1.* - Very simple tests. Test that the update hook is invoked correctly +# for INSERT, DELETE and UPDATE statements, including DELETE +# statements with no WHERE clause. +# 4.2.* - Check that the update-hook is invoked for rows modified by trigger +# bodies. Also that the database name is correctly reported when +# an attached database is modified. +# 4.3.* - Do some sorting, grouping, compound queries, population and +# depopulation of indices, to make sure the update-hook is not +# invoked incorrectly. 
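For readers new to the TCL interface: the script registered with [db update_hook] is invoked with four values appended (the operation INSERT, UPDATE or DELETE, the database name, the table name, and the rowid), which is why the tests below register [list lappend ::update_hook] and then compare against flat lists such as {INSERT main t1 4}. A minimal standalone sketch, with a purely illustrative proc name:

    proc log_change {op dbname tbl rowid} {
      puts "$op on $dbname.$tbl rowid=$rowid"
    }
    db update_hook log_change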
+# + +# Simple tests +do_test hook-4.1.1 { + catchsql { + DROP TABLE t1; + } + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + INSERT INTO t1 VALUES(3, 'three'); + } + db update_hook [list lappend ::update_hook] +} {} +do_test hook-4.1.2 { + execsql { + INSERT INTO t1 VALUES(4, 'four'); + DELETE FROM t1 WHERE b = 'two'; + UPDATE t1 SET b = '' WHERE a = 1 OR a = 3; + DELETE FROM t1 WHERE 1; -- Avoid the truncate optimization (for now) + } + set ::update_hook +} [list \ + INSERT main t1 4 \ + DELETE main t1 2 \ + UPDATE main t1 1 \ + UPDATE main t1 3 \ + DELETE main t1 1 \ + DELETE main t1 3 \ + DELETE main t1 4 \ +] + +set ::update_hook {} +ifcapable trigger { + do_test hook-4.2.1 { + catchsql { + DROP TABLE t2; + } + execsql { + CREATE TABLE t2(c INTEGER PRIMARY KEY, d); + CREATE TRIGGER t1_trigger AFTER INSERT ON t1 BEGIN + INSERT INTO t2 VALUES(new.a, new.b); + UPDATE t2 SET d = d || ' via trigger' WHERE new.a = c; + DELETE FROM t2 WHERE new.a = c; + END; + } + } {} + do_test hook-4.2.2 { + execsql { + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + } + set ::update_hook + } [list \ + INSERT main t1 1 \ + INSERT main t2 1 \ + UPDATE main t2 1 \ + DELETE main t2 1 \ + INSERT main t1 2 \ + INSERT main t2 2 \ + UPDATE main t2 2 \ + DELETE main t2 2 \ + ] +} else { + execsql { + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + } +} + +# Update-hook + ATTACH +set ::update_hook {} +do_test hook-4.2.3 { + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t3(a INTEGER PRIMARY KEY, b); + INSERT INTO aux.t3 SELECT * FROM t1; + UPDATE t3 SET b = 'two or so' WHERE a = 2; + DELETE FROM t3 WHERE 1; -- Avoid the truncate optimization (for now) + } + set ::update_hook +} [list \ + INSERT aux t3 1 \ + INSERT aux t3 2 \ + UPDATE aux t3 2 \ + DELETE aux t3 1 \ + DELETE aux t3 2 \ +] + +ifcapable trigger { + execsql { + DROP TRIGGER t1_trigger; + } +} + +# Test that other vdbe operations involving btree structures do not +# incorrectly invoke the update-hook. +set ::update_hook {} +do_test hook-4.3.1 { + execsql { + CREATE INDEX t1_i ON t1(b); + INSERT INTO t1 VALUES(3, 'three'); + UPDATE t1 SET b = ''; + DELETE FROM t1 WHERE a > 1; + } + set ::update_hook +} [list \ + INSERT main t1 3 \ + UPDATE main t1 1 \ + UPDATE main t1 2 \ + UPDATE main t1 3 \ + DELETE main t1 2 \ + DELETE main t1 3 \ +] +set ::update_hook {} +ifcapable compound { + do_test hook-4.3.2 { + execsql { + SELECT * FROM t1 UNION SELECT * FROM t3; + SELECT * FROM t1 UNION ALL SELECT * FROM t3; + SELECT * FROM t1 INTERSECT SELECT * FROM t3; + SELECT * FROM t1 EXCEPT SELECT * FROM t3; + SELECT * FROM t1 ORDER BY b; + SELECT * FROM t1 GROUP BY b; + } + set ::update_hook + } [list] +} +db update_hook {} +# +#---------------------------------------------------------------------------- + +#---------------------------------------------------------------------------- +# Test the rollback-hook. The rollback-hook is a bit more complicated than +# either the commit or update hooks because a rollback can happen +# explicitly (an sql ROLLBACK statement) or implicitly (a constraint or +# error condition). +# +# hook-5.1.* - Test explicit rollbacks. +# hook-5.2.* - Test implicit rollbacks caused by constraint failure. +# +# hook-5.3.* - Test implicit rollbacks caused by IO errors. +# hook-5.4.* - Test implicit rollbacks caused by malloc() failure. +# hook-5.5.* - Test hot-journal rollbacks. 
Or should the rollback hook +# not be called for these? +# + +do_test hook-5.0 { + # Configure the rollback hook to increment global variable + # $::rollback_hook each time it is invoked. + set ::rollback_hook 0 + db rollback_hook [list incr ::rollback_hook] +} {} + +# Test explicit rollbacks. Not much can really go wrong here. +# +do_test hook-5.1.1 { + set ::rollback_hook 0 + execsql { + BEGIN; + ROLLBACK; + } + set ::rollback_hook +} {1} + +# Test implicit rollbacks caused by constraints. +# +do_test hook-5.2.1 { + set ::rollback_hook 0 + catchsql { + DROP TABLE t1; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES('one', 'I'); + INSERT INTO t1 VALUES('one', 'I'); + } + set ::rollback_hook +} {1} +do_test hook-5.2.2 { + # Check that the INSERT transaction above really was rolled back. + execsql { + SELECT count(*) FROM t1; + } +} {1} + +# +# End rollback-hook testing. +#---------------------------------------------------------------------------- + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/icu.test b/libraries/sqlite/unix/sqlite-3.5.1/test/icu.test new file mode 100644 index 0000000..2a247c6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/icu.test @@ -0,0 +1,118 @@ +# 2007 May 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: icu.test,v 1.1 2007/05/07 11:53:14 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !icu { + finish_test + return +} + +# Create a table to work with. +# +execsql {CREATE TABLE test1(i1 int, i2 int, r1 real, r2 real, t1 text, t2 text)} +execsql {INSERT INTO test1 VALUES(1,2,1.1,2.2,'hello','world')} +proc test_expr {name settings expr result} { + do_test $name [format { + db one { + BEGIN; + UPDATE test1 SET %s; + SELECT %s FROM test1; + ROLLBACK; + } + } $settings $expr] $result +} + +# Tests of the REGEXP operator. +# +test_expr icu-1.1 {i1='hello'} {i1 REGEXP 'hello'} 1 +test_expr icu-1.2 {i1='hello'} {i1 REGEXP '.ello'} 1 +test_expr icu-1.3 {i1='hello'} {i1 REGEXP '.ell'} 0 +test_expr icu-1.4 {i1='hello'} {i1 REGEXP '.ell.*'} 1 +test_expr icu-1.5 {i1=NULL} {i1 REGEXP '.ell.*'} {} + +# Some non-ascii characters with defined case mappings +# +set ::EGRAVE "\xC8" +set ::egrave "\xE8" + +set ::OGRAVE "\xD2" +set ::ograve "\xF2" + +# That German letter that looks a bit like a B. The +# upper-case version of which is "SS" (two characters). +# +set ::szlig "\xDF" + +# Tests of the upper()/lower() functions. 
+# +test_expr icu-2.1 {i1='HellO WorlD'} {upper(i1)} {HELLO WORLD} +test_expr icu-2.2 {i1='HellO WorlD'} {lower(i1)} {hello world} +test_expr icu-2.3 {i1=$::egrave} {lower(i1)} $::egrave +test_expr icu-2.4 {i1=$::egrave} {upper(i1)} $::EGRAVE +test_expr icu-2.5 {i1=$::ograve} {lower(i1)} $::ograve +test_expr icu-2.6 {i1=$::ograve} {upper(i1)} $::OGRAVE +test_expr icu-2.3 {i1=$::EGRAVE} {lower(i1)} $::egrave +test_expr icu-2.4 {i1=$::EGRAVE} {upper(i1)} $::EGRAVE +test_expr icu-2.5 {i1=$::OGRAVE} {lower(i1)} $::ograve +test_expr icu-2.6 {i1=$::OGRAVE} {upper(i1)} $::OGRAVE + +test_expr icu-2.7 {i1=$::szlig} {upper(i1)} "SS" +test_expr icu-2.8 {i1='SS'} {lower(i1)} "ss" + +# In turkish (locale="tr_TR"), the lower case version of I +# is "small dotless i" (code point 0x131 (decimal 305)). +# +set ::small_dotless_i "\u0131" +test_expr icu-3.1 {i1='I'} {lower(i1)} "i" +test_expr icu-3.2 {i1='I'} {lower(i1, 'tr_tr')} $::small_dotless_i +test_expr icu-3.3 {i1='I'} {lower(i1, 'en_AU')} "i" + +#-------------------------------------------------------------------- +# Test the collation sequence function. +# +do_test icu-4.1 { + execsql { + CREATE TABLE fruit(name); + INSERT INTO fruit VALUES('plum'); + INSERT INTO fruit VALUES('cherry'); + INSERT INTO fruit VALUES('apricot'); + INSERT INTO fruit VALUES('peach'); + INSERT INTO fruit VALUES('chokecherry'); + INSERT INTO fruit VALUES('yamot'); + } +} {} +do_test icu-4.2 { + execsql { + SELECT icu_load_collation('en_US', 'AmericanEnglish'); + SELECT icu_load_collation('lt_LT', 'Lithuanian'); + } + execsql { + SELECT name FROM fruit ORDER BY name COLLATE AmericanEnglish ASC; + } +} {apricot cherry chokecherry peach plum yamot} + + +# Test collation using Lithuanian rules. In the Lithuanian +# alphabet, "y" comes right after "i". +# +do_test icu-4.3 { + execsql { + SELECT name FROM fruit ORDER BY name COLLATE Lithuanian ASC; + } +} {apricot cherry chokecherry yamot peach plum} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/in.test b/libraries/sqlite/unix/sqlite-3.5.1/test/in.test new file mode 100644 index 0000000..84147fd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/in.test @@ -0,0 +1,367 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the IN and BETWEEN operator. +# +# $Id: in.test,v 1.17 2006/05/23 23:25:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Generate the test data we will need for the first squences of tests. +# +do_test in-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(a int, b int); + } + for {set i 1} {$i<=10} {incr i} { + execsql "INSERT INTO t1 VALUES($i,[expr {int(pow(2,$i))}])" + } + execsql { + COMMIT; + SELECT count(*) FROM t1; + } +} {10} + +# Do basic testing of BETWEEN. 
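Since t1.b holds the powers of two 2, 4, ..., 1024, the BETWEEN results can be checked by hand; in-1.1 expects {4 5} because 16 and 32 are the only b values in [10,50]. As x BETWEEN lo AND hi behaves like x>=lo AND x<=hi, the first test could equivalently be phrased as:

    execsql {SELECT a FROM t1 WHERE b>=10 AND b<=50 ORDER BY a}  ;# => {4 5}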
+# +do_test in-1.1 { + execsql {SELECT a FROM t1 WHERE b BETWEEN 10 AND 50 ORDER BY a} +} {4 5} +do_test in-1.2 { + execsql {SELECT a FROM t1 WHERE b NOT BETWEEN 10 AND 50 ORDER BY a} +} {1 2 3 6 7 8 9 10} +do_test in-1.3 { + execsql {SELECT a FROM t1 WHERE b BETWEEN a AND a*5 ORDER BY a} +} {1 2 3 4} +do_test in-1.4 { + execsql {SELECT a FROM t1 WHERE b NOT BETWEEN a AND a*5 ORDER BY a} +} {5 6 7 8 9 10} +do_test in-1.6 { + execsql {SELECT a FROM t1 WHERE b BETWEEN a AND a*5 OR b=512 ORDER BY a} +} {1 2 3 4 9} +do_test in-1.7 { + execsql {SELECT a+ 100*(a BETWEEN 1 and 3) FROM t1 ORDER BY b} +} {101 102 103 4 5 6 7 8 9 10} + +# The rest of this file concentrates on testing the IN operator. +# Skip this if the library is compiled with SQLITE_OMIT_SUBQUERY +# (because the IN operator is unavailable). +# +ifcapable !subquery { + finish_test + return +} + +# Testing of the IN operator using static lists on the right-hand side. +# +do_test in-2.1 { + execsql {SELECT a FROM t1 WHERE b IN (8,12,16,24,32) ORDER BY a} +} {3 4 5} +do_test in-2.2 { + execsql {SELECT a FROM t1 WHERE b NOT IN (8,12,16,24,32) ORDER BY a} +} {1 2 6 7 8 9 10} +do_test in-2.3 { + execsql {SELECT a FROM t1 WHERE b IN (8,12,16,24,32) OR b=512 ORDER BY a} +} {3 4 5 9} +do_test in-2.4 { + execsql {SELECT a FROM t1 WHERE b NOT IN (8,12,16,24,32) OR b=512 ORDER BY a} +} {1 2 6 7 8 9 10} +do_test in-2.5 { + execsql {SELECT a+100*(b IN (8,16,24)) FROM t1 ORDER BY b} +} {1 2 103 104 5 6 7 8 9 10} + +do_test in-2.6 { + execsql {SELECT a FROM t1 WHERE b IN (b+8,64)} +} {6} +do_test in-2.7 { + execsql {SELECT a FROM t1 WHERE b IN (max(5,10,b),20)} +} {4 5 6 7 8 9 10} +do_test in-2.8 { + execsql {SELECT a FROM t1 WHERE b IN (8*2,64/2) ORDER BY b} +} {4 5} +do_test in-2.9 { + execsql {SELECT a FROM t1 WHERE b IN (max(5,10),20)} +} {} +do_test in-2.10 { + execsql {SELECT a FROM t1 WHERE min(0,b IN (a,30))} +} {} +do_test in-2.11 { + set v [catch {execsql {SELECT a FROM t1 WHERE c IN (10,20)}} msg] + lappend v $msg +} {1 {no such column: c}} + +# Testing the IN operator where the right-hand side is a SELECT +# +do_test in-3.1 { + execsql { + SELECT a FROM t1 + WHERE b IN (SELECT b FROM t1 WHERE a<5) + ORDER BY a + } +} {1 2 3 4} +do_test in-3.2 { + execsql { + SELECT a FROM t1 + WHERE b IN (SELECT b FROM t1 WHERE a<5) OR b==512 + ORDER BY a + } +} {1 2 3 4 9} +do_test in-3.3 { + execsql { + SELECT a + 100*(b IN (SELECT b FROM t1 WHERE a<5)) FROM t1 ORDER BY b + } +} {101 102 103 104 5 6 7 8 9 10} + +# Make sure the UPDATE and DELETE commands work with IN-SELECT +# +do_test in-4.1 { + execsql { + UPDATE t1 SET b=b*2 + WHERE b IN (SELECT b FROM t1 WHERE a>8) + } + execsql {SELECT b FROM t1 ORDER BY b} +} {2 4 8 16 32 64 128 256 1024 2048} +do_test in-4.2 { + execsql { + DELETE FROM t1 WHERE b IN (SELECT b FROM t1 WHERE a>8) + } + execsql {SELECT a FROM t1 ORDER BY a} +} {1 2 3 4 5 6 7 8} +do_test in-4.3 { + execsql { + DELETE FROM t1 WHERE b NOT IN (SELECT b FROM t1 WHERE a>4) + } + execsql {SELECT a FROM t1 ORDER BY a} +} {5 6 7 8} + +# Do an IN with a constant RHS but where the RHS has many, many +# elements. We need to test that collisions in the hash table +# are resolved properly. 
+# +do_test in-5.1 { + execsql { + INSERT INTO t1 VALUES('hello', 'world'); + SELECT * FROM t1 + WHERE a IN ( + 'Do','an','IN','with','a','constant','RHS','but','where','the', + 'has','many','elements','We','need','to','test','that', + 'collisions','hash','table','are','resolved','properly', + 'This','in-set','contains','thirty','one','entries','hello'); + } +} {hello world} + +# Make sure the IN operator works with INTEGER PRIMARY KEY fields. +# +do_test in-6.1 { + execsql { + CREATE TABLE ta(a INTEGER PRIMARY KEY, b); + INSERT INTO ta VALUES(1,1); + INSERT INTO ta VALUES(2,2); + INSERT INTO ta VALUES(3,3); + INSERT INTO ta VALUES(4,4); + INSERT INTO ta VALUES(6,6); + INSERT INTO ta VALUES(8,8); + INSERT INTO ta VALUES(10, + 'This is a key that is long enough to require a malloc in the VDBE'); + SELECT * FROM ta WHERE a<10; + } +} {1 1 2 2 3 3 4 4 6 6 8 8} +do_test in-6.2 { + execsql { + CREATE TABLE tb(a INTEGER PRIMARY KEY, b); + INSERT INTO tb VALUES(1,1); + INSERT INTO tb VALUES(2,2); + INSERT INTO tb VALUES(3,3); + INSERT INTO tb VALUES(5,5); + INSERT INTO tb VALUES(7,7); + INSERT INTO tb VALUES(9,9); + INSERT INTO tb VALUES(11, + 'This is a key that is long enough to require a malloc in the VDBE'); + SELECT * FROM tb WHERE a<10; + } +} {1 1 2 2 3 3 5 5 7 7 9 9} +do_test in-6.3 { + execsql { + SELECT a FROM ta WHERE b IN (SELECT a FROM tb); + } +} {1 2 3} +do_test in-6.4 { + execsql { + SELECT a FROM ta WHERE b NOT IN (SELECT a FROM tb); + } +} {4 6 8 10} +do_test in-6.5 { + execsql { + SELECT a FROM ta WHERE b IN (SELECT b FROM tb); + } +} {1 2 3 10} +do_test in-6.6 { + execsql { + SELECT a FROM ta WHERE b NOT IN (SELECT b FROM tb); + } +} {4 6 8} +do_test in-6.7 { + execsql { + SELECT a FROM ta WHERE a IN (SELECT a FROM tb); + } +} {1 2 3} +do_test in-6.8 { + execsql { + SELECT a FROM ta WHERE a NOT IN (SELECT a FROM tb); + } +} {4 6 8 10} +do_test in-6.9 { + execsql { + SELECT a FROM ta WHERE a IN (SELECT b FROM tb); + } +} {1 2 3} +do_test in-6.10 { + execsql { + SELECT a FROM ta WHERE a NOT IN (SELECT b FROM tb); + } +} {4 6 8 10} + +# Tests of IN operator against empty sets. (Ticket #185) +# +do_test in-7.1 { + execsql { + SELECT a FROM t1 WHERE a IN (); + } +} {} +do_test in-7.2 { + execsql { + SELECT a FROM t1 WHERE a IN (5); + } +} {5} +do_test in-7.3 { + execsql { + SELECT a FROM t1 WHERE a NOT IN () ORDER BY a; + } +} {5 6 7 8 hello} +do_test in-7.4 { + execsql { + SELECT a FROM t1 WHERE a IN (5) AND b IN (); + } +} {} +do_test in-7.5 { + execsql { + SELECT a FROM t1 WHERE a IN (5) AND b NOT IN (); + } +} {5} +do_test in-7.6 { + execsql { + SELECT a FROM ta WHERE a IN (); + } +} {} +do_test in-7.7 { + execsql { + SELECT a FROM ta WHERE a NOT IN (); + } +} {1 2 3 4 6 8 10} + +do_test in-8.1 { + execsql { + SELECT b FROM t1 WHERE a IN ('hello','there') + } +} {world} +do_test in-8.2 { + execsql { + SELECT b FROM t1 WHERE a IN ("hello",'there') + } +} {world} + +# Test constructs of the form: expr IN tablename +# +do_test in-9.1 { + execsql { + CREATE TABLE t4 AS SELECT a FROM tb; + SELECT * FROM t4; + } +} {1 2 3 5 7 9 11} +do_test in-9.2 { + execsql { + SELECT b FROM t1 WHERE a IN t4; + } +} {32 128} +do_test in-9.3 { + execsql { + SELECT b FROM t1 WHERE a NOT IN t4; + } +} {64 256 world} +do_test in-9.4 { + catchsql { + SELECT b FROM t1 WHERE a NOT IN tb; + } +} {1 {only a single result allowed for a SELECT that is part of an expression}} + +# IN clauses in CHECK constraints. 
Ticket #1645 +# +do_test in-10.1 { + execsql { + CREATE TABLE t5( + a INTEGER, + CHECK( a IN (111,222,333) ) + ); + INSERT INTO t5 VALUES(111); + SELECT * FROM t5; + } +} {111} +do_test in-10.2 { + catchsql { + INSERT INTO t5 VALUES(4); + } +} {1 {constraint failed}} + +# Ticket #1821 +# +# Type affinity applied to the right-hand side of an IN operator. +# +do_test in-11.1 { + execsql { + CREATE TABLE t6(a,b NUMERIC); + INSERT INTO t6 VALUES(1,2); + INSERT INTO t6 VALUES(2,3); + SELECT * FROM t6 WHERE b IN (2); + } +} {1 2} +do_test in-11.2 { + # The '2' should be coerced into 2 because t6.b is NUMERIC + execsql { + SELECT * FROM t6 WHERE b IN ('2'); + } +} {1 2} +do_test in-11.3 { + # No coercion should occur here because of the unary + before b. + execsql { + SELECT * FROM t6 WHERE +b IN ('2'); + } +} {} +do_test in-11.4 { + # No coercion because column a as affinity NONE + execsql { + SELECT * FROM t6 WHERE a IN ('2'); + } +} {} +do_test in-11.5 { + execsql { + SELECT * FROM t6 WHERE a IN (2); + } +} {2 3} +do_test in-11.6 { + # No coercion because column a as affinity NONE + execsql { + SELECT * FROM t6 WHERE +a IN ('2'); + } +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/in2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/in2.test new file mode 100644 index 0000000..77092bf --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/in2.test @@ -0,0 +1,68 @@ +# 2007 May 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file tests a special case in the b-tree code that can be +# hit by the "IN" operator (or EXISTS, NOT IN, etc.). +# +# $Id: in2.test,v 1.2 2007/05/12 10:41:48 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test in2-1 { + execsql { + CREATE TABLE a(i INTEGER PRIMARY KEY, a); + } +} {} + +set ::N 2000 + +do_test in2-2 { + db transaction { + for {set ::ii 0} {$::ii < $::N} {incr ::ii} { + execsql {INSERT INTO a VALUES($::ii, $::ii)} + } + execsql {INSERT INTO a VALUES(4000, '')} + + for {set ::ii 0} {$::ii < $::N} {incr ::ii} { + set ::t [format "x%04d" $ii] + execsql {INSERT INTO a VALUES(NULL, $::t)} + } + } +} {} + +# Each iteration of this loop builds a slightly different b-tree to +# evaluate the "IN (...)" operator in the SQL statement. The contents +# of the b-tree are (in sorted order): +# +# $::ii integers. +# a string of zero length. +# $::N short strings. +# +# Records are inserted in sorted order. +# +# The string of zero-length is stored in a b-tree cell with 3 bytes +# of payload. Moving this cell from a leaf node to a internal node +# during b-tree balancing was causing an assertion failure. +# +# This bug only applied to b-trees generated to evaluate IN (..) +# clauses, as it is impossible for persistent b-trees (SQL tables + +# indices) to contain cells smaller than 4 bytes. 
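+#
+# (Illustrative note: every query issued by the loop below has the form
+#
+#     SELECT 1 IN (SELECT a FROM a WHERE (i < $ii) OR (i >= $N))
+#
+# The subquery results are copied into a transient b-tree before the IN
+# test runs, and varying $ii changes how that b-tree is packed, which is
+# what eventually forces the 3-byte empty-string cell to be moved during
+# a balance operation.)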
+# +for {set ::ii 3} {$::ii < $::N} {incr ::ii} { + do_test in2-$::ii { + execsql { + SELECT 1 IN (SELECT a FROM a WHERE (i < $::ii) OR (i >= $::N)) + } + } {1} +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob.test b/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob.test new file mode 100644 index 0000000..28d7132 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob.test @@ -0,0 +1,597 @@ +# 2007 May 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: incrblob.test,v 1.16 2007/09/03 16:45:36 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!autovacuum || !pragma || !incrblob} { + finish_test + return +} + +do_test incrblob-1.1 { + execsql { + CREATE TABLE blobs(k PRIMARY KEY, v BLOB); + INSERT INTO blobs VALUES('one', X'0102030405060708090A'); + INSERT INTO blobs VALUES('two', X'0A090807060504030201'); + } +} {} + +do_test incrblob-1.2.1 { + set ::blob [db incrblob blobs v 1] + string match incrblob_* $::blob +} {1} +do_test incrblob-1.2.2 { + binary scan [read $::blob] c* data + set data +} {1 2 3 4 5 6 7 8 9 10} +do_test incrblob-1.2.3 { + seek $::blob 0 + puts -nonewline $::blob "1234567890" + flush $::blob +} {} +do_test incrblob-1.2.4 { + seek $::blob 0 + binary scan [read $::blob] c* data + set data +} {49 50 51 52 53 54 55 56 57 48} +do_test incrblob-1.2.5 { + close $::blob +} {} +do_test incrblob-1.2.6 { + execsql { + SELECT v FROM blobs WHERE rowid = 1; + } +} {1234567890} + +#-------------------------------------------------------------------- +# Test cases incrblob-1.3.X check that it is possible to read and write +# regions of a blob that lie on overflow pages. +# +do_test incrblob-1.3.1 { + set ::str "[string repeat . 10000]" + execsql { + INSERT INTO blobs(rowid, k, v) VALUES(3, 'three', $::str); + } +} {} + +do_test incrblob-1.3.2 { + set ::blob [db incrblob blobs v 3] + seek $::blob 8500 + read $::blob 10 +} {..........} +do_test incrblob-1.3.3 { + seek $::blob 8500 + puts -nonewline $::blob 1234567890 +} {} +do_test incrblob-1.3.4 { + seek $::blob 8496 + read $::blob 10 +} {....123456} +do_test incrblob-1.3.10 { + close $::blob +} {} + + +#------------------------------------------------------------------------ +# incrblob-2.*: +# +# Test that the following operations use ptrmap pages to reduce +# unnecessary reads: +# +# * Reading near the end of a blob, +# * Writing near the end of a blob, and +# * SELECT a column value that is located on an overflow page. 
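+#
+# (Illustrative note: the access pattern being measured is simply
+#
+#     set b [db incrblob blobs v 1]
+#     seek $b -20 end
+#     read $b
+#
+# which should touch only a few pages when auto-vacuum ptrmap pages are
+# available, instead of walking the whole overflow chain.  The nRead and
+# nWrite helpers defined next return the pager's read and write counters
+# so the tests can assert exactly that.)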
+# +proc nRead {db} { + set bt [btree_from_db $db] + db_enter $db + array set stats [btree_pager_stats $bt] + db_leave $db + return $stats(read) +} +proc nWrite {db} { + set bt [btree_from_db $db] + db_enter $db + array set stats [btree_pager_stats $bt] + db_leave $db + return $stats(write) +} + +sqlite3_soft_heap_limit 0 + +foreach AutoVacuumMode [list 0 1] { + + if {$AutoVacuumMode>0} { + ifcapable !autovacuum { + break + } + } + + db close + file delete -force test.db test.db-journal + + sqlite3 db test.db + execsql "PRAGMA auto_vacuum = $AutoVacuumMode" + + do_test incrblob-2.$AutoVacuumMode.1 { + set ::str [string repeat abcdefghij 2900] + execsql { + BEGIN; + CREATE TABLE blobs(k PRIMARY KEY, v BLOB, i INTEGER); + DELETE FROM blobs; + INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45); + COMMIT; + } + expr [file size test.db]/1024 + } [expr 31 + $AutoVacuumMode] + + ifcapable autovacuum { + do_test incrblob-2.$AutoVacuumMode.2 { + execsql { + PRAGMA auto_vacuum; + } + } $AutoVacuumMode + } + + do_test incrblob-2.$AutoVacuumMode.3 { + # Open and close the db to make sure the page cache is empty. + db close + sqlite3 db test.db + + # Read the last 20 bytes of the blob via a blob handle. + set ::blob [db incrblob blobs v 1] + seek $::blob -20 end + set ::fragment [read $::blob] + close $::blob + + # If the database is not in auto-vacuum mode, the whole of + # the overflow-chain must be scanned. In auto-vacuum mode, + # sqlite uses the ptrmap pages to avoid reading the other pages. + # + nRead db + } [expr $AutoVacuumMode ? 4 : 30] + + do_test incrblob-2.$AutoVacuumMode.4 { + string range [db one {SELECT v FROM blobs}] end-19 end + } $::fragment + + do_test incrblob-2.$AutoVacuumMode.5 { + # Open and close the db to make sure the page cache is empty. + db close + sqlite3 db test.db + + # Write the second-to-last 20 bytes of the blob via a blob handle. + # + set ::blob [db incrblob blobs v 1] + seek $::blob -40 end + puts -nonewline $::blob "1234567890abcdefghij" + flush $::blob + + # If the database is not in auto-vacuum mode, the whole of + # the overflow-chain must be scanned. In auto-vacuum mode, + # sqlite uses the ptrmap pages to avoid reading the other pages. + # + nRead db + } [expr $AutoVacuumMode ? 4 : 30] + + # Pages 1 (the write-counter) and 32 (the blob data) were written. + do_test incrblob-2.$AutoVacuumMode.6 { + close $::blob + nWrite db + } 2 + + do_test incrblob-2.$AutoVacuumMode.7 { + string range [db one {SELECT v FROM blobs}] end-39 end-20 + } "1234567890abcdefghij" + + do_test incrblob-2.$AutoVacuumMode.8 { + # Open and close the db to make sure the page cache is empty. + db close + sqlite3 db test.db + + execsql { SELECT i FROM blobs } + } {45} + + do_test incrblob-2.$AutoVacuumMode.9 { + nRead db + } [expr $AutoVacuumMode ? 4 : 30] +} +sqlite3_soft_heap_limit $soft_limit + +#------------------------------------------------------------------------ +# incrblob-3.*: +# +# Test the outcome of trying to write to a read-only blob handle. 
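+#
+# (Illustrative note: a read-only handle is obtained with the -readonly
+# switch, e.g.
+#
+#     set b [db incrblob -readonly blobs v 1]
+#
+# Writing through the Tcl channel then fails with a "wasn't opened for
+# writing" channel error, and sqlite3_blob_write() on the same handle
+# fails with SQLITE_READONLY, as the tests below confirm.)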
+# +do_test incrblob-3.1 { + set ::blob [db incrblob -readonly blobs v 1] + seek $::blob -40 end + read $::blob 20 +} "1234567890abcdefghij" +do_test incrblob-3.2 { + seek $::blob 0 + set rc [catch { + puts -nonewline $::blob "helloworld" + } msg] + close $::blob + list $rc $msg +} "1 {channel \"$::blob\" wasn't opened for writing}" + +do_test incrblob-3.3 { + set ::blob [db incrblob -readonly blobs v 1] + seek $::blob -40 end + read $::blob 20 +} "1234567890abcdefghij" +do_test incrblob-3.4 { + set rc [catch { + sqlite3_blob_write $::blob 20 "qwertyuioplkjhgfds" + } msg] + list $rc $msg +} {1 SQLITE_READONLY} +catch {close $::blob} + +#------------------------------------------------------------------------ +# incrblob-4.*: +# +# Try a couple of error conditions: +# +# 4.1 - Attempt to open a row that does not exist. +# 4.2 - Attempt to open a column that does not exist. +# 4.3 - Attempt to open a table that does not exist. +# 4.4 - Attempt to open a database that does not exist. +# +# 4.5 - Attempt to open an integer +# 4.6 - Attempt to open a real value +# 4.7 - Attempt to open an SQL null +# +# 4.8 - Attempt to open an indexed column for writing +# 4.9 - Attempt to open an indexed column for reading (this works) +# +do_test incrblob-4.1 { + set rc [catch { + set ::blob [db incrblob blobs v 2] + } msg ] + list $rc $msg +} {1 {no such rowid: 2}} +do_test incrblob-4.2 { + set rc [catch { + set ::blob [db incrblob blobs blue 1] + } msg ] + list $rc $msg +} {1 {no such column: "blue"}} +do_test incrblob-4.3 { + set rc [catch { + set ::blob [db incrblob nosuchtable blue 1] + } msg ] + list $rc $msg +} {1 {no such table: main.nosuchtable}} +do_test incrblob-4.4 { + set rc [catch { + set ::blob [db incrblob nosuchdb blobs v 1] + } msg ] + list $rc $msg +} {1 {no such table: nosuchdb.blobs}} + +do_test incrblob-4.5 { + set rc [catch { + set ::blob [db incrblob blobs i 1] + } msg ] + list $rc $msg +} {1 {cannot open value of type integer}} +do_test incrblob-4.6 { + execsql { + INSERT INTO blobs(k, v, i) VALUES(123, 567.765, NULL); + } + set rc [catch { + set ::blob [db incrblob blobs v 2] + } msg ] + list $rc $msg +} {1 {cannot open value of type real}} +do_test incrblob-4.7 { + set rc [catch { + set ::blob [db incrblob blobs i 2] + } msg ] + list $rc $msg +} {1 {cannot open value of type null}} + +do_test incrblob-4.8 { + execsql { + INSERT INTO blobs(k, v, i) VALUES(X'010203040506070809', 'hello', 'world'); + } + set rc [catch { + set ::blob [db incrblob blobs k 3] + } msg ] + list $rc $msg +} {1 {cannot open indexed column for writing}} + +do_test incrblob-4.9.1 { + set rc [catch { + set ::blob [db incrblob -readonly blobs k 3] + } msg] +} {0} +do_test incrblob-4.9.2 { + binary scan [read $::blob] c* c + close $::blob + set c +} {1 2 3 4 5 6 7 8 9} + +do_test incrblob-4.10 { + set ::blob [db incrblob -readonly blobs k 3] + set rc [catch { sqlite3_blob_read $::blob 10 100 } msg] + list $rc $msg +} {1 SQLITE_ERROR} +do_test incrblob-4.11 { + close $::blob +} {} + +#------------------------------------------------------------------------ +# incrblob-5.*: +# +# Test that opening a blob in an attached database works. 
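+#
+# (Illustrative note: when a database name is supplied it comes before
+# the table name, so a handle on table "files" in the attached database
+# "aux" is opened with
+#
+#     set fd [db incrblob aux files text 1]
+#
+# which is exactly what the test below does after ATTACH-ing test2.db.)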
+# +do_test incrblob-5.1 { + file delete -force test2.db test2.db-journal + set ::size [expr [file size [info script]]] + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.files(name, text); + INSERT INTO aux.files VALUES('this one', zeroblob($::size)); + } + set fd [db incrblob aux files text 1] + fconfigure $fd -translation binary + set fd2 [open [info script]] + fconfigure $fd2 -translation binary + puts -nonewline $fd [read $fd2] + close $fd + close $fd2 + set ::text [db one {select text from aux.files}] + string length $::text +} [file size [info script]] +do_test incrblob-5.2 { + set fd2 [open [info script]] + fconfigure $fd2 -translation binary + set ::data [read $fd2] + close $fd2 + set ::data +} $::text + +# free memory +unset ::data +unset ::text + +#------------------------------------------------------------------------ +# incrblob-6.*: +# +# Test that opening a blob for write-access is impossible if +# another connection has the database RESERVED lock. +# +# Then test that blob writes that take place inside of a +# transaction are not visible to external connections until +# after the transaction is commited and the blob channel +# closed. +# +sqlite3_soft_heap_limit 0 +do_test incrblob-6.1 { + sqlite3 db2 test.db + execsql { + BEGIN; + INSERT INTO blobs(k, v, i) VALUES('a', 'different', 'connection'); + } db2 +} {} +do_test incrblob-6.2 { + execsql { + SELECT rowid FROM blobs + } +} {1 2 3} +do_test incrblob-6.3 { + set rc [catch { + db incrblob blobs v 1 + } msg] + list $rc $msg +} {1 {database is locked}} +do_test incrblob-6.4 { + set rc [catch { + db incrblob blobs v 3 + } msg] + list $rc $msg +} {1 {database is locked}} +do_test incrblob-6.5 { + set ::blob [db incrblob -readonly blobs v 3] + read $::blob +} {hello} +do_test incrblob-6.6 { + close $::blob +} {} + +do_test incrblob-6.7 { + set ::blob [db2 incrblob blobs i 4] + gets $::blob +} {connection} +do_test incrblob-6.8 { + tell $::blob +} {10} +do_test incrblob-6.9 { + seek $::blob 0 + puts -nonewline $::blob "invocation" + flush $::blob +} {} + +# At this point rollback or commit should be illegal (because +# there is an open blob channel). +do_test incrblob-6.10 { + catchsql { + ROLLBACK; + } db2 +} {1 {cannot rollback transaction - SQL statements in progress}} +do_test incrblob-6.11 { + catchsql { + COMMIT; + } db2 +} {1 {cannot commit transaction - SQL statements in progress}} + +do_test incrblob-6.12 { + execsql { + SELECT * FROM blobs WHERE rowid = 4; + } +} {} +do_test incrblob-6.13 { + close $::blob + execsql { + COMMIT; + } db2 +} {} +do_test incrblob-6.14 { + execsql { + SELECT * FROM blobs WHERE rowid = 4; + } +} {a different invocation} +db2 close +sqlite3_soft_heap_limit $soft_limit + +#----------------------------------------------------------------------- +# The following tests verify the behaviour of the incremental IO +# APIs in the following cases: +# +# 7.1 A row that containing an open blob is modified. +# +# 7.2 A CREATE TABLE requires that an overflow page that is part +# of an open blob is moved. +# +# 7.3 An INCREMENTAL VACUUM moves an overflow page that is part +# of an open blob. +# +# In the first case above, correct behaviour is for all subsequent +# read/write operations on the blob-handle to return SQLITE_ABORT. +# More accurately, blob-handles are invalidated whenever the table +# they belong to is written to. +# +# The second two cases have no external effect. They are testing +# that the internal cache of overflow page numbers is correctly +# invalidated. 
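+#
+# (Illustrative note for case 7.1, the pattern exercised below is:
+#
+#     set b [db incrblob t1 d 1]          ;# open a handle on t1.d
+#     db eval { UPDATE t1 SET d = 15 }    ;# write to the same table
+#     sqlite3_blob_read $b 5000 5         ;# now fails with SQLITE_ABORT
+#
+# in other words any write to the table invalidates blob handles that
+# are open on it, as described above.)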
+# +do_test incrblob-7.1.0 { + execsql { + BEGIN; + DROP TABLE blobs; + CREATE TABLE t1 (a, b, c, d BLOB); + INSERT INTO t1(a, b, c, d) VALUES(1, 2, 3, 4); + COMMIT; + } +} {} + +foreach {tn arg} {1 "" 2 -readonly} { + + execsql { + UPDATE t1 SET d = zeroblob(10000); + } + + do_test incrblob-7.1.$tn.1 { + set ::b [eval db incrblob $arg t1 d 1] + binary scan [sqlite3_blob_read $::b 5000 5] c* c + set c + } {0 0 0 0 0} + do_test incrblob-7.1.$tn.2 { + execsql { + UPDATE t1 SET d = 15; + } + } {} + do_test incrblob-7.1.$tn.3 { + set rc [catch { sqlite3_blob_read $::b 5000 5 } msg] + list $rc $msg + } {1 SQLITE_ABORT} + do_test incrblob-7.1.$tn.4 { + execsql { + SELECT d FROM t1; + } + } {15} + do_test incrblob-7.1.$tn.5 { + set rc [catch { close $::b } msg] + list $rc $msg + } {0 {}} + do_test incrblob-7.1.$tn.6 { + execsql { + SELECT d FROM t1; + } + } {15} + +} + +set fd [open [info script]] +fconfigure $fd -translation binary +set ::data [read $fd 14000] +close $fd + +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test incrblob-7.2.1 { + execsql { + PRAGMA auto_vacuum = "incremental"; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); -- root@page3 + INSERT INTO t1 VALUES(123, $::data); + } + set ::b [db incrblob -readonly t1 b 123] + read $::b +} $::data +do_test incrblob-7.2.2 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); -- root@page4 + } + seek $::b 0 + read $::b +} $::data +do_test incrblob-7.2.3 { + close $::b + execsql { + SELECT rootpage FROM sqlite_master; + } +} {3 4} + +set ::otherdata "[string range $::data 0 1000][string range $::data 1001 end]" +do_test incrblob-7.3.1 { + execsql { + INSERT INTO t2 VALUES(456, $::otherdata); + } + set ::b [db incrblob -readonly t2 b 456] + read $::b +} $::otherdata +do_test incrblob-7.3.2 { + expr [file size test.db]/1024 +} 30 +do_test incrblob-7.3.3 { + execsql { + DELETE FROM t1 WHERE a = 123; + PRAGMA INCREMENTAL_VACUUM(0); + } + seek $::b 0 + read $::b +} $::otherdata + +# Attempt to write on a read-only blob. Make sure the error code +# gets set. Ticket #2464. +# +do_test incrblob-7.4 { + set rc [catch {sqlite3_blob_write $::b 10 HELLO} msg] + lappend rc $msg +} {1 SQLITE_READONLY} +do_test incrblob-7.5 { + sqlite3_errcode db +} {SQLITE_READONLY} +do_test incrblob-7.6 { + sqlite3_errmsg db +} {attempt to write a readonly database} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob_err.test b/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob_err.test new file mode 100644 index 0000000..397b291 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/incrblob_err.test @@ -0,0 +1,102 @@ +# 2007 May 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: incrblob_err.test,v 1.8 2007/09/12 17:01:45 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!incrblob || !memdebug || !tclvar} { + finish_test + return +} + +source $testdir/malloc_common.tcl + +set ::fd [open [info script]] +set ::data [read $::fd] +close $::fd + +do_malloc_test 1 -tclprep { + set bytes [file size [info script]] + execsql { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, zeroblob($::bytes)); + } +} -tclbody { + set ::blob [db incrblob blobs v 1] + set rc [catch {puts -nonewline $::blob $::data}] + if {$rc} { error "out of memory" } +} + +do_malloc_test 2 -tclprep { + execsql { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, $::data); + } +} -tclbody { + set ::blob [db incrblob blobs v 1] + set rc [catch {set ::r [read $::blob]}] + if {$rc} { + error "out of memory" + } elseif {$::r ne $::data} { + error "Bad data read..." + } +} + +do_malloc_test 3 -tclprep { + execsql { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, $::data); + } +} -tclbody { + set ::blob [db incrblob blobs v 1] + set rc [catch {set ::r [read $::blob]}] + if {$rc} { + error "out of memory" + } elseif {$::r ne $::data} { + error "Bad data read..." + } + set rc [catch {close $::blob}] + if {$rc} { + error "out of memory" + } +} + +do_ioerr_test incrblob_err-4 -cksum 1 -sqlprep { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, $::data); +} -tclbody { + set ::blob [db incrblob blobs v 1] + read $::blob +} + +do_ioerr_test incrblob_err-5 -cksum 1 -sqlprep { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, zeroblob(length(CAST($::data AS BLOB)))); +} -tclbody { + set ::blob [db incrblob blobs v 1] + puts -nonewline $::blob $::data + close $::blob +} + +do_ioerr_test incrblob_err-6 -cksum 1 -sqlprep { + CREATE TABLE blobs(k, v BLOB); + INSERT INTO blobs VALUES(1, $::data || $::data || $::data); +} -tclbody { + set ::blob [db incrblob blobs v 1] + seek $::blob -20 end + puts -nonewline $::blob "12345678900987654321" + close $::blob +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum.test b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum.test new file mode 100644 index 0000000..e9c5db9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum.test @@ -0,0 +1,699 @@ +# 2007 April 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the incremental vacuum feature. +# +# Note: There are also some tests for incremental vacuum and IO +# errors in incrvacuum_ioerr.test. +# +# $Id: incrvacuum.test,v 1.14 2007/09/01 10:01:13 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. +ifcapable {!autovacuum || !pragma} { + finish_test + return +} + +#--------------------------------------------------------------------- +# Test the pragma on an empty database. 
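+#
+# (Illustrative note: "PRAGMA auto_vacuum" reports the current mode as a
+# number, 0 = none, 1 = full, 2 = incremental, and the symbolic names
+# are accepted when setting it, e.g.
+#
+#     PRAGMA auto_vacuum = 'incremental';
+#     PRAGMA auto_vacuum;          -- returns 2
+#
+# Setting the pragma on a zero-length file is also what first creates
+# the database, which incrvacuum-1.2.1 below checks.)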
+# +do_test incrvacuum-1.1 { + execsql { + pragma auto_vacuum; + } +} $sqlite_options(default_autovacuum) +do_test incrvacuum-1.2.0 { + expr {[file size test.db] > 0} +} {0} +do_test incrvacuum-1.2 { + # This command will create the database. + execsql { + pragma auto_vacuum = 'full'; + pragma auto_vacuum; + } +} {1} +do_test incrvacuum-1.2.1 { + expr {[file size test.db] > 0} +} {1} +do_test incrvacuum-1.3 { + execsql { + pragma auto_vacuum = 'incremental'; + pragma auto_vacuum; + } +} {2} +do_test incrvacuum-1.4 { + # In this case the invalid value is ignored and the auto_vacuum + # setting remains unchanged. + execsql { + pragma auto_vacuum = 'invalid'; + pragma auto_vacuum; + } +} {2} +do_test incrvacuum-1.5 { + execsql { + pragma auto_vacuum = 1; + pragma auto_vacuum; + } +} {1} +do_test incrvacuum-1.6 { + execsql { + pragma auto_vacuum = '2'; + pragma auto_vacuum; + } +} {2} +do_test incrvacuum-1.7 { + # Invalid value. auto_vacuum setting remains unchanged. + execsql { + pragma auto_vacuum = 5; + pragma auto_vacuum; + } +} {2} + +#--------------------------------------------------------------------- +# Test the pragma on a non-empty database. It is possible to toggle +# the connection between "full" and "incremental" mode, but not to +# change from either of these to "none", or from "none" to "full" or +# "incremental". +# +do_test incrvacuum-2.1 { + execsql { + pragma auto_vacuum = 1; + CREATE TABLE abc(a, b, c); + } +} {} +do_test incrvacuum-2.2 { + execsql { + pragma auto_vacuum = 'none'; + pragma auto_vacuum; + } +} {1} +do_test incrvacuum-2.2.1 { + db close + sqlite3 db test.db + execsql { + pragma auto_vacuum; + } +} {1} +do_test incrvacuum-2.3 { + execsql { + pragma auto_vacuum = 'incremental'; + pragma auto_vacuum; + } +} {2} +do_test incrvacuum-2.4 { + execsql { + pragma auto_vacuum = 'full'; + pragma auto_vacuum; + } +} {1} + +#--------------------------------------------------------------------- +# Test that when the auto_vacuum mode is "incremental", the database +# does not shrink when pages are removed from it. But it does if +# the mode is set to "full". +# +do_test incrvacuum-3.1 { + execsql { + pragma auto_vacuum; + } +} {1} +do_test incrvacuum-3.2 { + set ::str [string repeat 1234567890 110] + execsql { + PRAGMA auto_vacuum = 2; + BEGIN; + CREATE TABLE tbl2(str); + INSERT INTO tbl2 VALUES($::str); + COMMIT; + } + # 5 pages: + # + # 1 -> database header + # 2 -> first back-pointer page + # 3 -> table abc + # 4 -> table tbl2 + # 5 -> table tbl2 overflow page. + # + expr {[file size test.db] / 1024} +} {5} +do_test incrvacuum-3.3 { + execsql { + DROP TABLE abc; + DELETE FROM tbl2; + } + expr {[file size test.db] / 1024} +} {5} +do_test incrvacuum-3.4 { + execsql { + PRAGMA auto_vacuum = 1; + INSERT INTO tbl2 VALUES('hello world'); + } + expr {[file size test.db] / 1024} +} {3} + +#--------------------------------------------------------------------- +# Try to run a very simple incremental vacuum. Also verify that +# PRAGMA incremental_vacuum is a harmless no-op against a database that +# does not support auto-vacuum. 
+# +do_test incrvacuum-4.1 { + set ::str [string repeat 1234567890 110] + execsql { + PRAGMA auto_vacuum = 2; + INSERT INTO tbl2 VALUES($::str); + CREATE TABLE tbl1(a, b, c); + } + expr {[file size test.db] / 1024} +} {5} +do_test incrvacuum-4.2 { + execsql { + DELETE FROM tbl2; + DROP TABLE tbl1; + } + expr {[file size test.db] / 1024} +} {5} +do_test incrvacuum-4.3 { + set ::nStep 0 + db eval {pragma incremental_vacuum(10)} { + incr ::nStep + } + list [expr {[file size test.db] / 1024}] $::nStep +} {3 2} + +#--------------------------------------------------------------------- +# The following tests - incrvacuum-5.* - test incremental vacuum +# from within a transaction. +# +do_test incrvacuum-5.1.1 { + expr {[file size test.db] / 1024} +} {3} +do_test incrvacuum-5.1.2 { + execsql { + BEGIN; + DROP TABLE tbl2; + PRAGMA incremental_vacuum; + COMMIT; + } + expr {[file size test.db] / 1024} +} {1} + +do_test incrvacuum-5.2.1 { + set ::str [string repeat abcdefghij 110] + execsql { + BEGIN; + CREATE TABLE tbl1(a); + INSERT INTO tbl1 VALUES($::str); + PRAGMA incremental_vacuum; -- this is a no-op. + COMMIT; + } + expr {[file size test.db] / 1024} +} {4} +do_test incrvacuum-5.2.2 { + set ::str [string repeat abcdefghij 110] + execsql { + BEGIN; + INSERT INTO tbl1 VALUES($::str); + INSERT INTO tbl1 SELECT * FROM tbl1; + DELETE FROM tbl1 WHERE oid%2; -- Put 2 overflow pages on free-list. + COMMIT; + } + expr {[file size test.db] / 1024} +} {7} +do_test incrvacuum-5.2.3 { + execsql { + BEGIN; + PRAGMA incremental_vacuum; -- Vacuum up the two pages. + CREATE TABLE tbl2(b); -- Use one free page as a table root. + INSERT INTO tbl2 VALUES('a nice string'); + COMMIT; + } + expr {[file size test.db] / 1024} +} {6} +do_test incrvacuum-5.2.4 { + execsql { + SELECT * FROM tbl2; + } +} {{a nice string}} +do_test incrvacuum-5.2.5 { + execsql { + DROP TABLE tbl1; + DROP TABLE tbl2; + PRAGMA incremental_vacuum; + } + expr {[file size test.db] / 1024} +} {1} + + +# Test cases incrvacuum-5.3.* use the following list as input data. +# Two new databases are opened, one with incremental vacuum enabled, +# the other with no auto-vacuum completely disabled. After executing +# each element of the following list on both databases, test that +# the integrity-check passes and the contents of each are identical. +# +set TestScriptList [list { + BEGIN; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + CREATE INDEX t1_i ON t1(a); + CREATE INDEX t2_i ON t2(a); +} { + INSERT INTO t1 VALUES($::str1, $::str2); + INSERT INTO t1 VALUES($::str1||$::str2, $::str2||$::str1); + INSERT INTO t2 SELECT b, a FROM t1; + INSERT INTO t2 SELECT a, b FROM t1; + INSERT INTO t1 SELECT b, a FROM t2; + UPDATE t2 SET b = ''; + PRAGMA incremental_vacuum; +} { + UPDATE t2 SET b = (SELECT b FROM t1 WHERE t1.oid = t2.oid); + PRAGMA incremental_vacuum; +} { + CREATE TABLE t3(a, b); + INSERT INTO t3 SELECT * FROM t2; + DROP TABLE t2; + PRAGMA incremental_vacuum; +} { + CREATE INDEX t3_i ON t3(a); + COMMIT; +} { + BEGIN; + DROP INDEX t3_i; + PRAGMA incremental_vacuum; + INSERT INTO t3 VALUES('hello', 'world'); + ROLLBACK; +} { + INSERT INTO t3 VALUES('hello', 'world'); +} +] + +# Compare the contents of databases $A and $B. 
+# +proc compare_dbs {A B tname} { + set tbl_list [execsql { + SELECT tbl_name FROM sqlite_master WHERE type = 'table' + } $A] + + do_test ${tname}.1 [subst { + execsql { + SELECT tbl_name FROM sqlite_master WHERE type = 'table' + } $B + }] $tbl_list + + set tn 1 + foreach tbl $tbl_list { + set control [execsql "SELECT * FROM $tbl" $A] + do_test ${tname}.[incr tn] [subst { + execsql "SELECT * FROM $tbl" $B + }] $control + } +} + +set ::str1 [string repeat abcdefghij 130] +set ::str2 [string repeat 1234567890 105] + +file delete -force test1.db test1.db-journal test2.db test2.db-journal +sqlite3 db1 test1.db +sqlite3 db2 test2.db +execsql { PRAGMA auto_vacuum = 'none' } db1 +execsql { PRAGMA auto_vacuum = 'incremental' } db2 + +set tn 1 +foreach sql $::TestScriptList { + execsql $sql db1 + execsql $sql db2 + + compare_dbs db1 db2 incrvacuum-5.3.${tn} + do_test incrvacuum-5.3.${tn}.integrity1 { + execsql { PRAGMA integrity_check; } db1 + } {ok} + do_test incrvacuum-5.3.${tn}.integrity2 { + execsql { PRAGMA integrity_check; } db2 + } {ok} + incr tn +} +db1 close +db2 close +# +# End of test cases 5.3.* + +#--------------------------------------------------------------------- +# The following tests - incrvacuum-6.* - test running incremental +# vacuum while another statement (a read) is being executed. +# +for {set jj 0} {$jj < 10} {incr jj} { + # Build some test data. Two tables are created in an empty + # database. tbl1 data is a contiguous block starting at page 5 (pages + # 3 and 4 are the table roots). tbl2 is a contiguous block starting + # right after tbl1. + # + # Then drop tbl1 so that when an incr vacuum is run the pages + # of tbl2 have to be moved to fill the gap. + # + do_test incrvacuum-6.${jj}.1 { + execsql { + DROP TABLE IF EXISTS tbl1; + DROP TABLE IF EXISTS tbl2; + PRAGMA incremental_vacuum; + CREATE TABLE tbl1(a, b); + CREATE TABLE tbl2(a, b); + BEGIN; + } + for {set ii 0} {$ii < 1000} {incr ii} { + db eval {INSERT INTO tbl1 VALUES($ii, $ii || $ii)} + } + execsql { + INSERT INTO tbl2 SELECT * FROM tbl1; + COMMIT; + DROP TABLE tbl1; + } + expr {[file size test.db] / 1024} + } {36} + + # Run a linear scan query on tbl2. After reading ($jj*100) rows, + # run the incremental vacuum to shrink the database. + # + do_test incrvacuum-6.${jj}.2 { + set ::nRow 0 + db eval {SELECT a FROM tbl2} {} { + if {$a == [expr $jj*100]} { + db eval {PRAGMA incremental_vacuum} + } + incr ::nRow + } + list [expr {[file size test.db] / 1024}] $nRow + } {19 1000} +} + +#--------------------------------------------------------------------- +# This test - incrvacuum-7.* - is to check that the database can be +# written in the middle of an incremental vacuum. 
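+#
+# (Illustrative note: "PRAGMA incremental_vacuum" returns rows as it
+# proceeds, so when it is run with a script body the body executes
+# between vacuum steps and can issue writes of its own:
+#
+#     db eval {PRAGMA incremental_vacuum} {
+#       # a CREATE TABLE or INSERT can be executed here
+#     }
+#
+# which is exactly how the loop below interleaves writes with the
+# vacuum.)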
+# +set ::iWrite 1 +while 1 { + do_test incrvacuum-7.${::iWrite}.1 { + execsql { + DROP TABLE IF EXISTS tbl1; + DROP TABLE IF EXISTS tbl2; + PRAGMA incremental_vacuum; + CREATE TABLE tbl1(a, b); + CREATE TABLE tbl2(a, b); + BEGIN; + } + for {set ii 0} {$ii < 1000} {incr ii} { + db eval {INSERT INTO tbl1 VALUES($ii, $ii || $ii)} + } + execsql { + INSERT INTO tbl2 SELECT * FROM tbl1; + COMMIT; + DROP TABLE tbl1; + } + expr {[file size test.db] / 1024} + } {36} + + do_test incrvacuum-7.${::iWrite}.2 { + set ::nRow 0 + db eval {PRAGMA incremental_vacuum} { + incr ::nRow + if {$::nRow == $::iWrite} { + db eval { + CREATE TABLE tbl1(a, b); + INSERT INTO tbl1 VALUES('hello', 'world'); + } + } + } + list [expr {[file size test.db] / 1024}] + } {20} + + do_test incrvacuum-7.${::iWrite}.3 { + execsql { + SELECT * FROM tbl1; + } + } {hello world} + + if {$::nRow == $::iWrite} break + incr ::iWrite +} + +#--------------------------------------------------------------------- +# This test - incrvacuum-8.* - is to check that nothing goes wrong +# with an incremental-vacuum if it is the first statement executed +# after an existing database is opened. +# +# At one point, this would always return SQLITE_SCHEMA (which +# causes an infinite loop in tclsqlite.c if using the Tcl interface). +# +do_test incrvacuum-8.1 { + db close + sqlite3 db test.db + execsql { + PRAGMA incremental_vacuum(50); + } +} {} + +#--------------------------------------------------------------------- +# At one point this test case was causing an assert() to fail. +# +do_test incrvacuum-9.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + + execsql { + PRAGMA auto_vacuum = 'incremental'; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + INSERT INTO t2 VALUES(randstr(500,500),randstr(500,500),randstr(500,500)); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + INSERT INTO t1 SELECT a||a, b||b, c||c FROM t1; + } +} {} + +do_test incrvacuum-9.2 { + execsql { + PRAGMA synchronous = 'OFF'; + BEGIN; + UPDATE t1 SET a = a, b = b, c = c; + DROP TABLE t2; + PRAGMA incremental_vacuum(10); + ROLLBACK; + } +} {} + +do_test incrvacuum-9.3 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET a = a, b = b, c = c; + DROP TABLE t2; + PRAGMA incremental_vacuum(10); + ROLLBACK; + } +} {} + +#--------------------------------------------------------------------- +# Test that the parameter to the incremental_vacuum pragma works. That +# is, if the user executes "PRAGMA incremental_vacuum(N)", at most +# N pages are vacuumed. 
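+#
+# (Illustrative note: with the 1024-byte pages used by these tests each
+# vacuumed page shrinks the file by 1KB, so
+#
+#     PRAGMA incremental_vacuum(5);
+#
+# should reduce [file size test.db] by at most 5*1024 bytes.  The tests
+# below also pass the argument as a string, with "=", and as a number
+# far larger than the freelist, which simply frees everything.)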
+# +do_test incrvacuum-10.1 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + } + expr [file size test.db] / 1024 +} {29} + +do_test incrvacuum-10.2 { + execsql { + PRAGMA incremental_vacuum(1); + } + expr [file size test.db] / 1024 +} {28} + +do_test incrvacuum-10.3 { + execsql { + PRAGMA incremental_vacuum(5); + } + expr [file size test.db] / 1024 +} {23} + +do_test incrvacuum-10.4 { + execsql { + PRAGMA incremental_vacuum('1'); + } + expr [file size test.db] / 1024 +} {22} + +do_test incrvacuum-10.5 { +breakpoint + execsql { + PRAGMA incremental_vacuum("+3"); + } + expr [file size test.db] / 1024 +} {19} + +do_test incrvacuum-10.6 { + execsql { + PRAGMA incremental_vacuum = 1; + } + expr [file size test.db] / 1024 +} {18} + +do_test incrvacuum-10.7 { + # Use a really big number as an argument to incremetal_vacuum. Should + # be interpreted as "free all possible space". + execsql { + PRAGMA incremental_vacuum(2147483649); + } + expr [file size test.db] / 1024 +} {1} + +#---------------------------------------------------------------- +# Test that if we set the auto_vacuum mode to 'incremental', then +# create a database, thereafter that database defaults to incremental +# vacuum mode. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +ifcapable default_autovacuum { + do_test incrvacuum-11.1-av-dflt-on { + execsql { + PRAGMA auto_vacuum; + } + } {1} +} else { + do_test incrvacuum-11.1-av-dflt-off { + execsql { + PRAGMA auto_vacuum; + } + } {0} +} +do_test incrvacuum-11.2 { + execsql { + PRAGMA auto_vacuum = incremental; + } +} {} +do_test incrvacuum-11.3 { + execsql { + PRAGMA auto_vacuum; + } +} {2} +do_test incrvacuum-11.4 { + # The database has now been created. + expr {[file size test.db]>0} +} {1} +do_test incrvacuum-11.5 { + # Close and reopen the connection. + db close + sqlite3 db test.db + + # Test we are still in incremental vacuum mode. + execsql { PRAGMA auto_vacuum; } +} {2} +do_test incrvacuum-11.6 { + execsql { + PRAGMA auto_vacuum = 'full'; + PRAGMA auto_vacuum; + } +} {1} +do_test incrvacuum-11.7 { + # Close and reopen the connection. + db close + sqlite3 db test.db + + # Test we are still in "full" auto-vacuum mode. + execsql { PRAGMA auto_vacuum; } +} {1} + +#---------------------------------------------------------------------- +# Special case: What happens if the database is locked when a "PRAGMA +# auto_vacuum = XXX" statement is executed. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test incrvacuum-12.1 { + execsql { + PRAGMA auto_vacuum = 1; + } + expr {[file size test.db]>0} +} {1} + +# Try to change the auto-vacuum from "full" to "incremental" while the +# database is locked. Nothing should change. +# +do_test incrvacuum-12.2 { + sqlite3 db2 test.db + execsql { BEGIN EXCLUSIVE; } db2 + catchsql { PRAGMA auto_vacuum = 2; } +} {1 {database is locked}} + +do_test incrvacuum-12.3 { + execsql { ROLLBACK; } db2 + execsql { PRAGMA auto_vacuum } +} {1} + +do_test incrvacuum-12.3 { + execsql { SELECT * FROM sqlite_master } + execsql { PRAGMA auto_vacuum } +} {1} + +#---------------------------------------------------------------------- +# Special case #2: What if one process prepares a "PRAGMA auto_vacuum = XXX" +# statement when the database is empty, but doesn't execute it until +# after some other process has created the database. 
+# +db2 close +db close +file delete -force test.db test.db-journal +sqlite3 db test.db ; set ::DB [sqlite3_connection_pointer db] +sqlite3 db2 test.db + +do_test incrvacuum-13.1 { + expr {[file size test.db]>0} +} {0} +do_test incrvacuum-13.2 { + set ::STMT [sqlite3_prepare $::DB {PRAGMA auto_vacuum = 2} -1 DUMMY] + execsql { + PRAGMA auto_vacuum = none; + PRAGMA default_cache_size = 1024; + PRAGMA auto_vacuum; + } db2 +} {0} +do_test incrvacuum-13.3 { + expr {[file size test.db]>0} +} {1} +do_test incrvacuum-13.4 { + set rc [sqlite3_step $::STMT] + list $rc [sqlite3_finalize $::STMT] +} {SQLITE_DONE SQLITE_OK} +do_test incrvacuum-13.5 { + execsql { + PRAGMA auto_vacuum; + } +} {0} + +db2 close +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum2.test new file mode 100644 index 0000000..50da7a2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum2.test @@ -0,0 +1,125 @@ +# 2007 May 04 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the incremental vacuum feature. +# +# $Id: incrvacuum2.test,v 1.3 2007/05/17 06:44:28 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. +ifcapable {!autovacuum || !pragma} { + finish_test + return +} + +# If the OMIT_INCRBLOB symbol was defined at compile time, there +# is no zeroblob() function available. So create a similar +# function here using Tcl. It doesn't return a blob, but it returns +# data of the required length, which is good enough for this +# test file. +ifcapable !incrblob { + proc zeroblob {n} { string repeat 0 $n } + db function zeroblob zeroblob +} + +# Create a database in incremental vacuum mode that has many +# pages on the freelist. +# +do_test incrvacuum2-1.1 { + execsql { + PRAGMA page_size=1024; + PRAGMA auto_vacuum=incremental; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(30000)); + DELETE FROM t1; + } + file size test.db +} {32768} + +# Vacuum off a single page. +# +do_test incrvacuum2-1.2 { + execsql { + PRAGMA incremental_vacuum(1); + } + file size test.db +} {31744} + +# Vacuum off five pages +# +do_test incrvacuum2-1.3 { + execsql { + PRAGMA incremental_vacuum(5); + } + file size test.db +} {26624} + +# Vacuum off all the rest +# +do_test incrvacuum2-1.4 { + execsql { + PRAGMA incremental_vacuum(1000); + } + file size test.db +} {3072} + +# Make sure incremental vacuum works on attached databases. 
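+#
+# (Illustrative note: the pragma may be qualified with a database name,
+# so after "ATTACH DATABASE 'test2.db' AS aux"
+#
+#     PRAGMA aux.incremental_vacuum(5);
+#
+# shrinks only test2.db, while "PRAGMA main.incremental_vacuum(5)"
+# shrinks only test.db.  The file-size checks below verify both.)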
+# +do_test incrvacuum2-2.1 { + file delete -force test2.db test2.db-journal + execsql { + ATTACH DATABASE 'test2.db' AS aux; + PRAGMA aux.auto_vacuum=incremental; + CREATE TABLE aux.t2(x); + INSERT INTO t2 VALUES(zeroblob(30000)); + INSERT INTO t1 SELECT * FROM t2; + DELETE FROM t2; + DELETE FROM t1; + } + list [file size test.db] [file size test2.db] +} {32768 32768} +do_test incrvacuum2-2.2 { + execsql { + PRAGMA aux.incremental_vacuum(1) + } + list [file size test.db] [file size test2.db] +} {32768 31744} +do_test incrvacuum2-2.3 { + execsql { + PRAGMA aux.incremental_vacuum(5) + } + list [file size test.db] [file size test2.db] +} {32768 26624} +do_test incrvacuum2-2.4 { + execsql { + PRAGMA main.incremental_vacuum(5) + } + list [file size test.db] [file size test2.db] +} {27648 26624} +do_test incrvacuum2-2.5 { + execsql { + PRAGMA aux.incremental_vacuum + } + list [file size test.db] [file size test2.db] +} {27648 3072} +do_test incrvacuum2-2.6 { + execsql { + PRAGMA incremental_vacuum(1) + } + list [file size test.db] [file size test2.db] +} {26624 3072} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum_ioerr.test b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum_ioerr.test new file mode 100644 index 0000000..9ee9e38 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/incrvacuum_ioerr.test @@ -0,0 +1,89 @@ +# 2001 October 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing for correct handling of I/O errors +# such as writes failing because the disk is full. +# +# The tests in this file use special facilities that are only +# available in the SQLite test fixture. +# +# $Id: incrvacuum_ioerr.test,v 1.2 2007/05/04 18:30:41 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If this build of the library does not support auto-vacuum, omit this +# whole file. 
+ifcapable {!autovacuum} { + finish_test + return +} + +do_ioerr_test incrvacuum-ioerr-1 -cksum 1 -sqlprep { + PRAGMA auto_vacuum = 'incremental'; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1500,1500)); +} -sqlbody { + BEGIN; + CREATE TABLE abc2(a); + DELETE FROM abc; + PRAGMA incremental_vacuum; + COMMIT; +} + +# do_ioerr_test incrvacuum-ioerr-3 -start 1 -cksum 1 -tclprep { +# db eval { +# PRAGMA auto_vacuum = 'full'; +# PRAGMA cache_size = 10; +# BEGIN; +# CREATE TABLE abc(a, UNIQUE(a)); +# } +# for {set ii 0} {$ii < 25} {incr ii} { +# db eval {INSERT INTO abc VALUES(randstr(1500,1500))} +# } +# db eval COMMIT +# } -sqlbody { +# BEGIN; +# DELETE FROM abc WHERE (oid%3)==0; +# INSERT INTO abc SELECT a || '1234567890' FROM abc WHERE oid%2; +# CREATE INDEX abc_i ON abc(a); +# DELETE FROM abc WHERE (oid%2)==0; +# DROP INDEX abc_i; +# COMMIT; +# } + + +do_ioerr_test incrvacuum-ioerr-2 -start 1 -cksum 1 -tclprep { + db eval { + PRAGMA auto_vacuum = 'full'; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE abc(a, UNIQUE(a)); + } + for {set ii 0} {$ii < 25} {incr ii} { + db eval {INSERT INTO abc VALUES(randstr(1500,1500))} + } + db eval COMMIT +} -sqlbody { + BEGIN; + PRAGMA incremental_vacuum; + DELETE FROM abc WHERE (oid%3)==0; + PRAGMA incremental_vacuum; + INSERT INTO abc SELECT a || '1234567890' FROM abc WHERE oid%2; + PRAGMA incremental_vacuum; + CREATE INDEX abc_i ON abc(a); + DELETE FROM abc WHERE (oid%2)==0; + PRAGMA incremental_vacuum; + DROP INDEX abc_i; + PRAGMA incremental_vacuum; + COMMIT; +} +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/index.test b/libraries/sqlite/unix/sqlite-3.5.1/test/index.test new file mode 100644 index 0000000..69427e8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/index.test @@ -0,0 +1,711 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CREATE INDEX statement. 
+# +# $Id: index.test,v 1.42 2006/03/29 00:24:07 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a basic index and verify it is added to sqlite_master +# +do_test index-1.1 { + execsql {CREATE TABLE test1(f1 int, f2 int, f3 int)} + execsql {CREATE INDEX index1 ON test1(f1)} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {index1 test1} +do_test index-1.1b { + execsql {SELECT name, sql, tbl_name, type FROM sqlite_master + WHERE name='index1'} +} {index1 {CREATE INDEX index1 ON test1(f1)} test1 index} +do_test index-1.1c { + db close + sqlite3 db test.db + execsql {SELECT name, sql, tbl_name, type FROM sqlite_master + WHERE name='index1'} +} {index1 {CREATE INDEX index1 ON test1(f1)} test1 index} +do_test index-1.1d { + db close + sqlite3 db test.db + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {index1 test1} + +# Verify that the index dies with the table +# +do_test index-1.2 { + execsql {DROP TABLE test1} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {} + +# Try adding an index to a table that does not exist +# +do_test index-2.1 { + set v [catch {execsql {CREATE INDEX index1 ON test1(f1)}} msg] + lappend v $msg +} {1 {no such table: main.test1}} + +# Try adding an index on a column of a table where the table +# exists but the column does not. +# +do_test index-2.1 { + execsql {CREATE TABLE test1(f1 int, f2 int, f3 int)} + set v [catch {execsql {CREATE INDEX index1 ON test1(f4)}} msg] + lappend v $msg +} {1 {table test1 has no column named f4}} + +# Try an index with some columns that match and others that do now. +# +do_test index-2.2 { + set v [catch {execsql {CREATE INDEX index1 ON test1(f1, f2, f4, f3)}} msg] + execsql {DROP TABLE test1} + lappend v $msg +} {1 {table test1 has no column named f4}} + +# Try creating a bunch of indices on the same table +# +set r {} +for {set i 1} {$i<100} {incr i} { + lappend r [format index%02d $i] +} +do_test index-3.1 { + execsql {CREATE TABLE test1(f1 int, f2 int, f3 int, f4 int, f5 int)} + for {set i 1} {$i<100} {incr i} { + set sql "CREATE INDEX [format index%02d $i] ON test1(f[expr {($i%5)+1}])" + execsql $sql + } + execsql {SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='test1' + ORDER BY name} +} $r +integrity_check index-3.2.1 +ifcapable {reindex} { + do_test index-3.2.2 { + execsql REINDEX + } {} +} +integrity_check index-3.2.3 + + +# Verify that all the indices go away when we drop the table. +# +do_test index-3.3 { + execsql {DROP TABLE test1} + execsql {SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='test1' + ORDER BY name} +} {} + +# Create a table and insert values into that table. Then create +# an index on that table. Verify that we can select values +# from the table correctly using the index. +# +# Note that the index names "index9" and "indext" are chosen because +# they both have the same hash. 
+# +do_test index-4.1 { + execsql {CREATE TABLE test1(cnt int, power int)} + for {set i 1} {$i<20} {incr i} { + execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + } + execsql {CREATE INDEX index9 ON test1(cnt)} + execsql {CREATE INDEX indext ON test1(power)} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {index9 indext test1} +do_test index-4.2 { + execsql {SELECT cnt FROM test1 WHERE power=4} +} {2} +do_test index-4.3 { + execsql {SELECT cnt FROM test1 WHERE power=1024} +} {10} +do_test index-4.4 { + execsql {SELECT power FROM test1 WHERE cnt=6} +} {64} +do_test index-4.5 { + execsql {DROP INDEX indext} + execsql {SELECT power FROM test1 WHERE cnt=6} +} {64} +do_test index-4.6 { + execsql {SELECT cnt FROM test1 WHERE power=1024} +} {10} +do_test index-4.7 { + execsql {CREATE INDEX indext ON test1(cnt)} + execsql {SELECT power FROM test1 WHERE cnt=6} +} {64} +do_test index-4.8 { + execsql {SELECT cnt FROM test1 WHERE power=1024} +} {10} +do_test index-4.9 { + execsql {DROP INDEX index9} + execsql {SELECT power FROM test1 WHERE cnt=6} +} {64} +do_test index-4.10 { + execsql {SELECT cnt FROM test1 WHERE power=1024} +} {10} +do_test index-4.11 { + execsql {DROP INDEX indext} + execsql {SELECT power FROM test1 WHERE cnt=6} +} {64} +do_test index-4.12 { + execsql {SELECT cnt FROM test1 WHERE power=1024} +} {10} +do_test index-4.13 { + execsql {DROP TABLE test1} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {} +integrity_check index-4.14 + +# Do not allow indices to be added to sqlite_master +# +do_test index-5.1 { + set v [catch {execsql {CREATE INDEX index1 ON sqlite_master(name)}} msg] + lappend v $msg +} {1 {table sqlite_master may not be indexed}} +do_test index-5.2 { + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {} + +# Do not allow indices with duplicate names to be added +# +do_test index-6.1 { + execsql {CREATE TABLE test1(f1 int, f2 int)} + execsql {CREATE TABLE test2(g1 real, g2 real)} + execsql {CREATE INDEX index1 ON test1(f1)} + set v [catch {execsql {CREATE INDEX index1 ON test2(g1)}} msg] + lappend v $msg +} {1 {index index1 already exists}} +do_test index-6.1.1 { + catchsql {CREATE INDEX [index1] ON test2(g1)} +} {1 {index index1 already exists}} +do_test index-6.1b { + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {index1 test1 test2} +do_test index-6.1c { + catchsql {CREATE INDEX IF NOT EXISTS index1 ON test1(f1)} +} {0 {}} +do_test index-6.2 { + set v [catch {execsql {CREATE INDEX test1 ON test2(g1)}} msg] + lappend v $msg +} {1 {there is already a table named test1}} +do_test index-6.2b { + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {index1 test1 test2} +do_test index-6.3 { + execsql {DROP TABLE test1} + execsql {DROP TABLE test2} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {} +do_test index-6.4 { + execsql { + CREATE TABLE test1(a,b); + CREATE INDEX index1 ON test1(a); + CREATE INDEX index2 ON test1(b); + CREATE INDEX index3 ON test1(a,b); + DROP TABLE test1; + SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name; + } +} {} +integrity_check index-6.5 + + +# Create a primary key +# +do_test index-7.1 { + execsql {CREATE TABLE test1(f1 int, f2 int primary key)} + for {set i 1} {$i<20} {incr i} { + execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + } + execsql {SELECT count(*) FROM test1} +} {19} +do_test index-7.2 { + execsql {SELECT f1 FROM test1 WHERE 
f2=65536} +} {16} +do_test index-7.3 { + execsql { + SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='test1' + } +} {sqlite_autoindex_test1_1} +do_test index-7.4 { + execsql {DROP table test1} + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {} +integrity_check index-7.5 + +# Make sure we cannot drop a non-existant index. +# +do_test index-8.1 { + set v [catch {execsql {DROP INDEX index1}} msg] + lappend v $msg +} {1 {no such index: index1}} + +# Make sure we don't actually create an index when the EXPLAIN keyword +# is used. +# +do_test index-9.1 { + execsql {CREATE TABLE tab1(a int)} + ifcapable {explain} { + execsql {EXPLAIN CREATE INDEX idx1 ON tab1(a)} + } + execsql {SELECT name FROM sqlite_master WHERE tbl_name='tab1'} +} {tab1} +do_test index-9.2 { + execsql {CREATE INDEX idx1 ON tab1(a)} + execsql {SELECT name FROM sqlite_master WHERE tbl_name='tab1' ORDER BY name} +} {idx1 tab1} +integrity_check index-9.3 + +# Allow more than one entry with the same key. +# +do_test index-10.0 { + execsql { + CREATE TABLE t1(a int, b int); + CREATE INDEX i1 ON t1(a); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(2,4); + INSERT INTO t1 VALUES(3,8); + INSERT INTO t1 VALUES(1,12); + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {2 12} +do_test index-10.1 { + execsql { + SELECT b FROM t1 WHERE a=2 ORDER BY b; + } +} {4} +do_test index-10.2 { + execsql { + DELETE FROM t1 WHERE b=12; + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {2} +do_test index-10.3 { + execsql { + DELETE FROM t1 WHERE b=2; + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {} +do_test index-10.4 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES (1,1); + INSERT INTO t1 VALUES (1,2); + INSERT INTO t1 VALUES (1,3); + INSERT INTO t1 VALUES (1,4); + INSERT INTO t1 VALUES (1,5); + INSERT INTO t1 VALUES (1,6); + INSERT INTO t1 VALUES (1,7); + INSERT INTO t1 VALUES (1,8); + INSERT INTO t1 VALUES (1,9); + INSERT INTO t1 VALUES (2,0); + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {1 2 3 4 5 6 7 8 9} +do_test index-10.5 { + ifcapable subquery { + execsql { DELETE FROM t1 WHERE b IN (2, 4, 6, 8); } + } else { + execsql { DELETE FROM t1 WHERE b = 2 OR b = 4 OR b = 6 OR b = 8; } + } + execsql { + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {1 3 5 7 9} +do_test index-10.6 { + execsql { + DELETE FROM t1 WHERE b>2; + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {1} +do_test index-10.7 { + execsql { + DELETE FROM t1 WHERE b=1; + SELECT b FROM t1 WHERE a=1 ORDER BY b; + } +} {} +do_test index-10.8 { + execsql { + SELECT b FROM t1 ORDER BY b; + } +} {0} +integrity_check index-10.9 + +# Automatically create an index when we specify a primary key. +# +do_test index-11.1 { + execsql { + CREATE TABLE t3( + a text, + b int, + c float, + PRIMARY KEY(b) + ); + } + for {set i 1} {$i<=50} {incr i} { + execsql "INSERT INTO t3 VALUES('x${i}x',$i,0.$i)" + } + set sqlite_search_count 0 + concat [execsql {SELECT c FROM t3 WHERE b==10}] $sqlite_search_count +} {0.1 3} +integrity_check index-11.2 + + +# Numeric strings should compare as if they were numbers. So even if the +# strings are not character-by-character the same, if they represent the +# same number they should compare equal to one another. Verify that this +# is true in indices. +# +# Updated for sqlite3 v3: SQLite will now store these values as numbers +# (because the affinity of column a is NUMERIC) so the quirky +# representations are not retained. i.e. '+1.0' becomes '1'. 
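+#
+# (Illustrative note: because column a is declared NUM, text that looks
+# numeric is converted when it is inserted, so
+#
+#     INSERT INTO t4 VALUES('0.00',2);   -- stored as the number 0
+#     INSERT INTO t4 VALUES('+1.0',5);   -- stored as the number 1
+#
+# and a comparison such as "WHERE a==0" matches every spelling of zero,
+# with or without the t4i1 index, as the tests below show.)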
+do_test index-12.1 { + execsql { + CREATE TABLE t4(a NUM,b); + INSERT INTO t4 VALUES('0.0',1); + INSERT INTO t4 VALUES('0.00',2); + INSERT INTO t4 VALUES('abc',3); + INSERT INTO t4 VALUES('-1.0',4); + INSERT INTO t4 VALUES('+1.0',5); + INSERT INTO t4 VALUES('0',6); + INSERT INTO t4 VALUES('00000',7); + SELECT a FROM t4 ORDER BY b; + } +} {0 0 abc -1 1 0 0} +do_test index-12.2 { + execsql { + SELECT a FROM t4 WHERE a==0 ORDER BY b + } +} {0 0 0 0} +do_test index-12.3 { + execsql { + SELECT a FROM t4 WHERE a<0.5 ORDER BY b + } +} {0 0 -1 0 0} +do_test index-12.4 { + execsql { + SELECT a FROM t4 WHERE a>-0.5 ORDER BY b + } +} {0 0 abc 1 0 0} +do_test index-12.5 { + execsql { + CREATE INDEX t4i1 ON t4(a); + SELECT a FROM t4 WHERE a==0 ORDER BY b + } +} {0 0 0 0} +do_test index-12.6 { + execsql { + SELECT a FROM t4 WHERE a<0.5 ORDER BY b + } +} {0 0 -1 0 0} +do_test index-12.7 { + execsql { + SELECT a FROM t4 WHERE a>-0.5 ORDER BY b + } +} {0 0 abc 1 0 0} +integrity_check index-12.8 + +# Make sure we cannot drop an automatically created index. +# +do_test index-13.1 { + execsql { + CREATE TABLE t5( + a int UNIQUE, + b float PRIMARY KEY, + c varchar(10), + UNIQUE(a,c) + ); + INSERT INTO t5 VALUES(1,2,3); + SELECT * FROM t5; + } +} {1 2.0 3} +do_test index-13.2 { + set ::idxlist [execsql { + SELECT name FROM sqlite_master WHERE type="index" AND tbl_name="t5"; + }] + llength $::idxlist +} {3} +for {set i 0} {$i<[llength $::idxlist]} {incr i} { + do_test index-13.3.$i { + catchsql " + DROP INDEX '[lindex $::idxlist $i]'; + " + } {1 {index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped}} +} +do_test index-13.4 { + execsql { + INSERT INTO t5 VALUES('a','b','c'); + SELECT * FROM t5; + } +} {1 2.0 3 a b c} +integrity_check index-13.5 + +# Check the sort order of data in an index. 
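+#
+# (Illustrative note: SQLite orders values of different storage classes
+# as NULL < numeric < text < blob, so for the t6 data created below
+#
+#     SELECT c FROM t6 ORDER BY a,b;
+#
+# returns the NULL row first, then the row whose a is the number 123,
+# then the text values '' and 'abc', giving the {3 5 2 1 4} result of
+# index-14.1.)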
+# +do_test index-14.1 { + execsql { + CREATE TABLE t6(a,b,c); + CREATE INDEX t6i1 ON t6(a,b); + INSERT INTO t6 VALUES('','',1); + INSERT INTO t6 VALUES('',NULL,2); + INSERT INTO t6 VALUES(NULL,'',3); + INSERT INTO t6 VALUES('abc',123,4); + INSERT INTO t6 VALUES(123,'abc',5); + SELECT c FROM t6 ORDER BY a,b; + } +} {3 5 2 1 4} +do_test index-14.2 { + execsql { + SELECT c FROM t6 WHERE a=''; + } +} {2 1} +do_test index-14.3 { + execsql { + SELECT c FROM t6 WHERE b=''; + } +} {1 3} +do_test index-14.4 { + execsql { + SELECT c FROM t6 WHERE a>''; + } +} {4} +do_test index-14.5 { + execsql { + SELECT c FROM t6 WHERE a>=''; + } +} {2 1 4} +do_test index-14.6 { + execsql { + SELECT c FROM t6 WHERE a>123; + } +} {2 1 4} +do_test index-14.7 { + execsql { + SELECT c FROM t6 WHERE a>=123; + } +} {5 2 1 4} +do_test index-14.8 { + execsql { + SELECT c FROM t6 WHERE a<'abc'; + } +} {5 2 1} +do_test index-14.9 { + execsql { + SELECT c FROM t6 WHERE a<='abc'; + } +} {5 2 1 4} +do_test index-14.10 { + execsql { + SELECT c FROM t6 WHERE a<=''; + } +} {5 2 1} +do_test index-14.11 { + execsql { + SELECT c FROM t6 WHERE a<''; + } +} {5} +integrity_check index-14.12 + +do_test index-15.1 { + execsql { + DELETE FROM t1; + SELECT * FROM t1; + } +} {} +do_test index-15.2 { + execsql { + INSERT INTO t1 VALUES('1.234e5',1); + INSERT INTO t1 VALUES('12.33e04',2); + INSERT INTO t1 VALUES('12.35E4',3); + INSERT INTO t1 VALUES('12.34e',4); + INSERT INTO t1 VALUES('12.32e+4',5); + INSERT INTO t1 VALUES('12.36E+04',6); + INSERT INTO t1 VALUES('12.36E+',7); + INSERT INTO t1 VALUES('+123.10000E+0003',8); + INSERT INTO t1 VALUES('+',9); + INSERT INTO t1 VALUES('+12347.E+02',10); + INSERT INTO t1 VALUES('+12347E+02',11); + SELECT b FROM t1 ORDER BY a; + } +} {8 5 2 1 3 6 11 9 10 4 7} +integrity_check index-15.1 + +# The following tests - index-16.* - test that when a table definition +# includes qualifications that specify the same constraint twice only a +# single index is generated to enforce the constraint. +# +# For example: "CREATE TABLE abc( x PRIMARY KEY, UNIQUE(x) );" +# +do_test index-16.1 { + execsql { + CREATE TABLE t7(c UNIQUE PRIMARY KEY); + SELECT count(*) FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {1} +do_test index-16.2 { + execsql { + DROP TABLE t7; + CREATE TABLE t7(c UNIQUE PRIMARY KEY); + SELECT count(*) FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {1} +do_test index-16.3 { + execsql { + DROP TABLE t7; + CREATE TABLE t7(c PRIMARY KEY, UNIQUE(c) ); + SELECT count(*) FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {1} +do_test index-16.4 { + execsql { + DROP TABLE t7; + CREATE TABLE t7(c, d , UNIQUE(c, d), PRIMARY KEY(c, d) ); + SELECT count(*) FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {1} +do_test index-16.5 { + execsql { + DROP TABLE t7; + CREATE TABLE t7(c, d , UNIQUE(c), PRIMARY KEY(c, d) ); + SELECT count(*) FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {2} + +# Test that automatically create indices are named correctly. The current +# convention is: "sqlite_autoindex__" +# +# Then check that it is an error to try to drop any automtically created +# indices. 
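+#
+# (Illustrative note: the generated names follow the pattern
+# "sqlite_autoindex_<table>_<N>", so the three UNIQUE/PRIMARY KEY
+# constraints on t7 below produce
+#
+#     sqlite_autoindex_t7_1  sqlite_autoindex_t7_2  sqlite_autoindex_t7_3
+#
+# and "DROP INDEX sqlite_autoindex_t7_1" is rejected because the index
+# enforces a UNIQUE or PRIMARY KEY constraint.)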
+do_test index-17.1 { + execsql { + DROP TABLE t7; + CREATE TABLE t7(c, d UNIQUE, UNIQUE(c), PRIMARY KEY(c, d) ); + SELECT name FROM sqlite_master WHERE tbl_name = 't7' AND type = 'index'; + } +} {sqlite_autoindex_t7_1 sqlite_autoindex_t7_2 sqlite_autoindex_t7_3} +do_test index-17.2 { + catchsql { + DROP INDEX sqlite_autoindex_t7_1; + } +} {1 {index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped}} +do_test index-17.3 { + catchsql { + DROP INDEX IF EXISTS sqlite_autoindex_t7_1; + } +} {1 {index associated with UNIQUE or PRIMARY KEY constraint cannot be dropped}} +do_test index-17.4 { + catchsql { + DROP INDEX IF EXISTS no_such_index; + } +} {0 {}} + + +# The following tests ensure that it is not possible to explicitly name +# a schema object with a name beginning with "sqlite_". Granted that is a +# little outside the focus of this test scripts, but this has got to be +# tested somewhere. +do_test index-18.1 { + catchsql { + CREATE TABLE sqlite_t1(a, b, c); + } +} {1 {object name reserved for internal use: sqlite_t1}} +do_test index-18.2 { + catchsql { + CREATE INDEX sqlite_i1 ON t7(c); + } +} {1 {object name reserved for internal use: sqlite_i1}} +ifcapable view { +do_test index-18.3 { + catchsql { + CREATE VIEW sqlite_v1 AS SELECT * FROM t7; + } +} {1 {object name reserved for internal use: sqlite_v1}} +} ;# ifcapable view +ifcapable {trigger} { + do_test index-18.4 { + catchsql { + CREATE TRIGGER sqlite_tr1 BEFORE INSERT ON t7 BEGIN SELECT 1; END; + } + } {1 {object name reserved for internal use: sqlite_tr1}} +} +do_test index-18.5 { + execsql { + DROP TABLE t7; + } +} {} + +# These tests ensure that if multiple table definition constraints are +# implemented by a single indice, the correct ON CONFLICT policy applies. +ifcapable conflict { + do_test index-19.1 { + execsql { + CREATE TABLE t7(a UNIQUE PRIMARY KEY); + CREATE TABLE t8(a UNIQUE PRIMARY KEY ON CONFLICT ROLLBACK); + INSERT INTO t7 VALUES(1); + INSERT INTO t8 VALUES(1); + } + } {} + do_test index-19.2 { + catchsql { + BEGIN; + INSERT INTO t7 VALUES(1); + } + } {1 {column a is not unique}} + do_test index-19.3 { + catchsql { + BEGIN; + } + } {1 {cannot start a transaction within a transaction}} + do_test index-19.4 { + catchsql { + INSERT INTO t8 VALUES(1); + } + } {1 {column a is not unique}} + do_test index-19.5 { + catchsql { + BEGIN; + COMMIT; + } + } {0 {}} + do_test index-19.6 { + catchsql { + DROP TABLE t7; + DROP TABLE t8; + CREATE TABLE t7( + a PRIMARY KEY ON CONFLICT FAIL, + UNIQUE(a) ON CONFLICT IGNORE + ); + } + } {1 {conflicting ON CONFLICT clauses specified}} +} ; # end of "ifcapable conflict" block + +ifcapable {reindex} { + do_test index-19.7 { + execsql REINDEX + } {} +} +integrity_check index-19.8 + +# Drop index with a quoted name. Ticket #695. +# +do_test index-20.1 { + execsql { + CREATE INDEX "t6i2" ON t6(c); + DROP INDEX "t6i2"; + } +} {} +do_test index-20.2 { + execsql { + DROP INDEX "t6i1"; + } +} {} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/index2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/index2.test new file mode 100644 index 0000000..48d0c38 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/index2.test @@ -0,0 +1,74 @@ +# 2005 January 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CREATE INDEX statement. +# +# $Id: index2.test,v 1.3 2006/03/03 19:12:30 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table with a large number of columns +# +do_test index2-1.1 { + set sql {CREATE TABLE t1(} + for {set i 1} {$i<1000} {incr i} { + append sql "c$i," + } + append sql "c1000);" + execsql $sql +} {} +do_test index2-1.2 { + set sql {INSERT INTO t1 VALUES(} + for {set i 1} {$i<1000} {incr i} { + append sql $i, + } + append sql {1000);} + execsql $sql +} {} +do_test index2-1.3 { + execsql {SELECT c123 FROM t1} +} 123 +do_test index2-1.4 { + execsql BEGIN + for {set j 1} {$j<=100} {incr j} { + set sql {INSERT INTO t1 VALUES(} + for {set i 1} {$i<1000} {incr i} { + append sql [expr {$j*10000+$i}], + } + append sql "[expr {$j*10000+1000}]);" + execsql $sql + } + execsql COMMIT + execsql {SELECT count(*) FROM t1} +} 101 +do_test index2-1.5 { + execsql {SELECT round(sum(c1000)) FROM t1} +} {50601000.0} + +# Create indices with many columns +# +do_test index2-2.1 { + set sql "CREATE INDEX t1i1 ON t1(" + for {set i 1} {$i<1000} {incr i} { + append sql c$i, + } + append sql c1000) + execsql $sql +} {} +do_test index2-2.2 { + ifcapable explain { + execsql {EXPLAIN SELECT c9 FROM t1 ORDER BY c1, c2, c3, c4, c5} + } + execsql {SELECT c9 FROM t1 ORDER BY c1, c2, c3, c4, c5, c6 LIMIT 5} +} {9 10009 20009 30009 40009} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/index3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/index3.test new file mode 100644 index 0000000..c6c6ff4 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/index3.test @@ -0,0 +1,58 @@ +# 2005 February 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CREATE INDEX statement. +# +# $Id: index3.test,v 1.2 2005/08/20 03:03:04 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Ticket #1115. Make sure that when a UNIQUE index is created on a +# non-unique column (or columns) that it fails and that it leaves no +# residue behind. +# +do_test index3-1.1 { + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1; + } +} {1 1} +do_test index3-1.2 { + catchsql { + BEGIN; + CREATE UNIQUE INDEX i1 ON t1(a); + } +} {1 {indexed columns are not unique}} +do_test index3-1.3 { + catchsql COMMIT; +} {0 {}} +integrity_check index3-1.4 + +# This test corrupts the database file so it must be the last test +# in the series. 
+# +do_test index3-99.1 { + execsql { + PRAGMA writable_schema=on; + UPDATE sqlite_master SET sql='nonsense'; + } + db close + sqlite3 db test.db + catchsql { + DROP INDEX i1; + } +} {1 {malformed database schema - near "nonsense": syntax error}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/insert.test b/libraries/sqlite/unix/sqlite-3.5.1/test/insert.test new file mode 100644 index 0000000..9ea9cd7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/insert.test @@ -0,0 +1,391 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the INSERT statement. +# +# $Id: insert.test,v 1.31 2007/04/05 11:25:59 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Try to insert into a non-existant table. +# +do_test insert-1.1 { + set v [catch {execsql {INSERT INTO test1 VALUES(1,2,3)}} msg] + lappend v $msg +} {1 {no such table: test1}} + +# Try to insert into sqlite_master +# +do_test insert-1.2 { + set v [catch {execsql {INSERT INTO sqlite_master VALUES(1,2,3,4)}} msg] + lappend v $msg +} {1 {table sqlite_master may not be modified}} + +# Try to insert the wrong number of entries. +# +do_test insert-1.3 { + execsql {CREATE TABLE test1(one int, two int, three int)} + set v [catch {execsql {INSERT INTO test1 VALUES(1,2)}} msg] + lappend v $msg +} {1 {table test1 has 3 columns but 2 values were supplied}} +do_test insert-1.3b { + set v [catch {execsql {INSERT INTO test1 VALUES(1,2,3,4)}} msg] + lappend v $msg +} {1 {table test1 has 3 columns but 4 values were supplied}} +do_test insert-1.3c { + set v [catch {execsql {INSERT INTO test1(one,two) VALUES(1,2,3,4)}} msg] + lappend v $msg +} {1 {4 values for 2 columns}} +do_test insert-1.3d { + set v [catch {execsql {INSERT INTO test1(one,two) VALUES(1)}} msg] + lappend v $msg +} {1 {1 values for 2 columns}} + +# Try to insert into a non-existant column of a table. 
+# +do_test insert-1.4 { + set v [catch {execsql {INSERT INTO test1(one,four) VALUES(1,2)}} msg] + lappend v $msg +} {1 {table test1 has no column named four}} + +# Make sure the inserts actually happen +# +do_test insert-1.5 { + execsql {INSERT INTO test1 VALUES(1,2,3)} + execsql {SELECT * FROM test1} +} {1 2 3} +do_test insert-1.5b { + execsql {INSERT INTO test1 VALUES(4,5,6)} + execsql {SELECT * FROM test1 ORDER BY one} +} {1 2 3 4 5 6} +do_test insert-1.5c { + execsql {INSERT INTO test1 VALUES(7,8,9)} + execsql {SELECT * FROM test1 ORDER BY one} +} {1 2 3 4 5 6 7 8 9} + +do_test insert-1.6 { + execsql {DELETE FROM test1} + execsql {INSERT INTO test1(one,two) VALUES(1,2)} + execsql {SELECT * FROM test1 ORDER BY one} +} {1 2 {}} +do_test insert-1.6b { + execsql {INSERT INTO test1(two,three) VALUES(5,6)} + execsql {SELECT * FROM test1 ORDER BY one} +} {{} 5 6 1 2 {}} +do_test insert-1.6c { + execsql {INSERT INTO test1(three,one) VALUES(7,8)} + execsql {SELECT * FROM test1 ORDER BY one} +} {{} 5 6 1 2 {} 8 {} 7} + +# A table to use for testing default values +# +do_test insert-2.1 { + execsql { + CREATE TABLE test2( + f1 int default -111, + f2 real default +4.32, + f3 int default +222, + f4 int default 7.89 + ) + } + execsql {SELECT * from test2} +} {} +do_test insert-2.2 { + execsql {INSERT INTO test2(f1,f3) VALUES(+10,-10)} + execsql {SELECT * FROM test2} +} {10 4.32 -10 7.89} +do_test insert-2.3 { + execsql {INSERT INTO test2(f2,f4) VALUES(1.23,-3.45)} + execsql {SELECT * FROM test2 WHERE f1==-111} +} {-111 1.23 222 -3.45} +do_test insert-2.4 { + execsql {INSERT INTO test2(f1,f2,f4) VALUES(77,+1.23,3.45)} + execsql {SELECT * FROM test2 WHERE f1==77} +} {77 1.23 222 3.45} +do_test insert-2.10 { + execsql { + DROP TABLE test2; + CREATE TABLE test2( + f1 int default 111, + f2 real default -4.32, + f3 text default hi, + f4 text default 'abc-123', + f5 varchar(10) + ) + } + execsql {SELECT * from test2} +} {} +do_test insert-2.11 { + execsql {INSERT INTO test2(f2,f4) VALUES(-2.22,'hi!')} + execsql {SELECT * FROM test2} +} {111 -2.22 hi hi! {}} +do_test insert-2.12 { + execsql {INSERT INTO test2(f1,f5) VALUES(1,'xyzzy')} + execsql {SELECT * FROM test2 ORDER BY f1} +} {1 -4.32 hi abc-123 xyzzy 111 -2.22 hi hi! {}} + +# Do additional inserts with default values, but this time +# on a table that has indices. In particular we want to verify +# that the correct default values are inserted into the indices. +# +do_test insert-3.1 { + execsql { + DELETE FROM test2; + CREATE INDEX index9 ON test2(f1,f2); + CREATE INDEX indext ON test2(f4,f5); + SELECT * from test2; + } +} {} + +# Update for sqlite3 v3: +# Change the 111 to '111' in the following two test cases, because +# the default value is being inserted as a string. TODO: It shouldn't be. 
+do_test insert-3.2 { + execsql {INSERT INTO test2(f2,f4) VALUES(-3.33,'hum')} + execsql {SELECT * FROM test2 WHERE f1='111' AND f2=-3.33} +} {111 -3.33 hi hum {}} +do_test insert-3.3 { + execsql {INSERT INTO test2(f1,f2,f5) VALUES(22,-4.44,'wham')} + execsql {SELECT * FROM test2 WHERE f1='111' AND f2=-3.33} +} {111 -3.33 hi hum {}} +do_test insert-3.4 { + execsql {SELECT * FROM test2 WHERE f1=22 AND f2=-4.44} +} {22 -4.44 hi abc-123 wham} +ifcapable {reindex} { + do_test insert-3.5 { + execsql REINDEX + } {} +} +integrity_check insert-3.5 + +# Test of expressions in the VALUES clause +# +do_test insert-4.1 { + execsql { + CREATE TABLE t3(a,b,c); + INSERT INTO t3 VALUES(1+2+3,4,5); + SELECT * FROM t3; + } +} {6 4 5} +do_test insert-4.2 { + ifcapable subquery { + execsql {INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1,5,6);} + } else { + set maxa [execsql {SELECT max(a) FROM t3}] + execsql "INSERT INTO t3 VALUES($maxa+1,5,6);" + } + execsql { + SELECT * FROM t3 ORDER BY a; + } +} {6 4 5 7 5 6} +ifcapable subquery { + do_test insert-4.3 { + catchsql { + INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1,t3.a,6); + SELECT * FROM t3 ORDER BY a; + } + } {1 {no such column: t3.a}} +} +do_test insert-4.4 { + ifcapable subquery { + execsql {INSERT INTO t3 VALUES((SELECT b FROM t3 WHERE a=0),6,7);} + } else { + set b [execsql {SELECT b FROM t3 WHERE a = 0}] + if {$b==""} {set b NULL} + execsql "INSERT INTO t3 VALUES($b,6,7);" + } + execsql { + SELECT * FROM t3 ORDER BY a; + } +} {{} 6 7 6 4 5 7 5 6} +do_test insert-4.5 { + execsql { + SELECT b,c FROM t3 WHERE a IS NULL; + } +} {6 7} +do_test insert-4.6 { + catchsql { + INSERT INTO t3 VALUES(notafunc(2,3),2,3); + } +} {1 {no such function: notafunc}} +do_test insert-4.7 { + execsql { + INSERT INTO t3 VALUES(min(1,2,3),max(1,2,3),99); + SELECT * FROM t3 WHERE c=99; + } +} {1 3 99} + +# Test the ability to insert from a temporary table into itself. +# Ticket #275. +# +ifcapable tempdb { + do_test insert-5.1 { + execsql { + CREATE TEMP TABLE t4(x); + INSERT INTO t4 VALUES(1); + SELECT * FROM t4; + } + } {1} + do_test insert-5.2 { + execsql { + INSERT INTO t4 SELECT x+1 FROM t4; + SELECT * FROM t4; + } + } {1 2} + ifcapable {explain} { + do_test insert-5.3 { + # verify that a temporary table is used to copy t4 to t4 + set x [execsql { + EXPLAIN INSERT INTO t4 SELECT x+2 FROM t4; + }] + expr {[lsearch $x OpenEphemeral]>0} + } {1} + } + + do_test insert-5.4 { + # Verify that table "test1" begins on page 3. This should be the same + # page number used by "t4" above. + # + # Update for v3 - the first table now begins on page 2 of each file, not 3. + execsql { + SELECT rootpage FROM sqlite_master WHERE name='test1'; + } + } [expr $AUTOVACUUM?3:2] + do_test insert-5.5 { + # Verify that "t4" begins on page 3. + # + # Update for v3 - the first table now begins on page 2 of each file, not 3. + execsql { + SELECT rootpage FROM sqlite_temp_master WHERE name='t4'; + } + } {2} + do_test insert-5.6 { + # This should not use an intermediate temporary table. + execsql { + INSERT INTO t4 SELECT one FROM test1 WHERE three=7; + SELECT * FROM t4 + } + } {1 2 8} + ifcapable {explain} { + do_test insert-5.7 { + # verify that no temporary table is used to copy test1 to t4 + set x [execsql { + EXPLAIN INSERT INTO t4 SELECT one FROM test1; + }] + expr {[lsearch $x OpenTemp]>0} + } {0} + } +} + +# Ticket #334: REPLACE statement corrupting indices. +# +ifcapable conflict { + # The REPLACE command is not available if SQLITE_OMIT_CONFLICT is + # defined at compilation time. 
+ do_test insert-6.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(2,3); + SELECT b FROM t1 WHERE b=2; + } + } {2} + do_test insert-6.2 { + execsql { + REPLACE INTO t1 VALUES(1,4); + SELECT b FROM t1 WHERE b=2; + } + } {} + do_test insert-6.3 { + execsql { + UPDATE OR REPLACE t1 SET a=2 WHERE b=4; + SELECT * FROM t1 WHERE b=4; + } + } {2 4} + do_test insert-6.4 { + execsql { + SELECT * FROM t1 WHERE b=3; + } + } {} + ifcapable {reindex} { + do_test insert-6.5 { + execsql REINDEX + } {} + } + do_test insert-6.6 { + execsql { + DROP TABLE t1; + } + } {} +} + +# Test that the special optimization for queries of the form +# "SELECT max(x) FROM tbl" where there is an index on tbl(x) works with +# INSERT statments. +do_test insert-7.1 { + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + CREATE INDEX i1 ON t1(a); + } +} {} +do_test insert-7.2 { + execsql { + INSERT INTO t1 SELECT max(a) FROM t1; + } +} {} +do_test insert-7.3 { + execsql { + SELECT a FROM t1; + } +} {1 2 2} + +# Ticket #1140: Check for an infinite loop in the algorithm that tests +# to see if the right-hand side of an INSERT...SELECT references the left-hand +# side. +# +ifcapable subquery&&compound { + do_test insert-8.1 { + execsql { + INSERT INTO t3 SELECT * FROM (SELECT * FROM t3 UNION ALL SELECT 1,2,3) + } + } {} +} + +# Make sure the rowid cache in the VDBE is reset correctly when +# an explicit rowid is given. +# +do_test insert-9.1 { + execsql { + CREATE TABLE t5(x); + INSERT INTO t5 VALUES(1); + INSERT INTO t5 VALUES(2); + INSERT INTO t5 VALUES(3); + INSERT INTO t5(rowid, x) SELECT nullif(x*2+10,14), x+100 FROM t5; + SELECT rowid, x FROM t5; + } +} {1 1 2 2 3 3 12 101 13 102 16 103} +do_test insert-9.2 { + execsql { + CREATE TABLE t6(x INTEGER PRIMARY KEY, y); + INSERT INTO t6 VALUES(1,1); + INSERT INTO t6 VALUES(2,2); + INSERT INTO t6 VALUES(3,3); + INSERT INTO t6 SELECT nullif(y*2+10,14), y+100 FROM t6; + SELECT x, y FROM t6; + } +} {1 1 2 2 3 3 12 101 13 102 16 103} + +integrity_check insert-99.0 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/insert2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/insert2.test new file mode 100644 index 0000000..21bd0b7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/insert2.test @@ -0,0 +1,278 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the INSERT statement that takes is +# result from a SELECT. +# +# $Id: insert2.test,v 1.18 2005/10/05 11:35:09 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create some tables with data that we can select against +# +do_test insert2-1.0 { + execsql {CREATE TABLE d1(n int, log int);} + for {set i 1} {$i<=20} {incr i} { + for {set j 0} {pow(2,$j)<$i} {incr j} {} + execsql "INSERT INTO d1 VALUES($i,$j)" + } + execsql {SELECT * FROM d1 ORDER BY n} +} {1 0 2 1 3 2 4 2 5 3 6 3 7 3 8 3 9 4 10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 5 18 5 19 5 20 5} + +# Insert into a new table from the old one. 
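+# (The PRAGMA count_changes=on used below makes each INSERT, UPDATE or
+# DELETE statement report the number of rows it changed as its result,
+# which is why the INSERT below is expected to evaluate to 6.)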
+# +do_test insert2-1.1.1 { + execsql { + CREATE TABLE t1(log int, cnt int); + PRAGMA count_changes=on; + } + ifcapable explain { + execsql { + EXPLAIN INSERT INTO t1 SELECT log, count(*) FROM d1 GROUP BY log; + } + } + execsql { + INSERT INTO t1 SELECT log, count(*) FROM d1 GROUP BY log; + } +} {6} +do_test insert2-1.1.2 { + db changes +} {6} +do_test insert2-1.1.3 { + execsql {SELECT * FROM t1 ORDER BY log} +} {0 1 1 1 2 2 3 4 4 8 5 4} + +ifcapable compound { +do_test insert2-1.2.1 { + catch {execsql {DROP TABLE t1}} + execsql { + CREATE TABLE t1(log int, cnt int); + INSERT INTO t1 + SELECT log, count(*) FROM d1 GROUP BY log + EXCEPT SELECT n-1,log FROM d1; + } +} {4} +do_test insert2-1.2.2 { + execsql { + SELECT * FROM t1 ORDER BY log; + } +} {0 1 3 4 4 8 5 4} +do_test insert2-1.3.1 { + catch {execsql {DROP TABLE t1}} + execsql { + CREATE TABLE t1(log int, cnt int); + PRAGMA count_changes=off; + INSERT INTO t1 + SELECT log, count(*) FROM d1 GROUP BY log + INTERSECT SELECT n-1,log FROM d1; + } +} {} +do_test insert2-1.3.2 { + execsql { + SELECT * FROM t1 ORDER BY log; + } +} {1 1 2 2} +} ;# ifcapable compound +execsql {PRAGMA count_changes=off;} + +do_test insert2-1.4 { + catch {execsql {DROP TABLE t1}} + set r [execsql { + CREATE TABLE t1(log int, cnt int); + CREATE INDEX i1 ON t1(log); + CREATE INDEX i2 ON t1(cnt); + INSERT INTO t1 SELECT log, count() FROM d1 GROUP BY log; + SELECT * FROM t1 ORDER BY log; + }] + lappend r [execsql {SELECT cnt FROM t1 WHERE log=3}] + lappend r [execsql {SELECT log FROM t1 WHERE cnt=4 ORDER BY log}] +} {0 1 1 1 2 2 3 4 4 8 5 4 4 {3 5}} + +do_test insert2-2.0 { + execsql { + CREATE TABLE t3(a,b,c); + CREATE TABLE t4(x,y); + INSERT INTO t4 VALUES(1,2); + SELECT * FROM t4; + } +} {1 2} +do_test insert2-2.1 { + execsql { + INSERT INTO t3(a,c) SELECT * FROM t4; + SELECT * FROM t3; + } +} {1 {} 2} +do_test insert2-2.2 { + execsql { + DELETE FROM t3; + INSERT INTO t3(c,b) SELECT * FROM t4; + SELECT * FROM t3; + } +} {{} 2 1} +do_test insert2-2.3 { + execsql { + DELETE FROM t3; + INSERT INTO t3(c,a,b) SELECT x, 'hi', y FROM t4; + SELECT * FROM t3; + } +} {hi 2 1} + +integrity_check insert2-3.0 + +# File table t4 with lots of data +# +do_test insert2-3.1 { + execsql { + SELECT * from t4; + } +} {1 2} +do_test insert2-3.2 { + set x [db total_changes] + execsql { + BEGIN; + INSERT INTO t4 VALUES(2,4); + INSERT INTO t4 VALUES(3,6); + INSERT INTO t4 VALUES(4,8); + INSERT INTO t4 VALUES(5,10); + INSERT INTO t4 VALUES(6,12); + INSERT INTO t4 VALUES(7,14); + INSERT INTO t4 VALUES(8,16); + INSERT INTO t4 VALUES(9,18); + INSERT INTO t4 VALUES(10,20); + COMMIT; + } + expr [db total_changes] - $x +} {9} +do_test insert2-3.2.1 { + execsql { + SELECT count(*) FROM t4; + } +} {10} +do_test insert2-3.3 { + ifcapable subquery { + execsql { + BEGIN; + INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4; + INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4; + INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4; + INSERT INTO t4 SELECT x+(SELECT max(x) FROM t4),y FROM t4; + COMMIT; + SELECT count(*) FROM t4; + } + } else { + db function max_x_t4 {execsql {SELECT max(x) FROM t4}} + execsql { + BEGIN; + INSERT INTO t4 SELECT x+max_x_t4() ,y FROM t4; + INSERT INTO t4 SELECT x+max_x_t4() ,y FROM t4; + INSERT INTO t4 SELECT x+max_x_t4() ,y FROM t4; + INSERT INTO t4 SELECT x+max_x_t4() ,y FROM t4; + COMMIT; + SELECT count(*) FROM t4; + } + } +} {160} +do_test insert2-3.4 { + execsql { + BEGIN; + UPDATE t4 SET y='lots of data for the row where x=' || x + || ' and y=' || 
y || ' - even more data to fill space'; + COMMIT; + SELECT count(*) FROM t4; + } +} {160} +do_test insert2-3.5 { + ifcapable subquery { + execsql { + BEGIN; + INSERT INTO t4 SELECT x+(SELECT max(x)+1 FROM t4),y FROM t4; + SELECT count(*) from t4; + ROLLBACK; + } + } else { + execsql { + BEGIN; + INSERT INTO t4 SELECT x+max_x_t4()+1,y FROM t4; + SELECT count(*) from t4; + ROLLBACK; + } + } +} {320} +do_test insert2-3.6 { + execsql { + SELECT count(*) FROM t4; + } +} {160} +do_test insert2-3.7 { + execsql { + BEGIN; + DELETE FROM t4 WHERE x!=123; + SELECT count(*) FROM t4; + ROLLBACK; + } +} {1} +do_test insert2-3.8 { + db changes +} {159} +integrity_check insert2-3.9 + +# Ticket #901 +# +ifcapable tempdb { + do_test insert2-4.1 { + execsql { + CREATE TABLE Dependencies(depId integer primary key, + class integer, name str, flag str); + CREATE TEMPORARY TABLE DepCheck(troveId INT, depNum INT, + flagCount INT, isProvides BOOL, class INTEGER, name STRING, + flag STRING); + INSERT INTO DepCheck + VALUES(-1, 0, 1, 0, 2, 'libc.so.6', 'GLIBC_2.0'); + INSERT INTO Dependencies + SELECT DISTINCT + NULL, + DepCheck.class, + DepCheck.name, + DepCheck.flag + FROM DepCheck LEFT OUTER JOIN Dependencies ON + DepCheck.class == Dependencies.class AND + DepCheck.name == Dependencies.name AND + DepCheck.flag == Dependencies.flag + WHERE + Dependencies.depId is NULL; + }; + } {} +} + +#-------------------------------------------------------------------- +# Test that the INSERT works when the SELECT statement (a) references +# the table being inserted into and (b) is optimized to use an index +# only. +do_test insert2-5.1 { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES(1, 2); + CREATE INDEX t2i1 ON t2(a); + INSERT INTO t2 SELECT a, 3 FROM t2 WHERE a = 1; + SELECT * FROM t2; + } +} {1 2 1 3} +ifcapable subquery { + do_test insert2-5.2 { + execsql { + INSERT INTO t2 SELECT (SELECT a FROM t2), 4; + SELECT * FROM t2; + } + } {1 2 1 3 1 4} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/insert3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/insert3.test new file mode 100644 index 0000000..825b2ac --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/insert3.test @@ -0,0 +1,171 @@ +# 2005 January 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing corner cases of the INSERT statement. +# +# $Id: insert3.test,v 1.7 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# All the tests in this file require trigger support +# +ifcapable {trigger} { + +# Create a table and a corresponding insert trigger. Do a self-insert +# into the table. 
+# +do_test insert3-1.0 { + execsql { + CREATE TABLE t1(a,b); + CREATE TABLE log(x UNIQUE, y); + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN + UPDATE log SET y=y+1 WHERE x=new.a; + INSERT OR IGNORE INTO log VALUES(new.a, 1); + END; + INSERT INTO t1 VALUES('hello','world'); + INSERT INTO t1 VALUES(5,10); + SELECT * FROM log ORDER BY x; + } +} {5 1 hello 1} +do_test insert3-1.1 { + execsql { + INSERT INTO t1 SELECT a, b+10 FROM t1; + SELECT * FROM log ORDER BY x; + } +} {5 2 hello 2} +do_test insert3-1.2 { + execsql { + CREATE TABLE log2(x PRIMARY KEY,y); + CREATE TRIGGER r2 BEFORE INSERT ON t1 BEGIN + UPDATE log2 SET y=y+1 WHERE x=new.b; + INSERT OR IGNORE INTO log2 VALUES(new.b,1); + END; + INSERT INTO t1 VALUES(453,'hi'); + SELECT * FROM log ORDER BY x; + } +} {5 2 453 1 hello 2} +do_test insert3-1.3 { + execsql { + SELECT * FROM log2 ORDER BY x; + } +} {hi 1} +ifcapable compound { + do_test insert3-1.4.1 { + execsql { + INSERT INTO t1 SELECT * FROM t1; + SELECT 'a:', x, y FROM log UNION ALL + SELECT 'b:', x, y FROM log2 ORDER BY x; + } + } {a: 5 4 b: 10 2 b: 20 1 a: 453 2 a: hello 4 b: hi 2 b: world 1} + do_test insert3-1.4.2 { + execsql { + SELECT 'a:', x, y FROM log UNION ALL + SELECT 'b:', x, y FROM log2 ORDER BY x, y; + } + } {a: 5 4 b: 10 2 b: 20 1 a: 453 2 a: hello 4 b: hi 2 b: world 1} + do_test insert3-1.5 { + execsql { + INSERT INTO t1(a) VALUES('xyz'); + SELECT * FROM log ORDER BY x; + } + } {5 4 453 2 hello 4 xyz 1} +} + +do_test insert3-2.1 { + execsql { + CREATE TABLE t2( + a INTEGER PRIMARY KEY, + b DEFAULT 'b', + c DEFAULT 'c' + ); + CREATE TABLE t2dup(a,b,c); + CREATE TRIGGER t2r1 BEFORE INSERT ON t2 BEGIN + INSERT INTO t2dup(a,b,c) VALUES(new.a,new.b,new.c); + END; + INSERT INTO t2(a) VALUES(123); + INSERT INTO t2(b) VALUES(234); + INSERT INTO t2(c) VALUES(345); + SELECT * FROM t2dup; + } +} {123 b c -1 234 c -1 b 345} +do_test insert3-2.2 { + execsql { + DELETE FROM t2dup; + INSERT INTO t2(a) SELECT 1 FROM t1 LIMIT 1; + INSERT INTO t2(b) SELECT 987 FROM t1 LIMIT 1; + INSERT INTO t2(c) SELECT 876 FROM t1 LIMIT 1; + SELECT * FROM t2dup; + } +} {1 b c -1 987 c -1 b 876} + +# Test for proper detection of malformed WHEN clauses on INSERT triggers. +# +do_test insert3-3.1 { + execsql { + CREATE TABLE t3(a,b,c); + CREATE TRIGGER t3r1 BEFORE INSERT on t3 WHEN nosuchcol BEGIN + SELECT 'illegal WHEN clause'; + END; + } +} {} +do_test insert3-3.2 { + catchsql { + INSERT INTO t3 VALUES(1,2,3) + } +} {1 {no such column: nosuchcol}} +do_test insert3-3.3 { + execsql { + CREATE TABLE t4(a,b,c); + CREATE TRIGGER t4r1 AFTER INSERT on t4 WHEN nosuchcol BEGIN + SELECT 'illegal WHEN clause'; + END; + } +} {} +do_test insert3-3.4 { + catchsql { + INSERT INTO t4 VALUES(1,2,3) + } +} {1 {no such column: nosuchcol}} + +} ;# ifcapable {trigger} + +# Tests for the INSERT INTO ... 
DEFAULT VALUES construct +# +do_test insert3-3.5 { + execsql { + CREATE TABLE t5( + a INTEGER PRIMARY KEY, + b DEFAULT 'xyz' + ); + INSERT INTO t5 DEFAULT VALUES; + SELECT * FROM t5; + } +} {1 xyz} +do_test insert3-3.6 { + execsql { + INSERT INTO t5 DEFAULT VALUES; + SELECT * FROM t5; + } +} {1 xyz 2 xyz} + +ifcapable bloblit { + do_test insert3-3.7 { + execsql { + CREATE TABLE t6(x,y DEFAULT 4.3, z DEFAULT x'6869'); + INSERT INTO t6 DEFAULT VALUES; + SELECT * FROM t6; + } + } {{} 4.3 hi} +} +db close + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/insert4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/insert4.test new file mode 100644 index 0000000..d85fec3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/insert4.test @@ -0,0 +1,272 @@ +# 2007 January 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the INSERT transfer optimization. +# +# $Id: insert4.test,v 1.7 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !view||!subquery { + finish_test + return +} + +# The sqlite3_xferopt_count variable is incremented whenever the +# insert transfer optimization applies. +# +# This procedure runs a test to see if the sqlite3_xferopt_count is +# set to N. +# +proc xferopt_test {testname N} { + do_test $testname {set ::sqlite3_xferopt_count} $N +} + +# Create tables used for testing. +# +execsql { + PRAGMA legacy_file_format = 0; + CREATE TABLE t1(a int, b int, check(b>a)); + CREATE TABLE t2(x int, y int); + CREATE VIEW v2 AS SELECT y, x FROM t2; + CREATE TABLE t3(a int, b int); +} + +# Ticket #2252. Make sure the an INSERT from identical tables +# does not violate constraints. +# +do_test insert4-1.1 { + set sqlite3_xferopt_count 0 + execsql { + DELETE FROM t1; + DELETE FROM t2; + INSERT INTO t2 VALUES(9,1); + } + catchsql { + INSERT INTO t1 SELECT * FROM t2; + } +} {1 {constraint failed}} +xferopt_test insert4-1.2 0 +do_test insert4-1.3 { + execsql { + SELECT * FROM t1; + } +} {} + +# Tests to make sure that the transfer optimization is not occurring +# when it is not a valid optimization. +# +# The SELECT must be against a real table. 
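+# (A SELECT of bare expressions, or a SELECT from a view, produces its
+# rows through the ordinary query machinery rather than from a source
+# table b-tree, so there is nothing for the transfer optimization to
+# copy directly and the rows must be inserted one by one.)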
+do_test insert4-2.1.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1 SELECT 4, 8; + SELECT * FROM t1; + } +} {4 8} +xferopt_test insert4-2.1.2 0 +do_test insert4-2.2.1 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 SELECT * FROM v2; + SELECT * FROM t1; + } +} {0 {1 9}} +xferopt_test insert4-2.2.2 0 + +# Do not run the transfer optimization if there is a LIMIT clause +# +do_test insert4-2.3.1 { + execsql { + DELETE FROM t2; + INSERT INTO t2 VALUES(9,1); + INSERT INTO t2 SELECT y, x FROM t2; + INSERT INTO t3 SELECT * FROM t2 LIMIT 1; + SELECT * FROM t3; + } +} {9 1} +xferopt_test insert4-2.3.2 0 +do_test insert4-2.3.3 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 SELECT * FROM t2 LIMIT 1; + SELECT * FROM t1; + } +} {1 {constraint failed}} +xferopt_test insert4-2.3.4 0 + +# Do not run the transfer optimization if there is a DISTINCT +# +do_test insert4-2.4.1 { + execsql { + DELETE FROM t3; + INSERT INTO t3 SELECT DISTINCT * FROM t2; + SELECT * FROM t3; + } +} {9 1 1 9} +xferopt_test insert4-2.4.2 0 +do_test insert4-2.4.3 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 SELECT DISTINCT * FROM t2; + } +} {1 {constraint failed}} +xferopt_test insert4-2.4.4 0 + +# The following procedure constructs two tables then tries to transfer +# data from one table to the other. Checks are made to make sure the +# transfer is successful and that the transfer optimization was used or +# not, as appropriate. +# +# xfer_check TESTID XFER-USED INIT-DATA DEST-SCHEMA SRC-SCHEMA +# +# The TESTID argument is the symbolic name for this test. The XFER-USED +# argument is true if the transfer optimization should be employed and +# false if not. INIT-DATA is a single row of data that is to be +# transfered. DEST-SCHEMA and SRC-SCHEMA are table declarations for +# the destination and source tables. +# +proc xfer_check {testid xferused initdata destschema srcschema} { + execsql "CREATE TABLE dest($destschema)" + execsql "CREATE TABLE src($srcschema)" + execsql "INSERT INTO src VALUES([join $initdata ,])" + set ::sqlite3_xferopt_count 0 + do_test $testid.1 { + execsql { + INSERT INTO dest SELECT * FROM src; + SELECT * FROM dest; + } + } $initdata + do_test $testid.2 { + set ::sqlite3_xferopt_count + } $xferused + execsql { + DROP TABLE dest; + DROP TABLE src; + } +} + + +# Do run the transfer optimization if tables have identical +# CHECK constraints. +# +xfer_check insert4-3.1 1 {1 9} \ + {a int, b int CHECK(b>a)} \ + {x int, y int CHECK(y>x)} +xfer_check insert4-3.2 1 {1 9} \ + {a int, b int CHECK(b>a)} \ + {x int CHECK(y>x), y int} + +# Do run the transfer optimization if the destination table lacks +# any CHECK constraints regardless of whether or not there are CHECK +# constraints on the source table. +# +xfer_check insert4-3.3 1 {1 9} \ + {a int, b int} \ + {x int, y int CHECK(y>x)} + +# Do run the transfer optimization if the destination table omits +# NOT NULL constraints that the source table has. +# +xfer_check insert4-3.4 0 {1 9} \ + {a int, b int CHECK(b>a)} \ + {x int, y int} + +# Do not run the optimization if the destination has NOT NULL +# constraints that the source table lacks. 
+# +xfer_check insert4-3.5 0 {1 9} \ + {a int, b int NOT NULL} \ + {x int, y int} +xfer_check insert4-3.6 0 {1 9} \ + {a int, b int NOT NULL} \ + {x int NOT NULL, y int} +xfer_check insert4-3.7 0 {1 9} \ + {a int NOT NULL, b int NOT NULL} \ + {x int NOT NULL, y int} +xfer_check insert4-3.8 0 {1 9} \ + {a int NOT NULL, b int} \ + {x int, y int} + + +# Do run the transfer optimization if the destination table and +# source table have the same NOT NULL constraints or if the +# source table has extra NOT NULL constraints. +# +xfer_check insert4-3.9 1 {1 9} \ + {a int, b int} \ + {x int NOT NULL, y int} +xfer_check insert4-3.10 1 {1 9} \ + {a int, b int} \ + {x int NOT NULL, y int NOT NULL} +xfer_check insert4-3.11 1 {1 9} \ + {a int NOT NULL, b int} \ + {x int NOT NULL, y int NOT NULL} +xfer_check insert4-3.12 1 {1 9} \ + {a int, b int NOT NULL} \ + {x int NOT NULL, y int NOT NULL} + +# Do not run the optimization if any corresponding table +# columns have different affinities. +# +xfer_check insert4-3.20 0 {1 9} \ + {a text, b int} \ + {x int, b int} +xfer_check insert4-3.21 0 {1 9} \ + {a int, b int} \ + {x text, b int} + +# "int" and "integer" are equivalent so the optimization should +# run here. +# +xfer_check insert4-3.22 1 {1 9} \ + {a int, b int} \ + {x integer, b int} + +# Ticket #2291. +# +do_test insert4-4.1 { + execsql { + CREATE TABLE t4(a, b, UNIQUE(a,b)); + INSERT INTO t4 VALUES(NULL,0); + INSERT INTO t4 VALUES(NULL,1); + INSERT INTO t4 VALUES(NULL,1); + VACUUM; + } +} {} + +# Check some error conditions: +# +do_test insert4-5.1 { + # Table does not exist. + catchsql { INSERT INTO t2 SELECT * FROM nosuchtable } +} {1 {no such table: nosuchtable}} +do_test insert4-5.2 { + # Number of columns does not match. + catchsql { + CREATE TABLE t5(a, b, c); + INSERT INTO t4 SELECT * FROM t5; + } +} {1 {table t4 has 2 columns but 3 values were supplied}} + +do_test insert4-6.1 { + execsql { + CREATE INDEX t2_i2 ON t2(x, y COLLATE nocase); + CREATE INDEX t2_i1 ON t2(x ASC, y DESC); + CREATE INDEX t3_i1 ON t3(a, b); + INSERT INTO t2 SELECT * FROM t3; + } +} {} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/interrupt.test b/libraries/sqlite/unix/sqlite-3.5.1/test/interrupt.test new file mode 100644 index 0000000..b348baa --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/interrupt.test @@ -0,0 +1,198 @@ +# 2004 Feb 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the sqlite_interrupt() API. +# +# $Id: interrupt.test,v 1.15 2007/06/13 16:49:49 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set DB [sqlite3_connection_pointer db] + +# Compute a checksum on the entire database. 
+# +proc cksum {{db db}} { + set txt [$db eval {SELECT name, type, sql FROM sqlite_master}]\n + foreach tbl [$db eval {SELECT name FROM sqlite_master WHERE type='table'}] { + append txt [$db eval "SELECT * FROM $tbl"]\n + } + foreach prag {default_synchronous default_cache_size} { + append txt $prag-[$db eval "PRAGMA $prag"]\n + } + set cksum [string length $txt]-[md5 $txt] + # puts $cksum-[file size test.db] + return $cksum +} + +# This routine attempts to execute the sql in $sql. It triggers an +# interrupt at progressively later and later points during the processing +# and checks to make sure SQLITE_INTERRUPT is returned. Eventually, +# the routine completes successfully. +# +proc interrupt_test {testid sql result {initcnt 0}} { + set orig_sum [cksum] + set i $initcnt + while 1 { + incr i + set ::sqlite_interrupt_count $i + do_test $testid.$i.1 [format { + set ::r [catchsql %s] + set ::code [db errorcode] + expr {$::code==0 || $::code==9} + } [list $sql]] 1 + if {$::code==9} { + do_test $testid.$i.2 { + cksum + } $orig_sum + } else { + do_test $testid.$i.99 { + set ::r + } [list 0 $result] + break + } + } + set ::sqlite_interrupt_count 0 +} + +do_test interrupt-1.1 { + execsql { + CREATE TABLE t1(a,b); + SELECT name FROM sqlite_master; + } +} {t1} +interrupt_test interrupt-1.2 {DROP TABLE t1} {} +do_test interrupt-1.3 { + execsql { + SELECT name FROM sqlite_master; + } +} {} +integrity_check interrupt-1.4 + +do_test interrrupt-2.1 { + execsql { + BEGIN; + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,randstr(300,400)); + INSERT INTO t1 SELECT a+1, randstr(300,400) FROM t1; + INSERT INTO t1 SELECT a+2, a || '-' || b FROM t1; + INSERT INTO t1 SELECT a+4, a || '-' || b FROM t1; + INSERT INTO t1 SELECT a+8, a || '-' || b FROM t1; + INSERT INTO t1 SELECT a+16, a || '-' || b FROM t1; + INSERT INTO t1 SELECT a+32, a || '-' || b FROM t1; + COMMIT; + UPDATE t1 SET b=substr(b,-5,5); + SELECT count(*) from t1; + } +} 64 +set origsize [file size test.db] +set cksum [db eval {SELECT md5sum(a || b) FROM t1}] +ifcapable {vacuum} { + interrupt_test interrupt-2.2 {VACUUM} {} 100 +} +do_test interrupt-2.3 { + execsql { + SELECT md5sum(a || b) FROM t1; + } +} $cksum +ifcapable {vacuum && !default_autovacuum} { + do_test interrupt-2.4 { + expr {$::origsize>[file size test.db]} + } 1 +} +ifcapable {explain} { + do_test interrupt-2.5 { + set sql {EXPLAIN SELECT max(a,b), a, b FROM t1} + execsql $sql + set rc [catch {db eval $sql {sqlite3_interrupt $DB}} msg] + lappend rc $msg + } {1 interrupted} +} +integrity_check interrupt-2.6 + +# Ticket #594. If an interrupt occurs in the middle of a transaction +# and that transaction is later rolled back, the internal schema tables do +# not reset. +# +# UPDATE: Interrupting a DML statement in the middle of a transaction now +# causes the transaction to roll back. Leaving the transaction open after +# an SQL statement was interrupted halfway through risks database corruption. 
+# +ifcapable tempdb { + for {set i 1} {$i<50} {incr i 5} { + do_test interrupt-3.$i.1 { + execsql { + BEGIN; + CREATE TEMP TABLE t2(x,y); + SELECT name FROM sqlite_temp_master; + } + } {t2} + do_test interrupt-3.$i.2 { + set ::sqlite_interrupt_count $::i + catchsql { + INSERT INTO t2 SELECT * FROM t1; + } + } {1 interrupted} + do_test interrupt-3.$i.3 { + execsql { + SELECT name FROM sqlite_temp_master; + } + } {} + do_test interrupt-3.$i.4 { + catchsql { + ROLLBACK + } + } {1 {cannot rollback - no transaction is active}} + do_test interrupt-3.$i.5 { + catchsql {SELECT name FROM sqlite_temp_master}; + execsql { + SELECT name FROM sqlite_temp_master; + } + } {} + } +} + +# There are reports of a memory leak if an interrupt occurs during +# the beginning of a complex query - before the first callback. We +# will try to reproduce it here: +# +execsql { + CREATE TABLE t2(a,b,c); + INSERT INTO t2 SELECT round(a/10), randstr(50,80), randstr(50,60) FROM t1; +} +set sql { + SELECT max(min(b,c)), min(max(b,c)), a FROM t2 GROUP BY a ORDER BY a; +} +set sqlite_interrupt_count 1000000 +execsql $sql +set max_count [expr {1000000-$sqlite_interrupt_count}] +for {set i 1} {$i<$max_count-5} {incr i 1} { + do_test interrupt-4.$i.1 { + set ::sqlite_interrupt_count $::i + catchsql $sql + } {1 interrupted} +} + +# Interrupt during parsing +# +do_test interrupt-5.1 { + proc fake_interrupt {args} { + db collate fake_collation no-op + sqlite3_interrupt db + return SQLITE_OK + } + db collation_needed fake_interrupt + catchsql { + CREATE INDEX fake ON fake1(a COLLATE fake_collation, b, c DESC); + } +} {1 interrupt} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/intpkey.test b/libraries/sqlite/unix/sqlite-3.5.1/test/intpkey.test new file mode 100644 index 0000000..168f1df --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/intpkey.test @@ -0,0 +1,605 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the special processing associated +# with INTEGER PRIMARY KEY columns. +# +# $Id: intpkey.test,v 1.23 2005/07/21 03:48:20 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table with a primary key and a datatype other than +# integer +# +do_test intpkey-1.0 { + execsql { + CREATE TABLE t1(a TEXT PRIMARY KEY, b, c); + } +} {} + +# There should be an index associated with the primary key +# +do_test intpkey-1.1 { + execsql { + SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='t1'; + } +} {sqlite_autoindex_t1_1} + +# Now create a table with an integer primary key and verify that +# there is no associated index. +# +do_test intpkey-1.2 { + execsql { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='t1'; + } +} {} + +# Insert some records into the new table. Specify the primary key +# and verify that the key is used as the record number. 
+# +do_test intpkey-1.3 { + execsql { + INSERT INTO t1 VALUES(5,'hello','world'); + } + db last_insert_rowid +} {5} +do_test intpkey-1.4 { + execsql { + SELECT * FROM t1; + } +} {5 hello world} +do_test intpkey-1.5 { + execsql { + SELECT rowid, * FROM t1; + } +} {5 5 hello world} + +# Attempting to insert a duplicate primary key should give a constraint +# failure. +# +do_test intpkey-1.6 { + set r [catch {execsql { + INSERT INTO t1 VALUES(5,'second','entry'); + }} msg] + lappend r $msg +} {1 {PRIMARY KEY must be unique}} +do_test intpkey-1.7 { + execsql { + SELECT rowid, * FROM t1; + } +} {5 5 hello world} +do_test intpkey-1.8 { + set r [catch {execsql { + INSERT INTO t1 VALUES(6,'second','entry'); + }} msg] + lappend r $msg +} {0 {}} +do_test intpkey-1.8.1 { + db last_insert_rowid +} {6} +do_test intpkey-1.9 { + execsql { + SELECT rowid, * FROM t1; + } +} {5 5 hello world 6 6 second entry} + +# A ROWID is automatically generated for new records that do not specify +# the integer primary key. +# +do_test intpkey-1.10 { + execsql { + INSERT INTO t1(b,c) VALUES('one','two'); + SELECT b FROM t1 ORDER BY b; + } +} {hello one second} + +# Try to change the ROWID for the new entry. +# +do_test intpkey-1.11 { + execsql { + UPDATE t1 SET a=4 WHERE b='one'; + SELECT * FROM t1; + } +} {4 one two 5 hello world 6 second entry} + +# Make sure SELECT statements are able to use the primary key column +# as an index. +# +do_test intpkey-1.12.1 { + execsql { + SELECT * FROM t1 WHERE a==4; + } +} {4 one two} +do_test intpkey-1.12.2 { + set sqlite_query_plan +} {t1 *} + +# Try to insert a non-integer value into the primary key field. This +# should result in a data type mismatch. +# +do_test intpkey-1.13.1 { + set r [catch {execsql { + INSERT INTO t1 VALUES('x','y','z'); + }} msg] + lappend r $msg +} {1 {datatype mismatch}} +do_test intpkey-1.13.2 { + set r [catch {execsql { + INSERT INTO t1 VALUES('','y','z'); + }} msg] + lappend r $msg +} {1 {datatype mismatch}} +do_test intpkey-1.14 { + set r [catch {execsql { + INSERT INTO t1 VALUES(3.4,'y','z'); + }} msg] + lappend r $msg +} {1 {datatype mismatch}} +do_test intpkey-1.15 { + set r [catch {execsql { + INSERT INTO t1 VALUES(-3,'y','z'); + }} msg] + lappend r $msg +} {0 {}} +do_test intpkey-1.16 { + execsql {SELECT * FROM t1} +} {-3 y z 4 one two 5 hello world 6 second entry} + +#### INDICES +# Check to make sure indices work correctly with integer primary keys +# +do_test intpkey-2.1 { + execsql { + CREATE INDEX i1 ON t1(b); + SELECT * FROM t1 WHERE b=='y' + } +} {-3 y z} +do_test intpkey-2.1.1 { + execsql { + SELECT * FROM t1 WHERE b=='y' AND rowid<0 + } +} {-3 y z} +do_test intpkey-2.1.2 { + execsql { + SELECT * FROM t1 WHERE b=='y' AND rowid<0 AND rowid>=-20 + } +} {-3 y z} +do_test intpkey-2.1.3 { + execsql { + SELECT * FROM t1 WHERE b>='y' + } +} {-3 y z} +do_test intpkey-2.1.4 { + execsql { + SELECT * FROM t1 WHERE b>='y' AND rowid<10 + } +} {-3 y z} + +do_test intpkey-2.2 { + execsql { + UPDATE t1 SET a=8 WHERE b=='y'; + SELECT * FROM t1 WHERE b=='y'; + } +} {8 y z} +do_test intpkey-2.3 { + execsql { + SELECT rowid, * FROM t1; + } +} {4 4 one two 5 5 hello world 6 6 second entry 8 8 y z} +do_test intpkey-2.4 { + execsql { + SELECT rowid, * FROM t1 WHERE b<'second' + } +} {5 5 hello world 4 4 one two} +do_test intpkey-2.4.1 { + execsql { + SELECT rowid, * FROM t1 WHERE 'second'>b + } +} {5 5 hello world 4 4 one two} +do_test intpkey-2.4.2 { + execsql { + SELECT rowid, * FROM t1 WHERE 8>rowid AND 'second'>b + } +} {4 4 one two 5 5 hello world} 
+do_test intpkey-2.4.3 { + execsql { + SELECT rowid, * FROM t1 WHERE 8>rowid AND 'second'>b AND 0'a' + } +} {5 5 hello world 4 4 one two 6 6 second entry 8 8 y z} +do_test intpkey-2.6 { + execsql { + DELETE FROM t1 WHERE rowid=4; + SELECT * FROM t1 WHERE b>'a'; + } +} {5 hello world 6 second entry 8 y z} +do_test intpkey-2.7 { + execsql { + UPDATE t1 SET a=-4 WHERE rowid=8; + SELECT * FROM t1 WHERE b>'a'; + } +} {5 hello world 6 second entry -4 y z} +do_test intpkey-2.7 { + execsql { + SELECT * FROM t1 + } +} {-4 y z 5 hello world 6 second entry} + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# Create indices that include the integer primary key as one of their +# columns. +# +do_test intpkey-3.1 { + execsql { + CREATE INDEX i2 ON t1(a); + } +} {} +do_test intpkey-3.2 { + count { + SELECT * FROM t1 WHERE a=5; + } +} {5 hello world 0} +do_test intpkey-3.3 { + count { + SELECT * FROM t1 WHERE a>4 AND a<6; + } +} {5 hello world 2} +do_test intpkey-3.4 { + count { + SELECT * FROM t1 WHERE b>='hello' AND b<'hello2'; + } +} {5 hello world 3} +do_test intpkey-3.5 { + execsql { + CREATE INDEX i3 ON t1(c,a); + } +} {} +do_test intpkey-3.6 { + count { + SELECT * FROM t1 WHERE c=='world'; + } +} {5 hello world 3} +do_test intpkey-3.7 { + execsql {INSERT INTO t1 VALUES(11,'hello','world')} + count { + SELECT * FROM t1 WHERE c=='world'; + } +} {5 hello world 11 hello world 5} +do_test intpkey-3.8 { + count { + SELECT * FROM t1 WHERE c=='world' AND a>7; + } +} {11 hello world 5} +do_test intpkey-3.9 { + count { + SELECT * FROM t1 WHERE 7=oid; + } +} {11 hello world 1} +do_test intpkey-4.9 { + count { + SELECT * FROM t1 WHERE 11<=_rowid_ AND 12>=a; + } +} {11 hello world 1} +do_test intpkey-4.10 { + count { + SELECT * FROM t1 WHERE 0>=_rowid_; + } +} {-4 y z 1} +do_test intpkey-4.11 { + count { + SELECT * FROM t1 WHERE a<0; + } +} {-4 y z 1} +do_test intpkey-4.12 { + count { + SELECT * FROM t1 WHERE a<0 AND a>10; + } +} {1} + +# Make sure it is OK to insert a rowid of 0 +# +do_test intpkey-5.1 { + execsql { + INSERT INTO t1 VALUES(0,'zero','entry'); + } + count { + SELECT * FROM t1 WHERE a=0; + } +} {0 zero entry 0} +do_test intpkey-5.2 { + execsql { + SELECT rowid, a FROM t1 + } +} {-4 -4 0 0 5 5 6 6 11 11} + +# Test the ability of the COPY command to put data into a +# table that contains an integer primary key. +# +# COPY command has been removed. But we retain these tests so +# that the tables will contain the right data for tests that follow. +# +do_test intpkey-6.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(20,'b-20','c-20'); + INSERT INTO t1 VALUES(21,'b-21','c-21'); + INSERT INTO t1 VALUES(22,'b-22','c-22'); + COMMIT; + SELECT * FROM t1 WHERE a>=20; + } +} {20 b-20 c-20 21 b-21 c-21 22 b-22 c-22} +do_test intpkey-6.2 { + execsql { + SELECT * FROM t1 WHERE b=='hello' + } +} {5 hello world 11 hello world} +do_test intpkey-6.3 { + execsql { + DELETE FROM t1 WHERE b='b-21'; + SELECT * FROM t1 WHERE b=='b-21'; + } +} {} +do_test intpkey-6.4 { + execsql { + SELECT * FROM t1 WHERE a>=20 + } +} {20 b-20 c-20 22 b-22 c-22} + +# Do an insert of values with the columns specified out of order. 
+# +do_test intpkey-7.1 { + execsql { + INSERT INTO t1(c,b,a) VALUES('row','new',30); + SELECT * FROM t1 WHERE rowid>=30; + } +} {30 new row} +do_test intpkey-7.2 { + execsql { + SELECT * FROM t1 WHERE rowid>20; + } +} {22 b-22 c-22 30 new row} + +# Do an insert from a select statement. +# +do_test intpkey-8.1 { + execsql { + CREATE TABLE t2(x INTEGER PRIMARY KEY, y, z); + INSERT INTO t2 SELECT * FROM t1; + SELECT rowid FROM t2; + } +} {-4 0 5 6 11 20 22 30} +do_test intpkey-8.2 { + execsql { + SELECT x FROM t2; + } +} {-4 0 5 6 11 20 22 30} + +do_test intpkey-9.1 { + execsql { + UPDATE t1 SET c='www' WHERE c='world'; + SELECT rowid, a, c FROM t1 WHERE c=='www'; + } +} {5 5 www 11 11 www} + + +# Check insert of NULL for primary key +# +do_test intpkey-10.1 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(x INTEGER PRIMARY KEY, y, z); + INSERT INTO t2 VALUES(NULL, 1, 2); + SELECT * from t2; + } +} {1 1 2} +do_test intpkey-10.2 { + execsql { + INSERT INTO t2 VALUES(NULL, 2, 3); + SELECT * from t2 WHERE x=2; + } +} {2 2 3} +do_test intpkey-10.3 { + execsql { + INSERT INTO t2 SELECT NULL, z, y FROM t2; + SELECT * FROM t2; + } +} {1 1 2 2 2 3 3 2 1 4 3 2} + +# This tests checks to see if a floating point number can be used +# to reference an integer primary key. +# +do_test intpkey-11.1 { + execsql { + SELECT b FROM t1 WHERE a=2.0+3.0; + } +} {hello} +do_test intpkey-11.1 { + execsql { + SELECT b FROM t1 WHERE a=2.0+3.5; + } +} {} + +integrity_check intpkey-12.1 + +# Try to use a string that looks like a floating point number as +# an integer primary key. This should actually work when the floating +# point value can be rounded to an integer without loss of data. +# +do_test intpkey-13.1 { + execsql { + SELECT * FROM t1 WHERE a=1; + } +} {} +do_test intpkey-13.2 { + execsql { + INSERT INTO t1 VALUES('1.0',2,3); + SELECT * FROM t1 WHERE a=1; + } +} {1 2 3} +do_test intpkey-13.3 { + catchsql { + INSERT INTO t1 VALUES('1.5',3,4); + } +} {1 {datatype mismatch}} +ifcapable {bloblit} { + do_test intpkey-13.4 { + catchsql { + INSERT INTO t1 VALUES(x'123456',3,4); + } + } {1 {datatype mismatch}} +} +do_test intpkey-13.5 { + catchsql { + INSERT INTO t1 VALUES('+1234567890',3,4); + } +} {0 {}} + +# Compare an INTEGER PRIMARY KEY against a TEXT expression. The INTEGER +# affinity should be applied to the text value before the comparison +# takes place. 
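+# (In other words, in the tests below a>'2' behaves like a>2: the text
+# value is coerced to a number before being compared against the rowid,
+# rather than the rowid being converted to text.)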
+# +do_test intpkey-14.1 { + execsql { + CREATE TABLE t3(a INTEGER PRIMARY KEY, b INTEGER, c TEXT); + INSERT INTO t3 VALUES(1, 1, 'one'); + INSERT INTO t3 VALUES(2, 2, '2'); + INSERT INTO t3 VALUES(3, 3, 3); + } +} {} +do_test intpkey-14.2 { + execsql { + SELECT * FROM t3 WHERE a>2; + } +} {3 3 3} +do_test intpkey-14.3 { + execsql { + SELECT * FROM t3 WHERE a>'2'; + } +} {3 3 3} +do_test intpkey-14.4 { + execsql { + SELECT * FROM t3 WHERE a<'2'; + } +} {1 1 one} +do_test intpkey-14.5 { + execsql { + SELECT * FROM t3 WHERE a2147483648; + } +} {} +do_test intpkey-15.2 { + execsql { + INSERT INTO t1 VALUES(NULL, 'big-2', 234); + SELECT b FROM t1 WHERE a>=2147483648; + } +} {big-2} +do_test intpkey-15.3 { + execsql { + SELECT b FROM t1 WHERE a>2147483648; + } +} {} +do_test intpkey-15.4 { + execsql { + SELECT b FROM t1 WHERE a>=2147483647; + } +} {big-1 big-2} +do_test intpkey-15.5 { + execsql { + SELECT b FROM t1 WHERE a<2147483648; + } +} {y zero 2 hello second hello b-20 b-22 new 3 big-1} +do_test intpkey-15.6 { + execsql { + SELECT b FROM t1 WHERE a<12345678901; + } +} {y zero 2 hello second hello b-20 b-22 new 3 big-1 big-2} +do_test intpkey-15.7 { + execsql { + SELECT b FROM t1 WHERE a>12345678901; + } +} {} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/io.test b/libraries/sqlite/unix/sqlite-3.5.1/test/io.test new file mode 100644 index 0000000..74fb0e8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/io.test @@ -0,0 +1,549 @@ +# 2007 August 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing some specific characteristics of the +# IO traffic generated by SQLite (making sure SQLite is not writing out +# more database pages than it has to, stuff like that). +# +# $Id: io.test,v 1.11 2007/10/03 21:18:20 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test summary: +# +# io-1.* - Test that quick-balance does not journal pages unnecessarily. +# +# io-2.* - Test the "atomic-write optimization". +# +# io-3.* - Test the IO traffic enhancements triggered when the +# IOCAP_SEQUENTIAL device capability flag is set (no +# fsync() calls on the journal file). +# +# io-4.* - Test the IO traffic enhancements triggered when the +# IOCAP_SAFE_APPEND device capability flag is set (fewer +# fsync() calls on the journal file, no need to set nRec +# field in the single journal header). +# +# io-5.* - Test that the default page size is selected and used +# correctly. +# + +set ::nWrite 0 +proc nWrite {db} { + set bt [btree_from_db $db] + db_enter $db + array set stats [btree_pager_stats $bt] + db_leave $db + set res [expr $stats(write) - $::nWrite] + set ::nWrite $stats(write) + set res +} + +set ::nSync 0 +proc nSync {} { + set res [expr {$::sqlite_sync_count - $::nSync}] + set ::nSync $::sqlite_sync_count + set res +} + +do_test io-1.1 { + execsql { + PRAGMA auto_vacuum = OFF; + PRAGMA page_size = 1024; + CREATE TABLE abc(a,b); + } + nWrite db +} {2} + +# Insert into the table 4 records of aproximately 240 bytes each. +# This should completely fill the root-page of the table. Each +# INSERT causes 2 db pages to be written - the root-page of "abc" +# and page 1 (db change-counter page). 
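+# (Page 1 is counted on every write transaction because the 32-bit
+# change counter in the database header is incremented at commit time,
+# which is how other connections notice that their cached pages have
+# become stale.)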
+do_test io-1.2 { + set ret [list] + execsql { INSERT INTO abc VALUES(1,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(2,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(3,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(4,randstr(230,230)); } + lappend ret [nWrite db] +} {2 2 2 2} + +# Insert another 240 byte record. This causes two leaf pages +# to be added to the root page of abc. 4 pages in total +# are written to the db file - the two leaf pages, the root +# of abc and the change-counter page. +do_test io-1.3 { + execsql { INSERT INTO abc VALUES(5,randstr(230,230)); } + nWrite db +} {4} + +# Insert another 3 240 byte records. After this, the tree consists of +# the root-node, which is close to empty, and two leaf pages, both of +# which are full. +do_test io-1.4 { + set ret [list] + execsql { INSERT INTO abc VALUES(6,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(7,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(8,randstr(230,230)); } + lappend ret [nWrite db] +} {2 2 2} + +# This insert should use the quick-balance trick to add a third leaf +# to the b-tree used to store table abc. It should only be necessary to +# write to 3 pages to do this: the change-counter, the root-page and +# the new leaf page. +do_test io-1.5 { + execsql { INSERT INTO abc VALUES(9,randstr(230,230)); } + nWrite db +} {3} + +ifcapable atomicwrite { + +#---------------------------------------------------------------------- +# Test cases io-2.* test the atomic-write optimization. +# +do_test io-2.1 { + execsql { DELETE FROM abc; VACUUM; } +} {} + +# Clear the write and sync counts. +nWrite db ; nSync + +# The following INSERT updates 2 pages and requires 4 calls to fsync(): +# +# 1) The directory in which the journal file is created, +# 2) The journal file (to sync the page data), +# 3) The journal file (to sync the journal file header), +# 4) The database file. +# +do_test io-2.2 { + execsql { INSERT INTO abc VALUES(1, 2) } + list [nWrite db] [nSync] +} {2 4} + +# Set the device-characteristic mask to include the SQLITE_IOCAP_ATOMIC, +# then do another INSERT similar to the one in io-2.2. This should +# only write 1 page and require a single fsync(). +# +# The single fsync() is the database file. Only one page is reported as +# written because page 1 - the change-counter page - is written using +# an out-of-band method that bypasses the write counter. +# +sqlite3_simulate_device -char atomic +do_test io-2.3 { + execsql { INSERT INTO abc VALUES(3, 4) } + list [nWrite db] [nSync] +} {1 1} + +# Test that the journal file is not created and the change-counter is +# updated when the atomic-write optimization is used. +# +do_test io-2.4.1 { + execsql { + BEGIN; + INSERT INTO abc VALUES(5, 6); + } + sqlite3 db2 test.db + execsql { SELECT * FROM abc } db2 +} {1 2 3 4} +do_test io-2.4.2 { + file exists test.db-journal +} {0} +do_test io-2.4.3 { + execsql { COMMIT } + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} +db2 close + +# Test that the journal file is created and sync()d if the transaction +# modifies more than one database page, even if the IOCAP_ATOMIC flag +# is set. 
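+# (Even when each individual write() is atomic, two dirty pages live at
+# different file offsets and need two separate writes, and the pair of
+# writes is not atomic as a unit, so the rollback journal is needed
+# again.)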
+# +do_test io-2.5.1 { + execsql { CREATE TABLE def(d, e) } + nWrite db ; nSync + execsql { + BEGIN; + INSERT INTO abc VALUES(7, 8); + } + file exists test.db-journal +} {0} +do_test io-2.5.2 { + execsql { INSERT INTO def VALUES('a', 'b'); } + file exists test.db-journal +} {1} +do_test io-2.5.3 { + execsql { COMMIT } + list [nWrite db] [nSync] +} {3 4} + +# Test that the journal file is created and sync()d if the transaction +# modifies a single database page and also appends a page to the file. +# Internally, this case is handled differently to the one above. The +# journal file is not actually created until the 'COMMIT' statement +# is executed. +# +do_test io-2.6.1 { + execsql { + BEGIN; + INSERT INTO abc VALUES(9, randstr(1000,1000)); + } + file exists test.db-journal +} {0} +do_test io-2.6.2 { + # Create a file at "test.db-journal". This will prevent SQLite from + # opening the journal for exclusive access. As a result, the COMMIT + # should fail with SQLITE_CANTOPEN and the transaction rolled back. + # + set fd [open test.db-journal w] + puts $fd "This is not a journal file" + close $fd + catchsql { COMMIT } +} {1 {unable to open database file}} +do_test io-2.6.3 { + file delete -force test.db-journal + catchsql { COMMIT } +} {1 {cannot commit - no transaction is active}} +do_test io-2.6.4 { + execsql { SELECT * FROM abc } +} {1 2 3 4 5 6 7 8} + + +# Test that if the database modification is part of multi-file commit, +# the journal file is always created. In this case, the journal file +# is created during execution of the COMMIT statement, so we have to +# use the same technique to check that it is created as in the above +# block. +file delete -force test2.db test2.db-journal +do_test io-2.7.1 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.page_size = 1024; + CREATE TABLE aux.abc2(a, b); + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal +} {0} +do_test io-2.7.2 { + execsql { INSERT INTO abc2 SELECT * FROM abc } + file exists test2.db-journal +} {0} +do_test io-2.7.3 { + execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } +} {1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10} +do_test io-2.7.4 { + set fd [open test2.db-journal w] + puts $fd "This is not a journal file" + close $fd + catchsql { COMMIT } +} {1 {unable to open database file}} +do_test io-2.7.5 { + file delete -force test2.db-journal + catchsql { COMMIT } +} {1 {cannot commit - no transaction is active}} +do_test io-2.7.6 { + execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } +} {1 2 3 4 5 6 7 8} + +# Try an explicit ROLLBACK before the journal file is created. +# +do_test io-2.8.1 { + execsql { + BEGIN; + DELETE FROM abc; + } + file exists test.db-journal +} {0} +do_test io-2.8.2 { + execsql { SELECT * FROM abc } +} {} +do_test io-2.8.3 { + execsql { + ROLLBACK; + SELECT * FROM abc; + } +} {1 2 3 4 5 6 7 8} + +# Test that the atomic write optimisation is not enabled if the sector +# size is larger than the page-size. 
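+#
+# (The io-2.9.2 case below works because PRAGMA page_size only takes effect
+# while the database file is still empty; once the first table has been
+# created the page size is fixed. A rough standalone sketch, with the
+# "demo" handle and file name as placeholders, not run by this script:)
+#
+#   package require sqlite3
+#   sqlite3 demo pgdemo.db
+#   demo eval { PRAGMA page_size = 2048 }    ;# before the file is initialized
+#   demo eval { CREATE TABLE t(x) }          ;# first write fixes the page size
+#   puts [demo eval { PRAGMA page_size }]    ;# 2048
+#   demo eval { PRAGMA page_size = 4096 }    ;# too late - ignored from now on
+#   puts [demo eval { PRAGMA page_size }]    ;# still 2048
+#   demo close
+#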
+# +do_test io-2.9.1 { + sqlite3_simulate_device -char atomic -sectorsize 2048 + execsql { + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal +} {1} +do_test io-2.9.2 { + execsql { ROLLBACK; } + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum = OFF; + PRAGMA page_size = 2048; + CREATE TABLE abc(a, b); + } + execsql { + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal +} {0} +do_test io-2.9.3 { + execsql { COMMIT } +} {} + +# Test a couple of the more specific IOCAP_ATOMIC flags +# (i.e IOCAP_ATOMIC2K etc.). +# +do_test io-2.10.1 { + sqlite3_simulate_device -char atomic1k + execsql { + BEGIN; + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {1} +do_test io-2.10.2 { + execsql { ROLLBACK } + sqlite3_simulate_device -char atomic2k + execsql { + BEGIN; + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {0} +do_test io-2.10.3 { + execsql { ROLLBACK } +} {} + +do_test io-2.11.0 { + execsql { + PRAGMA locking_mode = exclusive; + PRAGMA locking_mode; + } +} {exclusive exclusive} +breakpoint +do_test io-2.11.1 { + execsql { + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {0} +breakpoint + +do_test io-2.11.2 { + execsql { + PRAGMA locking_mode = normal; + INSERT INTO abc VALUES(13, 14); + } + file exists test.db-journal +} {0} + +} ;# /* ifcapable atomicwrite */ + +#---------------------------------------------------------------------- +# Test cases io-3.* test the IOCAP_SEQUENTIAL optimization. +# +sqlite3_simulate_device -char sequential -sectorsize 0 +ifcapable pager_pragmas { + do_test io-3.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval { + PRAGMA auto_vacuum=OFF; + } + file size test.db + } {0} + do_test io-3.2 { + execsql { CREATE TABLE abc(a, b) } + nSync + execsql { + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO abc VALUES('hello', 'world'); + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + # File has grown - showing there was a cache-spill - but there + # have been no calls to fsync(): + list [file size test.db] [nSync] + } {31744 0} + do_test io-3.3 { + # The COMMIT requires a single fsync() - to the database file. + execsql { COMMIT } + list [file size test.db] [nSync] + } {39936 1} +} + +#---------------------------------------------------------------------- +# Test cases io-4.* test the IOCAP_SAFE_APPEND optimization. +# +sqlite3_simulate_device -char safe_append + +# With the SAFE_APPEND flag set, simple transactions require 3, rather +# than 4, calls to fsync(). The fsync() calls are on: +# +# 1) The directory in which the journal file is created, (unix only) +# 2) The journal file (to sync the page data), +# 3) The database file. +# +# Normally, when the SAFE_APPEND flag is not set, there is another fsync() +# on the journal file between steps (2) and (3) above. 
+# +if {$::tcl_platform(platform)=="unix"} { + set expected_sync_count 3 +} else { + set expected_sync_count 2 +} +do_test io-4.1 { + execsql { DELETE FROM abc } + nSync + execsql { INSERT INTO abc VALUES('a', 'b') } + nSync +} $expected_sync_count + +# With SAFE_APPEND set, the nRec field of the journal file header should +# be set to 0xFFFFFFFF before the first journal sync. The nRec field +# occupies bytes 8-11 of the journal file. +# +do_test io-4.2.1 { + execsql { BEGIN } + execsql { INSERT INTO abc VALUES('c', 'd') } + file exists test.db-journal +} {1} +if {$::tcl_platform(platform)=="unix"} { + do_test io-4.2.2 { + set fd [open test.db-journal] + fconfigure $fd -translation binary -encoding binary + seek $fd 8 + set blob [read $fd 4] + close $fd + binary scan $blob i res + format 0x%X $res + } {0xFFFFFFFF} +} +do_test io-4.2.3 { + execsql { COMMIT } + nSync +} $expected_sync_count +sqlite3_simulate_device -char safe_append + +# With SAFE_APPEND set, there should only ever be one journal-header +# written to the database, even though the sync-mode is "full". +# +do_test io-4.3.1 { + execsql { + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + expr {[file size test.db]/1024} +} {43} +ifcapable pager_pragmas { + do_test io-4.3.2 { + execsql { + PRAGMA synchronous = full; + PRAGMA cache_size = 10; + PRAGMA synchronous; + } + } {2} +} +do_test io-4.3.3 { + execsql { + BEGIN; + UPDATE abc SET a = 'x'; + } + file exists test.db-journal +} {1} +do_test io-4.3.4 { + # The UPDATE statement in the statement above modifies 41 pages + # (all pages in the database except page 1 and the root page of + # abc). Because the cache_size is set to 10, this must have required + # at least 4 cache-spills. If there were no journal headers written + # to the journal file after the cache-spill, then the size of the + # journal file is give by: + # + # = + nPage * ( + 8) + # + # If the journal file contains additional headers, this formula + # will not predict the size of the journal file. + # + file size test.db-journal +} [expr 1024 + (1024+8)*41] + +#---------------------------------------------------------------------- +# Test cases io-5.* test that the default page size is selected and +# used correctly. 
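+#
+# (Each io-5 case below creates one table in a fresh database and divides
+# the file size by 2: immediately after CREATE TABLE the file holds exactly
+# two pages - page 1 with the schema and the new table's empty root page -
+# so file-size/2 recovers whatever page size was chosen. A rough standalone
+# version of the same check, with illustrative names and not run as part of
+# this script:)
+#
+#   package require sqlite3
+#   file delete -force sizedemo.db
+#   sqlite3 demo sizedemo.db
+#   demo eval { PRAGMA auto_vacuum = OFF }
+#   demo eval { CREATE TABLE abc(a, b, c) }
+#   puts [demo eval { PRAGMA page_size }]        ;# the selected page size
+#   puts [expr {[file size sizedemo.db] / 2}]    ;# same value
+#   demo close
+#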
+# +set tn 0 +foreach {char sectorsize pgsize} { + {} 512 1024 + {} 1024 1024 + {} 2048 2048 + {} 8192 8192 + {} 16384 8192 + {atomic} 512 8192 + {atomic512} 512 1024 + {atomic2K} 512 2048 + {atomic2K} 4096 4096 + {atomic2K atomic} 512 8192 + {atomic64K} 512 1024 +} { + incr tn + if {$pgsize>$::SQLITE_MAX_PAGE_SIZE} continue + db close + file delete -force test.db test.db-journal + sqlite3_simulate_device -char $char -sectorsize $sectorsize + sqlite3 db test.db + db eval { + PRAGMA auto_vacuum=OFF; + } + ifcapable !atomicwrite { + if {[regexp {^atomic} $char]} continue + } + do_test io-5.$tn { + execsql { + CREATE TABLE abc(a, b, c); + } + expr {[file size test.db]/2} + } $pgsize +} + +sqlite3_simulate_device -char {} -sectorsize 0 +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr.test b/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr.test new file mode 100644 index 0000000..9fb2668 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr.test @@ -0,0 +1,290 @@ +# 2001 October 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing for correct handling of I/O errors +# such as writes failing because the disk is full. +# +# The tests in this file use special facilities that are only +# available in the SQLite test fixture. +# +# $Id: ioerr.test,v 1.32 2007/09/01 17:00:13 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# If SQLITE_DEFAULT_AUTOVACUUM is set to true, then a simulated IO error +# on the 8th IO operation in the SQL script below doesn't report an error. +# +# This is because the 8th IO call attempts to read page 2 of the database +# file when the file on disk is only 1 page. The pager layer detects that +# this has happened and suppresses the error returned by the OS layer. +# +do_ioerr_test ioerr-1 -erc 1 -sqlprep { + SELECT * FROM sqlite_master; +} -sqlbody { + CREATE TABLE t1(a,b,c); + SELECT * FROM sqlite_master; + BEGIN TRANSACTION; + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(4,5,6); + ROLLBACK; + SELECT * FROM t1; + BEGIN TRANSACTION; + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(4,5,6); + COMMIT; + SELECT * FROM t1; + DELETE FROM t1 WHERE a<100; +} -exclude [expr [string match [execsql {pragma auto_vacuum}] 1] ? 4 : 0] + +# Test for IO errors during a VACUUM. +# +# The first IO call is excluded from the test. This call attempts to read +# the file-header of the temporary database used by VACUUM. Since the +# database doesn't exist at that point, the IO error is not detected. +# +# Additionally, if auto-vacuum is enabled, the 12th IO error is not +# detected. Same reason as the 8th in the test case above. 
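+#
+# (do_ioerr_test, defined in tester.tcl, arranges for the N'th simulated
+# I/O operation to fail and then verifies that the statement reports the
+# failure and leaves the database consistent. Outside the test fixture the
+# visible symptom is simply that the statement returns SQLITE_IOERR, which
+# the Tcl bindings raise as a "disk I/O error" message. A rough sketch of
+# trapping it - handle and file names are placeholders, not run here:)
+#
+#   package require sqlite3
+#   sqlite3 demo errdemo.db
+#   if {[catch {demo eval { VACUUM }} msg]} {
+#     puts "statement failed: $msg"      ;# e.g. "disk I/O error"
+#   } else {
+#     puts "statement succeeded"
+#   }
+#   demo close
+#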
+# +ifcapable vacuum { + do_ioerr_test ioerr-2 -cksum true -sqlprep { + BEGIN; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, randstr(50,50), randstr(50,50)); + INSERT INTO t1 SELECT a+2, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+4, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+8, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+16, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+32, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+64, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+128, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 VALUES(1, randstr(600,600), randstr(600,600)); + CREATE TABLE t2 AS SELECT * FROM t1; + CREATE TABLE t3 AS SELECT * FROM t1; + COMMIT; + DROP TABLE t2; + } -sqlbody { + VACUUM; + } -exclude [list \ + 1 [expr [string match [execsql {pragma auto_vacuum}] 1]?9:-1]] +} + +do_ioerr_test ioerr-3 -tclprep { + execsql { + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1500,1500)); -- Page 4 is overflow + } + for {set i 0} {$i<150} {incr i} { + execsql { + INSERT INTO abc VALUES(randstr(100,100)); + } + } + execsql COMMIT +} -sqlbody { + CREATE TABLE abc2(a); + BEGIN; + DELETE FROM abc WHERE length(a)>100; + UPDATE abc SET a = randstr(90,90); + COMMIT; + CREATE TABLE abc3(a); +} + +# Test IO errors that can occur retrieving a record header that flows over +# onto an overflow page. +do_ioerr_test ioerr-4 -tclprep { + set sql "CREATE TABLE abc(a1" + for {set i 2} {$i<1300} {incr i} { + append sql ", a$i" + } + append sql ");" + execsql $sql + execsql {INSERT INTO abc (a1) VALUES(NULL)} +} -sqlbody { + SELECT * FROM abc; +} + +# Test IO errors that may occur during a multi-file commit. +# +# Tests 8 and 17 are excluded when auto-vacuum is enabled for the same +# reason as in test cases ioerr-1.XXX +set ex "" +if {[string match [execsql {pragma auto_vacuum}] 1]} { + set ex [list 4 17] +} +do_ioerr_test ioerr-5 -sqlprep { + ATTACH 'test2.db' AS test2; +} -sqlbody { + BEGIN; + CREATE TABLE t1(a,b,c); + CREATE TABLE test2.t2(a,b,c); + COMMIT; +} -exclude $ex + +# Test IO errors when replaying two hot journals from a 2-file +# transaction. This test only runs on UNIX. +ifcapable crashtest { + if {![catch {sqlite3 -has_codec} r] && !$r} { + do_ioerr_test ioerr-6 -tclprep { + execsql { + ATTACH 'test2.db' as aux; + CREATE TABLE tx(a, b); + CREATE TABLE aux.ty(a, b); + } + set rc [crashsql -delay 2 -file test2.db-journal { + ATTACH 'test2.db' as aux; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE aux.t2(a, b, c); + CREATE TABLE t1(a, b, c); + COMMIT; + }] + if {$rc!="1 {child process exited abnormally}"} { + error "Wrong error message: $rc" + } + } -sqlbody { + SELECT * FROM sqlite_master; + SELECT * FROM aux.sqlite_master; + } + } +} + +# Test handling of IO errors that occur while rolling back hot journal +# files. +# +# These tests can't be run on windows because the windows version of +# SQLite holds a mandatory exclusive lock on journal files it has open. 
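+#
+# (The ioerr-7 setup below manufactures a "hot" journal by copying both the
+# database and its journal while another connection still holds an open
+# write transaction; the next connection to open the copy has to roll that
+# journal back before it can read. A rough standalone sketch of the same
+# idea - file and handle names are only examples, and the snippet is not
+# run as part of this script:)
+#
+#   package require sqlite3
+#   file delete -force src.db src.db-journal hot.db hot.db-journal
+#   sqlite3 src src.db
+#   src eval { CREATE TABLE t1(a, b) }
+#   src eval { BEGIN; INSERT INTO t1 VALUES(1, 2); }   ;# journal now on disk
+#   file copy -force src.db         hot.db
+#   file copy -force src.db-journal hot.db-journal     ;# hot journal for hot.db
+#   src close
+#   sqlite3 dst hot.db
+#   puts [dst eval { SELECT count(*) FROM t1 }]   ;# 0 - the hot journal is rolled back first
+#   dst close
+#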
+# +btree_breakpoint +if {$tcl_platform(platform)!="windows"} { + do_ioerr_test ioerr-7 -tclprep { + db close + sqlite3 db2 test2.db + db2 eval { + PRAGMA synchronous = 0; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + BEGIN; + INSERT INTO t1 VALUES(3, 4); + } + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + db2 close + } -tclbody { + sqlite3 db test.db + db eval { + SELECT * FROM t1; + } + } -exclude 1 +} + +# For test coverage: Cause an I/O failure while trying to read a +# short field (one that fits into a Mem buffer without mallocing +# for space). +# +do_ioerr_test ioerr-8 -tclprep { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); + } + db close + sqlite3 db test.db +} -sqlbody { + SELECT c FROM t1; +} + +# For test coverage: Cause an IO error whilst reading the master-journal +# name from a journal file. +if {$tcl_platform(platform)=="unix"} { + do_ioerr_test ioerr-9 -tclprep { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); + BEGIN; + INSERT INTO t1 VALUES(randstr(200,200), randstr(1000,1000), 2); + } + copy_file test.db-journal test2.db-journal + execsql { + COMMIT; + } + copy_file test2.db-journal test.db-journal + set f [open test.db-journal a] + fconfigure $f -encoding binary + puts -nonewline $f "hello" + puts -nonewline $f "\x00\x00\x00\x05\x01\x02\x03\x04" + puts -nonewline $f "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" + close $f + } -sqlbody { + SELECT a FROM t1; + } +} + +# For test coverage: Cause an IO error during statement playback (i.e. +# a constraint). +do_ioerr_test ioerr-10 -tclprep { + execsql { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + } + for {set i 0} {$i < 500} {incr i} { + execsql {INSERT INTO t1 VALUES(:i, 'hello world');} + } + execsql { + COMMIT; + } +} -tclbody { + + catch {execsql { + BEGIN; + INSERT INTO t1 VALUES('abc', 123); + INSERT INTO t1 VALUES('def', 123); + INSERT INTO t1 VALUES('ghi', 123); + INSERT INTO t1 SELECT (a+500)%900, 'good string' FROM t1; + }} msg + + if {$msg != "column a is not unique"} { + error $msg + } +} + +# Assertion fault bug reported by alex dimitrov. +# +do_ioerr_test ioerr-11 -erc 1 -sqlprep { + CREATE TABLE A(Id INTEGER, Name TEXT); + INSERT INTO A(Id, Name) VALUES(1, 'Name'); +} -sqlbody { + UPDATE A SET Id = 2, Name = 'Name2' WHERE Id = 1; +} + +# Test that an io error encountered in a sync() caused by a call to +# sqlite3_release_memory() is handled Ok. Only try this if +# memory-management is enabled. +# +ifcapable memorymanage { + do_ioerr_test memmanage-ioerr1 -sqlprep { + BEGIN; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(randstr(50,50), randstr(100,100), randstr(10,10)); + INSERT INTO t1 SELECT randstr(50,50), randstr(9,9), randstr(90,90) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(9,9), randstr(90,90) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(9,9), randstr(90,90) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(9,9), randstr(90,90) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(9,9), randstr(90,90) FROM t1; + } -tclbody { + sqlite3_release_memory + } -sqlbody { + COMMIT; + } +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr2.test new file mode 100644 index 0000000..ff72f82 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/ioerr2.test @@ -0,0 +1,115 @@ +# 2007 April 2 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing for correct handling of I/O errors +# such as writes failing because the disk is full. +# +# The tests in this file use special facilities that are only +# available in the SQLite test fixture. +# +# $Id: ioerr2.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !integrityck { + finish_test + return +} + +do_test ioerr2-1.1 { + execsql { + PRAGMA cache_size = 10; + PRAGMA default_cache_size = 10; + CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); + INSERT INTO t1 VALUES(randstr(400,400),randstr(400,400)); + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 4 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 8 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 16 + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 32 + } +} {} + +set ::cksum [execsql {SELECT md5sum(a, b) FROM t1}] +proc check_db {testname} { + + # Make sure no I/O errors are simulated in this proc. + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_persist 0 + set ::sqlite_io_error_pending 0 + + # Run an integrity-check. If "disk I/O error" is returned, the + # pager must be in error state. In this case open a new database + # connection. Otherwise, try a ROLLBACK, in case a transaction + # is still active. + set rc [catch {execsql {PRAGMA integrity_check}} msg] + if {$rc && $msg eq "disk I/O error"} { + db close + sqlite3 db test.db + set refcnt 0 + } else { + if {$rc || $msg ne "ok"} { + error $msg + } + catch {execsql ROLLBACK} + } + + # Check that the database checksum is still $::cksum, and that + # the integrity-check passes. 
+ set ck [execsql {SELECT md5sum(a, b) FROM t1}] + do_test ${testname}.cksum [list set ck $ck] $::cksum + integrity_check ${testname}.integrity + do_test ${testname}.refcnt { + lindex [sqlite3_pager_refcounts db] 0 + } 0 +} + +check_db ioerr2-2 + +set sql { + PRAGMA cache_size = 10; + PRAGMA default_cache_size = 10; + BEGIN; + DELETE FROM t1 WHERE (oid%7)==0; + INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) + WHERE (random()%7)==0; + UPDATE t1 SET a = randstr(400,400), b = randstr(400,400) + WHERE (random()%7)==0; + ROLLBACK; +} + +foreach bPersist [list 0 1] { + set ::go 1 + for {set ::N 1} {$::go} {incr ::N} { + db close + sqlite3 db test.db + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_persist $bPersist + set ::sqlite_io_error_pending $::N + + foreach {::go res} [catchsql $sql] {} + check_db ioerr2-3.$bPersist.$::N + } +} +foreach bPersist [list 0 1] { + set ::go 1 + for {set ::N 1} {$::go} {incr ::N} { + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_persist $bPersist + set ::sqlite_io_error_pending $::N + + foreach {::go res} [catchsql $sql] {} + check_db ioerr2-4.[expr {$bPersist+2}].$::N + } +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/join.test b/libraries/sqlite/unix/sqlite-3.5.1/test/join.test new file mode 100644 index 0000000..2a13128 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/join.test @@ -0,0 +1,461 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for joins, including outer joins. 
+# +# $Id: join.test,v 1.22 2006/06/20 11:01:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test join-1.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,3,4); + INSERT INTO t1 VALUES(3,4,5); + SELECT * FROM t1; + } +} {1 2 3 2 3 4 3 4 5} +do_test join-1.2 { + execsql { + CREATE TABLE t2(b,c,d); + INSERT INTO t2 VALUES(1,2,3); + INSERT INTO t2 VALUES(2,3,4); + INSERT INTO t2 VALUES(3,4,5); + SELECT * FROM t2; + } +} {1 2 3 2 3 4 3 4 5} + +do_test join-1.3 { + execsql2 { + SELECT * FROM t1 NATURAL JOIN t2; + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test join-1.3.1 { + execsql2 { + SELECT * FROM t2 NATURAL JOIN t1; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test join-1.3.2 { + execsql2 { + SELECT * FROM t2 AS x NATURAL JOIN t1; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test join-1.3.3 { + execsql2 { + SELECT * FROM t2 NATURAL JOIN t1 AS y; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test join-1.3.4 { + execsql { + SELECT b FROM t1 NATURAL JOIN t2; + } +} {2 3} +do_test join-1.4.1 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test join-1.4.2 { + execsql2 { + SELECT * FROM t1 AS x INNER JOIN t2 USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test join-1.4.3 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 AS y USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test join-1.4.4 { + execsql2 { + SELECT * FROM t1 AS x INNER JOIN t2 AS y USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test join-1.4.5 { + execsql { + SELECT b FROM t1 JOIN t2 USING(b); + } +} {2 3} +do_test join-1.5 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(b); + } +} {a 1 b 2 c 3 c 3 d 4 a 2 b 3 c 4 c 4 d 5} +do_test join-1.6 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(c); + } +} {a 1 b 2 c 3 b 2 d 4 a 2 b 3 c 4 b 3 d 5} +do_test join-1.7 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(c,b); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} + +do_test join-1.8 { + execsql { + SELECT * FROM t1 NATURAL CROSS JOIN t2; + } +} {1 2 3 4 2 3 4 5} +do_test join-1.9 { + execsql { + SELECT * FROM t1 CROSS JOIN t2 USING(b,c); + } +} {1 2 3 4 2 3 4 5} +do_test join-1.10 { + execsql { + SELECT * FROM t1 NATURAL INNER JOIN t2; + } +} {1 2 3 4 2 3 4 5} +do_test join-1.11 { + execsql { + SELECT * FROM t1 INNER JOIN t2 USING(b,c); + } +} {1 2 3 4 2 3 4 5} +do_test join-1.12 { + execsql { + SELECT * FROM t1 natural inner join t2; + } +} {1 2 3 4 2 3 4 5} + +ifcapable subquery { + do_test join-1.13 { + execsql2 { + SELECT * FROM t1 NATURAL JOIN + (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as t3 + } + } {a 1 b 2 c 3 d 4 e 5} + do_test join-1.14 { + execsql2 { + SELECT * FROM (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as 'tx' + NATURAL JOIN t1 + } + } {c 3 d 4 e 5 a 1 b 2} +} + +do_test join-1.15 { + execsql { + CREATE TABLE t3(c,d,e); + INSERT INTO t3 VALUES(2,3,4); + INSERT INTO t3 VALUES(3,4,5); + INSERT INTO t3 VALUES(4,5,6); + SELECT * FROM t3; + } +} {2 3 4 3 4 5 4 5 6} +do_test join-1.16 { + execsql { + SELECT * FROM t1 natural join t2 natural join t3; + } +} {1 2 3 4 5 2 3 4 5 6} +do_test join-1.17 { + execsql2 { + SELECT * FROM t1 natural join t2 natural join t3; + } +} {a 1 b 2 c 3 d 4 e 5 a 2 b 3 c 4 d 5 e 6} +do_test join-1.18 { + execsql { + CREATE TABLE t4(d,e,f); + INSERT INTO t4 VALUES(2,3,4); + INSERT INTO t4 VALUES(3,4,5); + INSERT INTO t4 VALUES(4,5,6); + SELECT * FROM t4; + } +} {2 3 4 3 4 5 4 5 6} +do_test join-1.19.1 { + 
execsql { + SELECT * FROM t1 natural join t2 natural join t4; + } +} {1 2 3 4 5 6} +do_test join-1.19.2 { + execsql2 { + SELECT * FROM t1 natural join t2 natural join t4; + } +} {a 1 b 2 c 3 d 4 e 5 f 6} +do_test join-1.20 { + execsql { + SELECT * FROM t1 natural join t2 natural join t3 WHERE t1.a=1 + } +} {1 2 3 4 5} + +do_test join-2.1 { + execsql { + SELECT * FROM t1 NATURAL LEFT JOIN t2; + } +} {1 2 3 4 2 3 4 5 3 4 5 {}} +do_test join-2.2 { + execsql { + SELECT * FROM t2 NATURAL LEFT OUTER JOIN t1; + } +} {1 2 3 {} 2 3 4 1 3 4 5 2} +do_test join-2.3 { + catchsql { + SELECT * FROM t1 NATURAL RIGHT OUTER JOIN t2; + } +} {1 {RIGHT and FULL OUTER JOINs are not currently supported}} +do_test join-2.4 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d + } +} {1 2 3 {} {} {} 2 3 4 {} {} {} 3 4 5 1 2 3} +do_test join-2.5 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t1.a>1 + } +} {2 3 4 {} {} {} 3 4 5 1 2 3} +do_test join-2.6 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t2.b IS NULL OR t2.b>1 + } +} {1 2 3 {} {} {} 2 3 4 {} {} {}} + +do_test join-3.1 { + catchsql { + SELECT * FROM t1 NATURAL JOIN t2 ON t1.a=t2.b; + } +} {1 {a NATURAL join may not have an ON or USING clause}} +do_test join-3.2 { + catchsql { + SELECT * FROM t1 NATURAL JOIN t2 USING(b); + } +} {1 {a NATURAL join may not have an ON or USING clause}} +do_test join-3.3 { + catchsql { + SELECT * FROM t1 JOIN t2 ON t1.a=t2.b USING(b); + } +} {1 {cannot have both ON and USING clauses in the same join}} +do_test join-3.4 { + catchsql { + SELECT * FROM t1 JOIN t2 USING(a); + } +} {1 {cannot join using column a - column not present in both tables}} +do_test join-3.5 { + catchsql { + SELECT * FROM t1 USING(a); + } +} {0 {1 2 3 2 3 4 3 4 5}} +do_test join-3.6 { + catchsql { + SELECT * FROM t1 JOIN t2 ON t3.a=t2.b; + } +} {1 {no such column: t3.a}} +do_test join-3.7 { + catchsql { + SELECT * FROM t1 INNER OUTER JOIN t2; + } +} {1 {unknown or unsupported join type: INNER OUTER}} +do_test join-3.7 { + catchsql { + SELECT * FROM t1 LEFT BOGUS JOIN t2; + } +} {1 {unknown or unsupported join type: LEFT BOGUS}} + +do_test join-4.1 { + execsql { + BEGIN; + CREATE TABLE t5(a INTEGER PRIMARY KEY); + CREATE TABLE t6(a INTEGER); + INSERT INTO t6 VALUES(NULL); + INSERT INTO t6 VALUES(NULL); + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + COMMIT; + } + execsql { + SELECT * FROM t6 NATURAL JOIN t5; + } +} {} +do_test join-4.2 { + execsql { + SELECT * FROM t6, t5 WHERE t6.at5.a; + } +} {} +do_test join-4.4 { + execsql { + UPDATE t6 SET a='xyz'; + SELECT * FROM t6 NATURAL JOIN t5; + } +} {} +do_test join-4.6 { + execsql { + SELECT * FROM t6, t5 WHERE t6.at5.a; + } +} {} +do_test join-4.8 { + execsql { + UPDATE t6 SET a=1; + SELECT * FROM t6 NATURAL JOIN t5; + } +} {} +do_test join-4.9 { + execsql { + SELECT * FROM t6, t5 WHERE t6.at5.a; + } +} {} + +do_test join-5.1 { + execsql { + BEGIN; + create table centros (id integer primary key, centro); + INSERT INTO centros VALUES(1,'xxx'); + create table usuarios (id integer primary key, nombre, apellidos, + idcentro integer); + INSERT INTO usuarios VALUES(1,'a','aa',1); + INSERT INTO usuarios VALUES(2,'b','bb',1); + INSERT INTO usuarios VALUES(3,'c','cc',NULL); + create index idcentro on usuarios (idcentro); + END; + select usuarios.id, usuarios.nombre, centros.centro from + usuarios left outer join 
centros on usuarios.idcentro = centros.id; + } +} {1 a xxx 2 b xxx 3 c {}} + +# A test for ticket #247. +# +do_test join-7.1 { + execsql { + CREATE TABLE t7 (x, y); + INSERT INTO t7 VALUES ("pa1", 1); + INSERT INTO t7 VALUES ("pa2", NULL); + INSERT INTO t7 VALUES ("pa3", NULL); + INSERT INTO t7 VALUES ("pa4", 2); + INSERT INTO t7 VALUES ("pa30", 131); + INSERT INTO t7 VALUES ("pa31", 130); + INSERT INTO t7 VALUES ("pa28", NULL); + + CREATE TABLE t8 (a integer primary key, b); + INSERT INTO t8 VALUES (1, "pa1"); + INSERT INTO t8 VALUES (2, "pa4"); + INSERT INTO t8 VALUES (3, NULL); + INSERT INTO t8 VALUES (4, NULL); + INSERT INTO t8 VALUES (130, "pa31"); + INSERT INTO t8 VALUES (131, "pa30"); + + SELECT coalesce(t8.a,999) from t7 LEFT JOIN t8 on y=a; + } +} {1 999 999 2 131 130 999} + +# Make sure a left join where the right table is really a view that +# is itself a join works right. Ticket #306. +# +ifcapable view { +do_test join-8.1 { + execsql { + BEGIN; + CREATE TABLE t9(a INTEGER PRIMARY KEY, b); + INSERT INTO t9 VALUES(1,11); + INSERT INTO t9 VALUES(2,22); + CREATE TABLE t10(x INTEGER PRIMARY KEY, y); + INSERT INTO t10 VALUES(1,2); + INSERT INTO t10 VALUES(3,3); + CREATE TABLE t11(p INTEGER PRIMARY KEY, q); + INSERT INTO t11 VALUES(2,111); + INSERT INTO t11 VALUES(3,333); + CREATE VIEW v10_11 AS SELECT x, q FROM t10, t11 WHERE t10.y=t11.p; + COMMIT; + SELECT * FROM t9 LEFT JOIN v10_11 ON( a=x ); + } +} {1 11 1 111 2 22 {} {}} +ifcapable subquery { + do_test join-8.2 { + execsql { + SELECT * FROM t9 LEFT JOIN (SELECT x, q FROM t10, t11 WHERE t10.y=t11.p) + ON( a=x); + } + } {1 11 1 111 2 22 {} {}} +} +do_test join-8.3 { + execsql { + SELECT * FROM v10_11 LEFT JOIN t9 ON( a=x ); + } +} {1 111 1 11 3 333 {} {}} +} ;# ifcapable view + +# Ticket #350 describes a scenario where LEFT OUTER JOIN does not +# function correctly if the right table in the join is really +# subquery. +# +# To test the problem, we generate the same LEFT OUTER JOIN in two +# separate selects but with on using a subquery and the other calling +# the table directly. Then connect the two SELECTs using an EXCEPT. +# Both queries should generate the same results so the answer should +# be an empty set. +# +ifcapable compound { +do_test join-9.1 { + execsql { + BEGIN; + CREATE TABLE t12(a,b); + INSERT INTO t12 VALUES(1,11); + INSERT INTO t12 VALUES(2,22); + CREATE TABLE t13(b,c); + INSERT INTO t13 VALUES(22,222); + COMMIT; + } +} {} + +ifcapable subquery { + do_test join-9.1.1 { + execsql { + SELECT * FROM t12 NATURAL LEFT JOIN t13 + EXCEPT + SELECT * FROM t12 NATURAL LEFT JOIN (SELECT * FROM t13 WHERE b>0); + } + } {} +} +ifcapable view { + do_test join-9.2 { + execsql { + CREATE VIEW v13 AS SELECT * FROM t13 WHERE b>0; + SELECT * FROM t12 NATURAL LEFT JOIN t13 + EXCEPT + SELECT * FROM t12 NATURAL LEFT JOIN v13; + } + } {} +} ;# ifcapable view +} ;# ifcapable compound + +# Ticket #1697: Left Join WHERE clause terms that contain an +# aggregate subquery. 
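+#
+# (Several of the LEFT JOIN cases in this file, and the join4/join5 scripts
+# added later in this patch, turn on the distinction between a restriction
+# placed in the ON clause of a LEFT JOIN - which only limits which
+# right-hand rows may match - and the same restriction in the WHERE clause,
+# which filters the joined result and so discards the NULL-padded rows. A
+# small standalone illustration, with made-up table and handle names, not
+# run as part of this script:)
+#
+#   package require sqlite3
+#   sqlite3 demo :memory:
+#   demo eval {
+#     CREATE TABLE a(x);       INSERT INTO a VALUES(1); INSERT INTO a VALUES(2);
+#     CREATE TABLE b(x, tag);  INSERT INTO b VALUES(1, 'ok');
+#   }
+#   puts [demo eval { SELECT * FROM a LEFT JOIN b ON a.x=b.x AND b.tag='ok' }]
+#   # => 1 1 ok 2 {} {}   (the unmatched left row survives, NULL-padded)
+#   puts [demo eval { SELECT * FROM a LEFT JOIN b ON a.x=b.x WHERE b.tag='ok' }]
+#   # => 1 1 ok           (the WHERE clause removes the NULL-padded row)
+#   demo close
+#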
+# +ifcapable subquery { +do_test join-10.1 { + execsql { + CREATE TABLE t21(a,b,c); + CREATE TABLE t22(p,q); + CREATE INDEX i22 ON t22(q); + SELECT a FROM t21 LEFT JOIN t22 ON b=p WHERE q= + (SELECT max(m.q) FROM t22 m JOIN t21 n ON n.b=m.p WHERE n.c=1); + } +} {} +} ;# ifcapable subquery + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/join2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/join2.test new file mode 100644 index 0000000..0f558c5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/join2.test @@ -0,0 +1,75 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for joins, including outer joins. +# +# $Id: join2.test,v 1.2 2005/01/21 03:12:16 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test join2-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,11); + INSERT INTO t1 VALUES(2,22); + INSERT INTO t1 VALUES(3,33); + SELECT * FROM t1; + } +} {1 11 2 22 3 33} +do_test join2-1.2 { + execsql { + CREATE TABLE t2(b,c); + INSERT INTO t2 VALUES(11,111); + INSERT INTO t2 VALUES(33,333); + INSERT INTO t2 VALUES(44,444); + SELECT * FROM t2; + } +} {11 111 33 333 44 444}; +do_test join2-1.3 { + execsql { + CREATE TABLE t3(c,d); + INSERT INTO t3 VALUES(111,1111); + INSERT INTO t3 VALUES(444,4444); + INSERT INTO t3 VALUES(555,5555); + SELECT * FROM t3; + } +} {111 1111 444 4444 555 5555} + +do_test join2-1.4 { + execsql { + SELECT * FROM + t1 NATURAL JOIN t2 NATURAL JOIN t3 + } +} {1 11 111 1111} +do_test join2-1.5 { + execsql { + SELECT * FROM + t1 NATURAL JOIN t2 NATURAL LEFT OUTER JOIN t3 + } +} {1 11 111 1111 3 33 333 {}} +do_test join2-1.6 { + execsql { + SELECT * FROM + t1 NATURAL LEFT OUTER JOIN t2 NATURAL JOIN t3 + } +} {1 11 111 1111} +ifcapable subquery { + do_test join2-1.7 { + execsql { + SELECT * FROM + t1 NATURAL LEFT OUTER JOIN (t2 NATURAL JOIN t3) + } + } {1 11 111 1111 2 22 {} {} 3 33 {} {}} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/join3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/join3.test new file mode 100644 index 0000000..f1c273d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/join3.test @@ -0,0 +1,62 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for joins, including outer joins, where +# there are a large number of tables involved in the join. 
+# +# $Id: join3.test,v 1.4 2005/01/19 23:24:51 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# An unrestricted join +# +catch {unset ::result} +set result {} +for {set N 1} {$N<=$bitmask_size} {incr N} { + lappend result $N + do_test join3-1.$N { + execsql "CREATE TABLE t${N}(x);" + execsql "INSERT INTO t$N VALUES($N)" + set sql "SELECT * FROM t1" + for {set i 2} {$i<=$N} {incr i} {append sql ", t$i"} + execsql $sql + } $result +} + +# Joins with a comparison +# +set result {} +for {set N 1} {$N<=$bitmask_size} {incr N} { + lappend result $N + do_test join3-2.$N { + set sql "SELECT * FROM t1" + for {set i 2} {$i<=$N} {incr i} {append sql ", t$i"} + set sep WHERE + for {set i 1} {$i<$N} {incr i} { + append sql " $sep t[expr {$i+1}].x==t$i.x+1" + set sep AND + } + execsql $sql + } $result +} + +# Error of too many tables in the join +# +do_test join3-3.1 { + set sql "SELECT * FROM t1 AS t0, t1" + for {set i 2} {$i<=$bitmask_size} {incr i} {append sql ", t$i"} + catchsql $sql +} [list 1 "at most $bitmask_size tables in a join"] + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/join4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/join4.test new file mode 100644 index 0000000..77db25f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/join4.test @@ -0,0 +1,98 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for left outer joins containing WHERE +# clauses that restrict the scope of the left term of the join. 
+# +# $Id: join4.test,v 1.4 2005/03/29 03:11:00 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable tempdb { + do_test join4-1.1 { + execsql { + create temp table t1(a integer, b varchar(10)); + insert into t1 values(1,'one'); + insert into t1 values(2,'two'); + insert into t1 values(3,'three'); + insert into t1 values(4,'four'); + + create temp table t2(x integer, y varchar(10), z varchar(10)); + insert into t2 values(2,'niban','ok'); + insert into t2 values(4,'yonban','err'); + } + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok' + } + } {2 two 2 niban ok} +} else { + do_test join4-1.1 { + execsql { + create table t1(a integer, b varchar(10)); + insert into t1 values(1,'one'); + insert into t1 values(2,'two'); + insert into t1 values(3,'three'); + insert into t1 values(4,'four'); + + create table t2(x integer, y varchar(10), z varchar(10)); + insert into t2 values(2,'niban','ok'); + insert into t2 values(4,'yonban','err'); + } + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok' + } + } {2 two 2 niban ok} +} +do_test join4-1.2 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +do_test join4-1.3 { + execsql { + create index i2 on t2(z); + } + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok' + } +} {2 two 2 niban ok} +do_test join4-1.4 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +do_test join4-1.5 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z>='ok' + } +} {2 two 2 niban ok} +do_test join4-1.4 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z>='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +ifcapable subquery { + do_test join4-1.6 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z IN ('ok') + } + } {2 two 2 niban ok} + do_test join4-1.7 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z IN ('ok') + } + } {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/join5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/join5.test new file mode 100644 index 0000000..45d8a31 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/join5.test @@ -0,0 +1,110 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for left outer joins containing ON +# clauses that restrict the scope of the left term of the join. 
+# +# $Id: join5.test,v 1.2 2007/06/08 00:20:48 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +do_test join5-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a integer primary key, b integer, c integer); + CREATE TABLE t2(x integer primary key, y); + CREATE TABLE t3(p integer primary key, q); + INSERT INTO t3 VALUES(11,'t3-11'); + INSERT INTO t3 VALUES(12,'t3-12'); + INSERT INTO t2 VALUES(11,'t2-11'); + INSERT INTO t2 VALUES(12,'t2-12'); + INSERT INTO t1 VALUES(1, 5, 0); + INSERT INTO t1 VALUES(2, 11, 2); + INSERT INTO t1 VALUES(3, 12, 1); + COMMIT; + } +} {} +do_test join5-1.2 { + execsql { + select * from t1 left join t2 on t1.b=t2.x and t1.c=1 + } +} {1 5 0 {} {} 2 11 2 {} {} 3 12 1 12 t2-12} +do_test join5-1.3 { + execsql { + select * from t1 left join t2 on t1.b=t2.x where t1.c=1 + } +} {3 12 1 12 t2-12} +do_test join5-1.4 { + execsql { + select * from t1 left join t2 on t1.b=t2.x and t1.c=1 + left join t3 on t1.b=t3.p and t1.c=2 + } +} {1 5 0 {} {} {} {} 2 11 2 {} {} 11 t3-11 3 12 1 12 t2-12 {} {}} +do_test join5-1.5 { + execsql { + select * from t1 left join t2 on t1.b=t2.x and t1.c=1 + left join t3 on t1.b=t3.p where t1.c=2 + } +} {2 11 2 {} {} 11 t3-11} + +# Ticket #2403 +# +do_test join5-2.1 { + execsql { + CREATE TABLE ab(a,b); + INSERT INTO "ab" VALUES(1,2); + INSERT INTO "ab" VALUES(3,NULL); + + CREATE TABLE xy(x,y); + INSERT INTO "xy" VALUES(2,3); + INSERT INTO "xy" VALUES(NULL,1); + } + execsql {SELECT * FROM xy LEFT JOIN ab ON 0} +} {2 3 {} {} {} 1 {} {}} +do_test join5-2.2 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 1} +} {2 3 1 2 2 3 3 {} {} 1 1 2 {} 1 3 {}} +do_test join5-2.3 { + execsql {SELECT * FROM xy LEFT JOIN ab ON NULL} +} {2 3 {} {} {} 1 {} {}} +do_test join5-2.4 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 0 WHERE 0} +} {} +do_test join5-2.5 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 1 WHERE 0} +} {} +do_test join5-2.6 { + execsql {SELECT * FROM xy LEFT JOIN ab ON NULL WHERE 0} +} {} +do_test join5-2.7 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 0 WHERE 1} +} {2 3 {} {} {} 1 {} {}} +do_test join5-2.8 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 1 WHERE 1} +} {2 3 1 2 2 3 3 {} {} 1 1 2 {} 1 3 {}} +do_test join5-2.9 { + execsql {SELECT * FROM xy LEFT JOIN ab ON NULL WHERE 1} +} {2 3 {} {} {} 1 {} {}} +do_test join5-2.10 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 0 WHERE NULL} +} {} +do_test join5-2.11 { + execsql {SELECT * FROM xy LEFT JOIN ab ON 1 WHERE NULL} +} {} +do_test join5-2.12 { + execsql {SELECT * FROM xy LEFT JOIN ab ON NULL WHERE NULL} +} {} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/journal1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/journal1.test new file mode 100644 index 0000000..a1b17b4 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/journal1.test @@ -0,0 +1,67 @@ +# 2005 March 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure that leftover journals from +# prior databases do not try to rollback into new databases. 
+# +# $Id: journal1.test,v 1.2 2005/03/20 22:54:56 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# These tests will not work on windows because windows uses +# manditory file locking which breaks the file copy command. +# +if {$tcl_platform(platform)=="windows"} { + finish_test + return +} + +# Create a smaple database +# +do_test journal1-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,randstr(10,400)); + INSERT INTO t1 VALUES(2,randstr(10,400)); + INSERT INTO t1 SELECT a+2, a||b FROM t1; + INSERT INTO t1 SELECT a+4, a||b FROM t1; + SELECT count(*) FROM t1; + } +} 8 + +# Make changes to the database and save the journal file. +# Then delete the database. Replace the the journal file +# and try to create a new database with the same name. The +# old journal should not attempt to rollback into the new +# database. +# +do_test journal1-1.2 { + execsql { + BEGIN; + DELETE FROM t1; + } + file copy -force test.db-journal test.db-journal-bu + execsql { + ROLLBACK; + } + db close + file delete test.db + file copy test.db-journal-bu test.db-journal + sqlite3 db test.db + catchsql { + SELECT * FROM sqlite_master + } +} {0 {}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/lastinsert.test b/libraries/sqlite/unix/sqlite-3.5.1/test/lastinsert.test new file mode 100644 index 0000000..adeb798 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/lastinsert.test @@ -0,0 +1,366 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests to make sure that value returned by last_insert_rowid() (LIRID) +# is updated properly, especially inside triggers +# +# Note 1: insert into table is now the only statement which changes LIRID +# Note 2: upon entry into before or instead of triggers, +# LIRID is unchanged (rather than -1) +# Note 3: LIRID is changed within the context of a trigger, +# but is restored once the trigger exits +# Note 4: LIRID is not changed by an insert into a view (since everything +# is done within instead of trigger context) +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# ---------------------------------------------------------------------------- +# 1.x - basic tests (no triggers) + +# LIRID changed properly after an insert into a table +do_test lastinsert-1.1 { + catchsql { + create table t1 (k integer primary key); + insert into t1 values (1); + insert into t1 values (NULL); + insert into t1 values (NULL); + select last_insert_rowid(); + } +} {0 3} + +# LIRID unchanged after an update on a table +do_test lastinsert-1.2 { + catchsql { + update t1 set k=4 where k=2; + select last_insert_rowid(); + } +} {0 3} + +# LIRID unchanged after a delete from a table +do_test lastinsert-1.3 { + catchsql { + delete from t1 where k=4; + select last_insert_rowid(); + } +} {0 3} + +# LIRID unchanged after create table/view statements +do_test lastinsert-1.4.1 { + catchsql { + create table t2 (k integer primary key, val1, val2, val3); + select last_insert_rowid(); + } +} {0 3} +ifcapable view { +do_test lastinsert-1.4.2 { + catchsql { + create view v as select * from t1; + select last_insert_rowid(); + } +} {0 3} +} ;# ifcapable view + +# All remaining tests involve triggers. 
Skip them if triggers are not +# supported in this build. +# +ifcapable {!trigger} { + finish_test + return +} + +# ---------------------------------------------------------------------------- +# 2.x - tests with after insert trigger + +# LIRID changed properly after an insert into table containing an after trigger +do_test lastinsert-2.1 { + catchsql { + delete from t2; + create trigger r1 after insert on t1 for each row begin + insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL); + update t2 set k=k+10, val2=100+last_insert_rowid(); + update t2 set val3=1000+last_insert_rowid(); + end; + insert into t1 values (13); + select last_insert_rowid(); + } +} {0 13} + +# LIRID equals NEW.k upon entry into after insert trigger +do_test lastinsert-2.2 { + catchsql { + select val1 from t2; + } +} {0 13} + +# LIRID changed properly by insert within context of after insert trigger +do_test lastinsert-2.3 { + catchsql { + select val2 from t2; + } +} {0 126} + +# LIRID unchanged by update within context of after insert trigger +do_test lastinsert-2.4 { + catchsql { + select val3 from t2; + } +} {0 1026} + +# ---------------------------------------------------------------------------- +# 3.x - tests with after update trigger + +# LIRID not changed after an update onto a table containing an after trigger +do_test lastinsert-3.1 { + catchsql { + delete from t2; + drop trigger r1; + create trigger r1 after update on t1 for each row begin + insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL); + update t2 set k=k+10, val2=100+last_insert_rowid(); + update t2 set val3=1000+last_insert_rowid(); + end; + update t1 set k=14 where k=3; + select last_insert_rowid(); + } +} {0 13} + +# LIRID unchanged upon entry into after update trigger +do_test lastinsert-3.2 { + catchsql { + select val1 from t2; + } +} {0 13} + +# LIRID changed properly by insert within context of after update trigger +do_test lastinsert-3.3 { + catchsql { + select val2 from t2; + } +} {0 128} + +# LIRID unchanged by update within context of after update trigger +do_test lastinsert-3.4 { + catchsql { + select val3 from t2; + } +} {0 1028} + +# ---------------------------------------------------------------------------- +# 4.x - tests with instead of insert trigger +# These may not be run if either views or triggers were disabled at +# compile-time + +ifcapable {view && trigger} { +# LIRID not changed after an insert into view containing an instead of trigger +do_test lastinsert-4.1 { + catchsql { + delete from t2; + drop trigger r1; + create trigger r1 instead of insert on v for each row begin + insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL); + update t2 set k=k+10, val2=100+last_insert_rowid(); + update t2 set val3=1000+last_insert_rowid(); + end; + insert into v values (15); + select last_insert_rowid(); + } +} {0 13} + +# LIRID unchanged upon entry into instead of trigger +do_test lastinsert-4.2 { + catchsql { + select val1 from t2; + } +} {0 13} + +# LIRID changed properly by insert within context of instead of trigger +do_test lastinsert-4.3 { + catchsql { + select val2 from t2; + } +} {0 130} + +# LIRID unchanged by update within context of instead of trigger +do_test lastinsert-4.4 { + catchsql { + select val3 from t2; + } +} {0 1030} +} ;# ifcapable (view && trigger) + +# ---------------------------------------------------------------------------- +# 5.x - tests with before delete trigger + +# LIRID not changed after a delete on a table containing a before trigger +do_test lastinsert-5.1 { + 
catchsql { + drop trigger r1; -- This was not created if views are disabled. + } + catchsql { + delete from t2; + create trigger r1 before delete on t1 for each row begin + insert into t2 values (77, last_insert_rowid(), NULL, NULL); + update t2 set k=k+10, val2=100+last_insert_rowid(); + update t2 set val3=1000+last_insert_rowid(); + end; + delete from t1 where k=1; + select last_insert_rowid(); + } +} {0 13} + +# LIRID unchanged upon entry into delete trigger +do_test lastinsert-5.2 { + catchsql { + select val1 from t2; + } +} {0 13} + +# LIRID changed properly by insert within context of delete trigger +do_test lastinsert-5.3 { + catchsql { + select val2 from t2; + } +} {0 177} + +# LIRID unchanged by update within context of delete trigger +do_test lastinsert-5.4 { + catchsql { + select val3 from t2; + } +} {0 1077} + +# ---------------------------------------------------------------------------- +# 6.x - tests with instead of update trigger +# These tests may not run if either views or triggers are disabled. + +ifcapable {view && trigger} { +# LIRID not changed after an update on a view containing an instead of trigger +do_test lastinsert-6.1 { + catchsql { + delete from t2; + drop trigger r1; + create trigger r1 instead of update on v for each row begin + insert into t2 values (NEW.k*2, last_insert_rowid(), NULL, NULL); + update t2 set k=k+10, val2=100+last_insert_rowid(); + update t2 set val3=1000+last_insert_rowid(); + end; + update v set k=16 where k=14; + select last_insert_rowid(); + } +} {0 13} + +# LIRID unchanged upon entry into instead of trigger +do_test lastinsert-6.2 { + catchsql { + select val1 from t2; + } +} {0 13} + +# LIRID changed properly by insert within context of instead of trigger +do_test lastinsert-6.3 { + catchsql { + select val2 from t2; + } +} {0 132} + +# LIRID unchanged by update within context of instead of trigger +do_test lastinsert-6.4 { + catchsql { + select val3 from t2; + } +} {0 1032} +} ;# ifcapable (view && trigger) + +# ---------------------------------------------------------------------------- +# 7.x - complex tests with temporary tables and nested instead of triggers +# These do not run if views or triggers are disabled. 
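+#
+# (The behaviour exercised throughout this file can be reproduced outside
+# the harness: last_insert_rowid() holds the rowid of the new row on entry
+# to an AFTER INSERT trigger, is updated by inserts made inside the
+# trigger, and is restored to the outer statement's value when the trigger
+# exits. A self-contained sketch with illustrative names, not run as part
+# of this script:)
+#
+#   package require sqlite3
+#   sqlite3 demo :memory:
+#   demo eval {
+#     CREATE TABLE main_t(k INTEGER PRIMARY KEY);
+#     CREATE TABLE log(k INTEGER PRIMARY KEY, rid_in, rid_after);
+#     CREATE TRIGGER tr AFTER INSERT ON main_t BEGIN
+#       INSERT INTO log VALUES(NULL, last_insert_rowid(), NULL); -- sees 500
+#       UPDATE log SET rid_after = last_insert_rowid();          -- sees 1 (the log row)
+#     END;
+#     INSERT INTO main_t VALUES(500);
+#   }
+#   puts [demo eval { SELECT rid_in, rid_after FROM log }]  ;# 500 1
+#   puts [demo last_insert_rowid]    ;# 500 - restored after the trigger exits
+#   demo close
+#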
+ +ifcapable {trigger && view && tempdb} { +do_test lastinsert-7.1 { + catchsql { + drop table t1; drop table t2; drop trigger r1; + create temp table t1 (k integer primary key); + create temp table t2 (k integer primary key); + create temp view v1 as select * from t1; + create temp view v2 as select * from t2; + create temp table rid (k integer primary key, rin, rout); + insert into rid values (1, NULL, NULL); + insert into rid values (2, NULL, NULL); + create temp trigger r1 instead of insert on v1 for each row begin + update rid set rin=last_insert_rowid() where k=1; + insert into t1 values (100+NEW.k); + insert into v2 values (100+last_insert_rowid()); + update rid set rout=last_insert_rowid() where k=1; + end; + create temp trigger r2 instead of insert on v2 for each row begin + update rid set rin=last_insert_rowid() where k=2; + insert into t2 values (1000+NEW.k); + update rid set rout=last_insert_rowid() where k=2; + end; + insert into t1 values (77); + select last_insert_rowid(); + } +} {0 77} + +do_test lastinsert-7.2 { + catchsql { + insert into v1 values (5); + select last_insert_rowid(); + } +} {0 77} + +do_test lastinsert-7.3 { + catchsql { + select rin from rid where k=1; + } +} {0 77} + +do_test lastinsert-7.4 { + catchsql { + select rout from rid where k=1; + } +} {0 105} + +do_test lastinsert-7.5 { + catchsql { + select rin from rid where k=2; + } +} {0 105} + +do_test lastinsert-7.6 { + catchsql { + select rout from rid where k=2; + } +} {0 1205} + +do_test lastinsert-8.1 { + db close + sqlite3 db test.db + execsql { + CREATE TABLE t2(x INTEGER PRIMARY KEY, y); + CREATE TABLE t3(a, b); + CREATE TRIGGER after_t2 AFTER INSERT ON t2 BEGIN + INSERT INTO t3 VALUES(new.x, new.y); + END; + INSERT INTO t2 VALUES(5000000000, 1); + SELECT last_insert_rowid(); + } +} 5000000000 + +do_test lastinsert-9.1 { + db eval {INSERT INTO t2 VALUES(123456789012345,0)} + db last_insert_rowid +} {123456789012345} + + +} ;# ifcapable (view && trigger) + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/laststmtchanges.test b/libraries/sqlite/unix/sqlite-3.5.1/test/laststmtchanges.test new file mode 100644 index 0000000..13bb5fa --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/laststmtchanges.test @@ -0,0 +1,281 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests to make sure that values returned by changes() and total_changes() +# are updated properly, especially inside triggers +# +# Note 1: changes() remains constant within a statement and only updates +# once the statement is finished (triggers count as part of +# statement). +# Note 2: changes() is changed within the context of a trigger much like +# last_insert_rowid() (see lastinsert.test), but is restored once +# the trigger exits. +# Note 3: changes() is not changed by a change to a view (since everything +# is done within instead of trigger context). 
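+#
+# (A compact standalone illustration of the two counters tracked below:
+# changes() reports the rows written by the most recently completed INSERT,
+# UPDATE or DELETE, while total_changes() accumulates them over the life of
+# the connection; DDL such as CREATE TABLE is not counted by either. The
+# handle name is illustrative and the snippet is not run by this script:)
+#
+#   package require sqlite3
+#   sqlite3 demo :memory:
+#   demo eval {
+#     CREATE TABLE t(x);
+#     INSERT INTO t VALUES(1);
+#     INSERT INTO t VALUES(2);
+#     UPDATE t SET x = x+1;       -- writes two rows
+#   }
+#   puts [demo eval { SELECT changes(), total_changes() }]   ;# 2 4
+#   demo close
+#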
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# ---------------------------------------------------------------------------- +# 1.x - basic tests (no triggers) + +# changes() set properly after insert +do_test laststmtchanges-1.1 { + catchsql { + create table t0 (x); + insert into t0 values (1); + insert into t0 values (1); + insert into t0 values (2); + insert into t0 values (2); + insert into t0 values (1); + insert into t0 values (1); + insert into t0 values (1); + insert into t0 values (2); + select changes(), total_changes(); + } +} {0 {1 8}} + +# changes() set properly after update +do_test laststmtchanges-1.2 { + catchsql { + update t0 set x=3 where x=1; + select changes(), total_changes(); + } +} {0 {5 13}} + +# There was some goofy change-counting logic in sqlite3_exec() that +# appears to have been left over from SQLite version 2. This test +# makes sure it has been removed. +# +do_test laststmtchanges-1.2.1 { + db cache flush + sqlite3_exec_printf db {update t0 set x=4 where x=3; select 1;} {} + execsql {select changes()} +} {5} + +# changes() unchanged within an update statement +do_test laststmtchanges-1.3 { + execsql {update t0 set x=3 where x=4} + catchsql { + update t0 set x=x+changes() where x=3; + select count() from t0 where x=8; + } +} {0 5} + +# changes() set properly after update on table where no rows changed +do_test laststmtchanges-1.4 { + catchsql { + update t0 set x=77 where x=88; + select changes(); + } +} {0 0} + +# changes() set properly after delete from table +do_test laststmtchanges-1.5 { + catchsql { + delete from t0 where x=2; + select changes(); + } +} {0 3} + +# All remaining tests involve triggers. Skip them if triggers are not +# supported in this build. +# +ifcapable {!trigger} { + finish_test + return +} + + +# ---------------------------------------------------------------------------- +# 2.x - tests with after insert trigger + +# changes() changed properly after insert into table containing after trigger +do_test laststmtchanges-2.1 { + set ::tc [db total_changes] + catchsql { + create table t1 (k integer primary key); + create table t2 (k integer primary key, v1, v2); + create trigger r1 after insert on t1 for each row begin + insert into t2 values (NULL, changes(), NULL); + update t0 set x=x; + update t2 set v2=changes(); + end; + insert into t1 values (77); + select changes(); + } +} {0 1} + +# changes() unchanged upon entry into after insert trigger +do_test laststmtchanges-2.2 { + catchsql { + select v1 from t2; + } +} {0 3} + +# changes() changed properly by update within context of after insert trigger +do_test laststmtchanges-2.3 { + catchsql { + select v2 from t2; + } +} {0 5} + +# Total changes caused by firing the trigger above: +# +# 1 from "insert into t1 values(77)" + +# 1 from "insert into t2 values (NULL, changes(), NULL);" + +# 5 from "update t0 set x=x;" + +# 1 from "update t2 set v2=changes();" +# +do_test laststmtchanges-2.4 { + expr [db total_changes] - $::tc +} {8} + +# ---------------------------------------------------------------------------- +# 3.x - tests with after update trigger + +# changes() changed properly after update into table containing after trigger +do_test laststmtchanges-3.1 { + catchsql { + drop trigger r1; + delete from t2; delete from t2; + create trigger r1 after update on t1 for each row begin + insert into t2 values (NULL, changes(), NULL); + delete from t0 where oid=1 or oid=2; + update t2 set v2=changes(); + end; + update t1 set k=k; + select changes(); + } +} {0 1} + +# 
changes() unchanged upon entry into after update trigger +do_test laststmtchanges-3.2 { + catchsql { + select v1 from t2; + } +} {0 0} + +# changes() changed properly by delete within context of after update trigger +do_test laststmtchanges-3.3 { + catchsql { + select v2 from t2; + } +} {0 2} + +# ---------------------------------------------------------------------------- +# 4.x - tests with before delete trigger + +# changes() changed properly on delete from table containing before trigger +do_test laststmtchanges-4.1 { + catchsql { + drop trigger r1; + delete from t2; delete from t2; + create trigger r1 before delete on t1 for each row begin + insert into t2 values (NULL, changes(), NULL); + insert into t0 values (5); + update t2 set v2=changes(); + end; + delete from t1; + select changes(); + } +} {0 1} + +# changes() unchanged upon entry into before delete trigger +do_test laststmtchanges-4.2 { + catchsql { + select v1 from t2; + } +} {0 0} + +# changes() changed properly by insert within context of before delete trigger +do_test laststmtchanges-4.3 { + catchsql { + select v2 from t2; + } +} {0 1} + +# ---------------------------------------------------------------------------- +# 5.x - complex tests with temporary tables and nested instead of triggers +# These tests cannot run if the library does not have view support enabled. + +ifcapable view&&tempdb { + +do_test laststmtchanges-5.1 { + catchsql { + drop table t0; drop table t1; drop table t2; + create temp table t0(x); + create temp table t1 (k integer primary key); + create temp table t2 (k integer primary key); + create temp view v1 as select * from t1; + create temp view v2 as select * from t2; + create temp table n1 (k integer primary key, n); + create temp table n2 (k integer primary key, n); + insert into t0 values (1); + insert into t0 values (2); + insert into t0 values (1); + insert into t0 values (1); + insert into t0 values (1); + insert into t0 values (2); + insert into t0 values (2); + insert into t0 values (1); + create temp trigger r1 instead of insert on v1 for each row begin + insert into n1 values (NULL, changes()); + update t0 set x=x*10 where x=1; + insert into n1 values (NULL, changes()); + insert into t1 values (NEW.k); + insert into n1 values (NULL, changes()); + update t0 set x=x*10 where x=0; + insert into v2 values (100+NEW.k); + insert into n1 values (NULL, changes()); + end; + create temp trigger r2 instead of insert on v2 for each row begin + insert into n2 values (NULL, changes()); + insert into t2 values (1000+NEW.k); + insert into n2 values (NULL, changes()); + update t0 set x=x*100 where x=0; + insert into n2 values (NULL, changes()); + delete from t0 where x=2; + insert into n2 values (NULL, changes()); + end; + insert into t1 values (77); + select changes(); + } +} {0 1} + +do_test laststmtchanges-5.2 { + catchsql { + delete from t1 where k=88; + select changes(); + } +} {0 0} + +do_test laststmtchanges-5.3 { + catchsql { + insert into v1 values (5); + select changes(); + } +} {0 0} + +do_test laststmtchanges-5.4 { + catchsql { + select n from n1; + } +} {0 {0 5 1 0}} + +do_test laststmtchanges-5.5 { + catchsql { + select n from n2; + } +} {0 {0 1 0 3}} + +} ;# ifcapable view + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/like.test b/libraries/sqlite/unix/sqlite-3.5.1/test/like.test new file mode 100644 index 0000000..e154ca2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/like.test @@ -0,0 +1,400 @@ +# 2005 August 13 +# +# The author disclaims copyright to this 
source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the LIKE and GLOB operators and +# in particular the optimizations that occur to help those operators +# run faster. +# +# $Id: like.test,v 1.7 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create some sample data to work with. +# +do_test like-1.0 { + execsql { + CREATE TABLE t1(x TEXT); + } + foreach str { + a + ab + abc + abcd + + acd + abd + bc + bcd + + xyz + ABC + CDE + {ABC abc xyz} + } { + db eval {INSERT INTO t1 VALUES(:str)} + } + execsql { + SELECT count(*) FROM t1; + } +} {12} + +# Test that both case sensitive and insensitive version of LIKE work. +# +do_test like-1.1 { + execsql { + SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; + } +} {ABC abc} +do_test like-1.2 { + execsql { + SELECT x FROM t1 WHERE x GLOB 'abc' ORDER BY 1; + } +} {abc} +do_test like-1.3 { + execsql { + SELECT x FROM t1 WHERE x LIKE 'ABC' ORDER BY 1; + } +} {ABC abc} +do_test like-1.4 { + execsql { + SELECT x FROM t1 WHERE x LIKE 'aBc' ORDER BY 1; + } +} {ABC abc} +do_test like-1.5 { + execsql { + PRAGMA case_sensitive_like=on; + SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; + } +} {abc} +do_test like-1.6 { + execsql { + SELECT x FROM t1 WHERE x GLOB 'abc' ORDER BY 1; + } +} {abc} +do_test like-1.7 { + execsql { + SELECT x FROM t1 WHERE x LIKE 'ABC' ORDER BY 1; + } +} {ABC} +do_test like-1.8 { + execsql { + SELECT x FROM t1 WHERE x LIKE 'aBc' ORDER BY 1; + } +} {} +do_test like-1.9 { + execsql { + PRAGMA case_sensitive_like=off; + SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; + } +} {ABC abc} + +# Tests of the REGEXP operator +# +do_test like-2.1 { + proc test_regexp {a b} { + return [regexp $a $b] + } + db function regexp test_regexp + execsql { + SELECT x FROM t1 WHERE x REGEXP 'abc' ORDER BY 1; + } +} {{ABC abc xyz} abc abcd} +do_test like-2.2 { + execsql { + SELECT x FROM t1 WHERE x REGEXP '^abc' ORDER BY 1; + } +} {abc abcd} + +# Tests of the MATCH operator +# +do_test like-2.3 { + proc test_match {a b} { + return [string match $a $b] + } + db function match test_match + execsql { + SELECT x FROM t1 WHERE x MATCH '*abc*' ORDER BY 1; + } +} {{ABC abc xyz} abc abcd} +do_test like-2.4 { + execsql { + SELECT x FROM t1 WHERE x MATCH 'abc*' ORDER BY 1; + } +} {abc abcd} + +# For the remaining tests, we need to have the like optimizations +# enabled. +# +ifcapable !like_opt { + finish_test + return +} + +# This procedure executes the SQL. Then it appends to the result the +# "sort" or "nosort" keyword (as in the cksort procedure above) then +# it appends the ::sqlite_query_plan variable. +# +proc queryplan {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return [concat $data $::sqlite_query_plan] +} + +# Perform tests on the like optimization. +# +# With no index on t1.x and with case sensitivity turned off, no optimization +# is performed. 
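+#
+# (Illustrative note, not part of the original test file.) When the LIKE
+# optimization does apply, a prefix pattern is effectively rewritten as a
+# range constraint that an index on the column can satisfy, roughly:
+#
+#   SELECT x FROM t1 WHERE x LIKE 'abc%';
+#   -- is evaluated much like:
+#   SELECT x FROM t1 WHERE x >= 'abc' AND x < 'abd';
+#
+# which is why sqlite_like_count stays at zero in the fully optimized
+# cases below: the like() function never has to be invoked at all.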
+# +do_test like-3.1 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1; + } +} {ABC {ABC abc xyz} abc abcd sort t1 {}} +do_test like-3.2 { + set sqlite_like_count +} {12} + +# With an index on t1.x and case sensitivity on, optimize completely. +# +do_test like-3.3 { + set sqlite_like_count 0 + execsql { + PRAGMA case_sensitive_like=on; + CREATE INDEX i1 ON t1(x); + } + queryplan { + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1; + } +} {abc abcd nosort {} i1} +do_test like-3.4 { + set sqlite_like_count +} 0 + +# Partial optimization when the pattern does not end in '%' +# +do_test like-3.5 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'a_c' ORDER BY 1; + } +} {abc nosort {} i1} +do_test like-3.6 { + set sqlite_like_count +} 6 +do_test like-3.7 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'ab%d' ORDER BY 1; + } +} {abcd abd nosort {} i1} +do_test like-3.8 { + set sqlite_like_count +} 4 +do_test like-3.9 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'a_c%' ORDER BY 1; + } +} {abc abcd nosort {} i1} +do_test like-3.10 { + set sqlite_like_count +} 6 + +# No optimization when the pattern begins with a wildcard. +# Note that the index is still used but only for sorting. +# +do_test like-3.11 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE '%bcd' ORDER BY 1; + } +} {abcd bcd nosort {} i1} +do_test like-3.12 { + set sqlite_like_count +} 12 + +# No optimization for case insensitive LIKE +# +do_test like-3.13 { + set sqlite_like_count 0 + queryplan { + PRAGMA case_sensitive_like=off; + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1; + } +} {ABC {ABC abc xyz} abc abcd nosort {} i1} +do_test like-3.14 { + set sqlite_like_count +} 12 + +# No optimization without an index. +# +do_test like-3.15 { + set sqlite_like_count 0 + queryplan { + PRAGMA case_sensitive_like=on; + DROP INDEX i1; + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1; + } +} {abc abcd sort t1 {}} +do_test like-3.16 { + set sqlite_like_count +} 12 + +# No GLOB optimization without an index. +# +do_test like-3.17 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x GLOB 'abc*' ORDER BY 1; + } +} {abc abcd sort t1 {}} +do_test like-3.18 { + set sqlite_like_count +} 12 + +# GLOB is optimized regardless of the case_sensitive_like setting. +# +do_test like-3.19 { + set sqlite_like_count 0 + queryplan { + CREATE INDEX i1 ON t1(x); + SELECT x FROM t1 WHERE x GLOB 'abc*' ORDER BY 1; + } +} {abc abcd nosort {} i1} +do_test like-3.20 { + set sqlite_like_count +} 0 +do_test like-3.21 { + set sqlite_like_count 0 + queryplan { + PRAGMA case_sensitive_like=on; + SELECT x FROM t1 WHERE x GLOB 'abc*' ORDER BY 1; + } +} {abc abcd nosort {} i1} +do_test like-3.22 { + set sqlite_like_count +} 0 +do_test like-3.23 { + set sqlite_like_count 0 + queryplan { + PRAGMA case_sensitive_like=off; + SELECT x FROM t1 WHERE x GLOB 'a[bc]d' ORDER BY 1; + } +} {abd acd nosort {} i1} +do_test like-3.24 { + set sqlite_like_count +} 6 + +# No optimization if the LHS of the LIKE is not a column name or +# if the RHS is not a string. 
+# +do_test like-4.1 { + execsql {PRAGMA case_sensitive_like=on} + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1 + } +} {abc abcd nosort {} i1} +do_test like-4.2 { + set sqlite_like_count +} 0 +do_test like-4.3 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE +x LIKE 'abc%' ORDER BY 1 + } +} {abc abcd nosort {} i1} +do_test like-4.4 { + set sqlite_like_count +} 12 +do_test like-4.5 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE ('ab' || 'c%') ORDER BY 1 + } +} {abc abcd nosort {} i1} +do_test like-4.6 { + set sqlite_like_count +} 12 + +# Collating sequences on the index disable the LIKE optimization. +# Or if the NOCASE collating sequence is used, the LIKE optimization +# is enabled when case_sensitive_like is OFF. +# +do_test like-5.1 { + execsql {PRAGMA case_sensitive_like=off} + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'abc%' ORDER BY 1 + } +} {ABC {ABC abc xyz} abc abcd nosort {} i1} +do_test like-5.2 { + set sqlite_like_count +} 12 +do_test like-5.3 { + execsql { + CREATE TABLE t2(x COLLATE NOCASE); + INSERT INTO t2 SELECT * FROM t1; + CREATE INDEX i2 ON t2(x COLLATE NOCASE); + } + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x LIKE 'abc%' ORDER BY 1 + } +} {abc ABC {ABC abc xyz} abcd nosort {} i2} +do_test like-5.4 { + set sqlite_like_count +} 0 +do_test like-5.5 { + execsql { + PRAGMA case_sensitive_like=on; + } + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x LIKE 'abc%' ORDER BY 1 + } +} {abc abcd nosort {} i2} +do_test like-5.6 { + set sqlite_like_count +} 12 +do_test like-5.7 { + execsql { + PRAGMA case_sensitive_like=off; + } + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x GLOB 'abc*' ORDER BY 1 + } +} {abc abcd nosort {} i2} +do_test like-5.8 { + set sqlite_like_count +} 12 + +# ticket #2407 +# +# Make sure the LIKE prefix optimization does not strip off leading +# characters of the like pattern that happen to be quote characters. +# +do_test like-6.1 { + foreach x { 'abc 'bcd 'def 'ax } { + set x2 '[string map {' ''} $x]' + db eval "INSERT INTO t2 VALUES($x2)" + } + execsql { + SELECT * FROM t2 WHERE x LIKE '''a%' + } +} {'abc 'ax} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/limit.test b/libraries/sqlite/unix/sqlite-3.5.1/test/limit.test new file mode 100644 index 0000000..636bdf6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/limit.test @@ -0,0 +1,448 @@ +# 2001 November 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the LIMIT ... OFFSET ... clause +# of SELECT statements. 
+# +# $Id: limit.test,v 1.30 2006/06/20 11:01:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +execsql { + CREATE TABLE t1(x int, y int); + BEGIN; +} +for {set i 1} {$i<=32} {incr i} { + for {set j 0} {pow(2,$j)<$i} {incr j} {} + execsql "INSERT INTO t1 VALUES([expr {32-$i}],[expr {10-$j}])" +} +execsql { + COMMIT; +} + +do_test limit-1.0 { + execsql {SELECT count(*) FROM t1} +} {32} +do_test limit-1.1 { + execsql {SELECT count(*) FROM t1 LIMIT 5} +} {32} +do_test limit-1.2.1 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 5} +} {0 1 2 3 4} +do_test limit-1.2.2 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 5 OFFSET 2} +} {2 3 4 5 6} +do_test limit-1.2.3 { + execsql {SELECT x FROM t1 ORDER BY x+1 LIMIT 5 OFFSET -2} +} {0 1 2 3 4} +do_test limit-1.2.4 { + execsql {SELECT x FROM t1 ORDER BY x+1 LIMIT 2, -5} +} {2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} +do_test limit-1.2.5 { + execsql {SELECT x FROM t1 ORDER BY x+1 LIMIT -2, 5} +} {0 1 2 3 4} +do_test limit-1.2.6 { + execsql {SELECT x FROM t1 ORDER BY x+1 LIMIT -2, -5} +} {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} +do_test limit-1.2.7 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 2, 5} +} {2 3 4 5 6} +do_test limit-1.3 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 5 OFFSET 5} +} {5 6 7 8 9} +do_test limit-1.4.1 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 50 OFFSET 30} +} {30 31} +do_test limit-1.4.2 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 30, 50} +} {30 31} +do_test limit-1.5 { + execsql {SELECT x FROM t1 ORDER BY x LIMIT 50 OFFSET 50} +} {} +do_test limit-1.6 { + execsql {SELECT * FROM t1 AS a, t1 AS b ORDER BY a.x, b.x LIMIT 5} +} {0 5 0 5 0 5 1 5 0 5 2 5 0 5 3 5 0 5 4 5} +do_test limit-1.7 { + execsql {SELECT * FROM t1 AS a, t1 AS b ORDER BY a.x, b.x LIMIT 5 OFFSET 32} +} {1 5 0 5 1 5 1 5 1 5 2 5 1 5 3 5 1 5 4 5} + +ifcapable {view && subquery} { + do_test limit-2.1 { + execsql { + CREATE VIEW v1 AS SELECT * FROM t1 LIMIT 2; + SELECT count(*) FROM (SELECT * FROM v1); + } + } 2 +} ;# ifcapable view +do_test limit-2.2 { + execsql { + CREATE TABLE t2 AS SELECT * FROM t1 LIMIT 2; + SELECT count(*) FROM t2; + } +} 2 +ifcapable subquery { + do_test limit-2.3 { + execsql { + SELECT count(*) FROM t1 WHERE rowid IN (SELECT rowid FROM t1 LIMIT 2); + } + } 2 +} + +ifcapable subquery { + do_test limit-3.1 { + execsql { + SELECT z FROM (SELECT y*10+x AS z FROM t1 ORDER BY x LIMIT 10) + ORDER BY z LIMIT 5; + } + } {50 51 52 53 54} +} + +do_test limit-4.1 { + ifcapable subquery { + execsql { + BEGIN; + CREATE TABLE t3(x); + INSERT INTO t3 SELECT x FROM t1 ORDER BY x LIMIT 10 OFFSET 1; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + INSERT INTO t3 SELECT x+(SELECT max(x) FROM t3) FROM t3; + END; + SELECT count(*) FROM t3; + } + } else { + execsql { + BEGIN; + CREATE TABLE t3(x); + INSERT INTO t3 SELECT x FROM t1 ORDER BY x LIMIT 10 OFFSET 1; + } + for {set i 0} {$i<10} {incr i} { + set max_x_t3 [execsql {SELECT max(x) 
FROM t3}] + execsql "INSERT INTO t3 SELECT x+$max_x_t3 FROM t3;" + } + execsql { + END; + SELECT count(*) FROM t3; + } + } +} {10240} +do_test limit-4.2 { + execsql { + SELECT x FROM t3 LIMIT 2 OFFSET 10000 + } +} {10001 10002} +do_test limit-4.3 { + execsql { + CREATE TABLE t4 AS SELECT x, + 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x || + 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x || + 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x || + 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x || + 'abcdefghijklmnopqrstuvwyxz ABCDEFGHIJKLMNOPQRSTUVWYXZ' || x AS y + FROM t3 LIMIT 1000; + SELECT x FROM t4 ORDER BY y DESC LIMIT 1 OFFSET 999; + } +} {1000} + +do_test limit-5.1 { + execsql { + CREATE TABLE t5(x,y); + INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x BETWEEN 10 AND 15 + ORDER BY x LIMIT 2; + SELECT * FROM t5 ORDER BY x; + } +} {5 15 6 16} +do_test limit-5.2 { + execsql { + DELETE FROM t5; + INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x BETWEEN 10 AND 15 + ORDER BY x DESC LIMIT 2; + SELECT * FROM t5 ORDER BY x; + } +} {9 19 10 20} +do_test limit-5.3 { + execsql { + DELETE FROM t5; + INSERT INTO t5 SELECT x-y, x+y FROM t1 WHERE x ORDER BY x DESC LIMIT 31; + SELECT * FROM t5 ORDER BY x LIMIT 2; + } +} {-4 6 -3 7} +do_test limit-5.4 { + execsql { + SELECT * FROM t5 ORDER BY x DESC, y DESC LIMIT 2; + } +} {21 41 21 39} +do_test limit-5.5 { + execsql { + DELETE FROM t5; + INSERT INTO t5 SELECT a.x*100+b.x, a.y*100+b.y FROM t1 AS a, t1 AS b + ORDER BY 1, 2 LIMIT 1000; + SELECT count(*), sum(x), sum(y), min(x), max(x), min(y), max(y) FROM t5; + } +} {1000 1528204 593161 0 3107 505 1005} + +# There is some contraversy about whether LIMIT 0 should be the same as +# no limit at all or if LIMIT 0 should result in zero output rows. +# +do_test limit-6.1 { + execsql { + BEGIN; + CREATE TABLE t6(a); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 VALUES(2); + INSERT INTO t6 SELECT a+2 FROM t6; + COMMIT; + SELECT * FROM t6; + } +} {1 2 3 4} +do_test limit-6.2 { + execsql { + SELECT * FROM t6 LIMIT -1 OFFSET -1; + } +} {1 2 3 4} +do_test limit-6.3 { + execsql { + SELECT * FROM t6 LIMIT 2 OFFSET -123; + } +} {1 2} +do_test limit-6.4 { + execsql { + SELECT * FROM t6 LIMIT -432 OFFSET 2; + } +} {3 4} +do_test limit-6.5 { + execsql { + SELECT * FROM t6 LIMIT -1 + } +} {1 2 3 4} +do_test limit-6.6 { + execsql { + SELECT * FROM t6 LIMIT -1 OFFSET 1 + } +} {2 3 4} +do_test limit-6.7 { + execsql { + SELECT * FROM t6 LIMIT 0 + } +} {} +do_test limit-6.8 { + execsql { + SELECT * FROM t6 LIMIT 0 OFFSET 1 + } +} {} + +# Make sure LIMIT works well with compound SELECT statements. 
+# Ticket #393 +# +ifcapable compound { +do_test limit-7.1.1 { + catchsql { + SELECT x FROM t2 LIMIT 5 UNION ALL SELECT a FROM t6; + } +} {1 {LIMIT clause should come after UNION ALL not before}} +do_test limit-7.1.2 { + catchsql { + SELECT x FROM t2 LIMIT 5 UNION SELECT a FROM t6; + } +} {1 {LIMIT clause should come after UNION not before}} +do_test limit-7.1.3 { + catchsql { + SELECT x FROM t2 LIMIT 5 EXCEPT SELECT a FROM t6 LIMIT 3; + } +} {1 {LIMIT clause should come after EXCEPT not before}} +do_test limit-7.1.4 { + catchsql { + SELECT x FROM t2 LIMIT 0,5 INTERSECT SELECT a FROM t6; + } +} {1 {LIMIT clause should come after INTERSECT not before}} +do_test limit-7.2 { + execsql { + SELECT x FROM t2 UNION ALL SELECT a FROM t6 LIMIT 5; + } +} {31 30 1 2 3} +do_test limit-7.3 { + execsql { + SELECT x FROM t2 UNION ALL SELECT a FROM t6 LIMIT 3 OFFSET 1; + } +} {30 1 2} +do_test limit-7.4 { + execsql { + SELECT x FROM t2 UNION ALL SELECT a FROM t6 ORDER BY 1 LIMIT 3 OFFSET 1; + } +} {2 3 4} +do_test limit-7.5 { + execsql { + SELECT x FROM t2 UNION SELECT x+2 FROM t2 LIMIT 2 OFFSET 1; + } +} {31 32} +do_test limit-7.6 { + execsql { + SELECT x FROM t2 UNION SELECT x+2 FROM t2 ORDER BY 1 DESC LIMIT 2 OFFSET 1; + } +} {32 31} +do_test limit-7.7 { + execsql { + SELECT a+9 FROM t6 EXCEPT SELECT y FROM t2 LIMIT 2; + } +} {11 12} +do_test limit-7.8 { + execsql { + SELECT a+9 FROM t6 EXCEPT SELECT y FROM t2 ORDER BY 1 DESC LIMIT 2; + } +} {13 12} +do_test limit-7.9 { + execsql { + SELECT a+26 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1; + } +} {30} +do_test limit-7.10 { + execsql { + SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1; + } +} {30} +do_test limit-7.11 { + execsql { + SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 LIMIT 1 OFFSET 1; + } +} {31} +do_test limit-7.12 { + execsql { + SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 + ORDER BY 1 DESC LIMIT 1 OFFSET 1; + } +} {30} +} ;# ifcapable compound + +# Tests for limit in conjunction with distinct. The distinct should +# occur before both the limit and the offset. Ticket #749. +# +do_test limit-8.1 { + execsql { + SELECT DISTINCT cast(round(x/100) as integer) FROM t3 LIMIT 5; + } +} {0 1 2 3 4} +do_test limit-8.2 { + execsql { + SELECT DISTINCT cast(round(x/100) as integer) FROM t3 LIMIT 5 OFFSET 5; + } +} {5 6 7 8 9} +do_test limit-8.3 { + execsql { + SELECT DISTINCT cast(round(x/100) as integer) FROM t3 LIMIT 5 OFFSET 25; + } +} {25 26 27 28 29} + +# Make sure limits on multiple subqueries work correctly. +# Ticket #1035 +# +ifcapable subquery { + do_test limit-9.1 { + execsql { + SELECT * FROM (SELECT * FROM t6 LIMIT 3); + } + } {1 2 3} +} +do_test limit-9.2.1 { + execsql { + CREATE TABLE t7 AS SELECT * FROM t6; + } +} {} +ifcapable subquery { + do_test limit-9.2.2 { + execsql { + SELECT * FROM (SELECT * FROM t7 LIMIT 3); + } + } {1 2 3} +} +ifcapable compound { + ifcapable subquery { + do_test limit-9.3 { + execsql { + SELECT * FROM (SELECT * FROM t6 LIMIT 3) + UNION + SELECT * FROM (SELECT * FROM t7 LIMIT 3) + ORDER BY 1 + } + } {1 2 3} + do_test limit-9.4 { + execsql { + SELECT * FROM (SELECT * FROM t6 LIMIT 3) + UNION + SELECT * FROM (SELECT * FROM t7 LIMIT 3) + ORDER BY 1 + LIMIT 2 + } + } {1 2} + } + do_test limit-9.5 { + catchsql { + SELECT * FROM t6 LIMIT 3 + UNION + SELECT * FROM t7 LIMIT 3 + } + } {1 {LIMIT clause should come after UNION not before}} +} + +# Test LIMIT and OFFSET using SQL variables. 
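+#
+# (Clarifying note, not part of the original file.) In the Tcl interface,
+# "db eval" binds parameters of the form :name to Tcl variables of the
+# same name in the calling scope, so the tests below drive the LIMIT and
+# OFFSET clauses simply by setting Tcl variables first, for example:
+#
+#   set limit 3
+#   db eval {SELECT x FROM t1 ORDER BY x LIMIT :limit}   ;# returns 3 rows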
+do_test limit-10.1 { + set limit 10 + db eval { + SELECT x FROM t1 LIMIT :limit; + } +} {31 30 29 28 27 26 25 24 23 22} +do_test limit-10.2 { + set limit 5 + set offset 5 + db eval { + SELECT x FROM t1 LIMIT :limit OFFSET :offset; + } +} {26 25 24 23 22} +do_test limit-10.3 { + set limit -1 + db eval { + SELECT x FROM t1 WHERE x<10 LIMIT :limit; + } +} {9 8 7 6 5 4 3 2 1 0} +do_test limit-10.4 { + set limit 1.5 + set rc [catch { + db eval { + SELECT x FROM t1 WHERE x<10 LIMIT :limit; + } } msg] + list $rc $msg +} {1 {datatype mismatch}} +do_test limit-10.5 { + set limit "hello world" + set rc [catch { + db eval { + SELECT x FROM t1 WHERE x<10 LIMIT :limit; + } } msg] + list $rc $msg +} {1 {datatype mismatch}} + +ifcapable subquery { +do_test limit-11.1 { + db eval { + SELECT x FROM (SELECT x FROM t1 ORDER BY x LIMIT 0) ORDER BY x + } +} {} +} ;# ifcapable subquery + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/loadext.test b/libraries/sqlite/unix/sqlite-3.5.1/test/loadext.test new file mode 100644 index 0000000..81e152f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/loadext.test @@ -0,0 +1,218 @@ +# 2006 July 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is extension loading. +# +# $Id: loadext.test,v 1.11 2007/09/01 06:19:06 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !load_ext { + finish_test + return +} + +# The name of the test extension varies by operating system. +# +if {$::tcl_platform(platform) eq "windows"} { + set testextension ./testloadext.dll +} else { + set testextension ./libtestloadext.so +} + +# The error messages tested by this file are operating system dependent +# (because they are returned by sqlite3OsDlError()). For now, they only +# work with UNIX (and probably only certain kinds of UNIX). +# +# When a shared-object cannot be opened because it does not exist, the +# format of the message returned is: +# +# [format $dlerror_nosuchfile ] +# +# When a shared-object cannot be opened because it consists of the 4 +# characters "blah" only, we expect the error message to be: +# +# [format $dlerror_notadll ] +# +# When a symbol cannot be found within an open shared-object, the error +# message should be: +# +# [format $dlerror_nosymbol ] +# +# The exact error messages are not important. The important bit is +# that SQLite is correctly copying the message from xDlError(). +# +set dlerror_nosuchfile \ + {%s: cannot open shared object file: No such file or directory} +set dlerror_notadll {%s: file too short} +set dlerror_nosymbol {%s: undefined symbol: %s} + +# Make sure the test extension actually exists. If it does not +# exist, try to create it. If unable to create it, then skip this +# test file. +# +if {![file exists $testextension]} { + set srcdir [file dir $testdir]/src + set testextsrc $srcdir/test_loadext.c + if {[catch { + exec gcc -Wall -I$srcdir -I. -g -shared $testextsrc -o $testextension + } msg]} { + puts "Skipping loadext tests: Test extension not built..." 
+ puts $msg + finish_test + return + } +} + +# Test that loading the extension produces the expected results - adding +# the half() function to the specified database handle. +# +do_test loadext-1.1 { + catchsql { + SELECT half(1.0); + } +} {1 {no such function: half}} +do_test loadext-1.2 { + db enable_load_extension 1 + sqlite3_load_extension db $testextension testloadext_init + catchsql { + SELECT half(1.0); + } +} {0 0.5} + +# Test that a second database connection (db2) can load the extension also. +# +do_test loadext-1.3 { + sqlite3 db2 test.db + sqlite3_enable_load_extension db2 1 + catchsql { + SELECT half(1.0); + } db2 +} {1 {no such function: half}} +do_test loadext-1.4 { + sqlite3_load_extension db2 $testextension testloadext_init + catchsql { + SELECT half(1.0); + } db2 +} {0 0.5} + +# Close the first database connection. Then check that the second database +# can still use the half() function without a problem. +# +do_test loadext-1.5 { + db close + catchsql { + SELECT half(1.0); + } db2 +} {0 0.5} + +db2 close +sqlite3 db test.db +sqlite3_enable_load_extension db 1 + +# Try to load an extension for which the file does not exist. +# +do_test loadext-2.1 { + file delete -force ${testextension}xx + set rc [catch { + sqlite3_load_extension db "${testextension}xx" + } msg] + list $rc $msg +} [list 1 [format $dlerror_nosuchfile ${testextension}xx]] + +# Try to load an extension for which the file is not a shared object +# +do_test loadext-2.2 { + set fd [open "${testextension}xx" w] + puts $fd blah + close $fd + set rc [catch { + sqlite3_load_extension db "${testextension}xx" + } msg] + list $rc $msg +} [list 1 [format $dlerror_notadll ${testextension}xx]] + +# Try to load an extension for which the file is present but the +# entry point is not. +# +do_test loadext-2.3 { + set rc [catch { + sqlite3_load_extension db $testextension icecream + } msg] + list $rc $msg +} [list 1 [format $dlerror_nosymbol $testextension icecream]] + +# Try to load an extension for which the entry point fails (returns non-zero) +# +do_test loadext-2.4 { + set rc [catch { + sqlite3_load_extension db $testextension testbrokenext_init + } msg] + list $rc $msg +} {1 {error during initialization: broken!}} + +############################################################################ +# Tests for the load_extension() SQL function +# + +db close +sqlite3 db test.db +sqlite3_enable_load_extension db 1 +do_test loadext-3.1 { + catchsql { + SELECT half(5); + } +} {1 {no such function: half}} +do_test loadext-3.2 { + catchsql { + SELECT load_extension($::testextension) + } +} [list 1 [format $dlerror_nosymbol $testextension sqlite3_extension_init]] +do_test loadext-3.3 { + catchsql { + SELECT load_extension($::testextension,'testloadext_init') + } +} {0 {{}}} +do_test loadext-3.4 { + catchsql { + SELECT half(5); + } +} {0 2.5} + +# Ticket #1863 +# Make sure the extension loading mechanism will not work unless it +# is explicitly enabled. 
+# +db close +sqlite3 db test.db +do_test loadext-4.1 { + catchsql { + SELECT load_extension($::testextension,'testloadext_init') + } +} {1 {not authorized}} +do_test loadext-4.2 { + sqlite3_enable_load_extension db 1 + catchsql { + SELECT load_extension($::testextension,'testloadext_init') + } +} {0 {{}}} + +do_test loadext-4.3 { + sqlite3_enable_load_extension db 0 + catchsql { + SELECT load_extension($::testextension,'testloadext_init') + } +} {1 {not authorized}} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/loadext2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/loadext2.test new file mode 100644 index 0000000..0c89600 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/loadext2.test @@ -0,0 +1,143 @@ +# 2006 August 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is automatic extension loading and the +# sqlite3_auto_extension() API. +# +# $Id: loadext2.test,v 1.2 2007/04/06 21:42:22 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if the approriate APIs are defined +# in the system under test. +# +ifcapable !load_ext { + finish_test + return +} +if {[info command sqlite3_auto_extension_sqr]==""} { + finish_test + return +} + + +# None of the extension are loaded by default. +# +do_test loadext2-1.1 { + catchsql { + SELECT sqr(2) + } +} {1 {no such function: sqr}} +do_test loadext2-1.2 { + catchsql { + SELECT cube(2) + } +} {1 {no such function: cube}} + +# Register auto-loaders. Still functions do not exist. +# +do_test loadext2-1.3 { + sqlite3_auto_extension_sqr + sqlite3_auto_extension_cube + catchsql { + SELECT sqr(2) + } +} {1 {no such function: sqr}} +do_test loadext2-1.4 { + catchsql { + SELECT cube(2) + } +} {1 {no such function: cube}} + + +# Functions do exist in a new database connection +# +do_test loadext2-1.5 { + sqlite3 db test.db + catchsql { + SELECT sqr(2) + } +} {0 4.0} +do_test loadext2-1.6 { + catchsql { + SELECT cube(2) + } +} {0 8.0} + + +# Reset extension auto loading. Existing extensions still exist. +# +do_test loadext2-1.7 { + sqlite3_reset_auto_extension + catchsql { + SELECT sqr(2) + } +} {0 4.0} +do_test loadext2-1.8 { + catchsql { + SELECT cube(2) + } +} {0 8.0} + + +# Register only the sqr() function. +# +do_test loadext2-1.9 { + sqlite3_auto_extension_sqr + sqlite3 db test.db + catchsql { + SELECT sqr(2) + } +} {0 4.0} +do_test loadext2-1.10 { + catchsql { + SELECT cube(2) + } +} {1 {no such function: cube}} + +# Register only the cube() function. +# +do_test loadext2-1.11 { + sqlite3_reset_auto_extension + sqlite3_auto_extension_cube + sqlite3 db test.db + catchsql { + SELECT sqr(2) + } +} {1 {no such function: sqr}} +do_test loadext2-1.12 { + catchsql { + SELECT cube(2) + } +} {0 8.0} + +# Register a broken entry point. 
+# +do_test loadext2-1.13 { + sqlite3_auto_extension_broken + set rc [catch {sqlite3 db test.db} errmsg] + lappend rc $errmsg +} {1 {automatic extension loading failed: broken autoext!}} +do_test loadext2-1.14 { + catchsql { + SELECT sqr(2) + } +} {1 {no such function: sqr}} +do_test loadext2-1.15 { + catchsql { + SELECT cube(2) + } +} {0 8.0} + + +sqlite3_reset_auto_extension +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/lock.test b/libraries/sqlite/unix/sqlite-3.5.1/test/lock.test new file mode 100644 index 0000000..e453ffd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/lock.test @@ -0,0 +1,354 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks. +# +# $Id: lock.test,v 1.33 2006/08/16 16:42:48 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create an alternative connection to the database +# +do_test lock-1.0 { + sqlite3 db2 ./test.db + set dummy {} +} {} +do_test lock-1.1 { + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {} +do_test lock-1.2 { + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} db2 +} {} +do_test lock-1.3 { + execsql {CREATE TABLE t1(a int, b int)} + execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name} +} {t1} +do_test lock-1.5 { + catchsql { + SELECT name FROM sqlite_master WHERE type='table' ORDER BY name + } db2 +} {0 t1} + +do_test lock-1.6 { + execsql {INSERT INTO t1 VALUES(1,2)} + execsql {SELECT * FROM t1} +} {1 2} +# Update: The schema is now brought up to date by test lock-1.5. +# do_test lock-1.7.1 { +# catchsql {SELECT * FROM t1} db2 +# } {1 {no such table: t1}} +do_test lock-1.7.2 { + catchsql {SELECT * FROM t1} db2 +} {0 {1 2}} +do_test lock-1.8 { + execsql {UPDATE t1 SET a=b, b=a} db2 + execsql {SELECT * FROM t1} db2 +} {2 1} +do_test lock-1.9 { + execsql {SELECT * FROM t1} +} {2 1} +do_test lock-1.10 { + execsql {BEGIN TRANSACTION} + execsql {UPDATE t1 SET a = 0 WHERE 0} + execsql {SELECT * FROM t1} +} {2 1} +do_test lock-1.11 { + catchsql {SELECT * FROM t1} db2 +} {0 {2 1}} +do_test lock-1.12 { + execsql {ROLLBACK} + catchsql {SELECT * FROM t1} +} {0 {2 1}} + +do_test lock-1.13 { + execsql {CREATE TABLE t2(x int, y int)} + execsql {INSERT INTO t2 VALUES(8,9)} + execsql {SELECT * FROM t2} +} {8 9} +do_test lock-1.14.1 { + catchsql {SELECT * FROM t2} db2 +} {1 {no such table: t2}} +do_test lock-1.14.2 { + catchsql {SELECT * FROM t1} db2 +} {0 {2 1}} +do_test lock-1.15 { + catchsql {SELECT * FROM t2} db2 +} {0 {8 9}} + +do_test lock-1.16 { + db eval {SELECT * FROM t1} qv { + set x [db eval {SELECT * FROM t1}] + } + set x +} {2 1} +do_test lock-1.17 { + db eval {SELECT * FROM t1} qv { + set x [db eval {SELECT * FROM t2}] + } + set x +} {8 9} + +# You cannot UPDATE a table from within the callback of a SELECT +# on that same table because the SELECT has the table locked. +# +# 2006-08-16: Reads no longer block writes within the same +# database connection. 
+# +#do_test lock-1.18 { +# db eval {SELECT * FROM t1} qv { +# set r [catch {db eval {UPDATE t1 SET a=b, b=a}} msg] +# lappend r $msg +# } +# set r +#} {1 {database table is locked}} + +# But you can UPDATE a different table from the one that is used in +# the SELECT. +# +do_test lock-1.19 { + db eval {SELECT * FROM t1} qv { + set r [catch {db eval {UPDATE t2 SET x=y, y=x}} msg] + lappend r $msg + } + set r +} {0 {}} +do_test lock-1.20 { + execsql {SELECT * FROM t2} +} {9 8} + +# It is possible to do a SELECT of the same table within the +# callback of another SELECT on that same table because two +# or more read-only cursors can be open at once. +# +do_test lock-1.21 { + db eval {SELECT * FROM t1} qv { + set r [catch {db eval {SELECT a FROM t1}} msg] + lappend r $msg + } + set r +} {0 2} + +# Under UNIX you can do two SELECTs at once with different database +# connections, because UNIX supports reader/writer locks. Under windows, +# this is not possible. +# +if {$::tcl_platform(platform)=="unix"} { + do_test lock-1.22 { + db eval {SELECT * FROM t1} qv { + set r [catch {db2 eval {SELECT a FROM t1}} msg] + lappend r $msg + } + set r + } {0 2} +} +integrity_check lock-1.23 + +# If one thread has a transaction another thread cannot start +# a transaction. -> Not true in version 3.0. But if one thread +# as a RESERVED lock another thread cannot acquire one. +# +do_test lock-2.1 { + execsql {BEGIN TRANSACTION} + execsql {UPDATE t1 SET a = 0 WHERE 0} + execsql {BEGIN TRANSACTION} db2 + set r [catch {execsql {UPDATE t1 SET a = 0 WHERE 0} db2} msg] + execsql {ROLLBACK} db2 + lappend r $msg +} {1 {database is locked}} + +# A thread can read when another has a RESERVED lock. +# +do_test lock-2.2 { + catchsql {SELECT * FROM t2} db2 +} {0 {9 8}} + +# If the other thread (the one that does not hold the transaction with +# a RESERVED lock) tries to get a RESERVED lock, we do get a busy callback +# as long as we were not orginally holding a READ lock. +# +do_test lock-2.3.1 { + proc callback {count} { + set ::callback_value $count + break + } + set ::callback_value {} + db2 busy callback + # db2 does not hold a lock so we should get a busy callback here + set r [catch {execsql {UPDATE t1 SET a=b, b=a} db2} msg] + lappend r $msg + lappend r $::callback_value +} {1 {database is locked} 0} +do_test lock-2.3.2 { + set ::callback_value {} + execsql {BEGIN; SELECT rowid FROM sqlite_master LIMIT 1} db2 + # This time db2 does hold a read lock. No busy callback this time. 
+ set r [catch {execsql {UPDATE t1 SET a=b, b=a} db2} msg] + lappend r $msg + lappend r $::callback_value +} {1 {database is locked} {}} +catch {execsql {ROLLBACK} db2} +do_test lock-2.4.1 { + proc callback {count} { + lappend ::callback_value $count + if {$count>4} break + } + set ::callback_value {} + db2 busy callback + # We get a busy callback because db2 is not holding a lock + set r [catch {execsql {UPDATE t1 SET a=b, b=a} db2} msg] + lappend r $msg + lappend r $::callback_value +} {1 {database is locked} {0 1 2 3 4 5}} +do_test lock-2.4.2 { + proc callback {count} { + lappend ::callback_value $count + if {$count>4} break + } + set ::callback_value {} + db2 busy callback + execsql {BEGIN; SELECT rowid FROM sqlite_master LIMIT 1} db2 + # No busy callback this time because we are holding a lock + set r [catch {execsql {UPDATE t1 SET a=b, b=a} db2} msg] + lappend r $msg + lappend r $::callback_value +} {1 {database is locked} {}} +catch {execsql {ROLLBACK} db2} +do_test lock-2.5 { + proc callback {count} { + lappend ::callback_value $count + if {$count>4} break + } + set ::callback_value {} + db2 busy callback + set r [catch {execsql {SELECT * FROM t1} db2} msg] + lappend r $msg + lappend r $::callback_value +} {0 {2 1} {}} +execsql {ROLLBACK} + +# Test the built-in busy timeout handler +# +do_test lock-2.8 { + db2 timeout 400 + execsql BEGIN + execsql {UPDATE t1 SET a = 0 WHERE 0} + catchsql {BEGIN EXCLUSIVE;} db2 +} {1 {database is locked}} +do_test lock-2.9 { + db2 timeout 0 + execsql COMMIT +} {} +integrity_check lock-2.10 + +# Try to start two transactions in a row +# +do_test lock-3.1 { + execsql {BEGIN TRANSACTION} + set r [catch {execsql {BEGIN TRANSACTION}} msg] + execsql {ROLLBACK} + lappend r $msg +} {1 {cannot start a transaction within a transaction}} +integrity_check lock-3.2 + +# Make sure the busy handler and error messages work when +# opening a new pointer to the database while another pointer +# has the database locked. +# +do_test lock-4.1 { + db2 close + catch {db eval ROLLBACK} + db eval BEGIN + db eval {UPDATE t1 SET a=0 WHERE 0} + sqlite3 db2 ./test.db + catchsql {UPDATE t1 SET a=0} db2 +} {1 {database is locked}} +do_test lock-4.2 { + set ::callback_value {} + set rc [catch {db2 eval {UPDATE t1 SET a=0}} msg] + lappend rc $msg $::callback_value +} {1 {database is locked} {}} +do_test lock-4.3 { + proc callback {count} { + lappend ::callback_value $count + if {$count>4} break + } + db2 busy callback + set rc [catch {db2 eval {UPDATE t1 SET a=0}} msg] + lappend rc $msg $::callback_value +} {1 {database is locked} {0 1 2 3 4 5}} +execsql {ROLLBACK} + +# When one thread is writing, other threads cannot read. Except if the +# writing thread is writing to its temporary tables, the other threads +# can still read. -> Not so in 3.0. One thread can read while another +# holds a RESERVED lock. 
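+#
+# (Background note added for clarity; not part of the original file.)
+# The lock grades referred to in these locking tests escalate in the
+# order SHARED -> RESERVED -> PENDING -> EXCLUSIVE.  A RESERVED lock
+# announces an intent to write while still allowing other connections
+# to hold SHARED (read) locks, which is the behavior the tests rely on,
+# sketched roughly as:
+#
+#   db  eval {BEGIN; UPDATE t1 SET a=a WHERE 0}   ;# db takes RESERVED
+#   db2 eval {SELECT * FROM t2}                   ;# db2 can still read
+#   db  eval {COMMIT}                             ;# locks released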
+# +proc tx_exec {sql} { + db2 eval $sql +} +do_test lock-5.1 { + execsql { + SELECT * FROM t1 + } +} {2 1} +do_test lock-5.2 { + db function tx_exec tx_exec + catchsql { + INSERT INTO t1(a,b) SELECT 3, tx_exec('SELECT y FROM t2 LIMIT 1'); + } +} {0 {}} + +ifcapable tempdb { + do_test lock-5.3 { + execsql { + CREATE TEMP TABLE t3(x); + SELECT * FROM t3; + } + } {} + do_test lock-5.4 { + catchsql { + INSERT INTO t3 SELECT tx_exec('SELECT y FROM t2 LIMIT 1'); + } + } {0 {}} + do_test lock-5.5 { + execsql { + SELECT * FROM t3; + } + } {8} + do_test lock-5.6 { + catchsql { + UPDATE t1 SET a=tx_exec('SELECT x FROM t2'); + } + } {0 {}} + do_test lock-5.7 { + execsql { + SELECT * FROM t1; + } + } {9 1 9 8} + do_test lock-5.8 { + catchsql { + UPDATE t3 SET x=tx_exec('SELECT x FROM t2'); + } + } {0 {}} + do_test lock-5.9 { + execsql { + SELECT * FROM t3; + } + } {9} +} + +do_test lock-999.1 { + rename db2 {} +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/lock2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/lock2.test new file mode 100644 index 0000000..d071844 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/lock2.test @@ -0,0 +1,169 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks between competing processes. +# +# $Id: lock2.test,v 1.8 2007/08/12 20:07:59 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Launch another testfixture process to be controlled by this one. A +# channel name is returned that may be passed as the first argument to proc +# 'testfixture' to execute a command. The child testfixture process is shut +# down by closing the channel. +proc launch_testfixture {} { + set prg [info nameofexec] + if {$prg eq ""} { + set prg [file join . testfixture] + } + set chan [open "|$prg tf_main.tcl" r+] + fconfigure $chan -buffering line + return $chan +} + +# Execute a command in a child testfixture process, connected by two-way +# channel $chan. Return the result of the command, or an error message. +proc testfixture {chan cmd} { + puts $chan $cmd + puts $chan OVER + set r "" + while { 1 } { + set line [gets $chan] + if { $line == "OVER" } { + return $r + } + append r $line + } +} + +# Write the main loop for the child testfixture processes into file +# tf_main.tcl. The parent (this script) interacts with the child processes +# via a two way pipe. The parent writes a script to the stdin of the child +# process, followed by the word "OVER" on a line of it's own. The child +# process evaluates the script and writes the results to stdout, followed +# by an "OVER" of its own. +set f [open tf_main.tcl w] +puts $f { + set l [open log w] + set script "" + while {![eof stdin]} { + flush stdout + set line [gets stdin] + puts $l "READ $line" + if { $line == "OVER" } { + catch {eval $script} result + puts $result + puts $l "WRITE $result" + puts OVER + puts $l "WRITE OVER" + flush stdout + set script "" + } else { + append script $line + append script " ; " + } + } + close $l +} +close $f + +# Simple locking test case: +# +# lock2-1.1: Connect a second process to the database. 
+# lock2-1.2: Establish a RESERVED lock with this process. +# lock2-1.3: Get a SHARED lock with the second process. +# lock2-1.4: Try for a RESERVED lock with process 2. This fails. +# lock2-1.5: Try to upgrade the first process to EXCLUSIVE, this fails so +# it gets PENDING. +# lock2-1.6: Release the SHARED lock held by the second process. +# lock2-1.7: Attempt to reaquire a SHARED lock with the second process. +# this fails due to the PENDING lock. +# lock2-1.8: Ensure the first process can now upgrade to EXCLUSIVE. +# +do_test lock2-1.1 { + set ::tf1 [launch_testfixture] + testfixture $::tf1 "set sqlite_pending_byte $::sqlite_pending_byte" + testfixture $::tf1 { + sqlite3 db test.db -key xyzzy + db eval {select * from sqlite_master} + } +} {} +do_test lock2-1.1.1 { + execsql {pragma lock_status} +} {main unlocked temp closed} +sqlite3_soft_heap_limit 0 +do_test lock2-1.2 { + execsql { + BEGIN; + CREATE TABLE abc(a, b, c); + } +} {} +do_test lock2-1.3 { + testfixture $::tf1 { + db eval { + BEGIN; + SELECT * FROM sqlite_master; + } + } +} {} +do_test lock2-1.4 { + testfixture $::tf1 { + db eval { + CREATE TABLE def(d, e, f) + } + } +} {database is locked} +do_test lock2-1.5 { + catchsql { + COMMIT; + } +} {1 {database is locked}} +do_test lock2-1.6 { + testfixture $::tf1 { + db eval { + SELECT * FROM sqlite_master; + COMMIT; + } + } +} {} +do_test lock2-1.7 { + testfixture $::tf1 { + db eval { + BEGIN; + SELECT * FROM sqlite_master; + } + } +} {database is locked} +do_test lock2-1.8 { + catchsql { + COMMIT; + } +} {0 {}} +do_test lock2-1.9 { + execsql { + SELECT * FROM sqlite_master; + } +} "table abc abc [expr $AUTOVACUUM?3:2] {CREATE TABLE abc(a, b, c)}" +do_test lock2-1.10 { + testfixture $::tf1 { + db eval { + SELECT * FROM sqlite_master; + } + } +} "table abc abc [expr $AUTOVACUUM?3:2] {CREATE TABLE abc(a, b, c)}" + +catch {testfixture $::tf1 {db close}} +catch {close $::tf1} +sqlite3_soft_heap_limit $soft_limit + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/lock3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/lock3.test new file mode 100644 index 0000000..1835c66 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/lock3.test @@ -0,0 +1,78 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks and the operation of the +# DEFERRED, IMMEDIATE, and EXCLUSIVE keywords as modifiers to the +# BEGIN command. +# +# $Id: lock3.test,v 1.1 2004/10/05 02:41:43 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Establish two connections to the same database. Put some +# sample data into the database. +# +do_test lock3-1.1 { + sqlite3 db2 test.db + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + } + execsql { + SELECT * FROM t1 + } db2 +} 1 + +# Get a deferred lock on the database using one connection. The +# other connection should still be able to write. +# +do_test lock3-2.1 { + execsql {BEGIN DEFERRED TRANSACTION} + execsql {INSERT INTO t1 VALUES(2)} db2 + execsql {END TRANSACTION} + execsql {SELECT * FROM t1} +} {1 2} + +# Get an immediate lock on the database using one connection. 
The +# other connection should be able to read the database but not write +# it. +# +do_test lock3-3.1 { + execsql {BEGIN IMMEDIATE TRANSACTION} + catchsql {SELECT * FROM t1} db2 +} {0 {1 2}} +do_test lock3-3.2 { + catchsql {INSERT INTO t1 VALUES(3)} db2 +} {1 {database is locked}} +do_test lock3-3.3 { + execsql {END TRANSACTION} +} {} + + +# Get an exclusive lock on the database using one connection. The +# other connection should be unable to read or write the database. +# +do_test lock3-4.1 { + execsql {BEGIN EXCLUSIVE TRANSACTION} + catchsql {SELECT * FROM t1} db2 +} {1 {database is locked}} +do_test lock3-4.2 { + catchsql {INSERT INTO t1 VALUES(3)} db2 +} {1 {database is locked}} +do_test lock3-4.3 { + execsql {END TRANSACTION} +} {} + +catch {db2 close} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/lock4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/lock4.test new file mode 100644 index 0000000..3820476 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/lock4.test @@ -0,0 +1,99 @@ +# 2007 April 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks. +# +# $Id: lock4.test,v 1.6 2007/09/06 23:28:25 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Initialize the test.db database so that it is non-empty +# +do_test lock4-1.1 { + db eval { + PRAGMA auto_vacuum=OFF; + CREATE TABLE t1(x); + } + file delete -force test2.db test2.db-journal + sqlite3 db2 test2.db + db2 eval { + PRAGMA auto_vacuum=OFF; + CREATE TABLE t2(x) + } + db2 close + list [file size test.db] [file size test2.db] +} {2048 2048} + +# Create a script to drive a separate process that will +# +# 1. Create a second database test2.db +# 2. Get an exclusive lock on test2.db +# 3. Add an entry to test.db in table t1, waiting as necessary. +# 4. Commit the change to test2.db. +# +# Meanwhile, this process will: +# +# A. Get an exclusive lock on test.db +# B. Attempt to read from test2.db but get an SQLITE_BUSY error. +# C. Commit the changes to test.db thus alloing the other process +# to continue. 
+# +do_test lock4-1.2 { + set out [open test2-script.tcl w] + puts $out "set sqlite_pending_byte [set sqlite_pending_byte]" + puts $out { + sqlite3 db2 test2.db + db2 eval { + BEGIN; + INSERT INTO t2 VALUES(2); + } + sqlite3 db test.db + db timeout 1000000 + db eval { + INSERT INTO t1 VALUES(2); + } + db2 eval COMMIT + exit + } + close $out + db eval { + BEGIN; + INSERT INTO t1 VALUES(1); + } + exec [info nameofexec] ./test2-script.tcl & + while {![file exists test2.db-journal]} { + after 10 + } + sqlite3 db2 test2.db + catchsql { + INSERT INTO t2 VALUES(1) + } db2 +} {1 {database is locked}} +do_test lock4-1.3 { + db eval { + COMMIT; + } + while {[file exists test2.db-journal]} { + after 10 + } + db2 eval { + SELECT * FROM t2 + } +} {2} + + +do_test lock4-999.1 { + rename db2 {} +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/main.test b/libraries/sqlite/unix/sqlite-3.5.1/test/main.test new file mode 100644 index 0000000..00aa96a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/main.test @@ -0,0 +1,360 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is exercising the code in main.c. +# +# $Id: main.test,v 1.27 2007/09/03 15:42:48 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only do the next group of tests if the sqlite3_complete API is available +# +ifcapable {complete} { + +# Tests of the sqlite_complete() function. 
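+#
+# (Clarifying note, not part of the original file.) sqlite3_complete()
+# answers a purely lexical question: does the text end with a semicolon
+# that lies outside of any string literal, comment, or
+# CREATE TRIGGER ... BEGIN ... END body?  It does not check that the SQL
+# is actually valid, so, for example, both of these count as complete:
+#
+#   db complete {SELECT * FROM t1;}       ;# expected result: 1
+#   db complete {bogus tokens go here;}   ;# expected result: 1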
+# +do_test main-1.1 { + db complete {This is a test} +} {0} +do_test main-1.2 { + db complete { + } +} {1} +do_test main-1.3 { + db complete { + -- a comment ; + } +} {1} +do_test main-1.4 { + db complete { + -- a comment ; + ; + } +} {1} +do_test main-1.5 { + db complete {DROP TABLE 'xyz;} +} {0} +do_test main-1.6 { + db complete {DROP TABLE 'xyz';} +} {1} +do_test main-1.7 { + db complete {DROP TABLE "xyz;} +} {0} +do_test main-1.8 { + db complete {DROP TABLE "xyz';} +} {0} +do_test main-1.9 { + db complete {DROP TABLE "xyz";} +} {1} +do_test main-1.10 { + db complete {DROP TABLE xyz; hi} +} {0} +do_test main-1.11 { + db complete {DROP TABLE xyz; } +} {1} +do_test main-1.12 { + db complete {DROP TABLE xyz; -- hi } +} {1} +do_test main-1.13 { + db complete {DROP TABLE xyz; -- hi + } +} {1} +do_test main-1.14 { + db complete {SELECT a-b FROM t1; } +} {1} +do_test main-1.15 { + db complete {SELECT a/e FROM t1 } +} {0} +do_test main-1.16 { + db complete { + CREATE TABLE abc(x,y); + } +} {1} +ifcapable {trigger} { + do_test main-1.17 { + db complete { + CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; + } + } {0} + do_test main-1.18 { + db complete { + CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; END; + } + } {1} + do_test main-1.19 { + db complete { + CREATE TRIGGER xyz AFTER DELETE abc BEGIN + UPDATE pqr; + unknown command; + } + } {0} + do_test main-1.20 { + db complete { + CREATE TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr; + } + } {0} + do_test main-1.21 { + db complete { + CREATE TRIGGER xyz AFTER DELETE end BEGIN + SELECT a, b FROM end; + } + } {0} + do_test main-1.22 { + db complete { + CREATE TRIGGER xyz AFTER DELETE end BEGIN + SELECT a, b FROM end; + END; + } + } {1} + do_test main-1.23 { + db complete { + CREATE TRIGGER xyz AFTER DELETE end BEGIN + SELECT a, b FROM end; + END; + SELECT a, b FROM end; + } + } {1} + do_test main-1.24 { + db complete { + CREATE TRIGGER xyz AFTER DELETE [;end;] BEGIN + UPDATE pqr; + } + } {0} + do_test main-1.25 { + db complete { + CREATE TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=[;end;];;; + } + } {0} + do_test main-1.26 { + db complete { + CREATE -- a comment + TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {0} + do_test main-1.27.1 { + db complete { + CREATE -- a comment + TRIGGERX xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {1} + do_test main-1.27.2 { + db complete { + CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {0} + ifcapable {explain} { + do_test main-1.27.3 { + db complete { + /* */ EXPLAIN -- A comment + CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {0} + } + do_test main-1.27.4 { + db complete { + BOGUS token + CREATE TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {1} + ifcapable {explain} { + do_test main-1.27.5 { + db complete { + EXPLAIN + CREATE TEMP TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {0} + } + do_test main-1.28 { + db complete { + CREATE TEMPORARY TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + } + } {0} + do_test main-1.29 { + db complete { + CREATE TRIGGER xyz AFTER DELETE backend BEGIN + UPDATE pqr SET a=5; + EXPLAIN select * from xyz; + } + } {0} +} +do_test main-1.30 { + db complete { + CREATE TABLE /* In comment ; */ + } +} {0} +do_test main-1.31 { + db complete { + CREATE TABLE /* In comment ; */ hi; + } +} {1} +do_test main-1.31 { + db complete { + CREATE TABLE /* In comment ; */; + } +} {1} +do_test main-1.32 { + 
db complete { + stuff; + /* + CREATE TABLE + multiple lines + of text + */ + } +} {1} +do_test main-1.33 { + db complete { + /* + CREATE TABLE + multiple lines + of text; + } +} {0} +do_test main-1.34 { + db complete { + /* + CREATE TABLE + multiple lines "*/ + of text; + } +} {1} +do_test main-1.35 { + db complete {hi /**/ there;} +} {1} +do_test main-1.36 { + db complete {hi there/***/;} +} {1} + +} ;# end ifcapable {complete} + + +# Try to open a database with a corrupt database file. +# +do_test main-2.0 { + catch {db close} + file delete -force test.db + set fd [open test.db w] + puts $fd hi! + close $fd + set v [catch {sqlite3 db test.db} msg] + if {$v} {lappend v $msg} {lappend v {}} +} {0 {}} + +# Here are some tests for tokenize.c. +# +do_test main-3.1 { + catch {db close} + foreach f [glob -nocomplain testdb/*] {file delete -force $f} + file delete -force testdb + sqlite3 db testdb + set v [catch {execsql {SELECT * from T1 where x!!5}} msg] + lappend v $msg +} {1 {unrecognized token: "!!"}} +do_test main-3.2 { + catch {db close} + foreach f [glob -nocomplain testdb/*] {file delete -force $f} + file delete -force testdb + sqlite3 db testdb + set v [catch {execsql {SELECT * from T1 where ^x}} msg] + lappend v $msg +} {1 {unrecognized token: "^"}} +do_test main-3.2.2 { + catchsql {select 'abc} +} {1 {unrecognized token: "'abc"}} +do_test main-3.2.3 { + catchsql {select "abc} +} {1 {unrecognized token: ""abc"}} + +do_test main-3.3 { + catch {db close} + foreach f [glob -nocomplain testdb/*] {file delete -force $f} + file delete -force testdb + sqlite3 db testdb + execsql { + create table T1(X REAL); /* C-style comments allowed */ + insert into T1 values(0.5); + insert into T1 values(0.5e2); + insert into T1 values(0.5e-002); + insert into T1 values(5e-002); + insert into T1 values(-5.0e-2); + insert into T1 values(-5.1e-2); + insert into T1 values(0.5e2); + insert into T1 values(0.5E+02); + insert into T1 values(5E+02); + insert into T1 values(5.0E+03); + select x*10 from T1 order by x*5; + } +} {-0.51 -0.5 0.05 0.5 5.0 500.0 500.0 500.0 5000.0 50000.0} +do_test main-3.4 { + set v [catch {execsql {create bogus}} msg] + lappend v $msg +} {1 {near "bogus": syntax error}} +do_test main-3.5 { + set v [catch {execsql {create}} msg] + lappend v $msg +} {1 {near "create": syntax error}} +do_test main-3.6 { + catchsql {SELECT 'abc' + #9} +} {1 {near "#9": syntax error}} + +# The following test-case tests the linked list code used to manage +# sqlite3_vfs structures. 
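+#
+# (Clarifying note, not part of the original file.) The Tcl commands used
+# below register a VFS when passed 1 and unregister it when passed 0:
+# sqlite3_crash_enable manages a VFS named "crash" and sqlite3async_enable
+# one named "async".  Running them in several different orders exercises
+# removal from different positions in the internal list of registered
+# VFSes, for example:
+#
+#   sqlite3_crash_enable 1     ;# registers the "crash" VFS
+#   sqlite3async_enable 1      ;# registers the "async" VFS
+#   sqlite3_crash_enable 0     ;# unregisters "crash"
+#   sqlite3async_enable 0      ;# unregisters "async"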
+if {$::tcl_platform(platform)=="unix"} { + ifcapable threadsafe { + do_test main-4.1 { + sqlite3_crash_enable 1 + sqlite3_crash_enable 0 + + sqlite3async_enable 1 + sqlite3async_enable 0 + + sqlite3_crash_enable 1 + sqlite3async_enable 1 + sqlite3_crash_enable 0 + sqlite3async_enable 0 + + sqlite3_crash_enable 1 + sqlite3async_enable 1 + sqlite3async_enable 0 + sqlite3_crash_enable 0 + + sqlite3async_enable 1 + sqlite3_crash_enable 1 + sqlite3_crash_enable 0 + sqlite3async_enable 0 + + sqlite3async_enable 1 + sqlite3_crash_enable 1 + sqlite3async_enable 0 + sqlite3_crash_enable 0 + } {} + do_test main-4.2 { + set rc [catch {sqlite3 db test.db -vfs crash} msg] + list $rc $msg + } {1 {no such vfs: crash}} + do_test main-4.3 { + set rc [catch {sqlite3 db test.db -vfs async} msg] + list $rc $msg + } {1 {no such vfs: async}} + } +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc.test new file mode 100644 index 0000000..379dd46 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc.test @@ -0,0 +1,571 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file attempts to check the behavior of the SQLite library in +# an out-of-memory situation. When compiled with -DSQLITE_DEBUG=1, +# the SQLite library accepts a special command (sqlite3_memdebug_fail N C) +# which causes the N-th malloc to fail. This special feature is used +# to see what happens in the library if a malloc were to really fail +# due to an out-of-memory situation. +# +# $Id: malloc.test,v 1.50 2007/10/03 15:02:40 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +source $testdir/malloc_common.tcl + +ifcapable bloblit&&subquery { + do_malloc_test 1 -tclprep { + db close + } -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 + } -sqlbody { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1( + a int, b float, c double, d text, e varchar(20), + primary key(a,b,c) + ); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,2.3,4.5,'hi',x'746865726500'); + INSERT INTO t1 VALUES(6,7.0,0.8,'hello','out yonder'); + SELECT * FROM t1; + SELECT avg(b) FROM t1 GROUP BY a HAVING b>20.0; + DELETE FROM t1 WHERE a IN (SELECT min(a) FROM t1); + SELECT count(*) FROM t1; + } +} + +# Ensure that no file descriptors were leaked. 
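do_malloc_test itself is defined in malloc_common.tcl, which is not part of this hunk, so the loop it wraps around the (sqlite3_memdebug_fail N C) interface described in the file header is not visible here. As a rough sketch of that mechanism, an assumption about the helper rather than its actual code, using only harness commands that appear elsewhere in this commit and a purely illustrative statement:

  # Make malloc number $n fail, run the statement, then repeat with a larger
  # $n until a run completes without hitting any injected failure.  The call
  # [sqlite3_memdebug_fail -1] reports how many simulated failures were
  # actually triggered by the previous run (0 means the run needed none).
  for {set n 1} {1} {incr n} {
    sqlite3_memdebug_fail $n -repeat 1
    set res [catchsql {INSERT INTO t1 VALUES(1, 2, 3)}]
    if {[lindex $res 0]!=0 && [string compare $res {1 {out of memory}}]!=0} {
      error "unexpected result: $res"
    }
    if {[sqlite3_memdebug_fail -1] == 0} break
  }

The real helper additionally runs the -sqlprep/-tclprep, -sqlbody/-tclbody and -cleanup scripts that the calls in this file supply.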
+do_test malloc-1.X { + catch {db close} + set sqlite_open_file_count +} {0} + +ifcapable subquery { + do_malloc_test 2 -sqlbody { + CREATE TABLE t1(a int, b int default 'abc', c int default 1); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,1,'99 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(2,4,'98 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(3,9,'97 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(4,16,'96 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(5,25,'95 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(6,36,'94 abcdefghijklmnopqrstuvwxyz'); + SELECT 'stuff', count(*) as 'other stuff', max(a+10) FROM t1; + UPDATE t1 SET b=b||b||b||b; + UPDATE t1 SET b=a WHERE a in (10,12,22); + INSERT INTO t1(c,b,a) VALUES(20,10,5); + INSERT INTO t1 SELECT * FROM t1 + WHERE a IN (SELECT a FROM t1 WHERE a<10); + DELETE FROM t1 WHERE a>=10; + DROP INDEX i1; + DELETE FROM t1; + } +} + +# Ensure that no file descriptors were leaked. +do_test malloc-2.X { + catch {db close} + set sqlite_open_file_count +} {0} + +do_malloc_test 3 -sqlbody { + BEGIN TRANSACTION; + CREATE TABLE t1(a int, b int, c int); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,1,99); + INSERT INTO t1 VALUES(2,4,98); + INSERT INTO t1 VALUES(3,9,97); + INSERT INTO t1 VALUES(4,16,96); + INSERT INTO t1 VALUES(5,25,95); + INSERT INTO t1 VALUES(6,36,94); + INSERT INTO t1(c,b,a) VALUES(20,10,5); + DELETE FROM t1 WHERE a>=10; + DROP INDEX i1; + DELETE FROM t1; + ROLLBACK; +} + + +# Ensure that no file descriptors were leaked. +do_test malloc-3.X { + catch {db close} + set sqlite_open_file_count +} {0} + +ifcapable subquery { + do_malloc_test 4 -sqlbody { + BEGIN TRANSACTION; + CREATE TABLE t1(a int, b int, c int); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,1,99); + INSERT INTO t1 VALUES(2,4,98); + INSERT INTO t1 VALUES(3,9,97); + INSERT INTO t1 VALUES(4,16,96); + INSERT INTO t1 VALUES(5,25,95); + INSERT INTO t1 VALUES(6,36,94); + UPDATE t1 SET b=a WHERE a in (10,12,22); + INSERT INTO t1 SELECT * FROM t1 + WHERE a IN (SELECT a FROM t1 WHERE a<10); + DROP INDEX i1; + DELETE FROM t1; + COMMIT; + } +} + +# Ensure that no file descriptors were leaked. +do_test malloc-4.X { + catch {db close} + set sqlite_open_file_count +} {0} + +ifcapable trigger { + do_malloc_test 5 -sqlbody { + BEGIN TRANSACTION; + CREATE TABLE t1(a,b); + CREATE TABLE t2(x,y); + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN + INSERT INTO t2(x,y) VALUES(new.rowid,1); + INSERT INTO t2(x,y) SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + UPDATE t2 SET y=y+1 WHERE x=new.rowid; + SELECT 123; + DELETE FROM t2 WHERE x=new.rowid; + END; + INSERT INTO t1(a,b) VALUES(2,3); + COMMIT; + } +} + +# Ensure that no file descriptors were leaked. 
+do_test malloc-5.X { + catch {db close} + set sqlite_open_file_count +} {0} + +ifcapable vacuum { + do_malloc_test 6 -sqlprep { + BEGIN TRANSACTION; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + DELETE FROM t1 where rowid%5 = 0; + COMMIT; + } -sqlbody { + VACUUM; + } +} + +do_malloc_test 7 -sqlprep { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 VALUES(5, 6); + INSERT INTO t1 VALUES(7, randstr(1200,1200)); +} -sqlbody { + SELECT min(a) FROM t1 WHERE a<6 GROUP BY b; + SELECT a FROM t1 WHERE a<6 ORDER BY a; + SELECT b FROM t1 WHERE a>6; +} + +# This block is designed to test that some malloc failures that may +# occur in vdbeapi.c. Specifically, if a malloc failure that occurs +# when converting UTF-16 text to integers and real numbers is handled +# correctly. +# +# This is done by retrieving a string from the database engine and +# manipulating it using the sqlite3_column_*** APIs. This doesn't +# actually return an error to the user when a malloc() fails.. That +# could be viewed as a bug. +# +# These tests only run if UTF-16 support is compiled in. +# +ifcapable utf16 { + set ::STMT {} + do_malloc_test 8 -tclprep { + set sql "SELECT '[string repeat abc 20]', '[string repeat def 20]', ?" + set ::STMT [sqlite3_prepare db $sql -1 X] + sqlite3_step $::STMT + if { $::tcl_platform(byteOrder)=="littleEndian" } { + set ::bomstr "\xFF\xFE" + } else { + set ::bomstr "\xFE\xFF" + } + append ::bomstr [encoding convertto unicode "123456789_123456789_12345678"] + } -tclbody { + sqlite3_column_text16 $::STMT 0 + sqlite3_column_int $::STMT 0 + sqlite3_column_text16 $::STMT 1 + sqlite3_column_double $::STMT 1 + set rc [sqlite3_reset $::STMT] + if {$rc eq "SQLITE_NOMEM"} {error "out of memory"} + sqlite3_bind_text16 $::STMT 1 $::bomstr 60 + #catch {sqlite3_finalize $::STMT} + #if {[lindex [sqlite_malloc_stat] 2]<=0} { + # error "out of memory" + #} + } -cleanup { + if {$::STMT!=""} { + sqlite3_finalize $::STMT + set ::STMT {} + } + } +} + +# This block tests that malloc() failures that occur whilst commiting +# a multi-file transaction are handled correctly. +# +do_malloc_test 9 -sqlprep { + ATTACH 'test2.db' as test2; + CREATE TABLE abc1(a, b, c); + CREATE TABLE test2.abc2(a, b, c); +} -sqlbody { + BEGIN; + INSERT INTO abc1 VALUES(1, 2, 3); + INSERT INTO abc2 VALUES(1, 2, 3); + COMMIT; +} + +# This block tests malloc() failures that occur while opening a +# connection to a database. +do_malloc_test 10 -tclprep { + catch {db2 close} + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + sqlite3_extended_result_codes db 1 + db eval {CREATE TABLE abc(a, b, c)} +} -tclbody { + db close + sqlite3 db2 test.db + sqlite3_extended_result_codes db2 1 + db2 eval {SELECT * FROM sqlite_master} + db2 close +} + +# This block tests malloc() failures that occur within calls to +# sqlite3_create_function(). 
+do_malloc_test 11 -tclbody { + set rc [sqlite3_create_function db] + if {[string match $rc SQLITE_OK]} { + set rc [sqlite3_create_aggregate db] + } + if {[string match $rc SQLITE_NOMEM]} { + error "out of memory" + } +} + +do_malloc_test 12 -tclbody { + set sql16 [encoding convertto unicode "SELECT * FROM sqlite_master"] + append sql16 "\00\00" + set ::STMT [sqlite3_prepare16 db $sql16 -1 DUMMY] + sqlite3_finalize $::STMT +} + +# Test malloc errors when replaying two hot journals from a 2-file +# transaction. +ifcapable crashtest { + do_malloc_test 13 -tclprep { + set rc [crashsql -delay 1 -file test2.db { + ATTACH 'test2.db' as aux; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE aux.t2(a, b, c); + CREATE TABLE t1(a, b, c); + COMMIT; + }] + if {$rc!="1 {child process exited abnormally}"} { + error "Wrong error message: $rc" + } + } -tclbody { + db eval {ATTACH 'test2.db' as aux;} + set rc [catch {db eval { + SELECT * FROM t1; + SELECT * FROM t2; + }} err] + if {$rc && $err!="no such table: t1"} { + error $err + } + } +} + +if {$tcl_platform(platform)!="windows"} { + do_malloc_test 14 -tclprep { + catch {db close} + sqlite3 db2 test2.db + sqlite3_extended_result_codes db2 1 + db2 eval { + PRAGMA synchronous = 0; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + BEGIN; + INSERT INTO t1 VALUES(3, 4); + } + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + db2 close + } -tclbody { + sqlite3 db test.db + sqlite3_extended_result_codes db 1 + db eval { + SELECT * FROM t1; + } + } +} + +proc string_compare {a b} { + return [string compare $a $b] +} + +# Test for malloc() failures in sqlite3_create_collation() and +# sqlite3_create_collation16(). +# +ifcapable utf16 { + do_malloc_test 15 -start 4 -tclbody { + db collate string_compare string_compare + if {[catch {add_test_collate db 1 1 1} msg]} { + if {$msg=="SQLITE_NOMEM"} {set msg "out of memory"} + error $msg + } + + db complete {SELECT "hello """||'world"' [microsoft], * FROM anicetable;} + db complete {-- Useful comment} + + execsql { + CREATE TABLE t1(a, b COLLATE string_compare); + INSERT INTO t1 VALUES(10, 'string'); + INSERT INTO t1 VALUES(10, 'string2'); + } + } +} + +# Also test sqlite3_complete(). There are (currently) no malloc() +# calls in this function, but test anyway against future changes. +# +do_malloc_test 16 -tclbody { + db complete {SELECT "hello """||'world"' [microsoft], * FROM anicetable;} + db complete {-- Useful comment} + db eval { + SELECT * FROM sqlite_master; + } +} + +# Test handling of malloc() failures in sqlite3_open16(). 
+# +ifcapable utf16 { + do_malloc_test 17 -tclbody { + set DB2 0 + set STMT 0 + + # open database using sqlite3_open16() + set filename [encoding convertto unicode test.db] + append filename "\x00\x00" + set DB2 [sqlite3_open16 $filename -unused] + if {0==$DB2} { + error "out of memory" + } + sqlite3_extended_result_codes $DB2 1 + + # Prepare statement + set rc [catch {sqlite3_prepare $DB2 {SELECT * FROM sqlite_master} -1 X} msg] + if {[sqlite3_errcode $DB2] eq "SQLITE_IOERR+12"} { + error "out of memory" + } + if {$rc} { + error [string range $msg 4 end] + } + set STMT $msg + + # Finalize statement + set rc [sqlite3_finalize $STMT] + if {$rc!="SQLITE_OK"} { + error [sqlite3_errmsg $DB2] + } + set STMT 0 + + # Close database + set rc [sqlite3_close $DB2] + if {$rc!="SQLITE_OK"} { + error [sqlite3_errmsg $DB2] + } + set DB2 0 + } -cleanup { + if {$STMT!="0"} { + sqlite3_finalize $STMT + } + if {$DB2!="0"} { + set rc [sqlite3_close $DB2] + } + } +} + +# Test handling of malloc() failures in sqlite3_errmsg16(). +# +ifcapable utf16 { + do_malloc_test 18 -tclprep { + catch { + db eval "SELECT [string repeat longcolumnname 10] FROM sqlite_master" + } + } -tclbody { + set utf16 [sqlite3_errmsg16 [sqlite3_connection_pointer db]] + binary scan $utf16 c* bytes + if {[llength $bytes]==0} { + error "out of memory" + } + } +} + +# This test is aimed at coverage testing. Specificly, it is supposed to +# cause a malloc() only used when converting between the two utf-16 +# encodings to fail (i.e. little-endian->big-endian). It only actually +# hits this malloc() on little-endian hosts. +# +set static_string "\x00h\x00e\x00l\x00l\x00o" +for {set l 0} {$l<10} {incr l} { + append static_string $static_string +} +append static_string "\x00\x00" +do_malloc_test 19 -tclprep { + execsql { + PRAGMA encoding = "UTF16be"; + CREATE TABLE abc(a, b, c); + } +} -tclbody { + unset -nocomplain ::STMT + set r [catch { + set ::STMT [sqlite3_prepare db {SELECT ?} -1 DUMMY] + sqlite3_bind_text16 -static $::STMT 1 $static_string 112 + } msg] + if {$r} {error [string range $msg 4 end]} + set msg +} -cleanup { + if {[info exists ::STMT]} { + sqlite3_finalize $::STMT + } +} +unset static_string + +# Make sure SQLITE_NOMEM is reported out on an ATTACH failure even +# when the malloc failure occurs within the nested parse. +# +do_malloc_test 20 -tclprep { + db close + file delete -force test2.db test2.db-journal + sqlite3 db test2.db + sqlite3_extended_result_codes db 1 + db eval {CREATE TABLE t1(x);} + db close +} -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 +} -sqlbody { + ATTACH DATABASE 'test2.db' AS t2; + SELECT * FROM t1; + DETACH DATABASE t2; +} + +# Test malloc failure whilst installing a foreign key. +# +ifcapable foreignkey { + do_malloc_test 21 -sqlbody { + CREATE TABLE abc(a, b, c, FOREIGN KEY(a) REFERENCES abc(b)) + } +} + +# Test malloc failure in an sqlite3_prepare_v2() call. +# +do_malloc_test 22 -tclbody { + set ::STMT "" + set r [catch { + set ::STMT [ + sqlite3_prepare_v2 db "SELECT * FROM sqlite_master" -1 DUMMY + ] + } msg] + if {$r} {error [string range $msg 4 end]} +} -cleanup { + if {$::STMT ne ""} { + sqlite3_finalize $::STMT + set ::STMT "" + } +} + +ifcapable {pager_pragmas} { + # This tests a special case - that an error that occurs while the pager + # is trying to recover from error-state in exclusive-access mode works. 
+ # + do_malloc_test 23 -tclprep { + db eval { + PRAGMA cache_size = 10; + PRAGMA locking_mode = exclusive; + BEGIN; + CREATE TABLE abc(a, b, c); + CREATE INDEX abc_i ON abc(a, b, c); + INSERT INTO abc + VALUES(randstr(100,100), randstr(100,100), randstr(100,100)); + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + COMMIT; + } + + # This puts the pager into error state. + # + db eval BEGIN + db eval {UPDATE abc SET a = 0 WHERE oid%2} + set ::sqlite_io_error_pending 10 + catch {db eval {ROLLBACK}} msg + + } -sqlbody { + SELECT * FROM abc LIMIT 10; + } -cleanup { + set e [db eval {PRAGMA integrity_check}] + if {$e ne "ok"} {error $e} + } +} + +# Ensure that no file descriptors were leaked. +do_test malloc-99.X { + catch {db close} + set sqlite_open_file_count +} {0} + +puts open-file-count=$sqlite_open_file_count +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc2.test new file mode 100644 index 0000000..e48a569 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc2.test @@ -0,0 +1,366 @@ +# 2005 March 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file attempts to check that the library can recover from a malloc() +# failure when sqlite3_global_recover() is invoked. +# +# (Later:) The sqlite3_global_recover() interface is now a no-op. +# Recovery from malloc() failures is automatic. But we keep these +# tests around because you can never have too many test cases. +# +# $Id: malloc2.test,v 1.8 2007/10/03 08:46:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +sqlite3_extended_result_codes db 1 + +# Generate a checksum based on the contents of the database. If the +# checksum of two databases is the same, and the integrity-check passes +# for both, the two databases are identical. +# +proc cksum {db} { + set ret [list] + ifcapable tempdb { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT name FROM sqlite_temp_master WHERE type = 'table' UNION + SELECT 'sqlite_master' UNION + SELECT 'sqlite_temp_master' + } + } else { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT 'sqlite_master' + } + } + set tbllist [$db eval $sql] + set txt {} + foreach tbl $tbllist { + append txt [$db eval "SELECT * FROM $tbl"] + } + # puts txt=$txt + return [md5 $txt] +} + +proc do_malloc2_test {tn args} { + array set ::mallocopts $args + set sum [cksum db] + + for {set ::n 1} {true} {incr ::n} { + + # Run the SQL. Malloc number $::n is set to fail. A malloc() failure + # may or may not be reported. 
+ sqlite3_memdebug_fail $::n -repeat 1 + do_test malloc2-$tn.$::n.2 { + set res [catchsql [string trim $::mallocopts(-sql)]] + set rc [expr { + 0==[string compare $res {1 {out of memory}}] || + [db errorcode] == 3082 || + 0==[lindex $res 0] + }] + if {$rc!=1} { + puts "Error: $res" + } + set rc + } {1} + + # If $::n is greater than the number of malloc() calls required to + # execute the SQL, then this test is finished. Break out of the loop. + set nFail [sqlite3_memdebug_fail -1] + if {$nFail==0} break + + # Nothing should work now, because the allocator should refuse to + # allocate any memory. + # + # Update: SQLite now automatically recovers from a malloc() failure. + # So the statement in the test below would work. + if 0 { + do_test malloc2-$tn.$::n.3 { + catchsql {SELECT 'nothing should work'} + } {1 {out of memory}} + } + + # Recover from the malloc failure. + # + # Update: The new malloc() failure handling means that a transaction may + # still be active even if a malloc() has failed. But when these tests were + # written this was not the case. So do a manual ROLLBACK here so that the + # tests pass. + do_test malloc2-$tn.$::n.4 { + sqlite3_global_recover + catch { + execsql { + ROLLBACK; + } + } + expr 0 + } {0} + + # Checksum the database. + do_test malloc2-$tn.$::n.5 { + cksum db + } $sum + + integrity_check malloc2-$tn.$::n.6 + if {$::nErr>1} return + } + unset ::mallocopts +} + +do_test malloc2.1.setup { + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(10, 20, 30); + INSERT INTO abc VALUES(40, 50, 60); + CREATE INDEX abc_i ON abc(a, b, c); + } +} {} +do_malloc2_test 1.1 -sql { + SELECT * FROM abc; +} +do_malloc2_test 1.2 -sql { + UPDATE abc SET c = c+10; +} +do_malloc2_test 1.3 -sql { + INSERT INTO abc VALUES(70, 80, 90); +} +do_malloc2_test 1.4 -sql { + DELETE FROM abc; +} +do_test malloc2.1.5 { + execsql { + SELECT * FROM abc; + } +} {} + +do_test malloc2.2.setup { + execsql { + CREATE TABLE def(a, b, c); + CREATE INDEX def_i1 ON def(a); + CREATE INDEX def_i2 ON def(c); + BEGIN; + } + for {set i 0} {$i<20} {incr i} { + execsql { + INSERT INTO def VALUES(randstr(300,300),randstr(300,300),randstr(300,300)); + } + } + execsql { + COMMIT; + } +} {} +do_malloc2_test 2 -sql { + BEGIN; + UPDATE def SET a = randstr(100,100) WHERE (oid%9)==0; + INSERT INTO def SELECT * FROM def WHERE (oid%13)==0; + + CREATE INDEX def_i3 ON def(b); + + UPDATE def SET a = randstr(100,100) WHERE (oid%9)==1; + INSERT INTO def SELECT * FROM def WHERE (oid%13)==1; + + CREATE TABLE def2 AS SELECT * FROM def; + DROP TABLE def; + CREATE TABLE def AS SELECT * FROM def2; + DROP TABLE def2; + + DELETE FROM def WHERE (oid%9)==2; + INSERT INTO def SELECT * FROM def WHERE (oid%13)==2; + COMMIT; +} + +ifcapable tempdb { + do_test malloc2.3.setup { + execsql { + CREATE TEMP TABLE ghi(a, b, c); + BEGIN; + } + for {set i 0} {$i<20} {incr i} { + execsql { + INSERT INTO ghi VALUES(randstr(300,300),randstr(300,300),randstr(300,300)); + } + } + execsql { + COMMIT; + } + } {} + do_malloc2_test 3 -sql { + BEGIN; + CREATE INDEX ghi_i1 ON ghi(a); + UPDATE def SET a = randstr(100,100) WHERE (oid%2)==0; + UPDATE ghi SET a = randstr(100,100) WHERE (oid%2)==0; + COMMIT; + } +} + +############################################################################ +# The test cases below are to increase the code coverage in btree.c and +# pager.c of this test file. The idea is that each malloc() that occurs in +# these two source files should be made to fail at least once. 
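Before those cases, a recap of what each do_malloc2_test iteration above checks may help. Condensed to a single iteration it amounts to the following sketch, with $n standing for the iteration counter and the statement borrowed from test 1.2 (the real helper also tolerates extended error code 3082 and runs integrity_check):

  set sum [cksum db]
  sqlite3_memdebug_fail $n -repeat 1
  catchsql {UPDATE abc SET c = c+10}
  if {[sqlite3_memdebug_fail -1] > 0} {
    # An injected failure was hit: recover, roll back whatever is still
    # open, and verify the database contents are exactly as they were.
    sqlite3_global_recover
    catch {execsql {ROLLBACK}}
    if {[cksum db] ne $sum} {
      error "database changed by a failed statement"
    }
  }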
+# +catchsql { + DROP TABLE ghi; +} +do_malloc2_test 4.1 -sql { + SELECT * FROM def ORDER BY oid ASC; + SELECT * FROM def ORDER BY oid DESC; +} +do_malloc2_test 4.2 -sql { + PRAGMA cache_size = 10; + BEGIN; + + -- This will put about 25 pages on the free list. + DELETE FROM def WHERE 1; + + -- Allocate 32 new root pages. This will exercise the 'extract specific + -- page from the freelist' code when in auto-vacuum mode (see the + -- allocatePage() routine in btree.c). + CREATE TABLE t1(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t2(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t3(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t4(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t5(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t6(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t7(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t8(a UNIQUE, b UNIQUE, c UNIQUE); + + ROLLBACK; +} + +######################################################################## +# Test that the global linked list of database handles works. An assert() +# will fail if there is some problem. +do_test malloc2-5 { + sqlite3 db1 test.db + sqlite3 db2 test.db + sqlite3 db3 test.db + sqlite3 db4 test.db + sqlite3 db5 test.db + + sqlite3_extended_result_codes db1 1 + sqlite3_extended_result_codes db2 1 + sqlite3_extended_result_codes db3 1 + sqlite3_extended_result_codes db4 1 + sqlite3_extended_result_codes db5 1 + + # Close the head of the list: + db5 close + + # Close the end of the list: + db1 close + + # Close a handle from the middle of the list: + db3 close + + # Close the other two. Then open and close one more database, to make + # sure the head of the list was set back to NULL. + db2 close + db4 close + sqlite db1 test.db + db1 close +} {} + +######################################################################## +# Check that if a statement is active sqlite3_global_recover doesn't reset +# the sqlite3_malloc_failed variable. +# +# Update: There is now no sqlite3_malloc_failed variable, so these tests +# are not run. +# +# do_test malloc2-6.1 { +# set ::STMT [sqlite3_prepare $::DB {SELECT * FROM def} -1 DUMMY] +# sqlite3_step $::STMT +# } {SQLITE_ROW} +# do_test malloc2-6.2 { +# sqlite3 db1 test.db +# sqlite_malloc_fail 100 +# catchsql { +# SELECT * FROM def; +# } db1 +# } {1 {out of memory}} +# do_test malloc2-6.3 { +# sqlite3_global_recover +# } {SQLITE_BUSY} +# do_test malloc2-6.4 { +# catchsql { +# SELECT 'hello'; +# } +# } {1 {out of memory}} +# do_test malloc2-6.5 { +# sqlite3_reset $::STMT +# } {SQLITE_OK} +# do_test malloc2-6.6 { +# sqlite3_global_recover +# } {SQLITE_OK} +# do_test malloc2-6.7 { +# catchsql { +# SELECT 'hello'; +# } +# } {0 hello} +# do_test malloc2-6.8 { +# sqlite3_step $::STMT +# } {SQLITE_ERROR} +# do_test malloc2-6.9 { +# sqlite3_finalize $::STMT +# } {SQLITE_SCHEMA} +# do_test malloc2-6.10 { +# db1 close +# } {} + +######################################################################## +# Check that if an in-memory database is being used it is not possible +# to recover from a malloc() failure. +# +# Update: An in-memory database can now survive a malloc() failure, so these +# tests are not run. 
+# +# ifcapable memorydb { +# do_test malloc2-7.1 { +# sqlite3 db1 :memory: +# list +# } {} +# do_test malloc2-7.2 { +# sqlite_malloc_fail 100 +# catchsql { +# SELECT * FROM def; +# } +# } {1 {out of memory}} +# do_test malloc2-7.3 { +# sqlite3_global_recover +# } {SQLITE_ERROR} +# do_test malloc2-7.4 { +# catchsql { +# SELECT 'hello'; +# } +# } {1 {out of memory}} +# do_test malloc2-7.5 { +# db1 close +# } {} +# do_test malloc2-7.6 { +# sqlite3_global_recover +# } {SQLITE_OK} +# do_test malloc2-7.7 { +# catchsql { +# SELECT 'hello'; +# } +# } {0 hello} +# } + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc3.test new file mode 100644 index 0000000..864a219 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc3.test @@ -0,0 +1,657 @@ +# 2005 November 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests to ensure that the library handles malloc() failures +# correctly. The emphasis of these tests are the _prepare(), _step() and +# _finalize() calls. +# +# $Id: malloc3.test,v 1.16 2007/10/03 08:46:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc3 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +#-------------------------------------------------------------------------- +# NOTES ON RECOVERING FROM A MALLOC FAILURE +# +# The tests in this file test the behaviours described in the following +# paragraphs. These tests test the behaviour of the system when malloc() fails +# inside of a call to _prepare(), _step(), _finalize() or _reset(). The +# handling of malloc() failures within ancillary procedures is tested +# elsewhere. +# +# Overview: +# +# Executing a statement is done in three stages (prepare, step and finalize). A +# malloc() failure may occur within any stage. If a memory allocation fails +# during statement preparation, no statement handle is returned. From the users +# point of view the system state is as if _prepare() had never been called. +# +# If the memory allocation fails during the _step() or _finalize() calls, then +# the database may be left in one of two states (after finalize() has been +# called): +# +# * As if the neither _step() nor _finalize() had ever been called on +# the statement handle (i.e. any changes made by the statement are +# rolled back). +# * The current transaction may be rolled back. In this case a hot-journal +# may or may not actually be present in the filesystem. +# +# The caller can tell the difference between these two scenarios by invoking +# _get_autocommit(). +# +# +# Handling of sqlite3_reset(): +# +# If a malloc() fails while executing an sqlite3_reset() call, this is handled +# in the same way as a failure within _finalize(). The statement handle +# is not deleted and must be passed to _finalize() for resource deallocation. +# Attempting to _step() or _reset() the statement after a failed _reset() will +# always return SQLITE_NOMEM. 
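#
# As a concrete illustration of the distinction drawn above (a sketch only,
# not one of the numbered tests; the statement and the counter $n are
# illustrative), the driver at the bottom of this file tells the two
# outcomes apart roughly like this:
#
#   set ac  [sqlite3_get_autocommit $::DB]      ;# autocommit before
#   sqlite3_memdebug_fail $n -repeat 0
#   set rc  [catch {db eval {UPDATE t1 SET a = a+1}} msg]
#   set nac [sqlite3_get_autocommit $::DB]      ;# autocommit after
#   if {$rc && $nac && !$ac} {
#     # autocommit switched back on: the whole transaction was rolled
#     # back, so everything executed since BEGIN must be replayed
#   } elseif {$rc} {
#     # only the failed statement was undone; the transaction survives
#   }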
+# +# +# Other active SQL statements: +# +# The effect of a malloc failure on concurrently executing SQL statements, +# particularly when the statement is executing with READ_UNCOMMITTED set and +# the malloc() failure mandates statement rollback only. Currently, if +# transaction rollback is required, all other vdbe's are aborted. +# +# Non-transient mallocs in btree.c: +# * The Btree structure itself +# * Each BtCursor structure +# +# Mallocs in pager.c: +# readMasterJournal() - Space to read the master journal name +# pager_delmaster() - Space for the entire master journal file +# +# sqlite3pager_open() - The pager structure itself +# sqlite3_pagerget() - Space for a new page +# pager_open_journal() - Pager.aInJournal[] bitmap +# sqlite3pager_write() - For in-memory databases only: history page and +# statement history page. +# pager_stmt_begin() - Pager.aInStmt[] bitmap +# +# None of the above are a huge problem. The most troublesome failures are the +# transient malloc() calls in btree.c, which can occur during the tree-balance +# operation. This means the tree being balanced will be internally inconsistent +# after the malloc() fails. To avoid the corrupt tree being read by a +# READ_UNCOMMITTED query, we have to make sure the transaction or statement +# rollback occurs before sqlite3_step() returns, not during a subsequent +# sqlite3_finalize(). +#-------------------------------------------------------------------------- + +#-------------------------------------------------------------------------- +# NOTES ON TEST IMPLEMENTATION +# +# The tests in this file are implemented differently from those in other +# files. Instead, tests are specified using three primitives: SQL, PREP and +# TEST. Each primitive has a single argument. Primitives are processed in +# the order they are specified in the file. +# +# A TEST primitive specifies a TCL script as it's argument. When a TEST +# directive is encountered the Tcl script is evaluated. Usually, this Tcl +# script contains one or more calls to [do_test]. +# +# A PREP primitive specifies an SQL script as it's argument. When a PREP +# directive is encountered the SQL is evaluated using database connection +# [db]. +# +# The SQL primitives are where the action happens. An SQL primitive must +# contain a single, valid SQL statement as it's argument. When an SQL +# primitive is encountered, it is evaluated one or more times to test the +# behaviour of the system when malloc() fails during preparation or +# execution of said statement. The Nth time the statement is executed, +# the Nth malloc is said to fail. The statement is executed until it +# succeeds, i.e. (M+1) times, where M is the number of mallocs() required +# to prepare and execute the statement. +# +# Each time an SQL statement fails, the driver program (see proc [run_test] +# below) figures out if a transaction has been automatically rolled back. +# If not, it executes any TEST block immediately proceeding the SQL +# statement, then reexecutes the SQL statement with the next value of N. +# +# If a transaction has been automatically rolled back, then the driver +# program executes all the SQL specified as part of SQL or PREP primitives +# between the current SQL statement and the most recent "BEGIN". Any +# TEST block immediately proceeding the SQL statement is evaluated, and +# then the SQL statement reexecuted with the incremented N value. +# +# That make any sense? If not, read the code in [run_test] and it might. 
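#
# A tiny concrete example of such a program may help (illustrative only;
# the table name and test ids are made up, and the real program begins
# after the primitives are defined below):
#
#   PREP { CREATE TABLE t1(x); }
#   TEST 1 { do_test $testid { execsql {SELECT count(*) FROM t1} } {0} }
#   SQL  { INSERT INTO t1 VALUES(1); }
#   TEST 2 { do_test $testid { execsql {SELECT count(*) FROM t1} } {1} }
#
# The driver re-runs TEST 1 after each attempt at the INSERT that dies with
# a simulated malloc() failure, and runs TEST 2 once the INSERT finally
# executes with no failure injected.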
+# +# Extra restriction imposed by the implementation: +# +# * If a PREP block starts a transaction, it must finish it. +# * A PREP block may not close a transaction it did not start. +# +#-------------------------------------------------------------------------- + + +# These procs are used to build up a "program" in global variable +# ::run_test_script. At the end of this file, the proc [run_test] is used +# to execute the program (and all test cases contained therein). +# +set ::run_test_script [list] +proc TEST {id t} {lappend ::run_test_script -test [list $id $t]} +proc PREP {p} {lappend ::run_test_script -prep [string trim $p]} + +# SQL -- +# +# SQL ?-norollback? +# +# Add an 'SQL' primitive to the program (see notes above). If the -norollback +# switch is present, then the statement is not allowed to automatically roll +# back any active transaction if malloc() fails. It must rollback the statement +# transaction only. +# +proc SQL {a1 {a2 ""}} { + # An SQL primitive parameter is a list of two elements, a boolean value + # indicating if the statement may cause transaction rollback when malloc() + # fails, and the sql statement itself. + if {$a2 == ""} { + lappend ::run_test_script -sql [list true [string trim $a1]] + } else { + lappend ::run_test_script -sql [list false [string trim $a2]] + } +} + +# TEST_AUTOCOMMIT -- +# +# A shorthand test to see if a transaction is active or not. The first +# argument - $id - is the integer number of the test case. The second +# argument is either 1 or 0, the expected value of the auto-commit flag. +# +proc TEST_AUTOCOMMIT {id a} { + TEST $id "do_test \$testid { sqlite3_get_autocommit \$::DB } {$a}" +} + +#-------------------------------------------------------------------------- +# Start of test program declaration +# + + +# Warm body test. A malloc() fails in the middle of a CREATE TABLE statement +# in a single-statement transaction on an empty database. Not too much can go +# wrong here. +# +TEST 1 { + do_test $testid { + execsql {SELECT tbl_name FROM sqlite_master;} + } {} +} +SQL { + CREATE TABLE abc(a, b, c); +} +TEST 2 { + do_test $testid.1 { + execsql {SELECT tbl_name FROM sqlite_master;} + } {abc} +} + +# Insert a couple of rows into the table. each insert is in it's own +# transaction. test that the table is unpopulated before running the inserts +# (and hence after each failure of the first insert), and that it has been +# populated correctly after the final insert succeeds. +# +TEST 3 { + do_test $testid.2 { + execsql {SELECT * FROM abc} + } {} +} +SQL {INSERT INTO abc VALUES(1, 2, 3);} +SQL {INSERT INTO abc VALUES(4, 5, 6);} +SQL {INSERT INTO abc VALUES(7, 8, 9);} +TEST 4 { + do_test $testid { + execsql {SELECT * FROM abc} + } {1 2 3 4 5 6 7 8 9} +} + +# Test a CREATE INDEX statement. Because the table 'abc' is so small, the index +# will all fit on a single page, so this doesn't test too much that the CREATE +# TABLE statement didn't test. A few of the transient malloc()s in btree.c +# perhaps. +# +SQL {CREATE INDEX abc_i ON abc(a, b, c);} +TEST 4 { + do_test $testid { + execsql { + SELECT * FROM abc ORDER BY a DESC; + } + } {7 8 9 4 5 6 1 2 3} +} + +# Test a DELETE statement. Also create a trigger and a view, just to make sure +# these statements don't have any obvious malloc() related bugs in them. Note +# that the test above will be executed each time the DELETE fails, so we're +# also testing rollback of a DELETE from a table with an index on it. 
+# +SQL {DELETE FROM abc WHERE a > 2;} +SQL {CREATE TRIGGER abc_t AFTER INSERT ON abc BEGIN SELECT 'trigger!'; END;} +SQL {CREATE VIEW abc_v AS SELECT * FROM abc;} +TEST 5 { + do_test $testid { + execsql { + SELECT name, tbl_name FROM sqlite_master ORDER BY name; + SELECT * FROM abc; + } + } {abc abc abc_i abc abc_t abc abc_v abc_v 1 2 3} +} + +set sql { + BEGIN;DELETE FROM abc; +} +for {set i 1} {$i < 100} {incr i} { + set a $i + set b "String value $i" + set c [string repeat X $i] + append sql "INSERT INTO abc VALUES ($a, '$b', '$c');" +} +append sql {COMMIT;} +PREP $sql + +SQL { + DELETE FROM abc WHERE oid IN (SELECT oid FROM abc ORDER BY random() LIMIT 5); +} +TEST 6 { + do_test $testid.1 { + execsql {SELECT count(*) FROM abc} + } {94} + do_test $testid.2 { + execsql { + SELECT min( + (oid == a) AND 'String value ' || a == b AND a == length(c) + ) FROM abc; + } + } {1} +} +SQL { + DELETE FROM abc WHERE oid IN (SELECT oid FROM abc ORDER BY random() LIMIT 5); +} +TEST 7 { + do_test $testid { + execsql {SELECT count(*) FROM abc} + } {89} + do_test $testid { + execsql { + SELECT min( + (oid == a) AND 'String value ' || a == b AND a == length(c) + ) FROM abc; + } + } {1} +} +SQL { + DELETE FROM abc WHERE oid IN (SELECT oid FROM abc ORDER BY random() LIMIT 5); +} +TEST 9 { + do_test $testid { + execsql {SELECT count(*) FROM abc} + } {84} + do_test $testid { + execsql { + SELECT min( + (oid == a) AND 'String value ' || a == b AND a == length(c) + ) FROM abc; + } + } {1} +} + +set padding [string repeat X 500] +PREP [subst { + DROP TABLE abc; + CREATE TABLE abc(a PRIMARY KEY, padding, b, c); + INSERT INTO abc VALUES(0, '$padding', 2, 2); + INSERT INTO abc VALUES(3, '$padding', 5, 5); + INSERT INTO abc VALUES(6, '$padding', 8, 8); +}] + +TEST 10 { + do_test $testid { + execsql {SELECT a, b, c FROM abc} + } {0 2 2 3 5 5 6 8 8} +} + +SQL {BEGIN;} +SQL {INSERT INTO abc VALUES(9, 'XXXXX', 11, 12);} +TEST_AUTOCOMMIT 11 0 +SQL -norollback {UPDATE abc SET a = a + 1, c = c + 1;} +TEST_AUTOCOMMIT 12 0 +SQL {DELETE FROM abc WHERE a = 10;} +TEST_AUTOCOMMIT 13 0 +SQL {COMMIT;} + +TEST 14 { + do_test $testid.1 { + sqlite3_get_autocommit $::DB + } {1} + do_test $testid.2 { + execsql {SELECT a, b, c FROM abc} + } {1 2 3 4 5 6 7 8 9} +} + +PREP [subst { + DROP TABLE abc; + CREATE TABLE abc(a, padding, b, c); + INSERT INTO abc VALUES(1, '$padding', 2, 3); + INSERT INTO abc VALUES(4, '$padding', 5, 6); + INSERT INTO abc VALUES(7, '$padding', 8, 9); + CREATE INDEX abc_i ON abc(a, padding, b, c); +}] + +TEST 15 { + db eval {PRAGMA cache_size = 10} +} + +SQL {BEGIN;} +SQL -norllbck {INSERT INTO abc (oid, a, padding, b, c) SELECT NULL, * FROM abc} +TEST 16 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 2 4 2 7 2} +} +SQL -norllbck {INSERT INTO abc (oid, a, padding, b, c) SELECT NULL, * FROM abc} +TEST 17 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 4 4 4 7 4} +} +SQL -norllbck {INSERT INTO abc (oid, a, padding, b, c) SELECT NULL, * FROM abc} +TEST 18 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 8 4 8 7 8} +} +SQL -norllbck {INSERT INTO abc (oid, a, padding, b, c) SELECT NULL, * FROM abc} +TEST 19 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 16 4 16 7 16} +} +SQL {COMMIT;} +TEST 21 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 16 4 16 7 16} +} + +SQL {BEGIN;} +SQL {DELETE FROM abc WHERE oid %2} +TEST 22 { + do_test $testid { + execsql {SELECT 
a, count(*) FROM abc GROUP BY a;} + } {1 8 4 8 7 8} +} +SQL {DELETE FROM abc} +TEST 23 { + do_test $testid { + execsql {SELECT * FROM abc} + } {} +} +SQL {ROLLBACK;} +TEST 24 { + do_test $testid { + execsql {SELECT a, count(*) FROM abc GROUP BY a;} + } {1 16 4 16 7 16} +} + +# Test some schema modifications inside of a transaction. These should all +# cause transaction rollback if they fail. Also query a view, to cover a bit +# more code. +# +PREP {DROP VIEW abc_v;} +TEST 25 { + do_test $testid { + execsql { + SELECT name, tbl_name FROM sqlite_master; + } + } {abc abc abc_i abc} +} +SQL {BEGIN;} +SQL {CREATE TABLE def(d, e, f);} +SQL {CREATE TABLE ghi(g, h, i);} +TEST 26 { + do_test $testid { + execsql { + SELECT name, tbl_name FROM sqlite_master; + } + } {abc abc abc_i abc def def ghi ghi} +} +SQL {CREATE VIEW v1 AS SELECT * FROM def, ghi} +SQL {CREATE UNIQUE INDEX ghi_i1 ON ghi(g);} +TEST 27 { + do_test $testid { + execsql { + SELECT name, tbl_name FROM sqlite_master; + } + } {abc abc abc_i abc def def ghi ghi v1 v1 ghi_i1 ghi} +} +SQL {INSERT INTO def VALUES('a', 'b', 'c')} +SQL {INSERT INTO def VALUES(1, 2, 3)} +SQL -norollback {INSERT INTO ghi SELECT * FROM def} +TEST 28 { + do_test $testid { + execsql { + SELECT * FROM def, ghi WHERE d = g; + } + } {a b c a b c 1 2 3 1 2 3} +} +SQL {COMMIT} +TEST 29 { + do_test $testid { + execsql { + SELECT * FROM v1 WHERE d = g; + } + } {a b c a b c 1 2 3 1 2 3} +} + +# Test a simple multi-file transaction +# +file delete -force test2.db +SQL {ATTACH 'test2.db' AS aux;} +SQL {BEGIN} +SQL {CREATE TABLE aux.tbl2(x, y, z)} +SQL {INSERT INTO tbl2 VALUES(1, 2, 3)} +SQL {INSERT INTO def VALUES(4, 5, 6)} +TEST 30 { + do_test $testid { + execsql { + SELECT * FROM tbl2, def WHERE d = x; + } + } {1 2 3 1 2 3} +} +SQL {COMMIT} +TEST 31 { + do_test $testid { + execsql { + SELECT * FROM tbl2, def WHERE d = x; + } + } {1 2 3 1 2 3} +} + +# Test what happens when a malloc() fails while there are other active +# statements. This changes the way sqlite3VdbeHalt() works. +TEST 32 { + if {![info exists ::STMT32]} { + set sql "SELECT name FROM sqlite_master" + set ::STMT32 [sqlite3_prepare $::DB $sql -1 DUMMY] + do_test $testid { + sqlite3_step $::STMT32 + } {SQLITE_ROW} + } +} +SQL BEGIN +TEST 33 { + do_test $testid { + execsql {SELECT * FROM ghi} + } {a b c 1 2 3} +} +SQL -norollback { + -- There is a unique index on ghi(g), so this statement may not cause + -- an automatic ROLLBACK. Hence the "-norollback" switch. 
+ INSERT INTO ghi SELECT '2'||g, h, i FROM ghi; +} +TEST 34 { + if {[info exists ::STMT32]} { + do_test $testid { + sqlite3_finalize $::STMT32 + } {SQLITE_OK} + unset ::STMT32 + } +} +SQL COMMIT + +# +# End of test program declaration +#-------------------------------------------------------------------------- + +proc run_test {arglist iRepeat {pcstart 0} {iFailStart 1}} { + if {[llength $arglist] %2} { + error "Uneven number of arguments to TEST" + } + + for {set i 0} {$i < $pcstart} {incr i} { + set k2 [lindex $arglist [expr 2 * $i]] + set v2 [lindex $arglist [expr 2 * $i + 1]] + set ac [sqlite3_get_autocommit $::DB] ;# Auto-Commit + switch -- $k2 { + -sql {db eval [lindex $v2 1]} + -prep {db eval $v2} + } + set nac [sqlite3_get_autocommit $::DB] ;# New Auto-Commit + if {$ac && !$nac} {set begin_pc $i} + } + + db rollback_hook [list incr ::rollback_hook_count] + + set iFail $iFailStart + set pc $pcstart + while {$pc*2 < [llength $arglist]} { + + # Id of this iteration: + set iterid "(pc $pc).(iFail $iFail)" + set k [lindex $arglist [expr 2 * $pc]] + set v [lindex $arglist [expr 2 * $pc + 1]] + + switch -- $k { + + -test { + foreach {id script} $v {} + set testid "malloc3-(test $id).$iterid" + eval $script + incr pc + } + + -sql { + set ::rollback_hook_count 0 + + set ac [sqlite3_get_autocommit $::DB] ;# Auto-Commit + sqlite3_memdebug_fail $iFail -repeat 0 + set rc [catch {db eval [lindex $v 1]} msg] ;# True error occurs + set nac [sqlite3_get_autocommit $::DB] ;# New Auto-Commit + + if {$rc != 0 && $nac && !$ac} { + # Before [db eval] the auto-commit flag was clear. Now it + # is set. Since an error occured we assume this was not a + # commit - therefore a rollback occured. Check that the + # rollback-hook was invoked. + do_test malloc3-rollback_hook.$iterid { + set ::rollback_hook_count + } {1} + } + + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] + if {$rc == 0} { + # Successful execution of sql. The number of failed malloc() + # calls should be equal to the number of benign failures. + # Otherwise a malloc() failed and the error was not reported. + # + if {$nFail!=$nBenign} { + error "Unreported malloc() failure" + } + + if {$ac && !$nac} { + # Before the [db eval] the auto-commit flag was set, now it + # is clear. We can deduce that a "BEGIN" statement has just + # been successfully executed. + set begin_pc $pc + } + + incr pc + set iFail 1 + integrity_check "malloc3-(integrity).$iterid" + } elseif {[regexp {.*out of memory} $msg] || [db errorcode] == 3082} { + # Out of memory error, as expected. + # + integrity_check "malloc3-(integrity).$iterid" + incr iFail + if {$nac && !$ac} { + + if {![lindex $v 0] && [db errorcode] != 3082} { + # error "Statement \"[lindex $v 1]\" caused a rollback" + } + + for {set i $begin_pc} {$i < $pc} {incr i} { + set k2 [lindex $arglist [expr 2 * $i]] + set v2 [lindex $arglist [expr 2 * $i + 1]] + set catchupsql "" + switch -- $k2 { + -sql {set catchupsql [lindex $v2 1]} + -prep {set catchupsql $v2} + } + db eval $catchupsql + } + } + } else { + error $msg + } + + while {[lindex $arglist [expr 2 * ($pc -1)]] == "-test"} { + incr pc -1 + } + } + + -prep { + db eval $v + incr pc + } + + default { error "Unknown switch: $k" } + } + } +} + +# Turn of the Tcl interface's prepared statement caching facility. Then +# run the tests with "persistent" malloc failures. +sqlite3_extended_result_codes db 1 +db cache size 0 +run_test $::run_test_script 1 + +# Close and reopen the db. 
+db close +file delete -force test.db test.db-journal test2.db test2.db-journal +sqlite3 db test.db +sqlite3_extended_result_codes db 1 +set ::DB [sqlite3_connection_pointer db] + +# Turn of the Tcl interface's prepared statement caching facility in +# the new connnection. Then run the tests with "transient" malloc failures. +db cache size 0 +run_test $::run_test_script 0 + +sqlite3_memdebug_fail -1 +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc4.test new file mode 100644 index 0000000..78777e6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc4.test @@ -0,0 +1,193 @@ +# 2005 November 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests to ensure that the library handles malloc() failures +# correctly. The emphasis in this file is on sqlite3_column_XXX() APIs. +# +# $Id: malloc4.test,v 1.9 2007/09/03 16:12:10 drh Exp $ + +#--------------------------------------------------------------------------- +# NOTES ON EXPECTED BEHAVIOUR +# +# [193] When a memory allocation failure occurs during sqlite3_column_name(), +# sqlite3_column_name16(), sqlite3_column_decltype(), or +# sqlite3_column_decltype16() the function shall return NULL. +# +#--------------------------------------------------------------------------- + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +ifcapable !memdebug { + puts "Skipping malloc4 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +ifcapable !utf16 { + finish_test + return +} + +proc do_stmt_test {id sql} { + set ::sql $sql + set go 1 + for {set n 0} {$go} {incr n} { + set testid "malloc4-$id.$n" + + # Prepare the statement + do_test ${testid}.1 { + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + expr [string length $::STMT] > 0 + } {1} + + # Set the Nth malloc() to fail. + sqlite3_memdebug_fail $n -repeat 0 + + # Test malloc failure in the _name(), _name16(), decltype() and + # decltype16() APIs. Calls that occur after the malloc() failure should + # return NULL. No error is raised though. + # + # ${testid}.2.1 - Call _name() + # ${testid}.2.2 - Call _name16() + # ${testid}.2.3 - Call _name() + # ${testid}.2.4 - Check that the return values of the above three calls are + # consistent with each other and with the simulated + # malloc() failures. + # + # Because the code that implements the _decltype() and _decltype16() APIs + # is the same as the _name() and _name16() implementations, we don't worry + # about explicitly testing them. 
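    #
    # The mf1/mf2 pattern used in the tests below can be read as follows:
    # [sqlite3_memdebug_pending] appears to go negative once the scheduled
    # malloc() failure has actually been consumed, so the check
    #
    #   set mf1 [expr [sqlite3_memdebug_pending] < 0]   ;# failed already?
    #   set ::name8 [sqlite3_column_name $::STMT 0]
    #   set mf2 [expr [sqlite3_memdebug_pending] < 0]   ;# failed during call?
    #   expr {$mf1 == $mf2 || $::name8 == ""}
    #
    # passes either when no failure occurred inside the call, or when a
    # failure did occur and the API returned NULL (an empty string at the
    # Tcl level), which is what behaviour note [193] above requires.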
+ # + do_test ${testid}.2.1 { + set mf1 [expr [sqlite3_memdebug_pending] < 0] + set ::name8 [sqlite3_column_name $::STMT 0] + set mf2 [expr [sqlite3_memdebug_pending] < 0] + expr {$mf1 == $mf2 || $::name8 == ""} + } {1} + do_test ${testid}.2.2 { + set mf1 [expr [sqlite3_memdebug_pending] < 0] + set ::name16 [sqlite3_column_name16 $::STMT 0] + set ::name16 [encoding convertfrom unicode $::name16] + set ::name16 [string range $::name16 0 end-1] + set mf2 [expr [sqlite3_memdebug_pending] < 0] + expr {$mf1 == $mf2 || $::name16 == ""} + } {1} + do_test ${testid}.2.3 { + set mf1 [expr [sqlite3_memdebug_pending] < 0] + set ::name8_2 [sqlite3_column_name $::STMT 0] + set mf2 [expr [sqlite3_memdebug_pending] < 0] + expr {$mf1 == $mf2 || $::name8_2 == ""} + } {1} + set ::mallocFailed [expr [sqlite3_memdebug_pending] < 0] + do_test ${testid}.2.4 { + expr { + $::name8 == $::name8_2 && $::name16 == $::name8 && !$::mallocFailed || + $::name8 == $::name8_2 && $::name16 == "" && $::mallocFailed || + $::name8 == $::name16 && $::name8_2 == "" && $::mallocFailed || + $::name8_2 == $::name16 && $::name8 == "" && $::mallocFailed + } + } {1} + + # Step the statement so that we can call _text() and _text16(). Before + # running sqlite3_step(), make sure that malloc() is not about to fail. + # Memory allocation failures that occur within sqlite3_step() are tested + # elsewhere. + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 + do_test ${testid}.3 { + sqlite3_step $::STMT + } {SQLITE_ROW} + sqlite3_memdebug_fail $mf + + # Test for malloc() failures within _text() and _text16(). + # + do_test ${testid}.4.1 { + set ::text8 [sqlite3_column_text $::STMT 0] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] + expr {$mf==0 || $::text8 == ""} + } {1} + do_test ${testid}.4.2 { + set ::text16 [sqlite3_column_text16 $::STMT 0] + set ::text16 [encoding convertfrom unicode $::text16] + set ::text16 [string range $::text16 0 end-1] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] + expr {$mf==0 || $::text16 == ""} + } {1} + do_test ${testid}.4.3 { + set ::text8_2 [sqlite3_column_text $::STMT 0] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] + expr {$mf==0 || $::text8_2 == "" || ($::text16 == "" && $::text8 != "")} + } {1} + + # Test for malloc() failures within _int(), _int64() and _real(). The only + # way this can occur is if the string has to be translated from UTF-16 to + # UTF-8 before being converted to a numeric value. + do_test ${testid}.4.4.1 { + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 + sqlite3_column_text16 $::STMT 0 + sqlite3_memdebug_fail $mf + sqlite3_column_int $::STMT 0 + } {0} + do_test ${testid}.4.5 { + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 + sqlite3_column_text16 $::STMT 0 + sqlite3_memdebug_fail $mf + sqlite3_column_int64 $::STMT 0 + } {0} + + do_test ${testid}.4.6 { + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 + sqlite3_column_text16 $::STMT 0 + sqlite3_memdebug_fail $mf + sqlite3_column_double $::STMT 0 + } {0.0} + + set mallocFailedAfterStep [expr \ + [sqlite3_memdebug_pending] < 0 && !$::mallocFailed + ] + + sqlite3_memdebug_fail -1 + # Test that if a malloc() failed the next call to sqlite3_step() returns + # SQLITE_ERROR. If malloc() did not fail, it should return SQLITE_DONE. + # + do_test ${testid}.5 { + sqlite3_step $::STMT + } [expr {$mallocFailedAfterStep ? 
"SQLITE_ERROR" : "SQLITE_DONE"}] + + do_test ${testid}.6 { + sqlite3_finalize $::STMT + } [expr {$mallocFailedAfterStep ? "SQLITE_NOMEM" : "SQLITE_OK"}] + + if {$::mallocFailed == 0 && $mallocFailedAfterStep == 0} { + sqlite3_memdebug_fail -1 + set go 0 + } + } +} + +execsql { + CREATE TABLE tbl( + the_first_reasonably_long_column_name that_also_has_quite_a_lengthy_type + ); + INSERT INTO tbl VALUES( + 'An extra long string. Far too long to be stored in NBFS bytes.' + ); +} + +do_stmt_test 1 "SELECT * FROM tbl" + +sqlite3_memdebug_fail -1 +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc5.test new file mode 100644 index 0000000..6bb119b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc5.test @@ -0,0 +1,396 @@ +# 2005 November 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains test cases focused on the two memory-management APIs, +# sqlite3_soft_heap_limit() and sqlite3_release_memory(). +# +# $Id: malloc5.test,v 1.17 2007/10/03 09:43:55 danielk1977 Exp $ + +#--------------------------------------------------------------------------- +# NOTES ON EXPECTED BEHAVIOUR +# +#--------------------------------------------------------------------------- + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc5 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +# Skip these tests if OMIT_MEMORY_MANAGEMENT was defined at compile time. +ifcapable !memorymanage { + finish_test + return +} + +sqlite3_soft_heap_limit 0 +sqlite3 db test.db + +do_test malloc5-1.1 { + # Simplest possible test. Call sqlite3_release_memory when there is exactly + # one unused page in a single pager cache. This test case set's the + # value of the ::pgalloc variable, which is used in subsequent tests. + # + # Note: Even though executing this statement on an empty database + # modifies 2 pages (the root of sqlite_master and the new root page), + # the sqlite_master root (page 1) is never freed because the btree layer + # retains a reference to it for the entire transaction. + execsql { + PRAGMA auto_vacuum=OFF; + BEGIN; + CREATE TABLE abc(a, b, c); + } + set ::pgalloc [sqlite3_release_memory] + expr $::pgalloc > 0 +} {1} +do_test malloc5-1.2 { + # Test that the transaction started in the above test is still active. + # Because the page freed had been written to, freeing it required a + # journal sync and exclusive lock on the database file. Test the file + # appears to be locked. + sqlite3 db2 test.db + catchsql { + SELECT * FROM abc; + } db2 +} {1 {database is locked}} +do_test malloc5-1.3 { + # Again call [sqlite3_release_memory] when there is exactly one unused page + # in the cache. The same amount of memory is required, but no journal-sync + # or exclusive lock should be established. + execsql { + COMMIT; + BEGIN; + SELECT * FROM abc; + } + sqlite3_release_memory +} $::pgalloc +do_test malloc5-1.4 { + # Database should not be locked this time. 
+ catchsql { + SELECT * FROM abc; + } db2 +} {0 {}} +do_test malloc5-1.5 { + # Manipulate the cache so that it contains two unused pages. One requires + # a journal-sync to free, the other does not. + db2 close + execsql { + SELECT * FROM abc; + CREATE TABLE def(d, e, f); + } + sqlite3_release_memory 500 +} $::pgalloc +do_test malloc5-1.6 { + # Database should not be locked this time. The above test case only + # requested 500 bytes of memory, which can be obtained by freeing the page + # that does not require an fsync(). + sqlite3 db2 test.db + catchsql { + SELECT * FROM abc; + } db2 +} {0 {}} +do_test malloc5-1.7 { + # Release another 500 bytes of memory. This time we require a sync(), + # so the database file will be locked afterwards. + db2 close + sqlite3_release_memory 500 +} $::pgalloc +do_test malloc5-1.8 { + sqlite3 db2 test.db + catchsql { + SELECT * FROM abc; + } db2 +} {1 {database is locked}} +do_test malloc5-1.9 { + execsql { + COMMIT; + } +} {} + +do_test malloc5-2.1 { + # Put some data in tables abc and def. Both tables are still wholly + # contained within their root pages. + execsql { + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + INSERT INTO def VALUES(7, 8, 9); + INSERT INTO def VALUES(10,11,12); + } +} {} +do_test malloc5-2.2 { + # Load the root-page for table def into the cache. Then query table abc. + # Halfway through the query call sqlite3_release_memory(). The goal of this + # test is to make sure we don't free pages that are in use (specifically, + # the root of table abc). + set nRelease 0 + execsql { + BEGIN; + SELECT * FROM def; + } + set data [list] + db eval {SELECT * FROM abc} { + incr nRelease [sqlite3_release_memory] + lappend data $a $b $c + } + execsql { + COMMIT; + } + list $nRelease $data +} [list $pgalloc [list 1 2 3 4 5 6]] + +do_test malloc5-3.1 { + # Simple test to show that if two pagers are opened from within this + # thread, memory is freed from both when sqlite3_release_memory() is + # called. + execsql { + BEGIN; + SELECT * FROM abc; + } + execsql { + SELECT * FROM sqlite_master; + BEGIN; + SELECT * FROM def; + } db2 + sqlite3_release_memory +} [expr $::pgalloc * 2] +do_test malloc5-3.2 { + concat \ + [execsql {SELECT * FROM abc; COMMIT}] \ + [execsql {SELECT * FROM def; COMMIT} db2] +} {1 2 3 4 5 6 7 8 9 10 11 12} + +db2 close +puts "Highwater mark: [sqlite3_memory_highwater]" + +# The following two test cases each execute a transaction in which +# 10000 rows are inserted into table abc. The first test case is used +# to ensure that more than 1MB of dynamic memory is used to perform +# the transaction. +# +# The second test case sets the "soft-heap-limit" to 100,000 bytes (0.1 MB) +# and tests to see that this limit is not exceeded at any point during +# transaction execution. +# +# Before executing malloc5-4.* we save the value of the current soft heap +# limit in variable ::soft_limit. The original value is restored after +# running the tests. 
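In outline, the save-limit / run / restore-limit pattern that the next block of tests follows looks like this (a sketch with illustrative numbers, not one of the numbered tests; only harness commands already used in this file appear):

  set prev [sqlite3_soft_heap_limit -1]   ;# remember the current limit
  sqlite3_soft_heap_limit 100000          ;# impose a 100KB soft limit
  sqlite3_memory_highwater 1              ;# reset the high-water mark
  execsql { SELECT count(*) FROM abc }    ;# do some work under the limit
  puts "high-water: [sqlite3_memory_highwater] bytes"
  sqlite3_release_memory 5000             ;# ask SQLite to shed about 5000 bytes of cache
  sqlite3_soft_heap_limit $prev           ;# restore the original limit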
+# +set ::soft_limit [sqlite3_soft_heap_limit -1] +execsql {PRAGMA cache_size=2000} +do_test malloc5-4.1 { + execsql {BEGIN;} + execsql {DELETE FROM abc;} + for {set i 0} {$i < 10000} {incr i} { + execsql "INSERT INTO abc VALUES($i, $i, '[string repeat X 100]');" + } + execsql {COMMIT;} + set nMaxBytes [sqlite3_memory_highwater 1] + puts -nonewline " (Highwater mark: $nMaxBytes) " + expr $nMaxBytes > 1000000 +} {1} +do_test malloc5-4.2 { + sqlite3_release_memory + sqlite3_soft_heap_limit 100000 + sqlite3_memory_highwater 1 + execsql {BEGIN;} + for {set i 0} {$i < 10000} {incr i} { + execsql "INSERT INTO abc VALUES($i, $i, '[string repeat X 100]');" + } + execsql {COMMIT;} + set nMaxBytes [sqlite3_memory_highwater 1] + puts -nonewline " (Highwater mark: $nMaxBytes) " + + # We used to test ($nMaxBytes<100000), because the soft-heap-limit is + # 100000 bytes. But if an allocation that will exceed the + # soft-heap-limit is requested from within the only pager instance in + # the system, then there is no way to free memory and the limit has to + # be exceeded. An exception is memory allocated to store actual page + # data (the code contains a special case for this). + # + # This is not a problem because all allocations apart from those + # used to store cached page data are both small and transient. + # + # Summary: the actual high-water mark for memory usage may be slightly + # higher than the soft-heap-limit. The specific allocations that cause + # the problem are the calls to sqlite3_malloc() inserted into selected + # sqlite3OsXXX() functions in test builds. + # + expr $nMaxBytes <= 100100 +} {1} +do_test malloc5-4.3 { + # Check that the content of table abc is at least roughly as expected. + execsql { + SELECT count(*), sum(a), sum(b) FROM abc; + } +} [list 20000 [expr int(20000.0 * 4999.5)] [expr int(20000.0 * 4999.5)]] + +# Restore the soft heap limit. +sqlite3_soft_heap_limit $::soft_limit + +# Test that there are no problems calling sqlite3_release_memory when +# there are open in-memory databases. +# +# At one point these tests would cause a seg-fault. +# +do_test malloc5-5.1 { + db close + sqlite3 db :memory: + execsql { + BEGIN; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES('abcdefghi', 1234567890, NULL); + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + sqlite3_release_memory +} 0 +do_test malloc5-5.2 { + sqlite3_soft_heap_limit 5000 + execsql { + COMMIT; + PRAGMA temp_store = memory; + SELECT * FROM abc ORDER BY a; + } + expr 1 +} {1} +sqlite3_soft_heap_limit $::soft_limit + +#------------------------------------------------------------------------- +# The following test cases (malloc5-6.*) test the new global LRU list +# used to determine the pages to recycle when sqlite3_release_memory is +# called and there is more than one pager open. +# +proc nPage {db} { + set bt [btree_from_db $db] + array set stats [btree_pager_stats $bt] + set stats(page) +} +db close +file delete -force test.db test.db-journal test2.db test2.db-journal + +# This block of test-cases (malloc5-6.1.*) prepares two database files +# for the subsequent tests. 
+do_test malloc5-6.1.1 { + sqlite3 db test.db + execsql { + PRAGMA page_size=1024; + PRAGMA default_cache_size=10; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc VALUES(randstr(50,50), randstr(75,75), randstr(100,100)); + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + COMMIT; + } + copy_file test.db test2.db + sqlite3 db2 test2.db + list \ + [expr ([file size test.db]/1024)>20] [expr ([file size test2.db]/1024)>20] +} {1 1} +do_test malloc5-6.1.2 { + list [execsql {PRAGMA cache_size}] [execsql {PRAGMA cache_size} db2] +} {10 10} + +do_test malloc5-6.2.1 { + execsql { SELECT * FROM abc } db2 + execsql {SELECT * FROM abc} db + list [nPage db] [nPage db2] +} {10 10} +do_test malloc5-6.2.2 { + # If we now try to reclaim some memory, it should come from the db2 cache. + sqlite3_release_memory 3000 + list [nPage db] [nPage db2] +} {10 7} +do_test malloc5-6.2.3 { + # Access the db2 cache again, so that all the db2 pages have been used + # more recently than all the db pages. Then try to reclaim 3000 bytes. + # This time, 3 pages should be pulled from the db cache. + execsql { SELECT * FROM abc } db2 + sqlite3_release_memory 3000 + list [nPage db] [nPage db2] +} {7 10} + + +do_test malloc5-6.3.1 { + # Now open a transaction and update 2 pages in the db2 cache. Then + # do a SELECT on the db cache so that all the db pages are more recently + # used than the db2 pages. When we try to free memory, SQLite should + # free the non-dirty db2 pages, then the db pages, then finally use + # sync() to free up the dirty db2 pages. The only page that cannot be + # freed is page1 of db2. Because there is an open transaction, the + # btree layer holds a reference to page 1 in the db2 cache. + execsql { + BEGIN; + UPDATE abc SET c = randstr(100,100) + WHERE rowid = 1 OR rowid = (SELECT max(rowid) FROM abc); + } db2 + execsql { SELECT * FROM abc } db + list [nPage db] [nPage db2] +} {10 10} +do_test malloc5-6.3.2 { + # Try to release 7700 bytes. This should release all the + # non-dirty pages held by db2. + sqlite3_release_memory [expr 7*1100] + list [nPage db] [nPage db2] +} {10 3} +do_test malloc5-6.3.3 { + # Try to release another 1000 bytes. This should come from the db + # cache, since all three pages held by db2 are either in-use or dirty. + sqlite3_release_memory 1000 + list [nPage db] [nPage db2] +} {9 3} +do_test malloc5-6.3.4 { + # Now release 9900 more (about 9 pages worth). This should expunge + # the rest of the db cache. But the db2 cache remains intact, because + # SQLite tries to avoid calling sync(). + sqlite3_release_memory 9900 + list [nPage db] [nPage db2] +} {0 3} +do_test malloc5-6.3.5 { + # But if we are really insistent, SQLite will consent to call sync() + # if there is no other option.
+ sqlite3_release_memory 1000 + list [nPage db] [nPage db2] +} {0 2} +do_test malloc5-6.3.6 { + # The referenced page (page 1 of the db2 cache) will not be freed no + # matter how much memory we ask for: + sqlite3_release_memory 31459 + list [nPage db] [nPage db2] +} {0 1} + +db2 close + +sqlite3_soft_heap_limit $::soft_limit +finish_test +catch {db close} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc6.test new file mode 100644 index 0000000..84ae619 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc6.test @@ -0,0 +1,55 @@ +# 2006 June 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file attempts to check the library in an out-of-memory situation. +# +# $Id: malloc6.test,v 1.4 2007/10/03 08:46:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc6 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} +source $testdir/malloc_common.tcl + + +set sqlite_os_trace 0 +do_malloc_test malloc6-1 -tclprep { + db close +} -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 +} -sqlbody { + DROP TABLE IF EXISTS t1; + CREATE TABLE IF NOT EXISTS t1( + a int, b float, c double, d text, e varchar(20), + primary key(a,b,c) + ); + CREATE TABLE IF NOT EXISTS t1( + a int, b float, c double, d text, e varchar(20), + primary key(a,b,c) + ); + DROP TABLE IF EXISTS t1; +} + +# Ensure that no file descriptors were leaked. +do_test malloc6-1.X { + catch {db close} + set sqlite_open_file_count +} {0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc7.test new file mode 100644 index 0000000..5475dfc --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc7.test @@ -0,0 +1,48 @@ +# 2006 July 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains additional out-of-memory checks (see malloc.tcl) +# added to expose a bug in out-of-memory handling for sqlite3_prepare16(). +# +# $Id: malloc7.test,v 1.4 2007/09/03 16:12:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc7 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} +source $testdir/malloc_common.tcl + + +do_malloc_test malloc7-1 -sqlprep { + CREATE TABLE t1(a,b,c,d); + CREATE INDEX i1 ON t1(b,c); +} -tclbody { + set sql16 [encoding convertto unicode "SELECT * FROM sqlite_master"] + append sql16 "\00\00" + set nbyte [string length $sql16] + set ::STMT [sqlite3_prepare16 db $sql16 $nbyte DUMMY] + sqlite3_finalize $::STMT +} + + +# Ensure that no file descriptors were leaked. 
+do_test malloc-99.X { + catch {db close} + set sqlite_open_file_count +} {0} + +puts open-file-count=$sqlite_open_file_count +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc8.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc8.test new file mode 100644 index 0000000..55b1b49 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc8.test @@ -0,0 +1,95 @@ +# 2007 April 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains additional out-of-memory checks (see malloc.tcl) +# added to expose a bug in out-of-memory handling for sqlite3_value_text() +# +# $Id: malloc8.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc8 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +source $testdir/malloc_common.tcl + +# The setup is a database with UTF-16 encoding that contains a single +# large string. We will be running lots of queries against this +# database. Because we will be extracting the string as UTF-8, there +# is a type conversion that occurs and thus an opportunity for malloc() +# to fail and for sqlite3_value_text() to return 0 even though +# sqlite3_value_type() returns SQLITE_TEXT. +# + +do_malloc_test malloc8-1 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { + SELECT lower(a), upper(a), quote(a), trim(a), trim('x',a) FROM t1; +} +do_malloc_test malloc8-2 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { + SELECT replace(a,'x','y'), replace('x',a,'y'), replace('x','y',a) + FROM t1; +} +do_malloc_test malloc8-3 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { + SELECT length(a), substr(a, 4, 4) FROM t1; +} +ifcapable datetime { + do_malloc_test malloc8-4 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); + } -sqlbody { + SELECT julianday(a,a) FROM t1; + } +} +do_malloc_test malloc8-5 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { + SELECT 1 FROM t1 WHERE a LIKE 'hello' ESCAPE NULL; +} +do_malloc_test malloc8-6 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { + SELECT hex(randomblob(100)); +} + +# Ensure that no file descriptors were leaked. 
+do_test malloc-99.X { + catch {db close} + set sqlite_open_file_count +} {0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc9.test b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc9.test new file mode 100644 index 0000000..d9a5afc --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc9.test @@ -0,0 +1,51 @@ +# 2007 April 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains additional out-of-memory checks (see malloc.tcl) +# added to expose a bug in out-of-memory handling for sqlite3_prepare(). +# +# $Id: malloc9.test,v 1.3 2007/09/03 16:12:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping malloc9 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +source $testdir/malloc_common.tcl + +do_malloc_test 1 -tclprep { + set sql {CREATE TABLE t1(x)} + set sqlbytes [string length $sql] + append sql {; INSERT INTO t1 VALUES(1)} +} -tclbody { + if {[catch {sqlite3_prepare db $sql $sqlbytes TAIL} STMT]} { + set msg $STMT + set STMT {} + error $msg + } +} -cleanup { + if {$STMT!=""} { + sqlite3_finalize $STMT + } +} + +# Ensure that no file descriptors were leaked. +do_test malloc-99.X { + catch {db close} + set sqlite_open_file_count +} {0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/mallocA.test b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocA.test new file mode 100644 index 0000000..8c7705f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocA.test @@ -0,0 +1,69 @@ +# 2007 April 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains additional out-of-memory checks (see malloc.tcl). +# +# $Id: mallocA.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping mallocA tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +source $testdir/malloc_common.tcl + +# Construct a test database +# +file delete -force test.db.bu +db eval { + CREATE TABLE t1(a COLLATE NOCASE,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(1,2,4); + INSERT INTO t1 VALUES(2,3,4); + CREATE INDEX t1i1 ON t1(a); + CREATE INDEX t1i2 ON t1(b,c); + CREATE TABLE t2(x,y,z); +} +db close +file copy test.db test.db.bu + + +do_malloc_test mallocA-1 -testdb test.db.bu -sqlbody { + ANALYZE +} +ifcapable reindex { + do_malloc_test mallocA-2 -testdb test.db.bu -sqlbody { + REINDEX; + } + do_malloc_test mallocA-3 -testdb test.db.bu -sqlbody { + REINDEX t1; + } + do_malloc_test mallocA-4 -testdb test.db.bu -sqlbody { + REINDEX main.t1; + } + do_malloc_test mallocA-5 -testdb test.db.bu -sqlbody { + REINDEX nocase; + } +} + +# Ensure that no file descriptors were leaked. 
+do_test malloc-99.X { + catch {db close} + set sqlite_open_file_count +} {0} + +file delete -force test.db.bu +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/mallocB.test b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocB.test new file mode 100644 index 0000000..e56c8b1 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocB.test @@ -0,0 +1,47 @@ +# 2007 May 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file contains additional out-of-memory checks (see malloc.tcl). +# These were all discovered by fuzzy generation of SQL. Apart from +# that they have little in common. +# +# +# $Id: mallocB.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping mallocB tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} +source $testdir/malloc_common.tcl + +do_malloc_test mallocB-1 -sqlbody {SELECT - 456} +do_malloc_test mallocB-2 -sqlbody {SELECT - 456.1} +do_malloc_test mallocB-3 -sqlbody {SELECT random()} +do_malloc_test mallocB-4 -sqlbody {SELECT zeroblob(1000)} +ifcapable subquery { + do_malloc_test mallocB-5 -sqlbody {SELECT * FROM (SELECT 1) GROUP BY 1;} +} + +# The following test checks that there are no resource leaks following a +# malloc() failure in sqlite3_set_auxdata(). +# +# Note: This problem was not discovered by fuzzy generation of SQL. Not +# that it really matters. +# +do_malloc_test mallocB-6 -sqlbody { SELECT test_auxdata('hello world'); } + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/mallocC.test b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocC.test new file mode 100644 index 0000000..54e4c5f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocC.test @@ -0,0 +1,134 @@ +# 2007 Aug 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests aspects of the malloc failure while parsing +# CREATE TABLE statements in auto_vacuum mode. +# +# $Id: mallocC.test,v 1.7 2007/10/03 08:46:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug||!compound { + puts "Skipping mallocC tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +# Generate a checksum based on the contents of the database. If the +# checksum of two databases is the same, and the integrity-check passes +# for both, the two databases are identical. 
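+# For instance, once [cksum] is defined it can be used to check that an aborted operation left the database untouched. A sketch of that usage (the table name zzz_scratch is illustrative only): +# +#   set before [cksum db] +#   catch { db eval {BEGIN; CREATE TABLE zzz_scratch(a); ROLLBACK} } +#   if {[cksum db] ne $before} { error "database changed unexpectedly" } +#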
+# +proc cksum {db} { + set ret [list] + ifcapable tempdb { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT name FROM sqlite_temp_master WHERE type = 'table' UNION + SELECT 'sqlite_master' UNION + SELECT 'sqlite_temp_master' + } + } else { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT 'sqlite_master' + } + } + set tbllist [$db eval $sql] + set txt {} + foreach tbl $tbllist { + append txt [$db eval "SELECT * FROM $tbl"] + } + # puts txt=$txt + return [md5 $txt] +} + +proc do_mallocC_test {tn args} { + array set ::mallocopts $args + set sum [cksum db] + + for {set ::n 1} {true} {incr ::n} { + + # Run the SQL. Malloc number $::n is set to fail. A malloc() failure + # may or may not be reported. + sqlite3_memdebug_fail $::n -repeat 1 + do_test mallocC-$tn.$::n.1 { + set res [catchsql [string trim $::mallocopts(-sql)]] + set rc [expr { + 0==[string compare $res {1 {out of memory}}] || + [db errorcode] == 3082 || + 0==[lindex $res 0] + }] + if {$rc!=1} { + puts "Error: $res" + } + set rc + } {1} + + # If $::n is greater than the number of malloc() calls required to + # execute the SQL, then this test is finished. Break out of the loop. + set nFail [sqlite3_memdebug_fail -1] + if {$nFail==0} { + break + } + + # Recover from the malloc failure. + # + # Update: The new malloc() failure handling means that a transaction may + # still be active even if a malloc() has failed. But when these tests were + # written this was not the case. So do a manual ROLLBACK here so that the + # tests pass. + do_test mallocC-$tn.$::n.2 { + catch { + execsql { + ROLLBACK; + } + } + expr 0 + } {0} + + # Checksum the database. + #do_test mallocC-$tn.$::n.3 { + # cksum db + #} $sum + + #integrity_check mallocC-$tn.$::n.4 + if {$::nErr>1} return + } + unset ::mallocopts +} + +sqlite3_extended_result_codes db 1 + +execsql { + PRAGMA auto_vacuum=1; + CREATE TABLE t0(a, b, c); +} +do_mallocC_test 1 -sql { + BEGIN; + -- Allocate 32 new root pages. This will exercise the 'extract specific + -- page from the freelist' code when in auto-vacuum mode (see the + -- allocatePage() routine in btree.c). + CREATE TABLE t1(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t2(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t3(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t4(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t5(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t6(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t7(a UNIQUE, b UNIQUE, c UNIQUE); + CREATE TABLE t8(a UNIQUE, b UNIQUE, c UNIQUE); + + ROLLBACK; +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/mallocD.test b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocD.test new file mode 100644 index 0000000..f816bdc --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/mallocD.test @@ -0,0 +1,61 @@ +# 2007 Aug 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: mallocD.test,v 1.3 2007/09/03 17:02:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping mallocD tests: not compiled with -DSQLITE_MEMDEBUG..." 
+ finish_test + return +} +source $testdir/malloc_common.tcl + +sqlite3_simulate_device -char atomic + +set PREP { + PRAGMA page_size = 1024; + CREATE TABLE abc(a, b, c); +} + +do_malloc_test mallocD-1 -sqlprep $PREP -sqlbody { + INSERT INTO abc VALUES(1, 2, 3); +} + +do_malloc_test mallocD-2 -sqlprep $PREP -sqlbody { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + ROLLBACK; +} + +do_malloc_test mallocD-3 -sqlprep $PREP -sqlbody { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, randstr(1500,1500)); + COMMIT; +} + +do_malloc_test mallocD-4 -sqlprep $PREP -sqlbody { + ATTACH 'test2.db' AS aux; + BEGIN; + CREATE TABLE aux.def(d, e, f); + INSERT INTO abc VALUES(4, 5, 6); + COMMIT; +} + +sqlite3_simulate_device -char {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/malloc_common.tcl b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc_common.tcl new file mode 100644 index 0000000..a055013 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/malloc_common.tcl @@ -0,0 +1,156 @@ +# 2007 May 05 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains common code used by many different malloc tests +# within the test suite. +# +# $Id: malloc_common.tcl,v 1.9 2007/10/03 08:46:45 danielk1977 Exp $ + +# If we did not compile with malloc testing enabled, then do nothing. +# +ifcapable !memdebug { + return 0 +} + +# Usage: do_malloc_test <test number> <options...> +# +# The first argument, <test number>, is an integer used to name the +# tests executed by this proc. Options are as follows: +# +# -tclprep TCL script to run to prepare test. +# -sqlprep SQL script to run to prepare test. +# -tclbody TCL script to run with malloc failure simulation. +# -sqlbody SQL script to run with malloc failure simulation. +# -cleanup TCL script to run after the test. +# +# This command runs a series of tests to verify SQLite's ability +# to handle an out-of-memory condition gracefully. It is assumed +# that if this condition occurs a malloc() call will return a +# NULL pointer. Linux, for example, doesn't do that by default. See +# the "BUGS" section of malloc(3). +# +# On each iteration of the loop, the TCL commands in any argument passed +# to the -tclbody switch, followed by the SQL commands in any argument +# passed to the -sqlbody switch are executed. On each iteration the +# Nth call to sqliteMalloc() is made to fail, where N is increased +# each time the loop runs starting from 1. When all commands execute +# successfully, the loop ends. +# +proc do_malloc_test {tn args} { + array unset ::mallocopts + array set ::mallocopts $args + + if {[string is integer $tn]} { + set tn malloc-$tn + } + if {[info exists ::mallocopts(-start)]} { + set start $::mallocopts(-start) + } else { + set start 0 + } + + foreach ::iRepeat {0 1} { + set ::go 1 + for {set ::n $start} {$::go && $::n < 50000} {incr ::n} { + + # If $::iRepeat is 0, then the malloc() failure is transient - it + # fails and then subsequent calls succeed. If $::iRepeat is 1, + # then the failure is persistent - once malloc() fails it keeps + # failing.
+ # + set zRepeat "transient" + if {$::iRepeat} {set zRepeat "persistent"} + + do_test ${tn}.${zRepeat}.${::n} { + + # Remove all traces of database files test.db and test2.db + # from the file-system. Then open (empty database) "test.db" + # with the handle [db]. + # + catch {db close} + catch {file delete -force test.db} + catch {file delete -force test.db-journal} + catch {file delete -force test2.db} + catch {file delete -force test2.db-journal} + if {[info exists ::mallocopts(-testdb)]} { + file copy $::mallocopts(-testdb) test.db + } + catch { sqlite3 db test.db } + if {[info commands db] ne ""} { + sqlite3_extended_result_codes db 1 + } + + # Execute any -tclprep and -sqlprep scripts. + # + if {[info exists ::mallocopts(-tclprep)]} { + eval $::mallocopts(-tclprep) + } + if {[info exists ::mallocopts(-sqlprep)]} { + execsql $::mallocopts(-sqlprep) + } + + # Now set the ${::n}th malloc() to fail and execute the -tclbody + # and -sqlbody scripts. + # + sqlite3_memdebug_fail $::n -repeat $::iRepeat + set ::mallocbody {} + if {[info exists ::mallocopts(-tclbody)]} { + append ::mallocbody "$::mallocopts(-tclbody)\n" + } + if {[info exists ::mallocopts(-sqlbody)]} { + append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" + } + + # The following block sets local variables as follows: + # + # isFail - True if an error (any error) was reported by sqlite. + # nFail - The total number of simulated malloc() failures. + # nBenign - The number of benign simulated malloc() failures. + # + set isFail [catch $::mallocbody msg] + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] + + # If one or more mallocs failed, run this loop body again. + # + set go [expr {$nFail>0}] + + if {($nFail-$nBenign)==0} { + if {$isFail} { + set v2 $msg + } else { + set isFail 1 + set v2 1 + } + } elseif {!$isFail} { + set v2 $msg + } elseif { + [info command db]=="" || + [db errorcode]==7 || + [db errorcode]==[expr 10+(12<<8)] || + $msg=="out of memory" + } { + set v2 1 + } else { + set v2 $msg + breakpoint + puts [db errorcode] + } + lappend isFail $v2 + } {1 1} + + if {[info exists ::mallocopts(-cleanup)]} { + catch [list uplevel #0 $::mallocopts(-cleanup)] msg + } + } + } + unset ::mallocopts + sqlite3_memdebug_fail -1 +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/manydb.test b/libraries/sqlite/unix/sqlite-3.5.1/test/manydb.test new file mode 100644 index 0000000..9af5465 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/manydb.test @@ -0,0 +1,91 @@ +# 2005 October 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests the ability of the library to open +# many different databases at the same time without leaking memory. +# +# $Id: manydb.test,v 1.3 2006/01/11 01:08:34 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set N 300 + +# First test how many file descriptors are available for use. To open a +# database for writing SQLite requires 3 file descriptors (the database, the +# journal and the directory). 
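+# For example, with the default N of 300 the probe below attempts to open 3*300 = 900 file handles. If only, say, 640 of those opens succeed before the catch fires, then $i is 640 when the loop stops and N is scaled back to 640/3 = 213 databases. +#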
+set filehandles {} +catch { + for {set i 0} {$i<($N * 3)} {incr i} { + lappend filehandles [open testfile.1 w] + } +} +foreach fd $filehandles { + close $fd +} +catch { + file delete -force testfile.1 +} +set N [expr $i / 3] + +# Create a bunch of random database names +# +unset -nocomplain dbname +unset -nocomplain used +for {set i 0} {$i<$N} {incr i} { + while 1 { + set name test-[format %08x [expr {int(rand()*0x7fffffff)}]].db + if {[info exists used($name)]} continue + set dbname($i) $name + set used($name) $i + break + } +} + +# Create a bunch of databases +# +for {set i 0} {$i<$N} {incr i} { + do_test manydb-1.$i { + sqlite3 db$i $dbname($i) + execsql { + CREATE TABLE t1(a,b); + BEGIN; + INSERT INTO t1 VALUES(1,2); + } db$i + } {} +} + +# Finish the transactions +# +for {set i 0} {$i<$N} {incr i} { + do_test manydb-2.$i { + execsql { + COMMIT; + SELECT * FROM t1; + } db$i + } {1 2} +} + + +# Close the databases and erase the files. +# +for {set i 0} {$i<$N} {incr i} { + do_test manydb-3.$i { + db$i close + file delete -force $dbname($i) + } {} +} + + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/memdb.test b/libraries/sqlite/unix/sqlite-3.5.1/test/memdb.test new file mode 100644 index 0000000..c1eb115 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/memdb.test @@ -0,0 +1,417 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the in-memory database backend. +# +# $Id: memdb.test,v 1.15 2006/01/30 22:48:44 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable memorydb { + +# In the following sequence of tests, compute the MD5 sum of the content +# of a table, make lots of modifications to that table, then do a rollback. +# Verify that after the rollback, the MD5 checksum is unchanged. +# +# These tests were borrowed from trans.tcl. +# +do_test memdb-1.1 { + db close + sqlite3 db :memory: + # sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t3(x TEXT); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + COMMIT; + SELECT count(*) FROM t3; + } +} {1024} + +# The following procedure computes a "signature" for table "t3". If +# T3 changes in any way, the signature should change. +# +# This is used to test ROLLBACK. We gather a signature for t3, then +# make lots of changes to t3, then rollback and take another signature. +# The two signatures should be the same.
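+# The invariant exercised by the loop below, in sketch form (assuming the open handle [db] and the t3 table created above): +# +#   set s1 [signature] +#   db eval {BEGIN; DELETE FROM t3; INSERT INTO t3 VALUES('x'); ROLLBACK} +#   # after the ROLLBACK, [signature] must be equal to $s1 again +#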
+# +proc signature {{fn {}}} { + set rx [db eval {SELECT x FROM t3}] + # set r1 [md5 $rx\n] + if {$fn!=""} { + # set fd [open $fn w] + # puts $fd $rx + # close $fd + } + # set r [db eval {SELECT count(*), md5sum(x) FROM t3}] + # puts "SIG($fn)=$r1" + return [list [string length $rx] $rx] +} + +# Do rollbacks. Make sure the signature does not change. +# +set limit 10 +for {set i 2} {$i<=$limit} {incr i} { + set ::sig [signature one] + # puts "sig=$sig" + set cnt [lindex $::sig 0] + if {$i%2==0} { + execsql {PRAGMA synchronous=FULL} + } else { + execsql {PRAGMA synchronous=NORMAL} + } + do_test memdb-1.$i.1-$cnt { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + ROLLBACK; + } + set sig2 [signature two] + } $sig + # puts "sig2=$sig2" + # if {$sig2!=$sig} exit + do_test memdb-1.$i.2-$cnt { + execsql { + BEGIN; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + ROLLBACK; + } + signature + } $sig + if {$i<$limit} { + do_test memdb-1.$i.9-$cnt { + execsql { + INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0; + } + } {} + } + set ::pager_old_format 0 +} + +integrity_check memdb-2.1 + +do_test memdb-3.1 { + execsql { + CREATE TABLE t4(a,b,c,d); + BEGIN; + INSERT INTO t4 VALUES(1,2,3,4); + SELECT * FROM t4; + } +} {1 2 3 4} +do_test memdb-3.2 { + execsql { + SELECT name FROM sqlite_master WHERE type='table'; + } +} {t3 t4} +do_test memdb-3.3 { + execsql { + DROP TABLE t4; + SELECT name FROM sqlite_master WHERE type='table'; + } +} {t3} +do_test memdb-3.4 { + execsql { + ROLLBACK; + SELECT name FROM sqlite_master WHERE type='table'; + } +} {t3 t4} + +# Create tables for the first group of tests. +# +do_test memdb-4.0 { + execsql { + CREATE TABLE t1(a, b, c, UNIQUE(a,b)); + CREATE TABLE t2(x); + SELECT c FROM t1 ORDER BY c; + } +} {} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# conf The conflict resolution algorithm on the BEGIN statement +# cmd An INSERT or REPLACE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "c" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t2 +# +foreach {i conf cmd t0 t1 t2} { + 1 {} INSERT 1 {} 1 + 2 {} {INSERT OR IGNORE} 0 3 1 + 3 {} {INSERT OR REPLACE} 0 4 1 + 4 {} REPLACE 0 4 1 + 5 {} {INSERT OR FAIL} 1 {} 1 + 6 {} {INSERT OR ABORT} 1 {} 1 + 7 {} {INSERT OR ROLLBACK} 1 {} {} +} { + + # All tests after test 1 depend on conflict resolution. So end the + # loop if that is not available in this build. 
+ ifcapable !conflict {if {$i>1} break} + + do_test memdb-4.$i { + if {$conf!=""} {set conf "ON CONFLICT $conf"} + set r0 [catch {execsql [subst { + DELETE FROM t1; + DELETE FROM t2; + INSERT INTO t1 VALUES(1,2,3); + BEGIN $conf; + INSERT INTO t2 VALUES(1); + $cmd INTO t1 VALUES(1,2,4); + }]} r1] + catch {execsql {COMMIT}} + if {$r0} {set r1 {}} {set r1 [execsql {SELECT c FROM t1}]} + set r2 [execsql {SELECT x FROM t2}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +do_test memdb-5.0 { + execsql { + DROP TABLE t2; + DROP TABLE t3; + CREATE TABLE t2(a,b,c); + INSERT INTO t2 VALUES(1,2,1); + INSERT INTO t2 VALUES(2,3,2); + INSERT INTO t2 VALUES(3,4,1); + INSERT INTO t2 VALUES(4,5,4); + SELECT c FROM t2 ORDER BY b; + CREATE TABLE t3(x); + INSERT INTO t3 VALUES(1); + } +} {1 2 1 4} + +# Six columns of configuration data as follows: +# +# i The reference number of the test +# conf1 The conflict resolution algorithm on the UNIQUE constraint +# conf2 The conflict resolution algorithm on the BEGIN statement +# cmd An UPDATE command to execute against table t1 +# t0 True if there is an error from $cmd +# t1 Content of "b" column of t1 assuming no error in $cmd +# t2 Content of "x" column of t3 +# +foreach {i conf1 conf2 cmd t0 t1 t2} { + 1 {} {} UPDATE 1 {6 7 8 9} 1 + 2 REPLACE {} UPDATE 0 {7 6 9} 1 + 3 IGNORE {} UPDATE 0 {6 7 3 9} 1 + 4 FAIL {} UPDATE 1 {6 7 3 4} 1 + 5 ABORT {} UPDATE 1 {1 2 3 4} 1 + 6 ROLLBACK {} UPDATE 1 {1 2 3 4} 0 + 7 REPLACE {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 + 8 IGNORE {} {UPDATE OR REPLACE} 0 {7 6 9} 1 + 9 FAIL {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 + 10 ABORT {} {UPDATE OR REPLACE} 0 {7 6 9} 1 + 11 ROLLBACK {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 + 12 {} {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 + 13 {} {} {UPDATE OR REPLACE} 0 {7 6 9} 1 + 14 {} {} {UPDATE OR FAIL} 1 {6 7 3 4} 1 + 15 {} {} {UPDATE OR ABORT} 1 {1 2 3 4} 1 + 16 {} {} {UPDATE OR ROLLBACK} 1 {1 2 3 4} 0 +} { + # All tests after test 1 depend on conflict resolution. So end the + # loop if that is not available in this build. 
+ ifcapable !conflict { + if {$i>1} break + } + + if {$t0} {set t1 {column a is not unique}} + do_test memdb-5.$i { + if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"} + if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"} + set r0 [catch {execsql [subst { + DROP TABLE t1; + CREATE TABLE t1(a,b,c, UNIQUE(a) $conf1); + INSERT INTO t1 SELECT * FROM t2; + UPDATE t3 SET x=0; + BEGIN $conf2; + $cmd t3 SET x=1; + $cmd t1 SET b=b*2; + $cmd t1 SET a=c+5; + }]} r1] + catch {execsql {COMMIT}} + if {!$r0} {set r1 [execsql {SELECT a FROM t1 ORDER BY b}]} + set r2 [execsql {SELECT x FROM t3}] + list $r0 $r1 $r2 + } [list $t0 $t1 $t2] +} + +do_test memdb-6.1 { + execsql { + SELECT * FROM t2; + } +} {1 2 1 2 3 2 3 4 1 4 5 4} +do_test memdb-6.2 { + execsql { + BEGIN; + DROP TABLE t2; + SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1; + } +} {t1 t3 t4} +do_test memdb-6.3 { + execsql { + ROLLBACK; + SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1; + } +} {t1 t2 t3 t4} +do_test memdb-6.4 { + execsql { + SELECT * FROM t2; + } +} {1 2 1 2 3 2 3 4 1 4 5 4} +ifcapable compound { +do_test memdb-6.5 { + execsql { + SELECT a FROM t2 UNION SELECT b FROM t2 ORDER BY 1; + } +} {1 2 3 4 5} +} ;# ifcapable compound +do_test memdb-6.6 { + execsql { + CREATE INDEX i2 ON t2(c); + SELECT a FROM t2 ORDER BY c; + } +} {1 3 2 4} +do_test memdb-6.6 { + execsql { + SELECT a FROM t2 ORDER BY c DESC; + } +} {4 2 3 1} +do_test memdb-6.7 { + execsql { + BEGIN; + CREATE TABLE t5(x,y); + INSERT INTO t5 VALUES(1,2); + SELECT * FROM t5; + } +} {1 2} +do_test memdb-6.8 { + execsql { + SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1; + } +} {t1 t2 t3 t4 t5} +do_test memdb-6.9 { + execsql { + ROLLBACK; + SELECT name FROM sqlite_master WHERE type='table' ORDER BY 1; + } +} {t1 t2 t3 t4} +do_test memdb-6.10 { + execsql { + CREATE TABLE t5(x PRIMARY KEY, y UNIQUE); + SELECT * FROM t5; + } +} {} +do_test memdb-6.11 { + execsql { + SELECT * FROM t5 ORDER BY y DESC; + } +} {} + +ifcapable conflict { + do_test memdb-6.12 { + execsql { + INSERT INTO t5 VALUES(1,2); + INSERT INTO t5 VALUES(3,4); + REPLACE INTO t5 VALUES(1,4); + SELECT rowid,* FROM t5; + } + } {3 1 4} + do_test memdb-6.13 { + execsql { + DELETE FROM t5 WHERE x>5; + SELECT * FROM t5; + } + } {1 4} + do_test memdb-6.14 { + execsql { + DELETE FROM t5 WHERE y<3; + SELECT * FROM t5; + } + } {1 4} +} + +do_test memdb-6.15 { + execsql { + DELETE FROM t5 WHERE x>0; + SELECT * FROM t5; + } +} {} + +ifcapable subquery { + do_test memdb-7.1 { + execsql { + CREATE TABLE t6(x); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 SELECT x+1 FROM t6; + INSERT INTO t6 SELECT x+2 FROM t6; + INSERT INTO t6 SELECT x+4 FROM t6; + INSERT INTO t6 SELECT x+8 FROM t6; + INSERT INTO t6 SELECT x+16 FROM t6; + INSERT INTO t6 SELECT x+32 FROM t6; + INSERT INTO t6 SELECT x+64 FROM t6; + INSERT INTO t6 SELECT x+128 FROM t6; + SELECT count(*) FROM (SELECT DISTINCT x FROM t6); + } + } {256} + for {set i 1} {$i<=256} {incr i} { + do_test memdb-7.2.$i { + execsql "DELETE FROM t6 WHERE x=\ + (SELECT x FROM t6 ORDER BY random() LIMIT 1)" + execsql {SELECT count(*) FROM t6} + } [expr {256-$i}] + } +} + +# Ticket #1524 +# +do_test memdb-8.1 { + db close + sqlite3 db {:memory:} + execsql { + PRAGMA auto_vacuum=TRUE; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(randstr(5000,6000)); + INSERT INTO t1 VALUES(randstr(5000,6000)); + INSERT INTO t1 VALUES(randstr(5000,6000)); + INSERT INTO t1 VALUES(randstr(5000,6000)); + INSERT INTO t1 VALUES(randstr(5000,6000)); + SELECT count(*) FROM t1; + } +} 5 
+do_test memdb-8.2 { + execsql { + DELETE FROM t1; + SELECT count(*) FROM t1; + } +} 0 + + +} ;# ifcapable memorydb + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/memleak.test b/libraries/sqlite/unix/sqlite-3.5.1/test/memleak.test new file mode 100644 index 0000000..f4aaf27 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/memleak.test @@ -0,0 +1,98 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. +# +# $Id: memleak.test,v 1.10 2007/03/30 17:17:52 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test memleak_finish_test +proc finish_test {} { + catch {db close} + memleak_check +} + +if {[file exists ./sqlite_test_count]} { + set COUNT [exec cat ./sqlite_test_count] +} else { + set COUNT 3 +} + +# LeakList will hold a list of the number of unfreed mallocs after +# each round of the test. This number should be constant. If it +# grows, it may mean there is a memory leak in the library. +# +set LeakList {} + +set EXCLUDE { + all.test + quick.test + misuse.test + memleak.test + btree2.test + async.test + async2.test + trans.test + crash.test + autovacuum_crash.test +} +# Test files btree2.test and btree4.test don't work if the +# SQLITE_DEFAULT_AUTOVACUUM macro is defined to true (because they depend +# on tables being allocated starting at page 2). +# +ifcapable default_autovacuum { + lappend EXCLUDE btree2.test + lappend EXCLUDE btree4.test +} + +if {[sqlite3 -has-codec]} { + # lappend EXCLUDE +} +if {[llength $argv]>0} { + set FILELIST $argv + set argv {} +} else { + set FILELIST [lsort -dictionary [glob $testdir/*.test]] +} + +foreach testfile $FILELIST { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + set LeakList {} + for {set COUNTER 0} {$COUNTER<$COUNT} {incr COUNTER} { + source $testfile + if {[info exists Leak]} { + lappend LeakList $Leak + } + } + if {$LeakList!=""} { + puts -nonewline memory-leak-test-$tail... + incr ::nTest + foreach x $LeakList { + if {$x!=[lindex $LeakList 0]} { + puts " failed! ($LeakList)" + incr ::nErr + lappend ::failList memory-leak-test-$tail + break + } + } + puts " Ok" + } +} +memleak_finish_test + +# Run the malloc tests and the misuse test after memory leak detection. +# Both tests leak memory. +# +#catch {source $testdir/misuse.test} +#catch {source $testdir/malloc.test} + +memleak_finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/minmax.test b/libraries/sqlite/unix/sqlite-3.5.1/test/minmax.test new file mode 100644 index 0000000..074f6df --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/minmax.test @@ -0,0 +1,384 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this file is testing SELECT statements that contain +# aggregate min() and max() functions and which are handled as +# as a special case. +# +# $Id: minmax.test,v 1.19 2006/03/26 01:21:23 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test minmax-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(x, y); + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 VALUES(3,2); + INSERT INTO t1 VALUES(4,3); + INSERT INTO t1 VALUES(5,3); + INSERT INTO t1 VALUES(6,3); + INSERT INTO t1 VALUES(7,3); + INSERT INTO t1 VALUES(8,4); + INSERT INTO t1 VALUES(9,4); + INSERT INTO t1 VALUES(10,4); + INSERT INTO t1 VALUES(11,4); + INSERT INTO t1 VALUES(12,4); + INSERT INTO t1 VALUES(13,4); + INSERT INTO t1 VALUES(14,4); + INSERT INTO t1 VALUES(15,4); + INSERT INTO t1 VALUES(16,5); + INSERT INTO t1 VALUES(17,5); + INSERT INTO t1 VALUES(18,5); + INSERT INTO t1 VALUES(19,5); + INSERT INTO t1 VALUES(20,5); + COMMIT; + SELECT DISTINCT y FROM t1 ORDER BY y; + } +} {1 2 3 4 5} + +do_test minmax-1.1 { + set sqlite_search_count 0 + execsql {SELECT min(x) FROM t1} +} {1} +do_test minmax-1.2 { + set sqlite_search_count +} {19} +do_test minmax-1.3 { + set sqlite_search_count 0 + execsql {SELECT max(x) FROM t1} +} {20} +do_test minmax-1.4 { + set sqlite_search_count +} {19} +do_test minmax-1.5 { + execsql {CREATE INDEX t1i1 ON t1(x)} + set sqlite_search_count 0 + execsql {SELECT min(x) FROM t1} +} {1} +do_test minmax-1.6 { + set sqlite_search_count +} {2} +do_test minmax-1.7 { + set sqlite_search_count 0 + execsql {SELECT max(x) FROM t1} +} {20} +do_test minmax-1.8 { + set sqlite_search_count +} {1} +do_test minmax-1.9 { + set sqlite_search_count 0 + execsql {SELECT max(y) FROM t1} +} {5} +do_test minmax-1.10 { + set sqlite_search_count +} {19} + +do_test minmax-2.0 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t2 SELECT * FROM t1; + } + set sqlite_search_count 0 + execsql {SELECT min(a) FROM t2} +} {1} +do_test minmax-2.1 { + set sqlite_search_count +} {0} +do_test minmax-2.2 { + set sqlite_search_count 0 + execsql {SELECT max(a) FROM t2} +} {20} +do_test minmax-2.3 { + set sqlite_search_count +} {0} + +do_test minmax-3.0 { + ifcapable subquery { + execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)} + } else { + db function max_a_t2 {execsql {SELECT max(a) FROM t2}} + execsql {INSERT INTO t2 VALUES(max_a_t2()+1,999)} + } + set sqlite_search_count 0 + execsql {SELECT max(a) FROM t2} +} {21} +do_test minmax-3.1 { + set sqlite_search_count +} {0} +do_test minmax-3.2 { + ifcapable subquery { + execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)} + } else { + db function max_a_t2 {execsql {SELECT max(a) FROM t2}} + execsql {INSERT INTO t2 VALUES(max_a_t2()+1,999)} + } + set sqlite_search_count 0 + ifcapable subquery { + execsql { SELECT b FROM t2 WHERE a=(SELECT max(a) FROM t2) } + } else { + execsql { SELECT b FROM t2 WHERE a=max_a_t2() } + } +} {999} +do_test minmax-3.3 { + set sqlite_search_count +} {0} + +ifcapable {compound && subquery} { + do_test minmax-4.1 { + execsql { + SELECT coalesce(min(x+0),-1), coalesce(max(x+0),-1) FROM + (SELECT * FROM t1 UNION SELECT NULL as 'x', NULL as 'y') + } + } {1 20} + do_test minmax-4.2 { + execsql { + SELECT y, coalesce(sum(x),0) FROM + (SELECT null AS x, y+1 AS y FROM t1 UNION SELECT * FROM t1) + GROUP BY y ORDER BY y; + } + } {1 1 2 5 3 22 4 92 5 90 6 0} + do_test minmax-4.3 { + execsql { + SELECT y, count(x), count(*) FROM + (SELECT null AS x, y+1 AS y FROM t1 UNION 
SELECT * FROM t1) + GROUP BY y ORDER BY y; + } + } {1 1 1 2 2 3 3 4 5 4 8 9 5 5 6 6 0 1} +} ;# ifcapable compound + +# Make sure the min(x) and max(x) optimizations work on empty tables +# including empty tables with indices. Ticket #296. +# +do_test minmax-5.1 { + execsql { + CREATE TABLE t3(x INTEGER UNIQUE NOT NULL); + SELECT coalesce(min(x),999) FROM t3; + } +} {999} +do_test minmax-5.2 { + execsql { + SELECT coalesce(min(rowid),999) FROM t3; + } +} {999} +do_test minmax-5.3 { + execsql { + SELECT coalesce(max(x),999) FROM t3; + } +} {999} +do_test minmax-5.4 { + execsql { + SELECT coalesce(max(rowid),999) FROM t3; + } +} {999} +do_test minmax-5.5 { + execsql { + SELECT coalesce(max(rowid),999) FROM t3 WHERE rowid<25; + } +} {999} + +# Make sure the min(x) and max(x) optimizations work when there +# is a LIMIT clause. Ticket #396. +# +do_test minmax-6.1 { + execsql { + SELECT min(a) FROM t2 LIMIT 1 + } +} {1} +do_test minmax-6.2 { + execsql { + SELECT max(a) FROM t2 LIMIT 3 + } +} {22} +do_test minmax-6.3 { + execsql { + SELECT min(a) FROM t2 LIMIT 0,100 + } +} {1} +do_test minmax-6.4 { + execsql { + SELECT max(a) FROM t2 LIMIT 1,100 + } +} {} +do_test minmax-6.5 { + execsql { + SELECT min(x) FROM t3 LIMIT 1 + } +} {{}} +do_test minmax-6.6 { + execsql { + SELECT max(x) FROM t3 LIMIT 0 + } +} {} +do_test minmax-6.7 { + execsql { + SELECT max(a) FROM t2 LIMIT 0 + } +} {} + +# Make sure the max(x) and min(x) optimizations work for nested +# queries. Ticket #587. +# +do_test minmax-7.1 { + execsql { + SELECT max(x) FROM t1; + } +} 20 +ifcapable subquery { + do_test minmax-7.2 { + execsql { + SELECT * FROM (SELECT max(x) FROM t1); + } + } 20 +} +do_test minmax-7.3 { + execsql { + SELECT min(x) FROM t1; + } +} 1 +ifcapable subquery { + do_test minmax-7.4 { + execsql { + SELECT * FROM (SELECT min(x) FROM t1); + } + } 1 +} + +# Make sure min(x) and max(x) work correctly when the datatype is +# TEXT instead of NUMERIC. Ticket #623. +# +do_test minmax-8.1 { + execsql { + CREATE TABLE t4(a TEXT); + INSERT INTO t4 VALUES('1234'); + INSERT INTO t4 VALUES('234'); + INSERT INTO t4 VALUES('34'); + SELECT min(a), max(a) FROM t4; + } +} {1234 34} +do_test minmax-8.2 { + execsql { + CREATE TABLE t5(a INTEGER); + INSERT INTO t5 VALUES('1234'); + INSERT INTO t5 VALUES('234'); + INSERT INTO t5 VALUES('34'); + SELECT min(a), max(a) FROM t5; + } +} {34 1234} + +# Ticket #658: Test the min()/max() optimization when the FROM clause +# is a subquery. +# +ifcapable {compound && subquery} { + do_test minmax-9.1 { + execsql { + SELECT max(rowid) FROM ( + SELECT max(rowid) FROM t4 UNION SELECT max(rowid) FROM t5 + ) + } + } {1} + do_test minmax-9.2 { + execsql { + SELECT max(rowid) FROM ( + SELECT max(rowid) FROM t4 EXCEPT SELECT max(rowid) FROM t5 + ) + } + } {{}} +} ;# ifcapable compound&&subquery + +# If there is a NULL in an aggregate max() or min(), ignore it. An +# aggregate min() or max() will only return NULL if all values are NULL. 
+# +do_test minmax-10.1 { + execsql { + CREATE TABLE t6(x); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 VALUES(2); + INSERT INTO t6 VALUES(NULL); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax-10.2 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax-10.3 { + execsql { + CREATE INDEX i6 ON t6(x); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax-10.4 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax-10.5 { + execsql { + DELETE FROM t6 WHERE x NOT NULL; + SELECT count(*) FROM t6; + } +} 1 +do_test minmax-10.6 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +ifcapable subquery { + do_test minmax-10.7 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } + } {{} {}} +} +do_test minmax-10.8 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} +do_test minmax-10.9 { + execsql { + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + SELECT count(*) FROM t6; + } +} 1024 +do_test minmax-10.10 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +ifcapable subquery { + do_test minmax-10.11 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } + } {{} {}} +} +do_test minmax-10.12 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/minmax2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/minmax2.test new file mode 100644 index 0000000..a304940 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/minmax2.test @@ -0,0 +1,387 @@ +# 2007 July 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing SELECT statements that contain +# aggregate min() and max() functions and which are handled as +# as a special case. This file makes sure that the min/max +# optimization works right in the presence of descending +# indices. Ticket #2514. 
+# +# $Id: minmax2.test,v 1.1 2007/07/18 18:17:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test minmax2-1.0 { + execsql { + PRAGMA legacy_file_format=0; + BEGIN; + CREATE TABLE t1(x, y); + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 VALUES(3,2); + INSERT INTO t1 VALUES(4,3); + INSERT INTO t1 VALUES(5,3); + INSERT INTO t1 VALUES(6,3); + INSERT INTO t1 VALUES(7,3); + INSERT INTO t1 VALUES(8,4); + INSERT INTO t1 VALUES(9,4); + INSERT INTO t1 VALUES(10,4); + INSERT INTO t1 VALUES(11,4); + INSERT INTO t1 VALUES(12,4); + INSERT INTO t1 VALUES(13,4); + INSERT INTO t1 VALUES(14,4); + INSERT INTO t1 VALUES(15,4); + INSERT INTO t1 VALUES(16,5); + INSERT INTO t1 VALUES(17,5); + INSERT INTO t1 VALUES(18,5); + INSERT INTO t1 VALUES(19,5); + INSERT INTO t1 VALUES(20,5); + COMMIT; + SELECT DISTINCT y FROM t1 ORDER BY y; + } +} {1 2 3 4 5} + +do_test minmax2-1.1 { + set sqlite_search_count 0 + execsql {SELECT min(x) FROM t1} +} {1} +do_test minmax2-1.2 { + set sqlite_search_count +} {19} +do_test minmax2-1.3 { + set sqlite_search_count 0 + execsql {SELECT max(x) FROM t1} +} {20} +do_test minmax2-1.4 { + set sqlite_search_count +} {19} +do_test minmax2-1.5 { + execsql {CREATE INDEX t1i1 ON t1(x DESC)} + set sqlite_search_count 0 + execsql {SELECT min(x) FROM t1} +} {1} +do_test minmax2-1.6 { + set sqlite_search_count +} {2} +do_test minmax2-1.7 { + set sqlite_search_count 0 + execsql {SELECT max(x) FROM t1} +} {20} +do_test minmax2-1.8 { + set sqlite_search_count +} {1} +do_test minmax2-1.9 { + set sqlite_search_count 0 + execsql {SELECT max(y) FROM t1} +} {5} +do_test minmax2-1.10 { + set sqlite_search_count +} {19} + +do_test minmax2-2.0 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t2 SELECT * FROM t1; + } + set sqlite_search_count 0 + execsql {SELECT min(a) FROM t2} +} {1} +do_test minmax2-2.1 { + set sqlite_search_count +} {0} +do_test minmax2-2.2 { + set sqlite_search_count 0 + execsql {SELECT max(a) FROM t2} +} {20} +do_test minmax2-2.3 { + set sqlite_search_count +} {0} + +do_test minmax2-3.0 { + ifcapable subquery { + execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)} + } else { + db function max_a_t2 {execsql {SELECT max(a) FROM t2}} + execsql {INSERT INTO t2 VALUES(max_a_t2()+1,999)} + } + set sqlite_search_count 0 + execsql {SELECT max(a) FROM t2} +} {21} +do_test minmax2-3.1 { + set sqlite_search_count +} {0} +do_test minmax2-3.2 { + ifcapable subquery { + execsql {INSERT INTO t2 VALUES((SELECT max(a) FROM t2)+1,999)} + } else { + db function max_a_t2 {execsql {SELECT max(a) FROM t2}} + execsql {INSERT INTO t2 VALUES(max_a_t2()+1,999)} + } + set sqlite_search_count 0 + ifcapable subquery { + execsql { SELECT b FROM t2 WHERE a=(SELECT max(a) FROM t2) } + } else { + execsql { SELECT b FROM t2 WHERE a=max_a_t2() } + } +} {999} +do_test minmax2-3.3 { + set sqlite_search_count +} {0} + +ifcapable {compound && subquery} { + do_test minmax2-4.1 { + execsql { + SELECT coalesce(min(x+0),-1), coalesce(max(x+0),-1) FROM + (SELECT * FROM t1 UNION SELECT NULL as 'x', NULL as 'y') + } + } {1 20} + do_test minmax2-4.2 { + execsql { + SELECT y, coalesce(sum(x),0) FROM + (SELECT null AS x, y+1 AS y FROM t1 UNION SELECT * FROM t1) + GROUP BY y ORDER BY y; + } + } {1 1 2 5 3 22 4 92 5 90 6 0} + do_test minmax2-4.3 { + execsql { + SELECT y, count(x), count(*) FROM + (SELECT null AS x, y+1 AS y FROM t1 UNION SELECT * FROM t1) + GROUP BY y ORDER BY y; + } + } {1 1 1 2 2 3 3 4 5 4 8 9 5 5 6 6 0 1} +} ;# 
ifcapable compound + +# Make sure the min(x) and max(x) optimizations work on empty tables +# including empty tables with indices. Ticket #296. +# +do_test minmax2-5.1 { + execsql { + CREATE TABLE t3(x INTEGER UNIQUE NOT NULL); + SELECT coalesce(min(x),999) FROM t3; + } +} {999} +do_test minmax2-5.2 { + execsql { + SELECT coalesce(min(rowid),999) FROM t3; + } +} {999} +do_test minmax2-5.3 { + execsql { + SELECT coalesce(max(x),999) FROM t3; + } +} {999} +do_test minmax2-5.4 { + execsql { + SELECT coalesce(max(rowid),999) FROM t3; + } +} {999} +do_test minmax2-5.5 { + execsql { + SELECT coalesce(max(rowid),999) FROM t3 WHERE rowid<25; + } +} {999} + +# Make sure the min(x) and max(x) optimizations work when there +# is a LIMIT clause. Ticket #396. +# +do_test minmax2-6.1 { + execsql { + SELECT min(a) FROM t2 LIMIT 1 + } +} {1} +do_test minmax2-6.2 { + execsql { + SELECT max(a) FROM t2 LIMIT 3 + } +} {22} +do_test minmax2-6.3 { + execsql { + SELECT min(a) FROM t2 LIMIT 0,100 + } +} {1} +do_test minmax2-6.4 { + execsql { + SELECT max(a) FROM t2 LIMIT 1,100 + } +} {} +do_test minmax2-6.5 { + execsql { + SELECT min(x) FROM t3 LIMIT 1 + } +} {{}} +do_test minmax2-6.6 { + execsql { + SELECT max(x) FROM t3 LIMIT 0 + } +} {} +do_test minmax2-6.7 { + execsql { + SELECT max(a) FROM t2 LIMIT 0 + } +} {} + +# Make sure the max(x) and min(x) optimizations work for nested +# queries. Ticket #587. +# +do_test minmax2-7.1 { + execsql { + SELECT max(x) FROM t1; + } +} 20 +ifcapable subquery { + do_test minmax2-7.2 { + execsql { + SELECT * FROM (SELECT max(x) FROM t1); + } + } 20 +} +do_test minmax2-7.3 { + execsql { + SELECT min(x) FROM t1; + } +} 1 +ifcapable subquery { + do_test minmax2-7.4 { + execsql { + SELECT * FROM (SELECT min(x) FROM t1); + } + } 1 +} + +# Make sure min(x) and max(x) work correctly when the datatype is +# TEXT instead of NUMERIC. Ticket #623. +# +do_test minmax2-8.1 { + execsql { + CREATE TABLE t4(a TEXT); + INSERT INTO t4 VALUES('1234'); + INSERT INTO t4 VALUES('234'); + INSERT INTO t4 VALUES('34'); + SELECT min(a), max(a) FROM t4; + } +} {1234 34} +do_test minmax2-8.2 { + execsql { + CREATE TABLE t5(a INTEGER); + INSERT INTO t5 VALUES('1234'); + INSERT INTO t5 VALUES('234'); + INSERT INTO t5 VALUES('34'); + SELECT min(a), max(a) FROM t5; + } +} {34 1234} + +# Ticket #658: Test the min()/max() optimization when the FROM clause +# is a subquery. +# +ifcapable {compound && subquery} { + do_test minmax2-9.1 { + execsql { + SELECT max(rowid) FROM ( + SELECT max(rowid) FROM t4 UNION SELECT max(rowid) FROM t5 + ) + } + } {1} + do_test minmax2-9.2 { + execsql { + SELECT max(rowid) FROM ( + SELECT max(rowid) FROM t4 EXCEPT SELECT max(rowid) FROM t5 + ) + } + } {{}} +} ;# ifcapable compound&&subquery + +# If there is a NULL in an aggregate max() or min(), ignore it. An +# aggregate min() or max() will only return NULL if all values are NULL. 
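The rule just stated can be seen in a compact sketch (hypothetical table name): NULL rows are skipped by min() and max(), and only when no non-NULL values remain does the aggregate itself come back NULL.

    CREATE TABLE nd(v);
    INSERT INTO nd VALUES(NULL);
    INSERT INTO nd VALUES(3);
    INSERT INTO nd VALUES(7);
    SELECT min(v), max(v) FROM nd;     -- 3 and 7: the NULL row is ignored
    DELETE FROM nd WHERE v IS NOT NULL;
    SELECT min(v), max(v) FROM nd;     -- both NULL: no non-NULL values remain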
+# +do_test minmax2-10.1 { + execsql { + CREATE TABLE t6(x); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 VALUES(2); + INSERT INTO t6 VALUES(NULL); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax2-10.2 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax2-10.3 { + execsql { + CREATE INDEX i6 ON t6(x DESC); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax2-10.4 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax2-10.5 { + execsql { + DELETE FROM t6 WHERE x NOT NULL; + SELECT count(*) FROM t6; + } +} 1 +do_test minmax2-10.6 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +ifcapable subquery { + do_test minmax2-10.7 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } + } {{} {}} +} +do_test minmax2-10.8 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} +do_test minmax2-10.9 { + execsql { + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + SELECT count(*) FROM t6; + } +} 1024 +do_test minmax2-10.10 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +ifcapable subquery { + do_test minmax2-10.11 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } + } {{} {}} +} +do_test minmax2-10.12 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc1.test new file mode 100644 index 0000000..c23d987 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc1.test @@ -0,0 +1,585 @@ +# 2001 September 15. +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for miscellanous features that were +# left out of other test files. +# +# $Id: misc1.test,v 1.41 2006/06/27 20:06:45 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Mimic the SQLite 2 collation type NUMERIC. +db collate numeric numeric_collate +proc numeric_collate {lhs rhs} { + if {$lhs == $rhs} {return 0} + return [expr ($lhs>$rhs)?1:-1] +} + +# Mimic the SQLite 2 collation type TEXT. +db collate text text_collate +proc numeric_collate {lhs rhs} { + return [string compare $lhs $rhs] +} + +# Test the creation and use of tables that have a large number +# of columns. 
+# +do_test misc1-1.1 { + set cmd "CREATE TABLE manycol(x0 text" + for {set i 1} {$i<=99} {incr i} { + append cmd ",x$i text" + } + append cmd ")"; + execsql $cmd + set cmd "INSERT INTO manycol VALUES(0" + for {set i 1} {$i<=99} {incr i} { + append cmd ",$i" + } + append cmd ")"; + execsql $cmd + execsql "SELECT x99 FROM manycol" +} 99 +do_test misc1-1.2 { + execsql {SELECT x0, x10, x25, x50, x75 FROM manycol} +} {0 10 25 50 75} +do_test misc1-1.3.1 { + for {set j 100} {$j<=1000} {incr j 100} { + set cmd "INSERT INTO manycol VALUES($j" + for {set i 1} {$i<=99} {incr i} { + append cmd ",[expr {$i+$j}]" + } + append cmd ")" + execsql $cmd + } + execsql {SELECT x50 FROM manycol ORDER BY x80+0} +} {50 150 250 350 450 550 650 750 850 950 1050} +do_test misc1-1.3.2 { + execsql {SELECT x50 FROM manycol ORDER BY x80} +} {1050 150 250 350 450 550 650 750 50 850 950} +do_test misc1-1.4 { + execsql {SELECT x75 FROM manycol WHERE x50=350} +} 375 +do_test misc1-1.5 { + execsql {SELECT x50 FROM manycol WHERE x99=599} +} 550 +do_test misc1-1.6 { + execsql {CREATE INDEX manycol_idx1 ON manycol(x99)} + execsql {SELECT x50 FROM manycol WHERE x99=899} +} 850 +do_test misc1-1.7 { + execsql {SELECT count(*) FROM manycol} +} 11 +do_test misc1-1.8 { + execsql {DELETE FROM manycol WHERE x98=1234} + execsql {SELECT count(*) FROM manycol} +} 11 +do_test misc1-1.9 { + execsql {DELETE FROM manycol WHERE x98=998} + execsql {SELECT count(*) FROM manycol} +} 10 +do_test misc1-1.10 { + execsql {DELETE FROM manycol WHERE x99=500} + execsql {SELECT count(*) FROM manycol} +} 10 +do_test misc1-1.11 { + execsql {DELETE FROM manycol WHERE x99=599} + execsql {SELECT count(*) FROM manycol} +} 9 + +# Check GROUP BY expressions that name two or more columns. +# +do_test misc1-2.1 { + execsql { + BEGIN TRANSACTION; + CREATE TABLE agger(one text, two text, three text, four text); + INSERT INTO agger VALUES(1, 'one', 'hello', 'yes'); + INSERT INTO agger VALUES(2, 'two', 'howdy', 'no'); + INSERT INTO agger VALUES(3, 'thr', 'howareya', 'yes'); + INSERT INTO agger VALUES(4, 'two', 'lothere', 'yes'); + INSERT INTO agger VALUES(5, 'one', 'atcha', 'yes'); + INSERT INTO agger VALUES(6, 'two', 'hello', 'no'); + COMMIT + } + execsql {SELECT count(*) FROM agger} +} 6 +do_test misc1-2.2 { + execsql {SELECT sum(one), two, four FROM agger + GROUP BY two, four ORDER BY sum(one) desc} +} {8 two no 6 one yes 4 two yes 3 thr yes} +do_test misc1-2.3 { + execsql {SELECT sum((one)), (two), (four) FROM agger + GROUP BY (two), (four) ORDER BY sum(one) desc} +} {8 two no 6 one yes 4 two yes 3 thr yes} + +# Here's a test for a bug found by Joel Lucsy. The code below +# was causing an assertion failure. +# +do_test misc1-3.1 { + set r [execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES('hi'); + PRAGMA full_column_names=on; + SELECT rowid, * FROM t1; + }] + lindex $r 1 +} {hi} + +# Here's a test for yet another bug found by Joel Lucsy. The code +# below was causing an assertion failure. 
+# +do_test misc1-4.1 { + execsql { + BEGIN; + CREATE TABLE t2(a); + INSERT INTO t2 VALUES('This is a long string to use up a lot of disk -'); + UPDATE t2 SET a=a||a||a||a; + INSERT INTO t2 SELECT '1 - ' || a FROM t2; + INSERT INTO t2 SELECT '2 - ' || a FROM t2; + INSERT INTO t2 SELECT '3 - ' || a FROM t2; + INSERT INTO t2 SELECT '4 - ' || a FROM t2; + INSERT INTO t2 SELECT '5 - ' || a FROM t2; + INSERT INTO t2 SELECT '6 - ' || a FROM t2; + COMMIT; + SELECT count(*) FROM t2; + } +} {64} + +# Make sure we actually see a semicolon or end-of-file in the SQL input +# before executing a command. Thus if "WHERE" is misspelled on an UPDATE, +# the user won't accidently update every record. +# +do_test misc1-5.1 { + catchsql { + CREATE TABLE t3(a,b); + INSERT INTO t3 VALUES(1,2); + INSERT INTO t3 VALUES(3,4); + UPDATE t3 SET a=0 WHEREwww b=2; + } +} {1 {near "WHEREwww": syntax error}} +do_test misc1-5.2 { + execsql { + SELECT * FROM t3 ORDER BY a; + } +} {1 2 3 4} + +# Certain keywords (especially non-standard keywords like "REPLACE") can +# also be used as identifiers. The way this works in the parser is that +# the parser first detects a syntax error, the error handling routine +# sees that the special keyword caused the error, then replaces the keyword +# with "ID" and tries again. +# +# Check the operation of this logic. +# +do_test misc1-6.1 { + catchsql { + CREATE TABLE t4( + abort, asc, begin, cluster, conflict, copy, delimiters, desc, end, + explain, fail, ignore, key, offset, pragma, replace, temp, + vacuum, view + ); + } +} {0 {}} +do_test misc1-6.2 { + catchsql { + INSERT INTO t4 + VALUES(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19); + } +} {0 {}} +do_test misc1-6.3 { + execsql { + SELECT * FROM t4 + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19} +do_test misc1-6.4 { + execsql { + SELECT abort+asc,max(key,pragma,temp) FROM t4 + } +} {3 17} + +# Test for multi-column primary keys, and for multiple primary keys. +# +do_test misc1-7.1 { + catchsql { + CREATE TABLE error1( + a TYPE PRIMARY KEY, + b TYPE PRIMARY KEY + ); + } +} {1 {table "error1" has more than one primary key}} +do_test misc1-7.2 { + catchsql { + CREATE TABLE error1( + a INTEGER PRIMARY KEY, + b TYPE PRIMARY KEY + ); + } +} {1 {table "error1" has more than one primary key}} +do_test misc1-7.3 { + execsql { + CREATE TABLE t5(a,b,c,PRIMARY KEY(a,b)); + INSERT INTO t5 VALUES(1,2,3); + SELECT * FROM t5 ORDER BY a; + } +} {1 2 3} +do_test misc1-7.4 { + catchsql { + INSERT INTO t5 VALUES(1,2,4); + } +} {1 {columns a, b are not unique}} +do_test misc1-7.5 { + catchsql { + INSERT INTO t5 VALUES(0,2,4); + } +} {0 {}} +do_test misc1-7.6 { + execsql { + SELECT * FROM t5 ORDER BY a; + } +} {0 2 4 1 2 3} + +do_test misc1-8.1 { + catchsql { + SELECT *; + } +} {1 {no tables specified}} +do_test misc1-8.2 { + catchsql { + SELECT t1.*; + } +} {1 {no such table: t1}} + +execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + DROP TABLE t4; +} + +# 64-bit integers are represented exactly. +# +do_test misc1-9.1 { + catchsql { + CREATE TABLE t1(a unique not null, b unique not null); + INSERT INTO t1 VALUES('a',1234567890123456789); + INSERT INTO t1 VALUES('b',1234567891123456789); + INSERT INTO t1 VALUES('c',1234567892123456789); + SELECT * FROM t1; + } +} {0 {a 1234567890123456789 b 1234567891123456789 c 1234567892123456789}} + +# A WHERE clause is not allowed to contain more than 99 terms. Check to +# make sure this limit is enforced. +# +# 2005-07-16: There is no longer a limit on the number of terms in a +# WHERE clause. 
But keep these tests just so that we have some tests +# that use a large number of terms in the WHERE clause. +# +do_test misc1-10.0 { + execsql {SELECT count(*) FROM manycol} +} {9} +do_test misc1-10.1 { + set ::where {WHERE x0>=0} + for {set i 1} {$i<=99} {incr i} { + append ::where " AND x$i<>0" + } + catchsql "SELECT count(*) FROM manycol $::where" +} {0 9} +do_test misc1-10.2 { + catchsql "SELECT count(*) FROM manycol $::where AND rowid>0" +} {0 9} +do_test misc1-10.3 { + regsub "x0>=0" $::where "x0=0" ::where + catchsql "DELETE FROM manycol $::where" +} {0 {}} +do_test misc1-10.4 { + execsql {SELECT count(*) FROM manycol} +} {8} +do_test misc1-10.5 { + catchsql "DELETE FROM manycol $::where AND rowid>0" +} {0 {}} +do_test misc1-10.6 { + execsql {SELECT x1 FROM manycol WHERE x0=100} +} {101} +do_test misc1-10.7 { + regsub "x0=0" $::where "x0=100" ::where + catchsql "UPDATE manycol SET x1=x1+1 $::where" +} {0 {}} +do_test misc1-10.8 { + execsql {SELECT x1 FROM manycol WHERE x0=100} +} {102} +do_test misc1-10.9 { + catchsql "UPDATE manycol SET x1=x1+1 $::where AND rowid>0" +} {0 {}} +do_test misc1-10.10 { + execsql {SELECT x1 FROM manycol WHERE x0=100} +} {103} + +# Make sure the initialization works even if a database is opened while +# another process has the database locked. +# +# Update for v3: The BEGIN doesn't lock the database so the schema is read +# and the SELECT returns successfully. +do_test misc1-11.1 { + execsql {BEGIN} + execsql {UPDATE t1 SET a=0 WHERE 0} + sqlite3 db2 test.db + set rc [catch {db2 eval {SELECT count(*) FROM t1}} msg] + lappend rc $msg +# v2 result: {1 {database is locked}} +} {0 3} +do_test misc1-11.2 { + execsql {COMMIT} + set rc [catch {db2 eval {SELECT count(*) FROM t1}} msg] + db2 close + lappend rc $msg +} {0 3} + +# Make sure string comparisons really do compare strings in format4+. +# Similar tests in the format3.test file show that for format3 and earlier +# all comparisions where numeric if either operand looked like a number. +# +do_test misc1-12.1 { + execsql {SELECT '0'=='0.0'} +} {0} +do_test misc1-12.2 { + execsql {SELECT '0'==0.0} +} {0} +do_test misc1-12.3 { + execsql {SELECT '12345678901234567890'=='12345678901234567891'} +} {0} +do_test misc1-12.4 { + execsql { + CREATE TABLE t6(a INT UNIQUE, b TEXT UNIQUE); + INSERT INTO t6 VALUES('0','0.0'); + SELECT * FROM t6; + } +} {0 0.0} +ifcapable conflict { + do_test misc1-12.5 { + execsql { + INSERT OR IGNORE INTO t6 VALUES(0.0,'x'); + SELECT * FROM t6; + } + } {0 0.0} + do_test misc1-12.6 { + execsql { + INSERT OR IGNORE INTO t6 VALUES('y',0); + SELECT * FROM t6; + } + } {0 0.0 y 0} +} +do_test misc1-12.7 { + execsql { + CREATE TABLE t7(x INTEGER, y TEXT, z); + INSERT INTO t7 VALUES(0,0,1); + INSERT INTO t7 VALUES(0.0,0,2); + INSERT INTO t7 VALUES(0,0.0,3); + INSERT INTO t7 VALUES(0.0,0.0,4); + SELECT DISTINCT x, y FROM t7 ORDER BY z; + } +} {0 0 0 0.0} +do_test misc1-12.8 { + execsql { + SELECT min(z), max(z), count(z) FROM t7 GROUP BY x ORDER BY 1; + } +} {1 4 4} +do_test misc1-12.9 { + execsql { + SELECT min(z), max(z), count(z) FROM t7 GROUP BY y ORDER BY 1; + } +} {1 2 2 3 4 2} + +# This used to be an error. But we changed the code so that arbitrary +# identifiers can be used as a collating sequence. Collation is by text +# if the identifier contains "text", "blob", or "clob" and is numeric +# otherwise. +# +# Update: In v3, it is an error again. 
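For reference, the version-3 behaviour that replaced the old fallback is a hard error; a sketch with a hypothetical table, where the exact message wording may vary by version:

    CREATE TABLE c1(a);
    SELECT a FROM c1 ORDER BY a COLLATE no_such_seq;
    -- fails with something along the lines of:
    --   no such collation sequence: no_such_seq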
+# +#do_test misc1-12.10 { +# catchsql { +# SELECT * FROM t6 ORDER BY a COLLATE unknown; +# } +#} {0 {0 0 y 0}} +do_test misc1-12.11 { + execsql { + CREATE TABLE t8(x TEXT COLLATE numeric, y INTEGER COLLATE text, z); + INSERT INTO t8 VALUES(0,0,1); + INSERT INTO t8 VALUES(0.0,0,2); + INSERT INTO t8 VALUES(0,0.0,3); + INSERT INTO t8 VALUES(0.0,0.0,4); + SELECT DISTINCT x, y FROM t8 ORDER BY z; + } +} {0 0 0.0 0} +do_test misc1-12.12 { + execsql { + SELECT min(z), max(z), count(z) FROM t8 GROUP BY x ORDER BY 1; + } +} {1 3 2 2 4 2} +do_test misc1-12.13 { + execsql { + SELECT min(z), max(z), count(z) FROM t8 GROUP BY y ORDER BY 1; + } +} {1 4 4} + +# There was a problem with realloc() in the OP_MemStore operation of +# the VDBE. A buffer was being reallocated but some pointers into +# the old copy of the buffer were not being moved over to the new copy. +# The following code tests for the problem. +# +ifcapable subquery { + do_test misc1-13.1 { + execsql { + CREATE TABLE t9(x,y); + INSERT INTO t9 VALUES('one',1); + INSERT INTO t9 VALUES('two',2); + INSERT INTO t9 VALUES('three',3); + INSERT INTO t9 VALUES('four',4); + INSERT INTO t9 VALUES('five',5); + INSERT INTO t9 VALUES('six',6); + INSERT INTO t9 VALUES('seven',7); + INSERT INTO t9 VALUES('eight',8); + INSERT INTO t9 VALUES('nine',9); + INSERT INTO t9 VALUES('ten',10); + INSERT INTO t9 VALUES('eleven',11); + SELECT y FROM t9 + WHERE x=(SELECT x FROM t9 WHERE y=1) + OR x=(SELECT x FROM t9 WHERE y=2) + OR x=(SELECT x FROM t9 WHERE y=3) + OR x=(SELECT x FROM t9 WHERE y=4) + OR x=(SELECT x FROM t9 WHERE y=5) + OR x=(SELECT x FROM t9 WHERE y=6) + OR x=(SELECT x FROM t9 WHERE y=7) + OR x=(SELECT x FROM t9 WHERE y=8) + OR x=(SELECT x FROM t9 WHERE y=9) + OR x=(SELECT x FROM t9 WHERE y=10) + OR x=(SELECT x FROM t9 WHERE y=11) + OR x=(SELECT x FROM t9 WHERE y=12) + OR x=(SELECT x FROM t9 WHERE y=13) + OR x=(SELECT x FROM t9 WHERE y=14) + ; + } + } {1 2 3 4 5 6 7 8 9 10 11} +} + +# Make sure a database connection still works after changing the +# working directory. +# +do_test misc1-14.1 { + file mkdir tempdir + cd tempdir + execsql {BEGIN} + file exists ./test.db-journal +} {0} +do_test misc1-14.2 { + execsql {UPDATE t1 SET a=0 WHERE 0} + file exists ../test.db-journal +} {1} +do_test misc1-14.3 { + cd .. + file delete tempdir + execsql {COMMIT} + file exists ./test.db-journal +} {0} + +# A failed create table should not leave the table in the internal +# data structures. Ticket #238. +# +do_test misc1-15.1.1 { + catchsql { + CREATE TABLE t10 AS SELECT c1; + } +} {1 {no such column: c1}} +do_test misc1-15.1.2 { + catchsql { + CREATE TABLE t10 AS SELECT t9.c1; + } +} {1 {no such column: t9.c1}} +do_test misc1-15.1.3 { + catchsql { + CREATE TABLE t10 AS SELECT main.t9.c1; + } +} {1 {no such column: main.t9.c1}} +do_test misc1-15.2 { + catchsql { + CREATE TABLE t10 AS SELECT 1; + } + # The bug in ticket #238 causes the statement above to fail with + # the error "table t10 alread exists" +} {0 {}} + +# Test for memory leaks when a CREATE TABLE containing a primary key +# fails. Ticket #249. 
+# +do_test misc1-16.1 { + catchsql {SELECT name FROM sqlite_master LIMIT 1} + catchsql { + CREATE TABLE test(a integer, primary key(a)); + } +} {0 {}} +do_test misc1-16.2 { + catchsql { + CREATE TABLE test(a integer, primary key(a)); + } +} {1 {table test already exists}} +do_test misc1-16.3 { + catchsql { + CREATE TABLE test2(a text primary key, b text, primary key(a,b)); + } +} {1 {table "test2" has more than one primary key}} +do_test misc1-16.4 { + execsql { + INSERT INTO test VALUES(1); + SELECT rowid, a FROM test; + } +} {1 1} +do_test misc1-16.5 { + execsql { + INSERT INTO test VALUES(5); + SELECT rowid, a FROM test; + } +} {1 1 5 5} +do_test misc1-16.6 { + execsql { + INSERT INTO test VALUES(NULL); + SELECT rowid, a FROM test; + } +} {1 1 5 5 6 6} + +ifcapable trigger&&tempdb { +# Ticket #333: Temp triggers that modify persistent tables. +# +do_test misc1-17.1 { + execsql { + BEGIN; + CREATE TABLE RealTable(TestID INTEGER PRIMARY KEY, TestString TEXT); + CREATE TEMP TABLE TempTable(TestID INTEGER PRIMARY KEY, TestString TEXT); + CREATE TEMP TRIGGER trigTest_1 AFTER UPDATE ON TempTable BEGIN + INSERT INTO RealTable(TestString) + SELECT new.TestString FROM TempTable LIMIT 1; + END; + INSERT INTO TempTable(TestString) VALUES ('1'); + INSERT INTO TempTable(TestString) VALUES ('2'); + UPDATE TempTable SET TestString = TestString + 1 WHERE TestID=1 OR TestId=2; + COMMIT; + SELECT TestString FROM RealTable ORDER BY 1; + } +} {2 3} +} + +do_test misc1-18.1 { + set n [sqlite3_sleep 100] + expr {$n>=100} +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc2.test new file mode 100644 index 0000000..e9d85ed --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc2.test @@ -0,0 +1,435 @@ +# 2003 June 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for miscellanous features that were +# left out of other test files. +# +# $Id: misc2.test,v 1.28 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {trigger} { +# Test for ticket #360 +# +do_test misc2-1.1 { + catchsql { + CREATE TABLE FOO(bar integer); + CREATE TRIGGER foo_insert BEFORE INSERT ON foo BEGIN + SELECT CASE WHEN (NOT new.bar BETWEEN 0 AND 20) + THEN raise(rollback, 'aiieee') END; + END; + INSERT INTO foo(bar) VALUES (1); + } +} {0 {}} +do_test misc2-1.2 { + catchsql { + INSERT INTO foo(bar) VALUES (111); + } +} {1 aiieee} +} ;# endif trigger + +# Make sure ROWID works on a view and a subquery. Ticket #364 +# +do_test misc2-2.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + CREATE TABLE t2(a,b,c); + INSERT INTO t2 VALUES(7,8,9); + } +} {} +ifcapable subquery { + do_test misc2-2.2 { + execsql { + SELECT rowid, * FROM (SELECT * FROM t1, t2); + } + } {{} 1 2 3 7 8 9} +} +ifcapable view { + do_test misc2-2.3 { + execsql { + CREATE VIEW v1 AS SELECT * FROM t1, t2; + SELECT rowid, * FROM v1; + } + } {{} 1 2 3 7 8 9} +} ;# ifcapable view + +# Ticket #2002 and #1952. 
+ifcapable subquery { + do_test misc2-2.4 { + execsql2 { + SELECT * FROM (SELECT a, b AS 'a', c AS 'a', 4 AS 'a' FROM t1) + } + } {a 1 a:1 2 a:2 3 a:3 4} +} + +# Check name binding precedence. Ticket #387 +# +do_test misc2-3.1 { + catchsql { + SELECT t1.b+t2.b AS a, t1.a, t2.a FROM t1, t2 WHERE a==10 + } +} {1 {ambiguous column name: a}} + +# Make sure 32-bit integer overflow is handled properly in queries. +# ticket #408 +# +do_test misc2-4.1 { + execsql { + INSERT INTO t1 VALUES(4000000000,'a','b'); + SELECT a FROM t1 WHERE a>1; + } +} {4000000000} +do_test misc2-4.2 { + execsql { + INSERT INTO t1 VALUES(2147483648,'b2','c2'); + INSERT INTO t1 VALUES(2147483647,'b3','c3'); + SELECT a FROM t1 WHERE a>2147483647; + } +} {4000000000 2147483648} +do_test misc2-4.3 { + execsql { + SELECT a FROM t1 WHERE a<2147483648; + } +} {1 2147483647} +do_test misc2-4.4 { + execsql { + SELECT a FROM t1 WHERE a<=2147483648; + } +} {1 2147483648 2147483647} +do_test misc2-4.5 { + execsql { + SELECT a FROM t1 WHERE a<10000000000; + } +} {1 4000000000 2147483648 2147483647} +do_test misc2-4.6 { + execsql { + SELECT a FROM t1 WHERE a<1000000000000 ORDER BY 1; + } +} {1 2147483647 2147483648 4000000000} + +# There were some issues with expanding a SrcList object using a call +# to sqliteSrcListAppend() if the SrcList had previously been duplicated +# using a call to sqliteSrcListDup(). Ticket #416. The following test +# makes sure the problem has been fixed. +# +ifcapable view { +do_test misc2-5.1 { + execsql { + CREATE TABLE x(a,b); + CREATE VIEW y AS + SELECT x1.b AS p, x2.b AS q FROM x AS x1, x AS x2 WHERE x1.a=x2.a; + CREATE VIEW z AS + SELECT y1.p, y2.p FROM y AS y1, y AS y2 WHERE y1.q=y2.q; + SELECT * from z; + } +} {} +} + +# Make sure we can open a database with an empty filename. What this +# does is store the database in a temporary file that is deleted when +# the database is closed. Ticket #432. +# +do_test misc2-6.1 { + db close + sqlite3 db {} + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t1; + } +} {1 2} + +# Make sure we get an error message (not a segfault) on an attempt to +# update a table from within the callback of a select on that same +# table. +# +# 2006-08-16: This has changed. It is now permitted to update +# the table being SELECTed from within the callback of the query. 
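A sketch of the now-permitted pattern, written against the Tcl interface used throughout these tests (the table name is hypothetical): rows of the table being read may be deleted from inside the callback of the very SELECT that is reading them.

    db eval {CREATE TABLE log(id INTEGER PRIMARY KEY, msg)}
    db eval {INSERT INTO log(msg) VALUES('a')}
    db eval {INSERT INTO log(msg) VALUES('b')}
    # Modify "log" while a SELECT on "log" is still stepping:
    db eval {SELECT id FROM log} {
      db eval {DELETE FROM log WHERE id=$id}
    }
    db eval {SELECT count(*) FROM log}   ;# expected to report 0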
+# +ifcapable tclvar { + do_test misc2-7.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + SELECT * FROM t1; + } + } {1 2 3} + do_test misc2-7.2 { + set rc [catch { + db eval {SELECT rowid FROM t1} {} { + db eval "DELETE FROM t1 WHERE rowid=$rowid" + } + } msg] + lappend rc $msg + } {0 {}} + do_test misc2-7.3 { + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.4 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {2 4} + do_test misc2-7.5 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid+1} + } + } + execsql {SELECT * FROM t1} + } {1 3} + do_test misc2-7.6 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1} + } + } + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.7 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {101 2 103 4} + do_test misc2-7.8 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + } + db eval {SELECT rowid, x FROM t1} { + if {$x<10} { + db eval {INSERT INTO t1 VALUES($x+1)} + } + } + execsql {SELECT * FROM t1} + } {1 2 3 4 5 6 7 8 9 10} + + # Repeat the tests 7.1 through 7.8 about but this time do the SELECTs + # in reverse order so that we exercise the sqlite3BtreePrev() routine + # instead of sqlite3BtreeNext() + # + do_test misc2-7.11 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + SELECT * FROM t1; + } + } {1 2 3} + do_test misc2-7.12 { + set rc [catch { + db eval {SELECT rowid FROM t1 ORDER BY rowid DESC} {} { + db eval "DELETE FROM t1 WHERE rowid=$rowid" + } + } msg] + lappend rc $msg + } {0 {}} + do_test misc2-7.13 { + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.14 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {2 4} + do_test misc2-7.15 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid+1} + } + } + execsql {SELECT * FROM t1} + } {1 3} + do_test misc2-7.16 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval 
{DELETE FROM t1} + } + } + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.17 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {101 2 103 4} + do_test misc2-7.18 { + execsql { + DELETE FROM t1; + INSERT INTO t1(rowid,x) VALUES(10,10); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x>1} { + db eval {INSERT INTO t1(rowid,x) VALUES($x-1,$x-1)} + } + } + execsql {SELECT * FROM t1} + } {1 2 3 4 5 6 7 8 9 10} +} + +db close +file delete -force test.db +sqlite3 db test.db + +# Ticket #453. If the SQL ended with "-", the tokenizer was calling that +# an incomplete token, which caused problem. The solution was to just call +# it a minus sign. +# +do_test misc2-8.1 { + catchsql {-} +} {1 {near "-": syntax error}} + +# Ticket #513. Make sure the VDBE stack does not grow on a 3-way join. +# +ifcapable tempdb { + do_test misc2-9.1 { + execsql { + BEGIN; + CREATE TABLE counts(n INTEGER PRIMARY KEY); + INSERT INTO counts VALUES(0); + INSERT INTO counts VALUES(1); + INSERT INTO counts SELECT n+2 FROM counts; + INSERT INTO counts SELECT n+4 FROM counts; + INSERT INTO counts SELECT n+8 FROM counts; + COMMIT; + + CREATE TEMP TABLE x AS + SELECT dim1.n, dim2.n, dim3.n + FROM counts AS dim1, counts AS dim2, counts AS dim3 + WHERE dim1.n<10 AND dim2.n<10 AND dim3.n<10; + + SELECT count(*) FROM x; + } + } {1000} + do_test misc2-9.2 { + execsql { + DROP TABLE x; + CREATE TEMP TABLE x AS + SELECT dim1.n, dim2.n, dim3.n + FROM counts AS dim1, counts AS dim2, counts AS dim3 + WHERE dim1.n>=6 AND dim2.n>=6 AND dim3.n>=6; + + SELECT count(*) FROM x; + } + } {1000} + do_test misc2-9.3 { + execsql { + DROP TABLE x; + CREATE TEMP TABLE x AS + SELECT dim1.n, dim2.n, dim3.n, dim4.n + FROM counts AS dim1, counts AS dim2, counts AS dim3, counts AS dim4 + WHERE dim1.n<5 AND dim2.n<5 AND dim3.n<5 AND dim4.n<5; + + SELECT count(*) FROM x; + } + } [expr 5*5*5*5] +} + +# Ticket #1229. Sometimes when a "NEW.X" appears in a SELECT without +# a FROM clause deep within a trigger, the code generator is unable to +# trace the NEW.X back to an original table and thus figure out its +# declared datatype. +# +# The SQL code below was causing a segfault. +# +ifcapable subquery&&trigger { + do_test misc2-10.1 { + execsql { + CREATE TABLE t1229(x); + CREATE TRIGGER r1229 BEFORE INSERT ON t1229 BEGIN + INSERT INTO t1229 SELECT y FROM (SELECT new.x y); + END; + INSERT INTO t1229 VALUES(1); + } + } {} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc3.test new file mode 100644 index 0000000..cd76335 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc3.test @@ -0,0 +1,317 @@ +# 2003 December 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for miscellanous features that were +# left out of other test files. 
+# +# $Id: misc3.test,v 1.16 2005/01/21 03:12:16 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {integrityck} { + # Ticket #529. Make sure an ABORT does not damage the in-memory cache + # that will be used by subsequent statements in the same transaction. + # + do_test misc3-1.1 { + execsql { + CREATE TABLE t1(a UNIQUE,b); + INSERT INTO t1 + VALUES(1,'a23456789_b23456789_c23456789_d23456789_e23456789_'); + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + UPDATE t1 SET b=b||b; + INSERT INTO t1 VALUES(2,'x'); + UPDATE t1 SET b=substr(b,1,500); + BEGIN; + } + catchsql {UPDATE t1 SET a=CASE a WHEN 2 THEN 1 ELSE a END, b='y';} + execsql { + CREATE TABLE t2(x,y); + COMMIT; + PRAGMA integrity_check; + } + } ok +} +ifcapable {integrityck} { + do_test misc3-1.2 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + } + ifcapable {vacuum} {execsql VACUUM} + execsql { + CREATE TABLE t1(a UNIQUE,b); + INSERT INTO t1 + VALUES(1,'a23456789_b23456789_c23456789_d23456789_e23456789_'); + INSERT INTO t1 SELECT a+1, b||b FROM t1; + INSERT INTO t1 SELECT a+2, b||b FROM t1; + INSERT INTO t1 SELECT a+4, b FROM t1; + INSERT INTO t1 SELECT a+8, b FROM t1; + INSERT INTO t1 SELECT a+16, b FROM t1; + INSERT INTO t1 SELECT a+32, b FROM t1; + INSERT INTO t1 SELECT a+64, b FROM t1; + BEGIN; + } + catchsql {UPDATE t1 SET a=CASE a WHEN 128 THEN 127 ELSE a END, b='';} + execsql { + INSERT INTO t1 VALUES(200,'hello out there'); + COMMIT; + PRAGMA integrity_check; + } + } ok +} + +# Tests of the sqliteAtoF() function in util.c +# +do_test misc3-2.1 { + execsql {SELECT 2e-25*0.5e25} +} 1.0 +do_test misc3-2.2 { + execsql {SELECT 2.0e-25*000000.500000000000000000000000000000e+00025} +} 1.0 +do_test misc3-2.3 { + execsql {SELECT 000000000002e-0000000025*0.5e25} +} 1.0 +do_test misc3-2.4 { + execsql {SELECT 2e-25*0.5e250} +} 1e+225 +do_test misc3-2.5 { + execsql {SELECT 2.0e-250*0.5e25} +} 1e-225 +do_test misc3-2.6 { + execsql {SELECT '-2.0e-127' * '-0.5e27'} +} 1e-100 +do_test misc3-2.7 { + execsql {SELECT '+2.0e-127' * '-0.5e27'} +} -1e-100 +do_test misc3-2.8 { + execsql {SELECT 2.0e-27 * '+0.5e+127'} +} 1e+100 +do_test misc3-2.9 { + execsql {SELECT 2.0e-27 * '+0.000005e+132'} +} 1e+100 + +# Ticket #522. Make sure integer overflow is handled properly in +# indices. 
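The underlying guarantee is that values are held as 64-bit integers, so crossing the 32-bit boundary neither wraps nor loses ordering; a quick illustrative sketch:

    SELECT 2147483647 + 1;            -- 2147483648, no 32-bit wraparound
    SELECT -2147483648 - 1;           -- -2147483649
    SELECT 2147483648 > 2147483647;   -- 1: comparisons stay correct past 2^31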
+# +integrity_check misc3-3.1 +do_test misc3-3.2 { + execsql { + CREATE TABLE t2(a INT UNIQUE); + } +} {} +integrity_check misc3-3.2.1 +do_test misc3-3.3 { + execsql { + INSERT INTO t2 VALUES(2147483648); + } +} {} +integrity_check misc3-3.3.1 +do_test misc3-3.4 { + execsql { + INSERT INTO t2 VALUES(-2147483649); + } +} {} +integrity_check misc3-3.4.1 +do_test misc3-3.5 { + execsql { + INSERT INTO t2 VALUES(+2147483649); + } +} {} +integrity_check misc3-3.5.1 +do_test misc3-3.6 { + execsql { + INSERT INTO t2 VALUES(+2147483647); + INSERT INTO t2 VALUES(-2147483648); + INSERT INTO t2 VALUES(-2147483647); + INSERT INTO t2 VALUES(2147483646); + SELECT * FROM t2 ORDER BY a; + } +} {-2147483649 -2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649} +do_test misc3-3.7 { + execsql { + SELECT * FROM t2 WHERE a>=-2147483648 ORDER BY a; + } +} {-2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649} +do_test misc3-3.8 { + execsql { + SELECT * FROM t2 WHERE a>-2147483648 ORDER BY a; + } +} {-2147483647 2147483646 2147483647 2147483648 2147483649} +do_test misc3-3.9 { + execsql { + SELECT * FROM t2 WHERE a>-2147483649 ORDER BY a; + } +} {-2147483648 -2147483647 2147483646 2147483647 2147483648 2147483649} +do_test misc3-3.10 { + execsql { + SELECT * FROM t2 WHERE a>=0 AND a<2147483649 ORDER BY a DESC; + } +} {2147483648 2147483647 2147483646} +do_test misc3-3.11 { + execsql { + SELECT * FROM t2 WHERE a>=0 AND a<=2147483648 ORDER BY a DESC; + } +} {2147483648 2147483647 2147483646} +do_test misc3-3.12 { + execsql { + SELECT * FROM t2 WHERE a>=0 AND a<2147483648 ORDER BY a DESC; + } +} {2147483647 2147483646} +do_test misc3-3.13 { + execsql { + SELECT * FROM t2 WHERE a>=0 AND a<=2147483647 ORDER BY a DESC; + } +} {2147483647 2147483646} +do_test misc3-3.14 { + execsql { + SELECT * FROM t2 WHERE a>=0 AND a<2147483647 ORDER BY a DESC; + } +} {2147483646} + +# Ticket #565. A stack overflow is occurring when the subquery to the +# right of an IN operator contains many NULLs +# +do_test misc3-4.1 { + execsql { + CREATE TABLE t3(a INTEGER PRIMARY KEY, b); + INSERT INTO t3(b) VALUES('abc'); + INSERT INTO t3(b) VALUES('xyz'); + INSERT INTO t3(b) VALUES(NULL); + INSERT INTO t3(b) VALUES(NULL); + INSERT INTO t3(b) SELECT b||'d' FROM t3; + INSERT INTO t3(b) SELECT b||'e' FROM t3; + INSERT INTO t3(b) SELECT b||'f' FROM t3; + INSERT INTO t3(b) SELECT b||'g' FROM t3; + INSERT INTO t3(b) SELECT b||'h' FROM t3; + SELECT count(a), count(b) FROM t3; + } +} {128 64} +ifcapable subquery { +do_test misc3-4.2 { + execsql { + SELECT count(a) FROM t3 WHERE b IN (SELECT b FROM t3); + } + } {64} + do_test misc3-4.3 { + execsql { + SELECT count(a) FROM t3 WHERE b IN (SELECT b FROM t3 ORDER BY a+1); + } + } {64} +} + +# Ticket #601: Putting a left join inside "SELECT * FROM ()" +# gives different results that if the outer "SELECT * FROM ..." is omitted. 
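The invariant being guarded can be stated with a generic sketch (tables a and b and their columns are hypothetical): wrapping a LEFT JOIN in a do-nothing outer SELECT must not change the rows it produces.

    SELECT a.x, b.y FROM a LEFT JOIN b USING(x);
    SELECT * FROM (SELECT a.x, b.y FROM a LEFT JOIN b USING(x));
    -- both statements are expected to return identical result sets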
+# +ifcapable subquery { + do_test misc3-5.1 { + execsql { + CREATE TABLE x1 (b, c); + INSERT INTO x1 VALUES('dog',3); + INSERT INTO x1 VALUES('cat',1); + INSERT INTO x1 VALUES('dog',4); + CREATE TABLE x2 (c, e); + INSERT INTO x2 VALUES(1,'one'); + INSERT INTO x2 VALUES(2,'two'); + INSERT INTO x2 VALUES(3,'three'); + INSERT INTO x2 VALUES(4,'four'); + SELECT x2.c AS c, e, b FROM x2 LEFT JOIN + (SELECT b, max(c)+0 AS c FROM x1 GROUP BY b) + USING(c); + } + } {1 one cat 2 two {} 3 three {} 4 four dog} + do_test misc3-5.2 { + execsql { + SELECT * FROM ( + SELECT x2.c AS c, e, b FROM x2 LEFT JOIN + (SELECT b, max(c)+0 AS c FROM x1 GROUP BY b) + USING(c) + ); + } + } {1 one cat 2 two {} 3 three {} 4 four dog} +} + +ifcapable {explain} { + # Ticket #626: make sure EXPLAIN prevents BEGIN and COMMIT from working. + # + do_test misc3-6.1 { + execsql {EXPLAIN BEGIN} + catchsql {BEGIN} + } {0 {}} + do_test misc3-6.2 { + execsql {EXPLAIN COMMIT} + catchsql {COMMIT} + } {0 {}} + do_test misc3-6.3 { + execsql {BEGIN; EXPLAIN ROLLBACK} + catchsql {ROLLBACK} + } {0 {}} +} + +ifcapable {trigger} { +# Ticket #640: vdbe stack overflow with a LIMIT clause on a SELECT inside +# of a trigger. +# +do_test misc3-7.1 { + execsql { + BEGIN; + CREATE TABLE y1(a); + CREATE TABLE y2(b); + CREATE TABLE y3(c); + CREATE TRIGGER r1 AFTER DELETE ON y1 FOR EACH ROW BEGIN + INSERT INTO y3(c) SELECT b FROM y2 ORDER BY b LIMIT 1; + END; + INSERT INTO y1 VALUES(1); + INSERT INTO y1 VALUES(2); + INSERT INTO y1 SELECT a+2 FROM y1; + INSERT INTO y1 SELECT a+4 FROM y1; + INSERT INTO y1 SELECT a+8 FROM y1; + INSERT INTO y1 SELECT a+16 FROM y1; + INSERT INTO y2 SELECT a FROM y1; + COMMIT; + SELECT count(*) FROM y1; + } +} 32 +do_test misc3-7.2 { + execsql { + DELETE FROM y1; + SELECT count(*) FROM y1; + } +} 0 +do_test misc3-7.3 { + execsql { + SELECT count(*) FROM y3; + } +} 32 +} ;# endif trigger + +# Ticket #668: VDBE stack overflow occurs when the left-hand side +# of an IN expression is NULL and the result is used as an integer, not +# as a jump. +# +ifcapable subquery { + do_test misc-8.1 { + execsql { + SELECT count(CASE WHEN b IN ('abc','xyz') THEN 'x' END) FROM t3 + } + } {2} + do_test misc-8.2 { + execsql { + SELECT count(*) FROM t3 WHERE 1+(b IN ('abc','xyz'))==2 + } + } {2} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc4.test new file mode 100644 index 0000000..743d365 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc4.test @@ -0,0 +1,197 @@ +# 2004 Jun 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for miscellanous features that were +# left out of other test files. +# +# $Id: misc4.test,v 1.22 2007/08/13 15:28:35 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Prepare a statement that will create a temporary table. Then do +# a rollback. Then try to execute the prepared statement. 
+# +do_test misc4-1.1 { + set DB [sqlite3_connection_pointer db] + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + } +} {} + +ifcapable tempdb { + do_test misc4-1.2 { + set sql {CREATE TEMP TABLE t2 AS SELECT * FROM t1} + set stmt [sqlite3_prepare $DB $sql -1 TAIL] + execsql { + BEGIN; + CREATE TABLE t3(a,b,c); + INSERT INTO t1 SELECT * FROM t1; + ROLLBACK; + } + } {} + + # Because the previous transaction included a DDL statement and + # was rolled back, statement $stmt was marked as expired. Executing it + # now returns SQLITE_SCHEMA. + do_test misc4-1.2.1 { + list [sqlite3_step $stmt] [sqlite3_finalize $stmt] + } {SQLITE_ERROR SQLITE_SCHEMA} + do_test misc4-1.2.2 { + set stmt [sqlite3_prepare $DB $sql -1 TAIL] + set TAIL + } {} + + do_test misc4-1.3 { + sqlite3_step $stmt + } SQLITE_DONE + do_test misc4-1.4 { + execsql { + SELECT * FROM temp.t2; + } + } {1} + + # Drop the temporary table, then rerun the prepared statement to + # recreate it again. This recreates ticket #807. + # + do_test misc4-1.5 { + execsql {DROP TABLE t2} + sqlite3_reset $stmt + sqlite3_step $stmt + } {SQLITE_ERROR} + do_test misc4-1.6 { + sqlite3_finalize $stmt + } {SQLITE_SCHEMA} +} + +# Prepare but do not execute various CREATE statements. Then before +# those statements are executed, try to use the tables, indices, views, +# are triggers that were created. +# +do_test misc4-2.1 { + set stmt [sqlite3_prepare $DB {CREATE TABLE t3(x);} -1 TAIL] + catchsql { + INSERT INTO t3 VALUES(1); + } +} {1 {no such table: t3}} +do_test misc4-2.2 { + sqlite3_step $stmt +} SQLITE_DONE +do_test misc4-2.3 { + sqlite3_finalize $stmt +} SQLITE_OK +do_test misc4-2.4 { + catchsql { + INSERT INTO t3 VALUES(1); + } +} {0 {}} + +# Ticket #966 +# +ifcapable compound { +do_test misc4-3.1 { + execsql { + CREATE TABLE Table1(ID integer primary key, Value TEXT); + INSERT INTO Table1 VALUES(1, 'x'); + CREATE TABLE Table2(ID integer NOT NULL, Value TEXT); + INSERT INTO Table2 VALUES(1, 'z'); + INSERT INTO Table2 VALUES (1, 'a'); + SELECT ID, Value FROM Table1 + UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1 + ORDER BY 1, 2; + } +} {1 x 1 z} +do_test misc4-3.2 { + catchsql { + SELECT ID, Value FROM Table1 + UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1, 2 + ORDER BY 1, 2; + } +} {1 {aggregate functions are not allowed in the GROUP BY clause}} +} ;# ifcapable compound + +# Ticket #1047. Make sure column types are preserved in subqueries. +# +ifcapable subquery { + do_test misc4-4.1 { + execsql { + create table a(key varchar, data varchar); + create table b(key varchar, period integer); + insert into a values('01','data01'); + insert into a values('+1','data+1'); + + insert into b values ('01',1); + insert into b values ('01',2); + insert into b values ('+1',3); + insert into b values ('+1',4); + + select a.*, x.* + from a, (select key,sum(period) from b group by key) as x + where a.key=x.key; + } + } {01 data01 01 3 +1 data+1 +1 7} + + # This test case tests the same property as misc4-4.1, but it is + # a bit smaller which makes it easier to work with while debugging. + do_test misc4-4.2 { + execsql { + CREATE TABLE ab(a TEXT, b TEXT); + INSERT INTO ab VALUES('01', '1'); + } + execsql { + select * from ab, (select b from ab) as x where x.b = ab.a; + } + } {} +} + + +# Ticket #1036. When creating tables from a SELECT on a view, use the +# short names of columns. 
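In other words, when the source of a CREATE TABLE ... AS SELECT is a view, the new table should pick up the bare column names; a sketch using hypothetical names:

    CREATE TABLE base_demo(a);
    CREATE VIEW vw_demo AS SELECT base_demo.a FROM base_demo;
    CREATE TABLE copy_demo AS SELECT * FROM vw_demo;
    -- copy_demo is expected to end up with a single column named "a",
    -- not "base_demo.a" or "vw_demo.a"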
+# +ifcapable view { + do_test misc4-5.1 { + execsql { + create table t4(a,b); + create table t5(a,c); + insert into t4 values (1,2); + insert into t5 values (1,3); + create view myview as select t4.a a from t4 inner join t5 on t4.a=t5.a; + create table problem as select * from myview; + } + execsql2 { + select * FROM problem; + } + } {a 1} + do_test misc4-5.2 { + execsql2 { + create table t6 as select * from t4, t5; + select * from t6; + } + } {a 1 b 2 a:1 1 c 3} +} + +# Ticket #1086 +do_test misc4-6.1 { + execsql { + CREATE TABLE abc(a); + INSERT INTO abc VALUES(1); + CREATE TABLE def(d, e, f, PRIMARY KEY(d, e)); + } +} {} +do_test misc4-6.2 { + execsql { + SELECT a FROM abc LEFT JOIN def ON (abc.a=def.d); + } +} {1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc5.test new file mode 100644 index 0000000..86963b2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc5.test @@ -0,0 +1,620 @@ +# 2005 Mar 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for miscellanous features that were +# left out of other test files. +# +# $Id: misc5.test,v 1.17 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build records using the MakeRecord opcode such that the size of the +# header is at the transition point in the size of a varint. +# +# This test causes an assertion failure or a buffer overrun in version +# 3.1.5 and earlier. +# +for {set i 120} {$i<140} {incr i} { + do_test misc5-1.$i { + catchsql {DROP TABLE t1} + set sql1 {CREATE TABLE t1} + set sql2 {INSERT INTO t1 VALUES} + set sep ( + for {set j 0} {$j<$i} {incr j} { + append sql1 ${sep}a$j + append sql2 ${sep}$j + set sep , + } + append sql1 {);} + append sql2 {);} + execsql $sql1$sql2 + } {} +} + +# Make sure large integers are stored correctly. 
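Because SQLite keeps integers as 64-bit values, even the extremes should round-trip exactly; an illustrative sketch with a hypothetical table:

    CREATE TABLE big(i);
    INSERT INTO big VALUES(9223372036854775807);     -- 2^63 - 1
    INSERT INTO big VALUES(-9223372036854775807);
    SELECT i FROM big ORDER BY i;
    -- both values are expected back digit-for-digit,
    -- with no drift through floating point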
+# +ifcapable conflict { + do_test misc5-2.1 { + execsql { + create table t2(x unique); + insert into t2 values(1); + insert or ignore into t2 select x*2 from t2; + insert or ignore into t2 select x*4 from t2; + insert or ignore into t2 select x*16 from t2; + insert or ignore into t2 select x*256 from t2; + insert or ignore into t2 select x*65536 from t2; + insert or ignore into t2 select x*2147483648 from t2; + insert or ignore into t2 select x-1 from t2; + insert or ignore into t2 select x+1 from t2; + insert or ignore into t2 select -x from t2; + select count(*) from t2; + } + } 371 +} else { + do_test misc5-2.1 { + execsql { + BEGIN; + create table t2(x unique); + create table t2_temp(x); + insert into t2_temp values(1); + insert into t2_temp select x*2 from t2_temp; + insert into t2_temp select x*4 from t2_temp; + insert into t2_temp select x*16 from t2_temp; + insert into t2_temp select x*256 from t2_temp; + insert into t2_temp select x*65536 from t2_temp; + insert into t2_temp select x*2147483648 from t2_temp; + insert into t2_temp select x-1 from t2_temp; + insert into t2_temp select x+1 from t2_temp; + insert into t2_temp select -x from t2_temp; + INSERT INTO t2 SELECT DISTINCT(x) FROM t2_temp; + DROP TABLE t2_temp; + COMMIT; + select count(*) from t2; + } + } 371 +} +do_test misc5-2.2 { + execsql { + select x from t2 order by x; + } +} \ +"-4611686018427387905\ +-4611686018427387904\ +-4611686018427387903\ +-2305843009213693953\ +-2305843009213693952\ +-2305843009213693951\ +-1152921504606846977\ +-1152921504606846976\ +-1152921504606846975\ +-576460752303423489\ +-576460752303423488\ +-576460752303423487\ +-288230376151711745\ +-288230376151711744\ +-288230376151711743\ +-144115188075855873\ +-144115188075855872\ +-144115188075855871\ +-72057594037927937\ +-72057594037927936\ +-72057594037927935\ +-36028797018963969\ +-36028797018963968\ +-36028797018963967\ +-18014398509481985\ +-18014398509481984\ +-18014398509481983\ +-9007199254740993\ +-9007199254740992\ +-9007199254740991\ +-4503599627370497\ +-4503599627370496\ +-4503599627370495\ +-2251799813685249\ +-2251799813685248\ +-2251799813685247\ +-1125899906842625\ +-1125899906842624\ +-1125899906842623\ +-562949953421313\ +-562949953421312\ +-562949953421311\ +-281474976710657\ +-281474976710656\ +-281474976710655\ +-140737488355329\ +-140737488355328\ +-140737488355327\ +-70368744177665\ +-70368744177664\ +-70368744177663\ +-35184372088833\ +-35184372088832\ +-35184372088831\ +-17592186044417\ +-17592186044416\ +-17592186044415\ +-8796093022209\ +-8796093022208\ +-8796093022207\ +-4398046511105\ +-4398046511104\ +-4398046511103\ +-2199023255553\ +-2199023255552\ +-2199023255551\ +-1099511627777\ +-1099511627776\ +-1099511627775\ +-549755813889\ +-549755813888\ +-549755813887\ +-274877906945\ +-274877906944\ +-274877906943\ +-137438953473\ +-137438953472\ +-137438953471\ +-68719476737\ +-68719476736\ +-68719476735\ +-34359738369\ +-34359738368\ +-34359738367\ +-17179869185\ +-17179869184\ +-17179869183\ +-8589934593\ +-8589934592\ +-8589934591\ +-4294967297\ +-4294967296\ +-4294967295\ +-2147483649\ +-2147483648\ +-2147483647\ +-1073741825\ +-1073741824\ +-1073741823\ +-536870913\ +-536870912\ +-536870911\ +-268435457\ +-268435456\ +-268435455\ +-134217729\ +-134217728\ +-134217727\ +-67108865\ +-67108864\ +-67108863\ +-33554433\ +-33554432\ +-33554431\ +-16777217\ +-16777216\ +-16777215\ +-8388609\ +-8388608\ +-8388607\ +-4194305\ +-4194304\ +-4194303\ +-2097153\ +-2097152\ +-2097151\ +-1048577\ +-1048576\ +-1048575\ 
+-524289\ +-524288\ +-524287\ +-262145\ +-262144\ +-262143\ +-131073\ +-131072\ +-131071\ +-65537\ +-65536\ +-65535\ +-32769\ +-32768\ +-32767\ +-16385\ +-16384\ +-16383\ +-8193\ +-8192\ +-8191\ +-4097\ +-4096\ +-4095\ +-2049\ +-2048\ +-2047\ +-1025\ +-1024\ +-1023\ +-513\ +-512\ +-511\ +-257\ +-256\ +-255\ +-129\ +-128\ +-127\ +-65\ +-64\ +-63\ +-33\ +-32\ +-31\ +-17\ +-16\ +-15\ +-9\ +-8\ +-7\ +-5\ +-4\ +-3\ +-2\ +-1\ +0\ +1\ +2\ +3\ +4\ +5\ +7\ +8\ +9\ +15\ +16\ +17\ +31\ +32\ +33\ +63\ +64\ +65\ +127\ +128\ +129\ +255\ +256\ +257\ +511\ +512\ +513\ +1023\ +1024\ +1025\ +2047\ +2048\ +2049\ +4095\ +4096\ +4097\ +8191\ +8192\ +8193\ +16383\ +16384\ +16385\ +32767\ +32768\ +32769\ +65535\ +65536\ +65537\ +131071\ +131072\ +131073\ +262143\ +262144\ +262145\ +524287\ +524288\ +524289\ +1048575\ +1048576\ +1048577\ +2097151\ +2097152\ +2097153\ +4194303\ +4194304\ +4194305\ +8388607\ +8388608\ +8388609\ +16777215\ +16777216\ +16777217\ +33554431\ +33554432\ +33554433\ +67108863\ +67108864\ +67108865\ +134217727\ +134217728\ +134217729\ +268435455\ +268435456\ +268435457\ +536870911\ +536870912\ +536870913\ +1073741823\ +1073741824\ +1073741825\ +2147483647\ +2147483648\ +2147483649\ +4294967295\ +4294967296\ +4294967297\ +8589934591\ +8589934592\ +8589934593\ +17179869183\ +17179869184\ +17179869185\ +34359738367\ +34359738368\ +34359738369\ +68719476735\ +68719476736\ +68719476737\ +137438953471\ +137438953472\ +137438953473\ +274877906943\ +274877906944\ +274877906945\ +549755813887\ +549755813888\ +549755813889\ +1099511627775\ +1099511627776\ +1099511627777\ +2199023255551\ +2199023255552\ +2199023255553\ +4398046511103\ +4398046511104\ +4398046511105\ +8796093022207\ +8796093022208\ +8796093022209\ +17592186044415\ +17592186044416\ +17592186044417\ +35184372088831\ +35184372088832\ +35184372088833\ +70368744177663\ +70368744177664\ +70368744177665\ +140737488355327\ +140737488355328\ +140737488355329\ +281474976710655\ +281474976710656\ +281474976710657\ +562949953421311\ +562949953421312\ +562949953421313\ +1125899906842623\ +1125899906842624\ +1125899906842625\ +2251799813685247\ +2251799813685248\ +2251799813685249\ +4503599627370495\ +4503599627370496\ +4503599627370497\ +9007199254740991\ +9007199254740992\ +9007199254740993\ +18014398509481983\ +18014398509481984\ +18014398509481985\ +36028797018963967\ +36028797018963968\ +36028797018963969\ +72057594037927935\ +72057594037927936\ +72057594037927937\ +144115188075855871\ +144115188075855872\ +144115188075855873\ +288230376151711743\ +288230376151711744\ +288230376151711745\ +576460752303423487\ +576460752303423488\ +576460752303423489\ +1152921504606846975\ +1152921504606846976\ +1152921504606846977\ +2305843009213693951\ +2305843009213693952\ +2305843009213693953\ +4611686018427387903\ +4611686018427387904\ +4611686018427387905" + +# Ticket #1210. Do proper reference counting of Table structures +# so that deeply nested SELECT statements can be flattened correctly. 
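The shape of the problem is easiest to see in a trivial sketch (hypothetical table): every layer of nesting refers to the same underlying table, and the flattener must keep its reference counts straight while collapsing the layers.

    CREATE TABLE flat_demo(a);
    INSERT INTO flat_demo VALUES(1);
    INSERT INTO flat_demo VALUES(2);
    SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT a FROM flat_demo)));
    -- expected: 1 and 2, with the nesting flattened away rather than
    -- evaluated as separate subqueries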
+# +ifcapable subquery { + do_test misc5-3.1 { + execsql { + CREATE TABLE songs(songid, artist, timesplayed); + INSERT INTO songs VALUES(1,'one',1); + INSERT INTO songs VALUES(2,'one',2); + INSERT INTO songs VALUES(3,'two',3); + INSERT INTO songs VALUES(4,'three',5); + INSERT INTO songs VALUES(5,'one',7); + INSERT INTO songs VALUES(6,'two',11); + SELECT DISTINCT artist + FROM ( + SELECT DISTINCT artist + FROM songs + WHERE songid IN ( + SELECT songid + FROM songs + WHERE LOWER(artist) = ( + SELECT DISTINCT LOWER(artist) + FROM ( + SELECT DISTINCT artist,sum(timesplayed) AS total + FROM songs + GROUP BY LOWER(artist) + ORDER BY total DESC + LIMIT 10 + ) + WHERE artist <> '' + ) + ) + ) + ORDER BY LOWER(artist) ASC; + } + } {two} +} + +# Ticket #1370. Do not overwrite small files (less than 1024 bytes) +# when trying to open them as a database. +# +do_test misc5-4.1 { + db close + file delete -force test.db + set fd [open test.db w] + puts $fd "This is not really a database" + close $fd + sqlite3 db test.db + catchsql { + CREATE TABLE t1(a,b,c); + } +} {1 {file is encrypted or is not a database}} + +# Ticket #1371. Allow floating point numbers of the form .N or N. +# +do_test misc5-5.1 { + execsql {SELECT .1 } +} 0.1 +do_test misc5-5.2 { + execsql {SELECT 2. } +} 2.0 +do_test misc5-5.3 { + execsql {SELECT 3.e0 } +} 3.0 +do_test misc5-5.4 { + execsql {SELECT .4e+1} +} 4.0 + +# Ticket #1582. Ensure that an unknown table in a LIMIT clause applied to +# a UNION ALL query causes an error, not a crash. +# +db close +file delete -force test.db +sqlite3 db test.db +ifcapable subquery&&compound { + do_test misc5-6.1 { + catchsql { + SELECT * FROM sqlite_master + UNION ALL + SELECT * FROM sqlite_master + LIMIT (SELECT count(*) FROM blah); + } + } {1 {no such table: blah}} + do_test misc5-6.2 { + execsql { + CREATE TABLE logs(msg TEXT, timestamp INTEGER, dbtime TEXT); + } + catchsql { + SELECT * FROM logs WHERE logs.id >= (SELECT head FROM logs_base) + UNION ALL + SELECT * FROM logs + LIMIT (SELECT lmt FROM logs_base) ; + } + } {1 {no such column: logs.id}} +} + +# Overflow the lemon parser stack by providing an overly complex +# expression. Make sure that the overflow is detected and reported. +# +do_test misc5-7.1 { + execsql {CREATE TABLE t1(x)} + set sql "INSERT INTO t1 VALUES(" + set tail "" + for {set i 0} {$i<200} {incr i} { + append sql "(1+" + append tail ")" + } + append sql 2$tail + catchsql $sql +} {1 {parser stack overflow}} + +# Check the MISUSE return from sqlitee3_busy_timeout +# +do_test misc5-8.1-misuse { + set DB [sqlite3_connection_pointer db] + db close + sqlite3_busy_timeout $DB 1000 +} SQLITE_MISUSE +sqlite3 db test.db + +# Ticket #1911 +# +ifcapable compound { + do_test misc5-9.1 { + execsql { + SELECT name, type FROM sqlite_master WHERE name IS NULL + UNION + SELECT type, name FROM sqlite_master WHERE type IS NULL + ORDER BY 1, 2, 1, 2, 1, 2 + } + } {} + do_test misc5-9.2 { + execsql { + SELECT name, type FROM sqlite_master WHERE name IS NULL + UNION + SELECT type, name FROM sqlite_master WHERE type IS NULL + ORDER BY 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2 + } + } {} +} + +# Ticket #1912. Make the tokenizer require a space after a numeric +# literal. 
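Put another way, a letter glued directly onto a number is a tokenizer error, while the same text separated by a space parses as a literal followed by an alias; a brief sketch:

    SELECT 123abc;    -- rejected: unrecognized token: "123abc"
    SELECT 123 abc;   -- accepted: the literal 123 with result alias "abc"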
+# +do_test misc5-10.1 { + catchsql { + SELECT 123abc + } +} {1 {unrecognized token: "123abc"}} +do_test misc5-10.2 { + catchsql { + SELECT 1*123.4e5ghi; + } +} {1 {unrecognized token: "123.4e5ghi"}} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc6.test new file mode 100644 index 0000000..9840dd9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc6.test @@ -0,0 +1,48 @@ +# 2006 September 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure sqlite3_value_text() +# always returns a null-terminated string. +# +# $Id: misc6.test,v 1.3 2007/04/23 23:56:32 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test misc6-1.1 { + set DB [sqlite3_connection_pointer db] + sqlite3_create_function $DB + set STMT [sqlite3_prepare $DB {SELECT hex8(?)} -1 DUMMY] + set sqlite_static_bind_value {0123456789} + set sqlite_static_bind_nbyte 5 + sqlite_bind $STMT 1 {} static-nbytes + sqlite3_step $STMT +} SQLITE_ROW +do_test misc6-1.2 { + sqlite3_column_text $STMT 0 +} {3031323334} +ifcapable utf16 { + do_test misc6-1.3 { + sqlite3_finalize $STMT + set STMT [sqlite3_prepare $DB {SELECT hex16(?)} -1 DUMMY] + set sqlite_static_bind_value {0123456789} + set sqlite_static_bind_nbyte 5 + sqlite_bind $STMT 1 {} static-nbytes + sqlite3_step $STMT + } SQLITE_ROW + do_test misc6-1.4 { + sqlite3_column_text $STMT 0 + } {00300031003200330034} +} +sqlite3_finalize $STMT + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misc7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misc7.test new file mode 100644 index 0000000..ea856f2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misc7.test @@ -0,0 +1,439 @@ +# 2006 September 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: misc7.test,v 1.15 2007/08/22 20:18:22 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +#do_test misc7-1 { +# c_misuse_test +#} {} + +do_test misc7-2 { + c_realloc_test +} {} + +do_test misc7-3 { + c_collation_test +} {} + +# Try to open a directory: +# +do_test misc7-4 { + file delete mydir + file mkdir mydir + set rc [catch { + sqlite3 db2 ./mydir + } msg] + list $rc $msg +} {1 {unable to open database file}} + +# Try to open a file with a directory where it's journal file should be. +# +do_test misc7-5 { + file delete mydir + file mkdir mydir-journal + sqlite3 db2 ./mydir + catchsql { + CREATE TABLE abc(a, b, c); + } db2 +} {1 {unable to open database file}} +db2 close + +#-------------------------------------------------------------------- +# The following tests, misc7-6.* test the libraries behaviour when +# it cannot open a file. To force this condition, we use up all the +# file-descriptors before running sqlite. 
This probably only works +# on unix. +# + +proc use_up_files {} { + set ret [list] + catch { + while 1 { lappend ret [open test.db] } + } + return $ret +} + +proc do_fileopen_test {prefix sql} { + set fd_list [use_up_files] + set ::go 1 + set ::n 1 + set ::sql $sql + while {$::go} { + catch {db close} + do_test ${prefix}.${::n} { + set rc [catch { + sqlite db test.db + db eval $::sql + } msg] + if {$rc == 0} {set ::go 0} + + expr {$rc == 0 || ($rc == 1 && [string first unable $msg]==0)} + } 1 + + close [lindex $fd_list 0] + set fd_list [lrange $fd_list 1 end] + incr ::n + } + foreach fd $fd_list { + close $fd + } + db close +} + +execsql { CREATE TABLE abc(a PRIMARY KEY, b, c); } +db close + +if {$tcl_platform(platform)!="windows"} { + do_fileopen_test misc7-6.1 { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(2, 3, 4); + INSERT INTO abc SELECT a+2, b, c FROM abc; + COMMIT; + } + + do_fileopen_test misc7-6.2 { + PRAGMA temp.cache_size = 1000; + } +} + +# +# End of tests for out-of-file-descriptors condition. +#-------------------------------------------------------------------- + +sqlite3 db test.db +execsql { + DELETE FROM abc; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(2, 3, 4); + INSERT INTO abc SELECT a+2, b, c FROM abc; +} + + +#-------------------------------------------------------------------- +# Test that the sqlite3_busy_timeout call seems to delay approximately +# the right amount of time. +# +do_test misc7-7.0 { + sqlite3 db2 test.db + sqlite3_busy_timeout [sqlite3_connection_pointer db] 2000 + execsql { + BEGIN EXCLUSIVE; + } db2 + + # Now db2 has an exclusive lock on the database file, and db has + # a busy-timeout of 2000 milliseconds. So check that trying to + # access the database using connection db delays for at least 1500 ms. + # + set tm [time { + set result [catchsql { + SELECT * FROM sqlite_master; + } db] + }] + set delay [lindex $tm 0] ;# In microseconds + lappend result [expr {$delay>1500000 && $delay<4000000}] +} {1 {database is locked} 1} +db2 close + +#-------------------------------------------------------------------- +# Test that nothing goes horribly wrong when attaching a database +# after the omit_readlock pragma has been exercised. +# +do_test misc7-7.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + PRAGMA omit_readlock = 1; + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.hello(world); + SELECT name FROM aux.sqlite_master; + } +} {hello} +do_test misc7-7.2 { + execsql { + DETACH aux; + } +} {} + +# Test the UTF-16 version of the "out of memory" message (used when +# malloc fails during sqlite3_open() ). +# +ifcapable utf16 { + do_test misc7-8 { + encoding convertfrom unicode [sqlite3_errmsg16 0x00000000] + } {out of memory} +} + +do_test misc7-9 { + execsql { + SELECT * + FROM (SELECT name+1 AS one FROM sqlite_master LIMIT 1 OFFSET 1) + WHERE one LIKE 'hello%'; + } +} {} + +#-------------------------------------------------------------------- +# Improve coverage for vtab code. +# +ifcapable vtab { + # Run some debug code to improve reported coverage + # + + # set sqlite_where_trace 1 + do_test misc7-10 { + register_echo_module [sqlite3_connection_pointer db] + execsql { + CREATE VIRTUAL TABLE t1 USING echo(abc); + SELECT a FROM t1 WHERE a = 1 ORDER BY b; + } + } {1} + set sqlite_where_trace 0 + + # Specify an ORDER BY clause that cannot be indexed. 
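Referring back to misc7-7.0 above: the busy-timeout delay can also be observed without the test harness, using the plain Tcl bindings. A minimal sketch, assuming the sqlite3 Tcl extension and the default rollback-journal locking behaviour; the file name is made up and the measured time will vary:

package require sqlite3

file delete -force busy.db busy.db-journal
sqlite3 writer busy.db
writer eval {CREATE TABLE t(x); INSERT INTO t VALUES(1)}

sqlite3 reader busy.db
reader timeout 2000                    ;# busy timeout of 2000 ms on this handle

writer eval {BEGIN EXCLUSIVE}          ;# writer now holds an exclusive lock

set usec [lindex [time {
  set rc [catch {reader eval {SELECT * FROM t}} msg]
}] 0]
puts [list $rc $msg $usec]             ;# 1 {database is locked} ~2000000 microseconds

writer eval {COMMIT}
reader close
writer close
file delete -force busy.db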
+ do_test misc7-11 { + execsql { + SELECT t1.a, t2.a FROM t1, t1 AS t2 ORDER BY 2 LIMIT 1; + } + } {1 1} + + # The whole point of this is to test an error code other than + # SQLITE_NOMEM from the vtab xBestIndex callback. + # + do_ioerr_test misc7-12 -tclprep { + sqlite3 db2 test.db + register_echo_module [sqlite3_connection_pointer db2] + db2 eval { + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc VALUES(1, 2, 3); + CREATE VIRTUAL TABLE t1 USING echo(abc); + } + db2 close + } -tclbody { + register_echo_module [sqlite3_connection_pointer db] + execsql {SELECT * FROM t1 WHERE a = 1;} + } + + # The case where the virtual table module returns a very large number + # as the cost of a scan (greater than SQLITE_BIG_DOUBLE in the code). + # + do_test misc7-13 { + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + set ::echo_module_cost 2.0e+99 + execsql {SELECT * FROM t1 WHERE a = 1;} + } {1 2 3} + unset ::echo_module_cost +} + +db close +file delete -force test.db +file delete -force test.db-journal +sqlite3 db test.db + +ifcapable explain { + do_test misc7-14 { + execsql { + CREATE TABLE abc(a PRIMARY KEY, b, c); + } + execsql { + EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE rowid = 1; + } + } {0 0 {TABLE abc AS t2 USING PRIMARY KEY}} + do_test misc7-15 { + execsql { + EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE a = 1; + } + } {0 0 {TABLE abc AS t2 WITH INDEX sqlite_autoindex_abc_1}} +} + +db close +file delete -force test.db +file delete -force test.db-journal +sqlite3 db test.db + +#-------------------------------------------------------------------- +# This is all to force the pager_remove_from_stmt_list() function +# (inside pager.c) to remove a pager from the middle of the +# statement-list. +# +do_test misc7-15.1 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc + VALUES(randstr(100,100), randstr(100,100), randstr(100,100)); + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + COMMIT; + } + expr {[file size test.db]>10240} +} {1} +do_test misc7-15.2 { + execsql { + DELETE FROM abc WHERE rowid > 12; + INSERT INTO abc SELECT + randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + } +} {} + +db close +file delete -force test.db +file delete -force test.db-journal +sqlite3 db test.db + +do_ioerr_test misc7-16 -sqlprep { + PRAGMA cache_size = 10; + PRAGMA default_cache_size = 10; + CREATE TABLE t3(a, b, UNIQUE(a, b)); + INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); + INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; + INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; + INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; + INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; + INSERT INTO t3 SELECT randstr(100, 100), randstr(100, 100) FROM t3; 
+ UPDATE t3 + SET b = 'hello world' + WHERE rowid >= (SELECT max(rowid)-1 FROM t3); +} -tclbody { + set rc [catch {db eval { + BEGIN; + PRAGMA cache_size = 10; + INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); + UPDATE t3 SET a = b; + COMMIT; + }} msg] + + if {!$rc || ($rc && [string first "columns" $msg]==0)} { + set msg + } else { + error $msg + } +} + +sqlite3 db test.db + +do_test misc7-16.X { + execsql { + SELECT count(*) FROM t3; + } +} {32} + +set sqlite_pager_n_sort_bucket 4 +do_test misc7-17 { + execsql { + PRAGMA integrity_check; + VACUUM; + PRAGMA integrity_check; + } +} {ok ok} +set sqlite_pager_n_sort_bucket 0 + +#---------------------------------------------------------------------- +# Test the situation where a hot-journal is discovered but write-access +# to it is denied. This should return SQLITE_BUSY. +# +# These tests do not work on windows due to restrictions in the +# windows file system. +# +if {$tcl_platform(platform)!="windows"} { +do_test misc7-17.1 { + execsql { + BEGIN; + DELETE FROM t3 WHERE (oid%3)==0; + } + copy_file test.db bak.db + copy_file test.db-journal bak.db-journal + execsql { + COMMIT; + } + + db close + copy_file bak.db test.db + copy_file bak.db-journal test.db-journal + sqlite3 db test.db + + catch {file attributes test.db-journal -permissions r--------} + catch {file attributes test.db-journal -readonly 1} + catchsql { + SELECT count(*) FROM t3; + } +} {1 {database is locked}} +do_test misc7-17.2 { + catch {file attributes test.db-journal -permissions rw-------} + catch {file attributes test.db-journal -readonly 0} + catchsql { + SELECT count(*) FROM t3; + } +} {0 32} + +set ::pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1] +do_test misc7-17.3 { + db eval { + pragma writable_schema = true; + UPDATE sqlite_master + SET rootpage = $pending_byte_page + WHERE type = 'table' AND name = 't3'; + } + execsql { + SELECT rootpage FROM sqlite_master WHERE type = 'table' AND name = 't3'; + } +} $::pending_byte_page + +do_test misc7-17.4 { + db close + sqlite3 db test.db + catchsql { + SELECT count(*) FROM t3; + } +} {1 {database disk image is malformed}} +} + +# Ticket #2470 +# +do_test misc7-18.1 { + execsql { + CREATE TABLE table_1 (col_10); + CREATE TABLE table_2 ( + col_1, col_2, col_3, col_4, col_5, + col_6, col_7, col_8, col_9, col_10 + ); + SELECT col_10 + FROM + (SELECT table_1.col_10 AS col_10 FROM table_1), + (SELECT table_1.col_10, table_2.col_9 AS qcol_9 + FROM table_1, table_2 + GROUP BY table_1.col_10, qcol_9); + } +} {} + +db close +file delete -force test.db + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/misuse.test b/libraries/sqlite/unix/sqlite-3.5.1/test/misuse.test new file mode 100644 index 0000000..3734aa0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/misuse.test @@ -0,0 +1,207 @@ +# 2002 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the SQLITE_MISUSE detection logic. +# This test file leaks memory and file descriptors. 
+# +# $Id: misuse.test,v 1.11 2006/01/03 00:33:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc catchsql2 {sql} { + set r [ + catch { + set res [list] + db eval $sql data { + if { $res==[list] } { + foreach f $data(*) {lappend res $f} + } + foreach f $data(*) {lappend res $data($f)} + } + set res + } msg + ] + lappend r $msg +} + + +# Make sure the test logic works +# +do_test misuse-1.1 { + db close + catch {file delete -force test2.db} + catch {file delete -force test2.db-journal} + sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + } + catchsql2 { + SELECT * FROM t1 + } +} {0 {a b 1 2}} +do_test misuse-1.2 { + catchsql2 { + SELECT x_coalesce(NULL,a) AS 'xyz' FROM t1 + } +} {1 {no such function: x_coalesce}} +do_test misuse-1.3 { + sqlite3_create_function $::DB + catchsql2 { + SELECT x_coalesce(NULL,a) AS 'xyz' FROM t1 + } +} {0 {xyz 1}} + +# Use the x_sqlite_exec() SQL function to simulate the effect of two +# threads trying to use the same database at the same time. +# +# It used to be prohibited to invoke sqlite_exec() from within a function, +# but that has changed. The following tests used to cause errors but now +# they do not. +# +ifcapable {utf16} { + do_test misuse-1.4 { + catchsql2 { + SELECT x_sqlite_exec('SELECT * FROM t1') AS xyz; + } + } {0 {xyz {1 2}}} +} +do_test misuse-1.5 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} +do_test misuse-1.6 { + catchsql { + SELECT * FROM t1 + } +} {0 {1 2}} + +# Attempt to register a new SQL function while an sqlite_exec() is active. +# +do_test misuse-2.1 { + db close + sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] + execsql { + SELECT * FROM t1 + } +} {1 2} +do_test misuse-2.2 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} + +# We used to disallow creating new function from within an exec(). +# But now this is acceptable. +do_test misuse-2.3 { + set v [catch { + db eval {SELECT * FROM t1} {} { + sqlite3_create_function $::DB + } + } msg] + lappend v $msg +} {0 {}} +do_test misuse-2.4 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} +do_test misuse-2.5 { + catchsql { + SELECT * FROM t1 + } +} {0 {1 2}} + +# Attempt to register a new SQL aggregate while an sqlite_exec() is active. +# +do_test misuse-3.1 { + db close + sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] + execsql { + SELECT * FROM t1 + } +} {1 2} +do_test misuse-3.2 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} + +# We used to disallow creating new function from within an exec(). +# But now this is acceptable. +do_test misuse-3.3 { + set v [catch { + db eval {SELECT * FROM t1} {} { + sqlite3_create_aggregate $::DB + } + } msg] + lappend v $msg +} {0 {}} +do_test misuse-3.4 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} +do_test misuse-3.5 { + catchsql { + SELECT * FROM t1 + } +} {0 {1 2}} + +# Attempt to close the database from an sqlite_exec callback. +# +# Update for v3: The db cannot be closed because there are active +# VMs. The sqlite3_close call would return SQLITE_BUSY. 
+do_test misuse-4.1 { + db close + sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] + execsql { + SELECT * FROM t1 + } +} {1 2} +do_test misuse-4.2 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} +do_test misuse-4.3 { + set v [catch { + db eval {SELECT * FROM t1} {} { + set r [sqlite3_close $::DB] + } + } msg] + lappend v $msg $r +} {0 {} SQLITE_BUSY} +do_test misuse-4.4 { + # Flush the TCL statement cache here, otherwise the sqlite3_close() will + # fail because there are still un-finalized() VDBEs. + db cache flush + sqlite3_close $::DB + catchsql2 {SELECT * FROM t1} +} {1 {library routine called out of sequence}} +do_test misuse-4.5 { + catchsql { + SELECT * FROM t1 + } +} {1 {library routine called out of sequence}} + +# Attempt to use a database after it has been closed. +# +do_test misuse-5.1 { + db close + sqlite3 db test2.db; set ::DB [sqlite3_connection_pointer db] + execsql { + SELECT * FROM t1 + } +} {1 2} +do_test misuse-5.2 { + catchsql2 {SELECT * FROM t1} +} {0 {a b 1 2}} +do_test misuse-5.3 { + db close + set r [catch { + sqlite3_prepare $::DB {SELECT * FROM t1} -1 TAIL + } msg] + lappend r $msg +} {1 {(21) library routine called out of sequence}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/notnull.test b/libraries/sqlite/unix/sqlite-3.5.1/test/notnull.test new file mode 100644 index 0000000..5af9940 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/notnull.test @@ -0,0 +1,505 @@ +# 2002 January 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the NOT NULL constraint. 
+# +# $Id: notnull.test,v 1.4 2006/01/17 09:35:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !conflict { + finish_test + return +} + +do_test notnull-1.0 { + execsql { + CREATE TABLE t1 ( + a NOT NULL, + b NOT NULL DEFAULT 5, + c NOT NULL ON CONFLICT REPLACE DEFAULT 6, + d NOT NULL ON CONFLICT IGNORE DEFAULT 7, + e NOT NULL ON CONFLICT ABORT DEFAULT 8 + ); + SELECT * FROM t1; + } +} {} +do_test notnull-1.1 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 4 5}} +do_test notnull-1.2 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-1.3 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-1.4 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-1.5 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-1.6 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-1.7 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-1.8 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-1.9 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-1.10 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.b may not be NULL}} +do_test notnull-1.11 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-1.12 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-1.13 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 6 4 5}} +do_test notnull-1.14 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-1.15 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 6 4 5}} +do_test notnull-1.16 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.c may not be NULL}} +do_test notnull-1.17 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,3,null,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.d may not be NULL}} +do_test notnull-1.18 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,e) VALUES(1,2,3,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 7 5}} +do_test notnull-1.19 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d) VALUES(1,2,3,4); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 4 8}} 
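The notnull-1.* tests around this point exercise the three conflict policies that matter for NOT NULL columns: ON CONFLICT REPLACE substitutes the declared default for an attempted NULL, OR IGNORE at the statement level silently drops the offending row, and ABORT (the default) fails the statement. A condensed standalone sketch of the same behaviour, assuming the sqlite3 Tcl extension; the error wording shown is the 3.5.x form and differs in later releases:

package require sqlite3
sqlite3 db :memory:

db eval {
  CREATE TABLE t1 (
    a NOT NULL,
    b NOT NULL DEFAULT 5,
    c NOT NULL ON CONFLICT REPLACE DEFAULT 6,
    d NOT NULL ON CONFLICT IGNORE  DEFAULT 7,
    e NOT NULL ON CONFLICT ABORT   DEFAULT 8
  );
}

# REPLACE: the NULL supplied for c is replaced by its declared default, 6.
db eval {INSERT INTO t1(a,b,c,d,e) VALUES(1,2,NULL,4,5)}
puts [db eval {SELECT * FROM t1}]                 ;# 1 2 6 4 5

# OR IGNORE at the statement level: the offending row is skipped, no error.
db eval {DELETE FROM t1}
db eval {INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,NULL,3,4,5)}
puts [db eval {SELECT count(*) FROM t1}]          ;# 0

# ABORT: a NULL for e fails the statement with a constraint error.
puts [catch {db eval {INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,NULL)}} msg]
puts $msg                                         ;# t1.e may not be NULL

db close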
+do_test notnull-1.20 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,null); + SELECT * FROM t1 order by a; + } +} {1 {t1.e may not be NULL}} +do_test notnull-1.21 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(e,d,c,b,a) VALUES(1,2,3,null,5); + SELECT * FROM t1 order by a; + } +} {0 {5 5 3 2 1}} + +do_test notnull-2.1 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-2.2 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR REPLACE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-2.3 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR IGNORE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-2.4 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR ABORT t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-2.5 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET b=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.b may not be NULL}} +do_test notnull-2.6 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR REPLACE t1 SET b=null, d=e, e=d; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 5 3 5 4}} +do_test notnull-2.7 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR IGNORE t1 SET b=null, d=e, e=d; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-2.8 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET c=null, d=e, e=d; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 6 5 4}} +do_test notnull-2.9 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET d=null, a=b, b=a; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-2.10 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET e=null, a=b, b=a; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.e may not be NULL}} + +do_test notnull-3.0 { + execsql { + CREATE INDEX t1a ON t1(a); + CREATE INDEX t1b ON t1(b); + CREATE INDEX t1c ON t1(c); + CREATE INDEX t1d ON t1(d); + CREATE INDEX t1e ON t1(e); + CREATE INDEX t1abc ON t1(a,b,c); + } +} {} +do_test notnull-3.1 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 4 5}} +do_test notnull-3.2 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-3.3 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-3.4 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-3.5 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(b,c,d,e) VALUES(2,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-3.6 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-3.7 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-3.8 { 
+ catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-3.9 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,c,d,e) VALUES(1,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-3.10 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.b may not be NULL}} +do_test notnull-3.11 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-3.12 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,null,3,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 5 3 4 5}} +do_test notnull-3.13 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 6 4 5}} +do_test notnull-3.14 { + catchsql { + DELETE FROM t1; + INSERT OR IGNORE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {}} +do_test notnull-3.15 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 6 4 5}} +do_test notnull-3.16 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,null,4,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.c may not be NULL}} +do_test notnull-3.17 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,d,e) VALUES(1,2,3,null,5); + SELECT * FROM t1 order by a; + } +} {1 {t1.d may not be NULL}} +do_test notnull-3.18 { + catchsql { + DELETE FROM t1; + INSERT OR ABORT INTO t1(a,b,c,e) VALUES(1,2,3,5); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 7 5}} +do_test notnull-3.19 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d) VALUES(1,2,3,4); + SELECT * FROM t1 order by a; + } +} {0 {1 2 3 4 8}} +do_test notnull-3.20 { + catchsql { + DELETE FROM t1; + INSERT INTO t1(a,b,c,d,e) VALUES(1,2,3,4,null); + SELECT * FROM t1 order by a; + } +} {1 {t1.e may not be NULL}} +do_test notnull-3.21 { + catchsql { + DELETE FROM t1; + INSERT OR REPLACE INTO t1(e,d,c,b,a) VALUES(1,2,3,null,5); + SELECT * FROM t1 order by a; + } +} {0 {5 5 3 2 1}} + +do_test notnull-4.1 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-4.2 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR REPLACE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-4.3 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR IGNORE t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-4.4 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR ABORT t1 SET a=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.a may not be NULL}} +do_test notnull-4.5 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET b=null; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.b may not be NULL}} +do_test notnull-4.6 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR REPLACE t1 SET b=null, d=e, e=d; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 5 3 5 4}} +do_test notnull-4.7 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE OR IGNORE t1 SET b=null, d=e, e=d; + SELECT * FROM 
t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-4.8 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET c=null, d=e, e=d; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 6 5 4}} +do_test notnull-4.9 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET d=null, a=b, b=a; + SELECT * FROM t1 ORDER BY a; + } +} {0 {1 2 3 4 5}} +do_test notnull-4.10 { + catchsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1,2,3,4,5); + UPDATE t1 SET e=null, a=b, b=a; + SELECT * FROM t1 ORDER BY a; + } +} {1 {t1.e may not be NULL}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/null.test b/libraries/sqlite/unix/sqlite-3.5.1/test/null.test new file mode 100644 index 0000000..9e2d601 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/null.test @@ -0,0 +1,252 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for proper treatment of the special +# value NULL. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table and some data to work with. +# +do_test null-1.0 { + execsql { + begin; + create table t1(a,b,c); + insert into t1 values(1,0,0); + insert into t1 values(2,0,1); + insert into t1 values(3,1,0); + insert into t1 values(4,1,1); + insert into t1 values(5,null,0); + insert into t1 values(6,null,1); + insert into t1 values(7,null,null); + commit; + select * from t1; + } +} {1 0 0 2 0 1 3 1 0 4 1 1 5 {} 0 6 {} 1 7 {} {}} + +# Check for how arithmetic expressions handle NULL +# +do_test null-1.1 { + execsql { + select ifnull(a+b,99) from t1; + } +} {1 2 4 5 99 99 99} +do_test null-1.2 { + execsql { + select ifnull(b*c,99) from t1; + } +} {0 0 0 1 99 99 99} + +# Check to see how the CASE expression handles NULL values. The +# first WHEN for which the test expression is TRUE is selected. +# FALSE and UNKNOWN test expressions are skipped. +# +do_test null-2.1 { + execsql { + select ifnull(case when b<>0 then 1 else 0 end, 99) from t1; + } +} {0 0 1 1 0 0 0} +do_test null-2.2 { + execsql { + select ifnull(case when not b<>0 then 1 else 0 end, 99) from t1; + } +} {1 1 0 0 0 0 0} +do_test null-2.3 { + execsql { + select ifnull(case when b<>0 and c<>0 then 1 else 0 end, 99) from t1; + } +} {0 0 0 1 0 0 0} +do_test null-2.4 { + execsql { + select ifnull(case when not (b<>0 and c<>0) then 1 else 0 end, 99) from t1; + } +} {1 1 1 0 1 0 0} +do_test null-2.5 { + execsql { + select ifnull(case when b<>0 or c<>0 then 1 else 0 end, 99) from t1; + } +} {0 1 1 1 0 1 0} +do_test null-2.6 { + execsql { + select ifnull(case when not (b<>0 or c<>0) then 1 else 0 end, 99) from t1; + } +} {1 0 0 0 0 0 0} +do_test null-2.7 { + execsql { + select ifnull(case b when c then 1 else 0 end, 99) from t1; + } +} {1 0 0 1 0 0 0} +do_test null-2.8 { + execsql { + select ifnull(case c when b then 1 else 0 end, 99) from t1; + } +} {1 0 0 1 0 0 0} + +# Check to see that NULL values are ignored in aggregate functions. 
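The comment above describes how aggregates treat NULL: count(*) counts rows, while count(X), sum(), avg(), min() and max() skip NULL values entirely, and an aggregate over zero values yields NULL (except total(), which yields 0.0). A standalone sketch using the same seven-row table as null-1.0, assuming the sqlite3 Tcl extension (NULL prints as an empty element in the Tcl result list):

package require sqlite3
sqlite3 db :memory:

db eval {
  CREATE TABLE t1(a,b,c);
  INSERT INTO t1 VALUES(1,0,0);
  INSERT INTO t1 VALUES(2,0,1);
  INSERT INTO t1 VALUES(3,1,0);
  INSERT INTO t1 VALUES(4,1,1);
  INSERT INTO t1 VALUES(5,NULL,0);
  INSERT INTO t1 VALUES(6,NULL,1);
  INSERT INTO t1 VALUES(7,NULL,NULL);
}

# count(*) counts rows; count(b) and count(c) count only non-NULL values.
puts [db eval {SELECT count(*), count(b), count(c) FROM t1}]    ;# 7 4 6

# sum/avg/min/max skip NULLs instead of propagating them.
puts [db eval {SELECT sum(b), avg(b), min(b), max(b) FROM t1}]  ;# 2 0.5 0 1

# Over an empty set, sum() is NULL but total() is 0.0.
puts [db eval {SELECT sum(b), total(b) FROM t1 WHERE b < 0}]    ;# {} 0.0

db close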
+# +do_test null-3.1 { + execsql { + select count(*), count(b), count(c), sum(b), sum(c), + avg(b), avg(c), min(b), max(b) from t1; + } +} {7 4 6 2 3 0.5 0.5 0 1} + +# The sum of zero entries is a NULL, but the total of zero entries is 0. +# +do_test null-3.2 { + execsql { + SELECT sum(b), total(b) FROM t1 WHERE b<0 + } +} {{} 0.0} + +# Check to see how WHERE clauses handle NULL values. A NULL value +# is the same as UNKNOWN. The WHERE clause should only select those +# rows that are TRUE. FALSE and UNKNOWN rows are rejected. +# +do_test null-4.1 { + execsql { + select a from t1 where b<10 + } +} {1 2 3 4} +do_test null-4.2 { + execsql { + select a from t1 where not b>10 + } +} {1 2 3 4} +do_test null-4.3 { + execsql { + select a from t1 where b<10 or c=1; + } +} {1 2 3 4 6} +do_test null-4.4 { + execsql { + select a from t1 where b<10 and c=1; + } +} {2 4} +do_test null-4.5 { + execsql { + select a from t1 where not (b<10 and c=1); + } +} {1 3 5} + +# The DISTINCT keyword on a SELECT statement should treat NULL values +# as distinct +# +do_test null-5.1 { + execsql { + select distinct b from t1 order by b; + } +} {{} 0 1} + +# A UNION to two queries should treat NULL values +# as distinct +# +ifcapable compound { +do_test null-6.1 { + execsql { + select b from t1 union select c from t1 order by c; + } +} {{} 0 1} +} ;# ifcapable compound + +# The UNIQUE constraint only applies to non-null values +# +ifcapable conflict { +do_test null-7.1 { + execsql { + create table t2(a, b unique on conflict ignore); + insert into t2 values(1,1); + insert into t2 values(2,null); + insert into t2 values(3,null); + insert into t2 values(4,1); + select a from t2; + } + } {1 2 3} + do_test null-7.2 { + execsql { + create table t3(a, b, c, unique(b,c) on conflict ignore); + insert into t3 values(1,1,1); + insert into t3 values(2,null,1); + insert into t3 values(3,null,1); + insert into t3 values(4,1,1); + select a from t3; + } + } {1 2 3} +} + +# Ticket #461 - Make sure nulls are handled correctly when doing a +# lookup using an index. +# +do_test null-8.1 { + execsql { + CREATE TABLE t4(x,y); + INSERT INTO t4 VALUES(1,11); + INSERT INTO t4 VALUES(2,NULL); + SELECT x FROM t4 WHERE y=NULL; + } +} {} +ifcapable subquery { + do_test null-8.2 { + execsql { + SELECT x FROM t4 WHERE y IN (33,NULL); + } + } {} +} +do_test null-8.3 { + execsql { + SELECT x FROM t4 WHERE y<33 ORDER BY x; + } +} {1} +do_test null-8.4 { + execsql { + SELECT x FROM t4 WHERE y>6 ORDER BY x; + } +} {1} +do_test null-8.5 { + execsql { + SELECT x FROM t4 WHERE y!=33 ORDER BY x; + } +} {1} +do_test null-8.11 { + execsql { + CREATE INDEX t4i1 ON t4(y); + SELECT x FROM t4 WHERE y=NULL; + } +} {} +ifcapable subquery { + do_test null-8.12 { + execsql { + SELECT x FROM t4 WHERE y IN (33,NULL); + } + } {} +} +do_test null-8.13 { + execsql { + SELECT x FROM t4 WHERE y<33 ORDER BY x; + } +} {1} +do_test null-8.14 { + execsql { + SELECT x FROM t4 WHERE y>6 ORDER BY x; + } +} {1} +do_test null-8.15 { + execsql { + SELECT x FROM t4 WHERE y!=33 ORDER BY x; + } +} {1} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/onefile.test b/libraries/sqlite/unix/sqlite-3.5.1/test/onefile.test new file mode 100644 index 0000000..8c1ba12 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/onefile.test @@ -0,0 +1,61 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file runs all tests. +# +# $Id: onefile.test,v 1.2 2007/10/03 08:46:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test2 +proc finish_test {} { + catch {db close} + catch {db2 close} + catch {db3 close} +} +set ISQUICK 1 + +set INCLUDE { + conflict.test + insert.test + insert2.test + insert3.test + rollback.test + select1.test + select2.test + select3.test + temptable.test +} +#set INCLUDE insert2.test + +rename sqlite3 really_sqlite3 +proc sqlite3 {args} { + if {[string range [lindex $args 0] 0 0] ne "-"} { + lappend args -vfs fs + } + uplevel [concat really_sqlite3 $args] +} + +rename do_test really_do_test +proc do_test {name args} { + uplevel really_do_test onefile-$name $args +} + +foreach testfile [lsort -dictionary [glob $testdir/*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $INCLUDE $tail]<0} continue + source $testfile +} + +file delete -force test.db test2.db test3.db test4.db + +really_finish_test2 +rename do_test {} +rename really_do_test do_test +rename finish_test {} +rename really_finish_test2 finish_test +rename sqlite3 {} +rename really_sqlite3 sqlite3 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/openv2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/openv2.test new file mode 100644 index 0000000..637edc6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/openv2.test @@ -0,0 +1,41 @@ +# 2007 Sep 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests on the sqlite3_open_v2() interface. +# +# $Id: openv2.test,v 1.1 2007/09/03 15:19:36 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db close +file delete -force test.db test.db-journal +do_test openv2-1.1 { + set rc [catch {sqlite3 db test.db -create 0} msg] + lappend rc $msg +} {1 {unable to open database file}} +do_test openv2-1.2 { + info commands db +} {} +do_test openv2-1.3 { + sqlite3 db test.db + db eval {CREATE TABLE t1(x)} + db close + sqlite3 db test.db -readonly 1 + db eval {SELECT name FROM sqlite_master} +} {t1} +do_test openv2-1.4 { + catchsql { + INSERT INTO t1 VALUES(123) + } +} {1 {attempt to write a readonly database}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pager.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pager.test new file mode 100644 index 0000000..bb92617 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pager.test @@ -0,0 +1,571 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: pager.test,v 1.30 2007/08/24 16:29:24 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {[info commands pager_open]!=""} { +db close + +# Basic sanity check. Open and close a pager. 
+# +do_test pager-1.0 { + catch {file delete -force ptf1.db} + catch {file delete -force ptf1.db-journal} + set v [catch { + set ::p1 [pager_open ptf1.db 10] + } msg] +} {0} +do_test pager-1.1 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager-1.2 { + pager_pagecount $::p1 +} {0} +do_test pager-1.3 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager-1.4 { + pager_close $::p1 +} {} + +# Try to write a few pages. +# +do_test pager-2.1 { + set v [catch { + set ::p1 [pager_open ptf1.db 10] + } msg] +} {0} +#do_test pager-2.2 { +# set v [catch { +# set ::g1 [page_get $::p1 0] +# } msg] +# lappend v $msg +#} {1 SQLITE_ERROR} +do_test pager-2.3.1 { + set ::gx [page_lookup $::p1 1] +} {} +do_test pager-2.3.2 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size -1 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager-2.3.3 { + set v [catch { + set ::g1 [page_get $::p1 1] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager-2.3.3 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.3.4 { + set ::gx [page_lookup $::p1 1] + expr {$::gx!=""} +} {1} +do_test pager-2.3.5 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.3.6 { + expr {$::g1==$::gx} +} {1} +do_test pager-2.3.7 { + page_unref $::gx + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.4 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.5 { + pager_pagecount $::p1 +} {0} +do_test pager-2.6 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.7 { + page_number $::g1 +} {1} +do_test pager-2.8 { + page_read $::g1 +} {} +do_test pager-2.9 { + page_unref $::g1 +} {} + +# Update 24/03/2007: Even though the ref-count has dropped to zero, the +# pager-cache still contains some pages. Previously, it was always true +# that if there were no references to a pager it was empty. 
+do_test pager-2.10 { + pager_stats $::p1 +} {ref 0 page 1 max 10 size -1 state 0 err 0 hit 0 miss 1 ovfl 0} +do_test pager-2.11 { + set ::g1 [page_get $::p1 1] + expr {$::g1!=0} +} {1} +do_test pager-2.12 { + page_number $::g1 +} {1} +do_test pager-2.13 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 1 miss 1 ovfl 0} +do_test pager-2.14 { + set v [catch { + page_write $::g1 "Page-One" + } msg] + lappend v $msg +} {0 {}} +do_test pager-2.15 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 1 state 2 err 0 hit 1 miss 1 ovfl 0} +do_test pager-2.16 { + page_read $::g1 +} {Page-One} +do_test pager-2.17 { + set v [catch { + pager_commit $::p1 + } msg] + lappend v $msg +} {0 {}} +do_test pager-2.20 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size -1 state 1 err 0 hit 2 miss 1 ovfl 0} +do_test pager-2.19 { + pager_pagecount $::p1 +} {1} +do_test pager-2.21 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 2 miss 1 ovfl 0} +do_test pager-2.22 { + page_unref $::g1 +} {} +do_test pager-2.23 { + pager_stats $::p1 +} {ref 0 page 1 max 10 size -1 state 0 err 0 hit 2 miss 1 ovfl 0} +do_test pager-2.24 { + set v [catch { + page_get $::p1 1 + } ::g1] + if {$v} {lappend v $::g1} + set v +} {0} +do_test pager-2.25 { + page_read $::g1 +} {Page-One} +do_test pager-2.26 { + set v [catch { + page_write $::g1 {page-one} + } msg] + lappend v $msg +} {0 {}} +do_test pager-2.27 { + page_read $::g1 +} {page-one} +do_test pager-2.28 { + set v [catch { + pager_rollback $::p1 + } msg] + lappend v $msg +} {0 {}} +do_test pager-2.29 { + page_unref $::g1 + set ::g1 [page_get $::p1 1] + page_read $::g1 +} {Page-One} +do_test pager-2.99 { + pager_close $::p1 +} {} + +do_test pager-3.1 { + set v [catch { + set ::p1 [pager_open ptf1.db 15] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager-3.2 { + pager_pagecount $::p1 +} {1} +do_test pager-3.3 { + set v [catch { + set ::g(1) [page_get $::p1 1] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager-3.4 { + page_read $::g(1) +} {Page-One} +do_test pager-3.5 { + for {set i 2} {$i<=20} {incr i} { + set gx [page_get $::p1 $i] + page_write $gx "Page-$i" + page_unref $gx + } + pager_commit $::p1 +} {} +for {set i 2} {$i<=20} {incr i} { + do_test pager-3.6.[expr {$i-1}] [subst { + set gx \[page_get $::p1 $i\] + set v \[page_read \$gx\] + page_unref \$gx + set v + }] "Page-$i" +} +for {set i 1} {$i<=20} {incr i} { + regsub -all CNT { + set ::g1 [page_get $::p1 CNT] + set ::g2 [page_get $::p1 CNT] + set ::vx [page_read $::g2] + expr {$::g1==$::g2} + } $i body; + do_test pager-3.7.$i.1 $body {1} + regsub -all CNT { + page_unref $::g2 + set vy [page_read $::g1] + expr {$vy==$::vx} + } $i body; + do_test pager-3.7.$i.2 $body {1} + regsub -all CNT { + page_unref $::g1 + set gx [page_get $::p1 CNT] + set vy [page_read $gx] + page_unref $gx + expr {$vy==$::vx} + } $i body; + do_test pager-3.7.$i.3 $body {1} +} +do_test pager-3.99 { + pager_close $::p1 +} {} + +# tests of the checkpoint mechanism and api +# +do_test pager-4.0 { + set v [catch { + file delete -force ptf1.db + set ::p1 [pager_open ptf1.db 15] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager-4.1 { + set g1 [page_get $::p1 1] + page_write $g1 "Page-1 v0" + for {set i 2} {$i<=20} {incr i} { + set gx [page_get $::p1 $i] + page_write $gx "Page-$i v0" + page_unref $gx + } + pager_commit $::p1 +} {} +for {set i 1} {$i<=20} {incr i} { + do_test pager-4.2.$i { + set gx [page_get $p1 $i] + set v [page_read $gx] + page_unref $gx + set v + } "Page-$i 
v0" +} +do_test pager-4.3 { + lrange [pager_stats $::p1] 0 1 +} {ref 1} +do_test pager-4.4 { + lrange [pager_stats $::p1] 8 9 +} {state 1} + +for {set i 1} {$i<20} {incr i} { + do_test pager-4.5.$i.0 { + set res {} + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v[expr {$i-1}]" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager-4.5.$i.1 { + page_write $g1 "Page-1 v$i" + lrange [pager_stats $p1] 8 9 + } {state 2} + do_test pager-4.5.$i.2 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager-4.5.$i.3 { + set res {} + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v$i" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager-4.5.$i.4 { + pager_rollback $p1 + set res {} + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v[expr {$i-1}]" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager-4.5.$i.5 { + page_write $g1 "Page-1 v$i" + lrange [pager_stats $p1] 8 9 + } {state 2} + do_test pager-4.5.$i.6 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager-4.5.$i.7 { + pager_stmt_rollback $p1 + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + if {$j<=$i || $i==1} { + set shouldbe "Page-$j v$i" + } else { + set shouldbe "Page-$j v[expr {$i-1}]" + } + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager-4.5.$i.8 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager-4.5.$i.9 { + pager_stmt_commit $p1 + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v$i" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager-4.5.$i.10 { + pager_commit $p1 + lrange [pager_stats $p1] 8 9 + } {state 1} +} + +# Test that nothing bad happens when sqlite3pager_set_cachesize() is +# called with a negative argument. +do_test pager-4.6.1 { + pager_close [pager_open ptf2.db -15] +} {} + +# Test truncate on an in-memory database is Ok. +ifcapable memorydb { + do_test pager-4.6.2 { + set ::p2 [pager_open :memory: 10] + pager_truncate $::p2 5 + } {} + do_test pager-4.6.3 { + for {set i 1} {$i<5} {incr i} { + set p [page_get $::p2 $i] + page_write $p "Page $i" + pager_commit $::p2 + page_unref $p + } + # pager_truncate $::p2 3 + } {} + do_test pager-4.6.4 { + pager_close $::p2 + } {} +} + +do_test pager-4.99 { + pager_close $::p1 +} {} + + + + file delete -force ptf1.db + +} ;# end if( not mem: and has pager_open command ); + +if 0 { +# Ticket #615: an assertion fault inside the pager. It is a benign +# fault, but we might as well test for it. +# +do_test pager-5.1 { + sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t1(x); + PRAGMA synchronous=off; + COMMIT; + } +} {} +} + +# The following tests cover rolling back hot journal files. 
+# They can't be run on windows because the windows version of +# SQLite holds a mandatory exclusive lock on journal files it has open. +# +if {$tcl_platform(platform)!="windows"} { +do_test pager-6.1 { + file delete -force test2.db + file delete -force test2.db-journal + sqlite3 db2 test2.db + execsql { + PRAGMA synchronous = 0; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + INSERT INTO abc VALUES(1, 2, randstr(200,200)); + BEGIN; + UPDATE abc SET c = randstr(200,200); + } db2 + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + + set f [open test.db-journal a] + fconfigure $f -encoding binary + seek $f [expr [file size test.db-journal] - 1032] start + puts -nonewline $f "\00\00\00\00" + close $f + + sqlite3 db test.db + execsql { + SELECT sql FROM sqlite_master + } +} {{CREATE TABLE abc(a, b, c)}} + +do_test pager-6.2 { + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + + set f [open test.db-journal a] + fconfigure $f -encoding binary + seek $f [expr [file size test.db-journal] - 1032] start + puts -nonewline $f "\00\00\00\FF" + close $f + + sqlite3 db test.db + execsql { + SELECT sql FROM sqlite_master + } +} {{CREATE TABLE abc(a, b, c)}} + +do_test pager-6.3 { + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + + set f [open test.db-journal a] + fconfigure $f -encoding binary + seek $f [expr [file size test.db-journal] - 4] start + puts -nonewline $f "\00\00\00\00" + close $f + + sqlite3 db test.db + execsql { + SELECT sql FROM sqlite_master + } +} {{CREATE TABLE abc(a, b, c)}} + +do_test pager-6.4.1 { + execsql { + BEGIN; + SELECT sql FROM sqlite_master; + } + copy_file test2.db-journal test.db-journal; + sqlite3 db3 test.db + catchsql { + BEGIN; + SELECT sql FROM sqlite_master; + } db3; +} {1 {database is locked}} +do_test pager-6.4.2 { + file delete -force test.db-journal + catchsql { + SELECT sql FROM sqlite_master; + } db3; +} {0 {{CREATE TABLE abc(a, b, c)}}} +do_test pager-6.4.3 { + db3 close + execsql { + COMMIT; + } +} {} + +do_test pager-6.5 { + copy_file test2.db test.db + copy_file test2.db-journal test.db-journal + + set f [open test.db-journal a] + fconfigure $f -encoding binary + puts -nonewline $f "hello" + puts -nonewline $f "\x00\x00\x00\x05\x01\x02\x03\x04" + puts -nonewline $f "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" + close $f + + sqlite3 db test.db + execsql { + SELECT sql FROM sqlite_master + } +} {{CREATE TABLE abc(a, b, c)}} + +do_test pager-6.5 { + db2 close +} {} +} +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pager2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pager2.test new file mode 100644 index 0000000..52dfe73 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pager2.test @@ -0,0 +1,408 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: pager2.test,v 1.6 2007/03/23 18:12:07 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Don't run this test file if the pager test interface [pager_open] is not +# available, or the library was compiled without in-memory database support. +# +if {[info commands pager_open]!=""} { +ifcapable memorydb { +db close + +# Basic sanity check. Open and close a pager. +# +do_test pager2-1.0 { + set v [catch { + set ::p1 [pager_open :memory: 10] + } msg] +} {0} +do_test pager2-1.1 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager2-1.2 { + pager_pagecount $::p1 +} {0} +do_test pager2-1.3 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager2-1.4 { + pager_close $::p1 +} {} + +# Try to write a few pages. +# +do_test pager2-2.1 { + set v [catch { + set ::p1 [pager_open :memory: 10] + } msg] +} {0} +#do_test pager2-2.2 { +# set v [catch { +# set ::g1 [page_get $::p1 0] +# } msg] +# lappend v $msg +#} {1 SQLITE_ERROR} +do_test pager2-2.3.1 { + set ::gx [page_lookup $::p1 1] +} {} +do_test pager2-2.3.2 { + pager_stats $::p1 +} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +do_test pager2-2.3.3 { + set v [catch { + set ::g1 [page_get $::p1 1] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager2-2.3.3 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.3.4 { + set ::gx [page_lookup $::p1 1] + expr {$::gx!=""} +} {1} +do_test pager2-2.3.5 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.3.6 { + expr {$::g1==$::gx} +} {1} +do_test pager2-2.3.7 { + page_unref $::gx + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.4 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.5 { + pager_pagecount $::p1 +} {0} +do_test pager2-2.6 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.7 { + page_number $::g1 +} {1} +do_test pager2-2.8 { + page_read $::g1 +} {} +do_test pager2-2.9 { + page_unref $::g1 +} {} +do_test pager2-2.10 { + pager_stats $::p1 +} {ref 0 page 1 max 10 size 0 state 0 err 0 hit 0 miss 1 ovfl 0} +do_test pager2-2.11 { + set ::g1 [page_get $::p1 1] + expr {$::g1!=0} +} {1} +do_test pager2-2.12 { + page_number $::g1 +} {1} +do_test pager2-2.13 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 1 miss 1 ovfl 0} +do_test pager2-2.14 { + set v [catch { + page_write $::g1 "Page-One" + } msg] + lappend v $msg +} {0 {}} +do_test pager2-2.15 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 1 state 4 err 0 hit 1 miss 1 ovfl 0} +do_test pager2-2.16 { + page_read $::g1 +} {Page-One} +do_test pager2-2.17 { + set v [catch { + pager_commit $::p1 + } msg] + lappend v $msg +} {0 {}} +do_test pager2-2.20 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 1 miss 1 ovfl 0} +do_test pager2-2.19 { + pager_pagecount $::p1 +} {1} +do_test pager2-2.21 { + pager_stats $::p1 +} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 1 miss 1 ovfl 0} +do_test pager2-2.22 { + page_unref $::g1 +} {} +do_test pager2-2.23 { + pager_stats $::p1 +} {ref 0 page 1 max 10 
size 1 state 0 err 0 hit 1 miss 1 ovfl 0} +do_test pager2-2.24 { + set v [catch { + page_get $::p1 1 + } ::g1] + if {$v} {lappend v $::g1} + set v +} {0} +do_test pager2-2.25 { + page_read $::g1 +} {Page-One} +do_test pager2-2.26 { + set v [catch { + page_write $::g1 {page-one} + } msg] + lappend v $msg +} {0 {}} +do_test pager2-2.27 { + page_read $::g1 +} {page-one} +do_test pager2-2.28 { + set v [catch { + pager_rollback $::p1 + } msg] + lappend v $msg +} {0 {}} +do_test pager2-2.29 { + page_unref $::g1 + set ::g1 [page_get $::p1 1] + page_read $::g1 +} {Page-One} +#do_test pager2-2.99 { +# pager_close $::p1 +#} {} + +#do_test pager2-3.1 { +# set v [catch { +# set ::p1 [pager_open :memory: 15] +# } msg] +# if {$v} {lappend v $msg} +# set v +#} {0} +do_test pager2-3.2 { + pager_pagecount $::p1 +} {1} +do_test pager2-3.3 { + set v [catch { + set ::g(1) [page_get $::p1 1] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager2-3.4 { + page_read $::g(1) +} {Page-One} +do_test pager2-3.5 { + for {set i 2} {$i<=20} {incr i} { + set gx [page_get $::p1 $i] + page_write $gx "Page-$i" + page_unref $gx + } + pager_commit $::p1 +} {} +for {set i 2} {$i<=20} {incr i} { + do_test pager2-3.6.[expr {$i-1}] [subst { + set gx \[page_get $::p1 $i\] + set v \[page_read \$gx\] + page_unref \$gx + set v + }] "Page-$i" +} +for {set i 1} {$i<=20} {incr i} { + regsub -all CNT { + set ::g1 [page_get $::p1 CNT] + set ::g2 [page_get $::p1 CNT] + set ::vx [page_read $::g2] + expr {$::g1==$::g2} + } $i body; + do_test pager2-3.7.$i.1 $body {1} + regsub -all CNT { + page_unref $::g2 + set vy [page_read $::g1] + expr {$vy==$::vx} + } $i body; + do_test pager2-3.7.$i.2 $body {1} + regsub -all CNT { + page_unref $::g1 + set gx [page_get $::p1 CNT] + set vy [page_read $gx] + page_unref $gx + expr {$vy==$::vx} + } $i body; + do_test pager2-3.7.$i.3 $body {1} +} +do_test pager2-3.99 { + pager_close $::p1 +} {} + +# tests of the checkpoint mechanism and api +# +do_test pager2-4.0 { + set v [catch { + set ::p1 [pager_open :memory: 15] + } msg] + if {$v} {lappend v $msg} + set v +} {0} +do_test pager2-4.1 { + set g1 [page_get $::p1 1] + page_write $g1 "Page-1 v0" + for {set i 2} {$i<=20} {incr i} { + set gx [page_get $::p1 $i] + page_write $gx "Page-$i v0" + page_unref $gx + } + pager_commit $::p1 +} {} +for {set i 1} {$i<=20} {incr i} { + do_test pager2-4.2.$i { + set gx [page_get $p1 $i] + set v [page_read $gx] + page_unref $gx + set v + } "Page-$i v0" +} +do_test pager2-4.3 { + lrange [pager_stats $::p1] 0 1 +} {ref 1} +do_test pager2-4.4 { + lrange [pager_stats $::p1] 8 9 +} {state 1} + +for {set i 1} {$i<20} {incr i} { + do_test pager2-4.5.$i.0 { + set res {} + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v[expr {$i-1}]" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager2-4.5.$i.1 { + page_write $g1 "Page-1 v$i" + lrange [pager_stats $p1] 8 9 + } {state 4} + do_test pager2-4.5.$i.2 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager2-4.5.$i.3 { + set res {} + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v$i" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager2-4.5.$i.4 { + pager_rollback $p1 + set res {} + for {set j 2} 
{$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v[expr {$i-1}]" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager2-4.5.$i.5 { + page_write $g1 "Page-1 v$i" + lrange [pager_stats $p1] 8 9 + } {state 4} + do_test pager2-4.5.$i.6 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager2-4.5.$i.7 { + pager_stmt_rollback $p1 + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + if {$j<=$i || $i==1} { + set shouldbe "Page-$j v$i" + } else { + set shouldbe "Page-$j v[expr {$i-1}]" + } + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager2-4.5.$i.8 { + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + page_write $gx "Page-$j v$i" + page_unref $gx + if {$j==$i} { + pager_stmt_begin $p1 + } + } + } {} + do_test pager2-4.5.$i.9 { + pager_stmt_commit $p1 + for {set j 2} {$j<=20} {incr j} { + set gx [page_get $p1 $j] + set value [page_read $gx] + page_unref $gx + set shouldbe "Page-$j v$i" + if {$value!=$shouldbe} { + lappend res $value $shouldbe + } + } + set res + } {} + do_test pager2-4.5.$i.10 { + pager_commit $p1 + lrange [pager_stats $p1] 8 9 + } {state 1} +} + +do_test pager2-4.99 { + pager_close $::p1 +} {} + +} ;# ifcapable inmemory +} ;# end if( has pager_open command ); + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pager3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pager3.test new file mode 100644 index 0000000..59a97c5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pager3.test @@ -0,0 +1,73 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is page cache subsystem. +# +# $Id: pager3.test,v 1.3 2005/03/29 03:11:00 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# This test makes sure the database file is truncated back to the correct +# length on a rollback. +# +# After some preliminary setup, a transaction is start at NOTE (1). +# The create table on the following line allocates an additional page +# at the end of the database file. But that page is not written because +# the database still has a RESERVED lock, not an EXCLUSIVE lock. The +# new page is held in memory and the size of the file is unchanged. +# The insert at NOTE (2) begins adding additional pages. Then it hits +# a constraint error and aborts. The abort causes sqlite3OsTruncate() +# to be called to restore the file to the same length as it was after +# the create table. But the create table results had not yet been +# written so the file is actually lengthened by this truncate. Finally, +# the rollback at NOTE (3) is called to undo all the changes since the +# begin. This rollback should truncate the database again. +# +# This test was added because the second truncate at NOTE (3) was not +# occurring on early versions of SQLite 3.0. 
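The scenario described above can be checked from the plain Tcl bindings by watching the file length across the transaction. A simplified sketch, assuming the sqlite3 Tcl extension; the file name is made up and the data is much smaller than in pager3-1.1, but the rollback must still restore the original length:

package require sqlite3

file delete -force trunc.db trunc.db-journal
sqlite3 db trunc.db
db eval {
  CREATE TABLE t1(a UNIQUE, b);
  INSERT INTO t1 VALUES(1, 'abcdefghijklmnopqrstuvwxyz');
  INSERT INTO t1 VALUES(2, 'abcdefghijklmnopqrstuvwxyz');
}
set before [file size trunc.db]

db eval {BEGIN}
db eval {CREATE TABLE t3(x)}        ;# new page held in the cache, file not yet grown
# One generated row (a = 2) collides with an existing key and aborts the INSERT.
catch {db eval {INSERT INTO t1 SELECT 4-a, b FROM t1}} msg
db eval {ROLLBACK}

# After the rollback the file is back to its pre-transaction length,
# t3 is gone, and the database passes an integrity check.
puts [list $before [file size trunc.db]]
puts [db eval {SELECT name FROM sqlite_master WHERE type='table'}]   ;# t1
puts [db eval {PRAGMA integrity_check}]                              ;# ok
db close
file delete -force trunc.db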
+# +ifcapable tempdb { + do_test pager3-1.1 { + execsql { + create table t1(a unique, b); + insert into t1 values(1, 'abcdefghijklmnopqrstuvwxyz'); + insert into t1 values(2, 'abcdefghijklmnopqrstuvwxyz'); + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + update t1 set b=b||a||b; + create temp table t2 as select * from t1; + begin; ------- NOTE (1) + create table t3(x); + } + catchsql { + insert into t1 select 4-a, b from t2; ----- NOTE (2) + } + execsql { + rollback; ------- NOTE (3) + } + db close + sqlite3 db test.db + set r ok + ifcapable {integrityck} { + set r [execsql { + pragma integrity_check; + }] + } + set r + } ok +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pageropt.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pageropt.test new file mode 100644 index 0000000..41f3d5c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pageropt.test @@ -0,0 +1,201 @@ +# 2007 April 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# The focus of the tests in this file are to verify that the +# pager optimizations implemented in version 3.3.14 work. +# +# $Id: pageropt.test,v 1.3 2007/08/12 20:07:59 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +# Run the SQL statement supplied by the argument and return +# the results. Prepend four integers to the beginning of the +# result which are +# +# (1) The number of page reads from the database +# (2) The number of page writes to the database +# (3) The number of page writes to the journal +# (4) The number of cache pages freed +# +proc pagercount_sql {sql {db db}} { + global sqlite3_pager_readdb_count + global sqlite3_pager_writedb_count + global sqlite3_pager_writej_count + global sqlite3_pager_pgfree_count + set sqlite3_pager_readdb_count 0 + set sqlite3_pager_writedb_count 0 + set sqlite3_pager_writej_count 0 + set sqlite3_pager_pgfree_count 0 + set r [$db eval $sql] + set cnt [list $sqlite3_pager_readdb_count \ + $sqlite3_pager_writedb_count \ + $sqlite3_pager_writej_count \ + $sqlite3_pager_pgfree_count] + return [concat $cnt $r] +} + +# Setup the test database +# +do_test pageropt-1.1 { + sqlite3_soft_heap_limit 0 + execsql { + PRAGMA auto_vacuum = OFF; + PRAGMA page_size = 1024; + } + pagercount_sql { + CREATE TABLE t1(x); + } +} {0 2 0 0} +do_test pageropt-1.2 { + pagercount_sql { + INSERT INTO t1 VALUES(randomblob(5000)); + } +} {0 6 2 0} + +# Verify that values remain in cache on for subsequent reads. +# We should not have to go back to disk. +# +do_test pageropt-1.3 { + pagercount_sql { + SELECT length(x) FROM t1 + } +} {0 0 0 0 5000} + +# If another thread reads the database, the original cache +# remains valid. +# +sqlite3 db2 test.db +set blobcontent [db2 one {SELECT hex(x) FROM t1}] +do_test pageropt-1.4 { + pagercount_sql { + SELECT hex(x) FROM t1 + } +} [list 0 0 0 0 $blobcontent] + +# But if the other thread modifies the database, then the cache +# must refill. 
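#
# For orientation (an illustrative aside, not one of the numbered tests):
# a pagercount_sql result reads as the four counters followed by the query
# output, so {2 0 0 0 5000} would mean 2 pages read from the database file,
# 0 pages written, 0 journal writes, 0 cache pages freed, and a SELECT that
# returned 5000.  Kept inert behind an if {0} guard.
if {0} {
  pagercount_sql {SELECT length(x) FROM t1}
}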
+# +do_test pageropt-1.5 { + db2 eval {CREATE TABLE t2(y)} + pagercount_sql { + SELECT hex(x) FROM t1 + } +} [list 6 0 0 6 $blobcontent] +do_test pageropt-1.6 { + pagercount_sql { + SELECT hex(x) FROM t1 + } +} [list 0 0 0 0 $blobcontent] + +# Verify that the last page of an overflow chain is not read from +# disk when deleting a row. The one row of t1(x) has four pages +# of overflow. So deleting that row from t1 should involve reading +# the sqlite_master table (1 page) the main page of t1 (1 page) and +# the three overflow pages of t1 for a total of 5 pages. +# +# Pages written are page 1 (for the freelist pointer), the root page +# of the table, and one of the overflow chain pointers because it +# becomes the trunk of the freelist. Total 3. +# +do_test pageropt-2.1 { + db close + sqlite3 db test.db + pagercount_sql { + DELETE FROM t1 WHERE rowid=1 + } +} {5 3 3 0} + +# When pulling pages off of the freelist, there is no reason +# to actually bring in the old content. +# +do_test pageropt-2.2 { + db close + sqlite3 db test.db + pagercount_sql { + INSERT INTO t1 VALUES(randomblob(1500)); + } +} {3 4 3 0} +do_test pageropt-2.3 { + pagercount_sql { + INSERT INTO t1 VALUES(randomblob(1500)); + } +} {0 4 3 0} + +# Note the new optimization that when pulling the very last page off of the +# freelist we do not read the content of that page. +# +do_test pageropt-2.4 { + pagercount_sql { + INSERT INTO t1 VALUES(randomblob(1500)); + } +} {0 5 3 0} + +# Appending a large quantity of data does not involve writing much +# to the journal file. +# +do_test pageropt-3.1 { + pagercount_sql { + INSERT INTO t2 SELECT * FROM t1; + } +} {1 7 2 0} + +# Once again, we do not need to read the last page of an overflow chain +# while deleting. +# +do_test pageropt-3.2 { + pagercount_sql { + DROP TABLE t2; + } +} {0 2 3 0} +do_test pageropt-3.3 { + pagercount_sql { + DELETE FROM t1; + } +} {0 3 3 0} + +# There are now 11 pages on the freelist. Move them all into an +# overflow chain by inserting a single large record. Starting from +# a cold cache, only page 1, the root page of table t1, and the trunk +# of the freelist need to be read (3 pages). And only those three +# pages need to be journalled. But 13 pages need to be written: +# page1, the root page of table t1, and an 11 page overflow chain. +# +do_test pageropt-4.1 { + db close + sqlite3 db test.db + pagercount_sql { + INSERT INTO t1 VALUES(randomblob(11300)) + } +} {3 13 3 0} + +# Now we delete that big entries starting from a cold cache and an +# empty freelist. The first 10 of the 11 pages overflow chain have +# to be read, together with page1 and the root of the t1 table. 12 +# reads total. But only page1, the t1 root, and the trunk of the +# freelist need to be journalled and written back. +# +do_test pageroot-4.2 { + db close + sqlite3 db test.db + pagercount_sql { + DELETE FROM t1 + } +} {12 3 3 0} + +sqlite3_soft_heap_limit $soft_limit +catch {db2 close} +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pagesize.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pagesize.test new file mode 100644 index 0000000..a598b9f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pagesize.test @@ -0,0 +1,182 @@ +# 2004 September 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# This file implements tests for the page_size PRAGMA. +# +# $Id: pagesize.test,v 1.12 2007/04/06 21:42:22 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# This test script depends entirely on "PRAGMA page_size". So if this +# pragma is not available, omit the whole file. +ifcapable !pager_pragmas { + finish_test + return +} + +do_test pagesize-1.1 { + execsql {PRAGMA page_size} +} 1024 +ifcapable {explain} { + do_test pagesize-1.2 { + catch {execsql {EXPLAIN PRAGMA page_size}} + } 0 +} +do_test pagesize-1.3 { + execsql { + CREATE TABLE t1(a); + PRAGMA page_size=2048; + PRAGMA page_size; + } +} 1024 + +do_test pagesize-1.4 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + PRAGMA page_size=511; + PRAGMA page_size; + } +} 1024 +do_test pagesize-1.5 { + execsql { + PRAGMA page_size=512; + PRAGMA page_size; + } +} 512 +if {![info exists SQLITE_MAX_PAGE_SIZE] || $SQLITE_MAX_PAGE_SIZE>=8192} { + do_test pagesize-1.6 { + execsql { + PRAGMA page_size=8192; + PRAGMA page_size; + } + } 8192 + do_test pagesize-1.7 { + execsql { + PRAGMA page_size=65537; + PRAGMA page_size; + } + } 8192 + do_test pagesize-1.8 { + execsql { + PRAGMA page_size=1234; + PRAGMA page_size + } + } 8192 +} +foreach PGSZ {512 2048 4096 8192} { + if {[info exists SQLITE_MAX_PAGE_SIZE] + && $SQLITE_MAX_PAGE_SIZE<$PGSZ} continue + ifcapable memorydb { + do_test pagesize-2.$PGSZ.0 { + db close + sqlite3 db :memory: + execsql "PRAGMA page_size=$PGSZ;" + execsql {PRAGMA page_size} + } 1024 + } + do_test pagesize-2.$PGSZ.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql "PRAGMA page_size=$PGSZ" + execsql { + CREATE TABLE t1(x); + PRAGMA page_size; + } + } $PGSZ + do_test pagesize-2.$PGSZ.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA page_size + } + } $PGSZ + do_test pagesize-2.$PGSZ.3 { + file size test.db + } [expr {$PGSZ*($AUTOVACUUM?3:2)}] + ifcapable {vacuum} { + do_test pagesize-2.$PGSZ.4 { + execsql {VACUUM} + } {} + } + integrity_check pagesize-2.$PGSZ.5 + do_test pagesize-2.$PGSZ.6 { + db close + sqlite3 db test.db + execsql {PRAGMA page_size} + } $PGSZ + do_test pagesize-2.$PGSZ.7 { + execsql { + INSERT INTO t1 VALUES(randstr(10,9000)); + INSERT INTO t1 VALUES(randstr(10,9000)); + INSERT INTO t1 VALUES(randstr(10,9000)); + BEGIN; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + SELECT count(*) FROM t1; + } + } 48 + do_test pagesize-2.$PGSZ.8 { + execsql { + ROLLBACK; + SELECT count(*) FROM t1; + } + } 3 + integrity_check pagesize-2.$PGSZ.9 + do_test pagesize-2.$PGSZ.10 { + db close + sqlite3 db test.db + execsql {PRAGMA page_size} + } $PGSZ + do_test pagesize-2.$PGSZ.11 { + execsql { + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + INSERT INTO t1 SELECT x||x FROM t1; + SELECT count(*) FROM t1; + } + } 192 + do_test pagesize-2.$PGSZ.12 { + execsql { + BEGIN; + DELETE FROM t1 WHERE rowid%5!=0; + SELECT count(*) FROM t1; + } + } 38 + do_test pagesize-2.$PGSZ.13 { + execsql { + ROLLBACK; + SELECT count(*) FROM t1; + } + } 192 + integrity_check pagesize-2.$PGSZ.14 + do_test pagesize-2.$PGSZ.15 { + execsql {DELETE FROM t1 WHERE rowid%5!=0} + ifcapable 
{vacuum} {execsql VACUUM} + execsql {SELECT count(*) FROM t1} + } 38 + do_test pagesize-2.$PGSZ.16 { + execsql {DROP TABLE t1} + ifcapable {vacuum} {execsql VACUUM} + } {} + integrity_check pagesize-2.$PGSZ.17 +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pragma.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pragma.test new file mode 100644 index 0000000..501c608 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pragma.test @@ -0,0 +1,1037 @@ +# 2002 March 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the PRAGMA command. +# +# $Id: pragma.test,v 1.54 2007/05/17 16:38:30 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test organization: +# +# pragma-1.*: Test cache_size, default_cache_size and synchronous on main db. +# pragma-2.*: Test synchronous on attached db. +# pragma-3.*: Test detection of table/index inconsistency by integrity_check. +# pragma-4.*: Test cache_size and default_cache_size on attached db. +# pragma-5.*: Test that pragma synchronous may not be used inside of a +# transaction. +# pragma-6.*: Test schema-query pragmas. +# pragma-7.*: Miscellaneous tests. +# pragma-8.*: Test user_version and schema_version pragmas. +# pragma-9.*: Test temp_store and temp_store_directory. +# pragma-10.*: Test the count_changes pragma in the presence of triggers. +# pragma-11.*: Test the collation_list pragma. +# + +ifcapable !pragma { + finish_test + return +} + +# Delete the preexisting database to avoid the special setup +# that the "all.test" script does. 
+# +db close +file delete test.db test.db-journal +file delete test3.db test3.db-journal +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] + + +ifcapable pager_pragmas { +set DFLT_CACHE_SZ [db one {PRAGMA default_cache_size}] +set TEMP_CACHE_SZ [db one {PRAGMA temp.default_cache_size}] +do_test pragma-1.1 { + execsql { + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 2] +do_test pragma-1.2 { + execsql { + PRAGMA synchronous=OFF; + PRAGMA cache_size=1234; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list 1234 $DFLT_CACHE_SZ 0] +do_test pragma-1.3 { + db close + sqlite3 db test.db + execsql { + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 2] +do_test pragma-1.4 { + execsql { + PRAGMA synchronous=OFF; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 0] +do_test pragma-1.5 { + execsql { + PRAGMA cache_size=4321; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list 4321 $DFLT_CACHE_SZ 0] +do_test pragma-1.6 { + execsql { + PRAGMA synchronous=ON; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list 4321 $DFLT_CACHE_SZ 1] +do_test pragma-1.7 { + db close + sqlite3 db test.db + execsql { + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 2] +do_test pragma-1.8 { + execsql { + PRAGMA default_cache_size=123; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} {123 123 2} +do_test pragma-1.9.1 { + db close + sqlite3 db test.db; set ::DB [sqlite3_connection_pointer db] + execsql { + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} {123 123 2} +ifcapable vacuum { + do_test pragma-1.9.2 { + execsql { + VACUUM; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } + } {123 123 2} +} +do_test pragma-1.10 { + execsql { + PRAGMA synchronous=NORMAL; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} {123 123 1} +do_test pragma-1.11 { + execsql { + PRAGMA synchronous=FULL; + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} {123 123 2} +do_test pragma-1.12 { + db close + sqlite3 db test.db; set ::DB [sqlite3_connection_pointer db] + execsql { + PRAGMA cache_size; + PRAGMA default_cache_size; + PRAGMA synchronous; + } +} {123 123 2} + +# Make sure the pragma handler understands numeric values in addition +# to keywords like "off" and "full". +# +do_test pragma-1.13 { + execsql { + PRAGMA synchronous=0; + PRAGMA synchronous; + } +} {0} +do_test pragma-1.14 { + execsql { + PRAGMA synchronous=2; + PRAGMA synchronous; + } +} {2} +} ;# ifcapable pager_pragmas + +# Test turning "flag" pragmas on and off. +# +ifcapable debug { + # Pragma "vdbe_listing" is only available if compiled with SQLITE_DEBUG + # + do_test pragma-1.15 { + execsql { + PRAGMA vdbe_listing=YES; + PRAGMA vdbe_listing; + } + } {1} + do_test pragma-1.16 { + execsql { + PRAGMA vdbe_listing=NO; + PRAGMA vdbe_listing; + } + } {0} +} + +do_test pragma-1.17 { + execsql { + PRAGMA parser_trace=ON; + PRAGMA parser_trace=OFF; + } +} {} +do_test pragma-1.18 { + execsql { + PRAGMA bogus = -1234; -- Parsing of negative values + } +} {} + +# Test modifying the safety_level of an attached database. 
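#
# For reference (an illustrative aside, not one of the numbered tests): the
# synchronous setting maps keywords to numbers, OFF=0, NORMAL=1, FULL=2, and
# either form may be assigned, including on an attached database once one
# named aux exists.  Kept inert behind an if {0} guard.
if {0} {
  execsql {PRAGMA synchronous=NORMAL; PRAGMA synchronous}     ;# returns 1
  execsql {PRAGMA aux.synchronous=0; PRAGMA aux.synchronous}  ;# returns 0 (OFF)
}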
+do_test pragma-2.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + } +} {} +ifcapable pager_pragmas { +do_test pragma-2.2 { + execsql { + pragma aux.synchronous; + } +} {2} +do_test pragma-2.3 { + execsql { + pragma aux.synchronous = OFF; + pragma aux.synchronous; + pragma synchronous; + } +} {0 2} +do_test pragma-2.4 { + execsql { + pragma aux.synchronous = ON; + pragma synchronous; + pragma aux.synchronous; + } +} {2 1} +} ;# ifcapable pager_pragmas + +# Construct a corrupted index and make sure the integrity_check +# pragma finds it. +# +# These tests won't work if the database is encrypted +# +do_test pragma-3.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum=OFF; + BEGIN; + CREATE TABLE t2(a,b,c); + CREATE INDEX i2 ON t2(a); + INSERT INTO t2 VALUES(11,2,3); + INSERT INTO t2 VALUES(22,3,4); + COMMIT; + SELECT rowid, * from t2; + } +} {1 11 2 3 2 22 3 4} +if {![sqlite3 -has-codec] && $sqlite_options(integrityck)} { + do_test pragma-3.2 { + set rootpage [execsql {SELECT rootpage FROM sqlite_master WHERE name='i2'}] + set db [btree_open test.db 100 0] + btree_begin_transaction $db + set c [btree_cursor $db $rootpage 1] + btree_first $c + btree_delete $c + btree_commit $db + btree_close $db + execsql {PRAGMA integrity_check} + } {{rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.3 { + execsql {PRAGMA integrity_check=1} + } {{rowid 1 missing from index i2}} + do_test pragma-3.4 { + execsql { + ATTACH DATABASE 'test.db' AS t2; + PRAGMA integrity_check + } + } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.5 { + execsql { + PRAGMA integrity_check=3 + } + } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2}} + do_test pragma-3.6 { + execsql { + PRAGMA integrity_check=xyz + } + } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.7 { + execsql { + PRAGMA integrity_check=0 + } + } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + + # Add additional corruption by appending unused pages to the end of + # the database file testerr.db + # + do_test pragma-3.8 { + execsql {DETACH t2} + file delete -force testerr.db testerr.db-journal + set out [open testerr.db w] + fconfigure $out -translation binary + set in [open test.db r] + fconfigure $in -translation binary + puts -nonewline $out [read $in] + seek $in 0 + puts -nonewline $out [read $in] + close $in + close $out + execsql {REINDEX t2} + execsql {PRAGMA integrity_check} + } {ok} + do_test pragma-3.9 { + execsql { + ATTACH 'testerr.db' AS t2; + PRAGMA integrity_check + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.10 { + execsql { + PRAGMA integrity_check=1 + } + } {{*** in database t2 *** +Page 4 is never used}} + do_test pragma-3.11 { + execsql { + PRAGMA integrity_check=5 + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.12 { + execsql { + PRAGMA integrity_check=4 + } + } {{*** in database 
t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2}} + do_test pragma-3.13 { + execsql { + PRAGMA integrity_check=3 + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used}} + do_test pragma-3.14 { + execsql { + PRAGMA integrity_check(2) + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used}} + do_test pragma-3.15 { + execsql { + ATTACH 'testerr.db' AS t3; + PRAGMA integrity_check + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.16 { + execsql { + PRAGMA integrity_check(9) + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2}} + do_test pragma-3.17 { + execsql { + PRAGMA integrity_check=7 + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 4 is never used +Page 5 is never used}} + do_test pragma-3.18 { + execsql { + PRAGMA integrity_check=4 + } + } {{*** in database t2 *** +Page 4 is never used +Page 5 is never used +Page 6 is never used} {rowid 1 missing from index i2}} +} +do_test pragma-3.99 { + catchsql {DETACH t3} + catchsql {DETACH t2} + file delete -force testerr.db testerr.db-journal + catchsql {DROP INDEX i2} +} {0 {}} + +# Test modifying the cache_size of an attached database. +ifcapable pager_pragmas { +do_test pragma-4.1 { + execsql { + ATTACH 'test2.db' AS aux; + pragma aux.cache_size; + pragma aux.default_cache_size; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ] +do_test pragma-4.2 { + execsql { + pragma aux.cache_size = 50; + pragma aux.cache_size; + pragma aux.default_cache_size; + } +} [list 50 $DFLT_CACHE_SZ] +do_test pragma-4.3 { + execsql { + pragma aux.default_cache_size = 456; + pragma aux.cache_size; + pragma aux.default_cache_size; + } +} {456 456} +do_test pragma-4.4 { + execsql { + pragma cache_size; + pragma default_cache_size; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ] +do_test pragma-4.5 { + execsql { + DETACH aux; + ATTACH 'test3.db' AS aux; + pragma aux.cache_size; + pragma aux.default_cache_size; + } +} [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ] +do_test pragma-4.6 { + execsql { + DETACH aux; + ATTACH 'test2.db' AS aux; + pragma aux.cache_size; + pragma aux.default_cache_size; + } +} {456 456} +} ;# ifcapable pager_pragmas + +# Test that modifying the sync-level in the middle of a transaction is +# disallowed. 
+ifcapable pager_pragmas { +do_test pragma-5.0 { + execsql { + pragma synchronous; + } +} {2} +do_test pragma-5.1 { + catchsql { + BEGIN; + pragma synchronous = OFF; + } +} {1 {Safety level may not be changed inside a transaction}} +do_test pragma-5.2 { + execsql { + pragma synchronous; + } +} {2} +catchsql {COMMIT;} +} ;# ifcapable pager_pragmas + +# Test schema-query pragmas +# +ifcapable schema_pragmas { +ifcapable tempdb { + do_test pragma-6.1 { + set res {} + execsql {SELECT * FROM sqlite_temp_master} + foreach {idx name file} [execsql {pragma database_list}] { + lappend res $idx $name + } + set res + } {0 main 1 temp 2 aux} +} +do_test pragma-6.2 { + execsql { + pragma table_info(t2) + } +} {0 a {} 0 {} 0 1 b {} 0 {} 0 2 c {} 0 {} 0} +db nullvalue <> +do_test pragma-6.2.2 { + execsql { + CREATE TABLE t5( + a TEXT DEFAULT CURRENT_TIMESTAMP, + b DEFAULT (5+3), + c TEXT, + d INTEGER DEFAULT NULL, + e TEXT DEFAULT '' + ); + PRAGMA table_info(t5); + } +} {0 a TEXT 0 CURRENT_TIMESTAMP 0 1 b {} 0 5+3 0 2 c TEXT 0 <> 0 3 d INTEGER 0 NULL 0 4 e TEXT 0 '' 0} +db nullvalue {} +ifcapable {foreignkey} { + do_test pragma-6.3 { + execsql { + CREATE TABLE t3(a int references t2(b), b UNIQUE); + pragma foreign_key_list(t3); + } + } {0 0 t2 a b} + do_test pragma-6.4 { + execsql { + pragma index_list(t3); + } + } {0 sqlite_autoindex_t3_1 1} +} +ifcapable {!foreignkey} { + execsql {CREATE TABLE t3(a,b UNIQUE)} +} +do_test pragma-6.5 { + execsql { + CREATE INDEX t3i1 ON t3(a,b); + pragma index_info(t3i1); + } +} {0 0 a 1 1 b} +} ;# ifcapable schema_pragmas +# Miscellaneous tests +# +ifcapable schema_pragmas { +do_test pragma-7.1 { + # Make sure a pragma knows to read the schema if it needs to + db close + sqlite3 db test.db + execsql { + pragma index_list(t3); + } +} {0 t3i1 0 1 sqlite_autoindex_t3_1 1} +} ;# ifcapable schema_pragmas +ifcapable {utf16} { + do_test pragma-7.2 { + db close + sqlite3 db test.db + catchsql { + pragma encoding=bogus; + } + } {1 {unsupported encoding: bogus}} +} +ifcapable tempdb { + do_test pragma-7.3 { + db close + sqlite3 db test.db + execsql { + pragma lock_status; + } + } {main unlocked temp closed} +} else { + do_test pragma-7.3 { + db close + sqlite3 db test.db + execsql { + pragma lock_status; + } + } {main unlocked} +} + + +#---------------------------------------------------------------------- +# Test cases pragma-8.* test the "PRAGMA schema_version" and "PRAGMA +# user_version" statements. +# +# pragma-8.1: PRAGMA schema_version +# pragma-8.2: PRAGMA user_version +# + +ifcapable schema_version { + +# First check that we can set the schema version and then retrieve the +# same value. +do_test pragma-8.1.1 { + execsql { + PRAGMA schema_version = 105; + } +} {} +do_test pragma-8.1.2 { + execsql2 { + PRAGMA schema_version; + } +} {schema_version 105} +do_test pragma-8.1.3 { + execsql { + PRAGMA schema_version = 106; + } +} {} +do_test pragma-8.1.4 { + execsql { + PRAGMA schema_version; + } +} 106 + +# Check that creating a table modifies the schema-version (this is really +# to verify that the value being read is in fact the schema version). +do_test pragma-8.1.5 { + execsql { + CREATE TABLE t4(a, b, c); + INSERT INTO t4 VALUES(1, 2, 3); + SELECT * FROM t4; + } +} {1 2 3} +do_test pragma-8.1.6 { + execsql { + PRAGMA schema_version; + } +} 107 + +# Now open a second connection to the database. Ensure that changing the +# schema-version using the first connection forces the second connection +# to reload the schema. 
This has to be done using the C-API test functions, +# because the TCL API accounts for SCHEMA_ERROR and retries the query. +do_test pragma-8.1.7 { + sqlite3 db2 test.db; set ::DB2 [sqlite3_connection_pointer db2] + execsql { + SELECT * FROM t4; + } db2 +} {1 2 3} +do_test pragma-8.1.8 { + execsql { + PRAGMA schema_version = 108; + } +} {} +do_test pragma-8.1.9 { + set ::STMT [sqlite3_prepare $::DB2 "SELECT * FROM t4" -1 DUMMY] + sqlite3_step $::STMT +} SQLITE_ERROR +do_test pragma-8.1.10 { + sqlite3_finalize $::STMT +} SQLITE_SCHEMA + +# Make sure the schema-version can be manipulated in an attached database. +file delete -force test2.db +file delete -force test2.db-journal +do_test pragma-8.1.11 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1(a, b, c); + PRAGMA aux.schema_version = 205; + } +} {} +do_test pragma-8.1.12 { + execsql { + PRAGMA aux.schema_version; + } +} 205 +do_test pragma-8.1.13 { + execsql { + PRAGMA schema_version; + } +} 108 + +# And check that modifying the schema-version in an attached database +# forces the second connection to reload the schema. +do_test pragma-8.1.14 { + sqlite3 db2 test.db; set ::DB2 [sqlite3_connection_pointer db2] + execsql { + ATTACH 'test2.db' AS aux; + SELECT * FROM aux.t1; + } db2 +} {} +do_test pragma-8.1.15 { + execsql { + PRAGMA aux.schema_version = 206; + } +} {} +do_test pragma-8.1.16 { + set ::STMT [sqlite3_prepare $::DB2 "SELECT * FROM aux.t1" -1 DUMMY] + sqlite3_step $::STMT +} SQLITE_ERROR +do_test pragma-8.1.17 { + sqlite3_finalize $::STMT +} SQLITE_SCHEMA +do_test pragma-8.1.18 { + db2 close +} {} + +# Now test that the user-version can be read and written (and that we aren't +# accidentally manipulating the schema-version instead). +do_test pragma-8.2.1 { + execsql2 { + PRAGMA user_version; + } +} {user_version 0} +do_test pragma-8.2.2 { + execsql { + PRAGMA user_version = 2; + } +} {} +do_test pragma-8.2.3.1 { + execsql2 { + PRAGMA user_version; + } +} {user_version 2} +do_test pragma-8.2.3.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA user_version; + } +} {2} +do_test pragma-8.2.4.1 { + execsql { + PRAGMA schema_version; + } +} {108} +ifcapable vacuum { + do_test pragma-8.2.4.2 { + execsql { + VACUUM; + PRAGMA user_version; + } + } {2} + do_test pragma-8.2.4.3 { + execsql { + PRAGMA schema_version; + } + } {109} +} +db eval {ATTACH 'test2.db' AS aux} + +# Check that the user-version in the auxilary database can be manipulated ( +# and that we aren't accidentally manipulating the same in the main db). +do_test pragma-8.2.5 { + execsql { + PRAGMA aux.user_version; + } +} {0} +do_test pragma-8.2.6 { + execsql { + PRAGMA aux.user_version = 3; + } +} {} +do_test pragma-8.2.7 { + execsql { + PRAGMA aux.user_version; + } +} {3} +do_test pragma-8.2.8 { + execsql { + PRAGMA main.user_version; + } +} {2} + +# Now check that a ROLLBACK resets the user-version if it has been modified +# within a transaction. 
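#
# Context for the rollback check below (an illustrative aside, not one of
# the numbered tests): applications commonly use user_version as a schema
# migration marker, which is the behaviour these tests protect.  The table
# name app_log is hypothetical; the block is kept inert behind an if {0}
# guard.
if {0} {
  set v [db one {PRAGMA user_version}]
  if {$v < 1} {
    db eval {
      CREATE TABLE app_log(msg TEXT);
      PRAGMA user_version = 1;
    }
  }
}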
+do_test pragma-8.2.9 { + execsql { + BEGIN; + PRAGMA aux.user_version = 10; + PRAGMA user_version = 11; + } +} {} +do_test pragma-8.2.10 { + execsql { + PRAGMA aux.user_version; + } +} {10} +do_test pragma-8.2.11 { + execsql { + PRAGMA main.user_version; + } +} {11} +do_test pragma-8.2.12 { + execsql { + ROLLBACK; + PRAGMA aux.user_version; + } +} {3} +do_test pragma-8.2.13 { + execsql { + PRAGMA main.user_version; + } +} {2} + +# Try a negative value for the user-version +do_test pragma-8.2.14 { + execsql { + PRAGMA user_version = -450; + } +} {} +do_test pragma-8.2.15 { + execsql { + PRAGMA user_version; + } +} {-450} +} ; # ifcapable schema_version + + +# Test temp_store and temp_store_directory pragmas +# +ifcapable pager_pragmas { +do_test pragma-9.1 { + db close + sqlite3 db test.db + execsql { + PRAGMA temp_store; + } +} {0} +do_test pragma-9.2 { + execsql { + PRAGMA temp_store=file; + PRAGMA temp_store; + } +} {1} +do_test pragma-9.3 { + execsql { + PRAGMA temp_store=memory; + PRAGMA temp_store; + } +} {2} +do_test pragma-9.4 { + execsql { + PRAGMA temp_store_directory; + } +} {} +do_test pragma-9.5 { + set pwd [string map {' ''} [pwd]] + execsql " + PRAGMA temp_store_directory='$pwd'; + " +} {} +do_test pragma-9.6 { + execsql { + PRAGMA temp_store_directory; + } +} [list [pwd]] +do_test pragma-9.7 { + catchsql { + PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR'; + } +} {1 {not a writable directory}} +do_test pragma-9.8 { + execsql { + PRAGMA temp_store_directory=''; + } +} {} +if {![info exists TEMP_STORE] || $TEMP_STORE<=1} { + ifcapable tempdb { + do_test pragma-9.9 { + execsql { + PRAGMA temp_store_directory; + PRAGMA temp_store=FILE; + CREATE TEMP TABLE temp_store_directory_test(a integer); + INSERT INTO temp_store_directory_test values (2); + SELECT * FROM temp_store_directory_test; + } + } {2} + do_test pragma-9.10 { + catchsql " + PRAGMA temp_store_directory='$pwd'; + SELECT * FROM temp_store_directory_test; + " + } {1 {no such table: temp_store_directory_test}} + } +} +do_test pragma-9.11 { + execsql { + PRAGMA temp_store = 0; + PRAGMA temp_store; + } +} {0} +do_test pragma-9.12 { + execsql { + PRAGMA temp_store = 1; + PRAGMA temp_store; + } +} {1} +do_test pragma-9.13 { + execsql { + PRAGMA temp_store = 2; + PRAGMA temp_store; + } +} {2} +do_test pragma-9.14 { + execsql { + PRAGMA temp_store = 3; + PRAGMA temp_store; + } +} {0} +breakpoint +do_test pragma-9.15 { + catchsql { + BEGIN EXCLUSIVE; + CREATE TEMP TABLE temp_table(t); + INSERT INTO temp_table VALUES('valuable data'); + PRAGMA temp_store = 1; + } +} {1 {temporary storage cannot be changed from within a transaction}} +do_test pragma-9.16 { + execsql { + SELECT * FROM temp_table; + COMMIT; + } +} {{valuable data}} +} ;# ifcapable pager_pragmas + +ifcapable trigger { + +do_test pragma-10.0 { + catchsql { + DROP TABLE main.t1; + } + execsql { + PRAGMA count_changes = 1; + + CREATE TABLE t1(a PRIMARY KEY); + CREATE TABLE t1_mirror(a); + CREATE TABLE t1_mirror2(a); + CREATE TRIGGER t1_bi BEFORE INSERT ON t1 BEGIN + INSERT INTO t1_mirror VALUES(new.a); + END; + CREATE TRIGGER t1_ai AFTER INSERT ON t1 BEGIN + INSERT INTO t1_mirror2 VALUES(new.a); + END; + CREATE TRIGGER t1_bu BEFORE UPDATE ON t1 BEGIN + UPDATE t1_mirror SET a = new.a WHERE a = old.a; + END; + CREATE TRIGGER t1_au AFTER UPDATE ON t1 BEGIN + UPDATE t1_mirror2 SET a = new.a WHERE a = old.a; + END; + CREATE TRIGGER t1_bd BEFORE DELETE ON t1 BEGIN + DELETE FROM t1_mirror WHERE a = old.a; + END; + CREATE TRIGGER t1_ad AFTER DELETE ON t1 BEGIN + DELETE 
FROM t1_mirror2 WHERE a = old.a; + END; + } +} {} + +do_test pragma-10.1 { + execsql { + INSERT INTO t1 VALUES(randstr(10,10)); + } +} {1} +do_test pragma-10.2 { + execsql { + UPDATE t1 SET a = randstr(10,10); + } +} {1} +do_test pragma-10.3 { + execsql { + DELETE FROM t1; + } +} {1} + +} ;# ifcapable trigger + +ifcapable schema_pragmas { + do_test pragma-11.1 { + execsql2 { + pragma collation_list; + } + } {seq 0 name NOCASE seq 1 name BINARY} + do_test pragma-11.2 { + db collate New_Collation blah... + execsql { + pragma collation_list; + } + } {0 New_Collation 1 NOCASE 2 BINARY} +} + +ifcapable schema_pragmas&&tempdb { + do_test pragma-12.1 { + sqlite3 db2 test.db + execsql { + PRAGMA temp.table_info('abc'); + } db2 + } {} + db2 close + + do_test pragma-12.2 { + sqlite3 db2 test.db + execsql { + PRAGMA temp.default_cache_size = 200; + PRAGMA temp.default_cache_size; + } db2 + } {200} + db2 close + + do_test pragma-12.3 { + sqlite3 db2 test.db + execsql { + PRAGMA temp.cache_size = 400; + PRAGMA temp.cache_size; + } db2 + } {400} + db2 close +} + +ifcapable bloblit { + +do_test pragma-13.1 { + execsql { + DROP TABLE IF EXISTS t4; + PRAGMA vdbe_trace=on; + PRAGMA vdbe_listing=on; + PRAGMA sql_trace=on; + CREATE TABLE t4(a INTEGER PRIMARY KEY,b); + INSERT INTO t4(b) VALUES(x'0123456789abcdef0123456789abcdef0123456789'); + INSERT INTO t4(b) VALUES(randstr(30,30)); + INSERT INTO t4(b) VALUES(1.23456); + INSERT INTO t4(b) VALUES(NULL); + INSERT INTO t4(b) VALUES(0); + INSERT INTO t4(b) SELECT b||b||b||b FROM t4; + SELECT * FROM t4; + } + execsql { + PRAGMA vdbe_trace=off; + PRAGMA vdbe_listing=off; + PRAGMA sql_trace=off; + } +} {} + +} ;# ifcapable bloblit + +# Reset the sqlite3_temp_directory variable for the next run of tests: +sqlite3 dbX :memory: +dbX eval {PRAGMA temp_store_directory = ""} +dbX close + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/pragma2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/pragma2.test new file mode 100644 index 0000000..faf04c0 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/pragma2.test @@ -0,0 +1,117 @@ +# 2002 March 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the PRAGMA command. +# +# $Id: pragma2.test,v 1.3 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test organization: +# +# pragma2-1.*: Test freelist_count pragma on the main database. +# pragma2-2.*: Test freelist_count pragma on an attached database. +# pragma2-3.*: Test trying to write to the freelist_count is a no-op. +# + +ifcapable !pragma||!schema_pragmas { + finish_test + return +} + +# Delete the preexisting database to avoid the special setup +# that the "all.test" script does. 
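#
# For orientation (an illustrative aside, not one of the numbered tests):
# freelist_count reports how many pages in the database file are currently
# unused; it grows when rows or tables are dropped and shrinks as pages are
# reused or the file is vacuumed.  The table name big is hypothetical and
# the block is kept inert behind an if {0} guard.
if {0} {
  db eval {CREATE TABLE big(x); INSERT INTO big VALUES(zeroblob(20000))}
  db eval {DROP TABLE big}
  db one {PRAGMA freelist_count}   ;# greater than zero afterwards
}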
+# +db close +file delete test.db test.db-journal +file delete test3.db test3.db-journal +sqlite3 db test.db; set DB [sqlite3_connection_pointer db] +db eval {PRAGMA auto_vacuum=0} + +do_test pragma2-1.1 { + execsql { + PRAGMA freelist_count; + } +} {0} +do_test pragma2-1.2 { + execsql { + CREATE TABLE abc(a, b, c); + PRAGMA freelist_count; + } +} {0} +do_test pragma2-1.3 { + execsql { + DROP TABLE abc; + PRAGMA freelist_count; + } +} {1} +do_test pragma2-1.4 { + execsql { + PRAGMA main.freelist_count; + } +} {1} + +file delete -force test2.db +file delete -force test2.db-journal + +do_test pragma2-2.1 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.auto_vacuum=OFF; + PRAGMA aux.freelist_count; + } +} {0} +do_test pragma2-2.2 { + execsql { + CREATE TABLE aux.abc(a, b, c); + PRAGMA aux.freelist_count; + } +} {0} +do_test pragma2-2.3 { + set ::val [string repeat 0123456789 1000] + execsql { + INSERT INTO aux.abc VALUES(1, 2, $::val); + PRAGMA aux.freelist_count; + } +} {0} +do_test pragma2-2.4 { + expr {[file size test2.db] / 1024} +} {11} +do_test pragma2-2.5 { + execsql { + DELETE FROM aux.abc; + PRAGMA aux.freelist_count; + } +} {9} + +do_test pragma2-3.1 { + execsql { + PRAGMA aux.freelist_count; + PRAGMA main.freelist_count; + PRAGMA freelist_count; + } +} {9 1 1} +do_test pragma2-3.2 { + execsql { + PRAGMA freelist_count = 500; + PRAGMA freelist_count; + } +} {1 1} +do_test pragma2-3.3 { + execsql { + PRAGMA aux.freelist_count = 500; + PRAGMA aux.freelist_count; + } +} {9 9} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/printf.test b/libraries/sqlite/unix/sqlite-3.5.1/test/printf.test new file mode 100644 index 0000000..7bb2e77 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/printf.test @@ -0,0 +1,324 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite_*_printf() interface. 
+# +# $Id: printf.test,v 1.27 2007/09/03 07:31:10 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set n 1 +foreach v {1 2 5 10 99 100 1000000 999999999 0 -1 -2 -5 -10 -99 -100 -9999999} { + set v32 [expr {$v&0xffffffff}] + do_test printf-1.$n.1 [subst { + sqlite3_mprintf_int {Three integers: %d %x %o} $v $v $v + }] [format {Three integers: %d %x %o} $v $v32 $v32] + do_test printf-1.$n.2 [subst { + sqlite3_mprintf_int {Three integers: (%6d) (%6x) (%6o)} $v $v $v + }] [format {Three integers: (%6d) (%6x) (%6o)} $v $v32 $v32] + do_test printf-1.$n.3 [subst { + sqlite3_mprintf_int {Three integers: (%-6d) (%-6x) (%-6o)} $v $v $v + }] [format {Three integers: (%-6d) (%-6x) (%-6o)} $v $v32 $v32] + do_test printf-1.$n.4 [subst { + sqlite3_mprintf_int {Three integers: (%+6d) (%+6x) (%+6o)} $v $v $v + }] [format {Three integers: (%+6d) (%+6x) (%+6o)} $v $v32 $v32] + do_test printf-1.$n.5 [subst { + sqlite3_mprintf_int {Three integers: (%06d) (%06x) (%06o)} $v $v $v + }] [format {Three integers: (%06d) (%06x) (%06o)} $v $v32 $v32] + do_test printf-1.$n.6 [subst { + sqlite3_mprintf_int {Three integers: (% 6d) (% 6x) (% 6o)} $v $v $v + }] [format {Three integers: (% 6d) (% 6x) (% 6o)} $v $v32 $v32] + do_test printf-1.$n.7 [subst { + sqlite3_mprintf_int {Three integers: (%#6d) (%#6x) (%#6o)} $v $v $v + }] [format {Three integers: (%#6d) (%#6x) (%#6o)} $v $v32 $v32] + incr n +} + + +if {$::tcl_platform(platform)!="windows"} { + +set m 1 +foreach {a b} {1 1 5 5 10 10 10 5} { + set n 1 + foreach x {0.001 1.0e-20 1.0 0.0 100.0 9.99999 -0.00543 -1.0 -99.99999} { + do_test printf-2.$m.$n.1 [subst { + sqlite3_mprintf_double {A double: %*.*f} $a $b $x + }] [format {A double: %*.*f} $a $b $x] + do_test printf-2.$m.$n.2 [subst { + sqlite3_mprintf_double {A double: %*.*e} $a $b $x + }] [format {A double: %*.*e} $a $b $x] + do_test printf-2.$m.$n.3 [subst { + sqlite3_mprintf_double {A double: %*.*g} $a $b $x + }] [format {A double: %*.*g} $a $b $x] + do_test printf-2.$m.$n.4 [subst { + sqlite3_mprintf_double {A double: %d %d %g} $a $b $x + }] [format {A double: %d %d %g} $a $b $x] + do_test printf-2.$m.$n.5 [subst { + sqlite3_mprintf_double {A double: %d %d %#g} $a $b $x + }] [format {A double: %d %d %#g} $a $b $x] + do_test printf-2.$m.$n.6 [subst { + sqlite3_mprintf_double {A double: %d %d %010g} $a $b $x + }] [format {A double: %d %d %010g} $a $b $x] + incr n + } + incr m +} + +} ;# endif not windows + +do_test printf-3.1 { + sqlite3_mprintf_str {A String: (%*.*s)} 10 10 {This is the string} +} [format {A String: (%*.*s)} 10 10 {This is the string}] +do_test printf-3.2 { + sqlite3_mprintf_str {A String: (%*.*s)} 10 5 {This is the string} +} [format {A String: (%*.*s)} 10 5 {This is the string}] +do_test printf-3.3 { + sqlite3_mprintf_str {A String: (%*.*s)} -10 5 {This is the string} +} [format {A String: (%*.*s)} -10 5 {This is the string}] +do_test printf-3.4 { + sqlite3_mprintf_str {%d %d A String: (%s)} 1 2 {This is the string} +} [format {%d %d A String: (%s)} 1 2 {This is the string}] +do_test printf-3.5 { + sqlite3_mprintf_str {%d %d A String: (%30s)} 1 2 {This is the string} +} [format {%d %d A String: (%30s)} 1 2 {This is the string}] +do_test printf-3.6 { + sqlite3_mprintf_str {%d %d A String: (%-30s)} 1 2 {This is the string} +} [format {%d %d A String: (%-30s)} 1 2 {This is the string}] +do_test snprintf-3.11 { + sqlite3_snprintf_str 2 {x%d %d %s} 10 10 {This is the string} +} {x} +do_test snprintf-3.12 { + sqlite3_snprintf_str 3 {x%d %d %s} 10 10 {This is the 
string} +} {x1} +do_test snprintf-3.13 { + sqlite3_snprintf_str 4 {x%d %d %s} 10 10 {This is the string} +} {x10} +do_test snprintf-3.14 { + sqlite3_snprintf_str 5 {x%d %d %s} 10 10 {This is the string} +} {x10 } +do_test snprintf-3.15 { + sqlite3_snprintf_str 6 {x%d %d %s} 10 10 {This is the string} +} {x10 1} +do_test snprintf-3.16 { + sqlite3_snprintf_str 7 {x%d %d %s} 10 10 {This is the string} +} {x10 10} +do_test snprintf-3.17 { + sqlite3_snprintf_str 8 {x%d %d %s} 10 10 {This is the string} +} {x10 10 } +do_test snprintf-3.18 { + sqlite3_snprintf_str 9 {x%d %d %s} 10 10 {This is the string} +} {x10 10 T} +do_test snprintf-3.19 { + sqlite3_snprintf_str 100 {x%d %d %s} 10 10 {This is the string} +} {x10 10 This is the string} + +do_test printf-4.1 { + sqlite3_mprintf_str {%d %d A quoted string: '%q'} 1 2 {Hi Y'all} +} {1 2 A quoted string: 'Hi Y''all'} +do_test printf-4.2 { + sqlite3_mprintf_str {%d %d A NULL pointer in %%q: '%q'} 1 2 +} {1 2 A NULL pointer in %q: '(NULL)'} +do_test printf-4.3 { + sqlite3_mprintf_str {%d %d A quoted string: %Q} 1 2 {Hi Y'all} +} {1 2 A quoted string: 'Hi Y''all'} +do_test printf-4.4 { + sqlite3_mprintf_str {%d %d A NULL pointer in %%Q: %Q} 1 2 +} {1 2 A NULL pointer in %Q: NULL} + +do_test printf-5.1 { + set x [sqlite3_mprintf_str {%d %d %100000s} 0 0 {Hello}] + string length $x +} {344} +do_test printf-5.2 { + sqlite3_mprintf_str {%d %d (%-10.10s) %} -9 -10 {HelloHelloHello} +} {-9 -10 (HelloHello) %} + +do_test printf-6.1 { + sqlite3_mprintf_z_test , one two three four five six +} {,one,two,three,four,five,six} + + +do_test printf-7.1 { + sqlite3_mprintf_scaled {A double: %g} 1.0e307 1.0 +} {A double: 1e+307} +do_test printf-7.2 { + sqlite3_mprintf_scaled {A double: %g} 1.0e307 10.0 +} {A double: 1e+308} +do_test printf-7.3 { + sqlite3_mprintf_scaled {A double: %g} 1.0e307 100.0 +} {A double: Inf} +do_test printf-7.4 { + sqlite3_mprintf_scaled {A double: %g} -1.0e307 100.0 +} {A double: -Inf} +do_test printf-7.5 { + sqlite3_mprintf_scaled {A double: %+g} 1.0e307 100.0 +} {A double: +Inf} + +do_test printf-8.1 { + sqlite3_mprintf_int {%u %u %u} 0x7fffffff 0x80000000 0xffffffff +} {2147483647 2147483648 4294967295} +do_test printf-8.2 { + sqlite3_mprintf_int {%lu %lu %lu} 0x7fffffff 0x80000000 0xffffffff +} {2147483647 2147483648 4294967295} +do_test printf-8.3 { + sqlite3_mprintf_int64 {%llu %llu %llu} 2147483647 2147483648 4294967296 +} {2147483647 2147483648 4294967296} +do_test printf-8.4 { + sqlite3_mprintf_int64 {%lld %lld %lld} 2147483647 2147483648 4294967296 +} {2147483647 2147483648 4294967296} +do_test printf-8.5 { + sqlite3_mprintf_int64 {%llx %llx %llx} 2147483647 2147483648 4294967296 +} {7fffffff 80000000 100000000} +do_test printf-8.6 { + sqlite3_mprintf_int64 {%llx %llo %lld} -1 -1 -1 +} {ffffffffffffffff 1777777777777777777777 -1} +do_test printf-8.7 { + sqlite3_mprintf_int64 {%llx %llx %llx} +2147483647 +2147483648 +4294967296 +} {7fffffff 80000000 100000000} + +do_test printf-9.1 { + sqlite3_mprintf_int {%*.*c} 4 4 65 +} {AAAA} +do_test printf-9.2 { + sqlite3_mprintf_int {%*.*c} -4 1 66 +} {B } +do_test printf-9.3 { + sqlite3_mprintf_int {%*.*c} 4 1 67 +} { C} +do_test printf-9.4 { + sqlite3_mprintf_int {%d %d %c} 4 1 67 +} {4 1 C} +set ten { } +set fifty $ten$ten$ten$ten$ten +do_test printf-9.5 { + sqlite3_mprintf_int {%d %*c} 1 -201 67 +} "1 C$fifty$fifty$fifty$fifty" +do_test printf-9.6 { + sqlite3_mprintf_int {hi%12345.12346yhello} 0 0 0 +} {hi} + +# Ticket #812 +# +do_test printf-10.1 { + sqlite3_mprintf_stronly %s {} +} {} + 
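#
# For reference (an illustrative aside, not one of the numbered tests): the
# practical point of %q and %Q above is safe SQL construction: %q doubles
# embedded single quotes, while %Q also supplies the outer quotes and prints
# a NULL pointer as the keyword NULL.  The leading "%d %d" follows the
# harness convention used throughout this file, and the table name t in the
# format string is only illustrative.  Kept inert behind an if {0} guard.
if {0} {
  sqlite3_mprintf_str {%d %d INSERT INTO t VALUES(%Q)} 1 2 {O'Brien}
  ;# expected: {1 2 INSERT INTO t VALUES('O''Brien')}
}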
+# Ticket #831 +# +do_test printf-10.2 { + sqlite3_mprintf_stronly %q {} +} {} + +# Ticket #1340: Test for loss of precision on large positive exponents +# +do_test printf-10.3 { + sqlite3_mprintf_double {%d %d %f} 1 1 1e300 +} {1 1 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000} + +# The non-standard '!' flag on a 'g' conversion forces a decimal point +# and at least one digit on either side of the decimal point. +# +do_test printf-11.1 { + sqlite3_mprintf_double {%d %d %!g} 1 1 1 +} {1 1 1.0} +do_test printf-11.2 { + sqlite3_mprintf_double {%d %d %!g} 1 1 123 +} {1 1 123.0} +do_test printf-11.3 { + sqlite3_mprintf_double {%d %d %!g} 1 1 12.3 +} {1 1 12.3} +do_test printf-11.4 { + sqlite3_mprintf_double {%d %d %!g} 1 1 0.123 +} {1 1 0.123} +do_test printf-11.5 { + sqlite3_mprintf_double {%d %d %!.15g} 1 1 1 +} {1 1 1.0} +do_test printf-11.6 { + sqlite3_mprintf_double {%d %d %!.15g} 1 1 1e10 +} {1 1 10000000000.0} +do_test printf-11.7 { + sqlite3_mprintf_double {%d %d %!.15g} 1 1 1e300 +} {1 1 1.0e+300} + +# Additional tests for coverage +# +do_test printf-12.1 { + sqlite3_mprintf_double {%d %d %.2000g} 1 1 1.0 +} {1 1 1} + +# Floating point boundary cases +# +do_test printf-13.1 { + sqlite3_mprintf_hexdouble %.20f 4024000000000000 +} {10.00000000000000000000} +do_test printf-13.2 { + sqlite3_mprintf_hexdouble %.20f 4197d78400000000 +} {100000000.00000000000000000000} +do_test printf-13.3 { + sqlite3_mprintf_hexdouble %.20f 4693b8b5b5056e17 +} {100000000000000000000000000000000.00000000000000000000} +do_test printf-13.4 { + sqlite3_mprintf_hexdouble %.20f 7ff0000000000000 +} {Inf} +do_test printf-13.5 { + sqlite3_mprintf_hexdouble %.20f fff0000000000000 +} {-Inf} +do_test printf-13.6 { + sqlite3_mprintf_hexdouble %.20f fff8000000000000 +} {NaN} + +do_test printf-14.1 { + sqlite3_mprintf_str {abc-%y-123} 0 0 {not used} +} {abc-} +do_test printf-14.2 { + sqlite3_mprintf_n_test {xyzzy} +} 5 +do_test printf-14.3 { + sqlite3_mprintf_str {abc-%T-123} 0 0 {not used} +} {abc-} + +do_test printf-15.1 { + sqlite3_snprintf_int 5 {12345} 0 +} {1234} +do_test printf-15.2 { + sqlite3_snprintf_int 5 {} 0 +} {} +do_test printf-15.3 { + sqlite3_snprintf_int 0 {} 0 +} {abcdefghijklmnopqrstuvwxyz} + +# Now test malloc() failure within a sqlite3_mprintf(): +# +ifcapable memdebug { + foreach var {a b c d} { + set $var [string repeat $var 400] + } + set str1 "[string repeat A 360]%d%d%s" + set str2 [string repeat B 5000] + set zSuccess "[string repeat A 360]11[string repeat B 5000]" + foreach ::iRepeat {0 1} { + set nTestNum 1 + while {1} { + sqlite3_memdebug_fail $nTestNum -repeat $::iRepeat + set z [sqlite3_mprintf_str $str1 1 1 $str2] + set nFail [sqlite3_memdebug_fail -1 -benign nBenign] + do_test printf-malloc-$::iRepeat.$nTestNum { + expr {($nFail>0 && $z eq "") || ($nFail==$nBenign && $z eq $zSuccess)} + } {1} + if {$nFail == 0} break + incr nTestNum + } + } +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/progress.test b/libraries/sqlite/unix/sqlite-3.5.1/test/progress.test new file mode 100755 index 0000000..b25a100 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/progress.test @@ -0,0 +1,177 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the 'progress callback'. +# +# $Id: progress.test,v 1.8 2007/06/15 14:53:53 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the progress callback is not available in this build, skip this +# whole file. +ifcapable !progress { + finish_test + return +} + +# Build some test data +# +execsql { + BEGIN; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + INSERT INTO t1 VALUES(5); + INSERT INTO t1 VALUES(6); + INSERT INTO t1 VALUES(7); + INSERT INTO t1 VALUES(8); + INSERT INTO t1 VALUES(9); + INSERT INTO t1 VALUES(10); + COMMIT; +} + + +# Test that the progress callback is invoked. +do_test progress-1.0 { + set counter 0 + db progress 1 "[namespace code {incr counter}] ; expr 0" + execsql { + SELECT * FROM t1 + } + expr $counter > 1 +} 1 +do_test progress-1.0.1 { + db progress +} {::namespace inscope :: {incr counter} ; expr 0} +do_test progress-1.0.2 { + set v [catch {db progress xyz bogus} msg] + lappend v $msg +} {1 {expected integer but got "xyz"}} + +# Test that the query is abandoned when the progress callback returns non-zero +do_test progress-1.1 { + set counter 0 + db progress 1 "[namespace code {incr counter}] ; expr 1" + set rc [catch {execsql { + SELECT * FROM t1 + }}] + list $counter $rc +} {1 1} + +# Test that the query is rolled back when the progress callback returns +# non-zero. +do_test progress-1.2 { + + # This figures out how many opcodes it takes to copy 5 extra rows into t1. + db progress 1 "[namespace code {incr five_rows}] ; expr 0" + set five_rows 0 + execsql { + INSERT INTO t1 SELECT a+10 FROM t1 WHERE a < 6 + } + db progress 0 "" + execsql { + DELETE FROM t1 WHERE a > 10 + } + + # Now set up the progress callback to abandon the query after the number of + # opcodes to copy 5 rows. That way, when we try to copy 6 rows, we know + # some data will have been inserted into the table by the time the progress + # callback abandons the query. + db progress $five_rows "expr 1" + catchsql { + INSERT INTO t1 SELECT a+10 FROM t1 WHERE a < 9 + } + execsql { + SELECT count(*) FROM t1 + } +} 10 + +# Test that an active transaction remains active and not rolled back +# after the progress query abandons a query. +# +# UPDATE: It is now recognised that this is a sure route to database +# corruption. So the transaction is rolled back. +do_test progress-1.3 { + + db progress 0 "" + execsql BEGIN + execsql { + INSERT INTO t1 VALUES(11) + } + db progress 1 "expr 1" + catchsql { + INSERT INTO t1 VALUES(12) + } + db progress 0 "" + catchsql COMMIT +} {1 {cannot commit - no transaction is active}} +do_test progress-1.3.1 { + execsql { + SELECT count(*) FROM t1 + } +} 10 + +# Check that a value of 0 for N means no progress callback +do_test progress-1.4 { + set counter 0 + db progress 0 "[namespace code {incr counter}] ; expr 0" + execsql { + SELECT * FROM t1; + } + set counter +} 0 + +db progress 0 "" + +# Make sure other queries can be run from within the progress +# handler. 
Ticket #1827 +# +do_test progress-1.5 { + set rx 0 + proc set_rx {args} { + db progress 0 {} + set ::rx [db eval {SELECT count(*) FROM t1}] + return [expr 0] + } + db progress 10 set_rx + db eval { + SELECT sum(a) FROM t1 + } +} {55} +do_test progress-1.6 { + set ::rx +} {10} + +# Check that abandoning a query using the progress handler does +# not cause other queries to abort. Ticket #2415. +do_test progress-1.7 { + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + INSERT INTO abc VALUES(7, 8, 9); + } + + set ::res [list] + db eval {SELECT a, b, c FROM abc} { + lappend ::res $a $b $c + db progress 10 "expr 1" + catch {db eval {SELECT a, b, c FROM abc} { }} msg + lappend ::res $msg + } + + set ::res +} {1 2 3 interrupted 4 5 6 interrupted 7 8 9 interrupted} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/ptrchng.test b/libraries/sqlite/unix/sqlite-3.5.1/test/ptrchng.test new file mode 100644 index 0000000..69bc193 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/ptrchng.test @@ -0,0 +1,222 @@ +# 2007 April 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of the tests in this file are to verify that the +# underlying TEXT or BLOB representation of an sqlite3_value +# changes appropriately when APIs from the following set are +# called: +# +# sqlite3_value_text() +# sqlite3_value_text16() +# sqlite3_value_blob() +# sqlite3_value_bytes() +# sqlite3_value_bytes16() +# +# $Id: ptrchng.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !bloblit { + finish_test + return +} + +# Register the "pointer_change" SQL function. +# +sqlite3_create_function db + +do_test ptrchng-1.1 { + execsql { + CREATE TABLE t1(x INTEGER PRIMARY KEY, y BLOB); + INSERT INTO t1 VALUES(1, 'abc'); + INSERT INTO t1 VALUES(2, + 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234356789'); + INSERT INTO t1 VALUES(3, x'626c6f62'); + INSERT INTO t1 VALUES(4, + x'000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021222324' + ); + SELECT count(*) FROM t1; + } +} {4} + +# For the short entries that fit in the Mem.zBuf[], the pointer should +# never change regardless of what type conversions occur. 
+# +do_test ptrchng-2.1 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'blob') FROM t1 WHERE x=1 + } +} {0} +do_test ptrchng-2.2 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text') FROM t1 WHERE x=1 + } +} {0} +ifcapable utf16 { + do_test ptrchng-2.3 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=1 + } + } {0} + do_test ptrchng-2.4 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=1 + } + } {0} + do_test ptrchng-2.5 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=1 + } + } {0} + do_test ptrchng-2.6 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=1 + } + } {0} +} +do_test ptrchng-2.11 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'blob') FROM t1 WHERE x=3 + } +} {0} +do_test ptrchng-2.12 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text') FROM t1 WHERE x=3 + } +} {0} +ifcapable utf16 { + do_test ptrchng-2.13 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=3 + } + } {0} + do_test ptrchng-2.14 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=3 + } + } {0} + do_test ptrchng-2.15 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=3 + } + } {0} + do_test ptrchng-2.16 { +btree_breakpoint + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=3 + } + } {0} +} + +# For the long entries that do not fit in the Mem.zBuf[], the pointer +# should change sometimes. +# +do_test ptrchng-3.1 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'blob') FROM t1 WHERE x=2 + } +} {0} +do_test ptrchng-3.2 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text') FROM t1 WHERE x=2 + } +} {0} +ifcapable utf16 { + do_test ptrchng-3.3 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=2 + } + } {1} + do_test ptrchng-3.4 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=2 + } + } {1} + do_test ptrchng-3.5 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=2 + } + } {0} + do_test ptrchng-3.6 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=2 + } + } {1} +} +do_test ptrchng-3.11 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'blob') FROM t1 WHERE x=4 + } +} {0} +do_test ptrchng-3.12 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text') FROM t1 WHERE x=4 + } +} {0} +ifcapable utf16 { + do_test ptrchng-3.13 { + execsql { + SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=4 + } + } {1} + do_test ptrchng-3.14 { + execsql { + SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=4 + } + } {1} + do_test ptrchng-3.15 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=4 + } + } {0} + do_test ptrchng-3.16 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=4 + } + } {1} +} + +# A call to _bytes() should never reformat a _text() or _blob(). 
+# +do_test ptrchng-4.1 { + execsql { + SELECT pointer_change(y, 'text', 'bytes', 'text') FROM t1 + } +} {0 0 0 0} +do_test ptrchng-4.2 { + execsql { + SELECT pointer_change(y, 'blob', 'bytes', 'blob') FROM t1 + } +} {0 0 0 0} + +# A call to _blob() should never trigger a reformat +# +do_test ptrchng-5.1 { + execsql { + SELECT pointer_change(y, 'text', 'bytes', 'blob') FROM t1 + } +} {0 0 0 0} +ifcapable utf16 { + do_test ptrchng-5.2 { + execsql { + SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 + } + } {0 0 0 0} + do_test ptrchng-5.3 { + execsql { + SELECT pointer_change(y, 'text16', 'bytes16', 'blob') FROM t1 + } + } {0 0 0 0} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/quick.test b/libraries/sqlite/unix/sqlite-3.5.1/test/quick.test new file mode 100644 index 0000000..a87ae93 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/quick.test @@ -0,0 +1,109 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. +# +# $Id: quick.test,v 1.64 2007/09/14 16:20:01 danielk1977 Exp $ + +proc lshift {lvar} { + upvar $lvar l + set ret [lindex $l 0] + set l [lrange $l 1 end] + return $ret +} +while {[set arg [lshift argv]] != ""} { + switch -- $arg { + -sharedpagercache { + sqlite3_enable_shared_cache 1 + } + -soak { + set SOAKTEST 1 + } + default { + set argv [linsert $argv 0 $arg] + break + } + } +} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test +proc finish_test {} {} +set ISQUICK 1 + +set EXCLUDE { + all.test + async.test + async2.test + btree2.test + btree3.test + btree4.test + btree5.test + btree6.test + corrupt.test + crash.test + crash2.test + crash3.test + exclusive3.test + fuzz.test + fuzz_malloc.test + in2.test + loadext.test + malloc.test + malloc2.test + malloc3.test + malloc4.test + memleak.test + misc7.test + misuse.test + onefile.test + quick.test + soak.test + speed1.test + speed2.test + sqllimits1.test + + thread001.test + thread002.test + + incrvacuum_ioerr.test + autovacuum_crash.test + btree8.test + utf16.test + shared_err.test + vtab_err.test +} + +if {[sqlite3 -has-codec]} { + # lappend EXCLUDE \ + # conflict.test +} + + +# Files to include in the test. If this list is empty then everything +# that is not in the EXCLUDE list is run. +# +set INCLUDE { +} + +foreach testfile [lsort -dictionary [glob $testdir/*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } +} +source $testdir/misuse.test + +set sqlite_open_file_count 0 +really_finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/quote.test b/libraries/sqlite/unix/sqlite-3.5.1/test/quote.test new file mode 100644 index 0000000..f13d6f9 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/quote.test @@ -0,0 +1,89 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is the ability to specify table and column names +# as quoted strings. +# +# $Id: quote.test,v 1.7 2007/04/25 11:32:30 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table with a strange name and with strange column names. +# +do_test quote-1.0 { + catchsql {CREATE TABLE '@abc' ( '#xyz' int, '!pqr' text );} +} {0 {}} + +# Insert, update and query the table. +# +do_test quote-1.1 { + catchsql {INSERT INTO '@abc' VALUES(5,'hello')} +} {0 {}} +do_test quote-1.2.1 { + catchsql {SELECT * FROM '@abc'} +} {0 {5 hello}} +do_test quote-1.2.2 { + catchsql {SELECT * FROM [@abc]} ;# SqlServer compatibility +} {0 {5 hello}} +do_test quote-1.2.3 { + catchsql {SELECT * FROM `@abc`} ;# MySQL compatibility +} {0 {5 hello}} +do_test quote-1.3 { + catchsql { + SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc' + } +} {0 {hello 10}} +do_test quote-1.3.1 { + catchsql { + SELECT '!pqr', '#xyz'+5 FROM '@abc' + } +} {0 {!pqr 5}} +do_test quote-1.3.2 { + catchsql { + SELECT "!pqr", "#xyz"+5 FROM '@abc' + } +} {0 {hello 10}} +do_test quote-1.3.3 { + catchsql { + SELECT [!pqr], `#xyz`+5 FROM '@abc' + } +} {0 {hello 10}} +do_test quote-1.3.4 { + set r [catch { + execsql {SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc'} + } msg ] + lappend r $msg +} {0 {hello 10}} +do_test quote-1.4 { + set r [catch { + execsql {UPDATE '@abc' SET '#xyz'=11} + } msg ] + lappend r $msg +} {0 {}} +do_test quote-1.5 { + set r [catch { + execsql {SELECT '@abc'.'!pqr', '@abc'.'#xyz'+5 FROM '@abc'} + } msg ] + lappend r $msg +} {0 {hello 16}} + +# Drop the table with the strange name. +# +do_test quote-1.6 { + set r [catch { + execsql {DROP TABLE '@abc'} + } msg ] + lappend r $msg +} {0 {}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/rdonly.test b/libraries/sqlite/unix/sqlite-3.5.1/test/rdonly.test new file mode 100644 index 0000000..2f6ebc7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/rdonly.test @@ -0,0 +1,65 @@ +# 2007 April 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite treats a database +# as readonly if its write version is set to high. +# +# $Id: rdonly.test,v 1.1 2007/04/24 17:27:52 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Create a database. +# +do_test rdonly-1.1 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1; + } +} {1} + +# Changes the write version from 1 to 2. Verify that the database +# can be read but not written. +# +do_test rdonly-1.2 { + db close + hexio_get_int [hexio_read test.db 18 1] +} 1 +do_test rdonly-1.3 { + hexio_write test.db 18 02 + sqlite3 db test.db + execsql { + SELECT * FROM t1; + } +} {1} +do_test rdonly-1.4 { + catchsql { + INSERT INTO t1 VALUES(2) + } +} {1 {attempt to write a readonly database}} + +# Change the write version back to 1. Verify that the database +# is read-write again. 
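(Editorial sketch, not part of the patch.) The mechanism behind the rdonly-1.* tests is the file-format "write version" byte at offset 18 of the database header: if it holds a value newer than the library understands, the file can be read but not written. The same idea in isolation, using plain Tcl file I/O instead of the hexio_* helpers from tester.tcl; the file name is illustrative, and note that newer SQLite releases interpret write version 2 as WAL, so the exact value 2 is specific to the 3.5.x line being tested here:

  package require sqlite3
  sqlite3 db demo.db
  db eval {CREATE TABLE t1(x); INSERT INTO t1 VALUES(1)}
  db close

  # Bump the write-version byte (offset 18 of the header) to 2.
  set f [open demo.db r+]
  fconfigure $f -translation binary
  seek $f 18
  puts -nonewline $f [binary format c 2]
  close $f

  sqlite3 db demo.db
  puts [db eval {SELECT x FROM t1}]                      ;# reads still work
  puts [catch {db eval {INSERT INTO t1 VALUES(2)}} msg]  ;# write is refused
  puts $msg
  db close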
+# +do_test rdonly-1.5 { + db close + hexio_write test.db 18 01 + sqlite3 db test.db + catchsql { + INSERT INTO t1 VALUES(2); + SELECT * FROM t1; + } +} {0 {1 2}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/reindex.test b/libraries/sqlite/unix/sqlite-3.5.1/test/reindex.test new file mode 100644 index 0000000..503d797 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/reindex.test @@ -0,0 +1,172 @@ +# 2004 November 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# This file implements tests for the REINDEX command. +# +# $Id: reindex.test,v 1.3 2005/01/27 00:22:04 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# There is nothing to test if REINDEX is disable for this build. +# +ifcapable {!reindex} { + finish_test + return +} + +# Basic sanity checks. +# +do_test reindex-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + CREATE INDEX i1 ON t1(a); + REINDEX; + } +} {} +integrity_check reindex-1.2 +do_test reindex-1.3 { + execsql { + REINDEX t1; + } +} {} +integrity_check reindex-1.4 +do_test reindex-1.5 { + execsql { + REINDEX i1; + } +} {} +integrity_check reindex-1.6 +do_test reindex-1.7 { + execsql { + REINDEX main.t1; + } +} {} +do_test reindex-1.8 { + execsql { + REINDEX main.i1; + } +} {} +do_test reindex-1.9 { + catchsql { + REINDEX bogus + } +} {1 {unable to identify the object to be reindexed}} + +# Set up a table for testing that includes several different collating +# sequences including some that we can modify. +# +do_test reindex-2.1 { + proc c1 {a b} { + return [expr {-[string compare $a $b]}] + } + proc c2 {a b} { + return [expr {-[string compare [string tolower $a] [string tolower $b]]}] + } + db collate c1 c1 + db collate c2 c2 + execsql { + CREATE TABLE t2( + a TEXT PRIMARY KEY COLLATE c1, + b TEXT UNIQUE COLLATE c2, + c TEXT COLLATE nocase, + d TEST COLLATE binary + ); + INSERT INTO t2 VALUES('abc','abc','abc','abc'); + INSERT INTO t2 VALUES('ABCD','ABCD','ABCD','ABCD'); + INSERT INTO t2 VALUES('bcd','bcd','bcd','bcd'); + INSERT INTO t2 VALUES('BCDE','BCDE','BCDE','BCDE'); + SELECT a FROM t2 ORDER BY a; + } +} {bcd abc BCDE ABCD} +do_test reindex-2.2 { + execsql { + SELECT b FROM t2 ORDER BY b; + } +} {BCDE bcd ABCD abc} +do_test reindex-2.3 { + execsql { + SELECT c FROM t2 ORDER BY c; + } +} {abc ABCD bcd BCDE} +do_test reindex-2.4 { + execsql { + SELECT d FROM t2 ORDER BY d; + } +} {ABCD BCDE abc bcd} + +# Change a collating sequence function. Verify that REINDEX rebuilds +# the index. 
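(Editorial sketch, not part of the patch.) The scenario exercised by reindex-2.5 through 2.8 can be reproduced in isolation with the plain Tcl interface: register a Tcl collation, index a column with it, change the collation proc, then REINDEX so the index agrees with the new definition. A minimal sketch; the table, index, and collation names are illustrative:

  package require sqlite3
  sqlite3 db :memory:

  proc revcmp {a b} { return [expr {-[string compare $a $b]}] }
  db collate rev revcmp
  db eval {
    CREATE TABLE words(w TEXT COLLATE rev);
    CREATE INDEX words_idx ON words(w);
    INSERT INTO words VALUES('pear');
    INSERT INTO words VALUES('apple');
    INSERT INTO words VALUES('fig');
  }

  # Redefine the collation; the existing index is now ordered by a rule
  # that no longer exists, so it must be rebuilt before it can be trusted.
  proc revcmp {a b} { return [string compare $a $b] }
  db eval {REINDEX rev}
  puts [db eval {SELECT w FROM words ORDER BY w}]   ;# apple fig pear
  db close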
+# +do_test reindex-2.5 { + proc c1 {a b} { + return [string compare $a $b] + } + execsql { + SELECT a FROM t2 ORDER BY a; + } +} {bcd abc BCDE ABCD} +ifcapable {integrityck} { + do_test reindex-2.5.1 { + string equal ok [execsql {PRAGMA integrity_check}] + } {0} +} +do_test reindex-2.6 { + execsql { + REINDEX c2; + SELECT a FROM t2 ORDER BY a; + } +} {bcd abc BCDE ABCD} +do_test reindex-2.7 { + execsql { + REINDEX t1; + SELECT a FROM t2 ORDER BY a; + } +} {bcd abc BCDE ABCD} +do_test reindex-2.8 { + execsql { + REINDEX c1; + SELECT a FROM t2 ORDER BY a; + } +} {ABCD BCDE abc bcd} +integrity_check reindex-2.8.1 + +# Try to REINDEX an index for which the collation sequence is not available. +# +do_test reindex-3.1 { + sqlite3 db2 test.db + catchsql { + REINDEX c1; + } db2 +} {1 {no such collation sequence: c1}} +do_test reindex-3.2 { + proc need_collate {collation} { + db2 collate c1 c1 + } + db2 collation_needed need_collate + catchsql { + REINDEX c1; + } db2 +} {0 {}} +do_test reindex-3.3 { + catchsql { + REINDEX; + } db2 +} {1 {no such collation sequence: c2}} + +do_test reindex-3.99 { + db2 close +} {} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/rollback.test b/libraries/sqlite/unix/sqlite-3.5.1/test/rollback.test new file mode 100644 index 0000000..b0047d6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/rollback.test @@ -0,0 +1,82 @@ +# 2004 June 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is verifying that a rollback in one statement +# caused by an ON CONFLICT ROLLBACK clause aborts any other pending +# statements. +# +# $Id: rollback.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +set DB [sqlite3_connection_pointer db] + +do_test rollback-1.1 { + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + SELECT * FROM t1; + } +} {1 2 3 4} + +ifcapable conflict { + do_test rollback-1.2 { + execsql { + CREATE TABLE t3(a unique on conflict rollback); + INSERT INTO t3 SELECT a FROM t1; + BEGIN; + INSERT INTO t1 SELECT * FROM t1; + } + } {} +} +do_test rollback-1.3 { + set STMT [sqlite3_prepare $DB "SELECT a FROM t1" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} + +ifcapable conflict { + # This causes a ROLLBACK, which deletes the table out from underneath the + # SELECT statement. 
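(Editorial sketch, not part of the patch.) The behavior being pinned down here is that a UNIQUE constraint declared ON CONFLICT ROLLBACK rolls back the entire open transaction, not just the failing statement, which is why the still-running SELECT on t1 ends up aborted. The rollback half of that, shown in isolation with the plain Tcl interface (table name illustrative; the statement-abort half needs the C-level statement handles the harness uses):

  package require sqlite3
  sqlite3 db :memory:
  db eval {
    CREATE TABLE t(a UNIQUE ON CONFLICT ROLLBACK);
    INSERT INTO t VALUES(1);
    BEGIN;
    INSERT INTO t VALUES(2);   -- pending change inside the transaction
  }
  catch {db eval {INSERT INTO t VALUES(1)}} msg   ;# uniqueness violation
  puts $msg
  # The conflict rolled back the whole transaction, so the 2 is gone and
  # no transaction is active any more.
  puts [db eval {SELECT a FROM t}]   ;# => 1
  db close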
+ # + do_test rollback-1.4 { + catchsql { + INSERT INTO t3 SELECT a FROM t1; + } + } {1 {column a is not unique}} + + # Try to continue with the SELECT statement + # + do_test rollback-1.5 { + sqlite3_step $STMT + } {SQLITE_ERROR} + + # Restart the SELECT statement + # + do_test rollback-1.6 { sqlite3_reset $STMT } {SQLITE_ABORT} +} else { + do_test rollback-1.6 { sqlite3_reset $STMT } {SQLITE_OK} +} + +do_test rollback-1.7 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test rollback-1.8 { + sqlite3_step $STMT +} {SQLITE_ROW} +do_test rollback-1.9 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/rowid.test b/libraries/sqlite/unix/sqlite-3.5.1/test/rowid.test new file mode 100644 index 0000000..9048cc8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/rowid.test @@ -0,0 +1,674 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the magic ROWID column that is +# found on all tables. +# +# $Id: rowid.test,v 1.19 2007/04/25 11:32:30 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Basic ROWID functionality tests. +# +do_test rowid-1.1 { + execsql { + CREATE TABLE t1(x int, y int); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + SELECT x FROM t1 ORDER BY y; + } +} {1 3} +do_test rowid-1.2 { + set r [execsql {SELECT rowid FROM t1 ORDER BY x}] + global x2rowid rowid2x + set x2rowid(1) [lindex $r 0] + set x2rowid(3) [lindex $r 1] + set rowid2x($x2rowid(1)) 1 + set rowid2x($x2rowid(3)) 3 + llength $r +} {2} +do_test rowid-1.3 { + global x2rowid + set sql "SELECT x FROM t1 WHERE rowid==$x2rowid(1)" + execsql $sql +} {1} +do_test rowid-1.4 { + global x2rowid + set sql "SELECT x FROM t1 WHERE rowid==$x2rowid(3)" + execsql $sql +} {3} +do_test rowid-1.5 { + global x2rowid + set sql "SELECT x FROM t1 WHERE oid==$x2rowid(1)" + execsql $sql +} {1} +do_test rowid-1.6 { + global x2rowid + set sql "SELECT x FROM t1 WHERE OID==$x2rowid(3)" + execsql $sql +} {3} +do_test rowid-1.7 { + global x2rowid + set sql "SELECT x FROM t1 WHERE _rowid_==$x2rowid(1)" + execsql $sql +} {1} +do_test rowid-1.7.1 { + while 1 { + set norow [expr {int(rand()*1000000)}] + if {$norow!=$x2rowid(1) && $norow!=$x2rowid(3)} break + } + execsql "SELECT x FROM t1 WHERE rowid=$norow" +} {} +do_test rowid-1.8 { + global x2rowid + set v [execsql {SELECT x, oid FROM t1 order by x}] + set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)] + expr {$v==$v2} +} {1} +do_test rowid-1.9 { + global x2rowid + set v [execsql {SELECT x, RowID FROM t1 order by x}] + set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)] + expr {$v==$v2} +} {1} +do_test rowid-1.10 { + global x2rowid + set v [execsql {SELECT x, _rowid_ FROM t1 order by x}] + set v2 [list 1 $x2rowid(1) 3 $x2rowid(3)] + expr {$v==$v2} +} {1} + +# We can insert or update the ROWID column. 
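(Editorial sketch, not part of the patch.) It may help to see the point of the next group in isolation first: ROWID and its aliases OID and _ROWID_ behave like an ordinary writable column, so a row's rowid can be chosen at INSERT time and moved later with UPDATE, provided the new value stays unique. A small sketch using the plain Tcl interface (names and values illustrative):

  package require sqlite3
  sqlite3 db :memory:
  db eval {
    CREATE TABLE t(x, y);
    INSERT INTO t(rowid, x, y) VALUES(1000, 'a', 'b');   -- pick the rowid explicitly
    UPDATE t SET rowid = 2000 WHERE x = 'a';             -- move the row to a new rowid
  }
  puts [db eval {SELECT rowid, x, y FROM t}]   ;# => 2000 a b
  db close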
+# +do_test rowid-2.1 { + catchsql { + INSERT INTO t1(rowid,x,y) VALUES(1234,5,6); + SELECT rowid, * FROM t1; + } +} {0 {1 1 2 2 3 4 1234 5 6}} +do_test rowid-2.2 { + catchsql { + UPDATE t1 SET rowid=12345 WHERE x==1; + SELECT rowid, * FROM t1 + } +} {0 {2 3 4 1234 5 6 12345 1 2}} +do_test rowid-2.3 { + catchsql { + INSERT INTO t1(y,x,oid) VALUES(8,7,1235); + SELECT rowid, * FROM t1 WHERE rowid>1000; + } +} {0 {1234 5 6 1235 7 8 12345 1 2}} +do_test rowid-2.4 { + catchsql { + UPDATE t1 SET oid=12346 WHERE x==1; + SELECT rowid, * FROM t1; + } +} {0 {2 3 4 1234 5 6 1235 7 8 12346 1 2}} +do_test rowid-2.5 { + catchsql { + INSERT INTO t1(x,_rowid_,y) VALUES(9,1236,10); + SELECT rowid, * FROM t1 WHERE rowid>1000; + } +} {0 {1234 5 6 1235 7 8 1236 9 10 12346 1 2}} +do_test rowid-2.6 { + catchsql { + UPDATE t1 SET _rowid_=12347 WHERE x==1; + SELECT rowid, * FROM t1 WHERE rowid>1000; + } +} {0 {1234 5 6 1235 7 8 1236 9 10 12347 1 2}} + +# But we can use ROWID in the WHERE clause of an UPDATE that does not +# change the ROWID. +# +do_test rowid-2.7 { + global x2rowid + set sql "UPDATE t1 SET x=2 WHERE OID==$x2rowid(3)" + execsql $sql + execsql {SELECT x FROM t1 ORDER BY x} +} {1 2 5 7 9} +do_test rowid-2.8 { + global x2rowid + set sql "UPDATE t1 SET x=3 WHERE _rowid_==$x2rowid(3)" + execsql $sql + execsql {SELECT x FROM t1 ORDER BY x} +} {1 3 5 7 9} + +# We cannot index by ROWID +# +do_test rowid-2.9 { + set v [catch {execsql {CREATE INDEX idxt1 ON t1(rowid)}} msg] + lappend v $msg +} {1 {table t1 has no column named rowid}} +do_test rowid-2.10 { + set v [catch {execsql {CREATE INDEX idxt1 ON t1(_rowid_)}} msg] + lappend v $msg +} {1 {table t1 has no column named _rowid_}} +do_test rowid-2.11 { + set v [catch {execsql {CREATE INDEX idxt1 ON t1(oid)}} msg] + lappend v $msg +} {1 {table t1 has no column named oid}} +do_test rowid-2.12 { + set v [catch {execsql {CREATE INDEX idxt1 ON t1(x, rowid)}} msg] + lappend v $msg +} {1 {table t1 has no column named rowid}} + +# Columns defined in the CREATE statement override the buildin ROWID +# column names. +# +do_test rowid-3.1 { + execsql { + CREATE TABLE t2(rowid int, x int, y int); + INSERT INTO t2 VALUES(0,2,3); + INSERT INTO t2 VALUES(4,5,6); + INSERT INTO t2 VALUES(7,8,9); + SELECT * FROM t2 ORDER BY x; + } +} {0 2 3 4 5 6 7 8 9} +do_test rowid-3.2 { + execsql {SELECT * FROM t2 ORDER BY rowid} +} {0 2 3 4 5 6 7 8 9} +do_test rowid-3.3 { + execsql {SELECT rowid, x, y FROM t2 ORDER BY rowid} +} {0 2 3 4 5 6 7 8 9} +do_test rowid-3.4 { + set r1 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY rowid}] + foreach {a b c d e f} $r1 {} + set r2 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY x DESC}] + foreach {u v w x y z} $r2 {} + expr {$u==$e && $w==$c && $y==$a} +} {1} +# sqlite3 v3 - do_probtest doesn't exist anymore? +if 0 { +do_probtest rowid-3.5 { + set r1 [execsql {SELECT _rowid_, rowid FROM t2 ORDER BY rowid}] + foreach {a b c d e f} $r1 {} + expr {$a!=$b && $c!=$d && $e!=$f} +} {1} +} + +# Let's try some more complex examples, including some joins. 
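(Editorial sketch, not part of the patch.) The joins below constrain one table's rowid to equal the other's, which lets SQLite fetch the matching row with a direct rowid lookup instead of a scan; that is what the sqlite_search_count checks in rowid-4.5 are measuring. A hedged sketch of the shape of such a join (tables and values are illustrative):

  package require sqlite3
  sqlite3 db :memory:
  db eval {
    CREATE TABLE a(x);
    CREATE TABLE b(y);
    INSERT INTO a VALUES(10);     INSERT INTO a VALUES(20);
    INSERT INTO b VALUES('ten');  INSERT INTO b VALUES('twenty');
  }
  # Rows pair up by rowid, so b.y is fetched by seeking directly to a.rowid.
  puts [db eval {SELECT b.y FROM a, b WHERE a.x = 20 AND b.rowid = a.rowid}]   ;# twenty
  db close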
+# +do_test rowid-4.1 { + execsql { + DELETE FROM t1; + DELETE FROM t2; + } + for {set i 1} {$i<=50} {incr i} { + execsql "INSERT INTO t1(x,y) VALUES($i,[expr {$i*$i}])" + } + execsql {INSERT INTO t2 SELECT _rowid_, x*y, y*y FROM t1} + execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1.rowid==t2.rowid} +} {256} +do_test rowid-4.2 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.rowid==t2.rowid} +} {256} +do_test rowid-4.2.1 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.oid==t2.rowid} +} {256} +do_test rowid-4.2.2 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1._rowid_==t2.rowid} +} {256} +do_test rowid-4.2.3 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t2.rowid==t1.rowid} +} {256} +do_test rowid-4.2.4 { + execsql {SELECT t2.y FROM t2, t1 WHERE t2.rowid==t1.oid AND t1.x==4} +} {256} +do_test rowid-4.2.5 { + execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1._rowid_==t2.rowid} +} {256} +do_test rowid-4.2.6 { + execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t2.rowid==t1.rowid} +} {256} +do_test rowid-4.2.7 { + execsql {SELECT t2.y FROM t1, t2 WHERE t2.rowid==t1.oid AND t1.x==4} +} {256} +do_test rowid-4.3 { + execsql {CREATE INDEX idxt1 ON t1(x)} + execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1.rowid==t2.rowid} +} {256} +do_test rowid-4.3.1 { + execsql {SELECT t2.y FROM t1, t2 WHERE t1.x==4 AND t1._rowid_==t2.rowid} +} {256} +do_test rowid-4.3.2 { + execsql {SELECT t2.y FROM t1, t2 WHERE t2.rowid==t1.oid AND 4==t1.x} +} {256} +do_test rowid-4.4 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1.rowid==t2.rowid} +} {256} +do_test rowid-4.4.1 { + execsql {SELECT t2.y FROM t2, t1 WHERE t1.x==4 AND t1._rowid_==t2.rowid} +} {256} +do_test rowid-4.4.2 { + execsql {SELECT t2.y FROM t2, t1 WHERE t2.rowid==t1.oid AND 4==t1.x} +} {256} +do_test rowid-4.5 { + execsql {CREATE INDEX idxt2 ON t2(y)} + set sqlite_search_count 0 + concat [execsql { + SELECT t1.x FROM t2, t1 + WHERE t2.y==256 AND t1.rowid==t2.rowid + }] $sqlite_search_count +} {4 3} +do_test rowid-4.5.1 { + set sqlite_search_count 0 + concat [execsql { + SELECT t1.x FROM t2, t1 + WHERE t1.OID==t2.rowid AND t2.y==81 + }] $sqlite_search_count +} {3 3} +do_test rowid-4.6 { + execsql { + SELECT t1.x FROM t1, t2 + WHERE t2.y==256 AND t1.rowid==t2.rowid + } +} {4} + +do_test rowid-5.1.1 { + ifcapable subquery { + execsql {DELETE FROM t1 WHERE _rowid_ IN (SELECT oid FROM t1 WHERE x>8)} + } else { + set oids [execsql {SELECT oid FROM t1 WHERE x>8}] + set where "_rowid_ = [join $oids { OR _rowid_ = }]" + execsql "DELETE FROM t1 WHERE $where" + } +} {} +do_test rowid-5.1.2 { + execsql {SELECT max(x) FROM t1} +} {8} + +# Make sure a "WHERE rowid=X" clause works when there is no ROWID of X. +# +do_test rowid-6.1 { + execsql { + SELECT x FROM t1 + } +} {1 2 3 4 5 6 7 8} +do_test rowid-6.2 { + for {set ::norow 1} {1} {incr ::norow} { + if {[execsql "SELECT x FROM t1 WHERE rowid=$::norow"]==""} break + } + execsql [subst { + DELETE FROM t1 WHERE rowid=$::norow + }] +} {} +do_test rowid-6.3 { + execsql { + SELECT x FROM t1 + } +} {1 2 3 4 5 6 7 8} + +# Beginning with version 2.3.4, SQLite computes rowids of new rows by +# finding the maximum current rowid and adding one. It falls back to +# the old random algorithm if the maximum rowid is the largest integer. +# The following tests are for this new behavior. 
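(Editorial sketch, not part of the patch.) In other words, an omitted or NULL rowid normally becomes max(rowid)+1 for the table, and only when the table already holds the largest possible 64-bit rowid does SQLite fall back to probing for an unused random value (the case the rowid-12.* tests exercise at the end of this file). Both branches in a short sketch with the plain Tcl interface (names illustrative):

  package require sqlite3
  sqlite3 db :memory:
  db eval {CREATE TABLE t(id INTEGER PRIMARY KEY, v)}

  db eval {INSERT INTO t(id, v) VALUES(100, 'a')}
  db eval {INSERT INTO t(v) VALUES('b')}
  puts [db last_insert_rowid]   ;# 101: current maximum plus one

  db eval {INSERT INTO t(id, v) VALUES(9223372036854775807, 'c')}
  db eval {INSERT INTO t(v) VALUES('d')}
  puts [db last_insert_rowid]   ;# an unused, effectively random rowid
  db close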
+# +do_test rowid-7.0 { + execsql { + DELETE FROM t1; + DROP TABLE t2; + DROP INDEX idxt1; + INSERT INTO t1 VALUES(1,2); + SELECT rowid, * FROM t1; + } +} {1 1 2} +do_test rowid-7.1 { + execsql { + INSERT INTO t1 VALUES(99,100); + SELECT rowid,* FROM t1 + } +} {1 1 2 2 99 100} +do_test rowid-7.2 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t2(b) VALUES(55); + SELECT * FROM t2; + } +} {1 55} +do_test rowid-7.3 { + execsql { + INSERT INTO t2(b) VALUES(66); + SELECT * FROM t2; + } +} {1 55 2 66} +do_test rowid-7.4 { + execsql { + INSERT INTO t2(a,b) VALUES(1000000,77); + INSERT INTO t2(b) VALUES(88); + SELECT * FROM t2; + } +} {1 55 2 66 1000000 77 1000001 88} +do_test rowid-7.5 { + execsql { + INSERT INTO t2(a,b) VALUES(2147483647,99); + INSERT INTO t2(b) VALUES(11); + SELECT b FROM t2 ORDER BY b; + } +} {11 55 66 77 88 99} +ifcapable subquery { + do_test rowid-7.6 { + execsql { + SELECT b FROM t2 WHERE a NOT IN(1,2,1000000,1000001,2147483647); + } + } {11} + do_test rowid-7.7 { + execsql { + INSERT INTO t2(b) VALUES(22); + INSERT INTO t2(b) VALUES(33); + INSERT INTO t2(b) VALUES(44); + INSERT INTO t2(b) VALUES(55); + SELECT b FROM t2 WHERE a NOT IN(1,2,1000000,1000001,2147483647) + ORDER BY b; + } + } {11 22 33 44 55} +} +do_test rowid-7.8 { + execsql { + DELETE FROM t2 WHERE a!=2; + INSERT INTO t2(b) VALUES(111); + SELECT * FROM t2; + } +} {2 66 3 111} + +ifcapable {trigger} { +# Make sure AFTER triggers that do INSERTs do not change the last_insert_rowid. +# Ticket #290 +# +do_test rowid-8.1 { + execsql { + CREATE TABLE t3(a integer primary key); + CREATE TABLE t4(x); + INSERT INTO t4 VALUES(1); + CREATE TRIGGER r3 AFTER INSERT on t3 FOR EACH ROW BEGIN + INSERT INTO t4 VALUES(NEW.a+10); + END; + SELECT * FROM t3; + } +} {} +do_test rowid-8.2 { + execsql { + SELECT rowid, * FROM t4; + } +} {1 1} +do_test rowid-8.3 { + execsql { + INSERT INTO t3 VALUES(123); + SELECT last_insert_rowid(); + } +} {123} +do_test rowid-8.4 { + execsql { + SELECT * FROM t3; + } +} {123} +do_test rowid-8.5 { + execsql { + SELECT rowid, * FROM t4; + } +} {1 1 2 133} +do_test rowid-8.6 { + execsql { + INSERT INTO t3 VALUES(NULL); + SELECT last_insert_rowid(); + } +} {124} +do_test rowid-8.7 { + execsql { + SELECT * FROM t3; + } +} {123 124} +do_test rowid-8.8 { + execsql { + SELECT rowid, * FROM t4; + } +} {1 1 2 133 3 134} +} ;# endif trigger + +# If triggers are not enable, simulate their effect for the tests that +# follow. +ifcapable {!trigger} { + execsql { + CREATE TABLE t3(a integer primary key); + INSERT INTO t3 VALUES(123); + INSERT INTO t3 VALUES(124); + } +} + +# ticket #377: Comparison between integer primiary key and floating point +# values. 
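(Editorial sketch, not part of the patch.) Ticket #377 is about making sure that when an integer rowid or INTEGER PRIMARY KEY is compared against a floating point value, the fractional part is neither rounded nor truncated away when the comparison is turned into a rowid seek, so 123.5 must sit strictly between 123 and 124. A tiny sketch of the property (values illustrative):

  package require sqlite3
  sqlite3 db :memory:
  db eval {
    CREATE TABLE t(id INTEGER PRIMARY KEY);
    INSERT INTO t VALUES(123);
    INSERT INTO t VALUES(124);
  }
  puts [db eval {SELECT id FROM t WHERE id < 123.5}]   ;# => 123
  puts [db eval {SELECT id FROM t WHERE id > 123.5}]   ;# => 124
  db close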
+# +do_test rowid-9.1 { + execsql { + SELECT * FROM t3 WHERE a<123.5 + } +} {123} +do_test rowid-9.2 { + execsql { + SELECT * FROM t3 WHERE a<124.5 + } +} {123 124} +do_test rowid-9.3 { + execsql { + SELECT * FROM t3 WHERE a>123.5 + } +} {124} +do_test rowid-9.4 { + execsql { + SELECT * FROM t3 WHERE a>122.5 + } +} {123 124} +do_test rowid-9.5 { + execsql { + SELECT * FROM t3 WHERE a==123.5 + } +} {} +do_test rowid-9.6 { + execsql { + SELECT * FROM t3 WHERE a==123.000 + } +} {123} +do_test rowid-9.7 { + execsql { + SELECT * FROM t3 WHERE a>100.5 AND a<200.5 + } +} {123 124} +do_test rowid-9.8 { + execsql { + SELECT * FROM t3 WHERE a>'xyz'; + } +} {} +do_test rowid-9.9 { + execsql { + SELECT * FROM t3 WHERE a<'xyz'; + } +} {123 124} +do_test rowid-9.10 { + execsql { + SELECT * FROM t3 WHERE a>=122.9 AND a<=123.1 + } +} {123} + +# Ticket #567. Comparisons of ROWID or integery primary key against +# floating point numbers still do not always work. +# +do_test rowid-10.1 { + execsql { + CREATE TABLE t5(a); + INSERT INTO t5 VALUES(1); + INSERT INTO t5 VALUES(2); + INSERT INTO t5 SELECT a+2 FROM t5; + INSERT INTO t5 SELECT a+4 FROM t5; + SELECT rowid, * FROM t5; + } +} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8} +do_test rowid-10.2 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.5} +} {6 6 7 7 8 8} +do_test rowid-10.3 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.0} +} {5 5 6 6 7 7 8 8} +do_test rowid-10.4 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>5.5} +} {6 6 7 7 8 8} +do_test rowid-10.3.2 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>5.0} +} {6 6 7 7 8 8} +do_test rowid-10.5 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5<=rowid} +} {6 6 7 7 8 8} +do_test rowid-10.6 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5=rowid} +} {1 1 2 2 3 3 4 4 5 5} +do_test rowid-10.10 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5>rowid} +} {1 1 2 2 3 3 4 4 5 5} +do_test rowid-10.11 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.5 ORDER BY rowid DESC} +} {8 8 7 7 6 6} +do_test rowid-10.11.2 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>=5.0 ORDER BY rowid DESC} +} {8 8 7 7 6 6 5 5} +do_test rowid-10.12 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>5.5 ORDER BY rowid DESC} +} {8 8 7 7 6 6} +do_test rowid-10.12.2 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>5.0 ORDER BY rowid DESC} +} {8 8 7 7 6 6} +do_test rowid-10.13 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5<=rowid ORDER BY rowid DESC} +} {8 8 7 7 6 6} +do_test rowid-10.14 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5=rowid ORDER BY rowid DESC} +} {5 5 4 4 3 3 2 2 1 1} +do_test rowid-10.18 { + execsql {SELECT rowid, a FROM t5 WHERE 5.5>rowid ORDER BY rowid DESC} +} {5 5 4 4 3 3 2 2 1 1} + +do_test rowid-10.30 { + execsql { + CREATE TABLE t6(a); + INSERT INTO t6(rowid,a) SELECT -a,a FROM t5; + SELECT rowid, * FROM t6; + } +} {-8 8 -7 7 -6 6 -5 5 -4 4 -3 3 -2 2 -1 1} +do_test rowid-10.31.1 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.5} +} {-5 5 -4 4 -3 3 -2 2 -1 1} +do_test rowid-10.31.2 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.0} +} {-5 5 -4 4 -3 3 -2 2 -1 1} +do_test rowid-10.32.1 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.5 ORDER BY rowid DESC} +} {-1 1 -2 2 -3 3 -4 4 -5 5} +do_test rowid-10.32.1 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>=-5.0 ORDER BY rowid DESC} +} {-1 1 -2 2 -3 3 -4 4 -5 5} +do_test rowid-10.33 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5<=rowid} +} {-5 5 -4 4 -3 3 -2 2 -1 1} +do_test rowid-10.34 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5<=rowid ORDER BY rowid DESC} +} 
{-1 1 -2 2 -3 3 -4 4 -5 5} +do_test rowid-10.35.1 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.5} +} {-5 5 -4 4 -3 3 -2 2 -1 1} +do_test rowid-10.35.2 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.0} +} {-4 4 -3 3 -2 2 -1 1} +do_test rowid-10.36.1 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.5 ORDER BY rowid DESC} +} {-1 1 -2 2 -3 3 -4 4 -5 5} +do_test rowid-10.36.2 { + execsql {SELECT rowid, a FROM t6 WHERE rowid>-5.0 ORDER BY rowid DESC} +} {-1 1 -2 2 -3 3 -4 4} +do_test rowid-10.37 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5=rowid} +} {-8 8 -7 7 -6 6} +do_test rowid-10.42 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5>=rowid ORDER BY rowid DESC} +} {-6 6 -7 7 -8 8} +do_test rowid-10.43 { + execsql {SELECT rowid, a FROM t6 WHERE rowid<-5.5} +} {-8 8 -7 7 -6 6} +do_test rowid-10.44 { + execsql {SELECT rowid, a FROM t6 WHERE rowid<-5.5 ORDER BY rowid DESC} +} {-6 6 -7 7 -8 8} +do_test rowid-10.44 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5>rowid} +} {-8 8 -7 7 -6 6} +do_test rowid-10.46 { + execsql {SELECT rowid, a FROM t6 WHERE -5.5>rowid ORDER BY rowid DESC} +} {-6 6 -7 7 -8 8} + +# Comparison of rowid against string values. +# +do_test rowid-11.1 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>'abc'} +} {} +do_test rowid-11.2 { + execsql {SELECT rowid, a FROM t5 WHERE rowid>='abc'} +} {} +do_test rowid-11.3 { + execsql {SELECT rowid, a FROM t5 WHERE rowid<'abc'} +} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8} +do_test rowid-11.4 { + execsql {SELECT rowid, a FROM t5 WHERE rowid<='abc'} +} {1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8} + +# Test the automatic generation of rowids when the table already contains +# a rowid with the maximum value. +# +do_test rowid-12.1 { + execsql { + CREATE TABLE t7(x INTEGER PRIMARY KEY, y); + INSERT INTO t7 VALUES(9223372036854775807,'a'); + SELECT y FROM t7; + } +} {a} +do_test rowid-12.2 { + execsql { + INSERT INTO t7 VALUES(NULL,'b'); + SELECT y FROM t7; + } +} {b a} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/safety.test b/libraries/sqlite/unix/sqlite-3.5.1/test/safety.test new file mode 100644 index 0000000..fb8c56c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/safety.test @@ -0,0 +1,68 @@ +# 2005 January 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite3SafetyOn and sqlite3SafetyOff +# functions. Those routines are not strictly necessary - they are +# designed to detect misuse of the library. 
+# +# $Id: safety.test,v 1.2 2006/01/03 00:33:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test safety-1.1 { + set DB [sqlite3_connection_pointer db] + db eval {CREATE TABLE t1(a)} + sqlite_set_magic $DB SQLITE_MAGIC_BUSY + catchsql { + SELECT name FROM sqlite_master; + } +} {1 {library routine called out of sequence}} +do_test safety-1.2 { + sqlite_set_magic $DB SQLITE_MAGIC_OPEN + catchsql { + SELECT name FROM sqlite_master + } +} {0 t1} + +do_test safety-2.1 { + proc safety_on {} "sqlite_set_magic $DB SQLITE_MAGIC_BUSY" + db function safety_on safety_on + catchsql { + SELECT safety_on(), name FROM sqlite_master + } +} {1 {library routine called out of sequence}} +do_test safety-2.2 { + catchsql { + SELECT 'hello' + } +} {1 {library routine called out of sequence}} +do_test safety-2.3 { + sqlite3_close $DB +} {SQLITE_MISUSE} +do_test safety-2.4 { + sqlite_set_magic $DB SQLITE_MAGIC_OPEN + execsql { + SELECT name FROM sqlite_master + } +} {t1} + +do_test safety-3.1 { + set rc [catch { + db eval {SELECT name FROM sqlite_master} { + sqlite_set_magic $DB SQLITE_MAGIC_BUSY + } + } msg] + lappend rc $msg +} {1 {library routine called out of sequence}} +sqlite_set_magic $DB SQLITE_MAGIC_OPEN + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/schema.test b/libraries/sqlite/unix/sqlite-3.5.1/test/schema.test new file mode 100644 index 0000000..7adda55 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/schema.test @@ -0,0 +1,365 @@ +# 2005 Jan 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file tests the various conditions under which an SQLITE_SCHEMA +# error should be returned. +# +# $Id: schema.test,v 1.7 2007/08/13 15:28:35 danielk1977 Exp $ + +#--------------------------------------------------------------------- +# When any of the following types of SQL statements or actions are +# executed, all pre-compiled statements are invalidated. An attempt +# to execute an invalidated statement always returns SQLITE_SCHEMA. +# +# CREATE/DROP TABLE...................................schema-1.* +# CREATE/DROP VIEW....................................schema-2.* +# CREATE/DROP TRIGGER.................................schema-3.* +# CREATE/DROP INDEX...................................schema-4.* +# DETACH..............................................schema-5.* +# Deleting a user-function............................schema-6.* +# Deleting a collation sequence.......................schema-7.* +# Setting or changing the authorization function......schema-8.* +# Rollback of a DDL statement.........................schema-12.* +# +# Test cases schema-9.* and schema-10.* test some specific bugs +# that came up during development. +# +# Test cases schema-11.* test that it is impossible to delete or +# change a collation sequence or user-function while SQL statements +# are executing. Adding new collations or functions is allowed. 
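(Editorial sketch, not part of the patch.) The recurring pattern in this file is: compile a statement with the plain sqlite3_prepare() interface, perform one of the schema-changing actions listed above, and check that the next sqlite3_step() fails and that finalizing (or resetting) the statement reports SQLITE_SCHEMA. Using the sqlite3_prepare/sqlite3_step/sqlite3_finalize wrappers that tester.tcl supplies, the skeleton looks roughly like this (demo is an illustrative table name, not one used by the tests):

  set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL]
  execsql { CREATE TABLE demo(a) }   ;# any schema change expires $::STMT
  sqlite3_step $::STMT               ;# => SQLITE_ERROR
  sqlite3_finalize $::STMT           ;# => SQLITE_SCHEMA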
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test schema-1.1 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE TABLE abc(a, b, c); + } + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-1.2 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} +do_test schema-1.3 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP TABLE abc; + } + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-1.4 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} + +ifcapable view { + do_test schema-2.1 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE VIEW v1 AS SELECT * FROM sqlite_master; + } + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-2.2 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} + do_test schema-2.3 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP VIEW v1; + } + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-2.4 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} +} + +ifcapable trigger { + do_test schema-3.1 { + execsql { + CREATE TABLE abc(a, b, c); + } + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE TRIGGER abc_trig AFTER INSERT ON abc BEGIN + SELECT 1, 2, 3; + END; + } + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-3.2 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} + do_test schema-3.3 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP TRIGGER abc_trig; + } + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-3.4 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} +} + +do_test schema-4.1 { + catchsql { + CREATE TABLE abc(a, b, c); + } + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE INDEX abc_index ON abc(a); + } + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-4.2 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} +do_test schema-4.3 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP INDEX abc_index; + } + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-4.4 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} + +#--------------------------------------------------------------------- +# Tests 5.1 to 5.4 check that prepared statements are invalidated when +# a database is DETACHed (but not when one is ATTACHed). +# +do_test schema-5.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + execsql { + ATTACH 'test2.db' AS aux; + } + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema-5.2 { + sqlite3_reset $::STMT +} {SQLITE_OK} +do_test schema-5.3 { + execsql { + DETACH aux; + } + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-5.4 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} + +#--------------------------------------------------------------------- +# Tests 6.* check that prepared statements are invalidated when +# a user-function is deleted (but not when one is added). 
+do_test schema-6.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + db function hello_function {} + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema-6.2 { + sqlite3_reset $::STMT +} {SQLITE_OK} +do_test schema-6.3 { + sqlite_delete_function $::DB hello_function + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test schema-6.4 { + sqlite3_finalize $::STMT +} {SQLITE_SCHEMA} + +#--------------------------------------------------------------------- +# Tests 7.* check that prepared statements are invalidated when +# a collation sequence is deleted (but not when one is added). +# +ifcapable utf16 { + do_test schema-7.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + add_test_collate $::DB 1 1 1 + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema-7.2 { + sqlite3_reset $::STMT + } {SQLITE_OK} + do_test schema-7.3 { + add_test_collate $::DB 0 0 0 + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-7.4 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} +} + +#--------------------------------------------------------------------- +# Tests 8.1 and 8.2 check that prepared statements are invalidated when +# the authorization function is set. +# +ifcapable auth { + do_test schema-8.1 { + set ::STMT [sqlite3_prepare $::DB {SELECT * FROM sqlite_master} -1 TAIL] + db auth {} + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-8.3 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} +} + +#--------------------------------------------------------------------- +# schema-9.1: Test that if a table is dropped by one database connection, +# other database connections are aware of the schema change. +# schema-9.2: Test that if a view is dropped by one database connection, +# other database connections are aware of the schema change. +# +do_test schema-9.1 { + sqlite3 db2 test.db + execsql { + DROP TABLE abc; + } db2 + db2 close + catchsql { + SELECT * FROM abc; + } +} {1 {no such table: abc}} +execsql { + CREATE TABLE abc(a, b, c); +} +ifcapable view { + do_test schema-9.2 { + execsql { + CREATE VIEW abcview AS SELECT * FROM abc; + } + sqlite3 db2 test.db + execsql { + DROP VIEW abcview; + } db2 + db2 close + catchsql { + SELECT * FROM abcview; + } + } {1 {no such table: abcview}} +} + +#--------------------------------------------------------------------- +# Test that if a CREATE TABLE statement fails because there are other +# btree cursors open on the same database file it does not corrupt +# the sqlite_master table. +# +# 2007-05-02: These tests have been overcome by events. Open btree +# cursors no longer block CREATE TABLE. But there is no reason not +# to keep the tests in the test suite. +# +do_test schema-10.1 { + execsql { + INSERT INTO abc VALUES(1, 2, 3); + } + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema-10.2 { + catchsql { + CREATE TABLE t2(a, b, c); + } +} {0 {}} +do_test schema-10.3 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema-10.4 { + sqlite3 db2 test.db + execsql { + SELECT * FROM abc + } db2 +} {1 2 3} +do_test schema-10.5 { + db2 close +} {} + +#--------------------------------------------------------------------- +# Attempting to delete or replace a user-function or collation sequence +# while there are active statements returns an SQLITE_BUSY error. +# +# schema-11.1 - 11.4: User function. +# schema-11.5 - 11.8: Collation sequence. 
+# +do_test schema-11.1 { + db function tstfunc {} + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema-11.2 { + sqlite_delete_function $::DB tstfunc +} {SQLITE_BUSY} +do_test schema-11.3 { + set rc [catch { + db function tstfunc {} + } msg] + list $rc $msg +} {1 {Unable to delete/modify user-function due to active statements}} +do_test schema-11.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema-11.5 { + db collate tstcollate {} + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema-11.6 { + sqlite_delete_collation $::DB tstcollate +} {SQLITE_BUSY} +do_test schema-11.7 { + set rc [catch { + db collate tstcollate {} + } msg] + list $rc $msg +} {1 {Unable to delete/modify collation sequence due to active statements}} +do_test schema-11.8 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +# The following demonstrates why statements need to be expired whenever +# there is a rollback (explicit or otherwise). +# +do_test schema-12.1 { + # Begin a transaction and create a table. This increments + # the schema cookie. Then compile an SQL statement, using + # the current (incremented) value of the cookie. + execsql { + BEGIN; + CREATE TABLE t3(a, b, c); + } + set ::STMT [sqlite3_prepare $::DB "CREATE TABLE t4(a,b,c)" -1 TAIL] + + # Rollback the transaction, resetting the schema cookie to the value + # it had at the start of this test case. Then create a table, + # incrementing the schema cookie. + execsql { + ROLLBACK; + CREATE TABLE t4(a, b, c); + } + + # The schema cookie now has the same value as it did when SQL statement + # $::STMT was prepared. So unless it has been expired, it would be + # possible to run the "CREATE TABLE t4" statement and create a + # duplicate table. + list [sqlite3_step $::STMT] [sqlite3_finalize $::STMT] +} {SQLITE_ERROR SQLITE_SCHEMA} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/schema2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/schema2.test new file mode 100644 index 0000000..593c80d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/schema2.test @@ -0,0 +1,338 @@ +# 2006 November 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file tests the various conditions under which an SQLITE_SCHEMA +# error should be returned. This is a copy of schema.test that +# has been altered to use sqlite3_prepare_v2 instead of sqlite3_prepare +# +# $Id: schema2.test,v 1.2 2007/05/02 17:54:56 drh Exp $ + +#--------------------------------------------------------------------- +# When any of the following types of SQL statements or actions are +# executed, all pre-compiled statements are invalidated. An attempt +# to execute an invalidated statement always returns SQLITE_SCHEMA. 
+# +# CREATE/DROP TABLE...................................schema2-1.* +# CREATE/DROP VIEW....................................schema2-2.* +# CREATE/DROP TRIGGER.................................schema2-3.* +# CREATE/DROP INDEX...................................schema2-4.* +# DETACH..............................................schema2-5.* +# Deleting a user-function............................schema2-6.* +# Deleting a collation sequence.......................schema2-7.* +# Setting or changing the authorization function......schema2-8.* +# +# Test cases schema2-9.* and schema2-10.* test some specific bugs +# that came up during development. +# +# Test cases schema2-11.* test that it is impossible to delete or +# change a collation sequence or user-function while SQL statements +# are executing. Adding new collations or functions is allowed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test schema2-1.1 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE TABLE abc(a, b, c); + } + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-1.2 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema2-1.3 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP TABLE abc; + } + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema2-1.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + + +ifcapable view { + do_test schema2-2.1 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE VIEW v1 AS SELECT * FROM sqlite_master; + } + sqlite3_step $::STMT + } {SQLITE_ROW} + do_test schema2-2.2 { + sqlite3_finalize $::STMT + } {SQLITE_OK} + do_test schema2-2.3 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP VIEW v1; + } + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema2-2.4 { + sqlite3_finalize $::STMT + } {SQLITE_OK} +} + +ifcapable trigger { + do_test schema2-3.1 { + execsql { + CREATE TABLE abc(a, b, c); + } + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE TRIGGER abc_trig AFTER INSERT ON abc BEGIN + SELECT 1, 2, 3; + END; + } + sqlite3_step $::STMT + } {SQLITE_ROW} + do_test schema2-3.2 { + sqlite3_finalize $::STMT + } {SQLITE_OK} + do_test schema2-3.3 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP TRIGGER abc_trig; + } + sqlite3_step $::STMT + } {SQLITE_ROW} + do_test schema2-3.4 { + sqlite3_finalize $::STMT + } {SQLITE_OK} +} + +do_test schema2-4.1 { + catchsql { + CREATE TABLE abc(a, b, c); + } + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + CREATE INDEX abc_index ON abc(a); + } + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-4.2 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema2-4.3 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + execsql { + DROP INDEX abc_index; + } + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-4.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +#--------------------------------------------------------------------- +# Tests 5.1 to 5.4 check that prepared statements are invalidated when +# a database is DETACHed (but not when one is ATTACHed). 
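(Editorial sketch, not part of the patch.) The contrast with schema.test is that statements compiled with sqlite3_prepare_v2() transparently recompile themselves against the new schema on the next step, so the same schema changes that produced an SQLITE_ERROR/SQLITE_SCHEMA pair there now simply run to completion. Roughly, using the harness wrappers from tester.tcl (demo_v2 is an illustrative table name, not one used by the tests):

  set ::STMT [sqlite3_prepare_v2 $::DB {SELECT name FROM sqlite_master} -1 TAIL]
  execsql { CREATE TABLE demo_v2(a) }   ;# schema change while $::STMT exists
  sqlite3_step $::STMT                  ;# recompiles internally => SQLITE_ROW
  sqlite3_finalize $::STMT              ;# => SQLITE_OK, not SQLITE_SCHEMA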
+# +do_test schema2-5.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + execsql { + ATTACH 'test2.db' AS aux; + } + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema2-5.2 { + sqlite3_reset $::STMT +} {SQLITE_OK} +do_test schema2-5.3 { + execsql { + DETACH aux; + } + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema2-5.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +#--------------------------------------------------------------------- +# Tests 6.* check that prepared statements are invalidated when +# a user-function is deleted (but not when one is added). +do_test schema2-6.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + db function hello_function {} + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema2-6.2 { + sqlite3_reset $::STMT +} {SQLITE_OK} +do_test schema2-6.3 { + sqlite_delete_function $::DB hello_function + sqlite3_step $::STMT +} {SQLITE_DONE} +do_test schema2-6.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +#--------------------------------------------------------------------- +# Tests 7.* check that prepared statements are invalidated when +# a collation sequence is deleted (but not when one is added). +# +ifcapable utf16 { + do_test schema2-7.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + add_test_collate $::DB 1 1 1 + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema2-7.2 { + sqlite3_reset $::STMT + } {SQLITE_OK} + do_test schema2-7.3 { + add_test_collate $::DB 0 0 0 + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema2-7.4 { + sqlite3_finalize $::STMT + } {SQLITE_OK} +} + +#--------------------------------------------------------------------- +# Tests 8.1 and 8.2 check that prepared statements are invalidated when +# the authorization function is set. +# +ifcapable auth { + do_test schema2-8.1 { + set ::STMT [sqlite3_prepare_v2 $::DB {SELECT * FROM sqlite_master} -1 TAIL] + db auth {} + sqlite3_step $::STMT + } {SQLITE_ROW} + do_test schema2-8.3 { + sqlite3_finalize $::STMT + } {SQLITE_OK} +} + +#--------------------------------------------------------------------- +# schema2-9.1: Test that if a table is dropped by one database connection, +# other database connections are aware of the schema change. +# schema2-9.2: Test that if a view is dropped by one database connection, +# other database connections are aware of the schema change. +# +do_test schema2-9.1 { + sqlite3 db2 test.db + execsql { + DROP TABLE abc; + } db2 + db2 close + catchsql { + SELECT * FROM abc; + } +} {1 {no such table: abc}} +execsql { + CREATE TABLE abc(a, b, c); +} +ifcapable view { + do_test schema2-9.2 { + execsql { + CREATE VIEW abcview AS SELECT * FROM abc; + } + sqlite3 db2 test.db + execsql { + DROP VIEW abcview; + } db2 + db2 close + catchsql { + SELECT * FROM abcview; + } + } {1 {no such table: abcview}} +} + +#--------------------------------------------------------------------- +# Test that if a CREATE TABLE statement fails because there are other +# btree cursors open on the same database file it does not corrupt +# the sqlite_master table. +# +# 2007-05-02: These tests have been overcome by events. Open btree +# cursors no longer block CREATE TABLE. But there is no reason not +# to keep the tests in the test suite. 
+# +do_test schema2-10.1 { + execsql { + INSERT INTO abc VALUES(1, 2, 3); + } + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-10.2 { + catchsql { + CREATE TABLE t2(a, b, c); + } +} {0 {}} +do_test schema2-10.3 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema2-10.4 { + sqlite3 db2 test.db + execsql { + SELECT * FROM abc + } db2 +} {1 2 3} +do_test schema2-10.5 { + db2 close +} {} + +#--------------------------------------------------------------------- +# Attempting to delete or replace a user-function or collation sequence +# while there are active statements returns an SQLITE_BUSY error. +# +# schema2-11.1 - 11.4: User function. +# schema2-11.5 - 11.8: Collation sequence. +# +do_test schema2-11.1 { + db function tstfunc {} + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-11.2 { + sqlite_delete_function $::DB tstfunc +} {SQLITE_BUSY} +do_test schema2-11.3 { + set rc [catch { + db function tstfunc {} + } msg] + list $rc $msg +} {1 {Unable to delete/modify user-function due to active statements}} +do_test schema2-11.4 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test schema2-11.5 { + db collate tstcollate {} + set sql {SELECT * FROM abc} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test schema2-11.6 { + sqlite_delete_collation $::DB tstcollate +} {SQLITE_BUSY} +do_test schema2-11.7 { + set rc [catch { + db collate tstcollate {} + } msg] + list $rc $msg +} {1 {Unable to delete/modify collation sequence due to active statements}} +do_test schema2-11.8 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select1.test new file mode 100644 index 0000000..0393145 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select1.test @@ -0,0 +1,913 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the SELECT statement. +# +# $Id: select1.test,v 1.54 2007/07/23 22:51:15 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Try to select on a non-existant table. +# +do_test select1-1.1 { + set v [catch {execsql {SELECT * FROM test1}} msg] + lappend v $msg +} {1 {no such table: test1}} + + +execsql {CREATE TABLE test1(f1 int, f2 int)} + +do_test select1-1.2 { + set v [catch {execsql {SELECT * FROM test1, test2}} msg] + lappend v $msg +} {1 {no such table: test2}} +do_test select1-1.3 { + set v [catch {execsql {SELECT * FROM test2, test1}} msg] + lappend v $msg +} {1 {no such table: test2}} + +execsql {INSERT INTO test1(f1,f2) VALUES(11,22)} + + +# Make sure the columns are extracted correctly. 
+# +do_test select1-1.4 { + execsql {SELECT f1 FROM test1} +} {11} +do_test select1-1.5 { + execsql {SELECT f2 FROM test1} +} {22} +do_test select1-1.6 { + execsql {SELECT f2, f1 FROM test1} +} {22 11} +do_test select1-1.7 { + execsql {SELECT f1, f2 FROM test1} +} {11 22} +do_test select1-1.8 { + execsql {SELECT * FROM test1} +} {11 22} +do_test select1-1.8.1 { + execsql {SELECT *, * FROM test1} +} {11 22 11 22} +do_test select1-1.8.2 { + execsql {SELECT *, min(f1,f2), max(f1,f2) FROM test1} +} {11 22 11 22} +do_test select1-1.8.3 { + execsql {SELECT 'one', *, 'two', * FROM test1} +} {one 11 22 two 11 22} + +execsql {CREATE TABLE test2(r1 real, r2 real)} +execsql {INSERT INTO test2(r1,r2) VALUES(1.1,2.2)} + +do_test select1-1.9 { + execsql {SELECT * FROM test1, test2} +} {11 22 1.1 2.2} +do_test select1-1.9.1 { + execsql {SELECT *, 'hi' FROM test1, test2} +} {11 22 1.1 2.2 hi} +do_test select1-1.9.2 { + execsql {SELECT 'one', *, 'two', * FROM test1, test2} +} {one 11 22 1.1 2.2 two 11 22 1.1 2.2} +do_test select1-1.10 { + execsql {SELECT test1.f1, test2.r1 FROM test1, test2} +} {11 1.1} +do_test select1-1.11 { + execsql {SELECT test1.f1, test2.r1 FROM test2, test1} +} {11 1.1} +do_test select1-1.11.1 { + execsql {SELECT * FROM test2, test1} +} {1.1 2.2 11 22} +do_test select1-1.11.2 { + execsql {SELECT * FROM test1 AS a, test1 AS b} +} {11 22 11 22} +do_test select1-1.12 { + execsql {SELECT max(test1.f1,test2.r1), min(test1.f2,test2.r2) + FROM test2, test1} +} {11 2.2} +do_test select1-1.13 { + execsql {SELECT min(test1.f1,test2.r1), max(test1.f2,test2.r2) + FROM test1, test2} +} {1.1 22} + +set long {This is a string that is too big to fit inside a NBFS buffer} +do_test select1-2.0 { + execsql " + DROP TABLE test2; + DELETE FROM test1; + INSERT INTO test1 VALUES(11,22); + INSERT INTO test1 VALUES(33,44); + CREATE TABLE t3(a,b); + INSERT INTO t3 VALUES('abc',NULL); + INSERT INTO t3 VALUES(NULL,'xyz'); + INSERT INTO t3 SELECT * FROM test1; + CREATE TABLE t4(a,b); + INSERT INTO t4 VALUES(NULL,'$long'); + SELECT * FROM t3; + " +} {abc {} {} xyz 11 22 33 44} + +# Error messges from sqliteExprCheck +# +do_test select1-2.1 { + set v [catch {execsql {SELECT count(f1,f2) FROM test1}} msg] + lappend v $msg +} {1 {wrong number of arguments to function count()}} +do_test select1-2.2 { + set v [catch {execsql {SELECT count(f1) FROM test1}} msg] + lappend v $msg +} {0 2} +do_test select1-2.3 { + set v [catch {execsql {SELECT Count() FROM test1}} msg] + lappend v $msg +} {0 2} +do_test select1-2.4 { + set v [catch {execsql {SELECT COUNT(*) FROM test1}} msg] + lappend v $msg +} {0 2} +do_test select1-2.5 { + set v [catch {execsql {SELECT COUNT(*)+1 FROM test1}} msg] + lappend v $msg +} {0 3} +do_test select1-2.5.1 { + execsql {SELECT count(*),count(a),count(b) FROM t3} +} {4 3 3} +do_test select1-2.5.2 { + execsql {SELECT count(*),count(a),count(b) FROM t4} +} {1 0 1} +do_test select1-2.5.3 { + execsql {SELECT count(*),count(a),count(b) FROM t4 WHERE b=5} +} {0 0 0} +do_test select1-2.6 { + set v [catch {execsql {SELECT min(*) FROM test1}} msg] + lappend v $msg +} {1 {wrong number of arguments to function min()}} +do_test select1-2.7 { + set v [catch {execsql {SELECT Min(f1) FROM test1}} msg] + lappend v $msg +} {0 11} +do_test select1-2.8 { + set v [catch {execsql {SELECT MIN(f1,f2) FROM test1}} msg] + lappend v [lsort $msg] +} {0 {11 33}} +do_test select1-2.8.1 { + execsql {SELECT coalesce(min(a),'xyzzy') FROM t3} +} {11} +do_test select1-2.8.2 { + execsql {SELECT min(coalesce(a,'xyzzy')) FROM t3} +} 
{11} +do_test select1-2.8.3 { + execsql {SELECT min(b), min(b) FROM t4} +} [list $long $long] +do_test select1-2.9 { + set v [catch {execsql {SELECT MAX(*) FROM test1}} msg] + lappend v $msg +} {1 {wrong number of arguments to function MAX()}} +do_test select1-2.10 { + set v [catch {execsql {SELECT Max(f1) FROM test1}} msg] + lappend v $msg +} {0 33} +do_test select1-2.11 { + set v [catch {execsql {SELECT max(f1,f2) FROM test1}} msg] + lappend v [lsort $msg] +} {0 {22 44}} +do_test select1-2.12 { + set v [catch {execsql {SELECT MAX(f1,f2)+1 FROM test1}} msg] + lappend v [lsort $msg] +} {0 {23 45}} +do_test select1-2.13 { + set v [catch {execsql {SELECT MAX(f1)+1 FROM test1}} msg] + lappend v $msg +} {0 34} +do_test select1-2.13.1 { + execsql {SELECT coalesce(max(a),'xyzzy') FROM t3} +} {abc} +do_test select1-2.13.2 { + execsql {SELECT max(coalesce(a,'xyzzy')) FROM t3} +} {xyzzy} +do_test select1-2.14 { + set v [catch {execsql {SELECT SUM(*) FROM test1}} msg] + lappend v $msg +} {1 {wrong number of arguments to function SUM()}} +do_test select1-2.15 { + set v [catch {execsql {SELECT Sum(f1) FROM test1}} msg] + lappend v $msg +} {0 44} +do_test select1-2.16 { + set v [catch {execsql {SELECT sum(f1,f2) FROM test1}} msg] + lappend v $msg +} {1 {wrong number of arguments to function sum()}} +do_test select1-2.17 { + set v [catch {execsql {SELECT SUM(f1)+1 FROM test1}} msg] + lappend v $msg +} {0 45} +do_test select1-2.17.1 { + execsql {SELECT sum(a) FROM t3} +} {44.0} +do_test select1-2.18 { + set v [catch {execsql {SELECT XYZZY(f1) FROM test1}} msg] + lappend v $msg +} {1 {no such function: XYZZY}} +do_test select1-2.19 { + set v [catch {execsql {SELECT SUM(min(f1,f2)) FROM test1}} msg] + lappend v $msg +} {0 44} +do_test select1-2.20 { + set v [catch {execsql {SELECT SUM(min(f1)) FROM test1}} msg] + lappend v $msg +} {1 {misuse of aggregate function min()}} + +# Ticket #2526 +# +do_test select1-2.21 { + catchsql { + SELECT min(f1) AS m FROM test1 GROUP BY f1 HAVING max(m+5)<10 + } +} {1 {misuse of aliased aggregate m}} +do_test select1-2.22 { + catchsql { + SELECT coalesce(min(f1)+5,11) AS m FROM test1 + GROUP BY f1 + HAVING max(m+5)<10 + } +} {1 {misuse of aliased aggregate m}} +do_test select1-2.23 { + execsql { + CREATE TABLE tkt2526(a,b,c PRIMARY KEY); + INSERT INTO tkt2526 VALUES('x','y',NULL); + INSERT INTO tkt2526 VALUES('x','z',NULL); + } + catchsql { + SELECT count(a) AS cn FROM tkt2526 GROUP BY a HAVING cn=11}} msg] + lappend v [lsort $msg] +} {0 {11 33}} +do_test select1-3.5 { + set v [catch {execsql {SELECT f1 FROM test1 WHERE f1>11}} msg] + lappend v [lsort $msg] +} {0 33} +do_test select1-3.6 { + set v [catch {execsql {SELECT f1 FROM test1 WHERE f1!=11}} msg] + lappend v [lsort $msg] +} {0 33} +do_test select1-3.7 { + set v [catch {execsql {SELECT f1 FROM test1 WHERE min(f1,f2)!=11}} msg] + lappend v [lsort $msg] +} {0 33} +do_test select1-3.8 { + set v [catch {execsql {SELECT f1 FROM test1 WHERE max(f1,f2)!=11}} msg] + lappend v [lsort $msg] +} {0 {11 33}} +do_test select1-3.9 { + set v [catch {execsql {SELECT f1 FROM test1 WHERE count(f1,f2)!=11}} msg] + lappend v $msg +} {1 {wrong number of arguments to function count()}} + +# ORDER BY expressions +# +do_test select1-4.1 { + set v [catch {execsql {SELECT f1 FROM test1 ORDER BY f1}} msg] + lappend v $msg +} {0 {11 33}} +do_test select1-4.2 { + set v [catch {execsql {SELECT f1 FROM test1 ORDER BY -f1}} msg] + lappend v $msg +} {0 {33 11}} +do_test select1-4.3 { + set v [catch {execsql {SELECT f1 FROM test1 ORDER BY min(f1,f2)}} 
msg] + lappend v $msg +} {0 {11 33}} +do_test select1-4.4 { + set v [catch {execsql {SELECT f1 FROM test1 ORDER BY min(f1)}} msg] + lappend v $msg +} {1 {misuse of aggregate function min()}} + +# The restriction not allowing constants in the ORDER BY clause +# has been removed. See ticket #1768 +#do_test select1-4.5 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY 8.4; +# } +#} {1 {ORDER BY terms must not be non-integer constants}} +#do_test select1-4.6 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY '8.4'; +# } +#} {1 {ORDER BY terms must not be non-integer constants}} +#do_test select1-4.7.1 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY 'xyz'; +# } +#} {1 {ORDER BY terms must not be non-integer constants}} +#do_test select1-4.7.2 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY -8.4; +# } +#} {1 {ORDER BY terms must not be non-integer constants}} +#do_test select1-4.7.3 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY +8.4; +# } +#} {1 {ORDER BY terms must not be non-integer constants}} +#do_test select1-4.7.4 { +# catchsql { +# SELECT f1 FROM test1 ORDER BY 4294967296; -- constant larger than 32 bits +# } +#} {1 {ORDER BY terms must not be non-integer constants}} + +do_test select1-4.5 { + execsql { + SELECT f1 FROM test1 ORDER BY 8.4 + } +} {11 33} +do_test select1-4.6 { + execsql { + SELECT f1 FROM test1 ORDER BY '8.4' + } +} {11 33} + +do_test select1-4.8 { + execsql { + CREATE TABLE t5(a,b); + INSERT INTO t5 VALUES(1,10); + INSERT INTO t5 VALUES(2,9); + SELECT * FROM t5 ORDER BY 1; + } +} {1 10 2 9} +do_test select1-4.9.1 { + execsql { + SELECT * FROM t5 ORDER BY 2; + } +} {2 9 1 10} +do_test select1-4.9.2 { + execsql { + SELECT * FROM t5 ORDER BY +2; + } +} {2 9 1 10} +do_test select1-4.10.1 { + catchsql { + SELECT * FROM t5 ORDER BY 3; + } +} {1 {ORDER BY column number 3 out of range - should be between 1 and 2}} +do_test select1-4.10.2 { + catchsql { + SELECT * FROM t5 ORDER BY -1; + } +} {1 {ORDER BY column number -1 out of range - should be between 1 and 2}} +do_test select1-4.11 { + execsql { + INSERT INTO t5 VALUES(3,10); + SELECT * FROM t5 ORDER BY 2, 1 DESC; + } +} {2 9 3 10 1 10} +do_test select1-4.12 { + execsql { + SELECT * FROM t5 ORDER BY 1 DESC, b; + } +} {3 10 2 9 1 10} +do_test select1-4.13 { + execsql { + SELECT * FROM t5 ORDER BY b DESC, 1; + } +} {1 10 3 10 2 9} + + +# ORDER BY ignored on an aggregate query +# +do_test select1-5.1 { + set v [catch {execsql {SELECT max(f1) FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 33} + +execsql {CREATE TABLE test2(t1 test, t2 text)} +execsql {INSERT INTO test2 VALUES('abc','xyz')} + +# Check for column naming +# +do_test select1-6.1 { + set v [catch {execsql2 {SELECT f1 FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {f1 11 f1 33}} +do_test select1-6.1.1 { + db eval {PRAGMA full_column_names=on} + set v [catch {execsql2 {SELECT f1 FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {test1.f1 11 test1.f1 33}} +do_test select1-6.1.2 { + set v [catch {execsql2 {SELECT f1 as 'f1' FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {f1 11 f1 33}} +do_test select1-6.1.3 { + set v [catch {execsql2 {SELECT * FROM test1 WHERE f1==11}} msg] + lappend v $msg +} {0 {f1 11 f2 22}} +do_test select1-6.1.4 { + set v [catch {execsql2 {SELECT DISTINCT * FROM test1 WHERE f1==11}} msg] + db eval {PRAGMA full_column_names=off} + lappend v $msg +} {0 {f1 11 f2 22}} +do_test select1-6.1.5 { + set v [catch {execsql2 {SELECT * FROM test1 WHERE f1==11}} msg] + lappend v $msg +} {0 {f1 11 f2 22}} +do_test select1-6.1.6 { + set v 
[catch {execsql2 {SELECT DISTINCT * FROM test1 WHERE f1==11}} msg] + lappend v $msg +} {0 {f1 11 f2 22}} +do_test select1-6.2 { + set v [catch {execsql2 {SELECT f1 as xyzzy FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {xyzzy 11 xyzzy 33}} +do_test select1-6.3 { + set v [catch {execsql2 {SELECT f1 as "xyzzy" FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {xyzzy 11 xyzzy 33}} +do_test select1-6.3.1 { + set v [catch {execsql2 {SELECT f1 as 'xyzzy ' FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {{xyzzy } 11 {xyzzy } 33}} +do_test select1-6.4 { + set v [catch {execsql2 {SELECT f1+F2 as xyzzy FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {xyzzy 33 xyzzy 77}} +do_test select1-6.4a { + set v [catch {execsql2 {SELECT f1+F2 FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {f1+F2 33 f1+F2 77}} +do_test select1-6.5 { + set v [catch {execsql2 {SELECT test1.f1+F2 FROM test1 ORDER BY f2}} msg] + lappend v $msg +} {0 {test1.f1+F2 33 test1.f1+F2 77}} +do_test select1-6.5.1 { + execsql2 {PRAGMA full_column_names=on} + set v [catch {execsql2 {SELECT test1.f1+F2 FROM test1 ORDER BY f2}} msg] + execsql2 {PRAGMA full_column_names=off} + lappend v $msg +} {0 {test1.f1+F2 33 test1.f1+F2 77}} +do_test select1-6.6 { + set v [catch {execsql2 {SELECT test1.f1+F2, t1 FROM test1, test2 + ORDER BY f2}} msg] + lappend v $msg +} {0 {test1.f1+F2 33 t1 abc test1.f1+F2 77 t1 abc}} +do_test select1-6.7 { + set v [catch {execsql2 {SELECT A.f1, t1 FROM test1 as A, test2 + ORDER BY f2}} msg] + lappend v $msg +} {0 {f1 11 t1 abc f1 33 t1 abc}} +do_test select1-6.8 { + set v [catch {execsql2 {SELECT A.f1, f1 FROM test1 as A, test1 as B + ORDER BY f2}} msg] + lappend v $msg +} {1 {ambiguous column name: f1}} +do_test select1-6.8b { + set v [catch {execsql2 {SELECT A.f1, B.f1 FROM test1 as A, test1 as B + ORDER BY f2}} msg] + lappend v $msg +} {1 {ambiguous column name: f2}} +do_test select1-6.8c { + set v [catch {execsql2 {SELECT A.f1, f1 FROM test1 as A, test1 as A + ORDER BY f2}} msg] + lappend v $msg +} {1 {ambiguous column name: A.f1}} +do_test select1-6.9.1 { + set v [catch {execsql {SELECT A.f1, B.f1 FROM test1 as A, test1 as B + ORDER BY A.f1, B.f1}} msg] + lappend v $msg +} {0 {11 11 11 33 33 11 33 33}} +do_test select1-6.9.2 { + set v [catch {execsql2 {SELECT A.f1, B.f1 FROM test1 as A, test1 as B + ORDER BY A.f1, B.f1}} msg] + lappend v $msg +} {0 {f1 11 f1 11 f1 33 f1 33 f1 11 f1 11 f1 33 f1 33}} + +ifcapable compound { +do_test select1-6.10 { + set v [catch {execsql2 { + SELECT f1 FROM test1 UNION SELECT f2 FROM test1 + ORDER BY f2; + }} msg] + lappend v $msg +} {0 {f1 11 f1 22 f1 33 f1 44}} +do_test select1-6.11 { + set v [catch {execsql2 { + SELECT f1 FROM test1 UNION SELECT f2+100 FROM test1 + ORDER BY f2+100; + }} msg] + lappend v $msg +} {1 {ORDER BY term number 1 does not match any result column}} + +# Ticket #2296 +do_test select1-6.20 { + execsql { + CREATE TABLE t6(a TEXT, b TEXT); + INSERT INTO t6 VALUES('a','0'); + INSERT INTO t6 VALUES('b','1'); + INSERT INTO t6 VALUES('c','2'); + INSERT INTO t6 VALUES('d','3'); + SELECT a FROM t6 WHERE b IN + (SELECT b FROM t6 WHERE a<='b' UNION SELECT '3' AS x + ORDER BY 1 LIMIT 1) + } +} {a} +do_test select1-6.21 { + execsql { + SELECT a FROM t6 WHERE b IN + (SELECT b FROM t6 WHERE a<='b' UNION SELECT '3' AS x + ORDER BY 1 DESC LIMIT 1) + } +} {d} +do_test select1-6.22 { + execsql { + SELECT a FROM t6 WHERE b IN + (SELECT b FROM t6 WHERE a<='b' UNION SELECT '3' AS x + ORDER BY b LIMIT 2) + ORDER BY a; + } +} {a b} +do_test select1-6.23 { + 
execsql { + SELECT a FROM t6 WHERE b IN + (SELECT b FROM t6 WHERE a<='b' UNION SELECT '3' AS x + ORDER BY x DESC LIMIT 2) + ORDER BY a; + } +} {b d} + +} ;#ifcapable compound + +do_test select1-7.1 { + set v [catch {execsql { + SELECT f1 FROM test1 WHERE f2=; + }} msg] + lappend v $msg +} {1 {near ";": syntax error}} +ifcapable compound { +do_test select1-7.2 { + set v [catch {execsql { + SELECT f1 FROM test1 UNION SELECT WHERE; + }} msg] + lappend v $msg +} {1 {near "WHERE": syntax error}} +} ;# ifcapable compound +do_test select1-7.3 { + set v [catch {execsql {SELECT f1 FROM test1 as 'hi', test2 as}} msg] + lappend v $msg +} {1 {near "as": syntax error}} +do_test select1-7.4 { + set v [catch {execsql { + SELECT f1 FROM test1 ORDER BY; + }} msg] + lappend v $msg +} {1 {near ";": syntax error}} +do_test select1-7.5 { + set v [catch {execsql { + SELECT f1 FROM test1 ORDER BY f1 desc, f2 where; + }} msg] + lappend v $msg +} {1 {near "where": syntax error}} +do_test select1-7.6 { + set v [catch {execsql { + SELECT count(f1,f2 FROM test1; + }} msg] + lappend v $msg +} {1 {near "FROM": syntax error}} +do_test select1-7.7 { + set v [catch {execsql { + SELECT count(f1,f2+) FROM test1; + }} msg] + lappend v $msg +} {1 {near ")": syntax error}} +do_test select1-7.8 { + set v [catch {execsql { + SELECT f1 FROM test1 ORDER BY f2, f1+; + }} msg] + lappend v $msg +} {1 {near ";": syntax error}} +do_test select1-7.9 { + catchsql { + SELECT f1 FROM test1 LIMIT 5+3 OFFSET 11 ORDER BY f2; + } +} {1 {near "ORDER": syntax error}} + +do_test select1-8.1 { + execsql {SELECT f1 FROM test1 WHERE 4.3+2.4 OR 1 ORDER BY f1} +} {11 33} +do_test select1-8.2 { + execsql { + SELECT f1 FROM test1 WHERE ('x' || f1) BETWEEN 'x10' AND 'x20' + ORDER BY f1 + } +} {11} +do_test select1-8.3 { + execsql { + SELECT f1 FROM test1 WHERE 5-3==2 + ORDER BY f1 + } +} {11 33} + +# TODO: This test is failing because f1 is now being loaded off the +# disk as a vdbe integer, not a string. Hence the value of f1/(f1-11) +# changes because of rounding. Disable the test for now. +if 0 { +do_test select1-8.4 { + execsql { + SELECT coalesce(f1/(f1-11),'x'), + coalesce(min(f1/(f1-11),5),'y'), + coalesce(max(f1/(f1-33),6),'z') + FROM test1 ORDER BY f1 + } +} {x y 6 1.5 1.5 z} +} +do_test select1-8.5 { + execsql { + SELECT min(1,2,3), -max(1,2,3) + FROM test1 ORDER BY f1 + } +} {1 -3 1 -3} + + +# Check the behavior when the result set is empty +# +# SQLite v3 always sets r(*). 
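# A sketch for illustration (not from the original test file): as the comment
# above says, the version-3 Tcl interface fills in the special array slot
# r(*) with the result-column names even when the query produces no rows,
# which is what the select1-9.* tests below rely on. In miniature (the array
# name "res" is invented for the example):
catch {unset res}
db eval {SELECT f1, f2 FROM test1 WHERE 0} res {}
set res(*)     ;# -> f1 f2 (the column names), even though the loop body never ran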
+# +# do_test select1-9.1 { +# catch {unset r} +# set r(*) {} +# db eval {SELECT * FROM test1 WHERE f1<0} r {} +# set r(*) +# } {} +do_test select1-9.2 { + execsql {PRAGMA empty_result_callbacks=on} + catch {unset r} + set r(*) {} + db eval {SELECT * FROM test1 WHERE f1<0} r {} + set r(*) +} {f1 f2} +ifcapable subquery { + do_test select1-9.3 { + set r(*) {} + db eval {SELECT * FROM test1 WHERE f1<(select count(*) from test2)} r {} + set r(*) + } {f1 f2} +} +do_test select1-9.4 { + set r(*) {} + db eval {SELECT * FROM test1 ORDER BY f1} r {} + set r(*) +} {f1 f2} +do_test select1-9.5 { + set r(*) {} + db eval {SELECT * FROM test1 WHERE f1<0 ORDER BY f1} r {} + set r(*) +} {f1 f2} +unset r + +# Check for ORDER BY clauses that refer to an AS name in the column list +# +do_test select1-10.1 { + execsql { + SELECT f1 AS x FROM test1 ORDER BY x + } +} {11 33} +do_test select1-10.2 { + execsql { + SELECT f1 AS x FROM test1 ORDER BY -x + } +} {33 11} +do_test select1-10.3 { + execsql { + SELECT f1-23 AS x FROM test1 ORDER BY abs(x) + } +} {10 -12} +do_test select1-10.4 { + execsql { + SELECT f1-23 AS x FROM test1 ORDER BY -abs(x) + } +} {-12 10} +do_test select1-10.5 { + execsql { + SELECT f1-22 AS x, f2-22 as y FROM test1 + } +} {-11 0 11 22} +do_test select1-10.6 { + execsql { + SELECT f1-22 AS x, f2-22 as y FROM test1 WHERE x>0 AND y<50 + } +} {11 22} + +# Check the ability to specify "TABLE.*" in the result set of a SELECT +# +do_test select1-11.1 { + execsql { + DELETE FROM t3; + DELETE FROM t4; + INSERT INTO t3 VALUES(1,2); + INSERT INTO t4 VALUES(3,4); + SELECT * FROM t3, t4; + } +} {1 2 3 4} +do_test select1-11.2.1 { + execsql { + SELECT * FROM t3, t4; + } +} {1 2 3 4} +do_test select1-11.2.2 { + execsql2 { + SELECT * FROM t3, t4; + } +} {a 3 b 4 a 3 b 4} +do_test select1-11.4.1 { + execsql { + SELECT t3.*, t4.b FROM t3, t4; + } +} {1 2 4} +do_test select1-11.4.2 { + execsql { + SELECT "t3".*, t4.b FROM t3, t4; + } +} {1 2 4} +do_test select1-11.5.1 { + execsql2 { + SELECT t3.*, t4.b FROM t3, t4; + } +} {a 1 b 4 b 4} +do_test select1-11.6 { + execsql2 { + SELECT x.*, y.b FROM t3 AS x, t4 AS y; + } +} {a 1 b 4 b 4} +do_test select1-11.7 { + execsql { + SELECT t3.b, t4.* FROM t3, t4; + } +} {2 3 4} +do_test select1-11.8 { + execsql2 { + SELECT t3.b, t4.* FROM t3, t4; + } +} {b 4 a 3 b 4} +do_test select1-11.9 { + execsql2 { + SELECT x.b, y.* FROM t3 AS x, t4 AS y; + } +} {b 4 a 3 b 4} +do_test select1-11.10 { + catchsql { + SELECT t5.* FROM t3, t4; + } +} {1 {no such table: t5}} +do_test select1-11.11 { + catchsql { + SELECT t3.* FROM t3 AS x, t4; + } +} {1 {no such table: t3}} +ifcapable subquery { + do_test select1-11.12 { + execsql2 { + SELECT t3.* FROM t3, (SELECT max(a), max(b) FROM t4) + } + } {a 1 b 2} + do_test select1-11.13 { + execsql2 { + SELECT t3.* FROM (SELECT max(a), max(b) FROM t4), t3 + } + } {a 1 b 2} + do_test select1-11.14 { + execsql2 { + SELECT * FROM t3, (SELECT max(a), max(b) FROM t4) AS 'tx' + } + } {a 1 b 2 max(a) 3 max(b) 4} + do_test select1-11.15 { + execsql2 { + SELECT y.*, t3.* FROM t3, (SELECT max(a), max(b) FROM t4) AS y + } + } {max(a) 3 max(b) 4 a 1 b 2} +} +do_test select1-11.16 { + execsql2 { + SELECT y.* FROM t3 as y, t4 as z + } +} {a 1 b 2} + +# Tests of SELECT statements without a FROM clause. 
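# A sketch for illustration (not from the original test file): a SELECT with
# no FROM clause simply evaluates its expression list once, which is what the
# select1-12.* tests below exercise; it doubles as a handy calculator. The
# variable name "row" is invented for the example:
db eval {SELECT 2*3+1 AS n, 'abc' || 'def' AS s} row {
  puts "$row(n) $row(s)"    ;# prints: 7 abcdef
}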
+# +do_test select1-12.1 { + execsql2 { + SELECT 1+2+3 + } +} {1+2+3 6} +do_test select1-12.2 { + execsql2 { + SELECT 1,'hello',2 + } +} {1 1 'hello' hello 2 2} +do_test select1-12.3 { + execsql2 { + SELECT 1 AS 'a','hello' AS 'b',2 AS 'c' + } +} {a 1 b hello c 2} +do_test select1-12.4 { + execsql { + DELETE FROM t3; + INSERT INTO t3 VALUES(1,2); + } +} {} + +ifcapable compound { +do_test select1-12.5 { + execsql { + SELECT * FROM t3 UNION SELECT 3 AS 'a', 4 ORDER BY a; + } +} {1 2 3 4} + +do_test select1-12.6 { + execsql { + SELECT 3, 4 UNION SELECT * FROM t3; + } +} {1 2 3 4} +} ;# ifcapable compound + +ifcapable subquery { + do_test select1-12.7 { + execsql { + SELECT * FROM t3 WHERE a=(SELECT 1); + } + } {1 2} + do_test select1-12.8 { + execsql { + SELECT * FROM t3 WHERE a=(SELECT 2); + } + } {} +} + +ifcapable {compound && subquery} { + do_test select1-12.9 { + execsql2 { + SELECT x FROM ( + SELECT a AS x, b AS y FROM t3 UNION SELECT a,b FROM t4 ORDER BY a,b + ) ORDER BY x; + } + } {x 1 x 3} + do_test select1-12.10 { + execsql2 { + SELECT z.x FROM ( + SELECT a AS x,b AS y FROM t3 UNION SELECT a, b FROM t4 ORDER BY a,b + ) AS 'z' ORDER BY x; + } + } {x 1 x 3} +} ;# ifcapable compound + + +# Check for a VDBE stack growth problem that existed at one point. +# +ifcapable subquery { + do_test select1-13.1 { + execsql { + BEGIN; + create TABLE abc(a, b, c, PRIMARY KEY(a, b)); + INSERT INTO abc VALUES(1, 1, 1); + } + for {set i 0} {$i<10} {incr i} { + execsql { + INSERT INTO abc SELECT a+(select max(a) FROM abc), + b+(select max(a) FROM abc), c+(select max(a) FROM abc) FROM abc; + } + } + execsql {COMMIT} + + # This used to seg-fault when the problem existed. + execsql { + SELECT count( + (SELECT a FROM abc WHERE a = NULL AND b >= upper.c) + ) FROM abc AS upper; + } + } {0} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select2.test new file mode 100644 index 0000000..1c6a5c8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select2.test @@ -0,0 +1,185 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the SELECT statement. +# +# $Id: select2.test,v 1.25 2005/07/21 03:15:01 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table with some data +# +execsql {CREATE TABLE tbl1(f1 int, f2 int)} +execsql {BEGIN} +for {set i 0} {$i<=30} {incr i} { + execsql "INSERT INTO tbl1 VALUES([expr {$i%9}],[expr {$i%10}])" +} +execsql {COMMIT} + +# Do a second query inside a first. 
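# A sketch for illustration (not from the original test file): the
# select2-1.* tests below step through an outer query and, for each row, run
# a second query while the first is still active; each "db eval" prepares its
# own statement, so the nesting is safe. The shape of the idiom, with the
# variable names "pairs", "o" and "i" invented for the example:
set pairs {}
db eval {SELECT DISTINCT f1 FROM tbl1 ORDER BY f1} o {
  db eval "SELECT f2 FROM tbl1 WHERE f1=$o(f1) ORDER BY f2" i {
    lappend pairs $o(f1) $i(f2)
  }
}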
+# +do_test select2-1.1 { + set sql {SELECT DISTINCT f1 FROM tbl1 ORDER BY f1} + set r {} + catch {unset data} + db eval $sql data { + set f1 $data(f1) + lappend r $f1: + set sql2 "SELECT f2 FROM tbl1 WHERE f1=$f1 ORDER BY f2" + db eval $sql2 d2 { + lappend r $d2(f2) + } + } + set r +} {0: 0 7 8 9 1: 0 1 8 9 2: 0 1 2 9 3: 0 1 2 3 4: 2 3 4 5: 3 4 5 6: 4 5 6 7: 5 6 7 8: 6 7 8} + +do_test select2-1.2 { + set sql {SELECT DISTINCT f1 FROM tbl1 WHERE f1>3 AND f1<5} + set r {} + db eval $sql data { + set f1 $data(f1) + lappend r $f1: + set sql2 "SELECT f2 FROM tbl1 WHERE f1=$f1 ORDER BY f2" + db eval $sql2 d2 { + lappend r $d2(f2) + } + } + set r +} {4: 2 3 4} + +# Create a largish table. Do this twice, once using the TCL cache and once +# without. Compare the performance to make sure things go faster with the +# cache turned on. +# +ifcapable tclvar { + do_test select2-2.0.1 { + set t1 [time { + execsql {CREATE TABLE tbl2(f1 int, f2 int, f3 int); BEGIN;} + for {set i 1} {$i<=30000} {incr i} { + set i2 [expr {$i*2}] + set i3 [expr {$i*3}] + db eval {INSERT INTO tbl2 VALUES($i,$i2,$i3)} + } + execsql {COMMIT} + }] + list + } {} + puts "time with cache: $::t1" +} +catch {execsql {DROP TABLE tbl2}} +do_test select2-2.0.2 { + set t2 [time { + execsql {CREATE TABLE tbl2(f1 int, f2 int, f3 int); BEGIN;} + for {set i 1} {$i<=30000} {incr i} { + set i2 [expr {$i*2}] + set i3 [expr {$i*3}] + execsql "INSERT INTO tbl2 VALUES($i,$i2,$i3)" + } + execsql {COMMIT} + }] + list +} {} +puts "time without cache: $t2" +ifcapable tclvar { + do_test select2-2.0.3 { + expr {[lindex $t1 0]<[lindex $t2 0]} + } 1 +} + +do_test select2-2.1 { + execsql {SELECT count(*) FROM tbl2} +} {30000} +do_test select2-2.2 { + execsql {SELECT count(*) FROM tbl2 WHERE f2>1000} +} {29500} + +do_test select2-3.1 { + execsql {SELECT f1 FROM tbl2 WHERE 1000=f2} +} {500} + +do_test select2-3.2a { + execsql {CREATE INDEX idx1 ON tbl2(f2)} +} {} +do_test select2-3.2b { + execsql {SELECT f1 FROM tbl2 WHERE 1000=f2} +} {500} +do_test select2-3.2c { + execsql {SELECT f1 FROM tbl2 WHERE f2=1000} +} {500} +do_test select2-3.2d { + set sqlite_search_count 0 +btree_breakpoint + execsql {SELECT * FROM tbl2 WHERE 1000=f2} + set sqlite_search_count +} {3} +do_test select2-3.2e { + set sqlite_search_count 0 + execsql {SELECT * FROM tbl2 WHERE f2=1000} + set sqlite_search_count +} {3} + +# Make sure queries run faster with an index than without +# +do_test select2-3.3 { + execsql {DROP INDEX idx1} + set sqlite_search_count 0 + execsql {SELECT f1 FROM tbl2 WHERE f2==2000} + set sqlite_search_count +} {29999} + +# Make sure we can optimize functions in the WHERE clause that +# use fields from two or more different table. 
(Bug #6) +# +do_test select2-4.1 { + execsql { + CREATE TABLE aa(a); + CREATE TABLE bb(b); + INSERT INTO aa VALUES(1); + INSERT INTO aa VALUES(3); + INSERT INTO bb VALUES(2); + INSERT INTO bb VALUES(4); + SELECT * FROM aa, bb WHERE max(a,b)>2; + } +} {1 4 3 2 3 4} +do_test select2-4.2 { + execsql { + INSERT INTO bb VALUES(0); + SELECT * FROM aa, bb WHERE b; + } +} {1 2 1 4 3 2 3 4} +do_test select2-4.3 { + execsql { + SELECT * FROM aa, bb WHERE NOT b; + } +} {1 0 3 0} +do_test select2-4.4 { + execsql { + SELECT * FROM aa, bb WHERE min(a,b); + } +} {1 2 1 4 3 2 3 4} +do_test select2-4.5 { + execsql { + SELECT * FROM aa, bb WHERE NOT min(a,b); + } +} {1 0 3 0} +do_test select2-4.6 { + execsql { + SELECT * FROM aa, bb WHERE CASE WHEN a=b-1 THEN 1 END; + } +} {1 2 3 4} +do_test select2-4.7 { + execsql { + SELECT * FROM aa, bb WHERE CASE WHEN a=b-1 THEN 0 ELSE 1 END; + } +} {1 4 1 0 3 2 3 0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select3.test new file mode 100644 index 0000000..ec8245a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select3.test @@ -0,0 +1,264 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing aggregate functions and the +# GROUP BY and HAVING clauses of SELECT statements. +# +# $Id: select3.test,v 1.21 2007/06/20 12:18:31 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test select3-1.0 { + execsql { + CREATE TABLE t1(n int, log int); + BEGIN; + } + for {set i 1} {$i<32} {incr i} { + for {set j 0} {pow(2,$j)<$i} {incr j} {} + execsql "INSERT INTO t1 VALUES($i,$j)" + } + execsql { + COMMIT + } + execsql {SELECT DISTINCT log FROM t1 ORDER BY log} +} {0 1 2 3 4 5} + +# Basic aggregate functions. 
+# +do_test select3-1.1 { + execsql {SELECT count(*) FROM t1} +} {31} +do_test select3-1.2 { + execsql { + SELECT min(n),min(log),max(n),max(log),sum(n),sum(log),avg(n),avg(log) + FROM t1 + } +} {1 0 31 5 496 124 16.0 4.0} +do_test select3-1.3 { + execsql {SELECT max(n)/avg(n), max(log)/avg(log) FROM t1} +} {1.9375 1.25} + +# Try some basic GROUP BY clauses +# +do_test select3-2.1 { + execsql {SELECT log, count(*) FROM t1 GROUP BY log ORDER BY log} +} {0 1 1 1 2 2 3 4 4 8 5 15} +do_test select3-2.2 { + execsql {SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log} +} {0 1 1 2 2 3 3 5 4 9 5 17} +do_test select3-2.3.1 { + execsql {SELECT log, avg(n) FROM t1 GROUP BY log ORDER BY log} +} {0 1.0 1 2.0 2 3.5 3 6.5 4 12.5 5 24.0} +do_test select3-2.3.2 { + execsql {SELECT log, avg(n)+1 FROM t1 GROUP BY log ORDER BY log} +} {0 2.0 1 3.0 2 4.5 3 7.5 4 13.5 5 25.0} +do_test select3-2.4 { + execsql {SELECT log, avg(n)-min(n) FROM t1 GROUP BY log ORDER BY log} +} {0 0.0 1 0.0 2 0.5 3 1.5 4 3.5 5 7.0} +do_test select3-2.5 { + execsql {SELECT log*2+1, avg(n)-min(n) FROM t1 GROUP BY log ORDER BY log} +} {1 0.0 3 0.0 5 0.5 7 1.5 9 3.5 11 7.0} +do_test select3-2.6 { + execsql { + SELECT log*2+1 as x, count(*) FROM t1 GROUP BY x ORDER BY x + } +} {1 1 3 1 5 2 7 4 9 8 11 15} +do_test select3-2.7 { + execsql { + SELECT log*2+1 AS x, count(*) AS y FROM t1 GROUP BY x ORDER BY y, x + } +} {1 1 3 1 5 2 7 4 9 8 11 15} +do_test select3-2.8 { + execsql { + SELECT log*2+1 AS x, count(*) AS y FROM t1 GROUP BY x ORDER BY 10-(x+y) + } +} {11 15 9 8 7 4 5 2 3 1 1 1} +#do_test select3-2.9 { +# catchsql { +# SELECT log, count(*) FROM t1 GROUP BY 'x' ORDER BY log; +# } +#} {1 {GROUP BY terms must not be non-integer constants}} +do_test select3-2.10 { + catchsql { + SELECT log, count(*) FROM t1 GROUP BY 0 ORDER BY log; + } +} {1 {GROUP BY column number 0 out of range - should be between 1 and 2}} +do_test select3-2.11 { + catchsql { + SELECT log, count(*) FROM t1 GROUP BY 3 ORDER BY log; + } +} {1 {GROUP BY column number 3 out of range - should be between 1 and 2}} +do_test select3-2.12 { + catchsql { + SELECT log, count(*) FROM t1 GROUP BY 1 ORDER BY log; + } +} {0 {0 1 1 1 2 2 3 4 4 8 5 15}} + +# Cannot have an empty GROUP BY +do_test select3-2.13 { + catchsql { + SELECT log, count(*) FROM t1 GROUP BY ORDER BY log; + } +} {1 {near "ORDER": syntax error}} +do_test select3-2.14 { + catchsql { + SELECT log, count(*) FROM t1 GROUP BY; + } +} {1 {near ";": syntax error}} + +# Cannot have a HAVING without a GROUP BY +# +do_test select3-3.1 { + set v [catch {execsql {SELECT log, count(*) FROM t1 HAVING log>=4}} msg] + lappend v $msg +} {1 {a GROUP BY clause is required before HAVING}} + +# Toss in some HAVING clauses +# +do_test select3-4.1 { + execsql {SELECT log, count(*) FROM t1 GROUP BY log HAVING log>=4 ORDER BY log} +} {4 8 5 15} +do_test select3-4.2 { + execsql { + SELECT log, count(*) FROM t1 + GROUP BY log + HAVING count(*)>=4 + ORDER BY log + } +} {3 4 4 8 5 15} +do_test select3-4.3 { + execsql { + SELECT log, count(*) FROM t1 + GROUP BY log + HAVING count(*)>=4 + ORDER BY max(n)+0 + } +} {3 4 4 8 5 15} +do_test select3-4.4 { + execsql { + SELECT log AS x, count(*) AS y FROM t1 + GROUP BY x + HAVING y>=4 + ORDER BY max(n)+0 + } +} {3 4 4 8 5 15} +do_test select3-4.5 { + execsql { + SELECT log AS x FROM t1 + GROUP BY x + HAVING count(*)>=4 + ORDER BY max(n)+0 + } +} {3 4 5} + +do_test select3-5.1 { + execsql { + SELECT log, count(*), avg(n), max(n+log*2) FROM t1 + GROUP BY log + ORDER BY max(n+log*2)+0, avg(n)+0 + } +} 
{0 1 1.0 1 1 1 2.0 4 2 2 3.5 8 3 4 6.5 14 4 8 12.5 24 5 15 24.0 41} +do_test select3-5.2 { + execsql { + SELECT log, count(*), avg(n), max(n+log*2) FROM t1 + GROUP BY log + ORDER BY max(n+log*2)+0, min(log,avg(n))+0 + } +} {0 1 1.0 1 1 1 2.0 4 2 2 3.5 8 3 4 6.5 14 4 8 12.5 24 5 15 24.0 41} + +# Test sorting of GROUP BY results in the presence of an index +# on the GROUP BY column. +# +do_test select3-6.1 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log; + } +} {0 1 1 2 2 3 3 5 4 9 5 17} +do_test select3-6.2 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log DESC; + } +} {5 17 4 9 3 5 2 3 1 2 0 1} +do_test select3-6.3 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1; + } +} {0 1 1 2 2 3 3 5 4 9 5 17} +do_test select3-6.4 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1 DESC; + } +} {5 17 4 9 3 5 2 3 1 2 0 1} +do_test select3-6.5 { + execsql { + CREATE INDEX i1 ON t1(log); + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log; + } +} {0 1 1 2 2 3 3 5 4 9 5 17} +do_test select3-6.6 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY log DESC; + } +} {5 17 4 9 3 5 2 3 1 2 0 1} +do_test select3-6.7 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1; + } +} {0 1 1 2 2 3 3 5 4 9 5 17} +do_test select3-6.8 { + execsql { + SELECT log, min(n) FROM t1 GROUP BY log ORDER BY 1 DESC; + } +} {5 17 4 9 3 5 2 3 1 2 0 1} + +# Sometimes an aggregate query can return no rows at all. +# +do_test select3-7.1 { + execsql { + CREATE TABLE t2(a,b); + INSERT INTO t2 VALUES(1,2); + SELECT a, sum(b) FROM t2 WHERE b=5 GROUP BY a; + } +} {} +do_test select3-7.2 { + execsql { + SELECT a, sum(b) FROM t2 WHERE b=5; + } +} {{} {}} + +# If a table column is of type REAL but we are storing integer values +# in it, the values are stored as integers to take up less space. The +# values are converted by to REAL as they are read out of the table. +# Make sure the GROUP BY clause does this conversion correctly. +# Ticket #2251. +# +do_test select3-8.1 { + execsql { + CREATE TABLE A ( + A1 DOUBLE, + A2 VARCHAR COLLATE NOCASE, + A3 DOUBLE + ); + INSERT INTO A VALUES(39136,'ABC',1201900000); + INSERT INTO A VALUES(39136,'ABC',1207000000); + SELECT typeof(sum(a3)) FROM a; + } +} {real} +do_test select3-8.2 { + execsql { + SELECT typeof(sum(a3)) FROM a GROUP BY a1; + } +} {real} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select4.test new file mode 100644 index 0000000..5c3b808 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select4.test @@ -0,0 +1,617 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing UNION, INTERSECT and EXCEPT operators +# in SELECT statements. +# +# $Id: select4.test,v 1.20 2006/06/20 11:01:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Most tests in this file depend on compound-select. But there are a couple +# right at the end that test DISTINCT, so we cannot omit the entire file. 
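# A note for reference (not from the original test file): "ifcapable",
# defined in tester.tcl, runs its body only when the named compile-time
# features are enabled, and accepts expressions such as !subquery or
# compound&&subquery. The two idioms used in these files are:
#
#   ifcapable !compound {      ;# skip an entire file early
#     finish_test
#     return
#   }
#
#   ifcapable compound {       ;# or wrap only the dependent tests,
#     ...                      ;# as this file does just below
#   }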
+# +ifcapable compound { + +# Build some test data +# +execsql { + CREATE TABLE t1(n int, log int); + BEGIN; +} +for {set i 1} {$i<32} {incr i} { + for {set j 0} {pow(2,$j)<$i} {incr j} {} + execsql "INSERT INTO t1 VALUES($i,$j)" +} +execsql { + COMMIT; +} + +do_test select4-1.0 { + execsql {SELECT DISTINCT log FROM t1 ORDER BY log} +} {0 1 2 3 4 5} + +# Union All operator +# +do_test select4-1.1a { + lsort [execsql {SELECT DISTINCT log FROM t1}] +} {0 1 2 3 4 5} +do_test select4-1.1b { + lsort [execsql {SELECT n FROM t1 WHERE log=3}] +} {5 6 7 8} +do_test select4-1.1c { + execsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {0 1 2 3 4 5 5 6 7 8} +do_test select4-1.1d { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + SELECT * FROM t2; + } +} {0 1 2 3 4 5 5 6 7 8} +execsql {DROP TABLE t2} +do_test select4-1.1e { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log DESC; + SELECT * FROM t2; + } +} {8 7 6 5 5 4 3 2 1 0} +execsql {DROP TABLE t2} +do_test select4-1.1f { + execsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=2 + } +} {0 1 2 3 4 5 3 4} +do_test select4-1.1g { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=2; + SELECT * FROM t2; + } +} {0 1 2 3 4 5 3 4} +execsql {DROP TABLE t2} +ifcapable subquery { + do_test select4-1.2 { + execsql { + SELECT log FROM t1 WHERE n IN + (SELECT DISTINCT log FROM t1 UNION ALL + SELECT n FROM t1 WHERE log=3) + ORDER BY log; + } + } {0 1 2 2 3 3 3 3} +} +do_test select4-1.3 { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 ORDER BY log + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {ORDER BY clause should come after UNION ALL not before}} + +# Union operator +# +do_test select4-2.1 { + execsql { + SELECT DISTINCT log FROM t1 + UNION + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {0 1 2 3 4 5 6 7 8} +ifcapable subquery { + do_test select4-2.2 { + execsql { + SELECT log FROM t1 WHERE n IN + (SELECT DISTINCT log FROM t1 UNION + SELECT n FROM t1 WHERE log=3) + ORDER BY log; + } + } {0 1 2 2 3 3 3 3} +} +do_test select4-2.3 { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 ORDER BY log + UNION + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {ORDER BY clause should come after UNION not before}} + +# Except operator +# +do_test select4-3.1.1 { + execsql { + SELECT DISTINCT log FROM t1 + EXCEPT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {0 1 2 3 4} +do_test select4-3.1.2 { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 + EXCEPT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + SELECT * FROM t2; + } +} {0 1 2 3 4} +execsql {DROP TABLE t2} +do_test select4-3.1.3 { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 + EXCEPT + SELECT n FROM t1 WHERE log=3 + ORDER BY log DESC; + SELECT * FROM t2; + } +} {4 3 2 1 0} +execsql {DROP TABLE t2} +ifcapable subquery { + do_test select4-3.2 { + execsql { + SELECT log FROM t1 WHERE n IN + (SELECT DISTINCT log FROM t1 EXCEPT + SELECT n FROM t1 WHERE log=3) + ORDER BY log; + } + } {0 1 2 2} +} +do_test select4-3.3 { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 ORDER BY log + EXCEPT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {ORDER BY clause should come after EXCEPT 
not before}} + +# Intersect operator +# +do_test select4-4.1.1 { + execsql { + SELECT DISTINCT log FROM t1 + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {5} + +do_test select4-4.1.2 { + execsql { + SELECT DISTINCT log FROM t1 UNION ALL SELECT 6 + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {5 6} +do_test select4-4.1.3 { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 UNION ALL SELECT 6 + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + SELECT * FROM t2; + } +} {5 6} +execsql {DROP TABLE t2} +do_test select4-4.1.4 { + execsql { + CREATE TABLE t2 AS + SELECT DISTINCT log FROM t1 UNION ALL SELECT 6 + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY log DESC; + SELECT * FROM t2; + } +} {6 5} +execsql {DROP TABLE t2} +ifcapable subquery { + do_test select4-4.2 { + execsql { + SELECT log FROM t1 WHERE n IN + (SELECT DISTINCT log FROM t1 INTERSECT + SELECT n FROM t1 WHERE log=3) + ORDER BY log; + } + } {3} +} +do_test select4-4.3 { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 ORDER BY log + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {ORDER BY clause should come after INTERSECT not before}} + +# Various error messages while processing UNION or INTERSECT +# +do_test select4-5.1 { + set v [catch {execsql { + SELECT DISTINCT log FROM t2 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {no such table: t2}} +do_test select4-5.2 { + set v [catch {execsql { + SELECT DISTINCT log AS "xyzzy" FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY xyzzy; + }} msg] + lappend v $msg +} {0 {0 1 2 3 4 5 5 6 7 8}} +do_test select4-5.2b { + set v [catch {execsql { + SELECT DISTINCT log AS xyzzy FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY 'xyzzy'; + }} msg] + lappend v $msg +} {0 {0 1 2 3 4 5 5 6 7 8}} +do_test select4-5.2c { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY 'xyzzy'; + }} msg] + lappend v $msg +} {1 {ORDER BY term number 1 does not match any result column}} +do_test select4-5.2d { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 + INTERSECT + SELECT n FROM t1 WHERE log=3 + ORDER BY 'xyzzy'; + }} msg] + lappend v $msg +} {1 {ORDER BY term number 1 does not match any result column}} +do_test select4-5.2e { + set v [catch {execsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY n; + }} msg] + lappend v $msg +} {0 {0 1 2 3 4 5 5 6 7 8}} +do_test select4-5.2f { + catchsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + } +} {0 {0 1 2 3 4 5 5 6 7 8}} +do_test select4-5.2g { + catchsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY 1; + } +} {0 {0 1 2 3 4 5 5 6 7 8}} +do_test select4-5.2h { + catchsql { + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY 2; + } +} {1 {ORDER BY position 2 should be between 1 and 1}} +do_test select4-5.2i { + catchsql { + SELECT DISTINCT 1, log FROM t1 + UNION ALL + SELECT 2, n FROM t1 WHERE log=3 + ORDER BY 2, 1; + } +} {0 {1 0 1 1 1 2 1 3 1 4 1 5 2 5 2 6 2 7 2 8}} +do_test select4-5.2j { + catchsql { + SELECT DISTINCT 1, log FROM t1 + UNION ALL + SELECT 2, n FROM t1 WHERE log=3 + ORDER BY 1, 2 DESC; + } +} {0 {1 5 1 4 1 3 1 2 1 1 1 0 2 8 2 7 2 6 2 5}} +do_test select4-5.2k { + catchsql { + SELECT DISTINCT 1, log FROM t1 + UNION ALL + SELECT 2, n FROM t1 WHERE log=3 
+ ORDER BY n, 1; + } +} {0 {1 0 1 1 1 2 1 3 1 4 1 5 2 5 2 6 2 7 2 8}} +do_test select4-5.3 { + set v [catch {execsql { + SELECT DISTINCT log, n FROM t1 + UNION ALL + SELECT n FROM t1 WHERE log=3 + ORDER BY log; + }} msg] + lappend v $msg +} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test select4-5.4 { + set v [catch {execsql { + SELECT log FROM t1 WHERE n=2 + UNION ALL + SELECT log FROM t1 WHERE n=3 + UNION ALL + SELECT log FROM t1 WHERE n=4 + UNION ALL + SELECT log FROM t1 WHERE n=5 + ORDER BY log; + }} msg] + lappend v $msg +} {0 {1 2 2 3}} + +do_test select4-6.1 { + execsql { + SELECT log, count(*) as cnt FROM t1 GROUP BY log + UNION + SELECT log, n FROM t1 WHERE n=7 + ORDER BY cnt, log; + } +} {0 1 1 1 2 2 3 4 3 7 4 8 5 15} +do_test select4-6.2 { + execsql { + SELECT log, count(*) FROM t1 GROUP BY log + UNION + SELECT log, n FROM t1 WHERE n=7 + ORDER BY count(*), log; + } +} {0 1 1 1 2 2 3 4 3 7 4 8 5 15} + +# NULLs are indistinct for the UNION operator. +# Make sure the UNION operator recognizes this +# +do_test select4-6.3 { + execsql { + SELECT NULL UNION SELECT NULL UNION + SELECT 1 UNION SELECT 2 AS 'x' + ORDER BY x; + } +} {{} 1 2} +do_test select4-6.3.1 { + execsql { + SELECT NULL UNION ALL SELECT NULL UNION ALL + SELECT 1 UNION ALL SELECT 2 AS 'x' + ORDER BY x; + } +} {{} {} 1 2} + +# Make sure the DISTINCT keyword treats NULLs as indistinct. +# +ifcapable subquery { + do_test select4-6.4 { + execsql { + SELECT * FROM ( + SELECT NULL, 1 UNION ALL SELECT NULL, 1 + ); + } + } {{} 1 {} 1} + do_test select4-6.5 { + execsql { + SELECT DISTINCT * FROM ( + SELECT NULL, 1 UNION ALL SELECT NULL, 1 + ); + } + } {{} 1} + do_test select4-6.6 { + execsql { + SELECT DISTINCT * FROM ( + SELECT 1,2 UNION ALL SELECT 1,2 + ); + } + } {1 2} +} + +# Test distinctness of NULL in other ways. +# +do_test select4-6.7 { + execsql { + SELECT NULL EXCEPT SELECT NULL + } +} {} + + +# Make sure column names are correct when a compound select appears as +# an expression in the WHERE clause. +# +do_test select4-7.1 { + execsql { + CREATE TABLE t2 AS SELECT log AS 'x', count(*) AS 'y' FROM t1 GROUP BY log; + SELECT * FROM t2 ORDER BY x; + } +} {0 1 1 1 2 2 3 4 4 8 5 15} +ifcapable subquery { + do_test select4-7.2 { + execsql2 { + SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 INTERSECT SELECT x FROM t2) + ORDER BY n + } + } {n 1 log 0 n 2 log 1 n 3 log 2 n 4 log 2 n 5 log 3} + do_test select4-7.3 { + execsql2 { + SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 EXCEPT SELECT x FROM t2) + ORDER BY n LIMIT 2 + } + } {n 6 log 3 n 7 log 3} + do_test select4-7.4 { + execsql2 { + SELECT * FROM t1 WHERE n IN (SELECT n FROM t1 UNION SELECT x FROM t2) + ORDER BY n LIMIT 2 + } + } {n 1 log 0 n 2 log 1} +} ;# ifcapable subquery + +} ;# ifcapable compound + +# Make sure DISTINCT works appropriately on TEXT and NUMERIC columns. +do_test select4-8.1 { + execsql { + BEGIN; + CREATE TABLE t3(a text, b float, c text); + INSERT INTO t3 VALUES(1, 1.1, '1.1'); + INSERT INTO t3 VALUES(2, 1.10, '1.10'); + INSERT INTO t3 VALUES(3, 1.10, '1.1'); + INSERT INTO t3 VALUES(4, 1.1, '1.10'); + INSERT INTO t3 VALUES(5, 1.2, '1.2'); + INSERT INTO t3 VALUES(6, 1.3, '1.3'); + COMMIT; + } + execsql { + SELECT DISTINCT b FROM t3 ORDER BY c; + } +} {1.1 1.2 1.3} +do_test select4-8.2 { + execsql { + SELECT DISTINCT c FROM t3 ORDER BY c; + } +} {1.1 1.10 1.2 1.3} + +# Make sure the names of columns are takenf rom the right-most subquery +# right in a compound query. 
Ticket #1721 +# +ifcapable compound { + +do_test select4-9.1 { + execsql2 { + SELECT x, y FROM t2 UNION SELECT a, b FROM t3 ORDER BY x LIMIT 1 + } +} {x 0 y 1} +do_test select4-9.2 { + execsql2 { + SELECT x, y FROM t2 UNION ALL SELECT a, b FROM t3 ORDER BY x LIMIT 1 + } +} {x 0 y 1} +do_test select4-9.3 { + execsql2 { + SELECT x, y FROM t2 EXCEPT SELECT a, b FROM t3 ORDER BY x LIMIT 1 + } +} {x 0 y 1} +do_test select4-9.4 { + execsql2 { + SELECT x, y FROM t2 INTERSECT SELECT 0 AS a, 1 AS b; + } +} {x 0 y 1} +do_test select4-9.5 { + execsql2 { + SELECT 0 AS x, 1 AS y + UNION + SELECT 2 AS p, 3 AS q + UNION + SELECT 4 AS a, 5 AS b + ORDER BY x LIMIT 1 + } +} {x 0 y 1} + +ifcapable subquery { +do_test select4-9.6 { + execsql2 { + SELECT * FROM ( + SELECT 0 AS x, 1 AS y + UNION + SELECT 2 AS p, 3 AS q + UNION + SELECT 4 AS a, 5 AS b + ) ORDER BY 1 LIMIT 1; + } +} {x 0 y 1} +do_test select4-9.7 { + execsql2 { + SELECT * FROM ( + SELECT 0 AS x, 1 AS y + UNION + SELECT 2 AS p, 3 AS q + UNION + SELECT 4 AS a, 5 AS b + ) ORDER BY x LIMIT 1; + } +} {x 0 y 1} +} ;# ifcapable subquery + +do_test select4-9.8 { + execsql2 { + SELECT 0 AS x, 1 AS y + UNION + SELECT 2 AS y, -3 AS x + ORDER BY x LIMIT 1; + } +} {x 0 y 1} +do_test select4-9.9.1 { + execsql2 { + SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS b, 4 AS a + } +} {a 1 b 2 a 3 b 4} + +ifcapable subquery { +do_test select4-9.9.2 { + execsql2 { + SELECT * FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS b, 4 AS a) + WHERE b=3 + } +} {} +do_test select4-9.10 { + execsql2 { + SELECT * FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS b, 4 AS a) + WHERE b=2 + } +} {a 1 b 2} +do_test select4-9.11 { + execsql2 { + SELECT * FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS e, 4 AS b) + WHERE b=2 + } +} {a 1 b 2} +do_test select4-9.12 { + execsql2 { + SELECT * FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS e, 4 AS b) + WHERE b>0 + } +} {a 1 b 2 a 3 b 4} +} ;# ifcapable subquery + +} ;# ifcapable compound + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select5.test new file mode 100644 index 0000000..fe53c72 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select5.test @@ -0,0 +1,192 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing aggregate functions and the +# GROUP BY and HAVING clauses of SELECT statements. +# +# $Id: select5.test,v 1.16 2006/01/21 12:08:55 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +execsql { + CREATE TABLE t1(x int, y int); + BEGIN; +} +for {set i 1} {$i<32} {incr i} { + for {set j 0} {pow(2,$j)<$i} {incr j} {} + execsql "INSERT INTO t1 VALUES([expr {32-$i}],[expr {10-$j}])" +} +execsql { + COMMIT +} + +do_test select5-1.0 { + execsql {SELECT DISTINCT y FROM t1 ORDER BY y} +} {5 6 7 8 9 10} + +# Sort by an aggregate function. 
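# A sketch for illustration (not from the original test file): the
# select5-1.* tests below order grouped rows by an aggregate. Both the bare
# aggregate and an alias for it are accepted in ORDER BY, as these two
# equivalent spellings against the t1(x,y) table created above show:
execsql {SELECT y, count(*) AS c FROM t1 GROUP BY y ORDER BY count(*), y}
execsql {SELECT y, count(*) AS c FROM t1 GROUP BY y ORDER BY c, y}
# both return: 9 1 10 1 8 2 7 4 6 8 5 15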
+# +do_test select5-1.1 { + execsql {SELECT y, count(*) FROM t1 GROUP BY y ORDER BY y} +} {5 15 6 8 7 4 8 2 9 1 10 1} +do_test select5-1.2 { + execsql {SELECT y, count(*) FROM t1 GROUP BY y ORDER BY count(*), y} +} {9 1 10 1 8 2 7 4 6 8 5 15} +do_test select5-1.3 { + execsql {SELECT count(*), y FROM t1 GROUP BY y ORDER BY count(*), y} +} {1 9 1 10 2 8 4 7 8 6 15 5} + +# Some error messages associated with aggregates and GROUP BY +# +do_test select5-2.1.1 { + catchsql { + SELECT y, count(*) FROM t1 GROUP BY z ORDER BY y + } +} {1 {no such column: z}} +do_test select5-2.1.2 { + catchsql { + SELECT y, count(*) FROM t1 GROUP BY temp.t1.y ORDER BY y + } +} {1 {no such column: temp.t1.y}} +do_test select5-2.2 { + set v [catch {execsql { + SELECT y, count(*) FROM t1 GROUP BY z(y) ORDER BY y + }} msg] + lappend v $msg +} {1 {no such function: z}} +do_test select5-2.3 { + set v [catch {execsql { + SELECT y, count(*) FROM t1 GROUP BY y HAVING count(*)<3 ORDER BY y + }} msg] + lappend v $msg +} {0 {8 2 9 1 10 1}} +do_test select5-2.4 { + set v [catch {execsql { + SELECT y, count(*) FROM t1 GROUP BY y HAVING z(y)<3 ORDER BY y + }} msg] + lappend v $msg +} {1 {no such function: z}} +do_test select5-2.5 { + set v [catch {execsql { + SELECT y, count(*) FROM t1 GROUP BY y HAVING count(*)100 + } +} {{}} +do_test select5-4.2 { + execsql { + SELECT count(x) FROM t1 WHERE x>100 + } +} {0} +do_test select5-4.3 { + execsql { + SELECT min(x) FROM t1 WHERE x>100 + } +} {{}} +do_test select5-4.4 { + execsql { + SELECT max(x) FROM t1 WHERE x>100 + } +} {{}} +do_test select5-4.5 { + execsql { + SELECT sum(x) FROM t1 WHERE x>100 + } +} {{}} + +# Some tests for queries with a GROUP BY clause but no aggregate functions. +# +# Note: The query in test case 5-5.5 are not legal SQL. So if the +# implementation changes in the future and it returns different results, +# this is not such a big deal. +# +do_test select5-5.1 { + execsql { + CREATE TABLE t2(a, b, c); + INSERT INTO t2 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(1, 4, 5); + INSERT INTO t2 VALUES(6, 4, 7); + CREATE INDEX t2_idx ON t2(a); + } +} {} +do_test select5-5.2 { + execsql { + SELECT a FROM t2 GROUP BY a; + } +} {1 6} +do_test select5-5.3 { + execsql { + SELECT a FROM t2 WHERE a>2 GROUP BY a; + } +} {6} +do_test select5-5.4 { + execsql { + SELECT a, b FROM t2 GROUP BY a, b; + } +} {1 2 1 4 6 4} +do_test select5-5.5 { + execsql { + SELECT a, b FROM t2 GROUP BY a; + } +} {1 4 6 4} + +# NULL compare equal to each other for the purposes of processing +# the GROUP BY clause. 
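# A sketch for illustration (not from the original test file): NULL = NULL
# is not true in a WHERE clause, yet for GROUP BY every row whose grouping
# value is NULL lands in one group, which is what the select5-6.* tests below
# verify. A compact example (the temp-table name is invented here):
execsql {
  CREATE TEMP TABLE null_group_demo(v);
  INSERT INTO null_group_demo VALUES(NULL);
  INSERT INTO null_group_demo VALUES(NULL);
  INSERT INTO null_group_demo VALUES(7);
}
execsql {SELECT count(*) FROM null_group_demo GROUP BY v ORDER BY 1}
# -> 1 2   (one group for the 7 row, one group holding both NULL rows)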
+# +do_test select5-6.1 { + execsql { + CREATE TABLE t3(x,y); + INSERT INTO t3 VALUES(1,NULL); + INSERT INTO t3 VALUES(2,NULL); + INSERT INTO t3 VALUES(3,4); + SELECT count(x), y FROM t3 GROUP BY y ORDER BY 1 + } +} {1 4 2 {}} +do_test select5-6.2 { + execsql { + CREATE TABLE t4(x,y,z); + INSERT INTO t4 VALUES(1,2,NULL); + INSERT INTO t4 VALUES(2,3,NULL); + INSERT INTO t4 VALUES(3,NULL,5); + INSERT INTO t4 VALUES(4,NULL,6); + INSERT INTO t4 VALUES(4,NULL,6); + INSERT INTO t4 VALUES(5,NULL,NULL); + INSERT INTO t4 VALUES(5,NULL,NULL); + INSERT INTO t4 VALUES(6,7,8); + SELECT max(x), count(x), y, z FROM t4 GROUP BY y, z ORDER BY 1 + } +} {1 1 2 {} 2 1 3 {} 3 1 {} 5 4 2 {} 6 5 2 {} {} 6 1 7 8} + +do_test select5.7.2 { + execsql { + SELECT count(*), count(x) as cnt FROM t4 GROUP BY y ORDER BY cnt; + } +} {1 1 1 1 1 1 5 5} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select6.test new file mode 100644 index 0000000..d90414b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select6.test @@ -0,0 +1,507 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing SELECT statements that contain +# subqueries in their FROM clause. +# +# $Id: select6.test,v 1.26 2006/11/30 13:06:00 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Omit this whole file if the library is build without subquery support. 
+ifcapable !subquery { + finish_test + return +} + +do_test select6-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(x, y); + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 VALUES(3,2); + INSERT INTO t1 VALUES(4,3); + INSERT INTO t1 VALUES(5,3); + INSERT INTO t1 VALUES(6,3); + INSERT INTO t1 VALUES(7,3); + INSERT INTO t1 VALUES(8,4); + INSERT INTO t1 VALUES(9,4); + INSERT INTO t1 VALUES(10,4); + INSERT INTO t1 VALUES(11,4); + INSERT INTO t1 VALUES(12,4); + INSERT INTO t1 VALUES(13,4); + INSERT INTO t1 VALUES(14,4); + INSERT INTO t1 VALUES(15,4); + INSERT INTO t1 VALUES(16,5); + INSERT INTO t1 VALUES(17,5); + INSERT INTO t1 VALUES(18,5); + INSERT INTO t1 VALUES(19,5); + INSERT INTO t1 VALUES(20,5); + COMMIT; + SELECT DISTINCT y FROM t1 ORDER BY y; + } +} {1 2 3 4 5} + +do_test select6-1.1 { + execsql2 {SELECT * FROM (SELECT x, y FROM t1 WHERE x<2)} +} {x 1 y 1} +do_test select6-1.2 { + execsql {SELECT count(*) FROM (SELECT y FROM t1)} +} {20} +do_test select6-1.3 { + execsql {SELECT count(*) FROM (SELECT DISTINCT y FROM t1)} +} {5} +do_test select6-1.4 { + execsql {SELECT count(*) FROM (SELECT DISTINCT * FROM (SELECT y FROM t1))} +} {5} +do_test select6-1.5 { + execsql {SELECT count(*) FROM (SELECT * FROM (SELECT DISTINCT y FROM t1))} +} {5} + +do_test select6-1.6 { + execsql { + SELECT * + FROM (SELECT count(*),y FROM t1 GROUP BY y) AS a, + (SELECT max(x),y FROM t1 GROUP BY y) as b + WHERE a.y=b.y ORDER BY a.y + } +} {1 1 1 1 2 2 3 2 4 3 7 3 8 4 15 4 5 5 20 5} +do_test select6-1.7 { + execsql { + SELECT a.y, a.[count(*)], [max(x)], [count(*)] + FROM (SELECT count(*),y FROM t1 GROUP BY y) AS a, + (SELECT max(x),y FROM t1 GROUP BY y) as b + WHERE a.y=b.y ORDER BY a.y + } +} {1 1 1 1 2 2 3 2 3 4 7 4 4 8 15 8 5 5 20 5} +do_test select6-1.8 { + execsql { + SELECT q, p, r + FROM (SELECT count(*) as p , y as q FROM t1 GROUP BY y) AS a, + (SELECT max(x) as r, y as s FROM t1 GROUP BY y) as b + WHERE q=s ORDER BY s + } +} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20} +do_test select6-1.9 { + execsql { + SELECT q, p, r, b.[min(x)+y] + FROM (SELECT count(*) as p , y as q FROM t1 GROUP BY y) AS a, + (SELECT max(x) as r, y as s, min(x)+y FROM t1 GROUP BY y) as b + WHERE q=s ORDER BY s + } +} {1 1 1 2 2 2 3 4 3 4 7 7 4 8 15 12 5 5 20 21} + +do_test select6-2.0 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t2 SELECT * FROM t1; + SELECT DISTINCT b FROM t2 ORDER BY b; + } +} {1 2 3 4 5} +do_test select6-2.1 { + execsql2 {SELECT * FROM (SELECT a, b FROM t2 WHERE a<2)} +} {a 1 b 1} +do_test select6-2.2 { + execsql {SELECT count(*) FROM (SELECT b FROM t2)} +} {20} +do_test select6-2.3 { + execsql {SELECT count(*) FROM (SELECT DISTINCT b FROM t2)} +} {5} +do_test select6-2.4 { + execsql {SELECT count(*) FROM (SELECT DISTINCT * FROM (SELECT b FROM t2))} +} {5} +do_test select6-2.5 { + execsql {SELECT count(*) FROM (SELECT * FROM (SELECT DISTINCT b FROM t2))} +} {5} + +do_test select6-2.6 { + execsql { + SELECT * + FROM (SELECT count(*),b FROM t2 GROUP BY b) AS a, + (SELECT max(a),b FROM t2 GROUP BY b) as b + WHERE a.b=b.b ORDER BY a.b + } +} {1 1 1 1 2 2 3 2 4 3 7 3 8 4 15 4 5 5 20 5} +do_test select6-2.7 { + execsql { + SELECT a.b, a.[count(*)], [max(a)], [count(*)] + FROM (SELECT count(*),b FROM t2 GROUP BY b) AS a, + (SELECT max(a),b FROM t2 GROUP BY b) as b + WHERE a.b=b.b ORDER BY a.b + } +} {1 1 1 1 2 2 3 2 3 4 7 4 4 8 15 8 5 5 20 5} +do_test select6-2.8 { + execsql { + SELECT q, p, r + FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY b) AS a, + (SELECT max(a) as 
r, b as s FROM t2 GROUP BY b) as b + WHERE q=s ORDER BY s + } +} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20} +do_test select6-2.9 { + execsql { + SELECT a.q, a.p, b.r + FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY q) AS a, + (SELECT max(a) as r, b as s FROM t2 GROUP BY s) as b + WHERE a.q=b.s ORDER BY a.q + } +} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20} + +do_test select6-3.1 { + execsql2 { + SELECT * FROM (SELECT * FROM (SELECT * FROM t1 WHERE x=3)); + } +} {x 3 y 2} +do_test select6-3.2 { + execsql { + SELECT * FROM + (SELECT a.q, a.p, b.r + FROM (SELECT count(*) as p , b as q FROM t2 GROUP BY q) AS a, + (SELECT max(a) as r, b as s FROM t2 GROUP BY s) as b + WHERE a.q=b.s ORDER BY a.q) + ORDER BY "a.q" + } +} {1 1 1 2 2 3 3 4 7 4 8 15 5 5 20} +do_test select6-3.3 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1) + } +} {10.5 3.7 14.2} +do_test select6-3.4 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4) + } +} {11.5 4.0 15.5} +do_test select6-3.5 { + execsql { + SELECT x,y,x+y FROM (SELECT avg(a) as 'x', avg(b) as 'y' FROM t2 WHERE a=4) + } +} {4.0 3.0 7.0} +do_test select6-3.6 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1) + WHERE a>10 + } +} {10.5 3.7 14.2} +do_test select6-3.7 { +btree_breakpoint + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1) + WHERE a<10 + } +} {} +do_test select6-3.8 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4) + WHERE a>10 + } +} {11.5 4.0 15.5} +do_test select6-3.9 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', avg(y) as 'b' FROM t1 WHERE y=4) + WHERE a<10 + } +} {} +do_test select6-3.10 { + execsql { + SELECT a,b,a+b FROM (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b) + ORDER BY a + } +} {1.0 1 2.0 2.5 2 4.5 5.5 3 8.5 11.5 4 15.5 18.0 5 23.0} +do_test select6-3.11 { + execsql { + SELECT a,b,a+b FROM + (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b) + WHERE b<4 ORDER BY a + } +} {1.0 1 2.0 2.5 2 4.5 5.5 3 8.5} +do_test select6-3.12 { + execsql { + SELECT a,b,a+b FROM + (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b HAVING a>1) + WHERE b<4 ORDER BY a + } +} {2.5 2 4.5 5.5 3 8.5} +do_test select6-3.13 { + execsql { + SELECT a,b,a+b FROM + (SELECT avg(x) as 'a', y as 'b' FROM t1 GROUP BY b HAVING a>1) + ORDER BY a + } +} {2.5 2 4.5 5.5 3 8.5 11.5 4 15.5 18.0 5 23.0} +do_test select6-3.14 { + execsql { + SELECT [count(*)],y FROM (SELECT count(*), y FROM t1 GROUP BY y) + ORDER BY [count(*)] + } +} {1 1 2 2 4 3 5 5 8 4} +do_test select6-3.15 { + execsql { + SELECT [count(*)],y FROM (SELECT count(*), y FROM t1 GROUP BY y) + ORDER BY y + } +} {1 1 2 2 4 3 8 4 5 5} + +do_test select6-4.1 { + execsql { + SELECT a,b,c FROM + (SELECT x AS 'a', y AS 'b', x+y AS 'c' FROM t1 WHERE y=4) + WHERE a<10 ORDER BY a; + } +} {8 4 12 9 4 13} +do_test select6-4.2 { + execsql { + SELECT y FROM (SELECT DISTINCT y FROM t1) WHERE y<5 ORDER BY y + } +} {1 2 3 4} +do_test select6-4.3 { + execsql { + SELECT DISTINCT y FROM (SELECT y FROM t1) WHERE y<5 ORDER BY y + } +} {1 2 3 4} +do_test select6-4.4 { + execsql { + SELECT avg(y) FROM (SELECT DISTINCT y FROM t1) WHERE y<5 ORDER BY y + } +} {2.5} +do_test select6-4.5 { + execsql { + SELECT avg(y) FROM (SELECT DISTINCT y FROM t1 WHERE y<5) ORDER BY y + } +} {2.5} + +do_test select6-5.1 { + execsql { + SELECT a,x,b FROM + (SELECT x+3 AS 'a', x FROM t1 WHERE y=3) AS 'p', + (SELECT x AS 'b' FROM t1 WHERE y=4) AS 'q' + WHERE a=b + ORDER BY a + } +} {8 
5 8 9 6 9 10 7 10} +do_test select6-5.2 { + execsql { + SELECT a,x,b FROM + (SELECT x+3 AS 'a', x FROM t1 WHERE y=3), + (SELECT x AS 'b' FROM t1 WHERE y=4) + WHERE a=b + ORDER BY a + } +} {8 5 8 9 6 9 10 7 10} + +# Tests of compound sub-selects +# +do_test select5-6.1 { + execsql { + DELETE FROM t1 WHERE x>4; + SELECT * FROM t1 + } +} {1 1 2 2 3 2 4 3} +ifcapable compound { + do_test select6-6.2 { + execsql { + SELECT * FROM ( + SELECT x AS 'a' FROM t1 UNION ALL SELECT x+10 AS 'a' FROM t1 + ) ORDER BY a; + } + } {1 2 3 4 11 12 13 14} + do_test select6-6.3 { + execsql { + SELECT * FROM ( + SELECT x AS 'a' FROM t1 UNION ALL SELECT x+1 AS 'a' FROM t1 + ) ORDER BY a; + } + } {1 2 2 3 3 4 4 5} + do_test select6-6.4 { + execsql { + SELECT * FROM ( + SELECT x AS 'a' FROM t1 UNION SELECT x+1 AS 'a' FROM t1 + ) ORDER BY a; + } + } {1 2 3 4 5} + do_test select6-6.5 { + execsql { + SELECT * FROM ( + SELECT x AS 'a' FROM t1 INTERSECT SELECT x+1 AS 'a' FROM t1 + ) ORDER BY a; + } + } {2 3 4} + do_test select6-6.6 { + execsql { + SELECT * FROM ( + SELECT x AS 'a' FROM t1 EXCEPT SELECT x*2 AS 'a' FROM t1 + ) ORDER BY a; + } + } {1 3} +} ;# ifcapable compound + +# Subselects with no FROM clause +# +do_test select6-7.1 { + execsql { + SELECT * FROM (SELECT 1) + } +} {1} +do_test select6-7.2 { + execsql { + SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c') + } +} {abc 2 1 1 2 abc} +do_test select6-7.3 { + execsql { + SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c' WHERE 0) + } +} {} +do_test select6-7.4 { + execsql2 { + SELECT c,b,a,* FROM (SELECT 1 AS 'a', 2 AS 'b', 'abc' AS 'c' WHERE 1) + } +} {c abc b 2 a 1 a 1 b 2 c abc} + +# The remaining tests in this file depend on the EXPLAIN keyword. +# Skip these tests if EXPLAIN is disabled in the current build. +# +ifcapable {!explain} { + finish_test + return +} + +# The following procedure compiles the SQL given as an argument and returns +# TRUE if that SQL uses any transient tables and returns FALSE if no +# transient tables are used. This is used to make sure that the +# sqliteFlattenSubquery() routine in select.c is doing its job. +# +proc is_flat {sql} { + return [expr 0>[lsearch [execsql "EXPLAIN $sql"] OpenEphemeral]] +} + +# Check that the flattener works correctly for deeply nested subqueries +# involving joins. +# +do_test select6-8.1 { + execsql { + BEGIN; + CREATE TABLE t3(p,q); + INSERT INTO t3 VALUES(1,11); + INSERT INTO t3 VALUES(2,22); + CREATE TABLE t4(q,r); + INSERT INTO t4 VALUES(11,111); + INSERT INTO t4 VALUES(22,222); + COMMIT; + SELECT * FROM t3 NATURAL JOIN t4; + } +} {1 11 111 2 22 222} +do_test select6-8.2 { + execsql { + SELECT y, p, q, r FROM + (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m, + (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n + WHERE y=p + } +} {1 1 11 111 2 2 22 222 2 2 22 222} +# If view support is omitted from the build, then so is the query +# "flattener". So omit this test and test select6-8.6 in that case. 
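# A note for reference (not from the original test file): the is_flat helper
# above relies on the fact that an un-flattened subquery is materialized into
# a transient table. It runs "EXPLAIN $sql" and looks for the OpenEphemeral
# opcode in the returned VDBE listing, reporting 1 only when that opcode is
# absent. So a call such as
#
#   is_flat {SELECT y, p, q, r FROM ... WHERE y=p}
#
# (see select6-8.3 just below) passes only if the nested joins were rewritten
# into a single flat SELECT. The view-guarded flattener checks that the
# comment above introduces follow immediately.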
+ifcapable view { +do_test select6-8.3 { + is_flat { + SELECT y, p, q, r FROM + (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m, + (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n + WHERE y=p + } +} {1} +} ;# ifcapable view +do_test select6-8.4 { + execsql { + SELECT DISTINCT y, p, q, r FROM + (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m, + (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n + WHERE y=p + } +} {1 1 11 111 2 2 22 222} +do_test select6-8.5 { + execsql { + SELECT * FROM + (SELECT y, p, q, r FROM + (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m, + (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n + WHERE y=p) AS e, + (SELECT r AS z FROM t4 WHERE q=11) AS f + WHERE e.r=f.z + } +} {1 1 11 111 111} +ifcapable view { +do_test select6-8.6 { + is_flat { + SELECT * FROM + (SELECT y, p, q, r FROM + (SELECT t1.y AS y, t2.b AS b FROM t1, t2 WHERE t1.x=t2.a) AS m, + (SELECT t3.p AS p, t3.q AS q, t4.r AS r FROM t3 NATURAL JOIN t4) as n + WHERE y=p) AS e, + (SELECT r AS z FROM t4 WHERE q=11) AS f + WHERE e.r=f.z + } +} {1} +} ;# ifcapable view + +# Ticket #1634 +# +do_test select6-9.1 { + execsql { + SELECT a.x, b.x FROM t1 AS a, (SELECT x FROM t1 LIMIT 2) AS b + } +} {1 1 1 2 2 1 2 2 3 1 3 2 4 1 4 2} +do_test select6-9.2 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT 2); + } +} {1 2} +do_test select6-9.3 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT 2 OFFSET 1); + } +} {2 3} +do_test select6-9.4 { + execsql { + SELECT x FROM (SELECT x FROM t1) LIMIT 2; + } +} {1 2} +do_test select6-9.5 { + execsql { + SELECT x FROM (SELECT x FROM t1) LIMIT 2 OFFSET 1; + } +} {2 3} +do_test select6-9.6 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT 2) LIMIT 3; + } +} {1 2} +do_test select6-9.7 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT -1) LIMIT 3; + } +} {1 2 3} +do_test select6-9.8 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT -1); + } +} {1 2 3 4} +do_test select6-9.9 { + execsql { + SELECT x FROM (SELECT x FROM t1 LIMIT -1 OFFSET 1); + } +} {2 3 4} + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/select7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/select7.test new file mode 100644 index 0000000..3837c88 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/select7.test @@ -0,0 +1,159 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing compute SELECT statements and nested +# views. +# +# $Id: select7.test,v 1.11 2007/09/12 17:01:45 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable compound { + +# A 3-way INTERSECT. Ticket #875 +ifcapable tempdb { + do_test select7-1.1 { + execsql { + create temp table t1(x); + insert into t1 values('amx'); + insert into t1 values('anx'); + insert into t1 values('amy'); + insert into t1 values('bmy'); + select * from t1 where x like 'a__' + intersect select * from t1 where x like '_m_' + intersect select * from t1 where x like '__x'; + } + } {amx} +} + + +# Nested views do not handle * properly. Ticket #826. 
+# +ifcapable view { +do_test select7-2.1 { + execsql { + CREATE TABLE x(id integer primary key, a TEXT NULL); + INSERT INTO x (a) VALUES ('first'); + CREATE TABLE tempx(id integer primary key, a TEXT NULL); + INSERT INTO tempx (a) VALUES ('t-first'); + CREATE VIEW tv1 AS SELECT x.id, tx.id FROM x JOIN tempx tx ON tx.id=x.id; + CREATE VIEW tv1b AS SELECT x.id, tx.id FROM x JOIN tempx tx on tx.id=x.id; + CREATE VIEW tv2 AS SELECT * FROM tv1 UNION SELECT * FROM tv1b; + SELECT * FROM tv2; + } +} {1 1} +} ;# ifcapable view + +} ;# ifcapable compound + +# Do not allow GROUP BY without an aggregate. Ticket #1039. +# +# Change: force any query with a GROUP BY clause to be processed as +# an aggregate query, whether it contains aggregates or not. +# +ifcapable subquery { + # do_test select7-3.1 { + # catchsql { + # SELECT * FROM (SELECT * FROM sqlite_master) GROUP BY name + # } + # } {1 {GROUP BY may only be used on aggregate queries}} + do_test select7-3.1 { + catchsql { + SELECT * FROM (SELECT * FROM sqlite_master) GROUP BY name + } + } [list 0 [execsql {SELECT * FROM sqlite_master ORDER BY name}]] +} + +# Ticket #2018 - Make sure names are resolved correctly on all +# SELECT statements of a compound subquery. +# +ifcapable {subquery && compound} { + do_test select7-4.1 { + execsql { + CREATE TABLE IF NOT EXISTS photo(pk integer primary key, x); + CREATE TABLE IF NOT EXISTS tag(pk integer primary key, fk int, name); + + SELECT P.pk from PHOTO P WHERE NOT EXISTS ( + SELECT T2.pk from TAG T2 WHERE T2.fk = P.pk + EXCEPT + SELECT T3.pk from TAG T3 WHERE T3.fk = P.pk AND T3.name LIKE '%foo%' + ); + } + } {} + do_test select7-4.2 { + execsql { + INSERT INTO photo VALUES(1,1); + INSERT INTO photo VALUES(2,2); + INSERT INTO photo VALUES(3,3); + INSERT INTO tag VALUES(11,1,'one'); + INSERT INTO tag VALUES(12,1,'two'); + INSERT INTO tag VALUES(21,1,'one-b'); + SELECT P.pk from PHOTO P WHERE NOT EXISTS ( + SELECT T2.pk from TAG T2 WHERE T2.fk = P.pk + EXCEPT + SELECT T3.pk from TAG T3 WHERE T3.fk = P.pk AND T3.name LIKE '%foo%' + ); + } + } {2 3} +} + +# ticket #2347 +# +ifcapable {subquery && compound} { + do_test select7-5.1 { + catchsql { + CREATE TABLE t2(a,b); + SELECT 5 IN (SELECT a,b FROM t2); + } + } [list 1 \ + {only a single result allowed for a SELECT that is part of an expression}] + do_test select7-5.2 { + catchsql { + SELECT 5 IN (SELECT * FROM t2); + } + } [list 1 \ + {only a single result allowed for a SELECT that is part of an expression}] + do_test select7-5.3 { + catchsql { + SELECT 5 IN (SELECT a,b FROM t2 UNION SELECT b,a FROM t2); + } + } [list 1 \ + {only a single result allowed for a SELECT that is part of an expression}] + do_test select7-5.4 { + catchsql { + SELECT 5 IN (SELECT * FROM t2 UNION SELECT * FROM t2); + } + } [list 1 \ + {only a single result allowed for a SELECT that is part of an expression}] +} + +# Verify that an error occurs if you have too many terms on a +# compound select statement. 
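The test below drives this with the harness variable $SQLITE_MAX_COMPOUND_SELECT; the same probe can be run by hand with the ordinary Tcl bindings. A sketch, not from the test suite, assuming the default compile-time limit of 500 terms (a custom build may set a different value):

    package require sqlite3
    sqlite3 db :memory:

    # Build "SELECT 0 UNION ALL SELECT 1 ..." with one term more than the
    # assumed 500-term limit and confirm that compilation is rejected.
    set sql {SELECT 0}
    for {set i 1} {$i <= 500} {incr i} {
      append sql " UNION ALL SELECT $i"
    }
    if {[catch {db eval $sql} msg]} {
      puts "rejected as expected: $msg"   ;# "too many terms in compound SELECT"
    } else {
      puts "accepted: this build allows at least 501 terms"
    }
    db close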
+# +ifcapable compound { + if {$SQLITE_MAX_COMPOUND_SELECT>0} { + set sql {SELECT 0} + set result 0 + for {set i 1} {$i<$SQLITE_MAX_COMPOUND_SELECT} {incr i} { + append sql " UNION ALL SELECT $i" + lappend result $i + } + do_test select7-6.1 { + catchsql $sql + } [list 0 $result] + append sql { UNION ALL SELECT 99999999} + do_test select7-6.2 { + catchsql $sql + } {1 {too many terms in compound SELECT}} + } +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/server1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/server1.test new file mode 100644 index 0000000..134a9f5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/server1.test @@ -0,0 +1,171 @@ +# 2006 January 09 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is testing the server mode of SQLite. +# +# This file is derived from thread1.test +# +# $Id: server1.test,v 1.5 2007/08/29 18:20:17 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Skip this whole file if the server testing code is not enabled +# +if {[llength [info command client_step]]==0 || [sqlite3 -has-codec]} { + finish_test + return +} + +# The sample server implementation does not work right when memory +# management is enabled. +# +ifcapable memorymanage { + finish_test + return +} + +# Create some data to work with +# +do_test server1-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,'abcdefgh'); + INSERT INTO t1 SELECT a+1, b||b FROM t1; + INSERT INTO t1 SELECT a+2, b||b FROM t1; + INSERT INTO t1 SELECT a+4, b||b FROM t1; + SELECT count(*), max(length(b)) FROM t1; + } +} {8 64} + +# Interleave two threads on read access. Then make sure a third +# thread can write the database. In other words: +# +# read-lock A +# read-lock B +# unlock A +# unlock B +# write-lock C +# +do_test server1-1.2 { + client_create A test.db + client_create B test.db + client_create C test.db + client_compile A {SELECT a FROM t1} + client_step A + client_result A +} SQLITE_ROW +do_test server1-1.3 { + client_argc A +} 1 +do_test server1-1.4 { + client_argv A 0 +} 1 +do_test server1-1.5 { + client_compile B {SELECT b FROM t1} + client_step B + client_result B +} SQLITE_ROW +do_test server1-1.6 { + client_argc B +} 1 +do_test server1-1.7 { + client_argv B 0 +} abcdefgh +do_test server1-1.8 { + client_finalize A + client_result A +} SQLITE_OK +do_test server1-1.9 { + client_finalize B + client_result B +} SQLITE_OK +do_test server1-1.10 { + client_compile C {CREATE TABLE t2(x,y)} + client_step C + client_result C +} SQLITE_DONE +do_test server1-1.11 { + client_finalize C + client_result C +} SQLITE_OK +do_test server1-1.12 { + catchsql {SELECT name FROM sqlite_master} + execsql {SELECT name FROM sqlite_master} +} {t1 t2} + + +# Read from table t1. Do not finalize the statement. This +# will leave the lock pending. +# +do_test server1-2.1 { + client_halt * + client_create A test.db + client_compile A {SELECT a FROM t1} + client_step A + client_result A +} SQLITE_ROW + +# Read from the same table from another thread. This is allows. 
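The client_* commands used in this file come from the test-only server harness, but the locking rules they exercise are the ordinary shared-cache table locks. The same pattern can be sketched with two plain connections; this reuses the sqlite3_enable_shared_cache command that shared.test below also relies on, and an invented schema, so treat it as illustration only:

    # Run inside the testfixture shell. c1's open read transaction locks t1
    # only, so c2 may write other tables but not t1.
    sqlite3_enable_shared_cache 1
    sqlite3 c1 test.db
    sqlite3 c2 test.db
    c1 eval { CREATE TABLE IF NOT EXISTS t1(a); CREATE TABLE IF NOT EXISTS t2(x); }
    c1 eval { BEGIN; SELECT * FROM t1; }
    puts [catch {c2 eval {INSERT INTO t2 VALUES(1)}}]  ;# 0 - different table, allowed
    puts [catch {c2 eval {INSERT INTO t1 VALUES(1)}}]  ;# 1 - t1 is read-locked by c1
    c1 eval { COMMIT }
    c1 close
    c2 close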
+# +do_test server1-2.2 { + client_create B test.db + client_compile B {SELECT b FROM t1} + client_step B + client_result B +} SQLITE_ROW + +# Write to a different table from another thread. This is allowed +# because in server mode with a shared cache we have table-level locking. +# +do_test server1-2.3 { + client_create C test.db + client_compile C {INSERT INTO t2 VALUES(98,99)} + client_step C + client_result C + client_finalize C + client_result C +} SQLITE_OK + +# But we cannot insert into table t1 because threads A and B have it locked. +# +do_test server1-2.4 { + client_compile C {INSERT INTO t1 VALUES(98,99)} + client_step C + client_result C + client_finalize C + client_result C +} SQLITE_LOCKED +do_test server1-2.5 { + client_finalize B + client_wait B + client_compile C {INSERT INTO t1 VALUES(98,99)} + client_step C + client_result C + client_finalize C + client_result C +} SQLITE_LOCKED + +# Insert into t1 is successful after finishing the other two threads. +do_test server1-2.6 { + client_finalize A + client_wait A + client_compile C {INSERT INTO t1 VALUES(98,99)} + client_step C + client_result C + client_finalize C + client_result C +} SQLITE_OK + +client_halt * +sqlite3_enable_shared_cache 0 +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/shared.test b/libraries/sqlite/unix/sqlite-3.5.1/test/shared.test new file mode 100644 index 0000000..a69774f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/shared.test @@ -0,0 +1,911 @@ +# 2005 December 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: shared.test,v 1.27 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close + +ifcapable !shared_cache { + finish_test + return +} + +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +foreach av [list 0 1] { + +# Open the database connection and execute the auto-vacuum pragma +file delete -force test.db +sqlite3 db test.db + +ifcapable autovacuum { + do_test shared-[expr $av+1].1.0 { + execsql "pragma auto_vacuum=$::av" + execsql {pragma auto_vacuum} + } "$av" +} else { + if {$av} { + db close + break + } +} + +# $av is currently 0 if this loop iteration is to test with auto-vacuum turned +# off, and 1 if it is turned on. Increment it so that (1 -> no auto-vacuum) +# and (2 -> auto-vacuum). The sole reason for this is so that it looks nicer +# when we use this variable as part of test-case names. +# +incr av + +# Test organization: +# +# shared-1.*: Simple test to verify basic sanity of table level locking when +# two connections share a pager cache. +# shared-2.*: Test that a read transaction can co-exist with a +# write-transaction, including a simple test to ensure the +# external locking protocol is still working. +# shared-3.*: Simple test of read-uncommitted mode. +# shared-4.*: Check that the schema is locked and unlocked correctly. +# shared-5.*: Test that creating/dropping schema items works when databases +# are attached in different orders to different handles. +# shared-6.*: Locking, UNION ALL queries and sub-queries. +# shared-7.*: Autovacuum and shared-cache. +# shared-8.*: Tests related to the text encoding of shared-cache databases. 
+# shared-9.*: TEMP triggers and shared-cache databases. +# shared-10.*: Tests of sqlite3_close(). +# shared-11.*: Test transaction locking. +# + +do_test shared-$av.1.1 { + # Open a second database on the file test.db. It should use the same pager + # cache and schema as the original connection. Verify that only 1 file is + # opened. + sqlite3 db2 test.db + set ::sqlite_open_file_count +} {1} +do_test shared-$av.1.2 { + # Add a table and a single row of data via the first connection. + # Ensure that the second connection can see them. + execsql { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + } db + execsql { + SELECT * FROM abc; + } db2 +} {1 2 3} +do_test shared-$av.1.3 { + # Have the first connection begin a transaction and obtain a read-lock + # on table abc. This should not prevent the second connection from + # querying abc. + execsql { + BEGIN; + SELECT * FROM abc; + } + execsql { + SELECT * FROM abc; + } db2 +} {1 2 3} +do_test shared-$av.1.4 { + # Try to insert a row into abc via connection 2. This should fail because + # of the read-lock connection 1 is holding on table abc (obtained in the + # previous test case). + catchsql { + INSERT INTO abc VALUES(4, 5, 6); + } db2 +} {1 {database table is locked: abc}} +do_test shared-$av.1.5 { + # Using connection 2 (the one without the open transaction), try to create + # a new table. This should fail because of the open read transaction + # held by connection 1. + catchsql { + CREATE TABLE def(d, e, f); + } db2 +} {1 {database table is locked: sqlite_master}} +do_test shared-$av.1.6 { + # Upgrade connection 1's transaction to a write transaction. Create + # a new table - def - and insert a row into it. Because the connection 1 + # transaction modifies the schema, it should not be possible for + # connection 2 to access the database at all until the connection 1 + # has finished the transaction. + execsql { + CREATE TABLE def(d, e, f); + INSERT INTO def VALUES('IV', 'V', 'VI'); + } +} {} +do_test shared-$av.1.7 { + # Read from the sqlite_master table with connection 1 (inside the + # transaction). Then test that we can not do this with connection 2. This + # is because of the schema-modified lock established by connection 1 + # in the previous test case. + execsql { + SELECT * FROM sqlite_master; + } + catchsql { + SELECT * FROM sqlite_master; + } db2 +} {1 {database schema is locked: main}} +do_test shared-$av.1.8 { + # Commit the connection 1 transaction. + execsql { + COMMIT; + } +} {} + +do_test shared-$av.2.1 { + # Open connection db3 to the database. Use a different path to the same + # file so that db3 does *not* share the same pager cache as db and db2 + # (there should be two open file handles). + if {$::tcl_platform(platform)=="unix"} { + sqlite3 db3 ./test.db + } else { + sqlite3 db3 TEST.DB + } + set ::sqlite_open_file_count +} {2} +do_test shared-$av.2.2 { + # Start read transactions on db and db2 (the shared pager cache). Ensure + # db3 cannot write to the database. + execsql { + BEGIN; + SELECT * FROM abc; + } + execsql { + BEGIN; + SELECT * FROM abc; + } db2 + catchsql { + INSERT INTO abc VALUES(1, 2, 3); + } db2 +} {1 {database table is locked: abc}} +do_test shared-$av.2.3 { + # Turn db's transaction into a write-transaction. db3 should still be + # able to read from table def (but will not see the new row). Connection + # db2 should not be able to read def (because of the write-lock). + +# Todo: The failed "INSERT INTO abc ..." 
statement in the above test +# has started a write-transaction on db2 (should this be so?). This +# would prevent connection db from starting a write-transaction. So roll the +# db2 transaction back and replace it with a new read transaction. + execsql { + ROLLBACK; + BEGIN; + SELECT * FROM abc; + } db2 + + execsql { + INSERT INTO def VALUES('VII', 'VIII', 'IX'); + } + concat [ + catchsql { SELECT * FROM def; } db3 + ] [ + catchsql { SELECT * FROM def; } db2 + ] +} {0 {IV V VI} 1 {database table is locked: def}} +do_test shared-$av.2.4 { + # Commit the open transaction on db. db2 still holds a read-transaction. + # This should prevent db3 from writing to the database, but not from + # reading. + execsql { + COMMIT; + } + concat [ + catchsql { SELECT * FROM def; } db3 + ] [ + catchsql { INSERT INTO def VALUES('X', 'XI', 'XII'); } db3 + ] +} {0 {IV V VI VII VIII IX} 1 {database is locked}} + +catchsql COMMIT db2 + +do_test shared-$av.3.1.1 { + # This test case starts a linear scan of table 'seq' using a + # read-uncommitted connection. In the middle of the scan, rows are added + # to the end of the seq table (ahead of the current cursor position). + # The uncommitted rows should be included in the results of the scan. + execsql " + CREATE TABLE seq(i PRIMARY KEY, x); + INSERT INTO seq VALUES(1, '[string repeat X 500]'); + INSERT INTO seq VALUES(2, '[string repeat X 500]'); + " + execsql {SELECT * FROM sqlite_master} db2 + execsql {PRAGMA read_uncommitted = 1} db2 + + set ret [list] + db2 eval {SELECT i FROM seq ORDER BY i} { + if {$i < 4} { + set max [execsql {SELECT max(i) FROM seq}] + db eval { + INSERT INTO seq SELECT i + :max, x FROM seq; + } + } + lappend ret $i + } + set ret +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} +do_test shared-$av.3.1.2 { + # Another linear scan through table seq using a read-uncommitted connection. + # This time, delete each row as it is read. Should not affect the results of + # the scan, but the table should be empty after the scan is concluded + # (test 3.1.3 verifies this). + set ret [list] + db2 eval {SELECT i FROM seq} { + db eval {DELETE FROM seq WHERE i = :i} + lappend ret $i + } + set ret +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} +do_test shared-$av.3.1.3 { + execsql { + SELECT * FROM seq; + } +} {} + +catch {db close} +catch {db2 close} +catch {db3 close} + +#-------------------------------------------------------------------------- +# Tests shared-4.* test that the schema locking rules are applied +# correctly. i.e.: +# +# 1. All transactions require a read-lock on the schemas of databases they +# access. +# 2. Transactions that modify a database schema require a write-lock on that +# schema. +# 3. It is not possible to compile a statement while another handle has a +# write-lock on the schema. +# + +# Open two database handles db and db2. Each has a single attach database +# (as well as main): +# +# db.main -> ./test.db +# db.test2 -> ./test2.db +# db2.main -> ./test2.db +# db2.test -> ./test.db +# +file delete -force test.db +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db test.db +sqlite3 db2 test2.db +do_test shared-$av.4.1.1 { + set sqlite_open_file_count +} {2} +do_test shared-$av.4.1.2 { + execsql {ATTACH 'test2.db' AS test2} + set sqlite_open_file_count +} {2} +do_test shared-$av.4.1.3 { + execsql {ATTACH 'test.db' AS test} db2 + set sqlite_open_file_count +} {2} + +# Sanity check: Create a table in ./test.db via handle db, and test that handle +# db2 can "see" the new table immediately. 
A handle using a seperate pager +# cache would have to reload the database schema before this were possible. +# +do_test shared-$av.4.2.1 { + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE def(d, e, f); + INSERT INTO abc VALUES('i', 'ii', 'iii'); + INSERT INTO def VALUES('I', 'II', 'III'); + } +} {} +do_test shared-$av.4.2.2 { + execsql { + SELECT * FROM test.abc; + } db2 +} {i ii iii} + +# Open a read-transaction and read from table abc via handle 2. Check that +# handle 1 can read table abc. Check that handle 1 cannot modify table abc +# or the database schema. Then check that handle 1 can modify table def. +# +do_test shared-$av.4.3.1 { + execsql { + BEGIN; + SELECT * FROM test.abc; + } db2 +} {i ii iii} +do_test shared-$av.4.3.2 { + catchsql { + INSERT INTO abc VALUES('iv', 'v', 'vi'); + } +} {1 {database table is locked: abc}} +do_test shared-$av.4.3.3 { + catchsql { + CREATE TABLE ghi(g, h, i); + } +} {1 {database table is locked: sqlite_master}} +do_test shared-$av.4.3.3 { + catchsql { + INSERT INTO def VALUES('IV', 'V', 'VI'); + } +} {0 {}} +do_test shared-$av.4.3.4 { + # Cleanup: commit the transaction opened by db2. + execsql { + COMMIT + } db2 +} {} + +# Open a write-transaction using handle 1 and modify the database schema. +# Then try to execute a compiled statement to read from the same +# database via handle 2 (fails to get the lock on sqlite_master). Also +# try to compile a read of the same database using handle 2 (also fails). +# Finally, compile a read of the other database using handle 2. This +# should also fail. +# +ifcapable compound { + do_test shared-$av.4.4.1.2 { + # Sanity check 1: Check that the schema is what we think it is when viewed + # via handle 1. + execsql { + CREATE TABLE test2.ghi(g, h, i); + SELECT 'test.db:'||name FROM sqlite_master + UNION ALL + SELECT 'test2.db:'||name FROM test2.sqlite_master; + } + } {test.db:abc test.db:def test2.db:ghi} + do_test shared-$av.4.4.1.2 { + # Sanity check 2: Check that the schema is what we think it is when viewed + # via handle 2. 
+ execsql { + SELECT 'test2.db:'||name FROM sqlite_master + UNION ALL + SELECT 'test.db:'||name FROM test.sqlite_master; + } db2 + } {test2.db:ghi test.db:abc test.db:def} +} + +do_test shared-$av.4.4.2 { + set ::DB2 [sqlite3_connection_pointer db2] + set sql {SELECT * FROM abc} + set ::STMT1 [sqlite3_prepare $::DB2 $sql -1 DUMMY] + execsql { + BEGIN; + CREATE TABLE jkl(j, k, l); + } + sqlite3_step $::STMT1 +} {SQLITE_ERROR} +do_test shared-$av.4.4.3 { + sqlite3_finalize $::STMT1 +} {SQLITE_LOCKED} +do_test shared-$av.4.4.4 { + set rc [catch { + set ::STMT1 [sqlite3_prepare $::DB2 $sql -1 DUMMY] + } msg] + list $rc $msg +} {1 {(6) database schema is locked: test}} +do_test shared-$av.4.4.5 { + set rc [catch { + set ::STMT1 [sqlite3_prepare $::DB2 "SELECT * FROM ghi" -1 DUMMY] + } msg] + list $rc $msg +} {1 {(6) database schema is locked: test}} + + +catch {db2 close} +catch {db close} + +#-------------------------------------------------------------------------- +# Tests shared-5.* +# +foreach db [list test.db test1.db test2.db test3.db] { + file delete -force $db ${db}-journal +} +do_test shared-$av.5.1.1 { + sqlite3 db1 test.db + sqlite3 db2 test.db + execsql { + ATTACH 'test1.db' AS test1; + ATTACH 'test2.db' AS test2; + ATTACH 'test3.db' AS test3; + } db1 + execsql { + ATTACH 'test3.db' AS test3; + ATTACH 'test2.db' AS test2; + ATTACH 'test1.db' AS test1; + } db2 +} {} +do_test shared-$av.5.1.2 { + execsql { + CREATE TABLE test1.t1(a, b); + CREATE INDEX test1.i1 ON t1(a, b); + } db1 +} {} +ifcapable view { + do_test shared-$av.5.1.3 { + execsql { + CREATE VIEW test1.v1 AS SELECT * FROM t1; + } db1 + } {} +} +ifcapable trigger { + do_test shared-$av.5.1.4 { + execsql { + CREATE TRIGGER test1.trig1 AFTER INSERT ON t1 BEGIN + INSERT INTO t1 VALUES(new.a, new.b); + END; + } db1 + } {} +} +do_test shared-$av.5.1.5 { + execsql { + DROP INDEX i1; + } db2 +} {} +ifcapable view { + do_test shared-$av.5.1.6 { + execsql { + DROP VIEW v1; + } db2 + } {} +} +ifcapable trigger { + do_test shared-$av.5.1.7 { + execsql { + DROP TRIGGER trig1; + } db2 + } {} +} +do_test shared-$av.5.1.8 { + execsql { + DROP TABLE t1; + } db2 +} {} +ifcapable compound { + do_test shared-$av.5.1.9 { + execsql { + SELECT * FROM sqlite_master UNION ALL SELECT * FROM test1.sqlite_master + } db1 + } {} +} + +#-------------------------------------------------------------------------- +# Tests shared-6.* test that a query obtains all the read-locks it needs +# before starting execution of the query. This means that there is no chance +# some rows of data will be returned before a lock fails and SQLITE_LOCK +# is returned. +# +do_test shared-$av.6.1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t2 VALUES(3, 4); + } db1 +} {} +ifcapable compound { + do_test shared-$av.6.1.2 { + execsql { + SELECT * FROM t1 UNION ALL SELECT * FROM t2; + } db2 + } {1 2 3 4} +} +do_test shared-$av.6.1.3 { + # Establish a write lock on table t2 via connection db2. Then make a + # UNION all query using connection db1 that first accesses t1, followed + # by t2. If the locks are grabbed at the start of the statement (as + # they should be), no rows are returned. If (as was previously the case) + # they are grabbed as the tables are accessed, the t1 rows will be + # returned before the query fails. 
+ # + execsql { + BEGIN; + INSERT INTO t2 VALUES(5, 6); + } db2 + set ret [list] + catch { + db1 eval {SELECT * FROM t1 UNION ALL SELECT * FROM t2} { + lappend ret $a $b + } + } + set ret +} {} +do_test shared-$av.6.1.4 { + execsql { + COMMIT; + BEGIN; + INSERT INTO t1 VALUES(7, 8); + } db2 + set ret [list] + catch { + db1 eval { + SELECT (CASE WHEN a>4 THEN (SELECT a FROM t1) ELSE 0 END) AS d FROM t2; + } { + lappend ret $d + } + } + set ret +} {} + +catch {db1 close} +catch {db2 close} +foreach f [list test.db test2.db] { + file delete -force $f ${f}-journal +} + +#-------------------------------------------------------------------------- +# Tests shared-7.* test auto-vacuum does not invalidate cursors from +# other shared-cache users when it reorganizes the database on +# COMMIT. +# +do_test shared-$av.7.1 { + # This test case sets up a test database in auto-vacuum mode consisting + # of two tables, t1 and t2. Both have a single index. Table t1 is + # populated first (so consists of pages toward the start of the db file), + # t2 second (pages toward the end of the file). + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + CREATE TABLE t2(a PRIMARY KEY, b); + } + set ::contents {} + for {set i 0} {$i < 100} {incr i} { + set a [string repeat "$i " 20] + set b [string repeat "$i " 20] + db eval { + INSERT INTO t1 VALUES(:a, :b); + } + lappend ::contents [list [expr $i+1] $a $b] + } + execsql { + INSERT INTO t2 SELECT * FROM t1; + COMMIT; + } +} {} +do_test shared-$av.7.2 { + # This test case deletes the contents of table t1 (the one at the start of + # the file) while many cursors are open on table t2 and it's index. All of + # the non-root pages will be moved from the end to the start of the file + # when the DELETE is committed - this test verifies that moving the pages + # does not disturb the open cursors. + # + + proc lockrow {db tbl oids body} { + set ret [list] + db eval "SELECT oid AS i, a, b FROM $tbl ORDER BY a" { + if {$i==[lindex $oids 0]} { + set noids [lrange $oids 1 end] + if {[llength $noids]==0} { + set subret [eval $body] + } else { + set subret [lockrow $db $tbl $noids $body] + } + } + lappend ret [list $i $a $b] + } + return [linsert $subret 0 $ret] + } + proc locktblrows {db tbl body} { + set oids [db eval "SELECT oid FROM $tbl"] + lockrow $db $tbl $oids $body + } + + set scans [locktblrows db t2 { + execsql { + DELETE FROM t1; + } db2 + }] + set error 0 + + # Test that each SELECT query returned the expected contents of t2. + foreach s $scans { + if {[lsort -integer -index 0 $s]!=$::contents} { + set error 1 + } + } + set error +} {0} + +catch {db close} +catch {db2 close} +unset -nocomplain contents + +#-------------------------------------------------------------------------- +# The following tests try to trick the shared-cache code into assuming +# the wrong encoding for a database. 
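The trick these cases attempt only works because a database's text encoding is not fixed until some connection writes the schema; after that, PRAGMA encoding is silently ignored. A stand-alone illustration of that rule with an ordinary (non-shared-cache) connection, assuming a UTF-16-capable build and an arbitrary file name:

    package require sqlite3
    file delete -force enc-demo.db
    sqlite3 db enc-demo.db
    db eval { PRAGMA encoding = 'UTF-16' }   ;# only a request - nothing written yet
    db eval { CREATE TABLE t(x) }            ;# first write pins the encoding
    puts [db eval { PRAGMA encoding }]       ;# UTF-16le or UTF-16be
    db eval { PRAGMA encoding = 'UTF-8' }    ;# too late - silently ignored
    puts [db eval { PRAGMA encoding }]       ;# still UTF-16le or UTF-16be
    db close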
+# +file delete -force test.db test.db-journal +ifcapable utf16 { + do_test shared-$av.8.1.1 { + sqlite3 db test.db + execsql { + PRAGMA encoding = 'UTF-16'; + SELECT * FROM sqlite_master; + } + } {} + do_test shared-$av.8.1.2 { + string range [execsql {PRAGMA encoding;}] 0 end-2 + } {UTF-16} + do_test shared-$av.8.1.3 { + sqlite3 db2 test.db + execsql { + PRAGMA encoding = 'UTF-8'; + CREATE TABLE abc(a, b, c); + } db2 + } {} + do_test shared-$av.8.1.4 { + execsql { + SELECT * FROM sqlite_master; + } + } "table abc abc [expr $AUTOVACUUM?3:2] {CREATE TABLE abc(a, b, c)}" + do_test shared-$av.8.1.5 { + db2 close + execsql { + PRAGMA encoding; + } + } {UTF-8} + file delete -force test2.db test2.db-journal + do_test shared-$av.8.2.1 { + execsql { + ATTACH 'test2.db' AS aux; + SELECT * FROM aux.sqlite_master; + } + } {} + do_test shared-$av.8.2.2 { + sqlite3 db2 test2.db + execsql { + PRAGMA encoding = 'UTF-16'; + CREATE TABLE def(d, e, f); + } db2 + string range [execsql {PRAGMA encoding;} db2] 0 end-2 + } {UTF-16} + +# Bug #2547 is causing this to fail. +if 0 { + do_test shared-$av.8.2.3 { + catchsql { + SELECT * FROM aux.sqlite_master; + } + } {1 {attached databases must use the same text encoding as main database}} +} +} + +catch {db close} +catch {db2 close} +file delete -force test.db test2.db + +#--------------------------------------------------------------------------- +# The following tests - shared-9.* - test interactions between TEMP triggers +# and shared-schemas. +# +ifcapable trigger&&tempdb { + +do_test shared-$av.9.1 { + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE abc_mirror(a, b, c); + CREATE TEMP TRIGGER BEFORE INSERT ON abc BEGIN + INSERT INTO abc_mirror(a, b, c) VALUES(new.a, new.b, new.c); + END; + INSERT INTO abc VALUES(1, 2, 3); + SELECT * FROM abc_mirror; + } +} {1 2 3} +do_test shared-$av.9.2 { + execsql { + INSERT INTO abc VALUES(4, 5, 6); + SELECT * FROM abc_mirror; + } db2 +} {1 2 3} +do_test shared-$av.9.3 { + db close + db2 close +} {} + +} ; # End shared-9.* + +#--------------------------------------------------------------------------- +# The following tests - shared-10.* - test that the library behaves +# correctly when a connection to a shared-cache is closed. +# +do_test shared-$av.10.1 { + # Create a small sample database with two connections to it (db and db2). + file delete -force test.db + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + CREATE TABLE ab(a PRIMARY KEY, b); + CREATE TABLE de(d PRIMARY KEY, e); + INSERT INTO ab VALUES('Chiang Mai', 100000); + INSERT INTO ab VALUES('Bangkok', 8000000); + INSERT INTO de VALUES('Ubon', 120000); + INSERT INTO de VALUES('Khon Kaen', 200000); + } +} {} +do_test shared-$av.10.2 { + # Open a read-transaction with the first connection, a write-transaction + # with the second. + execsql { + BEGIN; + SELECT * FROM ab; + } + execsql { + BEGIN; + INSERT INTO de VALUES('Pataya', 30000); + } db2 +} {} +do_test shared-$av.10.3 { + # An external connection should be able to read the database, but not + # prepare a write operation. + if {$::tcl_platform(platform)=="unix"} { + sqlite3 db3 ./test.db + } else { + sqlite3 db3 TEST.DB + } + execsql { + SELECT * FROM ab; + } db3 + catchsql { + BEGIN; + INSERT INTO de VALUES('Pataya', 30000); + } db3 +} {1 {database is locked}} +do_test shared-$av.10.4 { + # Close the connection with the write-transaction open + db2 close +} {} +do_test shared-$av.10.5 { + # Test that the db2 transaction has been automatically rolled back. 
+ # If it has not the ('Pataya', 30000) entry will still be in the table. + execsql { + SELECT * FROM de; + } +} {Ubon 120000 {Khon Kaen} 200000} +do_test shared-$av.10.5 { + # Closing db2 should have dropped the shared-cache back to a read-lock. + # So db3 should be able to prepare a write... + catchsql {INSERT INTO de VALUES('Pataya', 30000);} db3 +} {0 {}} +do_test shared-$av.10.6 { + # ... but not commit it. + catchsql {COMMIT} db3 +} {1 {database is locked}} +do_test shared-$av.10.7 { + # Commit the (read-only) db transaction. Check via db3 to make sure the + # contents of table "de" are still as they should be. + execsql { + COMMIT; + } + execsql { + SELECT * FROM de; + } db3 +} {Ubon 120000 {Khon Kaen} 200000 Pataya 30000} +do_test shared-$av.10.9 { + # Commit the external transaction. + catchsql {COMMIT} db3 +} {0 {}} +integrity_check shared-$av.10.10 +do_test shared-$av.10.11 { + db close + db3 close +} {} + +do_test shared-$av.11.1 { + file delete -force test.db + sqlite3 db test.db + sqlite3 db2 test.db + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE abc2(a, b, c); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + } +} {} +do_test shared-$av.11.2 { + catchsql {BEGIN;} db2 + catchsql {SELECT * FROM abc;} db2 +} {1 {database table is locked: abc}} +do_test shared-$av.11.3 { + catchsql {BEGIN} db2 +} {1 {cannot start a transaction within a transaction}} +do_test shared-$av.11.4 { + catchsql {SELECT * FROM abc2;} db2 +} {0 {}} +do_test shared-$av.11.5 { + catchsql {INSERT INTO abc2 VALUES(1, 2, 3);} db2 +} {1 {database is locked}} +do_test shared-$av.11.6 { + catchsql {SELECT * FROM abc2} +} {0 {}} +do_test shared-$av.11.6 { + execsql { + ROLLBACK; + PRAGMA read_uncommitted = 1; + } db2 +} {} +do_test shared-$av.11.7 { + execsql { + INSERT INTO abc2 VALUES(4, 5, 6); + INSERT INTO abc2 VALUES(7, 8, 9); + } +} {} +do_test shared-$av.11.8 { + set res [list] + breakpoint + db2 eval { + SELECT abc.a as I, abc2.a as II FROM abc, abc2; + } { + execsql { + DELETE FROM abc WHERE 1; + } + lappend res $I $II + } + set res +} {1 4 {} 7} +if {[llength [info command sqlite3_shared_cache_report]]==1} { + do_test shared-$av.11.9 { + sqlite3_shared_cache_report + } [list [file normalize test.db] 2] +} + +do_test shared-$av.11.11 { + db close + db2 close +} {} + +# This tests that if it is impossible to free any pages, SQLite will +# exceed the limit set by PRAGMA cache_size. 
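PRAGMA cache_size is a soft limit: pages can only be recycled when nothing is using them, and the shared-$av.12.* cases below pin pages by holding open cursors on fourteen connections at once, so the cache must grow past the configured value. Reading the limit back reports the setting, not current usage; a trivial sketch with the ordinary bindings and an arbitrary value:

    package require sqlite3
    sqlite3 db :memory:
    db eval { PRAGMA cache_size = 10 }
    puts [db eval { PRAGMA cache_size }]   ;# 10 - the configured ceiling, not pages in use
    db close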
+file delete -force test.db test.db-journal +sqlite3 db test.db +ifcapable pager_pragmas { + do_test shared-$av.12.1 { + execsql { + PRAGMA cache_size = 10; + PRAGMA cache_size; + } + } {10} +} +do_test shared-$av.12.2 { + set ::db_handles [list] + for {set i 1} {$i < 15} {incr i} { + lappend ::db_handles db$i + sqlite3 db$i test.db + execsql "CREATE TABLE db${i}(a, b, c)" db$i + execsql "INSERT INTO db${i} VALUES(1, 2, 3)" + } +} {} +proc nested_select {handles} { + [lindex $handles 0] eval "SELECT * FROM [lindex $handles 0]" { + lappend ::res $a $b $c + if {[llength $handles]>1} { + nested_select [lrange $handles 1 end] + } + } +} +do_test shared-$av.12.3 { + set ::res [list] + nested_select $::db_handles + set ::res +} [string range [string repeat "1 2 3 " [llength $::db_handles]] 0 end-1] + +do_test shared-$av.12.X { + db close + foreach h $::db_handles { + $h close + } +} {} + +} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/shared2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/shared2.test new file mode 100644 index 0000000..5f18f8b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/shared2.test @@ -0,0 +1,131 @@ +# 2005 January 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: shared2.test,v 1.5 2007/08/23 02:47:54 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close + +ifcapable !shared_cache { + finish_test + return +} +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +# Test that if we delete all rows from a table any read-uncommitted +# cursors are correctly invalidated. Test on both table and index btrees. +do_test shared2-1.1 { + sqlite3 db1 test.db + sqlite3 db2 test.db + + # Set up some data. Table "numbers" has 64 rows after this block + # is executed. + execsql { + BEGIN; + CREATE TABLE numbers(a PRIMARY KEY, b); + INSERT INTO numbers(oid) VALUES(NULL); + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + INSERT INTO numbers(oid) SELECT NULL FROM numbers; + UPDATE numbers set a = oid, b = 'abcdefghijklmnopqrstuvwxyz0123456789'; + COMMIT; + } db1 +} {} +do_test shared2-1.2 { + # Put connection 2 in read-uncommitted mode and start a SELECT on table + # 'numbers'. Half way through the SELECT, use connection 1 to delete the + # contents of this table. + execsql { + pragma read_uncommitted = 1; + } db2 + set count [execsql {SELECT count(*) FROM numbers} db2] + db2 eval {SELECT a FROM numbers ORDER BY oid} { + if {$a==32} { + execsql { + BEGIN; + DELETE FROM numbers; + } db1 + } + } + list $a $count +} {32 64} +do_test shared2-1.3 { + # Same test as 1.2, except scan using the index this time. 
+ execsql { + ROLLBACK; + } db1 + set count [execsql {SELECT count(*) FROM numbers} db2] + db2 eval {SELECT a, b FROM numbers ORDER BY a} { + if {$a==32} { + execsql { + DELETE FROM numbers; + } db1 + } + } + list $a $count +} {32 64} + +#--------------------------------------------------------------------------- +# These tests, shared2.2.*, test the outcome when data is added to or +# removed from a table due to a rollback while a read-uncommitted +# cursor is scanning it. +# +do_test shared2-2.1 { + execsql { + INSERT INTO numbers VALUES(1, 'Medium length text field'); + INSERT INTO numbers VALUES(2, 'Medium length text field'); + INSERT INTO numbers VALUES(3, 'Medium length text field'); + INSERT INTO numbers VALUES(4, 'Medium length text field'); + BEGIN; + DELETE FROM numbers WHERE (a%2)=0; + } db1 + set res [list] + db2 eval { + SELECT a FROM numbers ORDER BY a; + } { + lappend res $a + if {$a==3} { + execsql {ROLLBACK} db1 + } + } + set res +} {1 3 4} +do_test shared2-2.2 { + execsql { + BEGIN; + INSERT INTO numbers VALUES(5, 'Medium length text field'); + INSERT INTO numbers VALUES(6, 'Medium length text field'); + } db1 + set res [list] + db2 eval { + SELECT a FROM numbers ORDER BY a; + } { + lappend res $a + if {$a==5} { + execsql {ROLLBACK} db1 + } + } + set res +} {1 2 3 4 5} + +db1 close +db2 close + +do_test shared2-3.2 { + sqlite3_enable_shared_cache 1 +} {1} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/shared3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/shared3.test new file mode 100644 index 0000000..4cef49c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/shared3.test @@ -0,0 +1,47 @@ +# 2005 January 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: shared3.test,v 1.1 2006/05/24 12:43:28 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close + +ifcapable !shared_cache { + finish_test + return +} +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +# Ticket #1824 +# +do_test shared3-1.1 { + file delete -force test.db test.db-journal + sqlite3 db1 test.db + db1 eval { + PRAGMA encoding=UTF16; + CREATE TABLE t1(x,y); + INSERT INTO t1 VALUES('abc','This is a test string'); + } + db1 close + sqlite3 db1 test.db + db1 eval {SELECT * FROM t1} +} {abc {This is a test string}} +do_test shared3-1.2 { + sqlite3 db2 test.db + db2 eval {SELECT y FROM t1 WHERE x='abc'} +} {{This is a test string}} + +db1 close +db2 close + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/shared_err.test b/libraries/sqlite/unix/sqlite-3.5.1/test/shared_err.test new file mode 100644 index 0000000..7c1794a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/shared_err.test @@ -0,0 +1,463 @@ +# 2005 December 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# The focus of the tests in this file are IO errors that occur in a shared +# cache context. What happens to connection B if one connection A encounters +# an IO-error whilst reading or writing the file-system? +# +# $Id: shared_err.test,v 1.17 2007/09/03 16:12:10 drh Exp $ + +proc skip {args} {} + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl +db close + +ifcapable !shared_cache||!subquery { + finish_test + return +} + +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +do_ioerr_test shared_ioerr-1 -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + CREATE TABLE t1(a,b,c); + BEGIN; + SELECT * FROM sqlite_master; + } db2 +} -sqlbody { + SELECT * FROM sqlite_master; + INSERT INTO t1 VALUES(1,2,3); + BEGIN TRANSACTION; + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(4,5,6); + ROLLBACK; + SELECT * FROM t1; + BEGIN TRANSACTION; + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(4,5,6); + COMMIT; + SELECT * FROM t1; + DELETE FROM t1 WHERE a<100; +} -cleanup { + do_test shared_ioerr-1.$n.cleanup.1 { + set res [catchsql { + SELECT * FROM t1; + } db2] + set possible_results [list \ + "1 {disk I/O error}" \ + "0 {1 2 3}" \ + "0 {1 2 3 1 2 3 4 5 6}" \ + "0 {1 2 3 1 2 3 4 5 6 1 2 3 4 5 6}" \ + "0 {}" \ + ] + set rc [expr [lsearch -exact $possible_results $res] >= 0] + if {$rc != 1} { + puts "" + puts "Result: $res" + } + set rc + } {1} + db2 close +} + +do_ioerr_test shared_ioerr-2 -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1(oid) VALUES(NULL); + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + INSERT INTO t1(oid) SELECT NULL FROM t1; + UPDATE t1 set a = oid, b = 'abcdefghijklmnopqrstuvwxyz0123456789'; + CREATE INDEX i1 ON t1(a); + COMMIT; + BEGIN; + SELECT * FROM sqlite_master; + } db2 +} -tclbody { + set ::residx 0 + execsql {DELETE FROM t1 WHERE 0 = (a % 2);} + incr ::residx + + # When this transaction begins the table contains 512 entries. The + # two statements together add 512+146 more if it succeeds. + # (1024/7==146) + execsql {BEGIN;} + execsql {INSERT INTO t1 SELECT a+1, b FROM t1;} + execsql {INSERT INTO t1 SELECT 'string' || a, b FROM t1 WHERE 0 = (a%7);} + execsql {COMMIT;} + + incr ::residx +} -cleanup { + do_test shared_ioerr-2.$n.cleanup.1 { + set res [catchsql { + SELECT max(a), min(a), count(*) FROM (SELECT a FROM t1 order by a); + } db2] + set possible_results [list \ + {0 {1024 1 1024}} \ + {0 {1023 1 512}} \ + {0 {string994 1 1170}} \ + ] + set idx [lsearch -exact $possible_results $res] + set success [expr {$idx==$::residx || $res=="1 {disk I/O error}"}] + if {!$success} { + puts "" + puts "Result: \"$res\" ($::residx)" + } + set success + } {1} + db2 close +} + +# This test is designed to provoke an IO error when a cursor position is +# "saved" (because another cursor is going to modify the underlying table). 
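Cursor positions get "saved" whenever another cursor attached to the same shared B-tree is about to modify it. The scenario itself needs no fault injection and can be sketched with two shared-cache connections; this reuses the harness's sqlite3_enable_shared_cache switch and an invented table, so it is illustration only:

    # Run inside the testfixture shell. While the read-uncommitted reader is
    # part-way through its scan, the writer updates the same table, which
    # forces the reader's cursor position to be saved and later restored.
    sqlite3_enable_shared_cache 1
    sqlite3 rdr test.db
    sqlite3 wtr test.db
    wtr eval { CREATE TABLE IF NOT EXISTS t(a PRIMARY KEY, b) }
    wtr eval { DELETE FROM t }
    foreach a {1 2 3 4} { wtr eval { INSERT INTO t VALUES($a, 'row') } }
    rdr eval { PRAGMA read_uncommitted = 1 }
    rdr eval { SELECT a FROM t ORDER BY a } {
      if {$a == 2} { wtr eval { UPDATE t SET b = 'changed' WHERE a = 3 } }
    }
    rdr close
    wtr close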
+# +do_ioerr_test shared_ioerr-3 -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(a, b, UNIQUE(a, b)); + } db2 + for {set i 0} {$i < 200} {incr i} { + set a [string range [string repeat "[format %03d $i]." 5] 0 end-1] + + set b [string repeat $i 2000] + execsql {INSERT INTO t1 VALUES($a, $b)} db2 + } + execsql {COMMIT} db2 + set ::DB2 [sqlite3_connection_pointer db2] + set ::STMT [sqlite3_prepare $::DB2 "SELECT a FROM t1 ORDER BY a" -1 DUMMY] + sqlite3_step $::STMT ;# Cursor points at 000.000.000.000 + sqlite3_step $::STMT ;# Cursor points at 001.001.001.001 + +} -tclbody { + execsql { + BEGIN; + INSERT INTO t1 VALUES('201.201.201.201.201', NULL); + UPDATE t1 SET a = '202.202.202.202.202' WHERE a LIKE '201%'; + COMMIT; + } +} -cleanup { + set ::steprc [sqlite3_step $::STMT] + set ::column [sqlite3_column_text $::STMT 0] + set ::finalrc [sqlite3_finalize $::STMT] + + # There are three possible outcomes here (assuming persistent IO errors): + # + # 1. If the [sqlite3_step] did not require any IO (required pages in + # the cache), then the next row ("002...") may be retrieved + # successfully. + # + # 2. If the [sqlite3_step] does require IO, then [sqlite3_step] returns + # SQLITE_ERROR and [sqlite3_finalize] returns IOERR. + # + # 3. If, after the initial IO error, SQLite tried to rollback the + # active transaction and a second IO error was encountered, then + # statement $::STMT will have been aborted. This means [sqlite3_stmt] + # returns SQLITE_ABORT, and the statement cursor does not move. i.e. + # [sqlite3_column] still returns the current row ("001...") and + # [sqlite3_finalize] returns SQLITE_OK. + # + + do_test shared_ioerr-3.$n.cleanup.1 { + expr { + $::steprc eq "SQLITE_ROW" || + $::steprc eq "SQLITE_ERROR" || + $::steprc eq "SQLITE_ABORT" + } + } {1} + do_test shared_ioerr-3.$n.cleanup.2 { + expr { + ($::steprc eq "SQLITE_ROW" && $::column eq "002.002.002.002.002") || + ($::steprc eq "SQLITE_ERROR" && $::column eq "") || + ($::steprc eq "SQLITE_ABORT" && $::column eq "001.001.001.001.001") + } + } {1} + do_test shared_ioerr-3.$n.cleanup.3 { + expr { + ($::steprc eq "SQLITE_ROW" && $::finalrc eq "SQLITE_OK") || + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_IOERR") || + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_ABORT") + } + } {1} + +# db2 eval {select * from sqlite_master} + db2 close +} + +# This is a repeat of the previous test except that this time we +# are doing a reverse-order scan of the table when the cursor is +# "saved". +# +do_ioerr_test shared_ioerr-3rev -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(a, b, UNIQUE(a, b)); + } db2 + for {set i 0} {$i < 200} {incr i} { + set a [string range [string repeat "[format %03d $i]." 
5] 0 end-1] + + set b [string repeat $i 2000] + execsql {INSERT INTO t1 VALUES($a, $b)} db2 + } + execsql {COMMIT} db2 + set ::DB2 [sqlite3_connection_pointer db2] + set ::STMT [sqlite3_prepare $::DB2 \ + "SELECT a FROM t1 ORDER BY a DESC" -1 DUMMY] + sqlite3_step $::STMT ;# Cursor points at 199.199.199.199.199 + sqlite3_step $::STMT ;# Cursor points at 198.198.198.198.198 + +} -tclbody { + execsql { + BEGIN; + INSERT INTO t1 VALUES('201.201.201.201.201', NULL); + UPDATE t1 SET a = '202.202.202.202.202' WHERE a LIKE '201%'; + COMMIT; + } +} -cleanup { + set ::steprc [sqlite3_step $::STMT] + set ::column [sqlite3_column_text $::STMT 0] + set ::finalrc [sqlite3_finalize $::STMT] + + # There are three possible outcomes here (assuming persistent IO errors): + # + # 1. If the [sqlite3_step] did not require any IO (required pages in + # the cache), then the next row ("002...") may be retrieved + # successfully. + # + # 2. If the [sqlite3_step] does require IO, then [sqlite3_step] returns + # SQLITE_ERROR and [sqlite3_finalize] returns IOERR. + # + # 3. If, after the initial IO error, SQLite tried to rollback the + # active transaction and a second IO error was encountered, then + # statement $::STMT will have been aborted. This means [sqlite3_stmt] + # returns SQLITE_ABORT, and the statement cursor does not move. i.e. + # [sqlite3_column] still returns the current row ("001...") and + # [sqlite3_finalize] returns SQLITE_OK. + # + + do_test shared_ioerr-3rev.$n.cleanup.1 { + expr { + $::steprc eq "SQLITE_ROW" || + $::steprc eq "SQLITE_ERROR" || + $::steprc eq "SQLITE_ABORT" + } + } {1} + do_test shared_ioerr-3rev.$n.cleanup.2 { + expr { + ($::steprc eq "SQLITE_ROW" && $::column eq "197.197.197.197.197") || + ($::steprc eq "SQLITE_ERROR" && $::column eq "") || + ($::steprc eq "SQLITE_ABORT" && $::column eq "198.198.198.198.198") + } + } {1} + do_test shared_ioerr-3rev.$n.cleanup.3 { + expr { + ($::steprc eq "SQLITE_ROW" && $::finalrc eq "SQLITE_OK") || + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_IOERR") || + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_ABORT") + } + } {1} + +# db2 eval {select * from sqlite_master} + db2 close +} + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping tests shared_err-4 through -9:\ + not compiled with -DSQLITE_MEMDEBUG..." + db close + sqlite3_enable_shared_cache $::enable_shared_cache + finish_test + return +} + +# Provoke a malloc() failure when a cursor position is being saved. This +# only happens with index cursors (because they malloc() space to save the +# current key value). It does not happen with tables, because an integer +# key does not require a malloc() to store. +# +# The library should return an SQLITE_NOMEM to the caller. The query that +# owns the cursor (the one for which the position is not saved) should +# continue unaffected. 
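From the application's point of view the injected failure simply surfaces as an error from whichever statement needed the allocation; in the Tcl interface that means a caught error plus a numeric code from the connection's errorcode method (7 is SQLITE_NOMEM, 10 is SQLITE_IOERR). A sketch of the checking pattern, with no fault injection of its own and a table name borrowed from the prep script above:

    package require sqlite3
    sqlite3 db test.db
    if {[catch {db eval {SELECT a FROM t1 ORDER BY a}} msg]} {
      puts "query failed: $msg (errorcode [db errorcode])"
    } else {
      puts "query ran to completion"
    }
    db close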
+# +do_malloc_test shared_err-4 -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + BEGIN; + CREATE TABLE t1(a, b, UNIQUE(a, b)); + } db2 + for {set i 0} {$i < 5} {incr i} { + set a [string repeat $i 10] + set b [string repeat $i 2000] + execsql {INSERT INTO t1 VALUES($a, $b)} db2 + } + execsql {COMMIT} db2 + set ::DB2 [sqlite3_connection_pointer db2] + set ::STMT [sqlite3_prepare $::DB2 "SELECT a FROM t1 ORDER BY a" -1 DUMMY] + sqlite3_step $::STMT ;# Cursor points at 0000000000 + sqlite3_step $::STMT ;# Cursor points at 1111111111 +} -tclbody { + execsql { + INSERT INTO t1 VALUES(6, NULL); + } +} -cleanup { + do_test shared_malloc-4.$::n.cleanup.1 { + set ::rc [sqlite3_step $::STMT] + expr {$::rc=="SQLITE_ROW" || $::rc=="SQLITE_ERROR"} + } {1} + if {$::rc=="SQLITE_ROW"} { + do_test shared_malloc-4.$::n.cleanup.2 { + sqlite3_column_text $::STMT 0 + } {2222222222} + } + do_test shared_malloc-4.$::n.cleanup.3 { + set rc [sqlite3_finalize $::STMT] + expr {$rc=="SQLITE_OK" || $rc=="SQLITE_ABORT" || $rc=="SQLITE_NOMEM"} + } {1} +# db2 eval {select * from sqlite_master} + db2 close +} + +do_malloc_test shared_err-5 -tclbody { + db close + sqlite3 dbX test.db + sqlite3 dbY test.db + dbX close + dbY close +} -cleanup { + catch {dbX close} + catch {dbY close} +} + +do_malloc_test shared_err-6 -tclbody { + catch {db close} + sqlite3_thread_cleanup + sqlite3_enable_shared_cache 0 +} -cleanup { + sqlite3_enable_shared_cache 1 +} + +# As of 3.5.0, sqlite3_enable_shared_cache can be called at +# any time and from any thread +#do_test shared_err-misuse-7.1 { +# sqlite3 db test.db +# catch { +# sqlite3_enable_shared_cache 0 +# } msg +# set msg +#} {library routine called out of sequence} + +# Again provoke a malloc() failure when a cursor position is being saved, +# this time during a ROLLBACK operation by some other handle. +# +# The library should return an SQLITE_NOMEM to the caller. The query that +# owns the cursor (the one for which the position is not saved) should +# be aborted. +# +set ::aborted 0 +do_malloc_test shared_err-8 -tclprep { + sqlite3 db2 test.db + execsql { + PRAGMA read_uncommitted = 1; + BEGIN; + CREATE TABLE t1(a, b, UNIQUE(a, b)); + } db2 + for {set i 0} {$i < 2} {incr i} { + set a [string repeat $i 10] + set b [string repeat $i 2000] + execsql {INSERT INTO t1 VALUES($a, $b)} db2 + } + execsql {COMMIT} db2 + set ::DB2 [sqlite3_connection_pointer db2] + set ::STMT [sqlite3_prepare $::DB2 "SELECT a FROM t1 ORDER BY a" -1 DUMMY] + sqlite3_step $::STMT ;# Cursor points at 0000000000 + sqlite3_step $::STMT ;# Cursor points at 1111111111 +} -tclbody { + execsql { + BEGIN; + INSERT INTO t1 VALUES(6, NULL); + ROLLBACK; + } +} -cleanup { + do_test shared_malloc-8.$::n.cleanup.1 { + lrange [execsql { + SELECT a FROM t1; + } db2] 0 1 + } {0000000000 1111111111} + do_test shared_malloc-8.$::n.cleanup.2 { + set rc1 [sqlite3_step $::STMT] + set rc2 [sqlite3_finalize $::STMT] + if {$rc2=="SQLITE_ABORT"} { + incr ::aborted + } + expr { + ($rc1=="SQLITE_DONE" && $rc2=="SQLITE_OK") || + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_ABORT") || + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_NOMEM") + } + } {1} + db2 close +} +do_test shared_malloc-8.X { + # Test that one or more queries were aborted due to the malloc() failure. + expr $::aborted>=1 +} {1} + +# This test is designed to catch a specific bug that was present during +# development of 3.5.0. If a malloc() failed while setting the page-size, +# a buffer (Pager.pTmpSpace) was being freed. 
This could cause a seg-fault +# later if another connection tried to use the pager. +# +# This test will crash 3.4.2. +# +do_malloc_test shared_err-9 -tclprep { + sqlite3 db2 test.db +} -sqlbody { + PRAGMA page_size = 4096; + PRAGMA page_size = 1024; +} -cleanup { + db2 eval { + CREATE TABLE abc(a, b, c); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + ROLLBACK; + } + db2 close +} + + +catch {db close} +catch {db2 close} +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/shortread1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/shortread1.test new file mode 100644 index 0000000..647f2ff --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/shortread1.test @@ -0,0 +1,52 @@ +# 2007 Sep 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file attempts to duplicate an error scenario seen on a +# customer system using version 3.2.2. The problem appears to +# have been fixed (perhaps by accident) with check-in [3503]. +# These tests will prevent an accidental recurrance. +# +# $Id: shortread1.test,v 1.1 2007/09/14 01:48:12 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test shortread1-1.1 { + execsql { + CREATE TABLE t1(a TEXT); + BEGIN; + INSERT INTO t1 VALUES(hex(randomblob(5000))); + INSERT INTO t1 VALUES(hex(randomblob(100))); + PRAGMA freelist_count; + } +} {0} +do_test shortread1-1.2 { + execsql { + DELETE FROM t1 WHERE rowid=1; + PRAGMA freelist_count; + } +} {11} +do_test shortread1-1.3 { + sqlite3_release_memory [expr {1024*9}] + execsql { + INSERT INTO t1 VALUES(hex(randomblob(5000))); + PRAGMA freelist_count; + } +} {0} +do_test shortread1-1.4 { + execsql { + COMMIT; + SELECT count(*) FROM t1; + } +} {2} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/soak.test b/libraries/sqlite/unix/sqlite-3.5.1/test/soak.test new file mode 100644 index 0000000..089264f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/soak.test @@ -0,0 +1,90 @@ +# 2007 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file is the driver for the "soak" tests. It is a peer of the +# quick.test and all.test scripts. +# +# $Id: soak.test,v 1.2 2007/05/30 10:36:47 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test +proc finish_test {} {} + +# By default, guarantee that the tests will run for at least 1 hour. +# +set TIMEOUT 3600 + +# Process command-line arguments. +# +if {[llength $argv]>0} { + foreach {name value} $argv { + switch -- $name { + -timeout { + set TIMEOUT $value + } + default { + puts stderr "Unknown option: $name" + exit + } + } + } +} +set argv [list] + +# Test plan: +# +# The general principle is to run those SQLite tests that use +# pseudo-random data in some way over and over again for a very +# long time. 
The number of tests run depends on the value of +# global variable $TIMEOUT - tests are run for at least $TIMEOUT +# seconds. +# +# fuzz.test (pseudo-random SQL statements) +# trans.test (pseudo-random changes to a database followed by rollbacks) +# +# fuzzy malloc? +# +# Many database changes maintaining some kind of invariant. +# Storing checksums etc. +# + +# List of test files that are run by this file. +# +set SOAKTESTS { + fuzz.test + fuzz_malloc.test + trans.test +} + +set ISQUICK 1 + +set soak_starttime [clock seconds] +set soak_finishtime [expr {$soak_starttime + $TIMEOUT}] + +# Loop until the timeout is reached or an error occurs. +# +for {set iRun 0} {[clock seconds] < $soak_finishtime && $nErr==0} {incr iRun} { + + set iIdx [expr {$iRun % [llength $SOAKTESTS]}] + source [file join $testdir [lindex $SOAKTESTS $iIdx]] + catch {db close} + + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } + +} + +really_finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/softheap1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/softheap1.test new file mode 100644 index 0000000..62b9251 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/softheap1.test @@ -0,0 +1,47 @@ +# 2007 Aug 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script reproduces the problem reported by ticket #2565, +# A database corruption bug that occurs in auto_vacuum mode when +# the soft_heap_limit is set low enough to be triggered. +# +# $Id: softheap1.test,v 1.3 2007/09/12 17:01:45 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !integrityck { + finish_test + return +} + +sqlite3_soft_heap_limit 5000 +do_test softheap1-1.1 { + execsql { + PRAGMA auto_vacuum=1; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(hex(randomblob(1000))); + BEGIN; + } + execsql { + CREATE TABLE t2 AS SELECT * FROM t1; + } + execsql { + ROLLBACK; + } + execsql { + PRAGMA integrity_check; + } +} {ok} +sqlite3_soft_heap_limit $soft_limit + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/sort.test b/libraries/sqlite/unix/sqlite-3.5.1/test/sort.test new file mode 100644 index 0000000..08d496b --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/sort.test @@ -0,0 +1,467 @@ +# 2001 September 15. +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CREATE TABLE statement. 
+# +# $Id: sort.test,v 1.25 2005/11/14 22:29:06 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a bunch of data to sort against +# +do_test sort-1.0 { + execsql { + CREATE TABLE t1( + n int, + v varchar(10), + log int, + roman varchar(10), + flt real + ); + INSERT INTO t1 VALUES(1,'one',0,'I',3.141592653); + INSERT INTO t1 VALUES(2,'two',1,'II',2.15); + INSERT INTO t1 VALUES(3,'three',1,'III',4221.0); + INSERT INTO t1 VALUES(4,'four',2,'IV',-0.0013442); + INSERT INTO t1 VALUES(5,'five',2,'V',-11); + INSERT INTO t1 VALUES(6,'six',2,'VI',0.123); + INSERT INTO t1 VALUES(7,'seven',2,'VII',123.0); + INSERT INTO t1 VALUES(8,'eight',3,'VIII',-1.6); + } + execsql {SELECT count(*) FROM t1} +} {8} + +do_test sort-1.1 { + execsql {SELECT n FROM t1 ORDER BY n} +} {1 2 3 4 5 6 7 8} +do_test sort-1.1.1 { + execsql {SELECT n FROM t1 ORDER BY n ASC} +} {1 2 3 4 5 6 7 8} +do_test sort-1.1.1 { + execsql {SELECT ALL n FROM t1 ORDER BY n ASC} +} {1 2 3 4 5 6 7 8} +do_test sort-1.2 { + execsql {SELECT n FROM t1 ORDER BY n DESC} +} {8 7 6 5 4 3 2 1} +do_test sort-1.3a { + execsql {SELECT v FROM t1 ORDER BY v} +} {eight five four one seven six three two} +do_test sort-1.3b { + execsql {SELECT n FROM t1 ORDER BY v} +} {8 5 4 1 7 6 3 2} +do_test sort-1.4 { + execsql {SELECT n FROM t1 ORDER BY v DESC} +} {2 3 6 7 1 4 5 8} +do_test sort-1.5 { + execsql {SELECT flt FROM t1 ORDER BY flt} +} {-11.0 -1.6 -0.0013442 0.123 2.15 3.141592653 123.0 4221.0} +do_test sort-1.6 { + execsql {SELECT flt FROM t1 ORDER BY flt DESC} +} {4221.0 123.0 3.141592653 2.15 0.123 -0.0013442 -1.6 -11.0} +do_test sort-1.7 { + execsql {SELECT roman FROM t1 ORDER BY roman} +} {I II III IV V VI VII VIII} +do_test sort-1.8 { + execsql {SELECT n FROM t1 ORDER BY log, flt} +} {1 2 3 5 4 6 7 8} +do_test sort-1.8.1 { + execsql {SELECT n FROM t1 ORDER BY log asc, flt} +} {1 2 3 5 4 6 7 8} +do_test sort-1.8.2 { + execsql {SELECT n FROM t1 ORDER BY log, flt ASC} +} {1 2 3 5 4 6 7 8} +do_test sort-1.8.3 { + execsql {SELECT n FROM t1 ORDER BY log ASC, flt asc} +} {1 2 3 5 4 6 7 8} +do_test sort-1.9 { + execsql {SELECT n FROM t1 ORDER BY log, flt DESC} +} {1 3 2 7 6 4 5 8} +do_test sort-1.9.1 { + execsql {SELECT n FROM t1 ORDER BY log ASC, flt DESC} +} {1 3 2 7 6 4 5 8} +do_test sort-1.10 { + execsql {SELECT n FROM t1 ORDER BY log DESC, flt} +} {8 5 4 6 7 2 3 1} +do_test sort-1.11 { + execsql {SELECT n FROM t1 ORDER BY log DESC, flt DESC} +} {8 7 6 4 5 3 2 1} + +# These tests are designed to reach some hard-to-reach places +# inside the string comparison routines. +# +# (Later) The sorting behavior changed in 2.7.0. But we will +# keep these tests. You can never have too many test cases! 
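+# In the tests below, ORDER BY substr(v,2,999) compares the trailing part of
+# each value as text, while appending "+0.0" coerces that text to a number
+# before sorting.  Roughly:
+#
+#   SELECT v FROM t1 ORDER BY substr(v,2,999);       -- text ordering
+#   SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0;   -- numeric ordering
+#
+# so 'x-2b' sorts between 'x-2.15' and 'x-3.141592653' as text, but as the
+# number -2 (just above -2.15) once coerced.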
+# +do_test sort-2.1.1 { + execsql { + UPDATE t1 SET v='x' || -flt; + UPDATE t1 SET v='x-2b' where v=='x-0.123'; + SELECT v FROM t1 ORDER BY v; + } +} {x-123.0 x-2.15 x-2b x-3.141592653 x-4221.0 x0.0013442 x1.6 x11.0} +do_test sort-2.1.2 { + execsql { + SELECT v FROM t1 ORDER BY substr(v,2,999); + } +} {x-123.0 x-2.15 x-2b x-3.141592653 x-4221.0 x0.0013442 x1.6 x11.0} +do_test sort-2.1.3 { + execsql { + SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0; + } +} {x-4221.0 x-123.0 x-3.141592653 x-2.15 x-2b x0.0013442 x1.6 x11.0} +do_test sort-2.1.4 { + execsql { + SELECT v FROM t1 ORDER BY substr(v,2,999) DESC; + } +} {x11.0 x1.6 x0.0013442 x-4221.0 x-3.141592653 x-2b x-2.15 x-123.0} +do_test sort-2.1.5 { + execsql { + SELECT v FROM t1 ORDER BY substr(v,2,999)+0.0 DESC; + } +} {x11.0 x1.6 x0.0013442 x-2b x-2.15 x-3.141592653 x-123.0 x-4221.0} + +# This is a bug fix for 2.2.4. +# Strings are normally mapped to upper-case for a caseless comparison. +# But this can cause problems for characters in between 'Z' and 'a'. +# +do_test sort-3.1 { + execsql { + CREATE TABLE t2(a,b); + INSERT INTO t2 VALUES('AGLIENTU',1); + INSERT INTO t2 VALUES('AGLIE`',2); + INSERT INTO t2 VALUES('AGNA',3); + SELECT a, b FROM t2 ORDER BY a; + } +} {AGLIENTU 1 AGLIE` 2 AGNA 3} +do_test sort-3.2 { + execsql { + SELECT a, b FROM t2 ORDER BY a DESC; + } +} {AGNA 3 AGLIE` 2 AGLIENTU 1} +do_test sort-3.3 { + execsql { + DELETE FROM t2; + INSERT INTO t2 VALUES('aglientu',1); + INSERT INTO t2 VALUES('aglie`',2); + INSERT INTO t2 VALUES('agna',3); + SELECT a, b FROM t2 ORDER BY a; + } +} {aglie` 2 aglientu 1 agna 3} +do_test sort-3.4 { + execsql { + SELECT a, b FROM t2 ORDER BY a DESC; + } +} {agna 3 aglientu 1 aglie` 2} + +# Version 2.7.0 testing. +# +do_test sort-4.1 { + execsql { + INSERT INTO t1 VALUES(9,'x2.7',3,'IX',4.0e5); + INSERT INTO t1 VALUES(10,'x5.0e10',3,'X',-4.0e5); + INSERT INTO t1 VALUES(11,'x-4.0e9',3,'XI',4.1e4); + INSERT INTO t1 VALUES(12,'x01234567890123456789',3,'XII',-4.2e3); + SELECT n FROM t1 ORDER BY n; + } +} {1 2 3 4 5 6 7 8 9 10 11 12} +do_test sort-4.2 { + execsql { + SELECT n||'' FROM t1 ORDER BY 1; + } +} {1 10 11 12 2 3 4 5 6 7 8 9} +do_test sort-4.3 { + execsql { + SELECT n+0 FROM t1 ORDER BY 1; + } +} {1 2 3 4 5 6 7 8 9 10 11 12} +do_test sort-4.4 { + execsql { + SELECT n||'' FROM t1 ORDER BY 1 DESC; + } +} {9 8 7 6 5 4 3 2 12 11 10 1} +do_test sort-4.5 { + execsql { + SELECT n+0 FROM t1 ORDER BY 1 DESC; + } +} {12 11 10 9 8 7 6 5 4 3 2 1} +do_test sort-4.6 { + execsql { + SELECT v FROM t1 ORDER BY 1; + } +} {x-123.0 x-2.15 x-2b x-3.141592653 x-4.0e9 x-4221.0 x0.0013442 x01234567890123456789 x1.6 x11.0 x2.7 x5.0e10} +do_test sort-4.7 { + execsql { + SELECT v FROM t1 ORDER BY 1 DESC; + } +} {x5.0e10 x2.7 x11.0 x1.6 x01234567890123456789 x0.0013442 x-4221.0 x-4.0e9 x-3.141592653 x-2b x-2.15 x-123.0} +do_test sort-4.8 { + execsql { + SELECT substr(v,2,99) FROM t1 ORDER BY 1; + } +} {-123.0 -2.15 -2b -3.141592653 -4.0e9 -4221.0 0.0013442 01234567890123456789 1.6 11.0 2.7 5.0e10} +#do_test sort-4.9 { +# execsql { +# SELECT substr(v,2,99)+0.0 FROM t1 ORDER BY 1; +# } +#} {-4000000000 -4221 -123 -3.141592653 -2.15 -2 0.0013442 1.6 2.7 11 50000000000 1.23456789012346e+18} + +do_test sort-5.1 { + execsql { + create table t3(a,b); + insert into t3 values(5,NULL); + insert into t3 values(6,NULL); + insert into t3 values(3,NULL); + insert into t3 values(4,'cd'); + insert into t3 values(1,'ab'); + insert into t3 values(2,NULL); + select a from t3 order by b, a; + } +} {2 3 5 6 1 4} +do_test sort-5.2 { + 
execsql { + select a from t3 order by b, a desc; + } +} {6 5 3 2 1 4} +do_test sort-5.3 { + execsql { + select a from t3 order by b desc, a; + } +} {4 1 2 3 5 6} +do_test sort-5.4 { + execsql { + select a from t3 order by b desc, a desc; + } +} {4 1 6 5 3 2} + +do_test sort-6.1 { + execsql { + create index i3 on t3(b,a); + select a from t3 order by b, a; + } +} {2 3 5 6 1 4} +do_test sort-6.2 { + execsql { + select a from t3 order by b, a desc; + } +} {6 5 3 2 1 4} +do_test sort-6.3 { + execsql { + select a from t3 order by b desc, a; + } +} {4 1 2 3 5 6} +do_test sort-6.4 { + execsql { + select a from t3 order by b desc, a desc; + } +} {4 1 6 5 3 2} + +do_test sort-7.1 { + execsql { + CREATE TABLE t4( + a INTEGER, + b VARCHAR(30) + ); + INSERT INTO t4 VALUES(1,1); + INSERT INTO t4 VALUES(2,2); + INSERT INTO t4 VALUES(11,11); + INSERT INTO t4 VALUES(12,12); + SELECT a FROM t4 ORDER BY 1; + } +} {1 2 11 12} +do_test sort-7.2 { + execsql { + SELECT b FROM t4 ORDER BY 1 + } +} {1 11 12 2} + +# Omit tests sort-7.3 to sort-7.8 if view support was disabled at +# compilatation time. +ifcapable view { +do_test sort-7.3 { + execsql { + CREATE VIEW v4 AS SELECT * FROM t4; + SELECT a FROM v4 ORDER BY 1; + } +} {1 2 11 12} +do_test sort-7.4 { + execsql { + SELECT b FROM v4 ORDER BY 1; + } +} {1 11 12 2} + +ifcapable compound { +do_test sort-7.5 { + execsql { + SELECT a FROM t4 UNION SELECT a FROM v4 ORDER BY 1; + } +} {1 2 11 12} +do_test sort-7.6 { + execsql { + SELECT b FROM t4 UNION SELECT a FROM v4 ORDER BY 1; + } +} {1 2 11 12 1 11 12 2} ;# text from t4.b and numeric from v4.a +do_test sort-7.7 { + execsql { + SELECT a FROM t4 UNION SELECT b FROM v4 ORDER BY 1; + } +} {1 2 11 12 1 11 12 2} ;# numeric from t4.a and text from v4.b +do_test sort-7.8 { + execsql { + SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1; + } +} {1 11 12 2} +} ;# ifcapable compound +} ;# ifcapable view + +#### Version 3 works differently here: +#do_test sort-7.9 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE numeric; +# } +#} {1 2 11 12} +#do_test sort-7.10 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE integer; +# } +#} {1 2 11 12} +#do_test sort-7.11 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE text; +# } +#} {1 11 12 2} +#do_test sort-7.12 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE blob; +# } +#} {1 11 12 2} +#do_test sort-7.13 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE clob; +# } +#} {1 11 12 2} +#do_test sort-7.14 { +# execsql { +# SELECT b FROM t4 UNION SELECT b FROM v4 ORDER BY 1 COLLATE varchar; +# } +#} {1 11 12 2} + +# Ticket #297 +# +do_test sort-8.1 { + execsql { + CREATE TABLE t5(a real, b text); + INSERT INTO t5 VALUES(100,'A1'); + INSERT INTO t5 VALUES(100.0,'A2'); + SELECT * FROM t5 ORDER BY a, b; + } +} {100.0 A1 100.0 A2} + + +ifcapable {bloblit} { +# BLOBs should sort after TEXT +# +do_test sort-9.1 { + execsql { + CREATE TABLE t6(x, y); + INSERT INTO t6 VALUES(1,1); + INSERT INTO t6 VALUES(2,'1'); + INSERT INTO t6 VALUES(3,x'31'); + INSERT INTO t6 VALUES(4,NULL); + SELECT x FROM t6 ORDER BY y; + } +} {4 1 2 3} +do_test sort-9.2 { + execsql { + SELECT x FROM t6 ORDER BY y DESC; + } +} {3 2 1 4} +do_test sort-9.3 { + execsql { + SELECT x FROM t6 WHERE y<1 + } +} {} +do_test sort-9.4 { + execsql { + SELECT x FROM t6 WHERE y<'1' + } +} {1} +do_test sort-9.5 { + execsql { + SELECT x FROM t6 WHERE y1 + } +} {2 3} +do_test sort-9.7 { + execsql { + 
SELECT x FROM t6 WHERE y>'1' + } +} {3} +} ;# endif bloblit + +# Ticket #1092 - ORDER BY on rowid fields. +do_test sort-10.1 { + execsql { + CREATE TABLE t7(c INTEGER PRIMARY KEY); + INSERT INTO t7 VALUES(1); + INSERT INTO t7 VALUES(2); + INSERT INTO t7 VALUES(3); + INSERT INTO t7 VALUES(4); + } +} {} +do_test sort-10.2 { + execsql { + SELECT c FROM t7 WHERE c<=3 ORDER BY c DESC; + } +} {3 2 1} +do_test sort-10.3 { + execsql { + SELECT c FROM t7 WHERE c<3 ORDER BY c DESC; + } +} {2 1} + +# ticket #1358. Just because one table in a join gives a unique +# result does not mean they all do. We cannot disable sorting unless +# all tables in the join give unique results. +# +do_test sort-11.1 { + execsql { + create table t8(a unique, b, c); + insert into t8 values(1,2,3); + insert into t8 values(2,3,4); + create table t9(x,y); + insert into t9 values(2,4); + insert into t9 values(2,3); + select y from t8, t9 where a=1 order by a, y; + } +} {3 4} + +# Trouble reported on the mailing list. Check for overly aggressive +# (which is to say, incorrect) optimization of order-by with a rowid +# in a join. +# +do_test sort-12.1 { + execsql { + create table a (id integer primary key); + create table b (id integer primary key, aId integer, text); + insert into a values (1); + insert into b values (2, 1, 'xxx'); + insert into b values (1, 1, 'zzz'); + insert into b values (3, 1, 'yyy'); + select a.id, b.id, b.text from a join b on (a.id = b.aId) + order by a.id, b.text; + } +} {1 2 xxx 1 3 yyy 1 1 zzz} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/speed1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/speed1.test new file mode 100644 index 0000000..8b6ab9d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/speed1.test @@ -0,0 +1,289 @@ +# 2006 November 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. +# +# $Id: speed1.test,v 1.5 2007/03/31 22:34:16 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed1.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. +# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Create a database schema. 
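+# Before building the schema, a quick illustrative check of the helper (the
+# test name here is arbitrary and is not one of the timed trials):
+do_test speed1-0.0 {
+  number_name 4521
+} {four thousand five hundred twenty one}
+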
+# +do_test speed1-1.0 { + execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + } + execsql { + SELECT name FROM sqlite_master ORDER BY 1; + } +} {i2a i2b t1 t2} + + +# 50000 INSERTs on an unindexed table +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');\n" +} +db eval BEGIN +speed_trial speed1-insert1 50000 row $sql +db eval COMMIT + +# 50000 INSERTs on an indexed table +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');\n" +} +db eval BEGIN +speed_trial speed1-insert2 50000 row $sql +db eval COMMIT + + + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +db eval BEGIN +speed_trial speed1-select1 [expr {50*50000}] row $sql +db eval COMMIT + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + append sql \ + "SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%[number_name $i]%';" +} +db eval BEGIN +speed_trial speed1-select2 [expr {50*50000}] row $sql +db eval COMMIT + +# Create indices +# +db eval BEGIN +speed_trial speed1-createidx 150000 row { + CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); +} +db eval COMMIT + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +db eval BEGIN +speed_trial speed1-select3 5000 stmt $sql +db eval COMMIT + +# 100000 random SELECTs against rowid. +# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c FROM t1 WHERE rowid=$id;" +} +db eval BEGIN +speed_trial speed1-select4 100000 row $sql +db eval COMMIT + +# 100000 random SELECTs against a unique indexed column. +# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c FROM t1 WHERE a=$id;" +} +db eval BEGIN +speed_trial speed1-select5 100000 row $sql +db eval COMMIT + +# 50000 random SELECTs against an indexed column text column +# +set sql {} +db eval {SELECT c FROM t1 ORDER BY random() LIMIT 50000} { + append sql "SELECT c FROM t1 WHERE c='$c';" +} +db eval BEGIN +speed_trial speed1-select6 50000 row $sql +db eval COMMIT + + +# Vacuum +speed_trial speed1-vacuum 100000 row VACUUM + +# 5000 updates of ranges where the field being compared is indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*2}] + set upr [expr {($i+1)*2}] + append sql "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;" +} +db eval BEGIN +speed_trial speed1-update1 5000 stmt $sql +db eval COMMIT + +# 50000 single-row updates. An index is used to find the row quickly. 
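+# The block below follows the same pattern as the earlier trials: accumulate
+# a batch of SQL in a Tcl string, wrap it in an explicit transaction, and
+# hand it to speed_trial, whose arguments appear to be a label, an operation
+# count, a unit, and the SQL batch (inferred from how it is used in this
+# script).  A minimal sketch of the same pattern, with a hypothetical label:
+#
+#   set sql {}
+#   for {set i 0} {$i<1000} {incr i} { append sql "SELECT $i;" }
+#   db eval BEGIN
+#   speed_trial speed1-sketch 1000 stmt $sql
+#   db eval COMMIT
+#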
+# +set sql {} +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "UPDATE t1 SET b=$r WHERE a=$i;" +} +db eval BEGIN +speed_trial speed1-update2 50000 row $sql +db eval COMMIT + +# 1 big text update that touches every row in the table. +# +speed_trial speed1-update3 50000 row { + UPDATE t1 SET c=a; +} + +# Many individual text updates. Each row in the table is +# touched through an index. +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "UPDATE t1 SET c='[number_name $r]' WHERE a=$i;" +} +db eval BEGIN +speed_trial speed1-update4 50000 row $sql +db eval COMMIT + +# Delete all content in a table. +# +speed_trial speed1-delete1 50000 row {DELETE FROM t1} + +# Copy one table into another +# +speed_trial speed1-copy1 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Delete all content in a table, one row at a time. +# +speed_trial speed1-delete2 50000 row {DELETE FROM t1 WHERE 1} + +# Refill the table yet again +# +speed_trial speed1-copy2 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Drop the table and recreate it without its indices. +# +db eval BEGIN +speed_trial speed1-drop1 50000 row { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); +} +db eval COMMIT + +# Refill the table yet again. This copy should be faster because +# there are no indices to deal with. +# +speed_trial speed1-copy3 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Select 20000 rows from the table at random. +# +speed_trial speed1-random1 50000 row { + SELECT rowid FROM t1 ORDER BY random() LIMIT 20000 +} + +# Delete 20000 random rows from the table. +# +speed_trial speed1-random-del1 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1-1.1 { + db one {SELECT count(*) FROM t1} +} 30000 + + +# Delete 20000 more rows at random from the table. +# +speed_trial speed1-random-del2 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1-1.2 { + db one {SELECT count(*) FROM t1} +} 10000 +speed_trial_summary speed1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/speed2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/speed2.test new file mode 100644 index 0000000..f6d1a4c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/speed2.test @@ -0,0 +1,339 @@ +# 2006 November 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. +# +# $Id: speed2.test,v 1.7 2007/04/16 15:02:20 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed2 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed2.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. 
+# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Create a database schema. +# +do_test speed2-1.0 { + execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + } + execsql { + SELECT name FROM sqlite_master ORDER BY 1; + } +} {i2a i2b t1 t2} + + +# 50000 INSERTs on an unindexed table +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');\n" +} +db eval BEGIN +speed_trial speed2-insert1 50000 row $sql +db eval COMMIT + +# 50000 INSERTs on an indexed table +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');\n" +} +db eval BEGIN +speed_trial speed2-insert2 50000 row $sql +db eval COMMIT + + + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +speed_trial speed2-select1a [expr {50*50000}] row $sql + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + append sql \ + "SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%[number_name $i]%';" +} +speed_trial speed2-select2a [expr {50*50000}] row $sql + +# Vacuum +speed_trial speed2-vacuum1 100000 row VACUUM + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +speed_trial speed2-select1b [expr {50*50000}] row $sql + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. +# +set sql {} +for {set i 0} {$i<50} {incr i} { + append sql \ + "SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%[number_name $i]%';" +} +speed_trial speed2-select2b [expr {50*50000}] row $sql + +# Create indices +# +db eval BEGIN +speed_trial speed2-createidx 150000 row { + CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); +} +db eval COMMIT + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +speed_trial speed2-select3a 5000 stmt $sql + +# 100000 random SELECTs against rowid. 
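+# The row ids used below are pseudo-random but repeatable, because the
+# script seeded Tcl's generator with [expr srand(0)] above; on a given Tcl
+# build the same sequence of ids is produced on every run.  For example:
+#
+#   expr srand(0)
+#   expr {int(rand()*50000)+1}   ;# same first id every run after this seed
+#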
+# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c=='hi' FROM t1 WHERE rowid=$id;\n" +} +speed_trial speed2-select4a 100000 row $sql + +# 100000 random SELECTs against a unique indexed column. +# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c FROM t1 WHERE a=$id;" +} +speed_trial speed2-select5a 100000 row $sql + +# 50000 random SELECTs against an indexed column text column +# +set sql {} +db eval {SELECT c FROM t1 ORDER BY random() LIMIT 50000} { + append sql "SELECT c FROM t1 WHERE c='$c';" +} +speed_trial speed2-select6a 50000 row $sql + +# Vacuum +speed_trial speed2-vacuum2 100000 row VACUUM + + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + append sql "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} +speed_trial speed2-select3b 5000 stmt $sql + +# 100000 random SELECTs against rowid. +# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c=='hi' FROM t1 WHERE rowid=$id;\n" +} +speed_trial speed2-select4b 100000 row $sql + +# 100000 random SELECTs against a unique indexed column. +# +set sql {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + append sql "SELECT c FROM t1 WHERE a=$id;" +} +speed_trial speed2-select5b 100000 row $sql + +# 50000 random SELECTs against an indexed column text column +# +set sql {} +db eval {SELECT c FROM t1 ORDER BY random() LIMIT 50000} { + append sql "SELECT c FROM t1 WHERE c='$c';" +} +speed_trial speed2-select6b 50000 row $sql + +# 5000 updates of ranges where the field being compared is indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*2}] + set upr [expr {($i+1)*2}] + append sql "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;" +} +db eval BEGIN +speed_trial speed2-update1 5000 stmt $sql +db eval COMMIT + +# 50000 single-row updates. An index is used to find the row quickly. +# +set sql {} +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "UPDATE t1 SET b=$r WHERE a=$i;" +} +db eval BEGIN +speed_trial speed2-update2 50000 row $sql +db eval COMMIT + +# 1 big text update that touches every row in the table. +# +speed_trial speed2-update3 50000 row { + UPDATE t1 SET c=a; +} + +# Many individual text updates. Each row in the table is +# touched through an index. +# +set sql {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + append sql "UPDATE t1 SET c='[number_name $r]' WHERE a=$i;" +} +db eval BEGIN +speed_trial speed2-update4 50000 row $sql +db eval COMMIT + +# Delete all content in a table. +# +speed_trial speed2-delete1 50000 row {DELETE FROM t1} + +# Copy one table into another +# +speed_trial speed2-copy1 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Delete all content in a table, one row at a time. +# +speed_trial speed2-delete2 50000 row {DELETE FROM t1 WHERE 1} + +# Refill the table yet again +# +speed_trial speed2-copy2 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Drop the table and recreate it without its indices. +# +db eval BEGIN +speed_trial speed2-drop1 50000 row { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); +} +db eval COMMIT + +# Refill the table yet again. This copy should be faster because +# there are no indices to deal with. 
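+# The same idea applies outside of benchmarking: for a very large bulk load
+# into an indexed table it is usually cheaper to drop the indices, copy the
+# rows, and then recreate the indices.  A sketch (not executed here) using
+# the index names created earlier in this script:
+#
+#   db eval { DROP INDEX i1a; DROP INDEX i1b; DROP INDEX i1c }
+#   db eval { INSERT INTO t1 SELECT * FROM t2 }
+#   db eval { CREATE INDEX i1a ON t1(a) }
+#   db eval { CREATE INDEX i1b ON t1(b) }
+#   db eval { CREATE INDEX i1c ON t1(c) }
+#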
+# +speed_trial speed2-copy3 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Select 20000 rows from the table at random. +# +speed_trial speed2-random1 50000 row { + SELECT rowid FROM t1 ORDER BY random() LIMIT 20000 +} + +# Delete 20000 random rows from the table. +# +speed_trial speed2-random-del1 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed2-1.1 { + db one {SELECT count(*) FROM t1} +} 30000 + + +# Delete 20000 more rows at random from the table. +# +speed_trial speed2-random-del2 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed2-1.2 { + db one {SELECT count(*) FROM t1} +} 10000 +speed_trial_summary speed2 + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/speed3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/speed3.test new file mode 100644 index 0000000..b43cfaa --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/speed3.test @@ -0,0 +1,186 @@ +# 2007 May 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing that the overflow-page related +# enhancements added after version 3.3.17 speed things up. +# +# $Id: speed3.test,v 1.4 2007/09/12 17:01:45 danielk1977 Exp $ +# + +#--------------------------------------------------------------------- +# Test plan: +# +# If auto-vacuum is enabled for the database, the following cases +# should show performance improvement with respect to 3.3.17. +# +# + When deleting rows that span overflow pages. This is faster +# because the overflow pages no longer need to be read before +# they can be moved to the free list (test cases speed3-1.X). +# +# + When reading a column value stored on an overflow page that +# is not the first overflow page for the row. The improvement +# in this case is because the overflow pages between the tree +# page and the overflow page containing the value do not have +# to be read (test cases speed3-2.X). +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !tclvar { + finish_test + return +} + +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set ::NROW 1000 + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. 
+# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +proc populate_t1 {db} { + $db transaction { + for {set ii 0} {$ii < $::NROW} {incr ii} { + set N [number_name $ii] + set repeats [expr {(10000/[string length $N])+1}] + set text [string range [string repeat $N $repeats] 0 10000] + $db eval {INSERT INTO main.t1 VALUES($ii, $text, $ii)} + } + $db eval {INSERT INTO aux.t1 SELECT * FROM main.t1} + } +} + + +proc io_log {db} { + db_enter db + array set stats1 [btree_pager_stats [btree_from_db db]] + array set stats2 [btree_pager_stats [btree_from_db db 2]] + db_leave db +# puts "1: [array get stats1]" +# puts "2: [array get stats2]" + puts "Incrvacuum: Read $stats1(read), wrote $stats1(write)" + puts "Normal : Read $stats2(read), wrote $stats2(write)" +} + +proc overflow_report {db} { + set bt [btree_from_db db] + set csr [btree_cursor $bt 3 0] + + for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} { + puts "[btree_ovfl_info $bt $csr]" + } + + btree_close_cursor $csr + +} + +proc reset_db {} { + db close + sqlite3 db test.db + db eval { + PRAGMA main.cache_size = 200000; + PRAGMA main.auto_vacuum = 'incremental'; + ATTACH 'test2.db' AS 'aux'; + PRAGMA aux.auto_vacuum = 'none'; + } +} + +file delete -force test2.db test2.db-journal +reset_db + +# Set up a database in auto-vacuum mode and create a database schema. +# +do_test speed3-0.1 { + execsql { + CREATE TABLE main.t1(a INTEGER, b TEXT, c INTEGER); + } + execsql { + SELECT name FROM sqlite_master ORDER BY 1; + } +} {t1} +do_test speed3-0.2 { + execsql { + CREATE TABLE aux.t1(a INTEGER, b TEXT, c INTEGER); + } + execsql { + SELECT name FROM aux.sqlite_master ORDER BY 1; + } +} {t1} +do_test speed3-0.3 { + populate_t1 db + execsql { + SELECT count(*) FROM main.t1; + SELECT count(*) FROM aux.t1; + } +} "$::NROW $::NROW" +do_test speed3-0.4 { + execsql { + PRAGMA main.auto_vacuum; + PRAGMA aux.auto_vacuum; + } +} {2 0} + +# Delete all content in a table, one row at a time. +# +#io_log db +#overflow_report db +reset_db +speed_trial speed3-1.incrvacuum $::NROW row {DELETE FROM main.t1 WHERE 1} +speed_trial speed3-1.normal $::NROW row {DELETE FROM aux.t1 WHERE 1} +io_log db + +# Select the "C" column (located at the far end of the overflow +# chain) from each table row. 
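+# Each row's "b" value is roughly 10000 bytes, so with the default 1KB page
+# size (no page_size pragma is set here) it spills across roughly ten
+# overflow pages, e.g. [expr {10000/1024 + 1}].  Because "c" is stored after
+# "b" in the record, fetching it previously meant reading the whole chain;
+# the post-3.3.17 enhancement lets the intervening overflow pages be skipped
+# in the auto-vacuum database, which is what the speed3-2.* trials measure.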
+# +#db eval {PRAGMA incremental_vacuum(500000)} +populate_t1 db +#overflow_report db +reset_db +speed_trial speed3-2.incrvacuum $::NROW row {SELECT c FROM main.t1} +speed_trial speed3-2.normal $::NROW row {SELECT c FROM aux.t1} +io_log db + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/sqllimits1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/sqllimits1.test new file mode 100644 index 0000000..6641e08 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/sqllimits1.test @@ -0,0 +1,576 @@ +# 2007 May 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests to verify that the limits defined in +# sqlite source file limits.h are enforced. +# +# $Id: sqllimits1.test,v 1.18 2007/09/06 23:39:37 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test organization: +# +# sqllimits-1.*: SQLITE_MAX_LENGTH +# sqllimits-2.*: SQLITE_MAX_SQL_LENGTH +# sqllimits-3.*: SQLITE_MAX_PAGE_COUNT (and the max_page_count pragma) +# sqllimits-4.*: SQLITE_MAX_COLUMN +# +# +# sqllimits-7.*: SQLITE_MAX_FUNCTION_ARG +# sqllimits-8.*: SQLITE_MAX_ATTACHED +# sqllimits-9.*: SQLITE_MAX_VARIABLE_NUMBER +# sqllimits-10.*: SQLITE_MAX_PAGE_SIZE +# sqllimits-11.*: SQLITE_MAX_LIKE_PATTERN_LENGTH +# +# Todo: +# +# sqllimits-5.*: SQLITE_MAX_EXPR_DEPTH (sqlite todo) +# sqllimits-6.*: SQLITE_MAX_VDBE_OP (sqlite todo) +# sqllimits-12.*: SQLITE_MAX_PAGE_COUNT (sqlite todo) +# + +unset -nocomplain saved +foreach var [info vars SQLITE_MAX_*] { + set saved($var) [set $var] +} + +set SQLITE_MAX_LENGTH 100000 +set SQLITE_MAX_COLUMN +set SQLITE_MAX_SQL_LENGTH 50000 +set SQLITE_MAX_EXPR_DEPTH 1000 +set SQLITE_MAX_COMPOUND_SELECT 5 +set SQLITE_MAX_VDBE_OP +set SQLITE_MAX_FUNCTION_ARG +set SQLITE_MAX_VARIABLE_NUMBER +set SQLITE_MAX_PAGE_SIZE +set SQLITE_MAX_PAGE_COUNT +set SQLITE_MAX_LIKE_PATTERN_LENGTH 1000 + +#-------------------------------------------------------------------- +# Test cases sqllimits-1.* test that the SQLITE_MAX_LENGTH limit +# is enforced. +# +do_test sqllimits-1.1.1 { + catchsql { SELECT randomblob(2147483647) } +} {1 {string or blob too big}} +do_test sqllimits-1.1.2 { + catchsql { SELECT zeroblob(2147483647) } +} {1 {string or blob too big}} + +# Large, but allowable, blob-size. 
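+# Throughout this file, catchsql returns a two-element Tcl list: a flag from
+# [catch] (0 on success, 1 on error) followed by either the query results or
+# the error message.  So the expected values read as "0 <rows>" on success,
+# e.g. {0 {{}}} for zeroblob(-1) above, and "1 {<error text>}" on failure,
+# e.g. {1 {string or blob too big}}.
+#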
+# +set ::LARGESIZE [expr $SQLITE_MAX_LENGTH - 1] + +do_test sqllimits-1.2 { + catchsql { SELECT LENGTH(randomblob($::LARGESIZE)) } +} "0 $::LARGESIZE" + +do_test sqllimits-1.3 { + catchsql { SELECT quote(randomblob($::LARGESIZE)) } +} {1 {string or blob too big}} + +do_test sqllimits-1.4 { + catchsql { SELECT LENGTH(zeroblob($::LARGESIZE)) } +} "0 $::LARGESIZE" + +do_test sqllimits-1.5 { + catchsql { SELECT quote(zeroblob($::LARGESIZE)) } +} {1 {string or blob too big}} + +do_test sqllimits-1.6 { + catchsql { SELECT zeroblob(-1) } +} {0 {{}}} + +do_test sqllimits-1.9 { + set ::str [string repeat A 65537] + set ::rep [string repeat B 65537] + catchsql { SELECT replace($::str, 'A', $::rep) } +} {1 {string or blob too big}} + +do_test sqllimits-1.10 { + set ::str [string repeat %J 2100] + catchsql { SELECT strftime($::str, '2003-10-31') } +} {1 {string or blob too big}} + +do_test sqllimits-1.11 { + set ::str1 [string repeat A [expr {$SQLITE_MAX_LENGTH - 10}]] + set ::str2 [string repeat B [expr {$SQLITE_MAX_LENGTH - 10}]] + catchsql { SELECT $::str1 || $::str2 } +} {1 {string or blob too big}} + +do_test sqllimits-1.12 { + set ::str1 [string repeat ' [expr {$SQLITE_MAX_LENGTH - 10}]] + catchsql { SELECT quote($::str1) } +} {1 {string or blob too big}} + +do_test sqllimits-1.13 { + set ::str1 [string repeat ' [expr {$SQLITE_MAX_LENGTH - 10}]] + catchsql { SELECT hex($::str1) } +} {1 {string or blob too big}} + +do_test sqllimits-1.14.1 { + set ::STMT [sqlite3_prepare $::DB "SELECT ?" -1 TAIL] + sqlite3_bind_zeroblob $::STMT 1 [expr {$SQLITE_MAX_LENGTH + 1}] +} {} +do_test sqllimits-1.14.2 { + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test sqllimits-1.14.3 { + sqlite3_finalize $::STMT +} {SQLITE_TOOBIG} + +#-------------------------------------------------------------------- +# Test cases sqllimits-2.* test that the SQLITE_MAX_SQL_LENGTH limit +# is enforced. +# +do_test sqllimits-2.1 { + set sql "SELECT 1 WHERE 1==1" + set N [expr {$::SQLITE_MAX_SQL_LENGTH / [string length " AND 1==1"]}] + append sql [string repeat " AND 1==1" $N] + catchsql $sql +} {1 {String or BLOB exceeded size limit}} + +#-------------------------------------------------------------------- +# Test cases sqllimits-3.* test that the limit set using the +# max_page_count pragma. +# +do_test sqllimits-3.1 { + execsql { + PRAGMA max_page_count = 1000; + } +} {1000} +do_test sqllimits-3.2 { + execsql { CREATE TABLE trig (a INTEGER, b INTEGER); } + + # Set up a tree of triggers to fire when a row is inserted + # into table "trig". + # + # INSERT -> insert_b -> update_b -> insert_a -> update_a (chain 1) + # -> update_a -> insert_a -> update_b (chain 2) + # -> insert_a -> update_b -> insert_b -> update_a (chain 3) + # -> update_a -> insert_b -> update_b (chain 4) + # + # Table starts with N rows. + # + # Chain 1: insert_b (update N rows) + # -> update_b (insert 1 rows) + # -> insert_a (update N rows) + # -> update_a (insert 1 rows) + # + # chains 2, 3 and 4 are similar. Each inserts more than N^2 rows, where + # N is the number of rows at the conclusion of the previous chain. + # + # Therefore, a single insert adds (N^16 plus some) rows to the database. + # A really long loop... 
+ # + execsql { + CREATE TRIGGER update_b BEFORE UPDATE ON trig + FOR EACH ROW BEGIN + INSERT INTO trig VALUES (65, 'update_b'); + END; + + CREATE TRIGGER update_a AFTER UPDATE ON trig + FOR EACH ROW BEGIN + INSERT INTO trig VALUES (65, 'update_a'); + END; + + CREATE TRIGGER insert_b BEFORE INSERT ON trig + FOR EACH ROW BEGIN + UPDATE trig SET a = 1; + END; + + CREATE TRIGGER insert_a AFTER INSERT ON trig + FOR EACH ROW BEGIN + UPDATE trig SET a = 1; + END; + } +} {} + +do_test sqllimits1-3.3 { + execsql { + INSERT INTO trig VALUES (1,1); + } +} {} + +do_test sqllimits1-3.4 { + execsql { + SELECT COUNT(*) FROM trig; + } +} {7} + +# This tries to insert so many rows it fills up the database (limited +# to 1MB, so not that noteworthy an achievement). +# +do_test sqllimits1-3.5 { + catchsql { + INSERT INTO trig VALUES (1,10); + } +} {1 {database or disk is full}} + +do_test sqllimits1-3.6 { + catchsql { + SELECT COUNT(*) FROM trig; + } +} {0 7} + +# Now check the response of the library to opening a file larger than +# the current max_page_count value. The response is to change the +# internal max_page_count value to match the actual size of the file. +do_test sqllimits1-3.7.1 { + execsql { + PRAGMA max_page_count = 1000000; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a, b, c FROM abc; + INSERT INTO abc SELECT b, a, c FROM abc; + INSERT INTO abc SELECT c, b, a FROM abc; + } + expr [file size test.db] / 1024 +} {1691} +do_test sqllimits1-3.7.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA max_page_count = 1000; + } + execsql { + SELECT count(*) FROM sqlite_master; + } +} {6} +do_test sqllimits1-3.7.3 { + execsql { + PRAGMA max_page_count; + } +} {1691} +do_test sqllimits1-3.7.4 { + execsql { + DROP TABLE abc; + } +} {} + +#-------------------------------------------------------------------- +# Test cases sqllimits1-4.* test the SQLITE_MAX_COLUMN limit. +# +do_test sqllimits-1.4.1 { + # Columns in a table. + set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "c$i" + } + catchsql "CREATE TABLE t([join $cols ,])" +} {1 {too many columns on t}} + +do_test sqllimits-1.4.2 { + # Columns in the result-set of a SELECT. + set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "sql AS sql$i" + } + catchsql "SELECT [join $cols ,] FROM sqlite_master" +} {1 {too many columns in result set}} + +do_test sqllimits-1.4.3 { + # Columns in the result-set of a sub-SELECT. + set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "sql AS sql$i" + } + catchsql "SELECT sql4 FROM (SELECT [join $cols ,] FROM sqlite_master)" +} {1 {too many columns in result set}} + +do_test sqllimits-1.4.4 { + # Columns in an index. 
+ set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols c + } + set sql1 "CREATE TABLE t1(c);" + set sql2 "CREATE INDEX i1 ON t1([join $cols ,]);" + catchsql "$sql1 ; $sql2" +} {1 {too many columns in index}} + +do_test sqllimits-1.4.5 { + # Columns in a GROUP BY clause. + catchsql "SELECT * FROM t1 GROUP BY [join $cols ,]" +} {1 {too many terms in GROUP BY clause}} + +do_test sqllimits-1.4.6 { + # Columns in an ORDER BY clause. + catchsql "SELECT * FROM t1 ORDER BY [join $cols ,]" +} {1 {too many terms in ORDER BY clause}} + +do_test sqllimits-1.4.7 { + # Assignments in an UPDATE statement. + set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "c = 1" + } + catchsql "UPDATE t1 SET [join $cols ,];" +} {1 {too many columns in set list}} + +do_test sqllimits-1.4.8 { + # Columns in a view definition: + set cols [list] + for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "c$i" + } + catchsql "CREATE VIEW v1 AS SELECT [join $cols ,] FROM t1;" +} {1 {too many columns in result set}} + +do_test sqllimits-1.4.9 { + # Columns in a view definition (testing * expansion): + set cols [list] + for {set i 0} {$i < $SQLITE_MAX_COLUMN} {incr i} { + lappend cols "c$i" + } + catchsql "CREATE TABLE t2([join $cols ,])" + catchsql "CREATE VIEW v1 AS SELECT *, c1 AS o FROM t2;" +} {1 {too many columns in result set}} + +#-------------------------------------------------------------------- +# These tests - sqllimits-5.* - test that the SQLITE_MAX_EXPR_DEPTH +# limit is enforced. The limit refers to the number of terms in +# the expression. +# +if {$::SQLITE_MAX_EXPR_DEPTH != 1000} { + puts -nonewline stderr "WARNING: Compile with -DSQLITE_MAX_EXPR_DEPTH to run " + puts stderr "tests sqllimits-1.5.X" +} else { + do_test sqllimits-1.5.1 { + set max $::SQLITE_MAX_EXPR_DEPTH + set expr "(1 [string repeat {AND 1 } $max])" + catchsql [subst { + SELECT $expr + }] + } "1 {Expression tree is too large (maximum depth $::SQLITE_MAX_EXPR_DEPTH)}" + + # Attempting to beat the expression depth limit using nested SELECT + # queries causes a parser stack overflow. + do_test sqllimits-1.5.2 { + set max $::SQLITE_MAX_EXPR_DEPTH + set expr "SELECT 1" + for {set i 0} {$i <= $max} {incr i} { + set expr "SELECT ($expr)" + } + catchsql [subst { $expr }] + } "1 {parser stack overflow}" + + + do_test sqllimits-1.5.3 { + execsql { + PRAGMA max_page_count = 1000000; -- 1 GB + CREATE TABLE v0(a); + INSERT INTO v0 VALUES(1); + } + db transaction { + for {set i 1} {$i < 200} {incr i} { + set expr "(a [string repeat {AND 1 } 50]) AS a" + execsql [subst { + CREATE VIEW v${i} AS SELECT $expr FROM v[expr {$i-1}] + }] + } + } + } {} + + do_test sqllimits-1.5.4 { + catchsql { + SELECT a FROM v199 + } + } "1 {Expression tree is too large (maximum depth $::SQLITE_MAX_EXPR_DEPTH)}" +} + +#-------------------------------------------------------------------- +# Test cases sqllimits-6.* test that the SQLITE_MAX_VDBE_OP +# limit works as expected. The limit refers to the number of opcodes +# in a single VDBE program. +# +# TODO + +#-------------------------------------------------------------------- +# Test the SQLITE_MAX_FUNCTION_ARG limit works. Test case names +# match the pattern "sqllimits-7.*". 
+# +do_test sqllimits-1.7.1 { + set max $::SQLITE_MAX_FUNCTION_ARG + set vals [list] + for {set i 0} {$i < $SQLITE_MAX_FUNCTION_ARG} {incr i} { + lappend vals $i + } + catchsql "SELECT max([join $vals ,])" +} "0 [expr {$::SQLITE_MAX_FUNCTION_ARG - 1}]" +do_test sqllimits-1.7.2 { + set max $::SQLITE_MAX_FUNCTION_ARG + set vals [list] + for {set i 0} {$i <= $SQLITE_MAX_FUNCTION_ARG} {incr i} { + lappend vals $i + } + catchsql "SELECT max([join $vals ,])" +} {1 {too many arguments on function max}} + +# Test that it is SQLite, and not the implementation of the +# user function that is throwing the error. +proc myfunc {args} {error "I don't like to be called!"} +do_test sqllimits-1.7.2 { + db function myfunc myfunc + set max $::SQLITE_MAX_FUNCTION_ARG + set vals [list] + for {set i 0} {$i <= $SQLITE_MAX_FUNCTION_ARG} {incr i} { + lappend vals $i + } + catchsql "SELECT myfunc([join $vals ,])" +} {1 {too many arguments on function myfunc}} + +#-------------------------------------------------------------------- +# Test cases sqllimits-8.*: Test the SQLITE_MAX_ATTACHED limit. +# +# TODO +do_test sqllimits-1.8.1 { + set max $::SQLITE_MAX_ATTACHED + for {set i 0} {$i < ($max)} {incr i} { + file delete -force test${i}.db test${i}.db-journal + } + for {set i 0} {$i < ($max)} {incr i} { + execsql "ATTACH 'test${i}.db' AS aux${i}" + } + catchsql "ATTACH 'test${i}.db' AS aux${i}" +} "1 {too many attached databases - max $::SQLITE_MAX_ATTACHED}" +do_test sqllimits-1.8.2 { + set max $::SQLITE_MAX_ATTACHED + for {set i 0} {$i < ($max)} {incr i} { + execsql "DETACH aux${i}" + } +} {} + +#-------------------------------------------------------------------- +# Test cases sqllimits-9.*: Check that the SQLITE_MAX_VARIABLE_NUMBER +# limit works. +# +do_test sqllimits-1.9.1 { + set max $::SQLITE_MAX_VARIABLE_NUMBER + catchsql "SELECT ?[expr {$max+1}] FROM t1" +} "1 {variable number must be between ?1 and ?$::SQLITE_MAX_VARIABLE_NUMBER}" +do_test sqllimits-1.9.2 { + set max $::SQLITE_MAX_VARIABLE_NUMBER + set vals [list] + for {set i 0} {$i < ($max+3)} {incr i} { + lappend vals ? + } + catchsql "SELECT [join $vals ,] FROM t1" +} "1 {too many SQL variables}" + + +#-------------------------------------------------------------------- +# sqllimits-10.*: Test the SQLITE_MAX_PAGE_SIZE define is enforced. +# This is probably tested elsewhere too (pagerX.test). Attempts +# to raise the page size above this limit are silently ignored. +# +do_test sqllimits-1.10.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + set max $::SQLITE_MAX_PAGE_SIZE + catchsql "PRAGMA page_size = [expr {$max*2}]" +} {0 {}} +do_test sqllimits-1.10.2 { + catchsql "PRAGMA page_size" +} {0 1024} +do_test sqllimits-1.10.3 { + set max $::SQLITE_MAX_PAGE_SIZE + catchsql "PRAGMA page_size = $max" +} {0 {}} +do_test sqllimits-1.10.4 { + execsql "pragma page_size" +} $::SQLITE_MAX_PAGE_SIZE +do_test sqllimits-1.10.5 { + set max $::SQLITE_MAX_PAGE_SIZE + execsql "pragma page_size = [expr {$max - 5}]" + execsql "pragma page_size" +} $::SQLITE_MAX_PAGE_SIZE + +# Opening a database where the page size is too large should generate +# an error. 
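+# The page size is recorded in the database file header when the file is
+# created.  If that recorded value exceeds the SQLITE_MAX_PAGE_SIZE the
+# library was compiled with, the header fails validation and the open is
+# rejected as though the file were not a database at all, which is why the
+# test below expects "file is encrypted or is not a database".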
+# +do_test sqllimits-1.10.5 { + db close + file delete -force test.db + set ::SQLITE_MAX_PAGE_SIZE 32768 + sqlite3 db test.db + db eval {PRAGMA page_size=32768} + db eval {CREATE TABLE t1(x);} + db eval {PRAGMA page_size} +} {32768} +do_test sqllimits-1.10.6 { + db close + set ::SQLITE_MAX_PAGE_SIZE 8192 + sqlite3 db test.db + catchsql {SELECT name FROM sqlite_master} +} {1 {file is encrypted or is not a database}} +db close +file delete -force test.db +sqlite3 db test.db + +#-------------------------------------------------------------------- +# Test cases sqllimits-11.* verify that the +# SQLITE_MAX_LIKE_PATTERN_LENGTH limit is enforced. This limit only +# applies to the built-in LIKE operator, supplying an external +# implementation by overriding the like() scalar function bypasses +# this limitation. +# +# These tests check that the limit is not incorrectly applied to +# the left-hand-side of the LIKE operator (the string being tested +# against the pattern). +# +do_test sqllimits-1.11.1 { + set max $::SQLITE_MAX_LIKE_PATTERN_LENGTH + set ::pattern [string repeat "A%" [expr $max/2]] + set ::string [string repeat "A" [expr {$max*2}]] + execsql { + SELECT $::string LIKE $::pattern; + } +} {1} +do_test sqllimits-1.11.2 { + set max $::SQLITE_MAX_LIKE_PATTERN_LENGTH + set ::pattern [string repeat "A%" [expr {($max/2) + 1}]] + set ::string [string repeat "A" [expr {$max*2}]] + catchsql { + SELECT $::string LIKE $::pattern; + } +} {1 {LIKE or GLOB pattern too complex}} + +#-------------------------------------------------------------------- +# This test case doesn't really belong with the other limits tests. +# It is in this file because it is taxing to run, like the limits tests. +# +do_test sqllimits-1.12.1 { + set ::N [expr int(([expr pow(2,32)]/50) + 1)] + expr (($::N*50) & 0xffffffff)<55 +} {1} +do_test sqllimits-1.12.2 { + set ::format "[string repeat A 60][string repeat "%J" $::N]" + catchsql { + SELECT strftime($::format, 1); + } +} {1 {string or blob too big}} + + +foreach {key value} [array get saved] { + catch {set $key $value} +} +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/subquery.test b/libraries/sqlite/unix/sqlite-3.5.1/test/subquery.test new file mode 100644 index 0000000..e81269d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/subquery.test @@ -0,0 +1,494 @@ +# 2005 January 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. 
The +# focus of this script is testing correlated subqueries +# +# $Id: subquery.test,v 1.15 2007/09/18 16:53:53 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +do_test subquery-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + INSERT INTO t1 VALUES(5,6); + INSERT INTO t1 VALUES(7,8); + CREATE TABLE t2(x,y); + INSERT INTO t2 VALUES(1,1); + INSERT INTO t2 VALUES(3,9); + INSERT INTO t2 VALUES(5,25); + INSERT INTO t2 VALUES(7,49); + COMMIT; + } + execsql { + SELECT a, (SELECT y FROM t2 WHERE x=a) FROM t1 WHERE b<8 + } +} {1 1 3 9 5 25} +do_test subquery-1.2 { + execsql { + UPDATE t1 SET b=b+(SELECT y FROM t2 WHERE x=a); + SELECT * FROM t1; + } +} {1 3 3 13 5 31 7 57} + +do_test subquery-1.3 { + execsql { + SELECT b FROM t1 WHERE EXISTS(SELECT * FROM t2 WHERE y=a) + } +} {3} +do_test subquery-1.4 { + execsql { + SELECT b FROM t1 WHERE NOT EXISTS(SELECT * FROM t2 WHERE y=a) + } +} {13 31 57} + +# Simple tests to make sure correlated subqueries in WHERE clauses +# are used by the query optimizer correctly. +do_test subquery-1.5 { + execsql { + SELECT a, x FROM t1, t2 WHERE t1.a = (SELECT x); + } +} {1 1 3 3 5 5 7 7} +do_test subquery-1.6 { + execsql { + CREATE INDEX i1 ON t1(a); + SELECT a, x FROM t1, t2 WHERE t1.a = (SELECT x); + } +} {1 1 3 3 5 5 7 7} +do_test subquery-1.7 { + execsql { + SELECT a, x FROM t2, t1 WHERE t1.a = (SELECT x); + } +} {1 1 3 3 5 5 7 7} + +# Try an aggregate in both the subquery and the parent query. +do_test subquery-1.8 { + execsql { + SELECT count(*) FROM t1 WHERE a > (SELECT count(*) FROM t2); + } +} {2} + +# Test a correlated subquery disables the "only open the index" optimization. +do_test subquery-1.9.1 { + execsql { + SELECT (y*2)>b FROM t1, t2 WHERE a=x; + } +} {0 1 1 1} +do_test subquery-1.9.2 { + execsql { + SELECT a FROM t1 WHERE (SELECT (y*2)>b FROM t2 WHERE a=x); + } +} {3 5 7} + +# Test that the flattening optimization works with subquery expressions. 
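+# In the cases below the wrapping "SELECT * FROM (...)" adds nothing, so the
+# query flattener should be able to merge the subquery into the outer
+# statement; in effect
+#
+#   SELECT * FROM (SELECT (SELECT a), b FROM t1)
+#
+# is expected to behave the same as
+#
+#   SELECT (SELECT a), b FROM t1
+#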
+do_test subquery-1.10.1 { + execsql { + SELECT (SELECT a), b FROM t1; + } +} {1 3 3 13 5 31 7 57} +do_test subquery-1.10.2 { + execsql { + SELECT * FROM (SELECT (SELECT a), b FROM t1); + } +} {1 3 3 13 5 31 7 57} +do_test subquery-1.10.3 { + execsql { + SELECT * FROM (SELECT (SELECT sum(a) FROM t1)); + } +} {16} +do_test subquery-1.10.4 { + execsql { + CREATE TABLE t5 (val int, period text PRIMARY KEY); + INSERT INTO t5 VALUES(5, '2001-3'); + INSERT INTO t5 VALUES(10, '2001-4'); + INSERT INTO t5 VALUES(15, '2002-1'); + INSERT INTO t5 VALUES(5, '2002-2'); + INSERT INTO t5 VALUES(10, '2002-3'); + INSERT INTO t5 VALUES(15, '2002-4'); + INSERT INTO t5 VALUES(10, '2003-1'); + INSERT INTO t5 VALUES(5, '2003-2'); + INSERT INTO t5 VALUES(25, '2003-3'); + INSERT INTO t5 VALUES(5, '2003-4'); + + SELECT "a.period", vsum + FROM (SELECT + a.period, + (select sum(val) from t5 where period between a.period and '2002-4') vsum + FROM t5 a where a.period between '2002-1' and '2002-4') + WHERE vsum < 45 ; + } +} {2002-2 30 2002-3 25 2002-4 15} +do_test subquery-1.10.5 { + execsql { + SELECT "a.period", vsum from + (select a.period, + (select sum(val) from t5 where period between a.period and '2002-4') vsum + FROM t5 a where a.period between '2002-1' and '2002-4') + WHERE vsum < 45 ; + } +} {2002-2 30 2002-3 25 2002-4 15} +do_test subquery-1.10.6 { + execsql { + DROP TABLE t5; + } +} {} + + + +#------------------------------------------------------------------ +# The following test cases - subquery-2.* - are not logically +# organized. They're here largely because they were failing during +# one stage of development of sub-queries. +# +do_test subquery-2.1 { + execsql { + SELECT (SELECT 10); + } +} {10} +do_test subquery-2.2.1 { + execsql { + CREATE TABLE t3(a PRIMARY KEY, b); + INSERT INTO t3 VALUES(1, 2); + INSERT INTO t3 VALUES(3, 1); + } +} {} +do_test subquery-2.2.2 { + execsql { + SELECT * FROM t3 WHERE a IN (SELECT b FROM t3); + } +} {1 2} +do_test subquery-2.2.3 { + execsql { + DROP TABLE t3; + } +} {} +do_test subquery-2.3.1 { + execsql { + CREATE TABLE t3(a TEXT); + INSERT INTO t3 VALUES('10'); + } +} {} +do_test subquery-2.3.2 { + execsql { + SELECT a IN (10.0, 20) FROM t3; + } +} {0} +do_test subquery-2.3.3 { + execsql { + DROP TABLE t3; + } +} {} +do_test subquery-2.4.1 { + execsql { + CREATE TABLE t3(a TEXT); + INSERT INTO t3 VALUES('XX'); + } +} {} +do_test subquery-2.4.2 { + execsql { + SELECT count(*) FROM t3 WHERE a IN (SELECT 'XX') + } +} {1} +do_test subquery-2.4.3 { + execsql { + DROP TABLE t3; + } +} {} +do_test subquery-2.5.1 { + execsql { + CREATE TABLE t3(a INTEGER); + INSERT INTO t3 VALUES(10); + + CREATE TABLE t4(x TEXT); + INSERT INTO t4 VALUES('10.0'); + } +} {} +do_test subquery-2.5.2 { + # In the expr "x IN (SELECT a FROM t3)" the RHS of the IN operator + # has text affinity and the LHS has integer affinity. The rule is + # that we try to convert both sides to an integer before doing the + # comparision. Hence, the integer value 10 in t3 will compare equal + # to the string value '10.0' in t4 because the t4 value will be + # converted into an integer. + execsql { + SELECT * FROM t4 WHERE x IN (SELECT a FROM t3); + } +} {10.0} +do_test subquery-2.5.3.1 { + # The t4i index cannot be used to resolve the "x IN (...)" constraint + # because the constraint has integer affinity but t4i has text affinity. 
+ execsql { + CREATE INDEX t4i ON t4(x); + SELECT * FROM t4 WHERE x IN (SELECT a FROM t3); + } +} {10.0} +do_test subquery-2.5.3.2 { + # Verify that the t4i index was not used in the previous query + set ::sqlite_query_plan +} {t4 {}} +do_test subquery-2.5.4 { + execsql { + DROP TABLE t3; + DROP TABLE t4; + } +} {} + +#------------------------------------------------------------------ +# The following test cases - subquery-3.* - test tickets that +# were raised during development of correlated subqueries. +# + +# Ticket 1083 +ifcapable view { + do_test subquery-3.1 { + catchsql { DROP TABLE t1; } + catchsql { DROP TABLE t2; } + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + CREATE VIEW v1 AS SELECT b FROM t1 WHERE a>0; + CREATE TABLE t2(p,q); + INSERT INTO t2 VALUES(2,9); + SELECT * FROM v1 WHERE EXISTS(SELECT * FROM t2 WHERE p=v1.b); + } + } {2} +} else { + catchsql { DROP TABLE t1; } + catchsql { DROP TABLE t2; } + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + CREATE TABLE t2(p,q); + INSERT INTO t2 VALUES(2,9); + } +} + +# Ticket 1084 +do_test subquery-3.2 { + catchsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + } + execsql { + SELECT (SELECT t1.a) FROM t1; + } +} {1} + +# Test Cases subquery-3.3.* test correlated subqueries where the +# parent query is an aggregate query. Ticket #1105 is an example +# of such a query. +# +do_test subquery-3.3.1 { + execsql { + SELECT a, (SELECT b) FROM t1 GROUP BY a; + } +} {1 2} +do_test subquery-3.3.2 { + catchsql {DROP TABLE t2} + execsql { + CREATE TABLE t2(c, d); + INSERT INTO t2 VALUES(1, 'one'); + INSERT INTO t2 VALUES(2, 'two'); + SELECT a, (SELECT d FROM t2 WHERE a=c) FROM t1 GROUP BY a; + } +} {1 one} +do_test subquery-3.3.3 { + execsql { + INSERT INTO t1 VALUES(2, 4); + SELECT max(a), (SELECT d FROM t2 WHERE a=c) FROM t1; + } +} {2 two} +do_test subquery-3.3.4 { + execsql { + SELECT a, (SELECT (SELECT d FROM t2 WHERE a=c)) FROM t1 GROUP BY a; + } +} {1 one 2 two} +do_test subquery-3.3.5 { + execsql { + SELECT a, (SELECT count(*) FROM t2 WHERE a=c) FROM t1; + } +} {1 1 2 1} + +#------------------------------------------------------------------ +# These tests - subquery-4.* - use the TCL statement cache to try +# and expose bugs to do with re-using statements that have been +# passed to sqlite3_reset(). +# +# One problem was that VDBE memory cells were not being initialised +# to NULL on the second and subsequent executions. +# +do_test subquery-4.1.1 { + execsql { + SELECT (SELECT a FROM t1); + } +} {1} +do_test subquery-4.2 { + execsql { + DELETE FROM t1; + SELECT (SELECT a FROM t1); + } +} {{}} +do_test subquery-4.2.1 { + execsql { + CREATE TABLE t3(a PRIMARY KEY); + INSERT INTO t3 VALUES(10); + } + execsql {INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1)} +} {} +do_test subquery-4.2.2 { + execsql {INSERT INTO t3 VALUES((SELECT max(a) FROM t3)+1)} +} {} + +#------------------------------------------------------------------ +# The subquery-5.* tests make sure string literals in double-quotes +# are handled efficiently. Double-quote literals are first checked +# to see if they match any column names. If there is not column name +# match then those literals are used a string constants. When a +# double-quoted string appears, we want to make sure that the search +# for a matching column name did not cause an otherwise static subquery +# to become a dynamic (correlated) subquery. 
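The behaviour being guarded here is SQLite's double-quote fallback: a double-quoted token is first resolved as an identifier and only becomes a string constant when no column of that name is in scope. A minimal sketch of the difference, using a hypothetical table named quirk and the usual db handle:

  db eval { CREATE TABLE quirk(x TEXT) }
  db eval { INSERT INTO quirk VALUES('two') }
  db eval { SELECT count(*) FROM quirk WHERE x = 'two' }  ;# 1  (ordinary string literal)
  db eval { SELECT count(*) FROM quirk WHERE x = "two" }  ;# 1  (no column named two, so it degrades to the string 'two')
  db eval { SELECT count(*) FROM quirk WHERE x = "x" }    ;# 1  ("x" names the column, so this is x = x, not x = 'x')
  db eval { DROP TABLE quirk }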
+# +do_test subquery-5.1 { + proc callcntproc {n} { + incr ::callcnt + return $n + } + set callcnt 0 + db function callcnt callcntproc + execsql { + CREATE TABLE t4(x,y); + INSERT INTO t4 VALUES('one',1); + INSERT INTO t4 VALUES('two',2); + INSERT INTO t4 VALUES('three',3); + INSERT INTO t4 VALUES('four',4); + CREATE TABLE t5(a,b); + INSERT INTO t5 VALUES(1,11); + INSERT INTO t5 VALUES(2,22); + INSERT INTO t5 VALUES(3,33); + INSERT INTO t5 VALUES(4,44); + SELECT b FROM t5 WHERE a IN + (SELECT callcnt(y)+0 FROM t4 WHERE x="two") + } +} {22} +do_test subquery-5.2 { + # This is the key test. The subquery should have only run once. If + # The double-quoted identifier "two" were causing the subquery to be + # processed as a correlated subquery, then it would have run 4 times. + set callcnt +} {1} + + +# Ticket #1380. Make sure correlated subqueries on an IN clause work +# correctly when the left-hand side of the IN operator is constant. +# +do_test subquery-6.1 { + set callcnt 0 + execsql { + SELECT x FROM t4 WHERE 1 IN (SELECT callcnt(count(*)) FROM t5 WHERE a=y) + } +} {one two three four} +do_test subquery-6.2 { + set callcnt +} {4} +do_test subquery-6.3 { + set callcnt 0 + execsql { + SELECT x FROM t4 WHERE 1 IN (SELECT callcnt(count(*)) FROM t5 WHERE a=1) + } +} {one two three four} +do_test subquery-6.4 { + set callcnt +} {1} + +if 0 { ############# disable until we get #2652 fixed +# Ticket #2652. Allow aggregate functions of outer queries inside +# a non-aggregate subquery. +# +do_test subquery-7.1 { + execsql { + CREATE TABLE t7(c7); + INSERT INTO t7 VALUES(1); + INSERT INTO t7 VALUES(2); + INSERT INTO t7 VALUES(3); + CREATE TABLE t8(c8); + INSERT INTO t8 VALUES(100); + INSERT INTO t8 VALUES(200); + INSERT INTO t8 VALUES(300); + CREATE TABLE t9(c9); + INSERT INTO t9 VALUES(10000); + INSERT INTO t9 VALUES(20000); + INSERT INTO t9 VALUES(30000); + + SELECT (SELECT c7+c8 FROM t7) FROM t8; + } +} {101 201 301} +do_test subquery-7.2 { + execsql { + SELECT (SELECT max(c7)+c8 FROM t7) FROM t8; + } +} {103 203 303} +do_test subquery-7.3 { + execsql { + SELECT (SELECT c7+max(c8) FROM t8) FROM t7 + } +} {301} +do_test subquery-7.4 { + execsql { + SELECT (SELECT max(c7)+max(c8) FROM t8) FROM t7 + } +} {303} +do_test subquery-7.5 { + execsql { + SELECT (SELECT c8 FROM t8 WHERE rowid=max(c7)) FROM t7 + } +} {300} +do_test subquery-7.6 { + execsql { + SELECT (SELECT (SELECT max(c7+c8+c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.7 { + execsql { + SELECT (SELECT (SELECT c7+max(c8+c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.8 { + execsql { + SELECT (SELECT (SELECT max(c7)+c8+c9 FROM t9) FROM t8) FROM t7 + } +} {10103} +do_test subquery-7.9 { + execsql { + SELECT (SELECT (SELECT c7+max(c8)+c9 FROM t9) FROM t8) FROM t7 + } +} {10301 10302 10303} +do_test subquery-7.10 { + execsql { + SELECT (SELECT (SELECT c7+c8+max(c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.11 { + execsql { + SELECT (SELECT (SELECT max(c7)+max(c8)+max(c9) FROM t9) FROM t8) FROM t7 + } +} {30303} +} ;############# Disabled + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/subselect.test b/libraries/sqlite/unix/sqlite-3.5.1/test/subselect.test new file mode 100644 index 0000000..a43bca3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/subselect.test @@ -0,0 +1,202 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing SELECT statements that are part of +# expressions. +# +# $Id: subselect.test,v 1.14 2007/04/12 03:54:39 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Omit this whole file if the library is build without subquery support. +ifcapable !subquery { + finish_test + return +} + +# Basic sanity checking. Try a simple subselect. +# +do_test subselect-1.1 { + execsql { + CREATE TABLE t1(a int, b int); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + INSERT INTO t1 VALUES(5,6); + } + execsql {SELECT * FROM t1 WHERE a = (SELECT count(*) FROM t1)} +} {3 4} + +# Try a select with more than one result column. +# +do_test subselect-1.2 { + set v [catch {execsql {SELECT * FROM t1 WHERE a = (SELECT * FROM t1)}} msg] + lappend v $msg +} {1 {only a single result allowed for a SELECT that is part of an expression}} + +# A subselect without an aggregate. +# +do_test subselect-1.3a { + execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=2)} +} {2} +do_test subselect-1.3b { + execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=4)} +} {4} +do_test subselect-1.3c { + execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=6)} +} {6} +do_test subselect-1.3c { + execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=8)} +} {} + +# What if the subselect doesn't return any value. We should get +# NULL as the result. Check it out. +# +do_test subselect-1.4 { + execsql {SELECT b from t1 where a = coalesce((SELECT a FROM t1 WHERE b=5),1)} +} {2} + +# Try multiple subselects within a single expression. +# +do_test subselect-1.5 { + execsql { + CREATE TABLE t2(x int, y int); + INSERT INTO t2 VALUES(1,2); + INSERT INTO t2 VALUES(2,4); + INSERT INTO t2 VALUES(3,8); + INSERT INTO t2 VALUES(4,16); + } + execsql { + SELECT y from t2 + WHERE x = (SELECT sum(b) FROM t1 where a notnull) - (SELECT sum(a) FROM t1) + } +} {8} + +# Try something useful. Delete every entry from t2 where the +# x value is less than half of the maximum. +# +do_test subselect-1.6 { + execsql {DELETE FROM t2 WHERE x < 0.5*(SELECT max(x) FROM t2)} + execsql {SELECT x FROM t2 ORDER BY x} +} {2 3 4} + +# Make sure sorting works for SELECTs there used as a scalar expression. +# +do_test subselect-2.1 { + execsql { + SELECT (SELECT a FROM t1 ORDER BY a), (SELECT a FROM t1 ORDER BY a DESC) + } +} {1 5} +do_test subselect-2.2 { + execsql { + SELECT 1 IN (SELECT a FROM t1 ORDER BY a); + } +} {1} +do_test subselect-2.3 { + execsql { + SELECT 2 IN (SELECT a FROM t1 ORDER BY a DESC); + } +} {0} + +# Verify that the ORDER BY clause is honored in a subquery. 
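A scalar subquery that matches no rows evaluates to NULL rather than raising an error, which is why subselect-1.4 above wraps it in coalesce() to supply a default. A short sketch against the t1 table created above (rows 1/2, 3/4, 5/6), assuming the usual db handle:

  # No row in t1 has b=8, so the subquery evaluates to NULL ...
  db eval { SELECT (SELECT a FROM t1 WHERE b=8) }               ;# {} (NULL)
  # ... and coalesce() turns that NULL into a usable default.
  db eval { SELECT coalesce((SELECT a FROM t1 WHERE b=8), 0) }  ;# 0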
+# +ifcapable compound { +do_test subselect-3.1 { + execsql { + CREATE TABLE t3(x int); + INSERT INTO t3 SELECT a FROM t1 UNION ALL SELECT b FROM t1; + SELECT * FROM t3 ORDER BY x; + } +} {1 2 3 4 5 6} +} ;# ifcapable compound +ifcapable !compound { +do_test subselect-3.1 { + execsql { + CREATE TABLE t3(x int); + INSERT INTO t3 SELECT a FROM t1; + INSERT INTO t3 SELECT b FROM t1; + SELECT * FROM t3 ORDER BY x; + } +} {1 2 3 4 5 6} +} ;# ifcapable !compound + +do_test subselect-3.2 { + execsql { + SELECT sum(x) FROM (SELECT x FROM t3 ORDER BY x LIMIT 2); + } +} {3} +do_test subselect-3.3 { + execsql { + SELECT sum(x) FROM (SELECT x FROM t3 ORDER BY x DESC LIMIT 2); + } +} {11} +do_test subselect-3.4 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x); + } +} {1} +do_test subselect-3.5 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x DESC); + } +} {6} +do_test subselect-3.6 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x LIMIT 1); + } +} {1} +do_test subselect-3.7 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x DESC LIMIT 1); + } +} {6} +do_test subselect-3.8 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x LIMIT 1 OFFSET 2); + } +} {3} +do_test subselect-3.9 { + execsql { + SELECT (SELECT x FROM t3 ORDER BY x DESC LIMIT 1 OFFSET 2); + } +} {4} +do_test subselect-3.10 { + execsql { + SELECT x FROM t3 WHERE x IN + (SELECT x FROM t3 ORDER BY x DESC LIMIT 1 OFFSET 2); + } +} {4} + +# Ticket #2295. +# Make sure type affinities work correctly on subqueries with +# an ORDER BY clause. +# +do_test subselect-4.1 { + execsql { + CREATE TABLE t4(a TEXT, b TEXT); + INSERT INTO t4 VALUES('a','1'); + INSERT INTO t4 VALUES('b','2'); + INSERT INTO t4 VALUES('c','3'); + SELECT a FROM t4 WHERE b IN (SELECT b FROM t4 ORDER BY b); + } +} {a b c} +do_test subselect-4.2 { + execsql { + SELECT a FROM t4 WHERE b IN (SELECT b FROM t4 ORDER BY b LIMIT 1); + } +} {a} +do_test subselect-4.3 { + execsql { + SELECT a FROM t4 WHERE b IN (SELECT b FROM t4 ORDER BY b DESC LIMIT 1); + } +} {c} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/substr.test b/libraries/sqlite/unix/sqlite-3.5.1/test/substr.test new file mode 100644 index 0000000..6fae6cd --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/substr.test @@ -0,0 +1,108 @@ +# 2007 May 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the built-in SUBSTR() functions. +# +# $Id: substr.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !tclvar { + finish_test + return +} + +# Create a table to work with. 
+# +execsql { + CREATE TABLE t1(t text, b blob) +} +proc substr-test {id string i1 i2 result} { + db eval { + DELETE FROM t1; + INSERT INTO t1(t) VALUES($string) + } + do_test substr-$id.1 [subst { + execsql { + SELECT substr(t, $i1, $i2) FROM t1 + } + }] [list $result] + set qstr '[string map {' ''} $string]' + do_test substr-$id.2 [subst { + execsql { + SELECT substr($qstr, $i1, $i2) + } + }] [list $result] +} +proc subblob-test {id hex i1 i2 hexresult} { + db eval " + DELETE FROM t1; + INSERT INTO t1(b) VALUES(x'$hex') + " + do_test substr-$id.1 [subst { + execsql { + SELECT hex(substr(b, $i1, $i2)) FROM t1 + } + }] [list $hexresult] + do_test substr-$id.2 [subst { + execsql { + SELECT hex(substr(x'$hex', $i1, $i2)) + } + }] [list $hexresult] +} + +# Basic SUBSTR functionality +# +substr-test 1.1 abcdefg 1 1 a +substr-test 1.2 abcdefg 2 1 b +substr-test 1.3 abcdefg 1 2 ab +substr-test 1.4 abcdefg 1 100 abcdefg +substr-test 1.5 abcdefg 0 1 a +substr-test 1.6 abcdefg -1 1 g +substr-test 1.7 abcdefg -1 10 g +substr-test 1.8 abcdefg -5 3 cde +substr-test 1.9 abcdefg -7 3 abc +substr-test 1.10 abcdefg -100 98 abcde + +# Make sure everything works with long unicode characters +# +substr-test 2.1 \u1234\u2345\u3456 1 1 \u1234 +substr-test 2.2 \u1234\u2345\u3456 2 1 \u2345 +substr-test 2.3 \u1234\u2345\u3456 1 2 \u1234\u2345 +substr-test 2.4 \u1234\u2345\u3456 -1 1 \u3456 +substr-test 2.5 a\u1234b\u2345c\u3456c -5 3 b\u2345c + +# Basic functionality for BLOBs +# +subblob-test 3.1 61626364656667 1 1 61 +subblob-test 3.2 61626364656667 2 1 62 +subblob-test 3.3 61626364656667 1 2 6162 +subblob-test 3.4 61626364656667 1 100 61626364656667 +subblob-test 3.5 61626364656667 0 1 61 +subblob-test 3.6 61626364656667 -1 1 67 +subblob-test 3.7 61626364656667 -1 10 67 +subblob-test 3.8 61626364656667 -5 3 636465 +subblob-test 3.9 61626364656667 -7 3 616263 +subblob-test 3.10 61626364656667 -100 98 6162636465 + +# If these blobs were strings, then they would contain multi-byte +# characters. But since they are blobs, the substr indices refer +# to bytes. +# +subblob-test 4.1 61E188B462E28D8563E3919663 1 1 61 +subblob-test 4.2 61E188B462E28D8563E3919663 2 1 E1 +subblob-test 4.3 61E188B462E28D8563E3919663 1 2 61E1 +subblob-test 4.4 61E188B462E28D8563E3919663 -2 1 96 +subblob-test 4.5 61E188B462E28D8563E3919663 -5 4 63E39196 +subblob-test 4.6 61E188B462E28D8563E3919663 -100 98 61E188B462E28D8563E391 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/sync.test b/libraries/sqlite/unix/sqlite-3.5.1/test/sync.test new file mode 100644 index 0000000..88a1b7d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/sync.test @@ -0,0 +1,97 @@ +# 2005 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that fsync is disabled when +# pragma synchronous=off even for multi-database commits. +# +# $Id: sync.test,v 1.5 2006/02/11 01:25:51 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# +# These tests are only applicable on unix when pager pragma are +# enabled. 
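The point of these sync tests is that PRAGMA synchronous is a per-database setting, so a commit that spans an attached database only skips fsync() when every database involved has it turned off. A hedged sketch of the relevant settings (same t1/t2 and test2.db names the tests use; db is the usual open handle):

  db eval { ATTACH DATABASE 'test2.db' AS db2 }
  db eval {
    PRAGMA main.synchronous = OFF;   -- no fsync for the main database file
    PRAGMA db2.synchronous  = OFF;   -- nor for the attached database
  }
  db eval {
    BEGIN;
    INSERT INTO t1 VALUES(1,2);      -- t1 in main, t2 in db2, as in sync-1.1
    INSERT INTO t2 VALUES(3,4);
    COMMIT;                          -- with both set to OFF, the commit issues no syncs
  }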
+# +if {$::tcl_platform(platform)!="unix"} { + finish_test + return +} +ifcapable !pager_pragmas { + finish_test + return +} + +do_test sync-1.1 { + set sqlite_sync_count 0 + file delete -force test2.db + file delete -force test2.db-journal + execsql { + PRAGMA fullfsync=OFF; + CREATE TABLE t1(a,b); + ATTACH DATABASE 'test2.db' AS db2; + CREATE TABLE db2.t2(x,y); + } + ifcapable !dirsync { + incr sqlite_sync_count 2 + } + set sqlite_sync_count +} 8 +ifcapable pager_pragmas { + do_test sync-1.2 { + set sqlite_sync_count 0 + execsql { + PRAGMA main.synchronous=on; + PRAGMA db2.synchronous=on; + BEGIN; + INSERT INTO t1 VALUES(1,2); + INSERT INTO t2 VALUES(3,4); + COMMIT; + } + ifcapable !dirsync { + incr sqlite_sync_count 3 + } + set sqlite_sync_count + } 8 +} +do_test sync-1.3 { + set sqlite_sync_count 0 + execsql { + PRAGMA main.synchronous=full; + PRAGMA db2.synchronous=full; + BEGIN; + INSERT INTO t1 VALUES(3,4); + INSERT INTO t2 VALUES(5,6); + COMMIT; + } + ifcapable !dirsync { + incr sqlite_sync_count 3 + } + set sqlite_sync_count +} 10 +ifcapable pager_pragmas { + do_test sync-1.4 { + set sqlite_sync_count 0 + execsql { + PRAGMA main.synchronous=off; + PRAGMA db2.synchronous=off; + BEGIN; + INSERT INTO t1 VALUES(5,6); + INSERT INTO t2 VALUES(7,8); + COMMIT; + } + set sqlite_sync_count + } 0 +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/table.test b/libraries/sqlite/unix/sqlite-3.5.1/test/table.test new file mode 100644 index 0000000..718f171 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/table.test @@ -0,0 +1,674 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the CREATE TABLE statement. +# +# $Id: table.test,v 1.47 2007/05/02 17:54:56 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a basic table and verify it is added to sqlite_master +# +do_test table-1.1 { + execsql { + CREATE TABLE test1 ( + one varchar(10), + two text + ) + } + execsql { + SELECT sql FROM sqlite_master WHERE type!='meta' + } +} {{CREATE TABLE test1 ( + one varchar(10), + two text + )}} + + +# Verify the other fields of the sqlite_master file. +# +do_test table-1.3 { + execsql {SELECT name, tbl_name, type FROM sqlite_master WHERE type!='meta'} +} {test1 test1 table} + +# Close and reopen the database. Verify that everything is +# still the same. +# +do_test table-1.4 { + db close + sqlite3 db test.db + execsql {SELECT name, tbl_name, type from sqlite_master WHERE type!='meta'} +} {test1 test1 table} + +# Drop the database and make sure it disappears. +# +do_test table-1.5 { + execsql {DROP TABLE test1} + execsql {SELECT * FROM sqlite_master WHERE type!='meta'} +} {} + +# Close and reopen the database. Verify that the table is +# still gone. +# +do_test table-1.6 { + db close + sqlite3 db test.db + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {} + +# Repeat the above steps, but this time quote the table name. 
+# +do_test table-1.10 { + execsql {CREATE TABLE "create" (f1 int)} + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {create} +do_test table-1.11 { + execsql {DROP TABLE "create"} + execsql {SELECT name FROM "sqlite_master" WHERE type!='meta'} +} {} +do_test table-1.12 { + execsql {CREATE TABLE test1("f1 ho" int)} + execsql {SELECT name as "X" FROM sqlite_master WHERE type!='meta'} +} {test1} +do_test table-1.13 { + execsql {DROP TABLE "TEST1"} + execsql {SELECT name FROM "sqlite_master" WHERE type!='meta'} +} {} + + + +# Verify that we cannot make two tables with the same name +# +do_test table-2.1 { + execsql {CREATE TABLE TEST2(one text)} + catchsql {CREATE TABLE test2(two text default 'hi')} +} {1 {table test2 already exists}} +do_test table-2.1.1 { + catchsql {CREATE TABLE "test2" (two)} +} {1 {table "test2" already exists}} +do_test table-2.1b { + set v [catch {execsql {CREATE TABLE sqlite_master(two text)}} msg] + lappend v $msg +} {1 {object name reserved for internal use: sqlite_master}} +do_test table-2.1c { + db close + sqlite3 db test.db + set v [catch {execsql {CREATE TABLE sqlite_master(two text)}} msg] + lappend v $msg +} {1 {object name reserved for internal use: sqlite_master}} +do_test table-2.1d { + catchsql {CREATE TABLE IF NOT EXISTS test2(x,y)} +} {0 {}} +do_test table-2.1e { + catchsql {CREATE TABLE IF NOT EXISTS test2(x UNIQUE, y TEXT PRIMARY KEY)} +} {0 {}} +do_test table-2.1f { + execsql {DROP TABLE test2; SELECT name FROM sqlite_master WHERE type!='meta'} +} {} + +# Verify that we cannot make a table with the same name as an index +# +do_test table-2.2a { + execsql {CREATE TABLE test2(one text); CREATE INDEX test3 ON test2(one)} + set v [catch {execsql {CREATE TABLE test3(two text)}} msg] + lappend v $msg +} {1 {there is already an index named test3}} +do_test table-2.2b { + db close + sqlite3 db test.db + set v [catch {execsql {CREATE TABLE test3(two text)}} msg] + lappend v $msg +} {1 {there is already an index named test3}} +do_test table-2.2c { + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {test2 test3} +do_test table-2.2d { + execsql {DROP INDEX test3} + set v [catch {execsql {CREATE TABLE test3(two text)}} msg] + lappend v $msg +} {0 {}} +do_test table-2.2e { + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {test2 test3} +do_test table-2.2f { + execsql {DROP TABLE test2; DROP TABLE test3} + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {} + +# Create a table with many field names +# +set big_table \ +{CREATE TABLE big( + f1 varchar(20), + f2 char(10), + f3 varchar(30) primary key, + f4 text, + f5 text, + f6 text, + f7 text, + f8 text, + f9 text, + f10 text, + f11 text, + f12 text, + f13 text, + f14 text, + f15 text, + f16 text, + f17 text, + f18 text, + f19 text, + f20 text +)} +do_test table-3.1 { + execsql $big_table + execsql {SELECT sql FROM sqlite_master WHERE type=='table'} +} \{$big_table\} +do_test table-3.2 { + set v [catch {execsql {CREATE TABLE BIG(xyz foo)}} msg] + lappend v $msg +} {1 {table BIG already exists}} +do_test table-3.3 { + set v [catch {execsql {CREATE TABLE biG(xyz foo)}} msg] + lappend v $msg +} {1 {table biG already exists}} +do_test table-3.4 { + set v [catch {execsql {CREATE TABLE bIg(xyz foo)}} msg] + lappend v $msg +} {1 {table bIg already exists}} +do_test table-3.5 { + db close + sqlite3 db test.db + set v [catch {execsql {CREATE TABLE Big(xyz foo)}} msg] + lappend v $msg +} {1 {table Big already exists}} +do_test 
table-3.6 { + execsql {DROP TABLE big} + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {} + +# Try creating large numbers of tables +# +set r {} +for {set i 1} {$i<=100} {incr i} { + lappend r [format test%03d $i] +} +do_test table-4.1 { + for {set i 1} {$i<=100} {incr i} { + set sql "CREATE TABLE [format test%03d $i] (" + for {set k 1} {$k<$i} {incr k} { + append sql "field$k text," + } + append sql "last_field text)" + execsql $sql + } + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} $r +do_test table-4.1b { + db close + sqlite3 db test.db + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} $r + +# Drop the even numbered tables +# +set r {} +for {set i 1} {$i<=100} {incr i 2} { + lappend r [format test%03d $i] +} +do_test table-4.2 { + for {set i 2} {$i<=100} {incr i 2} { + # if {$i==38} {execsql {pragma vdbe_trace=on}} + set sql "DROP TABLE [format TEST%03d $i]" + execsql $sql + } + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} $r +#exit + +# Drop the odd number tables +# +do_test table-4.3 { + for {set i 1} {$i<=100} {incr i 2} { + set sql "DROP TABLE [format test%03d $i]" + execsql $sql + } + execsql {SELECT name FROM sqlite_master WHERE type!='meta' ORDER BY name} +} {} + +# Try to drop a table that does not exist +# +do_test table-5.1.1 { + catchsql {DROP TABLE test009} +} {1 {no such table: test009}} +do_test table-5.1.2 { + catchsql {DROP TABLE IF EXISTS test009} +} {0 {}} + +# Try to drop sqlite_master +# +do_test table-5.2 { + catchsql {DROP TABLE IF EXISTS sqlite_master} +} {1 {table sqlite_master may not be dropped}} + +# Make sure an EXPLAIN does not really create a new table +# +do_test table-5.3 { + ifcapable {explain} { + execsql {EXPLAIN CREATE TABLE test1(f1 int)} + } + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {} + +# Make sure an EXPLAIN does not really drop an existing table +# +do_test table-5.4 { + execsql {CREATE TABLE test1(f1 int)} + ifcapable {explain} { + execsql {EXPLAIN DROP TABLE test1} + } + execsql {SELECT name FROM sqlite_master WHERE type!='meta'} +} {test1} + +# Create a table with a goofy name +# +#do_test table-6.1 { +# execsql {CREATE TABLE 'Spaces In This Name!'(x int)} +# execsql {INSERT INTO 'spaces in this name!' VALUES(1)} +# set list [glob -nocomplain testdb/spaces*.tbl] +#} {testdb/spaces+in+this+name+.tbl} + +# Try using keywords as table names or column names. 
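Identifiers that collide with keywords (or contain unusual characters) have to be quoted; the tests that follow rely on double quotes and square brackets, both of which SQLite accepts around identifiers. A brief sketch with a hypothetical table named kw:

  db eval { CREATE TABLE kw("order" INTEGER, [select] TEXT) }
  db eval { INSERT INTO kw VALUES(1, 'hi') }
  db eval { SELECT "order", [select] FROM kw }   ;# 1 hi
  db eval { DROP TABLE kw }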
+# +do_test table-7.1 { + set v [catch {execsql { + CREATE TABLE weird( + desc text, + asc text, + key int, + [14_vac] boolean, + fuzzy_dog_12 varchar(10), + begin blob, + end clob + ) + }} msg] + lappend v $msg +} {0 {}} +do_test table-7.2 { + execsql { + INSERT INTO weird VALUES('a','b',9,0,'xyz','hi','y''all'); + SELECT * FROM weird; + } +} {a b 9 0 xyz hi y'all} +do_test table-7.3 { + execsql2 { + SELECT * FROM weird; + } +} {desc a asc b key 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all} + +# Try out the CREATE TABLE AS syntax +# +do_test table-8.1 { + execsql2 { + CREATE TABLE t2 AS SELECT * FROM weird; + SELECT * FROM t2; + } +} {desc a asc b key 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all} +do_test table-8.1.1 { + execsql { + SELECT sql FROM sqlite_master WHERE name='t2'; + } +} {{CREATE TABLE t2( + "desc" text, + "asc" text, + "key" int, + "14_vac" boolean, + fuzzy_dog_12 varchar(10), + "begin" blob, + "end" clob +)}} +do_test table-8.2 { + execsql { + CREATE TABLE "t3""xyz"(a,b,c); + INSERT INTO [t3"xyz] VALUES(1,2,3); + SELECT * FROM [t3"xyz]; + } +} {1 2 3} +do_test table-8.3 { + execsql2 { + CREATE TABLE [t4"abc] AS SELECT count(*) as cnt, max(b+c) FROM [t3"xyz]; + SELECT * FROM [t4"abc]; + } +} {cnt 1 max(b+c) 5} + +# Update for v3: The declaration type of anything except a column is now a +# NULL pointer, so the created table has no column types. (Changed result +# from {{CREATE TABLE 't4"abc'(cnt NUMERIC,"max(b+c)" NUMERIC)}}). +do_test table-8.3.1 { + execsql { + SELECT sql FROM sqlite_master WHERE name='t4"abc' + } +} {{CREATE TABLE "t4""abc"(cnt,"max(b+c)")}} + +ifcapable tempdb { + do_test table-8.4 { + execsql2 { + CREATE TEMPORARY TABLE t5 AS SELECT count(*) AS [y'all] FROM [t3"xyz]; + SELECT * FROM t5; + } + } {y'all 1} +} + +do_test table-8.5 { + db close + sqlite3 db test.db + execsql2 { + SELECT * FROM [t4"abc]; + } +} {cnt 1 max(b+c) 5} +do_test table-8.6 { + execsql2 { + SELECT * FROM t2; + } +} {desc a asc b key 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all} +do_test table-8.7 { + catchsql { + SELECT * FROM t5; + } +} {1 {no such table: t5}} +do_test table-8.8 { + catchsql { + CREATE TABLE t5 AS SELECT * FROM no_such_table; + } +} {1 {no such table: no_such_table}} + +# Make sure we cannot have duplicate column names within a table. +# +do_test table-9.1 { + catchsql { + CREATE TABLE t6(a,b,a); + } +} {1 {duplicate column name: a}} +do_test table-9.2 { + catchsql { + CREATE TABLE t6(a varchar(100), b blob, a integer); + } +} {1 {duplicate column name: a}} + +# Check the foreign key syntax. 
+# +ifcapable {foreignkey} { +do_test table-10.1 { + catchsql { + CREATE TABLE t6(a REFERENCES t4(a) NOT NULL); + INSERT INTO t6 VALUES(NULL); + } +} {1 {t6.a may not be NULL}} +do_test table-10.2 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a REFERENCES t4(a) MATCH PARTIAL); + } +} {0 {}} +do_test table-10.3 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a REFERENCES t4 MATCH FULL ON DELETE SET NULL NOT NULL); + } +} {0 {}} +do_test table-10.4 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a REFERENCES t4 MATCH FULL ON UPDATE SET DEFAULT DEFAULT 1); + } +} {0 {}} +do_test table-10.5 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a NOT NULL NOT DEFERRABLE INITIALLY IMMEDIATE); + } +} {0 {}} +do_test table-10.6 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a NOT NULL DEFERRABLE INITIALLY DEFERRED); + } +} {0 {}} +do_test table-10.7 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a, + FOREIGN KEY (a) REFERENCES t4(b) DEFERRABLE INITIALLY DEFERRED + ); + } +} {0 {}} +do_test table-10.8 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a,b,c, + FOREIGN KEY (b,c) REFERENCES t4(x,y) MATCH PARTIAL + ON UPDATE SET NULL ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + ); + } +} {0 {}} +do_test table-10.9 { + catchsql { + DROP TABLE t6; + CREATE TABLE t6(a,b,c, + FOREIGN KEY (b,c) REFERENCES t4(x) + ); + } +} {1 {number of columns in foreign key does not match the number of columns in the referenced table}} +do_test table-10.10 { + catchsql {DROP TABLE t6} + catchsql { + CREATE TABLE t6(a,b,c, + FOREIGN KEY (b,c) REFERENCES t4(x,y,z) + ); + } +} {1 {number of columns in foreign key does not match the number of columns in the referenced table}} +do_test table-10.11 { + catchsql {DROP TABLE t6} + catchsql { + CREATE TABLE t6(a,b, c REFERENCES t4(x,y)); + } +} {1 {foreign key on c should reference only one column of table t4}} +do_test table-10.12 { + catchsql {DROP TABLE t6} + catchsql { + CREATE TABLE t6(a,b,c, + FOREIGN KEY (b,x) REFERENCES t4(x,y) + ); + } +} {1 {unknown column "x" in foreign key definition}} +do_test table-10.13 { + catchsql {DROP TABLE t6} + catchsql { + CREATE TABLE t6(a,b,c, + FOREIGN KEY (x,b) REFERENCES t4(x,y) + ); + } +} {1 {unknown column "x" in foreign key definition}} +} ;# endif foreignkey + +# Test for the "typeof" function. More tests for the +# typeof() function are found in bind.test and types.test. +# +do_test table-11.1 { + execsql { + CREATE TABLE t7( + a integer primary key, + b number(5,10), + c character varying (8), + d VARCHAR(9), + e clob, + f BLOB, + g Text, + h + ); + INSERT INTO t7(a) VALUES(1); + SELECT typeof(a), typeof(b), typeof(c), typeof(d), + typeof(e), typeof(f), typeof(g), typeof(h) + FROM t7 LIMIT 1; + } +} {integer null null null null null null null} +do_test table-11.2 { + execsql { + SELECT typeof(a+b), typeof(a||b), typeof(c+d), typeof(c||d) + FROM t7 LIMIT 1; + } +} {null null null null} + +# Test that when creating a table using CREATE TABLE AS, column types are +# assigned correctly for (SELECT ...) and 'x AS y' expressions. 
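The table-11 tests above hinge on typeof() reporting the storage class of the stored value rather than the declared column type, which is why columns omitted from the INSERT all come back as null. A small sketch with a hypothetical table t11, also showing how inserted values adapt to the declared affinity:

  db eval { CREATE TABLE t11(i INTEGER, t TEXT) }
  db eval { INSERT INTO t11(i) VALUES(5) }                    ;# t is left unset
  db eval { SELECT typeof(i), typeof(t) FROM t11 }            ;# integer null
  db eval { INSERT INTO t11 VALUES('7', 42) }                 ;# values adapt to the declared affinity
  db eval { SELECT typeof(i), typeof(t) FROM t11 WHERE i=7 }  ;# integer text
  db eval { DROP TABLE t11 }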
+do_test table-12.1 { + ifcapable subquery { + execsql { + CREATE TABLE t8 AS SELECT b, h, a as i, (SELECT f FROM t7) as j FROM t7; + } + } else { + execsql { + CREATE TABLE t8 AS SELECT b, h, a as i, f as j FROM t7; + } + } +} {} +do_test table-12.2 { + execsql { + SELECT sql FROM sqlite_master WHERE tbl_name = 't8' + } +} {{CREATE TABLE t8(b number(5,10),h,i integer,j BLOB)}} + +#-------------------------------------------------------------------- +# Test cases table-13.* +# +# Test the ability to have default values of CURRENT_TIME, CURRENT_DATE +# and CURRENT_TIMESTAMP. +# +do_test table-13.1 { + execsql { + CREATE TABLE tablet8( + a integer primary key, + tm text DEFAULT CURRENT_TIME, + dt text DEFAULT CURRENT_DATE, + dttm text DEFAULT CURRENT_TIMESTAMP + ); + SELECT * FROM tablet8; + } +} {} +set i 0 +foreach {date time seconds} { + 1976-07-04 12:00:00 205329600 + 1994-04-16 14:00:00 766504800 + 2000-01-01 00:00:00 946684800 + 2003-12-31 12:34:56 1072874096 +} { + incr i + set sqlite_current_time $seconds + do_test table-13.2.$i { + execsql " + INSERT INTO tablet8(a) VALUES($i); + SELECT tm, dt, dttm FROM tablet8 WHERE a=$i; + " + } [list $time $date [list $date $time]] +} +set sqlite_current_time 0 + +#-------------------------------------------------------------------- +# Test cases table-14.* +# +# Test that a table cannot be created or dropped while other virtual +# machines are active. This is required because otherwise when in +# auto-vacuum mode the btree-layer may need to move the root-pages of +# a table for which there is an open cursor. +# +# 2007-05-02: A open btree cursor no longer blocks CREATE TABLE. +# But DROP TABLE is still prohibited because we do not want to +# delete a table out from under a running query. +# + +# db eval { +# pragma vdbe_trace = 0; +# } +# Try to create a table from within a callback: +unset -nocomplain result +do_test table-14.1 { + set rc [ + catch { + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {CREATE TABLE t9(a, b, c)} + } + } msg + ] + set result [list $rc $msg] +} {0 {}} + +# Try to drop a table from within a callback: +do_test table-14.3 { + set rc [ + catch { + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {DROP TABLE t9;} + } + } msg + ] + set result [list $rc $msg] +} {1 {database table is locked}} + +# Now attach a database and ensure that a table can be created in the +# attached database whilst in a callback from a query on the main database. +do_test table-14.4 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + attach 'test2.db' as aux; + } + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {CREATE TABLE aux.t1(a, b, c)} + } +} {} + +# On the other hand, it should be impossible to drop a table when any VMs +# are active. This is because VerifyCookie instructions may have already +# been executed, and btree root-pages may not move after this (which a +# delete table might do). +do_test table-14.4 { + set rc [ + catch { + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {DROP TABLE aux.t1;} + } + } msg + ] + set result [list $rc $msg] +} {1 {database table is locked}} + +# Create and drop 2000 tables. This is to check that the balance_shallow() +# routine works correctly on the sqlite_master table. At one point it +# contained a bug that would prevent the right-child pointer of the +# child page from being copied to the root page. 
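The table-13 block checks the CURRENT_TIME, CURRENT_DATE and CURRENT_TIMESTAMP defaults; in ordinary use they fill omitted columns with UTC text in the formats the expected results above show. A minimal usage sketch with a hypothetical table named log:

  db eval {
    CREATE TABLE log(
      id   INTEGER PRIMARY KEY,
      msg  TEXT,
      dttm TEXT DEFAULT CURRENT_TIMESTAMP   -- 'YYYY-MM-DD HH:MM:SS', UTC
    );
    INSERT INTO log(msg) VALUES('started'); -- dttm defaults to the current time
    SELECT msg, dttm FROM log;
  }
  db eval { DROP TABLE log }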
+# +do_test table-15.1 { + execsql {BEGIN} + for {set i 0} {$i<2000} {incr i} { + execsql "CREATE TABLE tbl$i (a, b, c)" + } + execsql {COMMIT} +} {} +do_test table-15.2 { + execsql {BEGIN} + for {set i 0} {$i<2000} {incr i} { + execsql "DROP TABLE tbl$i" + } + execsql {COMMIT} +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tableapi.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tableapi.test new file mode 100644 index 0000000..1738291 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tableapi.test @@ -0,0 +1,219 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite_exec_printf() and +# sqlite_get_table_printf() APIs. +# +# $Id: tableapi.test,v 1.13 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tableapi-1.0 { + set ::dbx [sqlite3_open test.db] + catch {sqlite_exec_printf $::dbx {DROP TABLE xyz} {}} + sqlite3_exec_printf $::dbx {CREATE TABLE %s(a int, b text)} xyz +} {0 {}} +do_test tableapi-1.1 { + sqlite3_exec_printf $::dbx { + INSERT INTO xyz VALUES(1,'%q') + } {Hi Y'all} +} {0 {}} +do_test tableapi-1.2 { + sqlite3_exec_printf $::dbx {SELECT * FROM xyz} {} +} {0 {a b 1 {Hi Y'all}}} + +do_test tableapi-2.1 { + sqlite3_get_table_printf $::dbx { + BEGIN TRANSACTION; + SELECT * FROM xyz WHERE b='%q' + } {Hi Y'all} +} {0 1 2 a b 1 {Hi Y'all}} +do_test tableapi-2.2 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz + } {} +} {0 1 2 a b 1 {Hi Y'all}} +do_test tableapi-2.3 { + for {set i 2} {$i<=50} {incr i} { + sqlite3_get_table_printf $::dbx \ + "INSERT INTO xyz VALUES($i,'(%s)')" $i + } + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz ORDER BY a + } {} +} {0 50 2 a b 1 {Hi Y'all} 2 (2) 3 (3) 4 (4) 5 (5) 6 (6) 7 (7) 8 (8) 9 (9) 10 (10) 11 (11) 12 (12) 13 (13) 14 (14) 15 (15) 16 (16) 17 (17) 18 (18) 19 (19) 20 (20) 21 (21) 22 (22) 23 (23) 24 (24) 25 (25) 26 (26) 27 (27) 28 (28) 29 (29) 30 (30) 31 (31) 32 (32) 33 (33) 34 (34) 35 (35) 36 (36) 37 (37) 38 (38) 39 (39) 40 (40) 41 (41) 42 (42) 43 (43) 44 (44) 45 (45) 46 (46) 47 (47) 48 (48) 49 (49) 50 (50)} +do_test tableapi-2.3.1 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>49 ORDER BY a + } {} +} {0 1 2 a b 50 (50)} +do_test tableapi-2.3.2 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>47 ORDER BY a + } {} +} {0 3 2 a b 48 (48) 49 (49) 50 (50)} +do_test tableapi-2.4 { + set manyquote '''''''' + append manyquote $manyquote + append manyquote $manyquote + append manyquote $manyquote + append manyquote $manyquote + append manyquote $manyquote + append manyquote $manyquote + set ::big_str "$manyquote Hello $manyquote" + sqlite3_get_table_printf $::dbx { + INSERT INTO xyz VALUES(51,'%q') + } $::big_str +} {0 0 0} +do_test tableapi-2.5 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>49 ORDER BY a; + } {} +} "0 2 2 a b 50 (50) 51 \173$::big_str\175" +do_test tableapi-2.6 { + sqlite3_get_table_printf $::dbx { + INSERT INTO xyz VALUES(52,NULL) + } {} + ifcapable subquery { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a IN (42,50,52) ORDER BY a DESC + } {} 
+ } else { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a=42 OR a=50 OR a=52 ORDER BY a DESC + } {} + } +} {0 3 2 a b 52 NULL 50 (50) 42 (42)} +do_test tableapi-2.7 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>1000 + } {} +} {0 0 0} + +# Repeat all tests with the empty_result_callbacks pragma turned on +# +do_test tableapi-3.1 { + sqlite3_get_table_printf $::dbx { + ROLLBACK; + PRAGMA empty_result_callbacks = ON; + SELECT * FROM xyz WHERE b='%q' + } {Hi Y'all} +} {0 1 2 a b 1 {Hi Y'all}} +do_test tableapi-3.2 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz + } {} +} {0 1 2 a b 1 {Hi Y'all}} +do_test tableapi-3.3 { + for {set i 2} {$i<=50} {incr i} { + sqlite3_get_table_printf $::dbx \ + "INSERT INTO xyz VALUES($i,'(%s)')" $i + } + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz ORDER BY a + } {} +} {0 50 2 a b 1 {Hi Y'all} 2 (2) 3 (3) 4 (4) 5 (5) 6 (6) 7 (7) 8 (8) 9 (9) 10 (10) 11 (11) 12 (12) 13 (13) 14 (14) 15 (15) 16 (16) 17 (17) 18 (18) 19 (19) 20 (20) 21 (21) 22 (22) 23 (23) 24 (24) 25 (25) 26 (26) 27 (27) 28 (28) 29 (29) 30 (30) 31 (31) 32 (32) 33 (33) 34 (34) 35 (35) 36 (36) 37 (37) 38 (38) 39 (39) 40 (40) 41 (41) 42 (42) 43 (43) 44 (44) 45 (45) 46 (46) 47 (47) 48 (48) 49 (49) 50 (50)} +do_test tableapi-3.3.1 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>49 ORDER BY a + } {} +} {0 1 2 a b 50 (50)} +do_test tableapi-3.3.2 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>47 ORDER BY a + } {} +} {0 3 2 a b 48 (48) 49 (49) 50 (50)} +do_test tableapi-3.4 { + sqlite3_get_table_printf $::dbx { + INSERT INTO xyz VALUES(51,'%q') + } $::big_str +} {0 0 0} +do_test tableapi-3.5 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>49 ORDER BY a; + } {} +} "0 2 2 a b 50 (50) 51 \173$::big_str\175" +do_test tableapi-3.6 { + sqlite3_get_table_printf $::dbx { + INSERT INTO xyz VALUES(52,NULL) + } {} + ifcapable subquery { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a IN (42,50,52) ORDER BY a DESC + } {} + } else { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a=42 OR a=50 OR a=52 ORDER BY a DESC + } {} + } +} {0 3 2 a b 52 NULL 50 (50) 42 (42)} +do_test tableapi-3.7 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>1000 + } {} +} {0 0 2 a b} + +do_test tableapi-4.1 { + set rc [catch { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz; SELECT * FROM sqlite_master + } {} + } msg] + concat $rc $msg +} {0 1 {sqlite3_get_table() called with two or more incompatible queries}} + +# A report on the mailing list says that the sqlite_get_table() api fails +# on queries involving more than 40 columns. 
The following code attempts +# to test that complaint +# +do_test tableapi-5.1 { + set sql "CREATE TABLE t2(" + set sep "" + for {set i 1} {$i<=100} {incr i} { + append sql ${sep}x$i + set sep , + } + append sql ) + sqlite3_get_table_printf $::dbx $sql {} + set sql "INSERT INTO t2 VALUES(" + set sep "" + for {set i 1} {$i<=100} {incr i} { + append sql ${sep}$i + set sep , + } + append sql ) + sqlite3_get_table_printf $::dbx $sql {} + sqlite3_get_table_printf $::dbx {SELECT * FROM t2} {} +} {0 1 100 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 x32 x33 x34 x35 x36 x37 x38 x39 x40 x41 x42 x43 x44 x45 x46 x47 x48 x49 x50 x51 x52 x53 x54 x55 x56 x57 x58 x59 x60 x61 x62 x63 x64 x65 x66 x67 x68 x69 x70 x71 x72 x73 x74 x75 x76 x77 x78 x79 x80 x81 x82 x83 x84 x85 x86 x87 x88 x89 x90 x91 x92 x93 x94 x95 x96 x97 x98 x99 x100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100} +do_test tableapi-5.2 { + set sql "INSERT INTO t2 VALUES(" + set sep "" + for {set i 1} {$i<=100} {incr i} { + append sql ${sep}[expr {$i+1000}] + set sep , + } + append sql ) + sqlite3_get_table_printf $::dbx $sql {} + sqlite3_get_table_printf $::dbx {SELECT * FROM t2} {} +} {0 2 100 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 x32 x33 x34 x35 x36 x37 x38 x39 x40 x41 x42 x43 x44 x45 x46 x47 x48 x49 x50 x51 x52 x53 x54 x55 x56 x57 x58 x59 x60 x61 x62 x63 x64 x65 x66 x67 x68 x69 x70 x71 x72 x73 x74 x75 x76 x77 x78 x79 x80 x81 x82 x83 x84 x85 x86 x87 x88 x89 x90 x91 x92 x93 x94 x95 x96 x97 x98 x99 x100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100} + +ifcapable schema_pragmas { + do_test tableapi-6.1 { + sqlite3_get_table_printf $::dbx {PRAGMA user_version} {} + } {0 1 1 user_version 0} +} + +do_test tableapi-99.0 { + sqlite3_close $::dbx +} {SQLITE_OK} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tclsqlite.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tclsqlite.test new file mode 100644 index 0000000..50bdf63 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tclsqlite.test @@ -0,0 +1,496 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for TCL interface to the +# SQLite library. +# +# Actually, all tests are based on the TCL interface, so the main +# interface is pretty well tested. This file contains some addition +# tests for fringe issues that the main test suite does not cover. +# +# $Id: tclsqlite.test,v 1.62 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Check the error messages generated by tclsqlite +# +if {[sqlite3 -has-codec]} { + set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?" +} else { + set r "sqlite3 HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN?" +} +do_test tcl-1.1 { + set v [catch {sqlite3 bogus} msg] + lappend v $msg +} [list 1 "wrong # args: should be \"$r\""] +do_test tcl-1.2 { + set v [catch {db bogus} msg] + lappend v $msg +} {1 {bad option "bogus": must be authorizer, busy, cache, changes, close, collate, collation_needed, commit_hook, complete, copy, enable_load_extension, errorcode, eval, exists, function, incrblob, interrupt, last_insert_rowid, nullvalue, onecolumn, profile, progress, rekey, rollback_hook, timeout, total_changes, trace, transaction, update_hook, or version}} +do_test tcl-1.3 { + execsql {CREATE TABLE t1(a int, b int)} + execsql {INSERT INTO t1 VALUES(10,20)} + set v [catch { + db eval {SELECT * FROM t1} data { + error "The error message" + } + } msg] + lappend v $msg +} {1 {The error message}} +do_test tcl-1.4 { + set v [catch { + db eval {SELECT * FROM t2} data { + error "The error message" + } + } msg] + lappend v $msg +} {1 {no such table: t2}} +do_test tcl-1.5 { + set v [catch { + db eval {SELECT * FROM t1} data { + break + } + } msg] + lappend v $msg +} {0 {}} +do_test tcl-1.6 { + set v [catch { + db eval {SELECT * FROM t1} data { + expr x* + } + } msg] + regsub {:.*$} $msg {} msg + lappend v $msg +} {1 {syntax error in expression "x*"}} +do_test tcl-1.7 { + set v [catch {db} msg] + lappend v $msg +} {1 {wrong # args: should be "db SUBCOMMAND ..."}} +if {[catch {db auth {}}]==0} { + do_test tcl-1.8 { + set v [catch {db authorizer 1 2 3} msg] + lappend v $msg + } {1 {wrong # args: should be "db authorizer ?CALLBACK?"}} +} +do_test tcl-1.9 { + set v [catch {db busy 1 2 3} msg] + lappend v $msg +} {1 {wrong # args: should be "db busy CALLBACK"}} +do_test tcl-1.10 { + set v [catch {db progress 1} msg] + lappend v $msg +} {1 {wrong # args: should be "db progress N CALLBACK"}} +do_test tcl-1.11 { + set v [catch {db changes xyz} msg] + lappend v $msg +} {1 {wrong # args: should be "db changes "}} +do_test tcl-1.12 { + set v [catch {db commit_hook a b c} msg] + lappend v $msg +} {1 {wrong # args: should be "db commit_hook ?CALLBACK?"}} +ifcapable {complete} { + do_test tcl-1.13 { + set v [catch {db complete} msg] + lappend v $msg + } {1 {wrong # args: should be "db complete SQL"}} +} +do_test tcl-1.14 { + set v [catch {db eval} msg] + lappend v $msg +} {1 {wrong # args: should be "db eval SQL ?ARRAY-NAME? 
?SCRIPT?"}} +do_test tcl-1.15 { + set v [catch {db function} msg] + lappend v $msg +} {1 {wrong # args: should be "db function NAME SCRIPT"}} +do_test tcl-1.16 { + set v [catch {db last_insert_rowid xyz} msg] + lappend v $msg +} {1 {wrong # args: should be "db last_insert_rowid "}} +do_test tcl-1.17 { + set v [catch {db rekey} msg] + lappend v $msg +} {1 {wrong # args: should be "db rekey KEY"}} +do_test tcl-1.18 { + set v [catch {db timeout} msg] + lappend v $msg +} {1 {wrong # args: should be "db timeout MILLISECONDS"}} +do_test tcl-1.19 { + set v [catch {db collate} msg] + lappend v $msg +} {1 {wrong # args: should be "db collate NAME SCRIPT"}} +do_test tcl-1.20 { + set v [catch {db collation_needed} msg] + lappend v $msg +} {1 {wrong # args: should be "db collation_needed SCRIPT"}} +do_test tcl-1.21 { + set v [catch {db total_changes xyz} msg] + lappend v $msg +} {1 {wrong # args: should be "db total_changes "}} +do_test tcl-1.20 { + set v [catch {db copy} msg] + lappend v $msg +} {1 {wrong # args: should be "db copy CONFLICT-ALGORITHM TABLE FILENAME ?SEPARATOR? ?NULLINDICATOR?"}} +do_test tcl-1.21 { + set v [catch {sqlite3 db2 test.db -vfs nosuchvfs} msg] + lappend v $msg +} {1 {no such vfs: nosuchvfs}} + +catch {unset ::result} +do_test tcl-2.1 { + execsql "CREATE TABLE t\u0123x(a int, b\u1235 float)" +} {} +ifcapable schema_pragmas { + do_test tcl-2.2 { + execsql "PRAGMA table_info(t\u0123x)" + } "0 a int 0 {} 0 1 b\u1235 float 0 {} 0" +} +do_test tcl-2.3 { + execsql "INSERT INTO t\u0123x VALUES(1,2.3)" + db eval "SELECT * FROM t\u0123x" result break + set result(*) +} "a b\u1235" + + +# Test the onecolumn method +# +do_test tcl-3.1 { + execsql { + INSERT INTO t1 SELECT a*2, b*2 FROM t1; + INSERT INTO t1 SELECT a*2+1, b*2+1 FROM t1; + INSERT INTO t1 SELECT a*2+3, b*2+3 FROM t1; + } + set rc [catch {db onecolumn {SELECT * FROM t1 ORDER BY a}} msg] + lappend rc $msg +} {0 10} +do_test tcl-3.2 { + db onecolumn {SELECT * FROM t1 WHERE a<0} +} {} +do_test tcl-3.3 { + set rc [catch {db onecolumn} errmsg] + lappend rc $errmsg +} {1 {wrong # args: should be "db onecolumn SQL"}} +do_test tcl-3.4 { + set rc [catch {db onecolumn {SELECT bogus}} errmsg] + lappend rc $errmsg +} {1 {no such column: bogus}} +ifcapable {tclvar} { + do_test tcl-3.5 { + set b 50 + set rc [catch {db one {SELECT * FROM t1 WHERE b>$b}} msg] + lappend rc $msg + } {0 41} + do_test tcl-3.6 { + set b 500 + set rc [catch {db one {SELECT * FROM t1 WHERE b>$b}} msg] + lappend rc $msg + } {0 {}} + do_test tcl-3.7 { + set b 500 + set rc [catch {db one { + INSERT INTO t1 VALUES(99,510); + SELECT * FROM t1 WHERE b>$b + }} msg] + lappend rc $msg + } {0 99} +} +ifcapable {!tclvar} { + execsql {INSERT INTO t1 VALUES(99,510)} +} + +# Turn the busy handler on and off +# +do_test tcl-4.1 { + proc busy_callback {cnt} { + break + } + db busy busy_callback + db busy +} {busy_callback} +do_test tcl-4.2 { + db busy {} + db busy +} {} + +ifcapable {tclvar} { + # Parsing of TCL variable names within SQL into bound parameters. 
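The tclvar tests that follow depend on the Tcl interface's variable binding: a $name appearing in the SQL text is bound as a parameter carrying the variable's current value (an unset variable binds NULL), so no quoting of the value is needed. A short sketch, assuming the usual db handle and the t1 table created earlier in this file:

  set limit 30
  # $limit is bound as a parameter, not substituted into the SQL text.
  db eval {SELECT a, b FROM t1 WHERE b > $limit} row {
    # The array named row holds one result row per iteration of this script.
    puts "a=$row(a) b=$row(b)"
  }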
+ # + do_test tcl-5.1 { + execsql {CREATE TABLE t3(a,b,c)} + catch {unset x} + set x(1) 5 + set x(2) 7 + execsql { + INSERT INTO t3 VALUES($::x(1),$::x(2),$::x(3)); + SELECT * FROM t3 + } + } {5 7 {}} + do_test tcl-5.2 { + execsql { + SELECT typeof(a), typeof(b), typeof(c) FROM t3 + } + } {text text null} + do_test tcl-5.3 { + catch {unset x} + set x [binary format h12 686900686f00] + execsql { + UPDATE t3 SET a=$::x; + } + db eval { + SELECT a FROM t3 + } break + binary scan $a h12 adata + set adata + } {686900686f00} + do_test tcl-5.4 { + execsql { + SELECT typeof(a), typeof(b), typeof(c) FROM t3 + } + } {blob text null} +} + +# Operation of "break" and "continue" within row scripts +# +do_test tcl-6.1 { + db eval {SELECT * FROM t1} { + break + } + lappend a $b +} {10 20} +do_test tcl-6.2 { + set cnt 0 + db eval {SELECT * FROM t1} { + if {$a>40} continue + incr cnt + } + set cnt +} {4} +do_test tcl-6.3 { + set cnt 0 + db eval {SELECT * FROM t1} { + if {$a<40} continue + incr cnt + } + set cnt +} {5} +do_test tcl-6.4 { + proc return_test {x} { + db eval {SELECT * FROM t1} { + if {$a==$x} {return $b} + } + } + return_test 10 +} 20 +do_test tcl-6.5 { + return_test 20 +} 40 +do_test tcl-6.6 { + return_test 99 +} 510 +do_test tcl-6.7 { + return_test 0 +} {} + +do_test tcl-7.1 { + db version + expr 0 +} {0} + +# modify and reset the NULL representation +# +do_test tcl-8.1 { + db nullvalue NaN + execsql {INSERT INTO t1 VALUES(30,NULL)} + db eval {SELECT * FROM t1 WHERE b IS NULL} +} {30 NaN} +do_test tcl-8.2 { + db nullvalue NULL + db nullvalue +} {NULL} +do_test tcl-8.3 { + db nullvalue {} + db eval {SELECT * FROM t1 WHERE b IS NULL} +} {30 {}} + +# Test the return type of user-defined functions +# +do_test tcl-9.1 { + db function ret_str {return "hi"} + execsql {SELECT typeof(ret_str())} +} {text} +do_test tcl-9.2 { + db function ret_dbl {return [expr {rand()*0.5}]} + execsql {SELECT typeof(ret_dbl())} +} {real} +do_test tcl-9.3 { + db function ret_int {return [expr {int(rand()*200)}]} + execsql {SELECT typeof(ret_int())} +} {integer} + +# Recursive calls to the same user-defined function +# +ifcapable tclvar { + do_test tcl-9.10 { + proc userfunc_r1 {n} { + if {$n<=0} {return 0} + set nm1 [expr {$n-1}] + return [expr {[db eval {SELECT r1($nm1)}]+$n}] + } + db function r1 userfunc_r1 + execsql {SELECT r1(10)} + } {55} + do_test tcl-9.11 { + execsql {SELECT r1(100)} + } {5050} +} + +# Tests for the new transaction method +# +do_test tcl-10.1 { + db transaction {} +} {} +do_test tcl-10.2 { + db transaction deferred {} +} {} +do_test tcl-10.3 { + db transaction immediate {} +} {} +do_test tcl-10.4 { + db transaction exclusive {} +} {} +do_test tcl-10.5 { + set rc [catch {db transaction xyzzy {}} msg] + lappend rc $msg +} {1 {bad transaction type "xyzzy": must be deferred, exclusive, or immediate}} +do_test tcl-10.6 { + set rc [catch {db transaction {error test-error}} msg] + lappend rc $msg +} {1 test-error} +do_test tcl-10.7 { + db transaction { + db eval {CREATE TABLE t4(x)} + db transaction { + db eval {INSERT INTO t4 VALUES(1)} + } + } + db eval {SELECT * FROM t4} +} 1 +do_test tcl-10.8 { + catch { + db transaction { + db eval {INSERT INTO t4 VALUES(2)} + db eval {INSERT INTO t4 VALUES(3)} + db eval {INSERT INTO t4 VALUES(4)} + error test-error + } + } + db eval {SELECT * FROM t4} +} 1 +do_test tcl-10.9 { + db transaction { + db eval {INSERT INTO t4 VALUES(2)} + catch { + db transaction { + db eval {INSERT INTO t4 VALUES(3)} + db eval {INSERT INTO t4 VALUES(4)} + error test-error + } + } + } 
+ db eval {SELECT * FROM t4} +} {1 2 3 4} +do_test tcl-10.10 { + for {set i 0} {$i<1} {incr i} { + db transaction { + db eval {INSERT INTO t4 VALUES(5)} + continue + } + } + db eval {SELECT * FROM t4} +} {1 2 3 4 5} +do_test tcl-10.11 { + for {set i 0} {$i<10} {incr i} { + db transaction { + db eval {INSERT INTO t4 VALUES(6)} + break + } + } + db eval {SELECT * FROM t4} +} {1 2 3 4 5 6} +do_test tcl-10.12 { + set rc [catch { + for {set i 0} {$i<10} {incr i} { + db transaction { + db eval {INSERT INTO t4 VALUES(7)} + return + } + } + }] +} {2} +do_test tcl-10.13 { + db eval {SELECT * FROM t4} +} {1 2 3 4 5 6 7} + +do_test tcl-11.1 { + db exists {SELECT x,x*2,x+x FROM t4 WHERE x==4} +} {1} +do_test tcl-11.2 { + db exists {SELECT 0 FROM t4 WHERE x==4} +} {1} +do_test tcl-11.3 { + db exists {SELECT 1 FROM t4 WHERE x==8} +} {0} + +do_test tcl-12.1 { + unset -nocomplain a b c version + set version [db version] + scan $version "%d.%d.%d" a b c + expr $a*1000000 + $b*1000 + $c +} [sqlite3_libversion_number] + + +# Check to see that when bindings of the form @aaa are used instead +# of $aaa, that objects are treated as bytearray and are inserted +# as BLOBs. +# +ifcapable tclvar { + do_test tcl-13.1 { + db eval {CREATE TABLE t5(x BLOB)} + set x abc123 + db eval {INSERT INTO t5 VALUES($x)} + db eval {SELECT typeof(x) FROM t5} + } {text} + do_test tcl-13.2 { + binary scan $x H notUsed + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES($x); + SELECT typeof(x) FROM t5; + } + } {text} + do_test tcl-13.3 { + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES(@x); + SELECT typeof(x) FROM t5; + } + } {blob} + do_test tcl-13.4 { + set y 1234 + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES(@y); + SELECT hex(x), typeof(x) FROM t5 + } + } {31323334 blob} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/temptable.test b/libraries/sqlite/unix/sqlite-3.5.1/test/temptable.test new file mode 100644 index 0000000..363f999 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/temptable.test @@ -0,0 +1,414 @@ +# 2001 October 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for temporary tables and indices. +# +# $Id: temptable.test,v 1.17 2006/01/24 00:15:16 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !tempdb { + finish_test + return +} + +# Create an alternative connection to the database +# +do_test temptable-1.0 { + sqlite3 db2 ./test.db + set dummy {} +} {} + +# Create a permanent table. +# +do_test temptable-1.1 { + execsql {CREATE TABLE t1(a,b,c);} + execsql {INSERT INTO t1 VALUES(1,2,3);} + execsql {SELECT * FROM t1} +} {1 2 3} +do_test temptable-1.2 { + catch {db2 eval {SELECT * FROM sqlite_master}} + db2 eval {SELECT * FROM t1} +} {1 2 3} +do_test temptable-1.3 { + execsql {SELECT name FROM sqlite_master} +} {t1} +do_test temptable-1.4 { + db2 eval {SELECT name FROM sqlite_master} +} {t1} + +# Create a temporary table. Verify that only one of the two +# processes can see it. 
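Looking back at the tcl-10 block, the db transaction method runs its script inside a single transaction that commits when the script finishes (including via break, continue or return) and rolls back if the script raises an error; nested calls reuse the outer transaction. A hedged sketch with a hypothetical acct table:

  db eval { CREATE TABLE acct(name TEXT, balance INTEGER) }
  db eval { INSERT INTO acct VALUES('a',100); INSERT INTO acct VALUES('b',0) }
  catch {
    db transaction {
      db eval { UPDATE acct SET balance = balance - 150 WHERE name='a' }
      db eval { SELECT balance AS bal FROM acct WHERE name='a' } r {
        if {$r(bal) < 0} { error "insufficient funds" }
      }
      db eval { UPDATE acct SET balance = balance + 150 WHERE name='b' }
    }
  } msg
  # The error rolled the whole transaction back, so nothing changed:
  db eval { SELECT name, balance FROM acct ORDER BY name }   ;# a 100 b 0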
+# +do_test temptable-1.5 { + db2 eval { + CREATE TEMP TABLE t2(x,y,z); + INSERT INTO t2 VALUES(4,5,6); + } + db2 eval {SELECT * FROM t2} +} {4 5 6} +do_test temptable-1.6 { + catch {execsql {SELECT * FROM sqlite_master}} + catchsql {SELECT * FROM t2} +} {1 {no such table: t2}} +do_test temptable-1.7 { + catchsql {INSERT INTO t2 VALUES(8,9,0);} +} {1 {no such table: t2}} +do_test temptable-1.8 { + db2 eval {INSERT INTO t2 VALUES(8,9,0);} + db2 eval {SELECT * FROM t2 ORDER BY x} +} {4 5 6 8 9 0} +do_test temptable-1.9 { + db2 eval {DELETE FROM t2 WHERE x==8} + db2 eval {SELECT * FROM t2 ORDER BY x} +} {4 5 6} +do_test temptable-1.10 { + db2 eval {DELETE FROM t2} + db2 eval {SELECT * FROM t2} +} {} +do_test temptable-1.11 { + db2 eval { + INSERT INTO t2 VALUES(7,6,5); + INSERT INTO t2 VALUES(4,3,2); + SELECT * FROM t2 ORDER BY x; + } +} {4 3 2 7 6 5} +do_test temptable-1.12 { + db2 eval {DROP TABLE t2;} + set r [catch {db2 eval {SELECT * FROM t2}} msg] + lappend r $msg +} {1 {no such table: t2}} + +# Make sure temporary tables work with transactions +# +do_test temptable-2.1 { + execsql { + BEGIN TRANSACTION; + CREATE TEMPORARY TABLE t2(x,y); + INSERT INTO t2 VALUES(1,2); + SELECT * FROM t2; + } +} {1 2} +do_test temptable-2.2 { + execsql {ROLLBACK} + catchsql {SELECT * FROM t2} +} {1 {no such table: t2}} +do_test temptable-2.3 { + execsql { + BEGIN TRANSACTION; + CREATE TEMPORARY TABLE t2(x,y); + INSERT INTO t2 VALUES(1,2); + SELECT * FROM t2; + } +} {1 2} +do_test temptable-2.4 { + execsql {COMMIT} + catchsql {SELECT * FROM t2} +} {0 {1 2}} +do_test temptable-2.5 { + set r [catch {db2 eval {SELECT * FROM t2}} msg] + lappend r $msg +} {1 {no such table: t2}} + +# Make sure indices on temporary tables are also temporary. +# +do_test temptable-3.1 { + execsql { + CREATE INDEX i2 ON t2(x); + SELECT name FROM sqlite_master WHERE type='index'; + } +} {} +do_test temptable-3.2 { + execsql { + SELECT y FROM t2 WHERE x=1; + } +} {2} +do_test temptable-3.3 { + execsql { + DROP INDEX i2; + SELECT y FROM t2 WHERE x=1; + } +} {2} +do_test temptable-3.4 { + execsql { + CREATE INDEX i2 ON t2(x); + DROP TABLE t2; + } + catchsql {DROP INDEX i2} +} {1 {no such index: i2}} + +# Check for correct name collision processing. A name collision can +# occur when process A creates a temporary table T then process B +# creates a permanent table also named T. The temp table in process A +# hides the existance of the permanent table. 
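# Illustrative sketch of the rule just described (not part of the original
# test file; dbA and dbB are hypothetical connection names standing in for
# db2 and db):
#
#   dbA eval {CREATE TEMP TABLE t(x); INSERT INTO t VALUES(1)}
#   dbB eval {CREATE TABLE t(x); INSERT INTO t VALUES(2)}
#   dbA eval {SELECT x FROM t}        ;# returns 1: TEMP.t shadows main.t
#   dbA eval {SELECT x FROM main.t}   ;# returns 2 once dbA has picked up the new schema
#
# The temptable-4.* tests that follow exercise this with connections db and db2.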
+# +do_test temptable-4.1 { + execsql { + CREATE TEMP TABLE t2(x,y); + INSERT INTO t2 VALUES(10,20); + SELECT * FROM t2; + } db2 +} {10 20} +do_test temptable-4.2 { + execsql { + CREATE TABLE t2(x,y,z); + INSERT INTO t2 VALUES(9,8,7); + SELECT * FROM t2; + } +} {9 8 7} +do_test temptable-4.3 { + catchsql { + SELECT * FROM t2; + } db2 +} {0 {10 20}} +do_test temptable-4.4.1 { + catchsql { + SELECT * FROM temp.t2; + } db2 +} {0 {10 20}} +do_test temptable-4.4.2 { + catchsql { + SELECT * FROM main.t2; + } db2 +} {1 {no such table: main.t2}} +#do_test temptable-4.4.3 { +# catchsql { +# SELECT name FROM main.sqlite_master WHERE type='table'; +# } db2 +#} {1 {database schema has changed}} +do_test temptable-4.4.4 { + catchsql { + SELECT name FROM main.sqlite_master WHERE type='table'; + } db2 +} {0 {t1 t2}} +do_test temptable-4.4.5 { + catchsql { + SELECT * FROM main.t2; + } db2 +} {0 {9 8 7}} +do_test temptable-4.4.6 { + # TEMP takes precedence over MAIN + catchsql { + SELECT * FROM t2; + } db2 +} {0 {10 20}} +do_test temptable-4.5 { + catchsql { + DROP TABLE t2; -- should drop TEMP + SELECT * FROM t2; -- data should be from MAIN + } db2 +} {0 {9 8 7}} +do_test temptable-4.6 { + db2 close + sqlite3 db2 ./test.db + catchsql { + SELECT * FROM t2; + } db2 +} {0 {9 8 7}} +do_test temptable-4.7 { + catchsql { + DROP TABLE t2; + SELECT * FROM t2; + } +} {1 {no such table: t2}} +do_test temptable-4.8 { + db2 close + sqlite3 db2 ./test.db + execsql { + CREATE TEMP TABLE t2(x unique,y); + INSERT INTO t2 VALUES(1,2); + SELECT * FROM t2; + } db2 +} {1 2} +do_test temptable-4.9 { + execsql { + CREATE TABLE t2(x unique, y); + INSERT INTO t2 VALUES(3,4); + SELECT * FROM t2; + } +} {3 4} +do_test temptable-4.10.1 { + catchsql { + SELECT * FROM t2; + } db2 +} {0 {1 2}} +# Update: The schema is reloaded in test temptable-4.10.1. And tclsqlite.c +# handles it and retries the query anyway. +# do_test temptable-4.10.2 { +# catchsql { +# SELECT name FROM sqlite_master WHERE type='table' +# } db2 +# } {1 {database schema has changed}} +do_test temptable-4.10.3 { + catchsql { + SELECT name FROM sqlite_master WHERE type='table' + } db2 +} {0 {t1 t2}} +do_test temptable-4.11 { + execsql { + SELECT * FROM t2; + } db2 +} {1 2} +do_test temptable-4.12 { + execsql { + SELECT * FROM t2; + } +} {3 4} +do_test temptable-4.13 { + catchsql { + DROP TABLE t2; -- drops TEMP.T2 + SELECT * FROM t2; -- uses MAIN.T2 + } db2 +} {0 {3 4}} +do_test temptable-4.14 { + execsql { + SELECT * FROM t2; + } +} {3 4} +do_test temptable-4.15 { + db2 close + sqlite3 db2 ./test.db + execsql { + SELECT * FROM t2; + } db2 +} {3 4} + +# Now create a temporary table in db2 and a permanent index in db. The +# temporary table in db2 should mask the name of the permanent index, +# but the permanent index should still be accessible and should still +# be updated when its corresponding table changes. 
+# +do_test temptable-5.1 { + execsql { + CREATE TEMP TABLE mask(a,b,c) + } db2 + execsql { + CREATE INDEX mask ON t2(x); + SELECT * FROM t2; + } +} {3 4} +#do_test temptable-5.2 { +# catchsql { +# SELECT * FROM t2; +# } db2 +#} {1 {database schema has changed}} +do_test temptable-5.3 { + catchsql { + SELECT * FROM t2; + } db2 +} {0 {3 4}} +do_test temptable-5.4 { + execsql { + SELECT y FROM t2 WHERE x=3 + } +} {4} +do_test temptable-5.5 { + execsql { + SELECT y FROM t2 WHERE x=3 + } db2 +} {4} +do_test temptable-5.6 { + execsql { + INSERT INTO t2 VALUES(1,2); + SELECT y FROM t2 WHERE x=1; + } db2 +} {2} +do_test temptable-5.7 { + execsql { + SELECT y FROM t2 WHERE x=3 + } db2 +} {4} +do_test temptable-5.8 { + execsql { + SELECT y FROM t2 WHERE x=1; + } +} {2} +do_test temptable-5.9 { + execsql { + SELECT y FROM t2 WHERE x=3 + } +} {4} + +db2 close + +# Test for correct operation of read-only databases +# +do_test temptable-6.1 { + execsql { + CREATE TABLE t8(x); + INSERT INTO t8 VALUES('xyzzy'); + SELECT * FROM t8; + } +} {xyzzy} +do_test temptable-6.2 { + db close + catch {file attributes test.db -permissions 0444} + catch {file attributes test.db -readonly 1} + sqlite3 db test.db + if {[file writable test.db]} { + error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user" + } + execsql { + SELECT * FROM t8; + } +} {xyzzy} +do_test temptable-6.3 { + if {[file writable test.db]} { + error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user" + } + catchsql { + CREATE TABLE t9(x,y); + } +} {1 {attempt to write a readonly database}} +do_test temptable-6.4 { + catchsql { + CREATE TEMP TABLE t9(x,y); + } +} {0 {}} +do_test temptable-6.5 { + catchsql { + INSERT INTO t9 VALUES(1,2); + SELECT * FROM t9; + } +} {0 {1 2}} +do_test temptable-6.6 { + if {[file writable test.db]} { + error "Unable to make the database file test.db readonly - rerun this test as an unprivileged user" + } + catchsql { + INSERT INTO t8 VALUES('hello'); + SELECT * FROM t8; + } +} {1 {attempt to write a readonly database}} +do_test temptable-6.7 { + catchsql { + SELECT * FROM t8,t9; + } +} {0 {xyzzy 1 2}} +do_test temptable-6.8 { + db close + sqlite3 db test.db + catchsql { + SELECT * FROM t8,t9; + } +} {1 {no such table: t9}} + +file delete -force test2.db test2.db-journal +do_test temptable-7.1 { + catchsql { + ATTACH 'test2.db' AS two; + CREATE TEMP TABLE two.abc(x,y); + } +} {1 {temporary table name must be unqualified}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tester.tcl b/libraries/sqlite/unix/sqlite-3.5.1/test/tester.tcl new file mode 100644 index 0000000..a1ca65c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tester.tcl @@ -0,0 +1,554 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements some common TCL routines used for regression +# testing the SQLite library +# +# $Id: tester.tcl,v 1.91 2007/09/01 09:02:54 danielk1977 Exp $ + + +set tcl_precision 15 +set sqlite_pending_byte 0x0010000 + +# +# Check the command-line arguments for a default soft-heap-limit. 
+# Store this default value in the global variable ::soft_limit and +# update the soft-heap-limit each time this script is run. In that +# way if an individual test file changes the soft-heap-limit, it +# will be reset at the start of the next test file. +# +if {![info exists soft_limit]} { + set soft_limit 0 + for {set i 0} {$i<[llength $argv]} {incr i} { + if {[regexp {^--soft-heap-limit=(.+)$} [lindex $argv $i] all value]} { + if {$value!="off"} { + set soft_limit $value + } + set argv [lreplace $argv $i $i] + } + } +} +sqlite3_soft_heap_limit $soft_limit + +# +# Check the command-line arguments to set the memory debugger +# backtrace depth. +# +# See the sqlite3_memdebug_backtrace() function in mem2.c or +# test_malloc.c for additional information. +# +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[regexp {^--backtrace=(\d+)$} [lindex $argv $i] all value]} { + sqlite3_memdebug_backtrace $value + set argv [lreplace $argv $i $i] + } +} + + +# Use the pager codec if it is available +# +if {[sqlite3 -has-codec] && [info command sqlite_orig]==""} { + rename sqlite3 sqlite_orig + proc sqlite3 {args} { + if {[llength $args]==2 && [string index [lindex $args 0] 0]!="-"} { + lappend args -key {xyzzy} + } + uplevel 1 sqlite_orig $args + } +} + + +# Create a test database +# +catch {db close} +file delete -force test.db +file delete -force test.db-journal +sqlite3 db ./test.db +set ::DB [sqlite3_connection_pointer db] +if {[info exists ::SETUP_SQL]} { + db eval $::SETUP_SQL +} + +# Abort early if this script has been run before. +# +if {[info exists nTest]} return + +# Set the test counters to zero +# +set nErr 0 +set nTest 0 +set skip_test 0 +set failList {} +set maxErr 1000 +if {![info exists speedTest]} { + set speedTest 0 +} + +# Invoke the do_test procedure to run a single test +# +proc do_test {name cmd expected} { + global argv nErr nTest skip_test maxErr + sqlite3_memdebug_settitle $name + if {$skip_test} { + set skip_test 0 + return + } + if {[llength $argv]==0} { + set go 1 + } else { + set go 0 + foreach pattern $argv { + if {[string match $pattern $name]} { + set go 1 + break + } + } + } + if {!$go} return + incr nTest + puts -nonewline $name... + flush stdout + if {[catch {uplevel #0 "$cmd;\n"} result]} { + puts "\nError: $result" + incr nErr + lappend ::failList $name + if {$nErr>$maxErr} {puts "*** Giving up..."; finalize_testing} + } elseif {[string compare $result $expected]} { + puts "\nExpected: \[$expected\]\n Got: \[$result\]" + incr nErr + lappend ::failList $name + if {$nErr>=$maxErr} {puts "*** Giving up..."; finalize_testing} + } else { + puts " Ok" + } + flush stdout +} + +# Run an SQL script. +# Return the number of microseconds per statement. +# +proc speed_trial {name numstmt units sql} { + puts -nonewline [format {%-21.21s } $name...] 
+ flush stdout + set speed [time {sqlite3_exec_nr db $sql}] + set tm [lindex $speed 0] + set rate [expr {1000000.0*$numstmt/$tm}] + set u2 $units/s + puts [format {%12d uS %20.5f %s} $tm $rate $u2] + global total_time + set total_time [expr {$total_time+$tm}] +} +proc speed_trial_init {name} { + global total_time + set total_time 0 +} +proc speed_trial_summary {name} { + global total_time + puts [format {%-21.21s %12d uS TOTAL} $name $total_time] +} + +# Run this routine last +# +proc finish_test {} { + finalize_testing +} +proc finalize_testing {} { + global nTest nErr sqlite_open_file_count + + catch {db close} + catch {db2 close} + catch {db3 close} + + sqlite3 db {} + # sqlite3_clear_tsd_memdebug + db close + set heaplimit [sqlite3_soft_heap_limit] + if {$heaplimit!=$::soft_limit} { + puts "soft-heap-limit changed by this script\ + from $::soft_limit to $heaplimit" + } elseif {$heaplimit!="" && $heaplimit>0} { + puts "soft-heap-limit set to $heaplimit" + } + sqlite3_soft_heap_limit 0 + incr nTest + puts "$nErr errors out of $nTest tests" + if {$nErr>0} { + puts "Failures on these tests: $::failList" + } + if {$nErr>0 && ![working_64bit_int]} { + puts "******************************************************************" + puts "N.B.: The version of TCL that you used to build this test harness" + puts "is defective in that it does not support 64-bit integers. Some or" + puts "all of the test failures above might be a result from this defect" + puts "in your TCL build." + puts "******************************************************************" + } + if {$sqlite_open_file_count} { + puts "$sqlite_open_file_count files were left open" + incr nErr + } + if {[sqlite3_memory_used]>0} { + puts "Unfreed memory: [sqlite3_memory_used] bytes" + incr nErr + ifcapable memdebug { + puts "Writing unfreed memory log to \"./memleak.txt\"" + sqlite3_memdebug_dump ./memleak.txt + } + } else { + puts "All memory allocations freed - no leaks" + } + puts "Maximum memory usage: [sqlite3_memory_highwater] bytes" + foreach f [glob -nocomplain test.db-*-journal] { + file delete -force $f + } + foreach f [glob -nocomplain test.db-mj*] { + file delete -force $f + } + exit [expr {$nErr>0}] +} + +# A procedure to execute SQL +# +proc execsql {sql {db db}} { + # puts "SQL = $sql" + uplevel [list $db eval $sql] +} + +# Execute SQL and catch exceptions. +# +proc catchsql {sql {db db}} { + # puts "SQL = $sql" + set r [catch {$db eval $sql} msg] + lappend r $msg + return $r +} + +# Do an VDBE code dump on the SQL given +# +proc explain {sql {db db}} { + puts "" + puts "addr opcode p1 p2 p3 " + puts "---- ------------ ------ ------ ---------------" + $db eval "explain $sql" {} { + puts [format {%-4d %-12.12s %-6d %-6d %s} $addr $opcode $p1 $p2 $p3] + } +} + +# Another procedure to execute SQL. This one includes the field +# names in the returned list. 
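# Illustrative example, not part of the original harness: for a hypothetical
# table t1(a,b) holding the single row (1,'abc'), a call such as
#
#   execsql2 {SELECT a, b FROM t1}
#
# is expected to return the flat name/value list {a 1 b abc}, because the
# proc below appends each column name followed by that column's value.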
+# +proc execsql2 {sql} { + set result {} + db eval $sql data { + foreach f $data(*) { + lappend result $f $data($f) + } + } + return $result +} + +# Use the non-callback API to execute multiple SQL statements +# +proc stepsql {dbptr sql} { + set sql [string trim $sql] + set r 0 + while {[string length $sql]>0} { + if {[catch {sqlite3_prepare $dbptr $sql -1 sqltail} vm]} { + return [list 1 $vm] + } + set sql [string trim $sqltail] +# while {[sqlite_step $vm N VAL COL]=="SQLITE_ROW"} { +# foreach v $VAL {lappend r $v} +# } + while {[sqlite3_step $vm]=="SQLITE_ROW"} { + for {set i 0} {$i<[sqlite3_data_count $vm]} {incr i} { + lappend r [sqlite3_column_text $vm $i] + } + } + if {[catch {sqlite3_finalize $vm} errmsg]} { + return [list 1 $errmsg] + } + } + return $r +} + +# Delete a file or directory +# +proc forcedelete {filename} { + if {[catch {file delete -force $filename}]} { + exec rm -rf $filename + } +} + +# Do an integrity check of the entire database +# +proc integrity_check {name} { + ifcapable integrityck { + do_test $name { + execsql {PRAGMA integrity_check} + } {ok} + } +} + +# Evaluate a boolean expression of capabilities. If true, execute the +# code. Omit the code if false. +# +proc ifcapable {expr code {else ""} {elsecode ""}} { + regsub -all {[a-z_0-9]+} $expr {$::sqlite_options(&)} e2 + if ($e2) { + set c [catch {uplevel 1 $code} r] + } else { + set c [catch {uplevel 1 $elsecode} r] + } + return -code $c $r +} + +# This proc execs a seperate process that crashes midway through executing +# the SQL script $sql on database test.db. +# +# The crash occurs during a sync() of file $crashfile. When the crash +# occurs a random subset of all unsynced writes made by the process are +# written into the files on disk. Argument $crashdelay indicates the +# number of file syncs to wait before crashing. +# +# The return value is a list of two elements. The first element is a +# boolean, indicating whether or not the process actually crashed or +# reported some other error. The second element in the returned list is the +# error message. This is "child process exited abnormally" if the crash +# occured. +# +# crashsql -delay CRASHDELAY -file CRASHFILE ?-blocksize BLOCKSIZE? $sql +# +proc crashsql {args} { + if {$::tcl_platform(platform)!="unix"} { + error "crashsql should only be used on unix" + } + + set blocksize "" + set crashdelay 1 + set crashfile "" + set dc "" + set sql [lindex $args end] + + for {set ii 0} {$ii < [llength $args]-1} {incr ii 2} { + set z [lindex $args $ii] + set n [string length $z] + set z2 [lindex $args [expr $ii+1]] + + if {$n>1 && [string first $z -delay]==0} {set crashdelay $z2} \ + elseif {$n>1 && [string first $z -file]==0} {set crashfile $z2} \ + elseif {$n>1 && [string first $z -blocksize]==0} {set blocksize "-s $z2" } \ + elseif {$n>1 && [string first $z -characteristics]==0} {set dc "-c {$z2}" } \ + else { error "Unrecognized option: $z" } + } + + if {$crashfile eq ""} { + error "Compulsory option -file missing" + } + + set cfile [file join [pwd] $crashfile] + + set f [open crash.tcl w] + puts $f "sqlite3_crash_enable 1" + puts $f "sqlite3_crashparams $blocksize $dc $crashdelay $cfile" + puts $f "set sqlite_pending_byte $::sqlite_pending_byte" + puts $f "sqlite3 db test.db -vfs crash" + + # This block sets the cache size of the main database to 10 + # pages. This is done in case the build is configured to omit + # "PRAGMA cache_size". 
+ puts $f {db eval {SELECT * FROM sqlite_master;}} + puts $f {set bt [btree_from_db db]} + puts $f {btree_set_cache_size $bt 10} + + puts $f "db eval {" + puts $f "$sql" + puts $f "}" + close $f + + set r [catch { + exec [info nameofexec] crash.tcl >@stdout + } msg] + lappend r $msg +} + +# Usage: do_ioerr_test +# +# This proc is used to implement test cases that check that IO errors +# are correctly handled. The first argument, , is an integer +# used to name the tests executed by this proc. Options are as follows: +# +# -tclprep TCL script to run to prepare test. +# -sqlprep SQL script to run to prepare test. +# -tclbody TCL script to run with IO error simulation. +# -sqlbody TCL script to run with IO error simulation. +# -exclude List of 'N' values not to test. +# -erc Use extended result codes +# -persist Make simulated I/O errors persistent +# -start Value of 'N' to begin with (default 1) +# +# -cksum Boolean. If true, test that the database does +# not change during the execution of the test case. +# +proc do_ioerr_test {testname args} { + + set ::ioerropts(-start) 1 + set ::ioerropts(-cksum) 0 + set ::ioerropts(-erc) 0 + set ::ioerropts(-count) 100000000 + set ::ioerropts(-persist) 1 + array set ::ioerropts $args + + set ::go 1 + for {set n $::ioerropts(-start)} {$::go} {incr n} { + set ::TN $n + incr ::ioerropts(-count) -1 + if {$::ioerropts(-count)<0} break + + # Skip this IO error if it was specified with the "-exclude" option. + if {[info exists ::ioerropts(-exclude)]} { + if {[lsearch $::ioerropts(-exclude) $n]!=-1} continue + } + + # Delete the files test.db and test2.db, then execute the TCL and + # SQL (in that order) to prepare for the test case. + do_test $testname.$n.1 { + set ::sqlite_io_error_pending 0 + catch {db close} + catch {file delete -force test.db} + catch {file delete -force test.db-journal} + catch {file delete -force test2.db} + catch {file delete -force test2.db-journal} + set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] + sqlite3_extended_result_codes $::DB $::ioerropts(-erc) + if {[info exists ::ioerropts(-tclprep)]} { + eval $::ioerropts(-tclprep) + } + if {[info exists ::ioerropts(-sqlprep)]} { + execsql $::ioerropts(-sqlprep) + } + expr 0 + } {0} + + # Read the 'checksum' of the database. + if {$::ioerropts(-cksum)} { + set checksum [cksum] + } + + # Set the Nth IO error to fail. + do_test $testname.$n.2 [subst { + set ::sqlite_io_error_persist $::ioerropts(-persist) + set ::sqlite_io_error_pending $n + }] $n + + # Create a single TCL script from the TCL and SQL specified + # as the body of the test. + set ::ioerrorbody {} + if {[info exists ::ioerropts(-tclbody)]} { + append ::ioerrorbody "$::ioerropts(-tclbody)\n" + } + if {[info exists ::ioerropts(-sqlbody)]} { + append ::ioerrorbody "db eval {$::ioerropts(-sqlbody)}" + } + + # Execute the TCL Script created in the above block. If + # there are at least N IO operations performed by SQLite as + # a result of the script, the Nth will fail. + do_test $testname.$n.3 { + set r [catch $::ioerrorbody msg] + set rc [sqlite3_errcode $::DB] + if {$::ioerropts(-erc)} { + # If we are in extended result code mode, make sure all of the + # IOERRs we get back really do have their extended code values. 
+ # If an extended result code is returned, the sqlite3_errcode + # TCLcommand will return a string of the form: SQLITE_IOERR+nnnn + # where nnnn is a number + if {[regexp {^SQLITE_IOERR} $rc] && ![regexp {IOERR\+\d} $rc]} { + return $rc + } + } else { + # If we are not in extended result code mode, make sure no + # extended error codes are returned. + if {[regexp {\+\d} $rc]} { + return $rc + } + } + # The test repeats as long as $::go is true. + set ::go [expr {$::sqlite_io_error_pending<=0}] + set s [expr $::sqlite_io_error_hit==0] + set ::sqlite_io_error_hit 0 + + # One of two things must have happened. either + # 1. We never hit the IO error and the SQL returned OK + # 2. An IO error was hit and the SQL failed + # + expr { ($s && !$r && !$::go) || (!$s && $r && $::go) } + } {1} + + # If an IO error occured, then the checksum of the database should + # be the same as before the script that caused the IO error was run. + if {$::go && $::ioerropts(-cksum)} { + do_test $testname.$n.4 { + catch {db close} + set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] + cksum + } $checksum + } + + set ::sqlite_io_error_pending 0 + if {[info exists ::ioerropts(-cleanup)]} { + catch $::ioerropts(-cleanup) + } + } + set ::sqlite_io_error_pending 0 + set ::sqlite_io_error_persist 0 + unset ::ioerropts +} + +# Return a checksum based on the contents of database 'db'. +# +proc cksum {{db db}} { + set txt [$db eval { + SELECT name, type, sql FROM sqlite_master order by name + }]\n + foreach tbl [$db eval { + SELECT name FROM sqlite_master WHERE type='table' order by name + }] { + append txt [$db eval "SELECT * FROM $tbl"]\n + } + foreach prag {default_synchronous default_cache_size} { + append txt $prag-[$db eval "PRAGMA $prag"]\n + } + set cksum [string length $txt]-[md5 $txt] + # puts $cksum-[file size test.db] + return $cksum +} + +# Copy file $from into $to. This is used because some versions of +# TCL for windows (notably the 8.4.1 binary package shipped with the +# current mingw release) have a broken "file copy" command. +# +proc copy_file {from to} { + if {$::tcl_platform(platform)=="unix"} { + file copy -force $from $to + } else { + set f [open $from] + fconfigure $f -translation binary + set t [open $to w] + fconfigure $t -translation binary + puts -nonewline $t [read $f [file size $from]] + close $t + close $f + } +} + +# If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set +# to non-zero, then set the global variable $AUTOVACUUM to 1. +set AUTOVACUUM $sqlite_options(default_autovacuum) diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/thread001.test b/libraries/sqlite/unix/sqlite-3.5.1/test/thread001.test new file mode 100644 index 0000000..a6ad19d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/thread001.test @@ -0,0 +1,139 @@ +# 2007 September 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: thread001.test,v 1.4 2007/09/10 07:35:47 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +source $testdir/thread_common.tcl +if {[info commands sqlthread] eq ""} { + return +} + +set ::NTHREAD 10 + +# Run this test three times: +# +# 1) All threads use the same database handle. +# 2) All threads use their own database handles. 
+# 3) All threads use their own database handles, shared-cache is enabled. +# +foreach {tn same_db shared_cache} [list \ + 1 1 0 \ + 2 0 0 \ + 3 0 1 \ +] { + # Empty the database. + # + catchsql { DROP TABLE ab; } + + do_test thread001.$tn.0 { + db close + sqlite3_enable_shared_cache $shared_cache + sqlite3_enable_shared_cache $shared_cache + } $shared_cache + sqlite3 db test.db + + set dbconfig "" + if {$same_db} { + set dbconfig [list set ::DB [sqlite3_connection_pointer db]] + } + + # Set up a database and a schema. The database contains a single + # table with two columns. The first column ("a") is an INTEGER PRIMARY + # KEY. The second contains the md5sum of all rows in the table with + # a smaller value stored in column "a". + # + do_test thread001.$tn.1 { + execsql { + CREATE TABLE ab(a INTEGER PRIMARY KEY, b); + CREATE INDEX ab_i ON ab(b); + INSERT INTO ab SELECT NULL, md5sum(a, b) FROM ab; + SELECT count(*) FROM ab; + } + } {1} + do_test thread001.$tn.2 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + do_test thread001.$tn.3 { + execsql { PRAGMA integrity_check } + } {ok} + + set thread_program { + set needToClose 0 + if {![info exists ::DB]} { + set ::DB [sqlthread open test.db] + set needToClose 1 + } + + for {set i 0} {$i < 100} {incr i} { + # Test that the invariant is true. + do_test t1 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + + # Add another row to the database. + execsql { INSERT INTO ab SELECT NULL, md5sum(a, b) FROM ab } + } + + if {$needToClose} { + sqlite3_close $::DB + } + + list OK + } + + # Kick off $::NTHREAD threads: + # + array unset finished + for {set i 0} {$i < $::NTHREAD} {incr i} { + thread_spawn finished($i) $dbconfig $thread_procs $thread_program + } + + # Wait for all threads to finish, then check they all returned "OK". + # + for {set i 0} {$i < $::NTHREAD} {incr i} { + if {![info exists finished($i)]} { + vwait finished($i) + } + do_test thread001.$tn.4.$i { + set ::finished($i) + } OK + } + + # Check the database still looks Ok. + # + do_test thread001.$tn.5 { + execsql { SELECT count(*) FROM ab; } + } [expr {1 + $::NTHREAD*100}] + do_test thread001.$tn.6 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + do_test thread001.$tn.7 { + execsql { PRAGMA integrity_check } + } {ok} +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/thread002.test b/libraries/sqlite/unix/sqlite-3.5.1/test/thread002.test new file mode 100644 index 0000000..94141be --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/thread002.test @@ -0,0 +1,105 @@ +# 2007 September 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test attempts to deadlock SQLite in shared-cache mode. 
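# (Clarifying note inferred from the thread program below, not an upstream
# comment: the threads ATTACH test0.db, test1.db and test2.db in differing
# orders and then run statements such as
#   INSERT INTO aux1.t1(v) SELECT sum(v) FROM aux2.t1
# that write one attached database while reading another.  With a shared
# cache two threads can therefore end up requesting each other's table locks;
# the execsql wrapper in thread_common.tcl retries on SQLITE_LOCKED and
# SQLITE_BUSY so the threads back off instead of blocking.)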
+# +# +# $Id: thread002.test,v 1.1 2007/09/10 10:53:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +source $testdir/thread_common.tcl +if {[info commands sqlthread] eq ""} { + return +} + +db close +sqlite3_enable_shared_cache 1 + +set ::NTHREAD 10 + +do_test thread002.1 { + # Create 3 databases with identical schemas: + for {set ii 0} {$ii < 3} {incr ii} { + file delete -force test${ii}.db + sqlite3 db test${ii}.db + execsql { + CREATE TABLE t1(k, v); + CREATE INDEX t1_i ON t1(v); + INSERT INTO t1(v) VALUES(1.0); + } + db close + } +} {} + +set thread_program { + set ::DB [sqlite3_open test.db] + for {set ii 1} {$ii <= 3} {incr ii} { + set T [lindex $order [expr $ii-1]] + execsql "ATTACH 'test${T}.db' AS aux${ii}" + } + + for {set ii 0} {$ii < 100} {incr ii} { + execsql { SELECT * FROM aux1.t1 } + execsql { INSERT INTO aux1.t1(v) SELECT sum(v) FROM aux2.t1 } + + execsql { SELECT * FROM aux2.t1 } + execsql { INSERT INTO aux2.t1(v) SELECT sum(v) FROM aux3.t1 } + + execsql { SELECT * FROM aux3.t1 } + execsql { INSERT INTO aux3.t1(v) SELECT sum(v) FROM aux1.t1 } + + execsql { CREATE TABLE aux1.t2(a,b) } + execsql { DROP TABLE aux1.t2 } + + # if {($ii%10)==0} {puts -nonewline . ; flush stdout} + puts -nonewline . ; flush stdout + } + + sqlite3_close $::DB + list OK +} + +set order_list [list {0 1 2} {0 2 1} {1 0 2} {1 2 0} {2 0 1} {2 1 0}] + +array unset finished +for {set ii 0} {$ii < $::NTHREAD} {incr ii} { + set order [lindex $order_list [expr $ii%6]] + thread_spawn finished($ii) $thread_procs "set order {$order}" $thread_program +} + +# Wait for all threads to finish, then check they all returned "OK". +# +for {set i 0} {$i < $::NTHREAD} {incr i} { + if {![info exists finished($i)]} { + vwait finished($i) + } + do_test thread001.2.$i { + set ::finished($i) + } OK +} + +# Check all three databases are Ok. +for {set ii 0} {$ii < 3} {incr ii} { + do_test thread002.3.$ii { + sqlite3 db test${ii}.db + set res [list \ + [execsql {SELECT count(*) FROM t1}] \ + [execsql {PRAGMA integrity_check}] \ + ] + db close + set res + } [list [expr 1 + $::NTHREAD*100] ok] +} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/thread1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/thread1.test new file mode 100644 index 0000000..c50d245 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/thread1.test @@ -0,0 +1,172 @@ +# 2003 December 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is multithreading behavior +# +# $Id: thread1.test,v 1.7 2004/06/19 00:16:31 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Skip this whole file if the thread testing code is not enabled +# +if {[llength [info command thread_step]]==0 || [sqlite3 -has-codec]} { + finish_test + return +} + +# Create some data to work with +# +do_test thread1-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,'abcdefgh'); + INSERT INTO t1 SELECT a+1, b||b FROM t1; + INSERT INTO t1 SELECT a+2, b||b FROM t1; + INSERT INTO t1 SELECT a+4, b||b FROM t1; + SELECT count(*), max(length(b)) FROM t1; + } +} {8 64} + +# Interleave two threads on read access. 
Then make sure a third +# thread can write the database. In other words: +# +# read-lock A +# read-lock B +# unlock A +# unlock B +# write-lock C +# +# At one point, the write-lock of C would fail on Linux. +# +do_test thread1-1.2 { + thread_create A test.db + thread_create B test.db + thread_create C test.db + thread_compile A {SELECT a FROM t1} + thread_step A + thread_result A +} SQLITE_ROW +do_test thread1-1.3 { + thread_argc A +} 1 +do_test thread1-1.4 { + thread_argv A 0 +} 1 +do_test thread1-1.5 { + thread_compile B {SELECT b FROM t1} + thread_step B + thread_result B +} SQLITE_ROW +do_test thread1-1.6 { + thread_argc B +} 1 +do_test thread1-1.7 { + thread_argv B 0 +} abcdefgh +do_test thread1-1.8 { + thread_finalize A + thread_result A +} SQLITE_OK +do_test thread1-1.9 { + thread_finalize B + thread_result B +} SQLITE_OK +do_test thread1-1.10 { + thread_compile C {CREATE TABLE t2(x,y)} + thread_step C + thread_result C +} SQLITE_DONE +do_test thread1-1.11 { + thread_finalize C + thread_result C +} SQLITE_OK +do_test thread1-1.12 { + catchsql {SELECT name FROM sqlite_master} + execsql {SELECT name FROM sqlite_master} +} {t1 t2} + + +# +# The following tests - thread1-2.* - test the following scenario: +# +# 1: Read-lock thread A +# 2: Read-lock thread B +# 3: Attempt to write in thread C -> SQLITE_BUSY +# 4: Check db write failed from main thread. +# 5: Unlock from thread A. +# 6: Attempt to write in thread C -> SQLITE_BUSY +# 7: Check db write failed from main thread. +# 8: Unlock from thread B. +# 9: Attempt to write in thread C -> SQLITE_DONE +# 10: Finalize the write from thread C +# 11: Check db write succeeded from main thread. +# +do_test thread1-2.1 { + thread_halt * + thread_create A test.db + thread_compile A {SELECT a FROM t1} + thread_step A + thread_result A +} SQLITE_ROW +do_test thread1-2.2 { + thread_create B test.db + thread_compile B {SELECT b FROM t1} + thread_step B + thread_result B +} SQLITE_ROW +do_test thread1-2.3 { + thread_create C test.db + thread_compile C {INSERT INTO t2 VALUES(98,99)} + thread_step C + thread_result C + thread_finalize C + thread_result C +} SQLITE_BUSY + +do_test thread1-2.4 { + execsql {SELECT * FROM t2} +} {} + +do_test thread1-2.5 { + thread_finalize A + thread_result A +} SQLITE_OK +do_test thread1-2.6 { + thread_compile C {INSERT INTO t2 VALUES(98,99)} + thread_step C + thread_result C + thread_finalize C + thread_result C +} SQLITE_BUSY +do_test thread1-2.7 { + execsql {SELECT * FROM t2} +} {} +do_test thread1-2.8 { + thread_finalize B + thread_result B +} SQLITE_OK +do_test thread1-2.9 { + thread_compile C {INSERT INTO t2 VALUES(98,99)} + thread_step C + thread_result C +} SQLITE_DONE +do_test thread1-2.10 { + thread_finalize C + thread_result C +} SQLITE_OK +do_test thread1-2.11 { + execsql {SELECT * FROM t2} +} {98 99} + +thread_halt * +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/thread2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/thread2.test new file mode 100644 index 0000000..1d9a208 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/thread2.test @@ -0,0 +1,246 @@ +# 2006 January 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this script is multithreading behavior +# +# $Id: thread2.test,v 1.2 2006/01/18 18:33:42 danielk1977 Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# This file swaps database connections between threads. This +# is illegal if memory-management is enabled, so skip this file +# in that case. +ifcapable memorymanage { + finish_test + return +} + + +# Skip this whole file if the thread testing code is not enabled +# +if {[llength [info command thread_step]]==0 || [sqlite3 -has-codec]} { + finish_test + return +} +if {![info exists threadsOverrideEachOthersLocks]} { + finish_test + return +} + +# Create some data to work with +# +do_test thread1-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,'abcdefgh'); + INSERT INTO t1 SELECT a+1, b||b FROM t1; + INSERT INTO t1 SELECT a+2, b||b FROM t1; + INSERT INTO t1 SELECT a+4, b||b FROM t1; + SELECT count(*), max(length(b)) FROM t1; + } +} {8 64} + +# Use the thread_swap command to move the database connections between +# threads, then verify that they still work. +# +do_test thread2-1.2 { + db close + thread_create A test.db + thread_create B test.db + thread_swap A B + thread_compile A {SELECT a FROM t1 LIMIT 1} + thread_result A +} {SQLITE_OK} +do_test thread2-1.3 { + thread_step A + thread_result A +} {SQLITE_ROW} +do_test thread2-1.4 { + thread_argv A 0 +} {1} +do_test thread2-1.5 { + thread_finalize A + thread_result A +} {SQLITE_OK} +do_test thread2-1.6 { + thread_compile B {SELECT a FROM t1 LIMIT 1} + thread_result B +} {SQLITE_OK} +do_test thread2-1.7 { + thread_step B + thread_result B +} {SQLITE_ROW} +do_test thread2-1.8 { + thread_argv B 0 +} {1} +do_test thread2-1.9 { + thread_finalize B + thread_result B +} {SQLITE_OK} + +# Swap them again. +# +do_test thread2-2.2 { + thread_swap A B + thread_compile A {SELECT a FROM t1 LIMIT 1} + thread_result A +} {SQLITE_OK} +do_test thread2-2.3 { + thread_step A + thread_result A +} {SQLITE_ROW} +do_test thread2-2.4 { + thread_argv A 0 +} {1} +do_test thread2-2.5 { + thread_finalize A + thread_result A +} {SQLITE_OK} +do_test thread2-2.6 { + thread_compile B {SELECT a FROM t1 LIMIT 1} + thread_result B +} {SQLITE_OK} +do_test thread2-2.7 { + thread_step B + thread_result B +} {SQLITE_ROW} +do_test thread2-2.8 { + thread_argv B 0 +} {1} +do_test thread2-2.9 { + thread_finalize B + thread_result B +} {SQLITE_OK} +thread_halt A +thread_halt B + +# Save the original (correct) value of threadsOverrideEachOthersLocks +# so that it can be restored. If this value is left set incorrectly, lots +# of things will go wrong in future tests. +# +set orig_threadOverride $threadsOverrideEachOthersLocks + +# Pretend we are on a system (like RedHat9) were threads do not +# override each others locks. +# +set threadsOverrideEachOthersLocks 0 + +# Verify that we can move database connections between threads as +# long as no locks are held. 
+# +do_test thread2-3.1 { + thread_create A test.db + set DB [thread_db_get A] + thread_halt A +} {} +do_test thread2-3.2 { + set STMT [sqlite3_prepare $DB {SELECT a FROM t1 LIMIT 1} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test thread2-3.3 { + sqlite3_column_int $STMT 0 +} 1 +do_test thread2-3.4 { + sqlite3_finalize $STMT +} SQLITE_OK +do_test thread2-3.5 { + set STMT [sqlite3_prepare $DB {SELECT max(a) FROM t1} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test thread2-3.6 { + sqlite3_column_int $STMT 0 +} 8 +do_test thread2-3.7 { + sqlite3_finalize $STMT +} SQLITE_OK +do_test thread2-3.8 { + sqlite3_close $DB +} {SQLITE_OK} + +do_test thread2-3.10 { + thread_create A test.db + thread_compile A {SELECT a FROM t1 LIMIT 1} + thread_step A + thread_finalize A + set DB [thread_db_get A] + thread_halt A +} {} +do_test thread2-3.11 { + set STMT [sqlite3_prepare $DB {SELECT a FROM t1 LIMIT 1} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ROW +do_test thread2-3.12 { + sqlite3_column_int $STMT 0 +} 1 +do_test thread2-3.13 { + sqlite3_finalize $STMT +} SQLITE_OK +do_test thread2-3.14 { + sqlite3_close $DB +} SQLITE_OK + +do_test thread2-3.20 { + thread_create A test.db + thread_compile A {SELECT a FROM t1 LIMIT 3} + thread_step A + set STMT [thread_stmt_get A] + set DB [thread_db_get A] + thread_halt A +} {} +do_test thread2-3.21 { + sqlite3_step $STMT +} SQLITE_ROW +do_test thread2-3.22 { + sqlite3_column_int $STMT 0 +} 2 +do_test thread2-3.23 { + # The unlock fails here. But because we never check the return + # code from sqlite3OsUnlock (because we cannot do anything about it + # if it fails) we do not realize that an error has occurred. + sqlite3_finalize $STMT +} SQLITE_OK +do_test thread2-3.25 { + sqlite3_close $DB +} SQLITE_OK + +do_test thread2-3.30 { + thread_create A test.db + thread_compile A {BEGIN} + thread_step A + thread_finalize A + thread_compile A {SELECT a FROM t1 LIMIT 1} + thread_step A + thread_finalize A + set DB [thread_db_get A] + thread_halt A +} {} +do_test thread2-3.31 { + set STMT [sqlite3_prepare $DB {INSERT INTO t1 VALUES(99,'error')} -1 TAIL] + sqlite3_step $STMT +} SQLITE_ERROR +do_test thread2-3.32 { + sqlite3_finalize $STMT +} SQLITE_MISUSE +do_test thread2-3.33 { + sqlite3_close $DB +} SQLITE_OK + +# VERY important to set the override flag back to its true value. +# +set threadsOverrideEachOthersLocks $orig_threadOverride + +# Also important to halt the worker threads, which are using spin +# locks and eating away CPU cycles. +# +thread_halt * +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/thread_common.tcl b/libraries/sqlite/unix/sqlite-3.5.1/test/thread_common.tcl new file mode 100644 index 0000000..5e34b17 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/thread_common.tcl @@ -0,0 +1,88 @@ +# 2007 September 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: thread_common.tcl,v 1.2 2007/09/10 10:53:02 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {[info commands sqlthread] eq ""} { + puts -nonewline "Skipping thread-safety tests - " + puts " not running a threadsafe sqlite/tcl build" + puts -nonewline "Both SQLITE_THREADSAFE and TCL_THREADS must be defined when" + puts " building testfixture" + finish_test + return +} + +# The following script is sourced by every thread spawned using +# [sqlthread spawn]: +set thread_procs { + + # Execute the supplied SQL using database handle $::DB. + # + proc execsql {sql} { + + set rc SQLITE_LOCKED + while {$rc eq "SQLITE_LOCKED" + || $rc eq "SQLITE_BUSY" + || $rc eq "SQLITE_SCHEMA"} { + set res [list] + + set err [catch { + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 dummy_tail] + } msg] + + if {$err == 0} { + while {[set rc [sqlite3_step $::STMT]] eq "SQLITE_ROW"} { + for {set i 0} {$i < [sqlite3_column_count $::STMT]} {incr i} { + lappend res [sqlite3_column_text $::STMT 0] + } + } + set rc [sqlite3_finalize $::STMT] + } else { + if {[string first (6) $msg]} { + set rc SQLITE_LOCKED + } else { + set rc SQLITE_ERROR + } + } + + if {[string first locked [sqlite3_errmsg $::DB]]>=0} { + set rc SQLITE_LOCKED + } + + if {$rc eq "SQLITE_LOCKED" || $rc eq "SQLITE_BUSY"} { + #puts -nonewline "([sqlthread id] $rc)" + #flush stdout + after 20 + } + } + + if {$rc ne "SQLITE_OK"} { + error "$rc - [sqlite3_errmsg $::DB]" + } + set res + } + + proc do_test {name script result} { + set res [eval $script] + if {$res ne $result} { + error "$name failed: expected \"$result\" got \"$res\"" + } + } +} + +proc thread_spawn {varname args} { + sqlthread spawn $varname [join $args ;] +} + +return 0 diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/threadtest1.c b/libraries/sqlite/unix/sqlite-3.5.1/test/threadtest1.c new file mode 100644 index 0000000..56fcce3 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/threadtest1.c @@ -0,0 +1,289 @@ +/* +** 2002 January 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a simple standalone program used to test whether +** or not the SQLite library is threadsafe. +** +** Testing the thread safety of SQLite is difficult because there are very +** few places in the code that are even potentially unsafe, and those +** places execute for very short periods of time. So even if the library +** is compiled with its mutexes disabled, it is likely to work correctly +** in a multi-threaded program most of the time. +** +** This file is NOT part of the standard SQLite library. It is used for +** testing only. +*/ +#include "sqlite.h" +#include +#include +#include +#include +#include +#include + +/* +** Enable for tracing +*/ +static int verbose = 0; + +/* +** Come here to die. +*/ +static void Exit(int rc){ + exit(rc); +} + +extern char *sqlite3_mprintf(const char *zFormat, ...); +extern char *sqlite3_vmprintf(const char *zFormat, va_list); + +/* +** When a lock occurs, yield. 
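** (Clarifying note, inferred from the code rather than stated upstream: this
** callback is installed in worker_bee() below with
**     sqlite3_busy_handler(db, db_is_locked, zFilename);
** A non-zero return value asks SQLite to retry the locked operation and a
** zero return gives up with SQLITE_BUSY, so "return iCount<25" allows about
** 25 retries of roughly 100 microseconds each.)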
+*/ +static int db_is_locked(void *NotUsed, int iCount){ + /* sched_yield(); */ + if( verbose ) printf("BUSY %s #%d\n", (char*)NotUsed, iCount); + usleep(100); + return iCount<25; +} + +/* +** Used to accumulate query results by db_query() +*/ +struct QueryResult { + const char *zFile; /* Filename - used for error reporting */ + int nElem; /* Number of used entries in azElem[] */ + int nAlloc; /* Number of slots allocated for azElem[] */ + char **azElem; /* The result of the query */ +}; + +/* +** The callback function for db_query +*/ +static int db_query_callback( + void *pUser, /* Pointer to the QueryResult structure */ + int nArg, /* Number of columns in this result row */ + char **azArg, /* Text of data in all columns */ + char **NotUsed /* Names of the columns */ +){ + struct QueryResult *pResult = (struct QueryResult*)pUser; + int i; + if( pResult->nElem + nArg >= pResult->nAlloc ){ + if( pResult->nAlloc==0 ){ + pResult->nAlloc = nArg+1; + }else{ + pResult->nAlloc = pResult->nAlloc*2 + nArg + 1; + } + pResult->azElem = realloc( pResult->azElem, pResult->nAlloc*sizeof(char*)); + if( pResult->azElem==0 ){ + fprintf(stdout,"%s: malloc failed\n", pResult->zFile); + return 1; + } + } + if( azArg==0 ) return 0; + for(i=0; iazElem[pResult->nElem++] = + sqlite3_mprintf("%s",azArg[i] ? azArg[i] : ""); + } + return 0; +} + +/* +** Execute a query against the database. NULL values are returned +** as an empty string. The list is terminated by a single NULL pointer. +*/ +char **db_query(sqlite *db, const char *zFile, const char *zFormat, ...){ + char *zSql; + int rc; + char *zErrMsg = 0; + va_list ap; + struct QueryResult sResult; + va_start(ap, zFormat); + zSql = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + memset(&sResult, 0, sizeof(sResult)); + sResult.zFile = zFile; + if( verbose ) printf("QUERY %s: %s\n", zFile, zSql); + rc = sqlite3_exec(db, zSql, db_query_callback, &sResult, &zErrMsg); + if( rc==SQLITE_SCHEMA ){ + if( zErrMsg ) free(zErrMsg); + rc = sqlite3_exec(db, zSql, db_query_callback, &sResult, &zErrMsg); + } + if( verbose ) printf("DONE %s %s\n", zFile, zSql); + if( zErrMsg ){ + fprintf(stdout,"%s: query failed: %s - %s\n", zFile, zSql, zErrMsg); + free(zErrMsg); + free(zSql); + Exit(1); + } + sqlite3_free(zSql); + if( sResult.azElem==0 ){ + db_query_callback(&sResult, 0, 0, 0); + } + sResult.azElem[sResult.nElem] = 0; + return sResult.azElem; +} + +/* +** Execute an SQL statement. +*/ +void db_execute(sqlite *db, const char *zFile, const char *zFormat, ...){ + char *zSql; + int rc; + char *zErrMsg = 0; + va_list ap; + va_start(ap, zFormat); + zSql = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + if( verbose ) printf("EXEC %s: %s\n", zFile, zSql); + do{ + rc = sqlite3_exec(db, zSql, 0, 0, &zErrMsg); + }while( rc==SQLITE_BUSY ); + if( verbose ) printf("DONE %s: %s\n", zFile, zSql); + if( zErrMsg ){ + fprintf(stdout,"%s: command failed: %s - %s\n", zFile, zSql, zErrMsg); + free(zErrMsg); + sqlite3_free(zSql); + Exit(1); + } + sqlite3_free(zSql); +} + +/* +** Free the results of a db_query() call. 
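** (Usage sketch, not part of the original file; the table name t1 and the
** expected count "100" are hypothetical:
**     char **az = db_query(db, zFilename, "SELECT count(*) FROM t1");
**     db_check(zFilename, "t1 size", az, "100", 0);
** db_check() frees the returned list itself; results that are not handed to
** db_check() must be released with db_query_free() directly.)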
+*/ +void db_query_free(char **az){ + int i; + for(i=0; az[i]; i++){ + sqlite3_free(az[i]); + } + free(az); +} + +/* +** Check results +*/ +void db_check(const char *zFile, const char *zMsg, char **az, ...){ + va_list ap; + int i; + char *z; + va_start(ap, az); + for(i=0; (z = va_arg(ap, char*))!=0; i++){ + if( az[i]==0 || strcmp(az[i],z)!=0 ){ + fprintf(stdout,"%s: %s: bad result in column %d: %s\n", + zFile, zMsg, i+1, az[i]); + db_query_free(az); + Exit(1); + } + } + va_end(ap); + db_query_free(az); +} + +pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t sig = PTHREAD_COND_INITIALIZER; +int thread_cnt = 0; + +static void *worker_bee(void *pArg){ + const char *zFilename = (char*)pArg; + char *azErr; + int i, cnt; + int t = atoi(zFilename); + char **az; + sqlite *db; + + pthread_mutex_lock(&lock); + thread_cnt++; + pthread_mutex_unlock(&lock); + printf("%s: START\n", zFilename); + fflush(stdout); + for(cnt=0; cnt<10; cnt++){ + sqlite3_open(&zFilename[2], &db); + if( db==0 ){ + fprintf(stdout,"%s: can't open\n", zFilename); + Exit(1); + } + sqlite3_busy_handler(db, db_is_locked, zFilename); + db_execute(db, zFilename, "CREATE TABLE t%d(a,b,c);", t); + for(i=1; i<=100; i++){ + db_execute(db, zFilename, "INSERT INTO t%d VALUES(%d,%d,%d);", + t, i, i*2, i*i); + } + az = db_query(db, zFilename, "SELECT count(*) FROM t%d", t); + db_check(zFilename, "tX size", az, "100", 0); + az = db_query(db, zFilename, "SELECT avg(b) FROM t%d", t); + db_check(zFilename, "tX avg", az, "101", 0); + db_execute(db, zFilename, "DELETE FROM t%d WHERE a>50", t); + az = db_query(db, zFilename, "SELECT avg(b) FROM t%d", t); + db_check(zFilename, "tX avg2", az, "51", 0); + for(i=1; i<=50; i++){ + char z1[30], z2[30]; + az = db_query(db, zFilename, "SELECT b, c FROM t%d WHERE a=%d", t, i); + sprintf(z1, "%d", i*2); + sprintf(z2, "%d", i*i); + db_check(zFilename, "readback", az, z1, z2, 0); + } + db_execute(db, zFilename, "DROP TABLE t%d;", t); + sqlite3_close(db); + } + printf("%s: END\n", zFilename); + /* unlink(zFilename); */ + fflush(stdout); + pthread_mutex_lock(&lock); + thread_cnt--; + if( thread_cnt<=0 ){ + pthread_cond_signal(&sig); + } + pthread_mutex_unlock(&lock); + return 0; +} + +int main(int argc, char **argv){ + char *zFile; + int i, n; + pthread_t id; + if( argc>2 && strcmp(argv[1], "-v")==0 ){ + verbose = 1; + argc--; + argv++; + } + if( argc<2 || (n=atoi(argv[1]))<1 ) n = 10; + for(i=0; i0 ){ + pthread_cond_wait(&sig, &lock); + } + pthread_mutex_unlock(&lock); + for(i=0; i +#include +#include +#include +#include +#include "sqlite.h" + +/* +** Name of the database +*/ +#define DB_FILE "test.db" + +/* +** When this variable becomes non-zero, all threads stop +** what they are doing. +*/ +volatile int all_stop = 0; + +/* +** Callback from the integrity check. If the result is anything other +** than "ok" it means the integrity check has failed. Set the "all_stop" +** global variable to stop all other activity. Print the error message +** or print OK if the string "ok" is seen. +*/ +int check_callback(void *pid, int argc, char **argv, char **notUsed2){ + int id = (int)pid; + if( strcmp(argv[0],"ok") ){ + all_stop = 1; + fprintf(stderr,"id: %s\n", id, argv[0]); + }else{ + /* fprintf(stderr,"%d: OK\n", id); */ + } + return 0; +} + +/* +** Do an integrity check on the database. If the first integrity check +** fails, try it a second time. 
+*/ +int integrity_check(sqlite *db, int id){ + int rc; + if( all_stop ) return 0; + /* fprintf(stderr,"%d: CHECK\n", id); */ + rc = sqlite3_exec(db, "pragma integrity_check", check_callback, 0, 0); + if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ + fprintf(stderr,"%d, Integrity check returns %d\n", id, rc); + } + if( all_stop ){ + sqlite3_exec(db, "pragma integrity_check", check_callback, 0, 0); + } + return 0; +} + +/* +** This is the worker thread +*/ +void *worker(void *workerArg){ + sqlite *db; + int id = (int)workerArg; + int rc; + int cnt = 0; + fprintf(stderr, "Starting worker %d\n", id); + while( !all_stop && cnt++<10000 ){ + if( cnt%100==0 ) printf("%d: %d\n", id, cnt); + while( (sqlite3_open(DB_FILE, &db))!=SQLITE_OK ) sched_yield(); + sqlite3_exec(db, "PRAGMA synchronous=OFF", 0, 0, 0); + /* integrity_check(db, id); */ + if( all_stop ){ sqlite3_close(db); break; } + /* fprintf(stderr, "%d: BEGIN\n", id); */ + rc = sqlite3_exec(db, "INSERT INTO t1 VALUES('bogus data')", 0, 0, 0); + /* fprintf(stderr, "%d: END rc=%d\n", id, rc); */ + sqlite3_close(db); + } + fprintf(stderr, "Worker %d finished\n", id); + return 0; +} + +/* +** Initialize the database and start the threads +*/ +int main(int argc, char **argv){ + sqlite *db; + int i, rc; + pthread_t aThread[5]; + + if( strcmp(DB_FILE,":memory:") ){ + char *zJournal = sqlite3_mprintf("%s-journal", DB_FILE); + unlink(DB_FILE); + unlink(zJournal); + sqlite3_free(zJournal); + } + sqlite3_open(DB_FILE, &db); + if( db==0 ){ + fprintf(stderr,"unable to initialize database\n"); + exit(1); + } + rc = sqlite3_exec(db, "CREATE TABLE t1(x);", 0,0,0); + if( rc ){ + fprintf(stderr,"cannot create table t1: %d\n", rc); + exit(1); + } + sqlite3_close(db); + for(i=0; iS', + 'cf07c8348fdf675cc1f7696b7d45191b'); + CREATE TABLE UserGroups ( + userGroupId INTEGER PRIMARY KEY, + userGroup STRING UNIQUE + ); + INSERT INTO "UserGroups" VALUES(1, 'test'); + INSERT INTO "UserGroups" VALUES(2, 'limited'); + + CREATE TABLE UserGroupMembers ( + userGroupId INTEGER, + userId INTEGER + ); + INSERT INTO "UserGroupMembers" VALUES(1, 1); + INSERT INTO "UserGroupMembers" VALUES(2, 2); + + CREATE TABLE Permissions ( + userGroupId INTEGER, + labelId INTEGER NOT NULL, + itemId INTEGER NOT NULL, + write INTEGER, + capped INTEGER, + admin INTEGER + ); + INSERT INTO "Permissions" VALUES(1, 0, 0, 1, 0, 1); + INSERT INTO "Permissions" VALUES(2, 2, 4, 0, 0, 0); + } +} {} + +# Run the query with an index +# +do_test tkt1443-1.1 { + execsql { + select distinct + Items.Item as trove, UP.pattern as pattern + from + ( select + Permissions.labelId as labelId, + PerItems.item as pattern + from + Users, UserGroupMembers, Permissions + left outer join Items as PerItems + on Permissions.itemId = PerItems.itemId + where + Users.user = 'limited' + and Users.userId = UserGroupMembers.userId + and UserGroupMembers.userGroupId = Permissions.userGroupId + ) as UP join LabelMap on ( UP.labelId = 0 or + UP.labelId = LabelMap.labelId ), + Labels, Items + where + Labels.label = 'localhost@rpl:branch' + and Labels.labelId = LabelMap.labelId + and LabelMap.itemId = Items.itemId + ORDER BY +trove, +pattern + } +} {double .*:runtime double:runtime .*:runtime double:source .*:runtime} + +# Create an index and rerun the query. 
+# Verify that the results are the same +# +do_test tkt1443-1.2 { + execsql { + CREATE UNIQUE INDEX PermissionsIdx + ON Permissions(userGroupId, labelId, itemId); + select distinct + Items.Item as trove, UP.pattern as pattern + from + ( select + Permissions.labelId as labelId, + PerItems.item as pattern + from + Users, UserGroupMembers, Permissions + left outer join Items as PerItems + on Permissions.itemId = PerItems.itemId + where + Users.user = 'limited' + and Users.userId = UserGroupMembers.userId + and UserGroupMembers.userGroupId = Permissions.userGroupId + ) as UP join LabelMap on ( UP.labelId = 0 or + UP.labelId = LabelMap.labelId ), + Labels, Items + where + Labels.label = 'localhost@rpl:branch' + and Labels.labelId = LabelMap.labelId + and LabelMap.itemId = Items.itemId + ORDER BY +trove, +pattern + } +} {double .*:runtime double:runtime .*:runtime double:source .*:runtime} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1444.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1444.test new file mode 100644 index 0000000..13870db --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1444.test @@ -0,0 +1,56 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1444 has been +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound||!view { + finish_test + return +} + +# The use of a VIEW that contained an ORDER BY clause within a UNION ALL +# was causing problems. See ticket #1444. +# +do_test tkt1444-1.1 { + execsql { + CREATE TABLE DemoTable (x INTEGER, TextKey TEXT, DKey Real); + CREATE INDEX DemoTableIdx ON DemoTable (TextKey); + INSERT INTO DemoTable VALUES(9,8,7); + INSERT INTO DemoTable VALUES(1,2,3); + CREATE VIEW DemoView AS SELECT * FROM DemoTable ORDER BY TextKey; + SELECT * FROM DemoTable UNION ALL SELECT * FROM DemoView ORDER BY 1; + } +} {1 2 3.0 1 2 3.0 9 8 7.0 9 8 7.0} +do_test tkt1444-1.2 { + execsql { + SELECT * FROM DemoTable UNION ALL SELECT * FROM DemoView; + } +} {9 8 7.0 1 2 3.0 1 2 3.0 9 8 7.0} +do_test tkt1444-1.3 { + execsql { + DROP VIEW DemoView; + CREATE VIEW DemoView AS SELECT * FROM DemoTable; + SELECT * FROM DemoTable UNION ALL SELECT * FROM DemoView ORDER BY 1; + } +} {1 2 3.0 1 2 3.0 9 8 7.0 9 8 7.0} +do_test tkt1444-1.4 { + execsql { + SELECT * FROM DemoTable UNION ALL SELECT * FROM DemoView; + } +} {9 8 7.0 1 2 3.0 9 8 7.0 1 2 3.0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1449.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1449.test new file mode 100644 index 0000000..5f27ee7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1449.test @@ -0,0 +1,262 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# This file implements tests to verify that ticket #1449 has been +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Somewhere in tkt1449-1.1 is a VIEW definition that uses a subquery and +# a compound SELECT. So we cannot run this file if any of these features +# are not available. +ifcapable !subquery||!compound||!view { + finish_test + return +} + +# The following schema generated problems in ticket #1449. We've retained +# the original schema here because it is some unbelievably complex, it seemed +# like a good test case for SQLite. +# +do_test tkt1449-1.1 { + execsql { + BEGIN; + CREATE TABLE ACLS(ISSUEID text(50) not null, OBJECTID text(50) not null, PARTICIPANTID text(50) not null, PERMISSIONBITS int not null, constraint PK_ACLS primary key (ISSUEID, OBJECTID, PARTICIPANTID)); + CREATE TABLE ACTIONITEMSTATUSES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, FRIENDLYNAME text(100) not null, REVISION int not null, SHORTNAME text(30) not null, LONGNAME text(200) not null, ATTACHMENTHANDLING int not null, RESULT int not null, NOTIFYCREATOR text(1) null, NOTIFYASSIGNEE text(1) null, NOTIFYFYI text(1) null, NOTIFYCLOSURETEAM text(1) null, NOTIFYCOORDINATORS text(1) null, COMMENTREQUIRED text(1) not null, constraint PK_ACTIONITEMSTATUSES primary key (ISSUEID, OBJECTID)); + CREATE TABLE ACTIONITEMTYPES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, REVISION int not null, LABEL text(200) not null, INSTRUCTIONS text not null, EMAILINSTRUCTIONS text null, ALLOWEDSTATUSES text not null, INITIALSTATUS text(100) not null, COMMENTREQUIRED text(1) not null, ATTACHMENTHANDLING int not null, constraint PK_ACTIONITEMTYPES primary key (ISSUEID, OBJECTID)); + CREATE TABLE ATTACHMENTS(TQUNID text(36) not null, OBJECTID text(50) null, ISSUEID text(50) null, DATASTREAM blob not null, CONTENTENCODING text(50) null, CONTENTCHARSET text(50) null, CONTENTTYPE text(100) null, CONTENTID text(100) null, CONTENTLOCATION text(100) null, CONTENTNAME text(100) not null, constraint PK_ATTACHMENTS primary key (TQUNID)); + CREATE TABLE COMPLIANCEPOLICIES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, BODY text null, constraint PK_COMPLIANCEPOLICIES primary key (ISSUEID, OBJECTID)); + CREATE TABLE DBHISTORY(DATETIME text(25) not null, OPERATION text(20) not 
null, KUBIVERSION text(100) not null, FROMVERSION int null, TOVERSION int null); + CREATE TABLE DBINFO(FINGERPRINT text(32) not null, VERSION int not null); + CREATE TABLE DETACHEDATTACHMENTS (TQUNID text(36) not null, ISSUEID text(50) not null, OBJECTID text(50) not null, PATH text(300) not null, DETACHEDFILELASTMODTIMESTAMP text(25) null, CONTENTID text(100) not null, constraint PK_DETACHEDATTACHMENTS primary key (TQUNID)); + CREATE TABLE DOCREFERENCES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, REFERENCEDOCUMENTID text(50) null, constraint PK_DOCREFERENCES primary key (ISSUEID, OBJECTID)); + CREATE TABLE DQ (TQUNID text(36) not null, ISSUEID text(50) not null, DEPENDSID text(50) null, DEPENDSTYPE int null, DEPENDSCOMMANDSTREAM blob null, DEPENDSNODEIDSEQNOKEY text(100) null, DEPENDSACLVERSION int null, constraint PK_DQ primary key (TQUNID)); + CREATE TABLE EMAILQ(TIMEQUEUED int not null, NODEID text(50) not null, MIME blob not null, TQUNID text(36) not null); + CREATE TABLE ENTERPRISEDATA(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, DATE1 text(25) null, DATE2 text(25) null, DATE3 text(25) null, DATE4 text(25) null, DATE5 text(25) null, DATE6 text(25) null, DATE7 text(25) null, DATE8 text(25) null, DATE9 text(25) null, DATE10 text(25) null, VALUE1 int null, VALUE2 int null, VALUE3 int null, VALUE4 int null, VALUE5 int null, VALUE6 int null, VALUE7 int null, VALUE8 int null, VALUE9 int null, VALUE10 int null, VALUE11 int null, VALUE12 int null, VALUE13 int null, VALUE14 int null, VALUE15 int null, VALUE16 int null, VALUE17 int null, VALUE18 int null, VALUE19 int null, VALUE20 int null, STRING1 text(300) null, STRING2 text(300) null, STRING3 text(300) null, STRING4 text(300) null, STRING5 text(300) null, STRING6 text(300) null, STRING7 text(300) null, STRING8 text(300) null, STRING9 text(300) null, STRING10 text(300) null, LONGSTRING1 text null, LONGSTRING2 text null, LONGSTRING3 text null, LONGSTRING4 text null, LONGSTRING5 text null, LONGSTRING6 text null, LONGSTRING7 text null, LONGSTRING8 text null, LONGSTRING9 text null, LONGSTRING10 text null, constraint PK_ENTERPRISEDATA primary key (ISSUEID, OBJECTID)); + CREATE TABLE FILEMORGUE(TQUNID text(36) not null, PATH text(300) not null, DELETEFOLDERWHENEMPTY text(1) null, constraint PK_FILEMORGUE primary key (TQUNID)); + CREATE TABLE FILES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, 
LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARENTENTITYID text(50) null, BODY text null, BODYCONTENTTYPE text(100) null, ISOBSOLETE text(1) null, FILENAME text(300) not null, VISIBLENAME text(300) not null, VERSIONSTRING text(300) not null, DOCUMENTHASH text(40) not null, ISFINAL text(1) null, DOCREFERENCEID text(50) not null, constraint PK_FILES primary key (ISSUEID, OBJECTID)); + CREATE TABLE FOLDERS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, CONTAINERNAME text(300) null, CONTAINERACLSETTINGS text null, constraint PK_FOLDERS primary key (ISSUEID, OBJECTID)); + CREATE TABLE GLOBALSETTINGS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, SINGULARPROJECTLABEL text(30) not null, PLURALPROJECTLABEL text(30) not null, PROJECTREQUIRED text(1) not null, CUSTOMPROJECTSALLOWED text(1) not null, ACTIONITEMSPECXML text null, PROJECTLISTXML text null, ENTERPRISEDATALABELS text null, ENTERPRISEDATATABXSL text null, constraint PK_GLOBALSETTINGS primary key (ISSUEID, OBJECTID)); + CREATE TABLE GLOBALSTRINGPROPERTIES(ID int not null, VALUE text(300) not null, constraint PK_GLOBALSTRINGPROPERTIES primary key (ID)); + CREATE TABLE IMQ(TQUNID text(36) not null, DATETIMEQUEUED text(25) not null, ISSUEID text(50) not null, KUBIBUILD text(30) not null, FAILCOUNT int not null, LASTRUN text(25) null, ENVELOPESTREAM blob not null, PAYLOADSTREAM blob not null, constraint PK_IMQ primary key (TQUNID)); + CREATE TABLE INVITATIONNODES(INVITATIONID text(50) not null, RECIPIENTNODEID text(50) not null, DATECREATED text(25) not null, constraint PK_INVITATIONNODES primary key (INVITATIONID, RECIPIENTNODEID)); + CREATE TABLE INVITATIONS (INVITATIONID text(50) not null, SENDERNODEID text(50) not null, RECIPIENTEMAILADDR text(200) not null, RECIPIENTUSERID text(50) null, RECIPIENTNODES text null, ISSUEID text(50) not null, ENVELOPE text not null, MESSAGEBLOB blob not null, INVITATIONSTATE int not null, TQUNID text(36) not null, DATECREATED text(25) not null); + CREATE TABLE ISSUES (CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, CONTAINERNAME text(300) null, CONTAINERACLSETTINGS text null, ISINITIALIZED text(1) null, BLINDINVITES text null, ISSYSTEMISSUE text(1) not null, ISSUETYPE int not null, ACTIVITYTYPEID text(50) null, ISINCOMPLETE text(1) 
not null, constraint PK_ISSUES primary key (ISSUEID, OBJECTID)); + CREATE TABLE ISSUESETTINGS (CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, ISSUENAME text(300) not null, ISSUEACLSETTINGS text not null, ISSUEDUEDATE text(25) null, ISSUEPRIORITY int null, ISSUESTATUS int null, DESCRIPTION text null, PROJECTID text(100) null, PROJECTNAME text null, PROJECTNAMEISCUSTOM text(1) null, ISSYSTEMISSUE text(1) not null, ACTIONITEMREVNUM int not null, constraint PK_ISSUESETTINGS primary key (ISSUEID, OBJECTID)); + CREATE TABLE KMTPMSG (MSGID integer not null, SENDERID text(50) null, RECIPIENTIDLIST text not null, ISSUEID text(50) null, MESSAGETYPE int not null, ENVELOPE text null, MESSAGEBLOB blob not null, RECEIVEDDATE text(25) not null, constraint PK_KMTPMSG primary key (MSGID)); + CREATE TABLE KMTPNODEQ(NODEID text(50) not null, MSGID int not null, RECEIVEDDATE text(25) not null, SENDCOUNT int not null); + CREATE TABLE KMTPQ(MSGID integer not null, SENDERID text(50) null, RECIPIENTIDLIST text not null, ISSUEID text(50) null, MESSAGETYPE int not null, ENVELOPE text null, MESSAGEBLOB blob not null, constraint PK_KMTPQ primary key (MSGID)); + CREATE TABLE LOGENTRIES(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARENTENTITYID text(50) null, BODY text null, BODYCONTENTTYPE text(100) null, ISOBSOLETE text(1) null, ACTIONTYPE int not null, ASSOCIATEDOBJECTIDS text null, OLDENTITIES text null, NEWENTITIES text null, OTHERENTITIES text null, constraint PK_LOGENTRIES primary key (ISSUEID, OBJECTID)); + CREATE TABLE LSBI(TQUNID text(36) not null, ISSUEID text(50) not null, TABLEITEMID text(50) null, TABLENODEID text(50) null, TABLECMD int null, TABLECONTAINERID text(50) null, TABLESEQNO int null, DIRTYCONTENT text null, STUBBED text(1) null, ENTITYSTUBDATA text null, UPDATENUMBER int not null, constraint PK_LSBI primary key (TQUNID)); + CREATE TABLE LSBN(TQUNID text(36) not null, ISSUEID text(50) not null, NODEID text(50) not null, STORESEQNO int not null, SYNCSEQNO int not null, LASTMSGDATE text(25) null, constraint PK_LSBN primary key (TQUNID)); + CREATE TABLE MMQ(TQUNID text(36) not null, ISSUEID text(50) not null, TABLEREQUESTNODE text(50) null, MMQENTRYINDEX text(60) null, DIRECTION int null, NODEID text(50) null, TABLEFIRSTSEQNO int null, TABLELASTSEQNO int null, NEXTRESENDTIMEOUT text(25) null, TABLETIMEOUTMULTIPLIER int null, constraint PK_MMQ primary key (TQUNID)); + CREATE TABLE NODEREG(NODEID text(50) not null, USERID text(50) null, CREATETIME text(25) not null, TQUNID text(36) not null); + CREATE TABLE NODES (NODEID text(50) not null, USERID text(50) null, NODESTATE int not null, NODECERT text null, KUBIVERSION int not null, KUBIBUILD text(30) not null, TQUNID text(36) not 
null, LASTBINDDATE text(25) null, LASTUNBINDDATE text(25) null, LASTBINDIP text(15) null, NUMBINDS int not null, NUMSENDS int not null, NUMPOLLS int not null, NUMRECVS int not null); + CREATE TABLE PARTICIPANTNODES(ISSUEID text(50) not null, OBJECTID text(50) not null, NODEID text(50) not null, USERID text(50) null, NODESTATE int not null, NODECERT text null, KUBIVERSION int not null, KUBIBUILD text(30) not null, TQUNID text(36) not null); + CREATE TABLE PARTICIPANTS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARTICIPANTSTATE int not null, PARTICIPANTROLE int not null, PARTICIPANTTEAM int not null, ISREQUIREDMEMBER text(1) null, USERID text(50) null, ISAGENT text(1) null, NAME text(150) not null, EMAILADDRESS text(200) not null, ISEMAILONLY text(1) not null, INVITATION text null, ACCEPTRESENDCOUNT int null, ACCEPTRESENDTIMEOUT text(25) null, ACCEPTLASTSENTTONODEID text(50) null, constraint PK_PARTICIPANTS primary key (ISSUEID, OBJECTID)); + CREATE TABLE PARTICIPANTSETTINGS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARTICIPANTID text(50) not null, TASKPIMSYNC text(1) null, MOBILESUPPORT text(1) null, NOTIFYBYEMAIL text(1) null, MARKEDCRITICAL text(1) null, constraint PK_PARTICIPANTSETTINGS primary key (ISSUEID, OBJECTID)); + CREATE TABLE PARTITIONS(PARTITIONID text(50) not null, NAME text(100) not null, LDAPDN text(300) not null, SERVERNODEID text(50) not null, TQUNID text(36) not null); + CREATE TABLE PROJECTS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, NAME text(100) not null, ID text(100) null, constraint PK_PROJECTS primary key (ISSUEID, OBJECTID)); + CREATE TABLE TASKCOMPLETIONS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARENTENTITYID text(50) null, BODY text null, BODYCONTENTTYPE text(100) null, ISOBSOLETE text(1) null, TASKID text(50) not null, DISPOSITION int not null, STATUSID text(50) not null, SHORTNAME text(30) not null, 
LONGNAME text(200) not null, constraint PK_TASKCOMPLETIONS primary key (ISSUEID, OBJECTID)); + CREATE TABLE TASKS(CLASSID int null, SEQNO int not null, LASTMODONNODEID text(50) not null, PREVMODONNODEID text(50) null, ISSUEID text(50) not null, OBJECTID text(50) not null, REVISIONNUM int not null, CONTAINERID text(50) not null, AUTHORID text(50) not null, CREATIONDATE text(25) null, LASTMODIFIEDDATE text(25) null, UPDATENUMBER int null, PREVREVISIONNUM int null, LASTCMD int null, LASTCMDACLVERSION int null, USERDEFINEDFIELD text(300) null, LASTMODIFIEDBYID text(50) null, PARENTENTITYID text(50) null, BODY text null, BODYCONTENTTYPE text(100) null, ISOBSOLETE text(1) null, DUETIME text(25) null, ASSIGNEDTO text(50) not null, TARGETOBJECTIDS text null, RESPONSEID text(50) not null, TYPEID text(50) not null, LABEL text(200) not null, INSTRUCTIONS text not null, ALLOWEDSTATUSES text not null, ISSERIALREVIEW text(1) null, DAYSTOREVIEW int null, REVIEWERIDS text(500) null, REVIEWTYPE int null, REVIEWGROUP text(300) null, constraint PK_TASKS primary key (ISSUEID, OBJECTID)); + CREATE TABLE USERS (USERID text(50) not null, USERSID text(100) not null, ENTERPRISEUSER text(1) not null, USEREMAILADDRESS text(200) null, EMAILVALIDATED text(1) null, VALIDATIONCOOKIE text(50) null, CREATETIME text(25) not null, TQUNID text(36) not null, PARTITIONID text(50) null); + CREATE VIEW CRITICALISSUES as + + + select + USERID, ISSUEID, ISSUENAME, min(DATE1) DATE1 + from ( + select p.USERID USERID, p.ISSUEID ISSUEID, iset.ISSUENAME ISSUENAME, t.DUETIME DATE1 + from PARTICIPANTS p + join TASKS t on t.ASSIGNEDTO = p.OBJECTID + join TASKCOMPLETIONS tc on tc.TASKID = t.OBJECTID + join ISSUESETTINGS iset on iset.ISSUEID = p.ISSUEID + where (t.ISOBSOLETE = 'n' or t.ISOBSOLETE is null) + and tc.DISPOSITION = 1 + and iset.ISSUESTATUS = 1 + union + select p.USERID USERID, p.ISSUEID ISSUEID, iset.ISSUENAME ISSUENAME, iset.ISSUEDUEDATE DATE1 + from PARTICIPANTS p + join PARTICIPANTSETTINGS ps on ps.PARTICIPANTID = p.OBJECTID + join ISSUESETTINGS iset on iset.ISSUEID = p.ISSUEID + where ps.MARKEDCRITICAL = 'y' + and iset.ISSUESTATUS = 1 + ) as CRITICALDATA + group by USERID, ISSUEID, ISSUENAME; + CREATE VIEW CURRENTFILES as + + + select + d.ISSUEID as ISSUEID, + d.REFERENCEDOCUMENTID as OBJECTID, + f.VISIBLENAME as VISIBLENAME + from + DOCREFERENCES d + join FILES f on f.OBJECTID = d.REFERENCEDOCUMENTID; + CREATE VIEW ISSUEDATA as + + + select + ISSUES.OBJECTID as ISSUEID, + ISSUES.CREATIONDATE as CREATIONDATE, + ISSUES.AUTHORID as AUTHORID, + ISSUES.LASTMODIFIEDDATE as LASTMODIFIEDDATE, + ISSUES.LASTMODIFIEDBYID as LASTMODIFIEDBYID, + ISSUESETTINGS.ISSUENAME as ISSUENAME, + ISSUES.ISINITIALIZED as ISINITIALIZED, + ISSUES.ISSYSTEMISSUE as ISSYSTEMISSUE, + ISSUES.ISSUETYPE as ISSUETYPE, + ISSUES.ISINCOMPLETE as ISINCOMPLETE, + ISSUESETTINGS.REVISIONNUM as ISSUESETTINGS_REVISIONNUM, + ISSUESETTINGS.LASTMODIFIEDDATE as ISSUESETTINGS_LASTMODIFIEDDATE, + ISSUESETTINGS.LASTMODIFIEDBYID as ISSUESETTINGS_LASTMODIFIEDBYID, + ISSUESETTINGS.ISSUEDUEDATE as ISSUEDUEDATE, + ISSUESETTINGS.ISSUEPRIORITY as ISSUEPRIORITY, + ISSUESETTINGS.ISSUESTATUS as ISSUESTATUS, + ISSUESETTINGS.DESCRIPTION as DESCRIPTION, + ISSUESETTINGS.PROJECTID as PROJECTID, + ISSUESETTINGS.PROJECTNAME as PROJECTNAME, + ISSUESETTINGS.PROJECTNAMEISCUSTOM as PROJECTNAMEISCUSTOM, + ENTERPRISEDATA.REVISIONNUM as ENTERPRISEDATA_REVISIONNUM, + ENTERPRISEDATA.CREATIONDATE as ENTERPRISEDATA_CREATIONDATE, + ENTERPRISEDATA.AUTHORID as ENTERPRISEDATA_AUTHORID, + 
ENTERPRISEDATA.LASTMODIFIEDDATE as ENTERPRISEDATA_LASTMODIFIEDDATE, + ENTERPRISEDATA.LASTMODIFIEDBYID as ENTERPRISEDATA_LASTMODIFIEDBYID, + ENTERPRISEDATA.DATE1 as DATE1, + ENTERPRISEDATA.DATE2 as DATE2, + ENTERPRISEDATA.DATE3 as DATE3, + ENTERPRISEDATA.DATE4 as DATE4, + ENTERPRISEDATA.DATE5 as DATE5, + ENTERPRISEDATA.DATE6 as DATE6, + ENTERPRISEDATA.DATE7 as DATE7, + ENTERPRISEDATA.DATE8 as DATE8, + ENTERPRISEDATA.DATE9 as DATE9, + ENTERPRISEDATA.DATE10 as DATE10, + ENTERPRISEDATA.VALUE1 as VALUE1, + ENTERPRISEDATA.VALUE2 as VALUE2, + ENTERPRISEDATA.VALUE3 as VALUE3, + ENTERPRISEDATA.VALUE4 as VALUE4, + ENTERPRISEDATA.VALUE5 as VALUE5, + ENTERPRISEDATA.VALUE6 as VALUE6, + ENTERPRISEDATA.VALUE7 as VALUE7, + ENTERPRISEDATA.VALUE8 as VALUE8, + ENTERPRISEDATA.VALUE9 as VALUE9, + ENTERPRISEDATA.VALUE10 as VALUE10, + ENTERPRISEDATA.VALUE11 as VALUE11, + ENTERPRISEDATA.VALUE12 as VALUE12, + ENTERPRISEDATA.VALUE13 as VALUE13, + ENTERPRISEDATA.VALUE14 as VALUE14, + ENTERPRISEDATA.VALUE15 as VALUE15, + ENTERPRISEDATA.VALUE16 as VALUE16, + ENTERPRISEDATA.VALUE17 as VALUE17, + ENTERPRISEDATA.VALUE18 as VALUE18, + ENTERPRISEDATA.VALUE19 as VALUE19, + ENTERPRISEDATA.VALUE20 as VALUE20, + ENTERPRISEDATA.STRING1 as STRING1, + ENTERPRISEDATA.STRING2 as STRING2, + ENTERPRISEDATA.STRING3 as STRING3, + ENTERPRISEDATA.STRING4 as STRING4, + ENTERPRISEDATA.STRING5 as STRING5, + ENTERPRISEDATA.STRING6 as STRING6, + ENTERPRISEDATA.STRING7 as STRING7, + ENTERPRISEDATA.STRING8 as STRING8, + ENTERPRISEDATA.STRING9 as STRING9, + ENTERPRISEDATA.STRING10 as STRING10, + ENTERPRISEDATA.LONGSTRING1 as LONGSTRING1, + ENTERPRISEDATA.LONGSTRING2 as LONGSTRING2, + ENTERPRISEDATA.LONGSTRING3 as LONGSTRING3, + ENTERPRISEDATA.LONGSTRING4 as LONGSTRING4, + ENTERPRISEDATA.LONGSTRING5 as LONGSTRING5, + ENTERPRISEDATA.LONGSTRING6 as LONGSTRING6, + ENTERPRISEDATA.LONGSTRING7 as LONGSTRING7, + ENTERPRISEDATA.LONGSTRING8 as LONGSTRING8, + ENTERPRISEDATA.LONGSTRING9 as LONGSTRING9, + ENTERPRISEDATA.LONGSTRING10 as LONGSTRING10 + from + ISSUES + join ISSUESETTINGS on ISSUES.OBJECTID = ISSUESETTINGS.ISSUEID + left outer join ENTERPRISEDATA on ISSUES.OBJECTID = ENTERPRISEDATA.ISSUEID; + CREATE VIEW ITEMS as + + select 'FILES' as TABLENAME, CLASSID, SEQNO, LASTMODONNODEID, PREVMODONNODEID, ISSUEID, OBJECTID, REVISIONNUM, CONTAINERID, AUTHORID, CREATIONDATE, LASTMODIFIEDDATE, UPDATENUMBER, PREVREVISIONNUM, LASTCMD, LASTCMDACLVERSION, USERDEFINEDFIELD, LASTMODIFIEDBYID, PARENTENTITYID, BODY, BODYCONTENTTYPE, ISOBSOLETE, FILENAME, VISIBLENAME, VERSIONSTRING, DOCUMENTHASH, ISFINAL, DOCREFERENCEID, NULL as ACTIONTYPE, NULL as ASSOCIATEDOBJECTIDS, NULL as OLDENTITIES, NULL as NEWENTITIES, NULL as OTHERENTITIES, NULL as TQUNID, NULL as TABLEITEMID, NULL as TABLENODEID, NULL as TABLECMD, NULL as TABLECONTAINERID, NULL as TABLESEQNO, NULL as DIRTYCONTENT, NULL as STUBBED, NULL as ENTITYSTUBDATA, NULL as PARTICIPANTSTATE, NULL as PARTICIPANTROLE, NULL as PARTICIPANTTEAM, NULL as ISREQUIREDMEMBER, NULL as USERID, NULL as ISAGENT, NULL as NAME, NULL as EMAILADDRESS, NULL as ISEMAILONLY, NULL as INVITATION, NULL as ACCEPTRESENDCOUNT, NULL as ACCEPTRESENDTIMEOUT, NULL as ACCEPTLASTSENTTONODEID, NULL as TASKID, NULL as DISPOSITION, NULL as STATUSID, NULL as SHORTNAME, NULL as LONGNAME, NULL as DUETIME, NULL as ASSIGNEDTO, NULL as TARGETOBJECTIDS, NULL as RESPONSEID, NULL as TYPEID, NULL as LABEL, NULL as INSTRUCTIONS, NULL as ALLOWEDSTATUSES, NULL as ISSERIALREVIEW, NULL as DAYSTOREVIEW, NULL as REVIEWERIDS, NULL as REVIEWTYPE, NULL as REVIEWGROUP from 
FILES + union all + select 'LOGENTRIES' as TABLENAME, CLASSID, SEQNO, LASTMODONNODEID, PREVMODONNODEID, ISSUEID, OBJECTID, REVISIONNUM, CONTAINERID, AUTHORID, CREATIONDATE, LASTMODIFIEDDATE, UPDATENUMBER, PREVREVISIONNUM, LASTCMD, LASTCMDACLVERSION, USERDEFINEDFIELD, LASTMODIFIEDBYID, PARENTENTITYID, BODY, BODYCONTENTTYPE, ISOBSOLETE, NULL as FILENAME, NULL as VISIBLENAME, NULL as VERSIONSTRING, NULL as DOCUMENTHASH, NULL as ISFINAL, NULL as DOCREFERENCEID, ACTIONTYPE, ASSOCIATEDOBJECTIDS, OLDENTITIES, NEWENTITIES, OTHERENTITIES, NULL as TQUNID, NULL as TABLEITEMID, NULL as TABLENODEID, NULL as TABLECMD, NULL as TABLECONTAINERID, NULL as TABLESEQNO, NULL as DIRTYCONTENT, NULL as STUBBED, NULL as ENTITYSTUBDATA, NULL as PARTICIPANTSTATE, NULL as PARTICIPANTROLE, NULL as PARTICIPANTTEAM, NULL as ISREQUIREDMEMBER, NULL as USERID, NULL as ISAGENT, NULL as NAME, NULL as EMAILADDRESS, NULL as ISEMAILONLY, NULL as INVITATION, NULL as ACCEPTRESENDCOUNT, NULL as ACCEPTRESENDTIMEOUT, NULL as ACCEPTLASTSENTTONODEID, NULL as TASKID, NULL as DISPOSITION, NULL as STATUSID, NULL as SHORTNAME, NULL as LONGNAME, NULL as DUETIME, NULL as ASSIGNEDTO, NULL as TARGETOBJECTIDS, NULL as RESPONSEID, NULL as TYPEID, NULL as LABEL, NULL as INSTRUCTIONS, NULL as ALLOWEDSTATUSES, NULL as ISSERIALREVIEW, NULL as DAYSTOREVIEW, NULL as REVIEWERIDS, NULL as REVIEWTYPE, NULL as REVIEWGROUP from LOGENTRIES + union all + select 'LSBI' as TABLENAME, NULL as CLASSID, NULL as SEQNO, NULL as LASTMODONNODEID, NULL as PREVMODONNODEID, ISSUEID, NULL as OBJECTID, NULL as REVISIONNUM, NULL as CONTAINERID, NULL as AUTHORID, NULL as CREATIONDATE, NULL as LASTMODIFIEDDATE, UPDATENUMBER, NULL as PREVREVISIONNUM, NULL as LASTCMD, NULL as LASTCMDACLVERSION, NULL as USERDEFINEDFIELD, NULL as LASTMODIFIEDBYID, NULL as PARENTENTITYID, NULL as BODY, NULL as BODYCONTENTTYPE, NULL as ISOBSOLETE, NULL as FILENAME, NULL as VISIBLENAME, NULL as VERSIONSTRING, NULL as DOCUMENTHASH, NULL as ISFINAL, NULL as DOCREFERENCEID, NULL as ACTIONTYPE, NULL as ASSOCIATEDOBJECTIDS, NULL as OLDENTITIES, NULL as NEWENTITIES, NULL as OTHERENTITIES, TQUNID, TABLEITEMID, TABLENODEID, TABLECMD, TABLECONTAINERID, TABLESEQNO, DIRTYCONTENT, STUBBED, ENTITYSTUBDATA, NULL as PARTICIPANTSTATE, NULL as PARTICIPANTROLE, NULL as PARTICIPANTTEAM, NULL as ISREQUIREDMEMBER, NULL as USERID, NULL as ISAGENT, NULL as NAME, NULL as EMAILADDRESS, NULL as ISEMAILONLY, NULL as INVITATION, NULL as ACCEPTRESENDCOUNT, NULL as ACCEPTRESENDTIMEOUT, NULL as ACCEPTLASTSENTTONODEID, NULL as TASKID, NULL as DISPOSITION, NULL as STATUSID, NULL as SHORTNAME, NULL as LONGNAME, NULL as DUETIME, NULL as ASSIGNEDTO, NULL as TARGETOBJECTIDS, NULL as RESPONSEID, NULL as TYPEID, NULL as LABEL, NULL as INSTRUCTIONS, NULL as ALLOWEDSTATUSES, NULL as ISSERIALREVIEW, NULL as DAYSTOREVIEW, NULL as REVIEWERIDS, NULL as REVIEWTYPE, NULL as REVIEWGROUP from LSBI where TABLECMD=3 + union all + select 'PARTICIPANTS' as TABLENAME, CLASSID, SEQNO, LASTMODONNODEID, PREVMODONNODEID, ISSUEID, OBJECTID, REVISIONNUM, CONTAINERID, AUTHORID, CREATIONDATE, LASTMODIFIEDDATE, UPDATENUMBER, PREVREVISIONNUM, LASTCMD, LASTCMDACLVERSION, USERDEFINEDFIELD, LASTMODIFIEDBYID, NULL as PARENTENTITYID, NULL as BODY, NULL as BODYCONTENTTYPE, NULL as ISOBSOLETE, NULL as FILENAME, NULL as VISIBLENAME, NULL as VERSIONSTRING, NULL as DOCUMENTHASH, NULL as ISFINAL, NULL as DOCREFERENCEID, NULL as ACTIONTYPE, NULL as ASSOCIATEDOBJECTIDS, NULL as OLDENTITIES, NULL as NEWENTITIES, NULL as OTHERENTITIES, NULL as TQUNID, NULL as TABLEITEMID, 
NULL as TABLENODEID, NULL as TABLECMD, NULL as TABLECONTAINERID, NULL as TABLESEQNO, NULL as DIRTYCONTENT, NULL as STUBBED, NULL as ENTITYSTUBDATA, PARTICIPANTSTATE, PARTICIPANTROLE, PARTICIPANTTEAM, ISREQUIREDMEMBER, USERID, ISAGENT, NAME, EMAILADDRESS, ISEMAILONLY, INVITATION, ACCEPTRESENDCOUNT, ACCEPTRESENDTIMEOUT, ACCEPTLASTSENTTONODEID, NULL as TASKID, NULL as DISPOSITION, NULL as STATUSID, NULL as SHORTNAME, NULL as LONGNAME, NULL as DUETIME, NULL as ASSIGNEDTO, NULL as TARGETOBJECTIDS, NULL as RESPONSEID, NULL as TYPEID, NULL as LABEL, NULL as INSTRUCTIONS, NULL as ALLOWEDSTATUSES, NULL as ISSERIALREVIEW, NULL as DAYSTOREVIEW, NULL as REVIEWERIDS, NULL as REVIEWTYPE, NULL as REVIEWGROUP from PARTICIPANTS + union all + select 'TASKCOMPLETIONS' as TABLENAME, CLASSID, SEQNO, LASTMODONNODEID, PREVMODONNODEID, ISSUEID, OBJECTID, REVISIONNUM, CONTAINERID, AUTHORID, CREATIONDATE, LASTMODIFIEDDATE, UPDATENUMBER, PREVREVISIONNUM, LASTCMD, LASTCMDACLVERSION, USERDEFINEDFIELD, LASTMODIFIEDBYID, PARENTENTITYID, BODY, BODYCONTENTTYPE, ISOBSOLETE, NULL as FILENAME, NULL as VISIBLENAME, NULL as VERSIONSTRING, NULL as DOCUMENTHASH, NULL as ISFINAL, NULL as DOCREFERENCEID, NULL as ACTIONTYPE, NULL as ASSOCIATEDOBJECTIDS, NULL as OLDENTITIES, NULL as NEWENTITIES, NULL as OTHERENTITIES, NULL as TQUNID, NULL as TABLEITEMID, NULL as TABLENODEID, NULL as TABLECMD, NULL as TABLECONTAINERID, NULL as TABLESEQNO, NULL as DIRTYCONTENT, NULL as STUBBED, NULL as ENTITYSTUBDATA, NULL as PARTICIPANTSTATE, NULL as PARTICIPANTROLE, NULL as PARTICIPANTTEAM, NULL as ISREQUIREDMEMBER, NULL as USERID, NULL as ISAGENT, NULL as NAME, NULL as EMAILADDRESS, NULL as ISEMAILONLY, NULL as INVITATION, NULL as ACCEPTRESENDCOUNT, NULL as ACCEPTRESENDTIMEOUT, NULL as ACCEPTLASTSENTTONODEID, TASKID, DISPOSITION, STATUSID, SHORTNAME, LONGNAME, NULL as DUETIME, NULL as ASSIGNEDTO, NULL as TARGETOBJECTIDS, NULL as RESPONSEID, NULL as TYPEID, NULL as LABEL, NULL as INSTRUCTIONS, NULL as ALLOWEDSTATUSES, NULL as ISSERIALREVIEW, NULL as DAYSTOREVIEW, NULL as REVIEWERIDS, NULL as REVIEWTYPE, NULL as REVIEWGROUP from TASKCOMPLETIONS + union all + select 'TASKS' as TABLENAME, CLASSID, SEQNO, LASTMODONNODEID, PREVMODONNODEID, ISSUEID, OBJECTID, REVISIONNUM, CONTAINERID, AUTHORID, CREATIONDATE, LASTMODIFIEDDATE, UPDATENUMBER, PREVREVISIONNUM, LASTCMD, LASTCMDACLVERSION, USERDEFINEDFIELD, LASTMODIFIEDBYID, PARENTENTITYID, BODY, BODYCONTENTTYPE, ISOBSOLETE, NULL as FILENAME, NULL as VISIBLENAME, NULL as VERSIONSTRING, NULL as DOCUMENTHASH, NULL as ISFINAL, NULL as DOCREFERENCEID, NULL as ACTIONTYPE, NULL as ASSOCIATEDOBJECTIDS, NULL as OLDENTITIES, NULL as NEWENTITIES, NULL as OTHERENTITIES, NULL as TQUNID, NULL as TABLEITEMID, NULL as TABLENODEID, NULL as TABLECMD, NULL as TABLECONTAINERID, NULL as TABLESEQNO, NULL as DIRTYCONTENT, NULL as STUBBED, NULL as ENTITYSTUBDATA, NULL as PARTICIPANTSTATE, NULL as PARTICIPANTROLE, NULL as PARTICIPANTTEAM, NULL as ISREQUIREDMEMBER, NULL as USERID, NULL as ISAGENT, NULL as NAME, NULL as EMAILADDRESS, NULL as ISEMAILONLY, NULL as INVITATION, NULL as ACCEPTRESENDCOUNT, NULL as ACCEPTRESENDTIMEOUT, NULL as ACCEPTLASTSENTTONODEID, NULL as TASKID, NULL as DISPOSITION, NULL as STATUSID, NULL as SHORTNAME, NULL as LONGNAME, DUETIME, ASSIGNEDTO, TARGETOBJECTIDS, RESPONSEID, TYPEID, LABEL, INSTRUCTIONS, ALLOWEDSTATUSES, ISSERIALREVIEW, DAYSTOREVIEW, REVIEWERIDS, REVIEWTYPE, REVIEWGROUP from TASKS; + CREATE VIEW TASKINFO as + + + select + t.ISSUEID as ISSUEID, + t.OBJECTID as OBJECTID, + t.ASSIGNEDTO as 
ASSIGNEDTO, + t.TARGETOBJECTIDS as TARGETOBJECTIDS, + t.DUETIME as DUETIME, + t.ISOBSOLETE as ISOBSOLETE, + tc.DISPOSITION as DISPOSITION + from + TASKS t + join TASKCOMPLETIONS tc on tc.TASKID = t.OBJECTID; + CREATE INDEX DQ_ISSUEID_DEPENDSID on DQ (ISSUEID, DEPENDSID); + CREATE INDEX EMAILQ_TIMEQUEUED on EMAILQ (TIMEQUEUED); + CREATE INDEX FOLDERS_CONTAINERID_ISSUEID on FOLDERS (CONTAINERID, ISSUEID); + CREATE INDEX IMQ_DATETIMEQUEUED on IMQ (DATETIMEQUEUED); + CREATE INDEX INVITATIONS_RECIPIENTUSERID_INVITATIONID on INVITATIONS (RECIPIENTUSERID, INVITATIONID); + CREATE INDEX INVITATIONS_TQUNID on INVITATIONS (TQUNID); + CREATE INDEX ISSUESETTINGS_CONTAINERID on ISSUESETTINGS (CONTAINERID); + CREATE INDEX KMTPMSG_RECEIVEDDATE on KMTPMSG (RECEIVEDDATE desc); + CREATE INDEX KMTPNODEQ_MSGID on KMTPNODEQ (MSGID); + CREATE INDEX KMTPNODEQ_NODEID_MSGID on KMTPNODEQ (NODEID, MSGID); + CREATE INDEX KMTPNODEQ_RECEIVEDDATE on KMTPNODEQ (RECEIVEDDATE desc); + CREATE INDEX LSBI_ISSUEID_TABLEITEMID on LSBI (ISSUEID, TABLEITEMID); + CREATE INDEX LSBN_ISSUEID_NODEID on LSBN (ISSUEID, NODEID); + CREATE INDEX MMQ_ISSUEID_MMQENTRYINDEX on MMQ (ISSUEID, MMQENTRYINDEX); + CREATE INDEX NODEREG_NODEID_USERID on NODEREG (NODEID, USERID); + CREATE INDEX NODEREG_TQUNID on NODEREG (TQUNID); + CREATE INDEX NODEREG_USERID_NODEID on NODEREG (USERID, NODEID); + CREATE INDEX NODES_NODEID on NODES (NODEID); + CREATE INDEX NODES_TQUNID on NODES (TQUNID); + CREATE INDEX PARTICIPANTNODES_ISSUEID_OBJECTID_NODEID on PARTICIPANTNODES (ISSUEID, OBJECTID, NODEID); + CREATE INDEX PARTICIPANTNODES_TQUNID on PARTICIPANTNODES (TQUNID); + CREATE INDEX PARTICIPANTSETTINGS_PARTICIPANTID on PARTICIPANTSETTINGS (PARTICIPANTID); + CREATE INDEX PARTITIONS_LDAPDN on PARTITIONS (LDAPDN); + CREATE INDEX PARTITIONS_PARTITIONID_SERVERNODEID on PARTITIONS (PARTITIONID, SERVERNODEID); + CREATE INDEX PARTITIONS_SERVERNODEID_PARTITIONID on PARTITIONS (SERVERNODEID, PARTITIONID); + CREATE INDEX PARTITIONS_TQUNID on PARTITIONS (TQUNID); + CREATE INDEX TASKCOMPLETIONS_TASKID on TASKCOMPLETIONS (TASKID); + CREATE INDEX TASKS_ASSIGNEDTO on TASKS (ASSIGNEDTO); + CREATE INDEX USERS_PARTITIONID_USERID on USERS (PARTITIONID, USERID); + CREATE INDEX USERS_TQUNID on USERS (TQUNID); + CREATE INDEX USERS_USERID_PARTITIONID on USERS (USERID, PARTITIONID); + CREATE INDEX USERS_USERSID_USERID on USERS (USERSID, USERID); + COMMIT; + } +} {} + +# Given the schema above, the following query was cause an assertion fault +# do to an uninitialized field in a Select structure. +# +do_test tkt1449-1.2 { + execsql { + select NEWENTITIES from ITEMS where ((ISSUEID = 'x') and (OBJECTID = 'y')) + } +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1473.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1473.test new file mode 100644 index 0000000..3950272 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1473.test @@ -0,0 +1,728 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1473 has been +# fixed. 
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +do_test tkt1473-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1 + } +} {1 2 3 4} + +do_test tkt1473-1.2 { + execsql { + SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=0 + } +} {1} +do_test tkt1473-1.3 { + execsql { + SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=0 + } +} {1} +do_test tkt1473-1.4 { + execsql { + SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=4 + } +} {1 2} +do_test tkt1473-1.5 { + execsql { + SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=4 + } +} {1 2} +do_test tkt1473-1.6 { + execsql { + SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=4 + } +} {2} +do_test tkt1473-1.7 { + execsql { + SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=4 + } +} {2} +do_test tkt1473-1.8 { + execsql { + SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=0 + } +} {} +do_test tkt1473-1.9 { + execsql { + SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=0 + } +} {} + +# Everything from this point on depends on sub-queries. So skip it +# if sub-queries are not available. +ifcapable !subquery { + finish_test + return +} + +do_test tkt1473-2.2 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=0) + } +} {1} +do_test tkt1473-2.3 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=0) + } +} {1} +do_test tkt1473-2.4 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-2.5 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-2.6 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=4) + } +} {2} +do_test tkt1473-2.7 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=4) + } +} {2} +do_test tkt1473-2.8 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=0) + } +} {{}} +do_test tkt1473-2.9 { + execsql { + SELECT (SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=0) + } +} {{}} + +do_test tkt1473-3.2 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=0) + } +} {1} +do_test tkt1473-3.3 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=0) + } +} {1} +do_test tkt1473-3.4 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=1 UNION ALL SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-3.5 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=1 UNION SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-3.6 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-3.7 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=4) + } +} {1} +do_test tkt1473-3.8 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=0 UNION ALL SELECT 2 FROM t1 WHERE b=0) + } +} {0} +do_test tkt1473-3.9 { + execsql { + SELECT EXISTS + (SELECT 1 FROM t1 WHERE a=0 UNION SELECT 2 FROM t1 WHERE b=0) + } +} {0} + +do_test tkt1473-4.1 { + execsql { + CREATE TABLE t2(x,y); + INSERT INTO t2 VALUES(1,2); + INSERT INTO t2 SELECT x+2, y+2 FROM t2; + INSERT INTO t2 SELECT x+4, y+4 FROM t2; + INSERT INTO t2 SELECT x+8, y+8 FROM t2; + INSERT INTO t2 SELECT x+16, y+16 FROM 
t2; + INSERT INTO t2 SELECT x+32, y+32 FROM t2; + INSERT INTO t2 SELECT x+64, y+64 FROM t2; + SELECT count(*), sum(x), sum(y) FROM t2; + } +} {64 4096 4160} +do_test tkt1473-4.2 { + execsql { + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=3 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + } +} {2 4 8 10} +do_test tkt1473-4.3 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=3 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {2} +do_test tkt1473-4.4 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=3 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {4} +do_test tkt1473-4.5 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-1 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {8} +do_test tkt1473-4.6 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-2 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=-3 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {10} +do_test tkt1473-4.7 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-2 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=-3 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {{}} + +do_test tkt1473-5.3 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=3 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} +do_test tkt1473-5.4 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE 
x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=3 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} + +do_test tkt1473-5.5 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-1 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=2 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {1} +do_test tkt1473-5.6 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-2 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=-3 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} +do_test tkt1473-5.7 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION ALL + SELECT 2 FROM t2 WHERE x=-1 + UNION ALL + SELECT 3 FROM t2 WHERE x=2 + UNION ALL + SELECT 4 FROM t2 WHERE x=-2 + UNION ALL + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION ALL + SELECT 7 FROM t2 WHERE y=1 + UNION ALL + SELECT 8 FROM t2 WHERE y=-3 + UNION ALL + SELECT 9 FROM t2 WHERE y=3 + UNION ALL + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {0} + +do_test tkt1473-6.3 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=3 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=2 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} +do_test tkt1473-6.4 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=3 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=2 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} + +do_test tkt1473-6.5 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=-1 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=2 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {1} +do_test tkt1473-6.6 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=-2 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=-3 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} +do_test tkt1473-6.7 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 
2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=-2 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=-3 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {0} +do_test tkt1473-6.8 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=-2 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=-3 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=4 + ) + } +} {1} +do_test tkt1473-6.9 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 + UNION + SELECT 2 FROM t2 WHERE x=-1 + UNION + SELECT 3 FROM t2 WHERE x=2 + UNION + SELECT 4 FROM t2 WHERE x=-2 + UNION + SELECT 5 FROM t2 WHERE x=4 + UNION ALL + SELECT 6 FROM t2 WHERE y=0 + UNION + SELECT 7 FROM t2 WHERE y=1 + UNION + SELECT 8 FROM t2 WHERE y=-3 + UNION + SELECT 9 FROM t2 WHERE y=3 + UNION + SELECT 10 FROM t2 WHERE y=-4 + ) + } +} {0} + +do_test tkt1473-7.1 { + execsql { + SELECT 1 FROM t2 WHERE x=1 EXCEPT SELECT 2 FROM t2 WHERE y=2 + } +} {1} +do_test tkt1473-7.2 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=1 EXCEPT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {1} +do_test tkt1473-7.3 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=1 EXCEPT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {1} +do_test tkt1473-7.4 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 EXCEPT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {{}} +do_test tkt1473-7.5 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=0 EXCEPT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {0} + +do_test tkt1473-8.1 { + execsql { + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 2 FROM t2 WHERE y=2 + } +} {} +do_test tkt1473-8.1 { + execsql { + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 1 FROM t2 WHERE y=2 + } +} {1} +do_test tkt1473-8.3 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {{}} +do_test tkt1473-8.4 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 1 FROM t2 WHERE y=2 + ) + } +} {1} +do_test tkt1473-8.5 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 2 FROM t2 WHERE y=2 + ) + } +} {0} +do_test tkt1473-8.6 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 1 FROM t2 WHERE y=2 + ) + } +} {1} +do_test tkt1473-8.7 { + execsql { + SELECT ( + SELECT 1 FROM t2 WHERE x=0 INTERSECT SELECT 1 FROM t2 WHERE y=2 + ) + } +} {{}} +do_test tkt1473-8.8 { + execsql { + SELECT EXISTS ( + SELECT 1 FROM t2 WHERE x=1 INTERSECT SELECT 1 FROM t2 WHERE y=0 + ) + } +} {0} + + + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1501.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1501.test new file mode 100644 index 0000000..19ec7f7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1501.test @@ -0,0 +1,36 @@ +# 2005 November 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# This file implements tests to verify that ticket #1501 is +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +do_test tkt1501-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + SELECT a, b, 'abc' FROM t1 + UNION + SELECT b, a, 'xyz' FROM t1 + ORDER BY 2, 3; + } +} {2 1 xyz 1 2 abc} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1512.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1512.test new file mode 100644 index 0000000..a2c39db --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1512.test @@ -0,0 +1,54 @@ +# 2005 September 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1512 is +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!vacuum || !autovacuum} { + finish_test + return +} +if {[db one {PRAGMA auto_vacuum}]} { + finish_test + return +} + +do_test tkt1512-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1 + } +} {1 2 3 4} +do_test tkt1512-1.2 { + file size test.db +} {2048} +do_test tkt1512-1.3 { + execsql { + DROP TABLE t1; + } + file size test.db +} {2048} +do_test tkt1512-1.4 { + execsql { + VACUUM; + } + file size test.db +} {1024} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1514.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1514.test new file mode 100644 index 0000000..aff37e8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1514.test @@ -0,0 +1,27 @@ +# 2005 November 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1514 is +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt1514-1.1 { + catchsql { + CREATE TABLE t1(a,b); + SELECT a FROM t1 WHERE max(b)<10 GROUP BY a; + } +} {1 {misuse of aggregate: max(b)}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1536.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1536.test new file mode 100644 index 0000000..46e4619 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1536.test @@ -0,0 +1,38 @@ +# 2005 November 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1536 is +# fixed. 
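+#
+# A brief note (added as an annotation; not from the original file): column b
+# below is declared TEXT, so it carries TEXT affinity and the literal '01'
+# should be stored, and copied by INSERT ... SELECT, as the two-character
+# string '01' rather than being coerced to the number 1 (which would lose
+# the leading zero).  That is what tkt1536-1.2 checks:
+#
+#   INSERT INTO t1(b) SELECT b FROM t1;
+#   SELECT b FROM t1 WHERE rowid=2;   -- expected result: 01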
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt1536-1.1 { + execsql { + CREATE TABLE t1( + a INTEGER PRIMARY KEY, + b TEXT + ); + INSERT INTO t1 VALUES(1,'01'); + SELECT typeof(a), typeof(b) FROM t1; + } +} {integer text} +do_test tkt1536-1.2 { + execsql { + INSERT INTO t1(b) SELECT b FROM t1; + SELECT b FROM t1 WHERE rowid=2; + } +} {01} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1537.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1537.test new file mode 100644 index 0000000..633f91f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1537.test @@ -0,0 +1,122 @@ +# 2005 November 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1537 is +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt1537-1.1 { + execsql { + CREATE TABLE t1(id, a1, a2); + INSERT INTO t1 VALUES(1, NULL, NULL); + INSERT INTO t1 VALUES(2, 1, 3); + CREATE TABLE t2(id, b); + INSERT INTO t2 VALUES(3, 1); + INSERT INTO t2 VALUES(4, NULL); + SELECT * FROM t1 LEFT JOIN t2 ON a1=b OR a2=+b; + } +} {1 {} {} {} {} 2 1 3 3 1} +do_test tkt1537-1.2 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON a1=b OR a2=b; + } +} {1 {} {} {} {} 2 1 3 3 1} +do_test tkt1537-1.3 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON a1=b OR a2=b; + } +} {3 1 2 1 3 4 {} {} {} {}} +ifcapable subquery { + do_test tkt1537-1.4 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b IN (a1,a2); + } + } {1 {} {} {} {} 2 1 3 3 1} + do_test tkt1537-1.5 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON b IN (a2,a1); + } + } {3 1 2 1 3 4 {} {} {} {}} +} +do_test tkt1537-1.6 { + execsql { + CREATE INDEX t1a1 ON t1(a1); + CREATE INDEX t1a2 ON t1(a2); + CREATE INDEX t2b ON t2(b); + SELECT * FROM t1 LEFT JOIN t2 ON a1=b OR a2=b; + } +} {1 {} {} {} {} 2 1 3 3 1} +do_test tkt1537-1.7 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON a1=b OR a2=b; + } +} {3 1 2 1 3 4 {} {} {} {}} + +ifcapable subquery { + do_test tkt1537-1.8 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b IN (a1,a2); + } + } {1 {} {} {} {} 2 1 3 3 1} + do_test tkt1537-1.9 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON b IN (a2,a1); + } + } {3 1 2 1 3 4 {} {} {} {}} +} + +execsql { + DROP INDEX t1a1; + DROP INDEX t1a2; + DROP INDEX t2b; +} + +do_test tkt1537-2.1 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b BETWEEN a1 AND a2; + } +} {1 {} {} {} {} 2 1 3 3 1} +do_test tkt1537-2.2 { + execsql { + CREATE INDEX t2b ON t2(b); + SELECT * FROM t1 LEFT JOIN t2 ON b BETWEEN a1 AND a2; + } +} {1 {} {} {} {} 2 1 3 3 1} +do_test tkt1537-2.3 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON b BETWEEN a1 AND a2; + } +} {3 1 2 1 3 4 {} {} {} {}} +do_test tkt1537-2.4 { + execsql { + CREATE INDEX t1a1 ON t1(a1); + CREATE INDEX t1a2 ON t1(a2); + SELECT * FROM t2 LEFT JOIN t1 ON b BETWEEN a1 AND a2; + } +} {3 1 2 1 3 4 {} {} {} {}} + +do_test tkt1537-3.1 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b GLOB 'abc*' WHERE t1.id=1; + } +} {1 {} {} {} {}} +do_test tkt1537-3.2 { + execsql { + SELECT * FROM t2 LEFT JOIN t1 ON a1 GLOB 'abc*' WHERE t2.id=3; + } +} {3 1 {} {} {}} + + +finish_test diff --git 
a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1567.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1567.test new file mode 100644 index 0000000..6c4548a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1567.test @@ -0,0 +1,51 @@ +# 2005 December 19 2005 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1567 is +# fixed. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt1567-1.1 { + execsql { + CREATE TABLE t1(a TEXT PRIMARY KEY); + } + set bigstr abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ + for {set i 0} {$i<100} {incr i} { + set x [format %5d [expr $i*2]] + set sql "INSERT INTO t1 VALUES('$x-$bigstr')" + execsql $sql + } +} {} +integrity_check tkt1567-1.2 + +do_test tkt1567-1.3 { + execsql { + BEGIN; + UPDATE t1 SET a = a||'x' WHERE rowid%2==0; + } +} {} +do_test tkt1567-1.4 { + catchsql { + UPDATE t1 SET a = CASE WHEN rowid<90 THEN substr(a,1,10) ELSE '9999' END; + } +} {1 {column a is not unique}} +do_test tkt1567-1.5 { + execsql { + COMMIT; + } +} {} +integrity_check tkt1567-1.6 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1644.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1644.test new file mode 100644 index 0000000..aa26a88 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1644.test @@ -0,0 +1,111 @@ +# 2006 January 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1644 is +# fixed. Ticket #1644 complains that precompiled statements +# are not expired correctly as a result of changes to TEMP +# views and triggers. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !tempdb||!view { + finish_test + return +} + +# Create two tables T1 and T2 and make V1 point to T1. +do_test tkt1644-1.1 { + execsql { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + CREATE TABLE t2(b); + INSERT INTO t2 VALUES(99); + CREATE TEMP VIEW v1 AS SELECT * FROM t1; + SELECT * FROM v1; + } +} {1} + +# The "SELECT * FROM v1" should be in the TCL interface cache below. +# It will continue to point to T1 unless the cache is invalidated when +# the view changes. +# +do_test tkt1644-1.2 { + execsql { + DROP VIEW v1; + CREATE TEMP VIEW v1 AS SELECT * FROM t2; + SELECT * FROM v1; + } +} {99} + +# Cache an access to the T1 table. +# +do_test tkt1644-1.3 { + execsql { + SELECT * FROM t1; + } +} {1} + +# Create a temp table T1. Make sure the cache is invalidated so that +# the statement is recompiled and refers to the empty temp table. 
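+# (Aside, not part of the original file: unqualified table names in SQLite
+# resolve against the "temp" database before "main", so once the TEMP TABLE
+# t1 below exists, a statement prepared as
+#
+#   SELECT * FROM t1;   -- now reads temp.t1, which is empty
+#
+# must be recompiled; a stale cached statement would keep returning the row
+# from the original t1.)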
+# +do_test tkt1644-1.4 { + execsql { + CREATE TEMP TABLE t1(x); + } + execsql { + SELECT * FROM t1; + } +} {} + +ifcapable view { + do_test tkt1644-2.1 { + execsql { + CREATE TEMP TABLE temp_t1(a, b); + } + set ::DB [sqlite3_connection_pointer db] + set ::STMT [sqlite3_prepare $::DB "SELECT * FROM temp_t1" -1 DUMMY] + execsql { + DROP TABLE temp_t1; + } + list [sqlite3_step $::STMT] [sqlite3_finalize $::STMT] + } {SQLITE_ERROR SQLITE_SCHEMA} + + do_test tkt1644-2.2 { + execsql { + CREATE TABLE real_t1(a, b); + CREATE TEMP VIEW temp_v1 AS SELECT * FROM real_t1; + } + set ::DB [sqlite3_connection_pointer db] + set ::STMT [sqlite3_prepare $::DB "SELECT * FROM temp_v1" -1 DUMMY] + execsql { + DROP VIEW temp_v1; + } + list [sqlite3_step $::STMT] [sqlite3_finalize $::STMT] + } {SQLITE_ERROR SQLITE_SCHEMA} + + do_test tkt1644-2.3 { + execsql { + CREATE TEMP VIEW temp_v1 AS SELECT * FROM real_t1 LIMIT 10 OFFSET 10; + } + set ::DB [sqlite3_connection_pointer db] + set ::STMT [sqlite3_prepare $::DB "SELECT * FROM temp_v1" -1 DUMMY] + execsql { + DROP VIEW temp_v1; + } + list [sqlite3_step $::STMT] [sqlite3_finalize $::STMT] + } {SQLITE_ERROR SQLITE_SCHEMA} +} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1667.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1667.test new file mode 100644 index 0000000..31b698c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1667.test @@ -0,0 +1,85 @@ +# 2006 February 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1667 has been +# fixed. +# +# +# $Id: tkt1667.test,v 1.2 2006/06/20 11:01:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !autovacuum||!tclvar { + finish_test + return +} + +db close +file delete -force test.db test.db-journal + +# Set the pending byte offset such that the page it is on is +# the first autovacuum pointer map page in the file (assume a page +# size of 1024). + +set first_ptrmap_page [expr 1024/5 + 3] +set sqlite_pending_byte [expr 1024 * ($first_ptrmap_page-1)] + +sqlite db test.db + +do_test tkt1667-1 { + execsql { + PRAGMA auto_vacuum = 1; + BEGIN; + CREATE TABLE t1(a, b); + } + for {set i 0} {$i < 500} {incr i} { + execsql { + INSERT INTO t1 VALUES($i, randstr(1000, 2000)) + } + } + execsql { + COMMIT; + } +} {} +for {set i 0} {$i < 500} {incr i} { + do_test tkt1667-2.$i.1 { + execsql { + DELETE FROM t1 WHERE a = $i; + } + } {} + integrity_check tkt1667-2.$i.2 +} + +do_test tkt1667-3 { + execsql { + BEGIN; + } + for {set i 0} {$i < 500} {incr i} { + execsql { + INSERT INTO t1 VALUES($i, randstr(1000, 2000)) + } + } + execsql { + COMMIT; + } +} {} +do_test tkt1667-4.1 { + execsql { + DELETE FROM t1; + } +} {} +integrity_check tkt1667-4.2 + +finish_test + + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1873.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1873.test new file mode 100644 index 0000000..0eca230 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt1873.test @@ -0,0 +1,67 @@ +# 2006 June 27 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #1873 has been +# fixed. +# +# +# $Id: tkt1873.test,v 1.1 2006/06/27 16:34:58 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +file delete -force test2.db test2.db-journal + +do_test tkt1873-1.1 { + execsql { + CREATE TABLE t1(x, y); + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t2(x, y); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t2 VALUES(5, 6); + INSERT INTO t2 VALUES(7, 8); + } +} {} + +do_test tkt1873-1.2 { + set rc [catch { + db eval {SELECT * FROM t2 LIMIT 1} { + db eval {DETACH aux} + } + } msg] + list $rc $msg +} {1 {database aux is locked}} + +do_test tkt1873-1.3 { + set rc [catch { + db eval {SELECT * FROM t1 LIMIT 1} { + db eval {DETACH aux} + } + } msg] + list $rc $msg +} {0 {}} + +do_test tkt1873-1.4 { + catchsql { + select * from t2; + } +} {1 {no such table: t2}} + +do_test tkt1873-1.5 { + catchsql { + ATTACH 'test2.db' AS aux; + select * from t2; + } +} {0 {5 6 7 8}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2141.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2141.test new file mode 100644 index 0000000..a40659f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2141.test @@ -0,0 +1,61 @@ +# 2007 January 03 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #2141 has been +# fixed. 
+# +# +# $Id: tkt2141.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +do_test tkt2141-1.1 { + execsql { + CREATE TABLE tab1 (t1_id integer PRIMARY KEY, t1_desc); + INSERT INTO tab1 VALUES(1,'rec 1 tab 1'); + CREATE TABLE tab2 (t2_id integer PRIMARY KEY, t2_id_t1, t2_desc); + INSERT INTO tab2 VALUES(1,1,'rec 1 tab 2'); + CREATE TABLE tab3 (t3_id integer PRIMARY KEY, t3_id_t2, t3_desc); + INSERT INTO tab3 VALUES(1,1,'aa'); + SELECT * + FROM tab1 t1 LEFT JOIN tab2 t2 ON t1.t1_id = t2.t2_id_t1 + WHERE t2.t2_id IN + (SELECT t2_id FROM tab2, tab3 ON t2_id = t3_id_t2 + WHERE t3_id IN (1,2) GROUP BY t2_id); + } +} {1 {rec 1 tab 1} 1 1 {rec 1 tab 2}} +do_test tkt2141-1.2 { + execsql { + SELECT * + FROM tab1 t1 LEFT JOIN tab2 t2 ON t1.t1_id = t2.t2_id_t1 + WHERE t2.t2_id IN + (SELECT t2_id FROM tab2, tab3 ON t2_id = t3_id_t2 + WHERE t3_id IN (1,2)); + } +} {1 {rec 1 tab 1} 1 1 {rec 1 tab 2}} +do_test tkt2141-1.3 { + execsql { + SELECT * + FROM tab1 t1 LEFT JOIN tab2 t2 + WHERE t2.t2_id IN + (SELECT t2_id FROM tab2, tab3 ON t2_id = t3_id_t2 + WHERE t3_id IN (1,2)); + } +} {1 {rec 1 tab 1} 1 1 {rec 1 tab 2}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2192.test b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2192.test new file mode 100644 index 0000000..97b5152 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/tkt2192.test @@ -0,0 +1,140 @@ +# 2007 January 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #2192 has been +# fixed. 
+# +# +# $Id: tkt2192.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !datetime { + finish_test + return +} + +do_test tkt2191-1.1 { + execsql { + -- Raw data (RBS) -------- + + create table records ( + date real, + type text, + description text, + value integer, + acc_name text, + acc_no text + ); + + -- Direct Debits ---------------- + create view direct_debits as + select * from records where type = 'D/D'; + + create view monthly_direct_debits as + select strftime('%Y-%m', date) as date, (-1 * sum(value)) as value + from direct_debits + group by strftime('%Y-%m', date); + + -- Expense Categories --------------- + create view energy as + select strftime('%Y-%m', date) as date, (-1 * sum(value)) as value + from direct_debits + where description like '%NPOWER%' + group by strftime('%Y-%m', date); + + create view phone_internet as + select strftime('%Y-%m', date) as date, (-1 * sum(value)) as value + from direct_debits + where description like '%BT DIRECT%' + or description like '%SUPANET%' + or description like '%ORANGE%' + group by strftime('%Y-%m', date); + + create view credit_cards as + select strftime('%Y-%m', date) as date, (-1 * sum(value)) as value + from direct_debits where description like '%VISA%' + group by strftime('%Y-%m', date); + + -- Overview --------------------- + + create view expense_overview as + select 'Energy' as expense, date, value from energy + union + select 'Phone/Internet' as expense, date, value from phone_internet + union + select 'Credit Card' as expense, date, value from credit_cards; + + create view jan as + select 'jan', expense, value from expense_overview + where date like '%-01'; + + create view nov as + select 'nov', expense, value from expense_overview + where date like '%-11'; + + create view summary as + select * from jan join nov on (jan.expense = nov.expense); + } +} {} +do_test tkt2192-1.2 { + # set ::sqlite_addop_trace 1 + execsql { + select * from summary; + } +} {} +do_test tkt2192-2.1 { + execsql { + CREATE TABLE t1(a,b); + CREATE VIEW v1 AS + SELECT * FROM t1 WHERE b%7=0 UNION SELECT * FROM t1 WHERE b%5=0; + INSERT INTO t1 VALUES(1,7); + INSERT INTO t1 VALUES(2,10); + INSERT INTO t1 VALUES(3,14); + INSERT INTO t1 VALUES(4,15); + INSERT INTO t1 VALUES(1,16); + INSERT INTO t1 VALUES(2,17); + INSERT INTO t1 VALUES(3,20); + INSERT INTO t1 VALUES(4,21); + INSERT INTO t1 VALUES(1,22); + INSERT INTO t1 VALUES(2,24); + INSERT INTO t1 VALUES(3,25); + INSERT INTO t1 VALUES(4,26); + INSERT INTO t1 VALUES(1,27); + + SELECT b FROM v1 ORDER BY b; + } +} {7 10 14 15 20 21 25} +do_test tkt2192-2.2 { + execsql { + SELECT * FROM v1 ORDER BY a, b; + } +} {1 7 2 10 3 14 3 20 3 25 4 15 4 21} +do_test tkt2192-2.3 { + execsql { + SELECT x.a || '/' || x.b || '/' || y.b + FROM v1 AS x JOIN v1 AS y ON x.a=y.a AND x.b0} + } 1 + ifcapable pager_pragmas { + do_test trans-9.$i.5-$cnt { + expr {$sqlite_fullsync_count>0} + } [expr {$i%2==0}] + } else { + do_test trans-9.$i.5-$cnt { + expr {$sqlite_fullsync_count==0} + } {1} + } + } + } + set ::pager_old_format 0 +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger1.test new file mode 100644 index 0000000..c1cb755 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger1.test @@ -0,0 +1,631 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests creating and dropping triggers, and interaction thereof +# with the database COMMIT/ROLLBACK logic. +# +# 1. CREATE and DROP TRIGGER tests +# trig-1.1: Error if table does not exist +# trig-1.2: Error if trigger already exists +# trig-1.3: Created triggers are deleted if the transaction is rolled back +# trig-1.4: DROP TRIGGER removes trigger +# trig-1.5: Dropped triggers are restored if the transaction is rolled back +# trig-1.6: Error if dropped trigger doesn't exist +# trig-1.7: Dropping the table automatically drops all triggers +# trig-1.8: A trigger created on a TEMP table is not inserted into sqlite_master +# trig-1.9: Ensure that we cannot create a trigger on sqlite_master +# trig-1.10: +# trig-1.11: +# trig-1.12: Ensure that INSTEAD OF triggers cannot be created on tables +# trig-1.13: Ensure that AFTER triggers cannot be created on views +# trig-1.14: Ensure that BEFORE triggers cannot be created on views +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +do_test trigger1-1.1.1 { + catchsql { + CREATE TRIGGER trig UPDATE ON no_such_table BEGIN + SELECT * from sqlite_master; + END; + } +} {1 {no such table: main.no_such_table}} + +ifcapable tempdb { + do_test trigger1-1.1.2 { + catchsql { + CREATE TEMP TRIGGER trig UPDATE ON no_such_table BEGIN + SELECT * from sqlite_master; + END; + } + } {1 {no such table: no_such_table}} +} + +execsql { + CREATE TABLE t1(a); +} +do_test trigger1-1.1.3 { + catchsql { + CREATE TRIGGER trig UPDATE ON t1 FOR EACH STATEMENT BEGIN + SELECT * FROM sqlite_master; + END; + } +} {1 {near "STATEMENT": syntax error}} +execsql { + CREATE TRIGGER tr1 INSERT ON t1 BEGIN + INSERT INTO t1 values(1); + END; +} +do_test trigger1-1.2.0 { + catchsql { + CREATE TRIGGER IF NOT EXISTS tr1 DELETE ON t1 BEGIN + SELECT * FROM sqlite_master; + END + } +} {0 {}} +do_test trigger1-1.2.1 { + catchsql { + CREATE TRIGGER tr1 DELETE ON t1 BEGIN + SELECT * FROM sqlite_master; + END + } +} {1 {trigger tr1 already exists}} +do_test trigger1-1.2.2 { + catchsql { + CREATE TRIGGER "tr1" DELETE ON t1 BEGIN + SELECT * FROM sqlite_master; + END + } +} {1 {trigger "tr1" already exists}} +do_test trigger1-1.2.3 { + catchsql { + CREATE TRIGGER [tr1] DELETE ON t1 BEGIN + SELECT * FROM sqlite_master; + END + } +} {1 {trigger [tr1] already exists}} + +do_test trigger1-1.3 { + catchsql { + BEGIN; + CREATE TRIGGER tr2 INSERT ON t1 BEGIN + SELECT * from sqlite_master; END; + ROLLBACK; + CREATE TRIGGER tr2 INSERT ON t1 BEGIN + SELECT * from sqlite_master; END; + } +} {0 {}} + +do_test trigger1-1.4 { + catchsql { + DROP TRIGGER IF EXISTS tr1; + CREATE TRIGGER tr1 DELETE ON t1 BEGIN + SELECT * FROM sqlite_master; + END + } +} {0 {}} + +do_test trigger1-1.5 { + execsql { + BEGIN; + DROP TRIGGER tr2; + ROLLBACK; + DROP TRIGGER tr2; + } +} {} + +do_test trigger1-1.6.1 { + catchsql { + DROP TRIGGER IF EXISTS biggles; + } +} {0 {}} + +do_test trigger1-1.6.2 { + catchsql { + DROP TRIGGER biggles; + } +} {1 {no such trigger: biggles}} + +do_test trigger1-1.7 { + catchsql { + DROP TABLE t1; + DROP TRIGGER tr1; + } +} {1 {no such trigger: tr1}} + +ifcapable tempdb { + execsql { + CREATE TEMP TABLE temp_table(a); + } + do_test trigger1-1.8 { + execsql { + CREATE TRIGGER temp_trig UPDATE ON temp_table BEGIN + SELECT * from 
sqlite_master; + END; + SELECT count(*) FROM sqlite_master WHERE name = 'temp_trig'; + } + } {0} +} + +do_test trigger1-1.9 { + catchsql { + CREATE TRIGGER tr1 AFTER UPDATE ON sqlite_master BEGIN + SELECT * FROM sqlite_master; + END; + } +} {1 {cannot create trigger on system table}} + +# Check to make sure that a DELETE statement within the body of +# a trigger does not mess up the DELETE that caused the trigger to +# run in the first place. +# +do_test trigger1-1.10 { + execsql { + create table t1(a,b); + insert into t1 values(1,'a'); + insert into t1 values(2,'b'); + insert into t1 values(3,'c'); + insert into t1 values(4,'d'); + create trigger r1 after delete on t1 for each row begin + delete from t1 WHERE a=old.a+2; + end; + delete from t1 where a=1 OR a=3; + select * from t1; + drop table t1; + } +} {2 b 4 d} + +do_test trigger1-1.11 { + execsql { + create table t1(a,b); + insert into t1 values(1,'a'); + insert into t1 values(2,'b'); + insert into t1 values(3,'c'); + insert into t1 values(4,'d'); + create trigger r1 after update on t1 for each row begin + delete from t1 WHERE a=old.a+2; + end; + update t1 set b='x-' || b where a=1 OR a=3; + select * from t1; + drop table t1; + } +} {1 x-a 2 b 4 d} + +# Ensure that we cannot create INSTEAD OF triggers on tables +do_test trigger1-1.12 { + catchsql { + create table t1(a,b); + create trigger t1t instead of update on t1 for each row begin + delete from t1 WHERE a=old.a+2; + end; + } +} {1 {cannot create INSTEAD OF trigger on table: main.t1}} + +ifcapable view { +# Ensure that we cannot create BEFORE triggers on views +do_test trigger1-1.13 { + catchsql { + create view v1 as select * from t1; + create trigger v1t before update on v1 for each row begin + delete from t1 WHERE a=old.a+2; + end; + } +} {1 {cannot create BEFORE trigger on view: main.v1}} +# Ensure that we cannot create AFTER triggers on views +do_test trigger1-1.14 { + catchsql { + drop view v1; + create view v1 as select * from t1; + create trigger v1t AFTER update on v1 for each row begin + delete from t1 WHERE a=old.a+2; + end; + } +} {1 {cannot create AFTER trigger on view: main.v1}} +} ;# ifcapable view + +# Check for memory leaks in the trigger parser +# +do_test trigger1-2.1 { + catchsql { + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN + SELECT * FROM; -- Syntax error + END; + } +} {1 {near ";": syntax error}} +do_test trigger1-2.2 { + catchsql { + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN + SELECT * FROM t1; + SELECT * FROM; -- Syntax error + END; + } +} {1 {near ";": syntax error}} + +# Create a trigger that refers to a table that might not exist. 
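+# (Aside, not part of the original file: the tests below rely on names inside
+# a trigger body being resolved when a statement that fires the trigger is
+# compiled, not when the trigger is created.  A rough sketch of the pattern,
+# using a throwaway in-memory database so nothing in this suite is disturbed:
+#     sqlite3 sketch :memory:
+#     sketch eval { CREATE TABLE t1(a) }
+#     sketch eval { CREATE TRIGGER r AFTER INSERT ON t1 BEGIN
+#                     INSERT INTO t2 VALUES(new.a); END }     ;# succeeds
+#     catch { sketch eval { INSERT INTO t1 VALUES(1) } } msg  ;# "no such table"
+#     sketch close
+# Note also that a trigger attached to a table in "main" looks for t2 in
+# "main", which is why a TEMP t2 does not satisfy it in trigger-3.2 below.)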
+# +ifcapable tempdb { + do_test trigger1-3.1 { + execsql { + CREATE TEMP TABLE t2(x,y); + } + catchsql { + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN + INSERT INTO t2 VALUES(NEW.a,NEW.b); + END; + } + } {0 {}} + do_test trigger-3.2 { + catchsql { + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t2; + } + } {1 {no such table: main.t2}} + do_test trigger-3.3 { + db close + set rc [catch {sqlite3 db test.db} err] + if {$rc} {lappend rc $err} + set rc + } {0} + do_test trigger-3.4 { + catchsql { + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t2; + } + } {1 {no such table: main.t2}} + do_test trigger-3.5 { + catchsql { + CREATE TEMP TABLE t2(x,y); + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t2; + } + } {1 {no such table: main.t2}} + do_test trigger-3.6 { + catchsql { + DROP TRIGGER r1; + CREATE TEMP TRIGGER r1 AFTER INSERT ON t1 BEGIN + INSERT INTO t2 VALUES(NEW.a,NEW.b); + END; + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t2; + } + } {0 {1 2}} + do_test trigger-3.7 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(x,y); + SELECT * FROM t2; + } + } {} + + # There are two versions of trigger-3.8 and trigger-3.9. One that uses + # compound SELECT statements, and another that does not. + ifcapable compound { + do_test trigger1-3.8 { + execsql { + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1 UNION ALL SELECT * FROM t2; + } + } {1 2 3 4 3 4} + do_test trigger1-3.9 { + db close + sqlite3 db test.db + execsql { + INSERT INTO t1 VALUES(5,6); + SELECT * FROM t1 UNION ALL SELECT * FROM t2; + } + } {1 2 3 4 5 6 3 4} + } ;# ifcapable compound + ifcapable !compound { + do_test trigger1-3.8 { + execsql { + INSERT INTO t1 VALUES(3,4); + SELECT * FROM t1; + SELECT * FROM t2; + } + } {1 2 3 4 3 4} + do_test trigger1-3.9 { + db close + sqlite3 db test.db + execsql { + INSERT INTO t1 VALUES(5,6); + SELECT * FROM t1; + SELECT * FROM t2; + } + } {1 2 3 4 5 6 3 4} + } ;# ifcapable !compound + + do_test trigger1-4.1 { + execsql { + CREATE TEMP TRIGGER r1 BEFORE INSERT ON t1 BEGIN + INSERT INTO t2 VALUES(NEW.a,NEW.b); + END; + INSERT INTO t1 VALUES(7,8); + SELECT * FROM t2; + } + } {3 4 7 8} + do_test trigger1-4.2 { + sqlite3 db2 test.db + execsql { + INSERT INTO t1 VALUES(9,10); + } db2; + db2 close + execsql { + SELECT * FROM t2; + } + } {3 4 7 8} + do_test trigger1-4.3 { + execsql { + DROP TABLE t1; + SELECT * FROM t2; + }; + } {3 4 7 8} + do_test trigger1-4.4 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM t2; + }; + } {3 4 7 8} +} else { + execsql { + CREATE TABLE t2(x,y); + DROP TABLE t1; + INSERT INTO t2 VALUES(3, 4); + INSERT INTO t2 VALUES(7, 8); + } +} + + +integrity_check trigger1-5.1 + +# Create a trigger with the same name as a table. Make sure the +# trigger works. Then drop the trigger. Make sure the table is +# still there. 
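+# (Aside, not part of the original file: a table and a trigger are separate
+# rows in sqlite_master, so "t2" may legitimately name both at once; the
+# checks below read back both the "table t2" and "trigger t2" rows and then
+# confirm that DROP TRIGGER t2 removes only the trigger row.)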
+# +set view_v1 {} +ifcapable view { + set view_v1 {view v1} +} +do_test trigger1-6.1 { + execsql {SELECT type, name FROM sqlite_master} +} [concat $view_v1 {table t2}] +do_test trigger1-6.2 { + execsql { + CREATE TRIGGER t2 BEFORE DELETE ON t2 BEGIN + SELECT RAISE(ABORT,'deletes are not allows'); + END; + SELECT type, name FROM sqlite_master; + } +} [concat $view_v1 {table t2 trigger t2}] +do_test trigger1-6.3 { + catchsql {DELETE FROM t2} +} {1 {deletes are not allows}} +do_test trigger1-6.4 { + execsql {SELECT * FROM t2} +} {3 4 7 8} +do_test trigger1-6.5 { + db close + sqlite3 db test.db + execsql {SELECT type, name FROM sqlite_master} +} [concat $view_v1 {table t2 trigger t2}] +do_test trigger1-6.6 { + execsql { + DROP TRIGGER t2; + SELECT type, name FROM sqlite_master; + } +} [concat $view_v1 {table t2}] +do_test trigger1-6.7 { + execsql {SELECT * FROM t2} +} {3 4 7 8} +do_test trigger1-6.8 { + db close + sqlite3 db test.db + execsql {SELECT * FROM t2} +} {3 4 7 8} + +integrity_check trigger-7.1 + +# Check to make sure the name of a trigger can be quoted so that keywords +# can be used as trigger names. Ticket #468 +# +do_test trigger1-8.1 { + execsql { + CREATE TRIGGER 'trigger' AFTER INSERT ON t2 BEGIN SELECT 1; END; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {trigger} +do_test trigger1-8.2 { + execsql { + DROP TRIGGER 'trigger'; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {} +do_test trigger1-8.3 { + execsql { + CREATE TRIGGER "trigger" AFTER INSERT ON t2 BEGIN SELECT 1; END; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {trigger} +do_test trigger1-8.4 { + execsql { + DROP TRIGGER "trigger"; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {} +do_test trigger1-8.5 { + execsql { + CREATE TRIGGER [trigger] AFTER INSERT ON t2 BEGIN SELECT 1; END; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {trigger} +do_test trigger1-8.6 { + execsql { + DROP TRIGGER [trigger]; + SELECT name FROM sqlite_master WHERE type='trigger'; + } +} {} + +ifcapable conflict { + # Make sure REPLACE works inside of triggers. + # + # There are two versions of trigger-9.1 and trigger-9.2. One that uses + # compound SELECT statements, and another that does not. + ifcapable compound { + do_test trigger1-9.1 { + execsql { + CREATE TABLE t3(a,b); + CREATE TABLE t4(x UNIQUE, b); + CREATE TRIGGER r34 AFTER INSERT ON t3 BEGIN + REPLACE INTO t4 VALUES(new.a,new.b); + END; + INSERT INTO t3 VALUES(1,2); + SELECT * FROM t3 UNION ALL SELECT 99, 99 UNION ALL SELECT * FROM t4; + } + } {1 2 99 99 1 2} + do_test trigger1-9.2 { + execsql { + INSERT INTO t3 VALUES(1,3); + SELECT * FROM t3 UNION ALL SELECT 99, 99 UNION ALL SELECT * FROM t4; + } + } {1 2 1 3 99 99 1 3} + } else { + do_test trigger1-9.1 { + execsql { + CREATE TABLE t3(a,b); + CREATE TABLE t4(x UNIQUE, b); + CREATE TRIGGER r34 AFTER INSERT ON t3 BEGIN + REPLACE INTO t4 VALUES(new.a,new.b); + END; + INSERT INTO t3 VALUES(1,2); + SELECT * FROM t3; SELECT 99, 99; SELECT * FROM t4; + } + } {1 2 99 99 1 2} + do_test trigger1-9.2 { + execsql { + INSERT INTO t3 VALUES(1,3); + SELECT * FROM t3; SELECT 99, 99; SELECT * FROM t4; + } + } {1 2 1 3 99 99 1 3} + } + execsql { + DROP TABLE t3; + DROP TABLE t4; + } +} + + +# Ticket #764. At one stage TEMP triggers would fail to re-install when the +# schema was reloaded. The following tests ensure that TEMP triggers are +# correctly re-installed. 
+# +# Also verify that references within trigger programs are resolved at +# statement compile time, not trigger installation time. This means, for +# example, that you can drop and re-create tables referenced by triggers. +ifcapable tempdb { + do_test trigger1-10.0 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + } + } {} + do_test trigger1-10.1 { + execsql { + CREATE TABLE main.t4(a, b, c); + CREATE TABLE temp.t4(a, b, c); + CREATE TABLE aux.t4(a, b, c); + CREATE TABLE insert_log(db, a, b, c); + } + } {} + do_test trigger1-10.2 { + execsql { + CREATE TEMP TRIGGER trig1 AFTER INSERT ON main.t4 BEGIN + INSERT INTO insert_log VALUES('main', new.a, new.b, new.c); + END; + CREATE TEMP TRIGGER trig2 AFTER INSERT ON temp.t4 BEGIN + INSERT INTO insert_log VALUES('temp', new.a, new.b, new.c); + END; + CREATE TEMP TRIGGER trig3 AFTER INSERT ON aux.t4 BEGIN + INSERT INTO insert_log VALUES('aux', new.a, new.b, new.c); + END; + } + } {} + do_test trigger1-10.3 { + execsql { + INSERT INTO main.t4 VALUES(1, 2, 3); + INSERT INTO temp.t4 VALUES(4, 5, 6); + INSERT INTO aux.t4 VALUES(7, 8, 9); + } + } {} + do_test trigger1-10.4 { + execsql { + SELECT * FROM insert_log; + } + } {main 1 2 3 temp 4 5 6 aux 7 8 9} + do_test trigger1-10.5 { + execsql { + BEGIN; + INSERT INTO main.t4 VALUES(1, 2, 3); + INSERT INTO temp.t4 VALUES(4, 5, 6); + INSERT INTO aux.t4 VALUES(7, 8, 9); + ROLLBACK; + } + } {} + do_test trigger1-10.6 { + execsql { + SELECT * FROM insert_log; + } + } {main 1 2 3 temp 4 5 6 aux 7 8 9} + do_test trigger1-10.7 { + execsql { + DELETE FROM insert_log; + INSERT INTO main.t4 VALUES(11, 12, 13); + INSERT INTO temp.t4 VALUES(14, 15, 16); + INSERT INTO aux.t4 VALUES(17, 18, 19); + } + } {} + do_test trigger1-10.8 { + execsql { + SELECT * FROM insert_log; + } + } {main 11 12 13 temp 14 15 16 aux 17 18 19} + do_test trigger1-10.8 { + # Drop and re-create the insert_log table in a different database. Note + # that we can change the column names because the trigger programs don't + # use them explicitly. + execsql { + DROP TABLE insert_log; + CREATE TABLE aux.insert_log(db, d, e, f); + } + } {} + do_test trigger1-10.10 { + execsql { + INSERT INTO main.t4 VALUES(21, 22, 23); + INSERT INTO temp.t4 VALUES(24, 25, 26); + INSERT INTO aux.t4 VALUES(27, 28, 29); + } + } {} + do_test trigger1-10.11 { + execsql { + SELECT * FROM insert_log; + } + } {main 21 22 23 temp 24 25 26 aux 27 28 29} +} + +do_test trigger1-11.1 { + catchsql {SELECT raise(abort,'message');} +} {1 {RAISE() may only be used within a trigger-program}} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger2.test new file mode 100644 index 0000000..b150c41 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger2.test @@ -0,0 +1,742 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Regression testing of FOR EACH ROW table triggers +# +# 1. Trigger execution order tests. +# These tests ensure that BEFORE and AFTER triggers are fired at the correct +# times relative to each other and the triggering statement. +# +# trigger2-1.1.*: ON UPDATE trigger execution model. 
+# trigger2-1.2.*: DELETE trigger execution model. +# trigger2-1.3.*: INSERT trigger execution model. +# +# 2. Trigger program execution tests. +# These tests ensure that trigger programs execute correctly (ie. that a +# trigger program can correctly execute INSERT, UPDATE, DELETE * SELECT +# statements, and combinations thereof). +# +# 3. Selective trigger execution +# This tests that conditional triggers (ie. UPDATE OF triggers and triggers +# with WHEN clauses) are fired only fired when they are supposed to be. +# +# trigger2-3.1: UPDATE OF triggers +# trigger2-3.2: WHEN clause +# +# 4. Cascaded trigger execution +# Tests that trigger-programs may cause other triggers to fire. Also that a +# trigger-program is never executed recursively. +# +# trigger2-4.1: Trivial cascading trigger +# trigger2-4.2: Trivial recursive trigger handling +# +# 5. Count changes behaviour. +# Verify that rows altered by triggers are not included in the return value +# of the "count changes" interface. +# +# 6. ON CONFLICT clause handling +# trigger2-6.1[a-f]: INSERT statements +# trigger2-6.2[a-f]: UPDATE statements +# +# 7. & 8. Triggers on views fire correctly. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +# 1. +ifcapable subquery { + set ii 0 + set tbl_definitions [list \ + {CREATE TABLE tbl (a, b);} \ + {CREATE TABLE tbl (a INTEGER PRIMARY KEY, b);} \ + {CREATE TABLE tbl (a, b PRIMARY KEY);} \ + {CREATE TABLE tbl (a, b); CREATE INDEX tbl_idx ON tbl(b);} \ + ] + ifcapable tempdb { + lappend tbl_definitions \ + {CREATE TEMP TABLE tbl (a, b); CREATE INDEX tbl_idx ON tbl(b);} + lappend tbl_definitions {CREATE TEMP TABLE tbl (a, b);} + lappend tbl_definitions \ + {CREATE TEMPORARY TABLE tbl (a INTEGER PRIMARY KEY, b);} + } + foreach tbl_defn $tbl_definitions { + incr ii + catchsql { DROP INDEX tbl_idx; } + catchsql { + DROP TABLE rlog; + DROP TABLE clog; + DROP TABLE tbl; + DROP TABLE other_tbl; + } + + execsql $tbl_defn + + execsql { + INSERT INTO tbl VALUES(1, 2); + INSERT INTO tbl VALUES(3, 4); + + CREATE TABLE rlog (idx, old_a, old_b, db_sum_a, db_sum_b, new_a, new_b); + CREATE TABLE clog (idx, old_a, old_b, db_sum_a, db_sum_b, new_a, new_b); + + CREATE TRIGGER before_update_row BEFORE UPDATE ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + old.a, old.b, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + new.a, new.b); + END; + + CREATE TRIGGER after_update_row AFTER UPDATE ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + old.a, old.b, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + new.a, new.b); + END; + + CREATE TRIGGER conditional_update_row AFTER UPDATE ON tbl FOR EACH ROW + WHEN old.a = 1 + BEGIN + INSERT INTO clog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM clog), + old.a, old.b, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + new.a, new.b); + END; + } + + do_test trigger2-1.$ii.1 { + set r {} + foreach v [execsql { + UPDATE tbl SET a = a * 10, b = b * 10; + SELECT * FROM rlog ORDER BY idx; + SELECT * FROM clog ORDER BY idx; + }] { + lappend r [expr {int($v)}] + } + set r + } [list 1 1 2 4 6 10 20 \ + 2 1 2 13 24 10 20 \ + 3 3 4 13 24 30 40 \ + 4 3 4 40 60 30 40 \ + 1 1 2 13 24 10 20 ] + + execsql { + DELETE FROM rlog; + DELETE FROM tbl; + INSERT INTO tbl VALUES (100, 100); + INSERT INTO tbl VALUES (300, 200); + CREATE 
TRIGGER delete_before_row BEFORE DELETE ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + old.a, old.b, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + 0, 0); + END; + + CREATE TRIGGER delete_after_row AFTER DELETE ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + old.a, old.b, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + 0, 0); + END; + } + do_test trigger2-1.$ii.2 { + set r {} + foreach v [execsql { + DELETE FROM tbl; + SELECT * FROM rlog; + }] { + lappend r [expr {int($v)}] + } + set r + } [list 1 100 100 400 300 0 0 \ + 2 100 100 300 200 0 0 \ + 3 300 200 300 200 0 0 \ + 4 300 200 0 0 0 0 ] + + execsql { + DELETE FROM rlog; + CREATE TRIGGER insert_before_row BEFORE INSERT ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + 0, 0, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + new.a, new.b); + END; + + CREATE TRIGGER insert_after_row AFTER INSERT ON tbl FOR EACH ROW + BEGIN + INSERT INTO rlog VALUES ( (SELECT coalesce(max(idx),0) + 1 FROM rlog), + 0, 0, + (SELECT coalesce(sum(a),0) FROM tbl), + (SELECT coalesce(sum(b),0) FROM tbl), + new.a, new.b); + END; + } + do_test trigger2-1.$ii.3 { + execsql { + + CREATE TABLE other_tbl(a, b); + INSERT INTO other_tbl VALUES(1, 2); + INSERT INTO other_tbl VALUES(3, 4); + -- INSERT INTO tbl SELECT * FROM other_tbl; + INSERT INTO tbl VALUES(5, 6); + DROP TABLE other_tbl; + + SELECT * FROM rlog; + } + } [list 1 0 0 0 0 5 6 \ + 2 0 0 5 6 5 6 ] + + integrity_check trigger2-1.$ii.4 + } + catchsql { + DROP TABLE rlog; + DROP TABLE clog; + DROP TABLE tbl; + DROP TABLE other_tbl; + } +} + +# 2. 
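+# (Aside, not part of the original file: the nested loops below build one
+# test per combination of trigger program and triggering statement.  As a
+# sketch of a single generated case: for the statement
+#     UPDATE tbl SET c = 10 WHERE a = 1;
+# paired with the program
+#     INSERT INTO log VALUES(new.c, 2, 3);
+# the BEFORE variant installs
+#     CREATE TRIGGER the_trigger BEFORE UPDATE ON tbl BEGIN
+#       INSERT INTO log VALUES(new.c, 2, 3);
+#     END;
+# and the expected rows are computed by running the "cooked" program
+# INSERT INTO log VALUES(10, 2, 3); by hand ahead of the UPDATE, then
+# comparing the contents of tbl and log from the two runs.)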
+set ii 0 +foreach tr_program { + {UPDATE tbl SET b = old.b;} + {INSERT INTO log VALUES(new.c, 2, 3);} + {DELETE FROM log WHERE a = 1;} + {INSERT INTO tbl VALUES(500, new.b * 10, 700); + UPDATE tbl SET c = old.c; + DELETE FROM log;} + {INSERT INTO log select * from tbl;} +} { + foreach test_varset [ list \ + { + set statement {UPDATE tbl SET c = 10 WHERE a = 1;} + set prep {INSERT INTO tbl VALUES(1, 2, 3);} + set newC 10 + set newB 2 + set newA 1 + set oldA 1 + set oldB 2 + set oldC 3 + } \ + { + set statement {DELETE FROM tbl WHERE a = 1;} + set prep {INSERT INTO tbl VALUES(1, 2, 3);} + set oldA 1 + set oldB 2 + set oldC 3 + } \ + { + set statement {INSERT INTO tbl VALUES(1, 2, 3);} + set newA 1 + set newB 2 + set newC 3 + } + ] \ + { + set statement {} + set prep {} + set newA {''} + set newB {''} + set newC {''} + set oldA {''} + set oldB {''} + set oldC {''} + + incr ii + + eval $test_varset + + set statement_type [string range $statement 0 5] + set tr_program_fixed $tr_program + if {$statement_type == "DELETE"} { + regsub -all new\.a $tr_program_fixed {''} tr_program_fixed + regsub -all new\.b $tr_program_fixed {''} tr_program_fixed + regsub -all new\.c $tr_program_fixed {''} tr_program_fixed + } + if {$statement_type == "INSERT"} { + regsub -all old\.a $tr_program_fixed {''} tr_program_fixed + regsub -all old\.b $tr_program_fixed {''} tr_program_fixed + regsub -all old\.c $tr_program_fixed {''} tr_program_fixed + } + + + set tr_program_cooked $tr_program + regsub -all new\.a $tr_program_cooked $newA tr_program_cooked + regsub -all new\.b $tr_program_cooked $newB tr_program_cooked + regsub -all new\.c $tr_program_cooked $newC tr_program_cooked + regsub -all old\.a $tr_program_cooked $oldA tr_program_cooked + regsub -all old\.b $tr_program_cooked $oldB tr_program_cooked + regsub -all old\.c $tr_program_cooked $oldC tr_program_cooked + + catchsql { + DROP TABLE tbl; + DROP TABLE log; + } + + execsql { + CREATE TABLE tbl(a PRIMARY KEY, b, c); + CREATE TABLE log(a, b, c); + } + + set query {SELECT * FROM tbl; SELECT * FROM log;} + set prep "$prep; INSERT INTO log VALUES(1, 2, 3);\ + INSERT INTO log VALUES(10, 20, 30);" + +# Check execution of BEFORE programs: + + set before_data [ execsql "$prep $tr_program_cooked $statement $query" ] + + execsql "DELETE FROM tbl; DELETE FROM log; $prep"; + execsql "CREATE TRIGGER the_trigger BEFORE [string range $statement 0 6]\ + ON tbl BEGIN $tr_program_fixed END;" + + do_test trigger2-2.$ii-before "execsql {$statement $query}" $before_data + + execsql "DROP TRIGGER the_trigger;" + execsql "DELETE FROM tbl; DELETE FROM log;" + +# Check execution of AFTER programs + set after_data [ execsql "$prep $statement $tr_program_cooked $query" ] + + execsql "DELETE FROM tbl; DELETE FROM log; $prep"; + execsql "CREATE TRIGGER the_trigger AFTER [string range $statement 0 6]\ + ON tbl BEGIN $tr_program_fixed END;" + + do_test trigger2-2.$ii-after "execsql {$statement $query}" $after_data + execsql "DROP TRIGGER the_trigger;" + + integrity_check trigger2-2.$ii-integrity + } +} +catchsql { + DROP TABLE tbl; + DROP TABLE log; +} + +# 3. 
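+# (Aside, not part of the original file: an UPDATE OF c, d trigger fires once
+# for each affected row whenever the UPDATE statement's SET list names c or d,
+# regardless of whether the stored value actually changes; updates that touch
+# only other columns leave it silent.  That is why the four UPDATEs below are
+# annotated 2, 0, 1 and 0, for a final log count of 3.)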
+ +# trigger2-3.1: UPDATE OF triggers +execsql { + CREATE TABLE tbl (a, b, c, d); + CREATE TABLE log (a); + INSERT INTO log VALUES (0); + INSERT INTO tbl VALUES (0, 0, 0, 0); + INSERT INTO tbl VALUES (1, 0, 0, 0); + CREATE TRIGGER tbl_after_update_cd BEFORE UPDATE OF c, d ON tbl + BEGIN + UPDATE log SET a = a + 1; + END; +} +do_test trigger2-3.1 { + execsql { + UPDATE tbl SET b = 1, c = 10; -- 2 + UPDATE tbl SET b = 10; -- 0 + UPDATE tbl SET d = 4 WHERE a = 0; --1 + UPDATE tbl SET a = 4, b = 10; --0 + SELECT * FROM log; + } +} {3} +execsql { + DROP TABLE tbl; + DROP TABLE log; +} + +# trigger2-3.2: WHEN clause +set when_triggers [list {t1 BEFORE INSERT ON tbl WHEN new.a > 20}] +ifcapable subquery { + lappend when_triggers \ + {t2 BEFORE INSERT ON tbl WHEN (SELECT count(*) FROM tbl) = 0} +} + +execsql { + CREATE TABLE tbl (a, b, c, d); + CREATE TABLE log (a); + INSERT INTO log VALUES (0); +} + +foreach trig $when_triggers { + execsql "CREATE TRIGGER $trig BEGIN UPDATE log set a = a + 1; END;" +} + +ifcapable subquery { + set t232 {1 0 1} +} else { + set t232 {0 0 1} +} +do_test trigger2-3.2 { + execsql { + + INSERT INTO tbl VALUES(0, 0, 0, 0); -- 1 (ifcapable subquery) + SELECT * FROM log; + UPDATE log SET a = 0; + + INSERT INTO tbl VALUES(0, 0, 0, 0); -- 0 + SELECT * FROM log; + UPDATE log SET a = 0; + + INSERT INTO tbl VALUES(200, 0, 0, 0); -- 1 + SELECT * FROM log; + UPDATE log SET a = 0; + } +} $t232 +execsql { + DROP TABLE tbl; + DROP TABLE log; +} +integrity_check trigger2-3.3 + +# Simple cascaded trigger +execsql { + CREATE TABLE tblA(a, b); + CREATE TABLE tblB(a, b); + CREATE TABLE tblC(a, b); + + CREATE TRIGGER tr1 BEFORE INSERT ON tblA BEGIN + INSERT INTO tblB values(new.a, new.b); + END; + + CREATE TRIGGER tr2 BEFORE INSERT ON tblB BEGIN + INSERT INTO tblC values(new.a, new.b); + END; +} +do_test trigger2-4.1 { + execsql { + INSERT INTO tblA values(1, 2); + SELECT * FROM tblA; + SELECT * FROM tblB; + SELECT * FROM tblC; + } +} {1 2 1 2 1 2} +execsql { + DROP TABLE tblA; + DROP TABLE tblB; + DROP TABLE tblC; +} + +# Simple recursive trigger +execsql { + CREATE TABLE tbl(a, b, c); + CREATE TRIGGER tbl_trig BEFORE INSERT ON tbl + BEGIN + INSERT INTO tbl VALUES (new.a, new.b, new.c); + END; +} +do_test trigger2-4.2 { + execsql { + INSERT INTO tbl VALUES (1, 2, 3); + select * from tbl; + } +} {1 2 3 1 2 3} +execsql { + DROP TABLE tbl; +} + +# 5. 
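+# (Aside, not part of the original file: sqlite3_changes() -- surfaced here
+# as [db changes] -- counts only rows changed directly by the outermost
+# statement, not rows touched from inside trigger programs, so the single
+# INSERT below is expected to report 1 even though its trigger inserts,
+# updates and deletes several more rows.)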
+execsql { + CREATE TABLE tbl(a, b, c); + CREATE TRIGGER tbl_trig BEFORE INSERT ON tbl + BEGIN + INSERT INTO tbl VALUES (1, 2, 3); + INSERT INTO tbl VALUES (2, 2, 3); + UPDATE tbl set b = 10 WHERE a = 1; + DELETE FROM tbl WHERE a = 1; + DELETE FROM tbl; + END; +} +do_test trigger2-5 { + execsql { + INSERT INTO tbl VALUES(100, 200, 300); + } + db changes +} {1} +execsql { + DROP TABLE tbl; +} + +ifcapable conflict { + # Handling of ON CONFLICT by INSERT statements inside triggers + execsql { + CREATE TABLE tbl (a primary key, b, c); + CREATE TRIGGER ai_tbl AFTER INSERT ON tbl BEGIN + INSERT OR IGNORE INTO tbl values (new.a, 0, 0); + END; + } + do_test trigger2-6.1a { + execsql { + BEGIN; + INSERT INTO tbl values (1, 2, 3); + SELECT * from tbl; + } + } {1 2 3} + do_test trigger2-6.1b { + catchsql { + INSERT OR ABORT INTO tbl values (2, 2, 3); + } + } {1 {column a is not unique}} + do_test trigger2-6.1c { + execsql { + SELECT * from tbl; + } + } {1 2 3} + do_test trigger2-6.1d { + catchsql { + INSERT OR FAIL INTO tbl values (2, 2, 3); + } + } {1 {column a is not unique}} + do_test trigger2-6.1e { + execsql { + SELECT * from tbl; + } + } {1 2 3 2 2 3} + do_test trigger2-6.1f { + execsql { + INSERT OR REPLACE INTO tbl values (2, 2, 3); + SELECT * from tbl; + } + } {1 2 3 2 0 0} + do_test trigger2-6.1g { + catchsql { + INSERT OR ROLLBACK INTO tbl values (3, 2, 3); + } + } {1 {column a is not unique}} + do_test trigger2-6.1h { + execsql { + SELECT * from tbl; + } + } {} + execsql {DELETE FROM tbl} + + + # Handling of ON CONFLICT by UPDATE statements inside triggers + execsql { + INSERT INTO tbl values (4, 2, 3); + INSERT INTO tbl values (6, 3, 4); + CREATE TRIGGER au_tbl AFTER UPDATE ON tbl BEGIN + UPDATE OR IGNORE tbl SET a = new.a, c = 10; + END; + } + do_test trigger2-6.2a { + execsql { + BEGIN; + UPDATE tbl SET a = 1 WHERE a = 4; + SELECT * from tbl; + } + } {1 2 10 6 3 4} + do_test trigger2-6.2b { + catchsql { + UPDATE OR ABORT tbl SET a = 4 WHERE a = 1; + } + } {1 {column a is not unique}} + do_test trigger2-6.2c { + execsql { + SELECT * from tbl; + } + } {1 2 10 6 3 4} + do_test trigger2-6.2d { + catchsql { + UPDATE OR FAIL tbl SET a = 4 WHERE a = 1; + } + } {1 {column a is not unique}} + do_test trigger2-6.2e { + execsql { + SELECT * from tbl; + } + } {4 2 10 6 3 4} + do_test trigger2-6.2f.1 { + execsql { + UPDATE OR REPLACE tbl SET a = 1 WHERE a = 4; + SELECT * from tbl; + } + } {1 3 10} + do_test trigger2-6.2f.2 { + execsql { + INSERT INTO tbl VALUES (2, 3, 4); + SELECT * FROM tbl; + } + } {1 3 10 2 3 4} + do_test trigger2-6.2g { + catchsql { + UPDATE OR ROLLBACK tbl SET a = 4 WHERE a = 1; + } + } {1 {column a is not unique}} + do_test trigger2-6.2h { + execsql { + SELECT * from tbl; + } + } {4 2 3 6 3 4} + execsql { + DROP TABLE tbl; + } +} ; # ifcapable conflict + +# 7. 
Triggers on views +ifcapable view { + +do_test trigger2-7.1 { + execsql { + CREATE TABLE ab(a, b); + CREATE TABLE cd(c, d); + INSERT INTO ab VALUES (1, 2); + INSERT INTO ab VALUES (0, 0); + INSERT INTO cd VALUES (3, 4); + + CREATE TABLE tlog(ii INTEGER PRIMARY KEY, + olda, oldb, oldc, oldd, newa, newb, newc, newd); + + CREATE VIEW abcd AS SELECT a, b, c, d FROM ab, cd; + + CREATE TRIGGER before_update INSTEAD OF UPDATE ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + old.a, old.b, old.c, old.d, new.a, new.b, new.c, new.d); + END; + CREATE TRIGGER after_update INSTEAD OF UPDATE ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + old.a, old.b, old.c, old.d, new.a, new.b, new.c, new.d); + END; + + CREATE TRIGGER before_delete INSTEAD OF DELETE ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + old.a, old.b, old.c, old.d, 0, 0, 0, 0); + END; + CREATE TRIGGER after_delete INSTEAD OF DELETE ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + old.a, old.b, old.c, old.d, 0, 0, 0, 0); + END; + + CREATE TRIGGER before_insert INSTEAD OF INSERT ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + 0, 0, 0, 0, new.a, new.b, new.c, new.d); + END; + CREATE TRIGGER after_insert INSTEAD OF INSERT ON abcd BEGIN + INSERT INTO tlog VALUES(NULL, + 0, 0, 0, 0, new.a, new.b, new.c, new.d); + END; + } +} {}; + +do_test trigger2-7.2 { + execsql { + UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1; + DELETE FROM abcd WHERE a = 1; + INSERT INTO abcd VALUES(10, 20, 30, 40); + SELECT * FROM tlog; + } +} [ list 1 1 2 3 4 100 25 3 4 \ + 2 1 2 3 4 100 25 3 4 \ + 3 1 2 3 4 0 0 0 0 \ + 4 1 2 3 4 0 0 0 0 \ + 5 0 0 0 0 10 20 30 40 \ + 6 0 0 0 0 10 20 30 40 ] + +do_test trigger2-7.3 { + execsql { + DELETE FROM tlog; + INSERT INTO abcd VALUES(10, 20, 30, 40); + UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1; + DELETE FROM abcd WHERE a = 1; + SELECT * FROM tlog; + } +} [ list \ + 1 0 0 0 0 10 20 30 40 \ + 2 0 0 0 0 10 20 30 40 \ + 3 1 2 3 4 100 25 3 4 \ + 4 1 2 3 4 100 25 3 4 \ + 5 1 2 3 4 0 0 0 0 \ + 6 1 2 3 4 0 0 0 0 \ +] +do_test trigger2-7.4 { + execsql { + DELETE FROM tlog; + DELETE FROM abcd WHERE a = 1; + INSERT INTO abcd VALUES(10, 20, 30, 40); + UPDATE abcd SET a = 100, b = 5*5 WHERE a = 1; + SELECT * FROM tlog; + } +} [ list \ + 1 1 2 3 4 0 0 0 0 \ + 2 1 2 3 4 0 0 0 0 \ + 3 0 0 0 0 10 20 30 40 \ + 4 0 0 0 0 10 20 30 40 \ + 5 1 2 3 4 100 25 3 4 \ + 6 1 2 3 4 100 25 3 4 \ +] + +do_test trigger2-8.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + CREATE VIEW v1 AS + SELECT a+b AS x, b+c AS y, a+c AS z FROM t1; + SELECT * FROM v1; + } +} {3 5 4} +do_test trigger2-8.2 { + execsql { + CREATE TABLE v1log(a,b,c,d,e,f); + CREATE TRIGGER r1 INSTEAD OF DELETE ON v1 BEGIN + INSERT INTO v1log VALUES(OLD.x,NULL,OLD.y,NULL,OLD.z,NULL); + END; + DELETE FROM v1 WHERE x=1; + SELECT * FROM v1log; + } +} {} +do_test trigger2-8.3 { + execsql { + DELETE FROM v1 WHERE x=3; + SELECT * FROM v1log; + } +} {3 {} 5 {} 4 {}} +do_test trigger2-8.4 { + execsql { + INSERT INTO t1 VALUES(4,5,6); + DELETE FROM v1log; + DELETE FROM v1 WHERE y=11; + SELECT * FROM v1log; + } +} {9 {} 11 {} 10 {}} +do_test trigger2-8.5 { + execsql { + CREATE TRIGGER r2 INSTEAD OF INSERT ON v1 BEGIN + INSERT INTO v1log VALUES(NULL,NEW.x,NULL,NEW.y,NULL,NEW.z); + END; + DELETE FROM v1log; + INSERT INTO v1 VALUES(1,2,3); + SELECT * FROM v1log; + } +} {{} 1 {} 2 {} 3} +do_test trigger2-8.6 { + execsql { + CREATE TRIGGER r3 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO v1log VALUES(OLD.x,NEW.x,OLD.y,NEW.y,OLD.z,NEW.z); + END; + DELETE FROM v1log; + UPDATE v1 SET x=x+100, 
y=y+200, z=z+300; + SELECT * FROM v1log; + } +} {3 103 5 205 4 304 9 109 11 211 10 310} + +} ;# ifcapable view + +integrity_check trigger2-9.9 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger3.test new file mode 100644 index 0000000..d08ac28 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger3.test @@ -0,0 +1,176 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests the RAISE() function. +# + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +# Test that we can cause ROLLBACK, FAIL and ABORT correctly +# catchsql { DROP TABLE tbl; } +catchsql { CREATE TABLE tbl (a, b, c) } + +execsql { + CREATE TRIGGER before_tbl_insert BEFORE INSERT ON tbl BEGIN SELECT CASE + WHEN (new.a = 4) THEN RAISE(IGNORE) END; + END; + + CREATE TRIGGER after_tbl_insert AFTER INSERT ON tbl BEGIN SELECT CASE + WHEN (new.a = 1) THEN RAISE(ABORT, 'Trigger abort') + WHEN (new.a = 2) THEN RAISE(FAIL, 'Trigger fail') + WHEN (new.a = 3) THEN RAISE(ROLLBACK, 'Trigger rollback') END; + END; +} +# ABORT +do_test trigger3-1.1 { + catchsql { + BEGIN; + INSERT INTO tbl VALUES (5, 5, 6); + INSERT INTO tbl VALUES (1, 5, 6); + } +} {1 {Trigger abort}} +do_test trigger3-1.2 { + execsql { + SELECT * FROM tbl; + ROLLBACK; + } +} {5 5 6} +do_test trigger3-1.3 { + execsql {SELECT * FROM tbl} +} {} + +# FAIL +do_test trigger3-2.1 { + catchsql { + BEGIN; + INSERT INTO tbl VALUES (5, 5, 6); + INSERT INTO tbl VALUES (2, 5, 6); + } +} {1 {Trigger fail}} +do_test trigger3-2.2 { + execsql { + SELECT * FROM tbl; + ROLLBACK; + } +} {5 5 6 2 5 6} +# ROLLBACK +do_test trigger3-3.1 { + catchsql { + BEGIN; + INSERT INTO tbl VALUES (5, 5, 6); + INSERT INTO tbl VALUES (3, 5, 6); + } +} {1 {Trigger rollback}} +do_test trigger3-3.2 { + execsql { + SELECT * FROM tbl; + } +} {} +# IGNORE +do_test trigger3-4.1 { + catchsql { + BEGIN; + INSERT INTO tbl VALUES (5, 5, 6); + INSERT INTO tbl VALUES (4, 5, 6); + } +} {0 {}} +do_test trigger3-4.2 { + execsql { + SELECT * FROM tbl; + ROLLBACK; + } +} {5 5 6} + +# Check that we can also do RAISE(IGNORE) for UPDATE and DELETE +execsql {DROP TABLE tbl;} +execsql {CREATE TABLE tbl (a, b, c);} +execsql {INSERT INTO tbl VALUES(1, 2, 3);} +execsql {INSERT INTO tbl VALUES(4, 5, 6);} +execsql { + CREATE TRIGGER before_tbl_update BEFORE UPDATE ON tbl BEGIN + SELECT CASE WHEN (old.a = 1) THEN RAISE(IGNORE) END; + END; + + CREATE TRIGGER before_tbl_delete BEFORE DELETE ON tbl BEGIN + SELECT CASE WHEN (old.a = 1) THEN RAISE(IGNORE) END; + END; +} +do_test trigger3-5.1 { + execsql { + UPDATE tbl SET c = 10; + SELECT * FROM tbl; + } +} {1 2 3 4 5 10} +do_test trigger3-5.2 { + execsql { + DELETE FROM tbl; + SELECT * FROM tbl; + } +} {1 2 3} + +# Check that RAISE(IGNORE) works correctly for nested triggers: +execsql {CREATE TABLE tbl2(a, b, c)} +execsql { + CREATE TRIGGER after_tbl2_insert AFTER INSERT ON tbl2 BEGIN + UPDATE tbl SET c = 10; + INSERT INTO tbl2 VALUES (new.a, new.b, new.c); + END; +} +do_test trigger3-6 { + execsql { + INSERT INTO tbl2 VALUES (1, 2, 3); + SELECT * FROM tbl2; + SELECT * FROM tbl; + } +} {1 2 3 1 2 3 1 2 3} + +# Check that things also 
work for view-triggers + +ifcapable view { + +execsql {CREATE VIEW tbl_view AS SELECT * FROM tbl} +execsql { + CREATE TRIGGER tbl_view_insert INSTEAD OF INSERT ON tbl_view BEGIN + SELECT CASE WHEN (new.a = 1) THEN RAISE(ROLLBACK, 'View rollback') + WHEN (new.a = 2) THEN RAISE(IGNORE) + WHEN (new.a = 3) THEN RAISE(ABORT, 'View abort') END; + END; +} + +do_test trigger3-7.1 { + catchsql { + INSERT INTO tbl_view VALUES(1, 2, 3); + } +} {1 {View rollback}} +do_test trigger3-7.2 { + catchsql { + INSERT INTO tbl_view VALUES(2, 2, 3); + } +} {0 {}} +do_test trigger3-7.3 { + catchsql { + INSERT INTO tbl_view VALUES(3, 2, 3); + } +} {1 {View abort}} + +} ;# ifcapable view + +integrity_check trigger3-8.1 + +catchsql { DROP TABLE tbl; } +catchsql { DROP TABLE tbl2; } +catchsql { DROP VIEW tbl_view; } + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger4.test new file mode 100644 index 0000000..0e44ce6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger4.test @@ -0,0 +1,200 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests the triggers of views. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If either views or triggers are disabled in this build, omit this file. +ifcapable {!trigger || !view} { + finish_test + return +} + +do_test trigger4-1.1 { + execsql { + create table test1(id integer primary key,a); + create table test2(id integer,b); + create view test as + select test1.id as id,a as a,b as b + from test1 join test2 on test2.id = test1.id; + create trigger I_test instead of insert on test + begin + insert into test1 (id,a) values (NEW.id,NEW.a); + insert into test2 (id,b) values (NEW.id,NEW.b); + end; + insert into test values(1,2,3); + select * from test1; + } +} {1 2} +do_test trigger4-1.2 { + execsql { + select * from test2; + } +} {1 3} +do_test trigger4-1.3 { + db close + sqlite3 db test.db + execsql { + insert into test values(4,5,6); + select * from test1; + } +} {1 2 4 5} +do_test trigger4-1.4 { + execsql { + select * from test2; + } +} {1 3 4 6} + +do_test trigger4-2.1 { + execsql { + create trigger U_test instead of update on test + begin + update test1 set a=NEW.a where id=NEW.id; + update test2 set b=NEW.b where id=NEW.id; + end; + update test set a=22 where id=1; + select * from test1; + } +} {1 22 4 5} +do_test trigger4-2.2 { + execsql { + select * from test2; + } +} {1 3 4 6} +do_test trigger4-2.3 { + db close + sqlite3 db test.db + execsql { + update test set b=66 where id=4; + select * from test1; + } +} {1 22 4 5} +do_test trigger4-2.4 { + execsql { + select * from test2; + } +} {1 3 4 66} + +do_test trigger4-3.1 { + catchsql { + drop table test2; + insert into test values(7,8,9); + } +} {1 {no such table: main.test2}} +do_test trigger4-3.2 { + db close + sqlite3 db test.db + catchsql { + insert into test values(7,8,9); + } +} {1 {no such table: main.test2}} +do_test trigger4-3.3 { + catchsql { + update test set a=222 where id=1; + } +} {1 {no such table: main.test2}} +do_test trigger4-3.4 { + execsql { + select * from test1; + } +} {1 22 4 5} +do_test trigger4-3.5 { + execsql { + create table test2(id,b); + insert into test values(7,8,9); + select * 
from test1; + } +} {1 22 4 5 7 8} +do_test trigger4-3.6 { + execsql { + select * from test2; + } +} {7 9} +do_test trigger4-3.7 { + db close + sqlite3 db test.db + execsql { + update test set b=99 where id=7; + select * from test2; + } +} {7 99} + +do_test trigger4-4.1 { + db close + file delete -force trigtest.db + file delete -force trigtest.db-journal + sqlite3 db trigtest.db + catchsql {drop table tbl; drop view vw} + execsql { + create table tbl(a integer primary key, b integer); + create view vw as select * from tbl; + create trigger t_del_tbl instead of delete on vw for each row begin + delete from tbl where a = old.a; + end; + create trigger t_upd_tbl instead of update on vw for each row begin + update tbl set a=new.a, b=new.b where a = old.a; + end; + create trigger t_ins_tbl instead of insert on vw for each row begin + insert into tbl values (new.a,new.b); + end; + insert into tbl values(101,1001); + insert into tbl values(102,1002); + insert into tbl select a+2, b+2 from tbl; + insert into tbl select a+4, b+4 from tbl; + insert into tbl select a+8, b+8 from tbl; + insert into tbl select a+16, b+16 from tbl; + insert into tbl select a+32, b+32 from tbl; + insert into tbl select a+64, b+64 from tbl; + select count(*) from vw; + } +} {128} +do_test trigger4-4.2 { + execsql {select a, b from vw where a<103 or a>226 order by a} +} {101 1001 102 1002 227 1127 228 1128} + +#test delete from view +do_test trigger4-5.1 { + catchsql {delete from vw where a>101 and a<2000} +} {0 {}} +do_test trigger4-5.2 { + execsql {select * from vw} +} {101 1001} + +#test insert into view +do_test trigger4-6.1 { + catchsql { + insert into vw values(102,1002); + insert into vw select a+2, b+2 from vw; + insert into vw select a+4, b+4 from vw; + insert into vw select a+8, b+8 from vw; + insert into vw select a+16, b+16 from vw; + insert into vw select a+32, b+32 from vw; + insert into vw select a+64, b+64 from vw; + } +} {0 {}} +do_test trigger4-6.2 { + execsql {select count(*) from vw} +} {128} + +#test update of view +do_test trigger4-7.1 { + catchsql {update vw set b=b+1000 where a>101 and a<2000} +} {0 {}} +do_test trigger4-7.2 { + execsql {select a, b from vw where a<=102 or a>=227 order by a} +} {101 1001 102 2002 227 2127 228 2128} + +integrity_check trigger4-99.9 +db close +file delete -force trigtest.db trigtest.db-journal + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger5.test new file mode 100644 index 0000000..75c56b1 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger5.test @@ -0,0 +1,43 @@ +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests the triggers of views. 
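+# (Aside, not part of the original file: the single test below covers ticket
+# #844 -- an AFTER DELETE trigger that rebuilds an INSERT statement using
+# quote(old.b), checking that the REAL value 38205.60865 survives the round
+# trip through the trigger program with its full precision intact.)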
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +# Ticket #844 +# +do_test trigger5-1.1 { + execsql { + CREATE TABLE Item( + a integer PRIMARY KEY NOT NULL , + b double NULL , + c int NOT NULL DEFAULT 0 + ); + CREATE TABLE Undo(UndoAction TEXT); + INSERT INTO Item VALUES (1,38205.60865,340); + CREATE TRIGGER trigItem_UNDO_AD AFTER DELETE ON Item FOR EACH ROW + BEGIN + INSERT INTO Undo SELECT 'INSERT INTO Item (a,b,c) VALUES (' + || coalesce(old.a,'NULL') || ',' || quote(old.b) || ',' || old.c || ');'; + END; + DELETE FROM Item WHERE a = 1; + SELECT * FROM Undo; + } +} {{INSERT INTO Item (a,b,c) VALUES (1,38205.60865,340);}} + +integrity_check trigger5-99.9 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger6.test new file mode 100644 index 0000000..bb343fa --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger6.test @@ -0,0 +1,82 @@ +# 2004 December 07 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure expression of an INSERT +# and UPDATE statement are only evaluated once. See ticket #980. +# If an expression uses a function that has side-effects or which +# is not deterministic (ex: random()) then we want to make sure +# that the same evaluation occurs for the actual INSERT/UPDATE and +# for the NEW.* fields of any triggers that fire. +# +# $Id: trigger6.test,v 1.2 2005/05/05 11:04:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +do_test trigger6-1.1 { + execsql { + CREATE TABLE t1(x, y); + CREATE TABLE log(a, b, c); + CREATE TRIGGER r1 BEFORE INSERT ON t1 BEGIN + INSERT INTO log VALUES(1, new.x, new.y); + END; + CREATE TRIGGER r2 BEFORE UPDATE ON t1 BEGIN + INSERT INTO log VALUES(2, new.x, new.y); + END; + } + set ::trigger6_cnt 0 + proc trigger6_counter {args} { + incr ::trigger6_cnt + return $::trigger6_cnt + } + db function counter trigger6_counter + execsql { + INSERT INTO t1 VALUES(1,counter()); + SELECT * FROM t1; + } +} {1 1} +do_test trigger6-1.2 { + execsql { + SELECT * FROM log; + } +} {1 1 1} +do_test trigger6-1.3 { + execsql { + DELETE FROM t1; + DELETE FROM log; + INSERT INTO t1 VALUES(2,counter(2,3)+4); + SELECT * FROM t1; + } +} {2 6} +do_test trigger6-1.4 { + execsql { + SELECT * FROM log; + } +} {1 2 6} +do_test trigger6-1.5 { + execsql { + DELETE FROM log; + UPDATE t1 SET y=counter(5); + SELECT * FROM t1; + } +} {2 3} +do_test trigger6-1.6 { + execsql { + SELECT * FROM log; + } +} {2 2 3} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger7.test new file mode 100644 index 0000000..dfaf18f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger7.test @@ -0,0 +1,121 @@ +# 2005 August 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to increase coverage of trigger.c. +# +# $Id: trigger7.test,v 1.1 2005/08/19 02:26:27 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + + +# Error messages resulting from qualified trigger names. +# +do_test trigger7-1.1 { + execsql { + CREATE TABLE t1(x, y); + } + catchsql { + CREATE TEMP TRIGGER main.r1 AFTER INSERT ON t1 BEGIN + SELECT 'no nothing'; + END + } +} {1 {temporary trigger may not have qualified name}} +do_test trigger7-1.2 { + catchsql { + CREATE TRIGGER not_a_db.r1 AFTER INSERT ON t1 BEGIN + SELECT 'no nothing'; + END + } +} {1 {unknown database not_a_db}} + + +# When the UPDATE OF syntax is used, no code is generated for triggers +# that do not match the update columns. +# +ifcapable explain { + do_test trigger7-2.1 { + execsql { + CREATE TRIGGER r1 AFTER UPDATE OF x ON t1 BEGIN + SELECT '___update_t1.x___'; + END; + CREATE TRIGGER r2 AFTER UPDATE OF y ON t1 BEGIN + SELECT '___update_t1.y___'; + END; + } + set txt [db eval {EXPLAIN UPDATE t1 SET x=5}] + string match *___update_t1.x___* $txt + } 1 + do_test trigger7-2.2 { + set txt [db eval {EXPLAIN UPDATE t1 SET x=5}] + string match *___update_t1.y___* $txt + } 0 + do_test trigger7-2.3 { + set txt [db eval {EXPLAIN UPDATE t1 SET y=5}] + string match *___update_t1.x___* $txt + } 0 + do_test trigger7-2.4 { + set txt [db eval {EXPLAIN UPDATE t1 SET y=5}] + string match *___update_t1.y___* $txt + } 1 + do_test trigger7-2.5 { + set txt [db eval {EXPLAIN UPDATE t1 SET rowid=5}] + string match *___update_t1.x___* $txt + } 0 + do_test trigger7-2.6 { + set txt [db eval {EXPLAIN UPDATE t1 SET rowid=5}] + string match *___update_t1.x___* $txt + } 0 +} + +# Test the ability to create many triggers on the same table, then +# selectively drop those triggers. +# +do_test trigger7-3.1 { + execsql { + CREATE TABLE t2(x,y,z); + CREATE TRIGGER t2r1 AFTER INSERT ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r2 BEFORE INSERT ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r3 AFTER UPDATE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r4 BEFORE UPDATE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r5 AFTER DELETE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r6 BEFORE DELETE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r7 AFTER INSERT ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r8 BEFORE INSERT ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r9 AFTER UPDATE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r10 BEFORE UPDATE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r11 AFTER DELETE ON t2 BEGIN SELECT 1; END; + CREATE TRIGGER t2r12 BEFORE DELETE ON t2 BEGIN SELECT 1; END; + DROP TRIGGER t2r6; + } +} {} + +# This test corrupts the database file so it must be the last test +# in the series. +# +do_test trigger7-99.1 { + execsql { + PRAGMA writable_schema=on; + UPDATE sqlite_master SET sql='nonsense'; + } + db close + sqlite3 db test.db + catchsql { + DROP TRIGGER t2r5 + } +} {1 {malformed database schema - near "nonsense": syntax error}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/trigger8.test b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger8.test new file mode 100644 index 0000000..b4215fb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/trigger8.test @@ -0,0 +1,42 @@ +# 2006 February 27 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure abusively large triggers +# (triggers with 100s or 1000s of statements) work. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + + +do_test trigger8-1.1 { + execsql { + CREATE TABLE t1(x); + CREATE TABLE t2(y); + } + set sql "CREATE TRIGGER r10000 AFTER INSERT ON t1 BEGIN\n" + for {set i 0} {$i<10000} {incr i} { + append sql " INSERT INTO t2 VALUES($i);\n" + } + append sql "END;" + execsql $sql + execsql { + INSERT INTO t1 VALUES(5); + SELECT count(*) FROM t2; + } +} {10000} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/types.test b/libraries/sqlite/unix/sqlite-3.5.1/test/types.test new file mode 100644 index 0000000..6ebaeb8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/types.test @@ -0,0 +1,324 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specfically +# it tests that the different storage classes (integer, real, text etc.) +# all work correctly. +# +# $Id: types.test,v 1.19 2006/06/27 12:51:13 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Tests in this file are organized roughly as follows: +# +# types-1.*.*: Test that values are stored using the expected storage +# classes when various forms of literals are inserted into +# columns with different affinities. +# types-1.1.*: INSERT INTO
<table> VALUES(...) + # types-1.2.*: INSERT INTO <table>
SELECT... + # types-1.3.*: UPDATE <table>
SET... +# +# types-2.*.*: Check that values can be stored and retrieving using the +# various storage classes. +# types-2.1.*: INTEGER +# types-2.2.*: REAL +# types-2.3.*: NULL +# types-2.4.*: TEXT +# types-2.5.*: Records with a few different storage classes. +# +# types-3.*: Test that the '=' operator respects manifest types. +# + +# Disable encryption on the database for this test. +db close +set DB [sqlite3 db test.db; sqlite3_connection_pointer db] +sqlite3_rekey $DB {} + +# Create a table with one column for each type of affinity +do_test types-1.1.0 { + execsql { + CREATE TABLE t1(i integer, n numeric, t text, o blob); + } +} {} + +# Each element of the following list represents one test case. +# +# The first value of each sub-list is an SQL literal. The following +# four value are the storage classes that would be used if the +# literal were inserted into a column with affinity INTEGER, NUMERIC, TEXT +# or NONE, respectively. +set values { + { 5.0 integer integer text real } + { 5.1 real real text real } + { 5 integer integer text integer } + { '5.0' integer integer text text } + { '5.1' real real text text } + { '-5.0' integer integer text text } + { '-5.0' integer integer text text } + { '5' integer integer text text } + { 'abc' text text text text } + { NULL null null null null } +} +ifcapable {bloblit} { + lappend values { X'00' blob blob blob blob } +} + +# This code tests that the storage classes specified above (in the $values +# table) are correctly assigned when values are inserted using a statement +# of the form: +# +# INSERT INTO
VALUE(); +# +set tnum 1 +foreach val $values { + set lit [lindex $val 0] + execsql "DELETE FROM t1;" + execsql "INSERT INTO t1 VALUES($lit, $lit, $lit, $lit);" + do_test types-1.1.$tnum { + execsql { + SELECT typeof(i), typeof(n), typeof(t), typeof(o) FROM t1; + } + } [lrange $val 1 end] + incr tnum +} + +# This code tests that the storage classes specified above (in the $values +# table) are correctly assigned when values are inserted using a statement +# of the form: +# +# INSERT INTO t1 SELECT .... +# +set tnum 1 +foreach val $values { + set lit [lindex $val 0] + execsql "DELETE FROM t1;" + execsql "INSERT INTO t1 SELECT $lit, $lit, $lit, $lit;" + do_test types-1.2.$tnum { + execsql { + SELECT typeof(i), typeof(n), typeof(t), typeof(o) FROM t1; + } + } [lrange $val 1 end] + incr tnum +} + +# This code tests that the storage classes specified above (in the $values +# table) are correctly assigned when values are inserted using a statement +# of the form: +# +# UPDATE
SET = ; +# +set tnum 1 +foreach val $values { + set lit [lindex $val 0] + execsql "UPDATE t1 SET i = $lit, n = $lit, t = $lit, o = $lit;" + do_test types-1.3.$tnum { + execsql { + SELECT typeof(i), typeof(n), typeof(t), typeof(o) FROM t1; + } + } [lrange $val 1 end] + incr tnum +} + +execsql { + DROP TABLE t1; +} + +# Open the table with root-page $rootpage at the btree +# level. Return a list that is the length of each record +# in the table, in the tables default scanning order. +proc record_sizes {rootpage} { + set bt [btree_open test.db 10 0] + set c [btree_cursor $bt $rootpage 0] + btree_first $c + while 1 { + lappend res [btree_payload_size $c] + if {[btree_next $c]} break + } + btree_close_cursor $c + btree_close $bt + set res +} + + +# Create a table and insert some 1-byte integers. Make sure they +# can be read back OK. These should be 3 byte records. +do_test types-2.1.1 { + execsql { + CREATE TABLE t1(a integer); + INSERT INTO t1 VALUES(0); + INSERT INTO t1 VALUES(120); + INSERT INTO t1 VALUES(-120); + } +} {} +do_test types-2.1.2 { + execsql { + SELECT a FROM t1; + } +} {0 120 -120} + +# Try some 2-byte integers (4 byte records) +do_test types-2.1.3 { + execsql { + INSERT INTO t1 VALUES(30000); + INSERT INTO t1 VALUES(-30000); + } +} {} +do_test types-2.1.4 { + execsql { + SELECT a FROM t1; + } +} {0 120 -120 30000 -30000} + +# 4-byte integers (6 byte records) +do_test types-2.1.5 { + execsql { + INSERT INTO t1 VALUES(2100000000); + INSERT INTO t1 VALUES(-2100000000); + } +} {} +do_test types-2.1.6 { + execsql { + SELECT a FROM t1; + } +} {0 120 -120 30000 -30000 2100000000 -2100000000} + +# 8-byte integers (10 byte records) +do_test types-2.1.7 { + execsql { + INSERT INTO t1 VALUES(9000000*1000000*1000000); + INSERT INTO t1 VALUES(-9000000*1000000*1000000); + } +} {} +do_test types-2.1.8 { + execsql { + SELECT a FROM t1; + } +} [list 0 120 -120 30000 -30000 2100000000 -2100000000 \ + 9000000000000000000 -9000000000000000000] + +# Check that all the record sizes are as we expected. +ifcapable legacyformat { + do_test types-2.1.9 { + set root [db eval {select rootpage from sqlite_master where name = 't1'}] + record_sizes $root + } {3 3 3 4 4 6 6 10 10} +} else { + do_test types-2.1.9 { + set root [db eval {select rootpage from sqlite_master where name = 't1'}] + record_sizes $root + } {2 3 3 4 4 6 6 10 10} +} + +# Insert some reals. These should be 10 byte records. +do_test types-2.2.1 { + execsql { + CREATE TABLE t2(a float); + INSERT INTO t2 VALUES(0.0); + INSERT INTO t2 VALUES(12345.678); + INSERT INTO t2 VALUES(-12345.678); + } +} {} +do_test types-2.2.2 { + execsql { + SELECT a FROM t2; + } +} {0.0 12345.678 -12345.678} + +# Check that all the record sizes are as we expected. +ifcapable legacyformat { + do_test types-2.2.3 { + set root [db eval {select rootpage from sqlite_master where name = 't2'}] + record_sizes $root + } {3 10 10} +} else { + do_test types-2.2.3 { + set root [db eval {select rootpage from sqlite_master where name = 't2'}] + record_sizes $root + } {2 10 10} +} + +# Insert a NULL. This should be a two byte record. +do_test types-2.3.1 { + execsql { + CREATE TABLE t3(a nullvalue); + INSERT INTO t3 VALUES(NULL); + } +} {} +do_test types-2.3.2 { + execsql { + SELECT a ISNULL FROM t3; + } +} {1} + +# Check that all the record sizes are as we expected. +do_test types-2.3.3 { + set root [db eval {select rootpage from sqlite_master where name = 't3'}] + record_sizes $root +} {2} + +# Insert a couple of strings. 
+do_test types-2.4.1 { + set string10 abcdefghij + set string500 [string repeat $string10 50] + set string500000 [string repeat $string10 50000] + + execsql " + CREATE TABLE t4(a string); + INSERT INTO t4 VALUES('$string10'); + INSERT INTO t4 VALUES('$string500'); + INSERT INTO t4 VALUES('$string500000'); + " +} {} +do_test types-2.4.2 { + execsql { + SELECT a FROM t4; + } +} [list $string10 $string500 $string500000] + +# Check that all the record sizes are as we expected. This is dependant on +# the database encoding. +if { $sqlite_options(utf16)==0 || [execsql {pragma encoding}] == "UTF-8" } { + do_test types-2.4.3 { + set root [db eval {select rootpage from sqlite_master where name = 't4'}] + record_sizes $root + } {12 503 500004} +} else { + do_test types-2.4.3 { + set root [db eval {select rootpage from sqlite_master where name = 't4'}] + record_sizes $root + } {22 1003 1000004} +} + +do_test types-2.5.1 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + DROP TABLE t4; + CREATE TABLE t1(a, b, c); + } +} {} +do_test types-2.5.2 { + set string10 abcdefghij + set string500 [string repeat $string10 50] + set string500000 [string repeat $string10 50000] + + execsql "INSERT INTO t1 VALUES(NULL, '$string10', 4000);" + execsql "INSERT INTO t1 VALUES('$string500', 4000, NULL);" + execsql "INSERT INTO t1 VALUES(4000, NULL, '$string500000');" +} {} +do_test types-2.5.3 { + execsql { + SELECT * FROM t1; + } +} [list {} $string10 4000 $string500 4000 {} 4000 {} $string500000] + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/types2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/types2.test new file mode 100644 index 0000000..4a70aa5 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/types2.test @@ -0,0 +1,340 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of this file is testing the interaction of manifest types, type affinity +# and comparison expressions. +# +# $Id: types2.test,v 1.7 2007/02/23 03:00:45 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Tests in this file are organized roughly as follows: +# +# types2-1.*: The '=' operator in the absence of an index. +# types2-2.*: The '=' operator implemented using an index. +# types2-3.*: The '<' operator implemented using an index. +# types2-4.*: The '>' operator in the absence of an index. +# types2-5.*: The 'IN(x, y...)' operator in the absence of an index. +# types2-6.*: The 'IN(x, y...)' operator with an index. +# types2-7.*: The 'IN(SELECT...)' operator in the absence of an index. +# types2-8.*: The 'IN(SELECT...)' operator with an index. +# +# All tests test the operators using literals and columns, but no +# other types of expressions. All expressions except columns are +# handled similarly in the implementation. 
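# Illustrative sketch, not part of the original types2.test: the affinity rule
# behind the types2-1.* cases in standalone form.  When a literal is compared
# to a column with TEXT affinity, TEXT affinity is applied to the literal, so
# 500 matches the stored text '500' while 500.0 does not.  The "affdemo"
# handle and table name are invented for this sketch.
sqlite3 affdemo :memory:
affdemo eval {
  CREATE TABLE x(t TEXT);
  INSERT INTO x VALUES(500);                -- TEXT affinity stores this as '500'
}
puts [affdemo eval {SELECT 500 = t, 500.0 = t FROM x}]   ;# 1 0
affdemo close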
+ +execsql { + CREATE TABLE t1( + i1 INTEGER, + i2 INTEGER, + n1 NUMERIC, + n2 NUMERIC, + t1 TEXT, + t2 TEXT, + o1 BLOB, + o2 BLOB + ); + INSERT INTO t1 VALUES(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); +} + +proc test_bool {testname vars expr res} { + if { $vars != "" } { + execsql "UPDATE t1 SET $vars" + } + + foreach {t e r} [list $testname $expr $res] {} + + do_test $t.1 "execsql {SELECT $e FROM t1}" $r + do_test $t.2 "execsql {SELECT 1 FROM t1 WHERE $expr}" [expr $r?"1":""] + do_test $t.3 "execsql {SELECT 1 FROM t1 WHERE NOT ($e)}" [expr $r?"":"1"] +} + +# Compare literals against literals. This should always use a numeric +# comparison. +# +# Changed by ticket #805: Use no affinity for literal comparisons. +# +test_bool types2-1.1 "" {500 = 500.0} 1 +test_bool types2-1.2 "" {'500' = 500.0} 0 +test_bool types2-1.3 "" {500 = '500.0'} 0 +test_bool types2-1.4 "" {'500' = '500.0'} 0 + +# Compare literals against a column with TEXT affinity +test_bool types2-1.5 {t1=500} {500 = t1} 1 +test_bool types2-1.6 {t1=500} {'500' = t1} 1 +test_bool types2-1.7 {t1=500} {500.0 = t1} 0 +test_bool types2-1.8 {t1=500} {'500.0' = t1} 0 +test_bool types2-1.9 {t1='500'} {500 = t1} 1 +test_bool types2-1.10 {t1='500'} {'500' = t1} 1 +test_bool types2-1.11 {t1='500'} {500.0 = t1} 0 +test_bool types2-1.12 {t1='500'} {'500.0' = t1} 0 + +# Compare literals against a column with NUMERIC affinity +test_bool types2-1.13 {n1=500} {500 = n1} 1 +test_bool types2-1.14 {n1=500} {'500' = n1} 1 +test_bool types2-1.15 {n1=500} {500.0 = n1} 1 +test_bool types2-1.16 {n1=500} {'500.0' = n1} 1 +test_bool types2-1.17 {n1='500'} {500 = n1} 1 +test_bool types2-1.18 {n1='500'} {'500' = n1} 1 +test_bool types2-1.19 {n1='500'} {500.0 = n1} 1 +test_bool types2-1.20 {n1='500'} {'500.0' = n1} 1 + +# Compare literals against a column with affinity NONE +test_bool types2-1.21 {o1=500} {500 = o1} 1 +test_bool types2-1.22 {o1=500} {'500' = o1} 0 +test_bool types2-1.23 {o1=500} {500.0 = o1} 1 +test_bool types2-1.24 {o1=500} {'500.0' = o1} 0 +test_bool types2-1.25 {o1='500'} {500 = o1} 0 +test_bool types2-1.26 {o1='500'} {'500' = o1} 1 +test_bool types2-1.27 {o1='500'} {500.0 = o1} 0 +test_bool types2-1.28 {o1='500'} {'500.0' = o1} 0 + +set vals [list 10 10.0 '10' '10.0' 20 20.0 '20' '20.0' 30 30.0 '30' '30.0'] +# 1 2 3 4 5 6 7 8 9 10 11 12 + +execsql { + CREATE TABLE t2(i INTEGER, n NUMERIC, t TEXT, o XBLOBY); + CREATE INDEX t2i1 ON t2(i); + CREATE INDEX t2i2 ON t2(n); + CREATE INDEX t2i3 ON t2(t); + CREATE INDEX t2i4 ON t2(o); +} +foreach v $vals { + execsql "INSERT INTO t2 VALUES($v, $v, $v, $v);" +} + +proc test_boolset {testname where set} { + set ::tb_sql "SELECT rowid FROM t2 WHERE $where" + do_test $testname { + lsort -integer [execsql $::tb_sql] + } $set +} + +test_boolset types2-2.1 {i = 10} {1 2 3 4} +test_boolset types2-2.2 {i = 10.0} {1 2 3 4} +test_boolset types2-2.3 {i = '10'} {1 2 3 4} +test_boolset types2-2.4 {i = '10.0'} {1 2 3 4} + +test_boolset types2-2.5 {n = 20} {5 6 7 8} +test_boolset types2-2.6 {n = 20.0} {5 6 7 8} +test_boolset types2-2.7 {n = '20'} {5 6 7 8} +test_boolset types2-2.8 {n = '20.0'} {5 6 7 8} + +test_boolset types2-2.9 {t = 20} {5 7} +test_boolset types2-2.10 {t = 20.0} {6 8} +test_boolset types2-2.11 {t = '20'} {5 7} +test_boolset types2-2.12 {t = '20.0'} {6 8} + +test_boolset types2-2.10 {o = 30} {9 10} +test_boolset types2-2.11 {o = 30.0} {9 10} +test_boolset types2-2.12 {o = '30'} 11 +test_boolset types2-2.13 {o = '30.0'} 12 + +test_boolset types2-3.1 {i < 20} {1 2 3 4} +test_boolset types2-3.2 {i < 
20.0} {1 2 3 4} +test_boolset types2-3.3 {i < '20'} {1 2 3 4} +test_boolset types2-3.4 {i < '20.0'} {1 2 3 4} + +test_boolset types2-3.1 {n < 20} {1 2 3 4} +test_boolset types2-3.2 {n < 20.0} {1 2 3 4} +test_boolset types2-3.3 {n < '20'} {1 2 3 4} +test_boolset types2-3.4 {n < '20.0'} {1 2 3 4} + +test_boolset types2-3.1 {t < 20} {1 2 3 4} +test_boolset types2-3.2 {t < 20.0} {1 2 3 4 5 7} +test_boolset types2-3.3 {t < '20'} {1 2 3 4} +test_boolset types2-3.4 {t < '20.0'} {1 2 3 4 5 7} + +test_boolset types2-3.1 {o < 20} {1 2} +test_boolset types2-3.2 {o < 20.0} {1 2} +test_boolset types2-3.3 {o < '20'} {1 2 3 4 5 6 9 10} +test_boolset types2-3.3 {o < '20.0'} {1 2 3 4 5 6 7 9 10} + +# Compare literals against literals (always a numeric comparison). +# Change (by ticket #805): No affinity in comparisons +test_bool types2-4.1 "" {500 > 60.0} 1 +test_bool types2-4.2 "" {'500' > 60.0} 1 +test_bool types2-4.3 "" {500 > '60.0'} 0 +test_bool types2-4.4 "" {'500' > '60.0'} 0 + +# Compare literals against a column with TEXT affinity +test_bool types2-4.5 {t1=500.0} {t1 > 500} 1 +test_bool types2-4.6 {t1=500.0} {t1 > '500' } 1 +test_bool types2-4.7 {t1=500.0} {t1 > 500.0 } 0 +test_bool types2-4.8 {t1=500.0} {t1 > '500.0' } 0 +test_bool types2-4.9 {t1='500.0'} {t1 > 500 } 1 +test_bool types2-4.10 {t1='500.0'} {t1 > '500' } 1 +test_bool types2-4.11 {t1='500.0'} {t1 > 500.0 } 0 +test_bool types2-4.12 {t1='500.0'} {t1 > '500.0' } 0 + +# Compare literals against a column with NUMERIC affinity +test_bool types2-4.13 {n1=400} {500 > n1} 1 +test_bool types2-4.14 {n1=400} {'500' > n1} 1 +test_bool types2-4.15 {n1=400} {500.0 > n1} 1 +test_bool types2-4.16 {n1=400} {'500.0' > n1} 1 +test_bool types2-4.17 {n1='400'} {500 > n1} 1 +test_bool types2-4.18 {n1='400'} {'500' > n1} 1 +test_bool types2-4.19 {n1='400'} {500.0 > n1} 1 +test_bool types2-4.20 {n1='400'} {'500.0' > n1} 1 + +# Compare literals against a column with affinity NONE +test_bool types2-4.21 {o1=500} {500 > o1} 0 +test_bool types2-4.22 {o1=500} {'500' > o1} 1 +test_bool types2-4.23 {o1=500} {500.0 > o1} 0 +test_bool types2-4.24 {o1=500} {'500.0' > o1} 1 +test_bool types2-4.25 {o1='500'} {500 > o1} 0 +test_bool types2-4.26 {o1='500'} {'500' > o1} 0 +test_bool types2-4.27 {o1='500'} {500.0 > o1} 0 +test_bool types2-4.28 {o1='500'} {'500.0' > o1} 1 + +ifcapable subquery { + # types2-5.* - The 'IN (x, y....)' operator with no index. 
+ # + # Compare literals against literals (no affinity applied) + test_bool types2-5.1 {} {(NULL IN ('10.0', 20)) ISNULL} 1 + test_bool types2-5.2 {} {10 IN ('10.0', 20)} 0 + test_bool types2-5.3 {} {'10' IN ('10.0', 20)} 0 + test_bool types2-5.4 {} {10 IN (10.0, 20)} 1 + test_bool types2-5.5 {} {'10.0' IN (10, 20)} 0 + + # Compare literals against a column with TEXT affinity + test_bool types2-5.6 {t1='10.0'} {t1 IN (10.0, 20)} 1 + test_bool types2-5.7 {t1='10.0'} {t1 IN (10, 20)} 0 + test_bool types2-5.8 {t1='10'} {t1 IN (10.0, 20)} 0 + test_bool types2-5.9 {t1='10'} {t1 IN (20, '10.0')} 0 + test_bool types2-5.10 {t1=10} {t1 IN (20, '10')} 1 + + # Compare literals against a column with NUMERIC affinity + test_bool types2-5.11 {n1='10.0'} {n1 IN (10.0, 20)} 1 + test_bool types2-5.12 {n1='10.0'} {n1 IN (10, 20)} 1 + test_bool types2-5.13 {n1='10'} {n1 IN (10.0, 20)} 1 + test_bool types2-5.14 {n1='10'} {n1 IN (20, '10.0')} 1 + test_bool types2-5.15 {n1=10} {n1 IN (20, '10')} 1 + + # Compare literals against a column with affinity NONE + test_bool types2-5.16 {o1='10.0'} {o1 IN (10.0, 20)} 0 + test_bool types2-5.17 {o1='10.0'} {o1 IN (10, 20)} 0 + test_bool types2-5.18 {o1='10'} {o1 IN (10.0, 20)} 0 + test_bool types2-5.19 {o1='10'} {o1 IN (20, '10.0')} 0 + test_bool types2-5.20 {o1=10} {o1 IN (20, '10')} 0 + test_bool types2-5.21 {o1='10.0'} {o1 IN (10, 20, '10.0')} 1 + test_bool types2-5.22 {o1='10'} {o1 IN (10.0, 20, '10')} 1 + test_bool types2-5.23 {o1=10} {n1 IN (20, '10', 10)} 1 + + # Ticket #2248: Comparisons of strings literals that look like + # numbers. + test_bool types2-5.24 {} {'1' IN ('1')} 1 + test_bool types2-5.25 {} {'2' IN (2)} 0 + test_bool types2-5.26 {} {3 IN ('3')} 0 + test_bool types2-5.27 {} {4 IN (4)} 1 + + # The affinity of columns on the right side of IN(...) is ignored. + # All values in the expression list are treated as ordinary expressions, + # even if they are columns with affinity. + test_bool types2-5.30 {t1='10'} {10 IN (5,t1,'abc')} 0 + test_bool types2-5.31 {t1='10'} {10 IN ('abc',t1,5)} 0 + test_bool types2-5.32 {t1='010'} {10 IN (5,t1,'abc')} 0 + test_bool types2-5.33 {t1='010'} {10 IN ('abc',t1,5)} 0 + test_bool types2-5.34 {t1='10'} {'10' IN (5,t1,'abc')} 1 + test_bool types2-5.35 {t1='10'} {'10' IN ('abc',t1,5)} 1 + test_bool types2-5.36 {t1='010'} {'10' IN (5,t1,'abc')} 0 + test_bool types2-5.37 {t1='010'} {'10' IN ('abc',t1,5)} 0 + + # Columns on both the left and right of IN(...). Only the column + # on the left matters. The all values on the right are treated like + # expressions. + test_bool types2-5.40 {t1='10',n1=10} {t1 IN (5,n1,11)} 1 + test_bool types2-5.41 {t1='010',n1=10} {t1 IN (5,n1,11)} 0 + test_bool types2-5.42 {t1='10',n1=10} {n1 IN (5,t1,11)} 1 + test_bool types2-5.43 {t1='010',n1=10} {n1 IN (5,t1,11)} 1 +} + +# Tests named types2-6.* use the same infrastructure as the types2-2.* +# tests. The contents of the vals array is repeated here for easy +# reference. 
+# +# set vals [list 10 10.0 '10' '10.0' 20 20.0 '20' '20.0' 30 30.0 '30' '30.0'] +# 1 2 3 4 5 6 7 8 9 10 11 12 + +ifcapable subquery { + test_boolset types2-6.1 {o IN ('10', 30)} {3 9 10} + test_boolset types2-6.2 {o IN (20.0, 30.0)} {5 6 9 10} + test_boolset types2-6.3 {t IN ('10', 30)} {1 3 9 11} + test_boolset types2-6.4 {t IN (20.0, 30.0)} {6 8 10 12} + test_boolset types2-6.5 {n IN ('10', 30)} {1 2 3 4 9 10 11 12} + test_boolset types2-6.6 {n IN (20.0, 30.0)} {5 6 7 8 9 10 11 12} + test_boolset types2-6.7 {i IN ('10', 30)} {1 2 3 4 9 10 11 12} + test_boolset types2-6.8 {i IN (20.0, 30.0)} {5 6 7 8 9 10 11 12} + + # Also test than IN(x, y, z) works on a rowid: + test_boolset types2-6.9 {rowid IN (1, 6, 10)} {1 6 10} +} + +# Tests types2-7.* concentrate on expressions of the form +# "x IN (SELECT...)" with no index. +execsql { + CREATE TABLE t3(i INTEGER, n NUMERIC, t TEXT, o BLOB); + INSERT INTO t3 VALUES(1, 1, 1, 1); + INSERT INTO t3 VALUES(2, 2, 2, 2); + INSERT INTO t3 VALUES(3, 3, 3, 3); + INSERT INTO t3 VALUES('1', '1', '1', '1'); + INSERT INTO t3 VALUES('1.0', '1.0', '1.0', '1.0'); +} + +ifcapable subquery { + test_bool types2-7.1 {i1=1} {i1 IN (SELECT i FROM t3)} 1 + test_bool types2-7.2 {i1='2.0'} {i1 IN (SELECT i FROM t3)} 1 + test_bool types2-7.3 {i1='2.0'} {i1 IN (SELECT n FROM t3)} 1 + test_bool types2-7.4 {i1='2.0'} {i1 IN (SELECT t FROM t3)} 1 + test_bool types2-7.5 {i1='2.0'} {i1 IN (SELECT o FROM t3)} 1 + + test_bool types2-7.6 {n1=1} {n1 IN (SELECT n FROM t3)} 1 + test_bool types2-7.7 {n1='2.0'} {n1 IN (SELECT i FROM t3)} 1 + test_bool types2-7.8 {n1='2.0'} {n1 IN (SELECT n FROM t3)} 1 + test_bool types2-7.9 {n1='2.0'} {n1 IN (SELECT t FROM t3)} 1 + test_bool types2-7.10 {n1='2.0'} {n1 IN (SELECT o FROM t3)} 1 + + test_bool types2-7.6 {t1=1} {t1 IN (SELECT t FROM t3)} 1 + test_bool types2-7.7 {t1='2.0'} {t1 IN (SELECT t FROM t3)} 0 + test_bool types2-7.8 {t1='2.0'} {t1 IN (SELECT n FROM t3)} 1 + test_bool types2-7.9 {t1='2.0'} {t1 IN (SELECT i FROM t3)} 1 + test_bool types2-7.10 {t1='2.0'} {t1 IN (SELECT o FROM t3)} 0 + test_bool types2-7.11 {t1='1.0'} {t1 IN (SELECT t FROM t3)} 1 + test_bool types2-7.12 {t1='1.0'} {t1 IN (SELECT o FROM t3)} 1 + + test_bool types2-7.13 {o1=2} {o1 IN (SELECT o FROM t3)} 1 + test_bool types2-7.14 {o1='2'} {o1 IN (SELECT o FROM t3)} 0 + test_bool types2-7.15 {o1='2'} {o1 IN (SELECT o||'' FROM t3)} 1 +} + +# set vals [list 10 10.0 '10' '10.0' 20 20.0 '20' '20.0' 30 30.0 '30' '30.0'] +# 1 2 3 4 5 6 7 8 9 10 11 12 +execsql { + CREATE TABLE t4(i INTEGER, n NUMERIC, t VARCHAR(20), o LARGE BLOB); + INSERT INTO t4 VALUES(10, 20, 20, 30); +} +ifcapable subquery { + test_boolset types2-8.1 {i IN (SELECT i FROM t4)} {1 2 3 4} + test_boolset types2-8.2 {n IN (SELECT i FROM t4)} {1 2 3 4} + test_boolset types2-8.3 {t IN (SELECT i FROM t4)} {1 2 3 4} + test_boolset types2-8.4 {o IN (SELECT i FROM t4)} {1 2 3 4} + test_boolset types2-8.5 {i IN (SELECT t FROM t4)} {5 6 7 8} + test_boolset types2-8.6 {n IN (SELECT t FROM t4)} {5 6 7 8} + test_boolset types2-8.7 {t IN (SELECT t FROM t4)} {5 7} + test_boolset types2-8.8 {o IN (SELECT t FROM t4)} {7} + test_boolset types2-8.9 {i IN (SELECT o FROM t4)} {9 10 11 12} + test_boolset types2-8.6 {n IN (SELECT o FROM t4)} {9 10 11 12} + test_boolset types2-8.7 {t IN (SELECT o FROM t4)} {9 11} + test_boolset types2-8.8 {o IN (SELECT o FROM t4)} {9 10} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/types3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/types3.test new file mode 100644 index 
0000000..3d48ad8 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/types3.test @@ -0,0 +1,98 @@ +# 2005 June 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of this file is testing the interaction of SQLite manifest types +# with Tcl dual-representations. +# +# $Id: types3.test,v 1.7 2007/06/26 22:42:56 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# A variable with only a string representation comes in as TEXT +do_test types3-1.1 { + set V {} + append V {} + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] +} {string text} + +# A variable with an integer representation comes in as INTEGER +do_test types3-1.2 { + set V [expr {int(1+2)}] + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] +} {int integer} +set V [expr {1+12345678012345}] +if {[tcl_variable_type V]=="wideInt"} { + do_test types3-1.3 { + set V [expr {1+123456789012345}] + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] + } {wideInt integer} +} else { + do_test types3-1.3 { + set V [expr {1+123456789012345}] + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] + } {int integer} +} + +# A double variable comes in as REAL +do_test types3-1.4 { + set V [expr {1.0+1}] + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] +} {double real} + +# A byte-array variable comes in a BLOB if it has no string representation +# or as TEXT if there is a string representation. +# +do_test types3-1.5 { + set V [binary format a3 abc] + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] +} {bytearray blob} +do_test types3-1.6 { + set V "abc" + binary scan $V a3 x + concat [tcl_variable_type V] [execsql {SELECT typeof(:V)}] +} {bytearray text} + +# Check to make sure return values are of the right types. +# +ifcapable bloblit { + do_test types3-2.1 { + set V [db one {SELECT x'616263'}] + tcl_variable_type V + } bytearray +} +do_test types3-2.2 { + set V [db one {SELECT 123}] + tcl_variable_type V +} int +do_test types3-2.3 { + set V [db one {SELECT 1234567890123456}] + tcl_variable_type V +} wideInt +do_test types3-2.4.1 { + set V [db one {SELECT 1234567890123456.1}] + tcl_variable_type V +} double +do_test types3-2.4.2 { + set V [db one {SELECT 1234567890123.456}] + tcl_variable_type V +} double +do_test types3-2.5 { + set V [db one {SELECT '1234567890123456.0'}] + tcl_variable_type V +} {} +do_test types3-2.6 { + set V [db one {SELECT NULL}] + tcl_variable_type V +} {} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/unique.test b/libraries/sqlite/unix/sqlite-3.5.1/test/unique.test new file mode 100644 index 0000000..7bdc363 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/unique.test @@ -0,0 +1,253 @@ +# 2001 September 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this file is testing the CREATE UNIQUE INDEX statement, +# and primary keys, and the UNIQUE constraint on table columns +# +# $Id: unique.test,v 1.8 2005/06/24 03:53:06 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Try to create a table with two primary keys. +# (This is allowed in SQLite even that it is not valid SQL) +# +do_test unique-1.1 { + catchsql { + CREATE TABLE t1( + a int PRIMARY KEY, + b int PRIMARY KEY, + c text + ); + } +} {1 {table "t1" has more than one primary key}} +do_test unique-1.1b { + catchsql { + CREATE TABLE t1( + a int PRIMARY KEY, + b int UNIQUE, + c text + ); + } +} {0 {}} +do_test unique-1.2 { + catchsql { + INSERT INTO t1(a,b,c) VALUES(1,2,3) + } +} {0 {}} +do_test unique-1.3 { + catchsql { + INSERT INTO t1(a,b,c) VALUES(1,3,4) + } +} {1 {column a is not unique}} +do_test unique-1.4 { + execsql { + SELECT * FROM t1 ORDER BY a; + } +} {1 2 3} +do_test unique-1.5 { + catchsql { + INSERT INTO t1(a,b,c) VALUES(3,2,4) + } +} {1 {column b is not unique}} +do_test unique-1.6 { + execsql { + SELECT * FROM t1 ORDER BY a; + } +} {1 2 3} +do_test unique-1.7 { + catchsql { + INSERT INTO t1(a,b,c) VALUES(3,4,5) + } +} {0 {}} +do_test unique-1.8 { + execsql { + SELECT * FROM t1 ORDER BY a; + } +} {1 2 3 3 4 5} +integrity_check unique-1.9 + +do_test unique-2.0 { + execsql { + DROP TABLE t1; + CREATE TABLE t2(a int, b int); + INSERT INTO t2(a,b) VALUES(1,2); + INSERT INTO t2(a,b) VALUES(3,4); + SELECT * FROM t2 ORDER BY a; + } +} {1 2 3 4} +do_test unique-2.1 { + catchsql { + CREATE UNIQUE INDEX i2 ON t2(a) + } +} {0 {}} +do_test unique-2.2 { + catchsql { + SELECT * FROM t2 ORDER BY a + } +} {0 {1 2 3 4}} +do_test unique-2.3 { + catchsql { + INSERT INTO t2 VALUES(1,5); + } +} {1 {column a is not unique}} +do_test unique-2.4 { + catchsql { + SELECT * FROM t2 ORDER BY a + } +} {0 {1 2 3 4}} +do_test unique-2.5 { + catchsql { + DROP INDEX i2; + SELECT * FROM t2 ORDER BY a; + } +} {0 {1 2 3 4}} +do_test unique-2.6 { + catchsql { + INSERT INTO t2 VALUES(1,5) + } +} {0 {}} +do_test unique-2.7 { + catchsql { + SELECT * FROM t2 ORDER BY a, b; + } +} {0 {1 2 1 5 3 4}} +do_test unique-2.8 { + catchsql { + CREATE UNIQUE INDEX i2 ON t2(a); + } +} {1 {indexed columns are not unique}} +do_test unique-2.9 { + catchsql { + CREATE INDEX i2 ON t2(a); + } +} {0 {}} +integrity_check unique-2.10 + +# Test the UNIQUE keyword as used on two or more fields. +# +do_test unique-3.1 { + catchsql { + CREATE TABLE t3( + a int, + b int, + c int, + d int, + unique(a,c,d) + ); + } +} {0 {}} +do_test unique-3.2 { + catchsql { + INSERT INTO t3(a,b,c,d) VALUES(1,2,3,4); + SELECT * FROM t3 ORDER BY a,b,c,d; + } +} {0 {1 2 3 4}} +do_test unique-3.3 { + catchsql { + INSERT INTO t3(a,b,c,d) VALUES(1,2,3,5); + SELECT * FROM t3 ORDER BY a,b,c,d; + } +} {0 {1 2 3 4 1 2 3 5}} +do_test unique-3.4 { + catchsql { + INSERT INTO t3(a,b,c,d) VALUES(1,4,3,5); + SELECT * FROM t3 ORDER BY a,b,c,d; + } +} {1 {columns a, c, d are not unique}} +integrity_check unique-3.5 + +# Make sure NULLs are distinct as far as the UNIQUE tests are +# concerned. 
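# Illustrative sketch, not part of the original unique.test: NULLs never
# collide under a UNIQUE constraint, while duplicate non-NULL values do.
# The "uniqdemo" handle and table name are invented for this sketch.
sqlite3 uniqdemo :memory:
uniqdemo eval {
  CREATE TABLE u(a UNIQUE);
  INSERT INTO u VALUES(NULL);
  INSERT INTO u VALUES(NULL);               -- allowed: each NULL is distinct
}
puts [uniqdemo eval {SELECT count(*) FROM u}]   ;# 2
puts [catch {uniqdemo eval {INSERT INTO u VALUES(1); INSERT INTO u VALUES(1)}} msg]   ;# 1
puts $msg                                   ;# column a is not unique
uniqdemo close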
+# +do_test unique-4.1 { + execsql { + CREATE TABLE t4(a UNIQUE, b, c, UNIQUE(b,c)); + INSERT INTO t4 VALUES(1,2,3); + INSERT INTO t4 VALUES(NULL, 2, NULL); + SELECT * FROM t4; + } +} {1 2 3 {} 2 {}} +do_test unique-4.2 { + catchsql { + INSERT INTO t4 VALUES(NULL, 3, 4); + } +} {0 {}} +do_test unique-4.3 { + execsql { + SELECT * FROM t4 + } +} {1 2 3 {} 2 {} {} 3 4} +do_test unique-4.4 { + catchsql { + INSERT INTO t4 VALUES(2, 2, NULL); + } +} {0 {}} +do_test unique-4.5 { + execsql { + SELECT * FROM t4 + } +} {1 2 3 {} 2 {} {} 3 4 2 2 {}} + +# Ticket #1301. Any NULL value in a set of unique columns should +# cause the rows to be distinct. +# +do_test unique-4.6 { + catchsql { + INSERT INTO t4 VALUES(NULL, 2, NULL); + } +} {0 {}} +do_test unique-4.7 { + execsql {SELECT * FROM t4} +} {1 2 3 {} 2 {} {} 3 4 2 2 {} {} 2 {}} +do_test unique-4.8 { + catchsql {CREATE UNIQUE INDEX i4a ON t4(a,b)} +} {0 {}} +do_test unique-4.9 { + catchsql {CREATE UNIQUE INDEX i4b ON t4(a,b,c)} +} {0 {}} +do_test unique-4.10 { + catchsql {CREATE UNIQUE INDEX i4c ON t4(b)} +} {1 {indexed columns are not unique}} +integrity_check unique-4.99 + +# Test the error message generation logic. In particular, make sure we +# do not overflow the static buffer used to generate the error message. +# +do_test unique-5.1 { + execsql { + CREATE TABLE t5( + first_column_with_long_name, + second_column_with_long_name, + third_column_with_long_name, + fourth_column_with_long_name, + fifth_column_with_long_name, + sixth_column_with_long_name, + UNIQUE( + first_column_with_long_name, + second_column_with_long_name, + third_column_with_long_name, + fourth_column_with_long_name, + fifth_column_with_long_name, + sixth_column_with_long_name + ) + ); + INSERT INTO t5 VALUES(1,2,3,4,5,6); + SELECT * FROM t5; + } +} {1 2 3 4 5 6} +do_test unique-5.2 { + catchsql { + INSERT INTO t5 VALUES(1,2,3,4,5,6); + } +} {1 {columns first_column_with_long_name, second_column_with_long_name, third_column_with_long_name, fourth_column_with_long_name, fifth_column_with_long_name, ... are not unique}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/update.test b/libraries/sqlite/unix/sqlite-3.5.1/test/update.test new file mode 100644 index 0000000..d56c342 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/update.test @@ -0,0 +1,596 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the UPDATE statement. 
+# +# $Id: update.test,v 1.17 2005/01/21 03:12:16 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Try to update an non-existent table +# +do_test update-1.1 { + set v [catch {execsql {UPDATE test1 SET f2=5 WHERE f1<1}} msg] + lappend v $msg +} {1 {no such table: test1}} + +# Try to update a read-only table +# +do_test update-2.1 { + set v [catch \ + {execsql {UPDATE sqlite_master SET name='xyz' WHERE name='123'}} msg] + lappend v $msg +} {1 {table sqlite_master may not be modified}} + +# Create a table to work with +# +do_test update-3.1 { + execsql {CREATE TABLE test1(f1 int,f2 int)} + for {set i 1} {$i<=10} {incr i} { + set sql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + execsql $sql + } + execsql {SELECT * FROM test1 ORDER BY f1} +} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024} + +# Unknown column name in an expression +# +do_test update-3.2 { + set v [catch {execsql {UPDATE test1 SET f1=f3*2 WHERE f2==32}} msg] + lappend v $msg +} {1 {no such column: f3}} +do_test update-3.3 { + set v [catch {execsql {UPDATE test1 SET f1=test2.f1*2 WHERE f2==32}} msg] + lappend v $msg +} {1 {no such column: test2.f1}} +do_test update-3.4 { + set v [catch {execsql {UPDATE test1 SET f3=f1*2 WHERE f2==32}} msg] + lappend v $msg +} {1 {no such column: f3}} + +# Actually do some updates +# +do_test update-3.5 { + execsql {UPDATE test1 SET f2=f2*3} +} {} +do_test update-3.6 { + execsql {SELECT * FROM test1 ORDER BY f1} +} {1 6 2 12 3 24 4 48 5 96 6 192 7 384 8 768 9 1536 10 3072} +do_test update-3.7 { + execsql {PRAGMA count_changes=on} + execsql {UPDATE test1 SET f2=f2/3 WHERE f1<=5} +} {5} +do_test update-3.8 { + execsql {SELECT * FROM test1 ORDER BY f1} +} {1 2 2 4 3 8 4 16 5 32 6 192 7 384 8 768 9 1536 10 3072} +do_test update-3.9 { + execsql {UPDATE test1 SET f2=f2/3 WHERE f1>5} +} {5} +do_test update-3.10 { + execsql {SELECT * FROM test1 ORDER BY f1} +} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024} + +# Swap the values of f1 and f2 for all elements +# +do_test update-3.11 { + execsql {UPDATE test1 SET F2=f1, F1=f2} +} {10} +do_test update-3.12 { + execsql {SELECT * FROM test1 ORDER BY F1} +} {2 1 4 2 8 3 16 4 32 5 64 6 128 7 256 8 512 9 1024 10} +do_test update-3.13 { + execsql {PRAGMA count_changes=off} + execsql {UPDATE test1 SET F2=f1, F1=f2} +} {} +do_test update-3.14 { + execsql {SELECT * FROM test1 ORDER BY F1} +} {1 2 2 4 3 8 4 16 5 32 6 64 7 128 8 256 9 512 10 1024} + +# Create duplicate entries and make sure updating still +# works. 
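# Illustrative sketch, not part of the original update.test: PRAGMA
# count_changes=on makes INSERT/UPDATE/DELETE statements return the number of
# affected rows as a result row, which is why tests such as update-3.7 and
# update-4.6 around this point expect values like {5} and {2} from execsql.
# The "ccdemo" handle and table name are invented for this sketch.
sqlite3 ccdemo :memory:
ccdemo eval {
  CREATE TABLE t(a);
  INSERT INTO t VALUES(1);
  INSERT INTO t VALUES(2);
  PRAGMA count_changes=on;
}
puts [ccdemo eval {UPDATE t SET a=a+1}]     ;# 2 -- the change count comes back as a row
ccdemo close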
+# +do_test update-4.0 { + execsql { + DELETE FROM test1 WHERE f1<=5; + INSERT INTO test1(f1,f2) VALUES(8,88); + INSERT INTO test1(f1,f2) VALUES(8,888); + INSERT INTO test1(f1,f2) VALUES(77,128); + INSERT INTO test1(f1,f2) VALUES(777,128); + } + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-4.1 { + execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128} +do_test update-4.2 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128} +do_test update-4.3 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-4.4 { + execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128} +do_test update-4.5 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128} +do_test update-4.6 { + execsql { + PRAGMA count_changes=on; + UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128; + } +} {2} +do_test update-4.7 { + execsql { + PRAGMA count_changes=off; + SELECT * FROM test1 ORDER BY f1,f2 + } +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} + +# Repeat the previous sequence of tests with an index. +# +do_test update-5.0 { + execsql {CREATE INDEX idx1 ON test1(f1)} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-5.1 { + execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128} +do_test update-5.2 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128} +do_test update-5.3 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-5.4 { + execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128} +do_test update-5.4.1 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-5.4.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {778 128} +do_test update-5.4.3 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-5.5 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128} +} {} +do_test update-5.5.1 { + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128} +do_test update-5.5.2 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-5.5.3 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-5.5.4 { + execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-5.5.5 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-5.6 { + execsql { + PRAGMA count_changes=on; + UPDATE 
test1 SET f1=f1-1 WHERE f1<=100 and f2==128; + } +} {2} +do_test update-5.6.1 { + execsql { + PRAGMA count_changes=off; + SELECT * FROM test1 ORDER BY f1,f2 + } +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-5.6.2 { + execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2} +} {77 128} +do_test update-5.6.3 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-5.6.4 { + execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-5.6.5 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 256 8 888} + +# Repeat the previous sequence of tests with a different index. +# +execsql {PRAGMA synchronous=FULL} +do_test update-6.0 { + execsql {DROP INDEX idx1} + execsql {CREATE INDEX idx1 ON test1(f2)} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-6.1 { + execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128} +do_test update-6.1.1 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 89 8 257 8 889} +do_test update-6.1.2 { + execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2} +} {8 89} +do_test update-6.1.3 { + execsql {SELECT * FROM test1 WHERE f1==88 ORDER BY f1,f2} +} {} +do_test update-6.2 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128} +do_test update-6.3 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-6.3.1 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 256 8 888} +do_test update-6.3.2 { + execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2} +} {} +do_test update-6.3.3 { + execsql {SELECT * FROM test1 WHERE f2==88 ORDER BY f1,f2} +} {8 88} +do_test update-6.4 { + execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128} +do_test update-6.4.1 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-6.4.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {778 128} +do_test update-6.4.3 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-6.5 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128} +do_test update-6.5.1 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-6.5.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-6.5.3 { + execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-6.5.4 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-6.6 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-6.6.1 { + execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2} +} {77 128} +do_test update-6.6.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-6.6.3 { + execsql 
{SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-6.6.4 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 256 8 888} + +# Repeat the previous sequence of tests with multiple +# indices +# +do_test update-7.0 { + execsql {CREATE INDEX idx2 ON test1(f2)} + execsql {CREATE INDEX idx3 ON test1(f1,f2)} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-7.1 { + execsql {UPDATE test1 SET f2=f2+1 WHERE f1==8} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 889 9 512 10 1024 77 128 777 128} +do_test update-7.1.1 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 89 8 257 8 889} +do_test update-7.1.2 { + execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2} +} {8 89} +do_test update-7.1.3 { + execsql {SELECT * FROM test1 WHERE f1==88 ORDER BY f1,f2} +} {} +do_test update-7.2 { + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2>800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 89 8 257 8 888 9 512 10 1024 77 128 777 128} +do_test update-7.3 { + # explain {UPDATE test1 SET f2=f2-1 WHERE f1==8 and F2<300} + execsql {UPDATE test1 SET f2=f2-1 WHERE f1==8 and f2<800} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-7.3.1 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 256 8 888} +do_test update-7.3.2 { + execsql {SELECT * FROM test1 WHERE f2==89 ORDER BY f1,f2} +} {} +do_test update-7.3.3 { + execsql {SELECT * FROM test1 WHERE f2==88 ORDER BY f1,f2} +} {8 88} +do_test update-7.4 { + execsql {UPDATE test1 SET f1=f1+1 WHERE f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 778 128} +do_test update-7.4.1 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-7.4.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {778 128} +do_test update-7.4.3 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-7.5 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1>100 and f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 8 88 8 128 8 256 8 888 9 512 10 1024 78 128 777 128} +do_test update-7.5.1 { + execsql {SELECT * FROM test1 WHERE f1==78 ORDER BY f1,f2} +} {78 128} +do_test update-7.5.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-7.5.3 { + execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-7.5.4 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 128 8 256 8 888} +do_test update-7.6 { + execsql {UPDATE test1 SET f1=f1-1 WHERE f1<=100 and f2==128} + execsql {SELECT * FROM test1 ORDER BY f1,f2} +} {6 64 7 128 8 88 8 256 8 888 9 512 10 1024 77 128 777 128} +do_test update-7.6.1 { + execsql {SELECT * FROM test1 WHERE f1==77 ORDER BY f1,f2} +} {77 128} +do_test update-7.6.2 { + execsql {SELECT * FROM test1 WHERE f1==778 ORDER BY f1,f2} +} {} +do_test update-7.6.3 { + execsql {SELECT * FROM test1 WHERE f1==777 ORDER BY f1,f2} +} {777 128} +do_test update-7.6.4 { + execsql {SELECT * FROM test1 WHERE f1==8 ORDER BY f1,f2} +} {8 88 8 256 8 888} + +# Error messages +# +do_test update-9.1 { + set v [catch {execsql { + UPDATE test1 SET x=11 WHERE f1=1025 + }} msg] + lappend v $msg +} {1 {no such column: x}} +do_test update-9.2 { + set v [catch {execsql { + UPDATE test1 SET 
f1=x(11) WHERE f1=1025 + }} msg] + lappend v $msg +} {1 {no such function: x}} +do_test update-9.3 { + set v [catch {execsql { + UPDATE test1 SET f1=11 WHERE x=1025 + }} msg] + lappend v $msg +} {1 {no such column: x}} +do_test update-9.4 { + set v [catch {execsql { + UPDATE test1 SET f1=11 WHERE x(f1)=1025 + }} msg] + lappend v $msg +} {1 {no such function: x}} + +# Try doing updates on a unique column where the value does not +# really change. +# +do_test update-10.1 { + execsql { + DROP TABLE test1; + CREATE TABLE t1( + a integer primary key, + b UNIQUE, + c, d, + e, f, + UNIQUE(c,d) + ); + INSERT INTO t1 VALUES(1,2,3,4,5,6); + INSERT INTO t1 VALUES(2,3,4,4,6,7); + SELECT * FROM t1 + } +} {1 2 3 4 5 6 2 3 4 4 6 7} +do_test update-10.2 { + catchsql { + UPDATE t1 SET a=1, e=9 WHERE f=6; + SELECT * FROM t1; + } +} {0 {1 2 3 4 9 6 2 3 4 4 6 7}} +do_test update-10.3 { + catchsql { + UPDATE t1 SET a=1, e=10 WHERE f=7; + SELECT * FROM t1; + } +} {1 {PRIMARY KEY must be unique}} +do_test update-10.4 { + catchsql { + SELECT * FROM t1; + } +} {0 {1 2 3 4 9 6 2 3 4 4 6 7}} +do_test update-10.5 { + catchsql { + UPDATE t1 SET b=2, e=11 WHERE f=6; + SELECT * FROM t1; + } +} {0 {1 2 3 4 11 6 2 3 4 4 6 7}} +do_test update-10.6 { + catchsql { + UPDATE t1 SET b=2, e=12 WHERE f=7; + SELECT * FROM t1; + } +} {1 {column b is not unique}} +do_test update-10.7 { + catchsql { + SELECT * FROM t1; + } +} {0 {1 2 3 4 11 6 2 3 4 4 6 7}} +do_test update-10.8 { + catchsql { + UPDATE t1 SET c=3, d=4, e=13 WHERE f=6; + SELECT * FROM t1; + } +} {0 {1 2 3 4 13 6 2 3 4 4 6 7}} +do_test update-10.9 { + catchsql { + UPDATE t1 SET c=3, d=4, e=14 WHERE f=7; + SELECT * FROM t1; + } +} {1 {columns c, d are not unique}} +do_test update-10.10 { + catchsql { + SELECT * FROM t1; + } +} {0 {1 2 3 4 13 6 2 3 4 4 6 7}} + +# Make sure we can handle a subquery in the where clause. +# +ifcapable subquery { + do_test update-11.1 { + execsql { + UPDATE t1 SET e=e+1 WHERE b IN (SELECT b FROM t1); + SELECT b,e FROM t1; + } + } {2 14 3 7} + do_test update-11.2 { + execsql { + UPDATE t1 SET e=e+1 WHERE a IN (SELECT a FROM t1); + SELECT a,e FROM t1; + } + } {1 15 2 8} +} + +integrity_check update-12.1 + +# Ticket 602. Updates should occur in the same order as the records +# were discovered in the WHERE clause. 
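# Illustrative sketch, not part of the original update.test: the ticket 602
# behaviour tested below, in miniature.  Shifting every rowid down by one only
# succeeds because rows are updated in the order the scan discovers them
# (ascending rowid), so each target rowid has already been vacated.  The
# "orddemo" handle and table name are invented for this sketch.
sqlite3 orddemo :memory:
orddemo eval {
  CREATE TABLE t(a);
  INSERT INTO t VALUES(1);
  INSERT INTO t VALUES(2);
  INSERT INTO t VALUES(3);
  UPDATE t SET rowid=rowid-1;               -- rowids 1,2,3 become 0,1,2
}
puts [orddemo eval {SELECT rowid, a FROM t}]   ;# 0 1 1 2 2 3
orddemo close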
+# +do_test update-13.1 { + execsql { + BEGIN; + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + INSERT INTO t2 SELECT a+2 FROM t2; + INSERT INTO t2 SELECT a+4 FROM t2; + INSERT INTO t2 SELECT a+8 FROM t2; + INSERT INTO t2 SELECT a+16 FROM t2; + INSERT INTO t2 SELECT a+32 FROM t2; + INSERT INTO t2 SELECT a+64 FROM t2; + INSERT INTO t2 SELECT a+128 FROM t2; + INSERT INTO t2 SELECT a+256 FROM t2; + INSERT INTO t2 SELECT a+512 FROM t2; + INSERT INTO t2 SELECT a+1024 FROM t2; + COMMIT; + SELECT count(*) FROM t2; + } +} {2048} +do_test update-13.2 { + execsql { + SELECT count(*) FROM t2 WHERE a=rowid; + } +} {2048} +do_test update-13.3 { + execsql { + UPDATE t2 SET rowid=rowid-1; + SELECT count(*) FROM t2 WHERE a=rowid+1; + } +} {2048} +do_test update-13.3 { + execsql { + UPDATE t2 SET rowid=rowid+10000; + UPDATE t2 SET rowid=rowid-9999; + SELECT count(*) FROM t2 WHERE a=rowid; + } +} {2048} +do_test update-13.4 { + execsql { + BEGIN; + INSERT INTO t2 SELECT a+2048 FROM t2; + INSERT INTO t2 SELECT a+4096 FROM t2; + INSERT INTO t2 SELECT a+8192 FROM t2; + SELECT count(*) FROM t2 WHERE a=rowid; + COMMIT; + } +} 16384 +do_test update-13.5 { + execsql { + UPDATE t2 SET rowid=rowid-1; + SELECT count(*) FROM t2 WHERE a=rowid+1; + } +} 16384 + +integrity_check update-13.6 + +ifcapable {trigger} { +# Test for proper detection of malformed WHEN clauses on UPDATE triggers. +# +do_test update-14.1 { + execsql { + CREATE TABLE t3(a,b,c); + CREATE TRIGGER t3r1 BEFORE UPDATE on t3 WHEN nosuchcol BEGIN + SELECT 'illegal WHEN clause'; + END; + } +} {} +do_test update-14.2 { + catchsql { + UPDATE t3 SET a=1; + } +} {1 {no such column: nosuchcol}} +do_test update-14.3 { + execsql { + CREATE TABLE t4(a,b,c); + CREATE TRIGGER t4r1 AFTER UPDATE on t4 WHEN nosuchcol BEGIN + SELECT 'illegal WHEN clause'; + END; + } +} {} +do_test update-14.4 { + catchsql { + UPDATE t4 SET a=1; + } +} {1 {no such column: nosuchcol}} + +} ;# ifcapable {trigger} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/utf16.test b/libraries/sqlite/unix/sqlite-3.5.1/test/utf16.test new file mode 100644 index 0000000..872648c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/utf16.test @@ -0,0 +1,75 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. 
+# +# $Id: utf16.test,v 1.6 2007/01/04 16:37:04 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test2 +proc finish_test {} {} +set ISQUICK 1 + +if { [llength $argv]>0 } { + set FILES $argv + set argv [list] +} else { + set F { + alter.test alter3.test + auth.test bind.test blob.test capi2.test capi3.test collate1.test + collate2.test collate3.test collate4.test collate5.test collate6.test + conflict.test date.test delete.test expr.test fkey1.test func.test + hook.test index.test insert2.test insert.test interrupt.test in.test + intpkey.test ioerr.test join2.test join.test lastinsert.test + laststmtchanges.test limit.test lock2.test lock.test main.test + memdb.test minmax.test misc1.test misc2.test misc3.test notnull.test + null.test progress.test quote.test rowid.test select1.test select2.test + select3.test select4.test select5.test select6.test sort.test + subselect.test tableapi.test table.test temptable.test + trace.test trigger1.test trigger2.test trigger3.test + trigger4.test types2.test types.test unique.test update.test + vacuum.test view.test where.test + } + foreach f $F {lappend FILES $testdir/$f} +} + +rename sqlite3 real_sqlite3 +proc sqlite3 {args} { + set r [eval "real_sqlite3 $args"] + if { [llength $args] == 2 } { + [lindex $args 0] eval {pragma encoding = 'UTF-16'} + } + set r +} + +rename do_test really_do_test +proc do_test {args} { + set sc [concat really_do_test "utf16-[lindex $args 0]" [lrange $args 1 end]] + eval $sc +} + +foreach f $FILES { + source $f + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + } +} + +rename sqlite3 "" +rename real_sqlite3 sqlite3 +rename finish_test "" +rename really_finish_test2 finish_test +rename do_test "" +rename really_do_test do_test +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/utf16align.test b/libraries/sqlite/unix/sqlite-3.5.1/test/utf16align.test new file mode 100644 index 0000000..fb41b77 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/utf16align.test @@ -0,0 +1,84 @@ +# 2006 February 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains code to verify that the SQLITE_UTF16_ALIGNED +# flag passed into the sqlite3_create_collation() function insures +# that all strings passed to that function are aligned on an even +# byte boundary. +# +# $Id: utf16align.test,v 1.1 2006/02/16 18:16:38 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Skip this entire test if we do not support UTF16 +# +ifcapable !utf16 { + finish_test + return +} + +# Create a database with a UTF16 encoding. Put in lots of string +# data of varying lengths. 
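# Illustrative sketch, not part of the original utf16align.test: the encoding
# pragma used below must be issued before any schema is created, and querying
# it afterwards reports the byte order actually chosen.  The "encdemo" handle
# is invented for this sketch.
sqlite3 encdemo :memory:
encdemo eval {
  PRAGMA encoding='UTF-16';                 -- only effective on an empty database
  CREATE TABLE t(x);
}
puts [encdemo eval {PRAGMA encoding}]       ;# UTF-16le or UTF-16be, per machine byte order
encdemo close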
+# +do_test utf16align-1.0 { + set unaligned_string_counter 0 + add_alignment_test_collations [sqlite3_connection_pointer db] + execsql { + PRAGMA encoding=UTF16; + CREATE TABLE t1( + id INTEGER PRIMARY KEY, + spacer TEXT, + a TEXT COLLATE utf16_aligned, + b TEXT COLLATE utf16_unaligned + ); + INSERT INTO t1(a) VALUES("abc"); + INSERT INTO t1(a) VALUES("defghi"); + INSERT INTO t1(a) VALUES("jklmnopqrstuv"); + INSERT INTO t1(a) VALUES("wxyz0123456789-"); + UPDATE t1 SET b=a||'-'||a; + INSERT INTO t1(a,b) SELECT a||b, b||a FROM t1; + INSERT INTO t1(a,b) SELECT a||b, b||a FROM t1; + INSERT INTO t1(a,b) SELECT a||b, b||a FROM t1; + INSERT INTO t1(a,b) VALUES('one','two'); + INSERT INTO t1(a,b) SELECT a, b FROM t1; + UPDATE t1 SET spacer = CASE WHEN rowid&1 THEN 'x' ELSE 'xx' END; + SELECT count(*) FROM t1; + } +} 66 +do_test utf16align-1.1 { + set unaligned_string_counter +} 0 + +# Creating an index that uses the unaligned collation. We should see +# some unaligned strings passed to the collating function. +# +do_test utf16align-1.2 { + execsql { + CREATE INDEX t1i1 ON t1(spacer, b); + } + # puts $unaligned_string_counter + expr {$unaligned_string_counter>0} +} 1 + +# Create another index that uses the aligned collation. This time +# there should be no unaligned accesses +# +do_test utf16align-1.3 { + set unaligned_string_counter 0 + execsql { + CREATE INDEX t1i2 ON t1(spacer, a); + } + expr {$unaligned_string_counter>0} +} 0 +integrity_check utf16align-1.4 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum.test new file mode 100644 index 0000000..bcf204d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum.test @@ -0,0 +1,359 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the VACUUM statement. +# +# $Id: vacuum.test,v 1.38 2006/10/04 11:55:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the VACUUM statement is disabled in the current build, skip all +# the tests in this file. 
+# +ifcapable {!vacuum} { + finish_test + return +} +if $AUTOVACUUM { + finish_test + return +} + +set fcnt 1 +proc cksum {{db db}} { + set sql "SELECT name, type, sql FROM sqlite_master ORDER BY name, type" + set txt [$db eval $sql]\n + set sql "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" + foreach tbl [$db eval $sql] { + append txt [$db eval "SELECT * FROM $tbl"]\n + } + foreach prag {default_cache_size} { + append txt $prag-[$db eval "PRAGMA $prag"]\n + } + if 0 { + global fcnt + set fd [open dump$fcnt.txt w] + puts -nonewline $fd $txt + close $fd + incr fcnt + } + set cksum [string length $txt]-[md5 $txt] + # puts $cksum-[file size test.db] + return $cksum +} +do_test vacuum-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(NULL,randstr(10,100),randstr(5,50)); + INSERT INTO t1 VALUES(123456,randstr(10,100),randstr(5,50)); + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT NULL, b||'-'||rowid, c||'-'||rowid FROM t1; + CREATE INDEX i1 ON t1(b,c); + CREATE UNIQUE INDEX i2 ON t1(c,a); + CREATE TABLE t2 AS SELECT * FROM t1; + COMMIT; + DROP TABLE t2; + } + set ::size1 [file size test.db] + set ::cksum [cksum] + expr {$::cksum!=""} +} {1} +do_test vacuum-1.2 { + execsql { + VACUUM; + } + cksum +} $cksum +ifcapable vacuum { + do_test vacuum-1.3 { + expr {[file size test.db]<$::size1} + } {1} +} +do_test vacuum-1.4 { + set sql_script { + BEGIN; + CREATE TABLE t2 AS SELECT * FROM t1; + CREATE TABLE t3 AS SELECT * FROM t1; + CREATE VIEW v1 AS SELECT b, c FROM t3; + CREATE TRIGGER r1 AFTER DELETE ON t2 BEGIN SELECT 1; END; + COMMIT; + DROP TABLE t2; + } + # If the library was compiled to omit view support, comment out the + # create view in the script $sql_script before executing it. Similarly, + # if triggers are not supported, comment out the trigger definition. + ifcapable !view { + regsub {CREATE VIEW} $sql_script {-- CREATE VIEW} sql_script + } + ifcapable !trigger { + regsub {CREATE TRIGGER} $sql_script {-- CREATE TRIGGER} sql_script + } + execsql $sql_script + set ::size1 [file size test.db] + set ::cksum [cksum] + expr {$::cksum!=""} +} {1} +do_test vacuum-1.5 { + execsql { + VACUUM; + } + cksum +} $cksum + +ifcapable vacuum { + do_test vacuum-1.6 { + expr {[file size test.db]<$::size1} + } {1} +} +ifcapable vacuum { + do_test vacuum-2.1 { + catchsql { + BEGIN; + VACUUM; + COMMIT; + } + } {1 {cannot VACUUM from within a transaction}} + catch {db eval COMMIT} +} +do_test vacuum-2.2 { + sqlite3 db2 test.db + execsql { + BEGIN; + CREATE TABLE t4 AS SELECT * FROM t1; + CREATE TABLE t5 AS SELECT * FROM t1; + COMMIT; + DROP TABLE t4; + DROP TABLE t5; + } db2 + set ::cksum [cksum db2] + catchsql { + VACUUM + } +} {0 {}} +do_test vacuum-2.3 { + cksum +} $cksum +do_test vacuum-2.4 { + catch {db2 eval {SELECT count(*) FROM sqlite_master}} + cksum db2 +} $cksum + +# Make sure the schema cookie is incremented by vacuum. 
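+# (A minimal sketch of how the cookie could also be observed directly,
+# assuming the hexio helpers from tester.tcl: the schema cookie is the
+# 4-byte big-endian integer at byte offset 40 of the database header,
+# so something like
+#
+#   set before [hexio_get_int [hexio_read test.db 40 4]]
+#   execsql { VACUUM }
+#   set after  [hexio_get_int [hexio_read test.db 40 4]]
+#
+# should show $after greater than $before.  The tests below check the
+# same thing indirectly, through a second connection (db3) that holds a
+# stale copy of the schema.)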
+# +do_test vacuum-2.5 { + execsql { + BEGIN; + CREATE TABLE t6 AS SELECT * FROM t1; + CREATE TABLE t7 AS SELECT * FROM t1; + COMMIT; + } + sqlite3 db3 test.db + execsql { + -- The "SELECT * FROM sqlite_master" statement ensures that this test + -- works when shared-cache is enabled. If shared-cache is enabled, then + -- db3 shares a cache with db2 (but not db - it was opened as + -- "./test.db"). + SELECT * FROM sqlite_master; + SELECT * FROM t7 LIMIT 1 + } db3 + execsql { + VACUUM; + } + execsql { + INSERT INTO t7 VALUES(1234567890,'hello','world'); + } db3 + execsql { + SELECT * FROM t7 WHERE a=1234567890 + } +} {1234567890 hello world} +integrity_check vacuum-2.6 +do_test vacuum-2.7 { + execsql { + SELECT * FROM t7 WHERE a=1234567890 + } db3 +} {1234567890 hello world} +do_test vacuum-2.8 { + execsql { + INSERT INTO t7 SELECT * FROM t6; + SELECT count(*) FROM t7; + } +} 513 +integrity_check vacuum-2.9 +do_test vacuum-2.10 { + execsql { + DELETE FROM t7; + SELECT count(*) FROM t7; + } db3 +} 0 +integrity_check vacuum-2.11 +db3 close + + +# Ticket #427. Make sure VACUUM works when the EMPTY_RESULT_CALLBACKS +# pragma is turned on. +# +do_test vacuum-3.1 { + db close + db2 close + file delete test.db + sqlite3 db test.db + execsql { + PRAGMA empty_result_callbacks=on; + VACUUM; + } +} {} + +# Ticket #464. Make sure VACUUM works with the sqlite3_prepare() API. +# +do_test vacuum-4.1 { + db close + sqlite3 db test.db; set DB [sqlite3_connection_pointer db] + set VM [sqlite3_prepare $DB {VACUUM} -1 TAIL] + sqlite3_step $VM +} {SQLITE_DONE} +do_test vacuum-4.2 { + sqlite3_finalize $VM +} SQLITE_OK + +# Ticket #515. VACUUM after deleting and recreating the table that +# a view refers to. Omit this test if the library is not view-enabled. +# +ifcapable view { +do_test vacuum-5.1 { + db close + file delete -force test.db + sqlite3 db test.db + catchsql { + CREATE TABLE Test (TestID int primary key); + INSERT INTO Test VALUES (NULL); + CREATE VIEW viewTest AS SELECT * FROM Test; + + BEGIN; + CREATE TABLE tempTest (TestID int primary key, Test2 int NULL); + INSERT INTO tempTest SELECT TestID, 1 FROM Test; + DROP TABLE Test; + CREATE TABLE Test(TestID int primary key, Test2 int NULL); + INSERT INTO Test SELECT * FROM tempTest; + DROP TABLE tempTest; + COMMIT; + VACUUM; + } +} {0 {}} +do_test vacuum-5.2 { + catchsql { + VACUUM; + } +} {0 {}} +} ;# ifcapable view + +# Ensure vacuum works with complicated tables names. +do_test vacuum-6.1 { + execsql { + CREATE TABLE "abc abc"(a, b, c); + INSERT INTO "abc abc" VALUES(1, 2, 3); + VACUUM; + } +} {} +do_test vacuum-6.2 { + execsql { + select * from "abc abc"; + } +} {1 2 3} + +# Also ensure that blobs survive a vacuum. +ifcapable {bloblit} { + do_test vacuum-6.3 { + execsql { + DELETE FROM "abc abc"; + INSERT INTO "abc abc" VALUES(X'00112233', NULL, NULL); + VACUUM; + } + } {} + do_test vacuum-6.4 { + execsql { + select count(*) from "abc abc" WHERE a = X'00112233'; + } + } {1} +} + +# Check what happens when an in-memory database is vacuumed. The +# [file delete] command covers us in case the library was compiled +# without in-memory database support. +# +file delete -force :memory: +do_test vacuum-7.0 { + sqlite3 db2 :memory: + execsql { + CREATE TABLE t1(t); + VACUUM; + } db2 +} {} +db2 close + +# Ticket #873. VACUUM a database that has ' in its name. 
+# +do_test vacuum-8.1 { + file delete -force a'z.db + file delete -force a'z.db-journal + sqlite3 db2 a'z.db + execsql { + CREATE TABLE t1(t); + VACUUM; + } db2 +} {} +db2 close + +# Ticket #1095: Vacuum a table that uses AUTOINCREMENT +# +ifcapable {autoinc} { + do_test vacuum-9.1 { + execsql { + DROP TABLE 'abc abc'; + CREATE TABLE autoinc(a INTEGER PRIMARY KEY AUTOINCREMENT, b); + INSERT INTO autoinc(b) VALUES('hi'); + INSERT INTO autoinc(b) VALUES('there'); + DELETE FROM autoinc; + } + set ::cksum [cksum] + expr {$::cksum!=""} + } {1} + do_test vacuum-9.2 { + execsql { + VACUUM; + } + cksum + } $::cksum + do_test vacuum-9.3 { + execsql { + INSERT INTO autoinc(b) VALUES('one'); + INSERT INTO autoinc(b) VALUES('two'); + } + set ::cksum [cksum] + expr {$::cksum!=""} + } {1} + do_test vacuum-9.4 { + execsql { + VACUUM; + } + cksum + } $::cksum +} + +file delete -force {a'z.db} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum2.test new file mode 100644 index 0000000..5a6aca7 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vacuum2.test @@ -0,0 +1,60 @@ +# 2005 February 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the VACUUM statement. +# +# $Id: vacuum2.test,v 1.3 2007/07/19 16:35:17 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the VACUUM statement is disabled in the current build, skip all +# the tests in this file. +# +ifcapable {!vacuum||!autoinc} { + finish_test + return +} +if $AUTOVACUUM { + finish_test + return +} + +# Ticket #1121 - make sure vacuum works if all autoincrement tables +# have been deleted. +# +do_test vacuum2-1.1 { + execsql { + CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y); + DROP TABLE t1; + VACUUM; + } +} {} + +# Ticket #2518. Make sure vacuum increments the change counter +# in the database header. +# +do_test vacuum2-2.1 { + execsql { + CREATE TABLE t1(x); + CREATE TABLE t2(y); + INSERT INTO t1 VALUES(1); + } + hexio_get_int [hexio_read test.db 24 4] +} [expr {[hexio_get_int [hexio_read test.db 24 4]]+3}] +do_test vacuum2-2.1 { + execsql { + VACUUM + } + hexio_get_int [hexio_read test.db 24 4] +} [expr {[hexio_get_int [hexio_read test.db 24 4]]+1}] + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/varint.test b/libraries/sqlite/unix/sqlite-3.5.1/test/varint.test new file mode 100644 index 0000000..974e88f --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/varint.test @@ -0,0 +1,32 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is variable-length integer encoding scheme. 
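+# A varint packs a 64-bit integer into 1 to 9 bytes: each of the first
+# eight bytes carries 7 bits (most significant group first) and sets its
+# high bit when another byte follows; a ninth byte, if present, carries
+# a full 8 bits.  For example, the value 300 (binary 1 0010 1100) is
+# stored as the two bytes 0x82 0x2C.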
+# +# $Id: varint.test,v 1.1 2004/05/18 15:57:42 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test reading and writing of varints. +# +set cnt 0 +foreach start {0 100 10000 1000000 0x10000000} { + foreach mult {1 0x10 0x100 0x1000 0x10000 0x100000 0x1000000 0x10000000} { + foreach incr {1 500 10000 50000000} { + incr cnt + do_test varint-1.$cnt { + btree_varint_test $start $mult 5000 $incr + } {} + } + } +} diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/view.test b/libraries/sqlite/unix/sqlite-3.5.1/test/view.test new file mode 100644 index 0000000..83ffec2 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/view.test @@ -0,0 +1,501 @@ +# 2002 February 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing VIEW statements. +# +# $Id: view.test,v 1.33 2006/09/11 23:45:50 drh Exp $ +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Omit this entire file if the library is not configured with views enabled. +ifcapable !view { + finish_test + return +} + +do_test view-1.0 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(4,5,6); + INSERT INTO t1 VALUES(7,8,9); + SELECT * FROM t1; + } +} {1 2 3 4 5 6 7 8 9} + +do_test view-1.1 { + execsql { + BEGIN; + CREATE VIEW IF NOT EXISTS v1 AS SELECT a,b FROM t1; + SELECT * FROM v1 ORDER BY a; + } +} {1 2 4 5 7 8} +do_test view-1.2 { + catchsql { + ROLLBACK; + SELECT * FROM v1 ORDER BY a; + } +} {1 {no such table: v1}} +do_test view-1.3 { + execsql { + CREATE VIEW v1 AS SELECT a,b FROM t1; + SELECT * FROM v1 ORDER BY a; + } +} {1 2 4 5 7 8} +do_test view-1.3.1 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM v1 ORDER BY a; + } +} {1 2 4 5 7 8} +do_test view-1.4 { + catchsql { + DROP VIEW IF EXISTS v1; + SELECT * FROM v1 ORDER BY a; + } +} {1 {no such table: v1}} +do_test view-1.5 { + execsql { + CREATE VIEW v1 AS SELECT a,b FROM t1; + SELECT * FROM v1 ORDER BY a; + } +} {1 2 4 5 7 8} +do_test view-1.6 { + catchsql { + DROP TABLE t1; + SELECT * FROM v1 ORDER BY a; + } +} {1 {no such table: main.t1}} +do_test view-1.7 { + execsql { + CREATE TABLE t1(x,a,b,c); + INSERT INTO t1 VALUES(1,2,3,4); + INSERT INTO t1 VALUES(4,5,6,7); + INSERT INTO t1 VALUES(7,8,9,10); + SELECT * FROM v1 ORDER BY a; + } +} {2 3 5 6 8 9} +do_test view-1.8 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM v1 ORDER BY a; + } +} {2 3 5 6 8 9} + +do_test view-2.1 { + execsql { + CREATE VIEW v2 AS SELECT * FROM t1 WHERE a>5 + }; # No semicolon + execsql2 { + SELECT * FROM v2; + } +} {x 7 a 8 b 9 c 10} +do_test view-2.2 { + catchsql { + INSERT INTO v2 VALUES(1,2,3,4); + } +} {1 {cannot modify v2 because it is a view}} +do_test view-2.3 { + catchsql { + UPDATE v2 SET a=10 WHERE a=5; + } +} {1 {cannot modify v2 because it is a view}} +do_test view-2.4 { + catchsql { + DELETE FROM v2; + } +} {1 {cannot modify v2 because it is a view}} +do_test view-2.5 { + execsql { + INSERT INTO t1 VALUES(11,12,13,14); + SELECT * FROM v2 ORDER BY x; + } +} {7 8 9 10 11 12 13 14} +do_test view-2.6 { + execsql { + SELECT x FROM v2 WHERE a>10 + } +} {11} + +# Test that column name of views are 
generated correctly. +# +do_test view-3.1 { + execsql2 { + SELECT * FROM v1 LIMIT 1 + } +} {a 2 b 3} +do_test view-3.2 { + execsql2 { + SELECT * FROM v2 LIMIT 1 + } +} {x 7 a 8 b 9 c 10} +do_test view-3.3 { + execsql2 { + DROP VIEW v1; + CREATE VIEW v1 AS SELECT a AS 'xyz', b+c AS 'pqr', c-b FROM t1; + SELECT * FROM v1 LIMIT 1 + } +} {xyz 2 pqr 7 c-b 1} + +ifcapable compound { +do_test view-3.4 { + execsql2 { + CREATE VIEW v3 AS SELECT a FROM t1 UNION SELECT b FROM t1 ORDER BY b; + SELECT * FROM v3 LIMIT 4; + } +} {a 2 a 3 a 5 a 6} +do_test view-3.5 { + execsql2 { + CREATE VIEW v4 AS + SELECT a, b FROM t1 + UNION + SELECT b AS 'x', a AS 'y' FROM t1 + ORDER BY x, y; + SELECT b FROM v4 ORDER BY b LIMIT 4; + } +} {b 2 b 3 b 5 b 6} +} ;# ifcapable compound + + +do_test view-4.1 { + catchsql { + DROP VIEW t1; + } +} {1 {use DROP TABLE to delete table t1}} +do_test view-4.2 { + execsql { + SELECT 1 FROM t1 LIMIT 1; + } +} 1 +do_test view-4.3 { + catchsql { + DROP TABLE v1; + } +} {1 {use DROP VIEW to delete view v1}} +do_test view-4.4 { + execsql { + SELECT 1 FROM v1 LIMIT 1; + } +} {1} +do_test view-4.5 { + catchsql { + CREATE INDEX i1v1 ON v1(xyz); + } +} {1 {views may not be indexed}} + +do_test view-5.1 { + execsql { + CREATE TABLE t2(y,a); + INSERT INTO t2 VALUES(22,2); + INSERT INTO t2 VALUES(33,3); + INSERT INTO t2 VALUES(44,4); + INSERT INTO t2 VALUES(55,5); + SELECT * FROM t2; + } +} {22 2 33 3 44 4 55 5} +do_test view-5.2 { + execsql { + CREATE VIEW v5 AS + SELECT t1.x AS v, t2.y AS w FROM t1 JOIN t2 USING(a); + SELECT * FROM v5; + } +} {1 22 4 55} + +# Verify that the view v5 gets flattened. see sqliteFlattenSubquery(). +# This will only work if EXPLAIN is enabled. +# Ticket #272 +# +ifcapable {explain} { +do_test view-5.3 { + lsearch [execsql { + EXPLAIN SELECT * FROM v5; + }] OpenEphemeral +} {-1} +do_test view-5.4 { + execsql { + SELECT * FROM v5 AS a, t2 AS b WHERE a.w=b.y; + } +} {1 22 22 2 4 55 55 5} +do_test view-5.5 { + lsearch [execsql { + EXPLAIN SELECT * FROM v5 AS a, t2 AS b WHERE a.w=b.y; + }] OpenEphemeral +} {-1} +do_test view-5.6 { + execsql { + SELECT * FROM t2 AS b, v5 AS a WHERE a.w=b.y; + } +} {22 2 1 22 55 5 4 55} +do_test view-5.7 { + lsearch [execsql { + EXPLAIN SELECT * FROM t2 AS b, v5 AS a WHERE a.w=b.y; + }] OpenEphemeral +} {-1} +do_test view-5.8 { + execsql { + SELECT * FROM t1 AS a, v5 AS b, t2 AS c WHERE a.x=b.v AND b.w=c.y; + } +} {1 2 3 4 1 22 22 2 4 5 6 7 4 55 55 5} +do_test view-5.9 { + lsearch [execsql { + EXPLAIN SELECT * FROM t1 AS a, v5 AS b, t2 AS c WHERE a.x=b.v AND b.w=c.y; + }] OpenEphemeral +} {-1} +} ;# endif explain + +do_test view-6.1 { + execsql { + SELECT min(x), min(a), min(b), min(c), min(a+b+c) FROM v2; + } +} {7 8 9 10 27} +do_test view-6.2 { + execsql { + SELECT max(x), max(a), max(b), max(c), max(a+b+c) FROM v2; + } +} {11 12 13 14 39} + +do_test view-7.1 { + execsql { + CREATE TABLE test1(id integer primary key, a); + CREATE TABLE test2(id integer, b); + INSERT INTO test1 VALUES(1,2); + INSERT INTO test2 VALUES(1,3); + CREATE VIEW test AS + SELECT test1.id, a, b + FROM test1 JOIN test2 ON test2.id=test1.id; + SELECT * FROM test; + } +} {1 2 3} +do_test view-7.2 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM test; + } +} {1 2 3} +do_test view-7.3 { + execsql { + DROP VIEW test; + CREATE VIEW test AS + SELECT test1.id, a, b + FROM test1 JOIN test2 USING(id); + SELECT * FROM test; + } +} {1 2 3} +do_test view-7.4 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM test; + } +} {1 2 3} +do_test view-7.5 { + 
execsql { + DROP VIEW test; + CREATE VIEW test AS + SELECT test1.id, a, b + FROM test1 NATURAL JOIN test2; + SELECT * FROM test; + } +} {1 2 3} +do_test view-7.6 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM test; + } +} {1 2 3} + +do_test view-8.1 { + execsql { + CREATE VIEW v6 AS SELECT pqr, xyz FROM v1; + SELECT * FROM v6 ORDER BY xyz; + } +} {7 2 13 5 19 8 27 12} +do_test view-8.2 { + db close + sqlite3 db test.db + execsql { + SELECT * FROM v6 ORDER BY xyz; + } +} {7 2 13 5 19 8 27 12} +do_test view-8.3 { + execsql { + CREATE VIEW v7 AS SELECT pqr+xyz AS a FROM v6; + SELECT * FROM v7 ORDER BY a; + } +} {9 18 27 39} + +ifcapable subquery { + do_test view-8.4 { + execsql { + CREATE VIEW v8 AS SELECT max(cnt) AS mx FROM + (SELECT a%2 AS eo, count(*) AS cnt FROM t1 GROUP BY eo); + SELECT * FROM v8; + } + } 3 + do_test view-8.5 { + execsql { + SELECT mx+10, mx*2 FROM v8; + } + } {13 6} + do_test view-8.6 { + execsql { + SELECT mx+10, pqr FROM v6, v8 WHERE xyz=2; + } + } {13 7} + do_test view-8.7 { + execsql { + SELECT mx+10, pqr FROM v6, v8 WHERE xyz>2; + } + } {13 13 13 19 13 27} +} ;# ifcapable subquery + +# Tests for a bug found by Michiel de Wit involving ORDER BY in a VIEW. +# +do_test view-9.1 { + execsql { + INSERT INTO t2 SELECT * FROM t2 WHERE a<5; + INSERT INTO t2 SELECT * FROM t2 WHERE a<4; + INSERT INTO t2 SELECT * FROM t2 WHERE a<3; + SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1; + } +} {1 2 4 8} +do_test view-9.2 { + execsql { + SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1 LIMIT 3; + } +} {1 2 4} +do_test view-9.3 { + execsql { + CREATE VIEW v9 AS + SELECT DISTINCT count(*) FROM t2 GROUP BY a ORDER BY 1 LIMIT 3; + SELECT * FROM v9; + } +} {1 2 4} +do_test view-9.4 { + execsql { + SELECT * FROM v9 ORDER BY 1 DESC; + } +} {4 2 1} +do_test view-9.5 { + execsql { + CREATE VIEW v10 AS + SELECT DISTINCT a, count(*) FROM t2 GROUP BY a ORDER BY 2 LIMIT 3; + SELECT * FROM v10; + } +} {5 1 4 2 3 4} +do_test view-9.6 { + execsql { + SELECT * FROM v10 ORDER BY 1; + } +} {3 4 4 2 5 1} + +# Tables with columns having peculiar quoted names used in views +# Ticket #756. +# +do_test view-10.1 { + execsql { + CREATE TABLE t3("9" integer, [4] text); + INSERT INTO t3 VALUES(1,2); + CREATE VIEW v_t3_a AS SELECT a.[9] FROM t3 AS a; + CREATE VIEW v_t3_b AS SELECT "4" FROM t3; + SELECT * FROM v_t3_a; + } +} {1} +do_test view-10.2 { + execsql { + SELECT * FROM v_t3_b; + } +} {2} + +do_test view-11.1 { + execsql { + CREATE TABLE t4(a COLLATE NOCASE); + INSERT INTO t4 VALUES('This'); + INSERT INTO t4 VALUES('this'); + INSERT INTO t4 VALUES('THIS'); + SELECT * FROM t4 WHERE a = 'THIS'; + } +} {This this THIS} +ifcapable subquery { + do_test view-11.2 { + execsql { + SELECT * FROM (SELECT * FROM t4) WHERE a = 'THIS'; + } + } {This this THIS} +} +do_test view-11.3 { + execsql { + CREATE VIEW v11 AS SELECT * FROM t4; + SELECT * FROM v11 WHERE a = 'THIS'; + } +} {This this THIS} + +# Ticket #1270: Do not allow parameters in view definitions. +# +do_test view-12.1 { + catchsql { + CREATE VIEW v12 AS SELECT a FROM t1 WHERE b=? 
+ } +} {1 {parameters are not allowed in views}} + +do_test view-13.1 { + file delete -force test2.db + catchsql { + ATTACH 'test2.db' AS two; + CREATE TABLE two.t2(x,y); + CREATE VIEW v13 AS SELECT y FROM two.t2; + } +} {1 {view v13 cannot reference objects in database two}} + +# Ticket #1658 +# +do_test view-14.1 { + catchsql { + CREATE TEMP VIEW t1 AS SELECT a,b FROM t1; + SELECT * FROM temp.t1; + } +} {1 {view t1 is circularly defined}} + +# Tickets #1688, #1709 +# +do_test view-15.1 { + execsql2 { + CREATE VIEW v15 AS SELECT a AS x, b AS y FROM t1; + SELECT * FROM v15 LIMIT 1; + } +} {x 2 y 3} +do_test view-15.2 { + execsql2 { + SELECT x, y FROM v15 LIMIT 1 + } +} {x 2 y 3} + +do_test view-16.1 { + catchsql { + CREATE VIEW IF NOT EXISTS v1 AS SELECT * FROM t1; + } +} {0 {}} +do_test view-16.2 { + execsql { + SELECT sql FROM sqlite_master WHERE name='v1' + } +} {{CREATE VIEW v1 AS SELECT a AS 'xyz', b+c AS 'pqr', c-b FROM t1}} +do_test view-16.3 { + catchsql { + DROP VIEW IF EXISTS nosuchview + } +} {0 {}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab1.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab1.test new file mode 100644 index 0000000..b86ca4a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab1.test @@ -0,0 +1,946 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is creating and dropping virtual tables. +# +# $Id: vtab1.test,v 1.46 2007/09/03 15:03:21 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab||!schema_pragmas { + finish_test + return +} + +#---------------------------------------------------------------------- +# Organization of tests in this file: +# +# vtab1-1.*: Error conditions and other issues surrounding creation/connection +# of a virtual module. +# vtab1-2.*: Test sqlite3_declare_vtab() and the xConnect/xDisconnect methods. +# vtab1-3.*: Table scans and WHERE clauses. +# vtab1-4.*: Table scans and ORDER BY clauses. +# vtab1-5.*: Test queries that include joins. This brings the +# sqlite3_index_info.estimatedCost variable into play. +# vtab1-6.*: Test UPDATE/INSERT/DELETE on vtables. +# vtab1-7.*: Test sqlite3_last_insert_rowid(). +# +# This file uses the "echo" module (see src/test8.c). Refer to comments +# in that file for the special behaviour of the Tcl $echo_module variable. +# +# TODO: +# * How to test the sqlite3_index_constraint_usage.omit field? +# * vtab1-5.* +# + + +#---------------------------------------------------------------------- +# Test cases vtab1.1.* +# + +# We cannot create a virtual table if the module has not been registered. +# +do_test vtab1-1.1 { + catchsql { + CREATE VIRTUAL TABLE t1 USING echo; + } +} {1 {no such module: echo}} +do_test vtab1-1.2 { + execsql { + SELECT name FROM sqlite_master ORDER BY 1 + } +} {} + +# Register the module +register_echo_module [sqlite3_connection_pointer db] + +# Once a module has been registered, virtual tables using that module +# may be created. However if a module xCreate() fails to call +# sqlite3_declare_vtab() an error will be raised and the table not created. 
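+# (A sketch in the Tcl wrapper used later in this file: a constructor is
+# expected to have made a call equivalent to
+#
+#   sqlite3_declare_vtab [sqlite3_connection_pointer db] \
+#       {CREATE TABLE x(a, b, c)}
+#
+# before returning; the declared column list supplies the virtual
+# table's schema, and calling the routine outside of xCreate/xConnect
+# fails, as test vtab1.10-2 below demonstrates.)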
+# +# The "echo" module does not invoke sqlite3_declare_vtab() if it is +# passed zero arguments. +# +do_test vtab1-1.3 { + catchsql { + CREATE VIRTUAL TABLE t1 USING echo; + } +} {1 {vtable constructor did not declare schema: t1}} +do_test vtab1-1.4 { + execsql { + SELECT name FROM sqlite_master ORDER BY 1 + } +} {} + +# The "echo" module xCreate method returns an error and does not create +# the virtual table if it is passed an argument that does not correspond +# to an existing real table in the same database. +# +do_test vtab1-1.5 { + catchsql { + CREATE VIRTUAL TABLE t1 USING echo(no_such_table); + } +} {1 {vtable constructor failed: t1}} +do_test vtab1-1.6 { + execsql { + SELECT name FROM sqlite_master ORDER BY 1 + } +} {} + +# Ticket #2156. Using the sqlite3_prepare_v2() API, make sure that +# a CREATE VIRTUAL TABLE statement can be used multiple times. +# +do_test vtab1-1.2152.1 { + set DB [sqlite3_connection_pointer db] + set sql {CREATE VIRTUAL TABLE t2152a USING echo(t2152b)} + set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] + sqlite3_step $STMT +} SQLITE_ERROR +do_test vtab-1.2152.2 { + sqlite3_reset $STMT + sqlite3_step $STMT +} SQLITE_ERROR +do_test vtab-1.2152.3 { + sqlite3_reset $STMT + db eval {CREATE TABLE t2152b(x,y)} + sqlite3_step $STMT +} SQLITE_DONE +do_test vtab-1.2152.4 { + sqlite3_finalize $STMT + db eval {DROP TABLE t2152a; DROP TABLE t2152b} +} {} + +# Test to make sure nothing goes wrong and no memory is leaked if we +# select an illegal table-name (i.e a reserved name or the name of a +# table that already exists). +# +do_test vtab1-1.7 { + catchsql { + CREATE VIRTUAL TABLE sqlite_master USING echo; + } +} {1 {object name reserved for internal use: sqlite_master}} +do_test vtab1-1.8 { + catchsql { + CREATE TABLE treal(a, b, c); + CREATE VIRTUAL TABLE treal USING echo(treal); + } +} {1 {table treal already exists}} +do_test vtab1-1.9 { + execsql { + DROP TABLE treal; + SELECT name FROM sqlite_master ORDER BY 1 + } +} {} + +do_test vtab1-1.10 { + execsql { + CREATE TABLE treal(a, b, c); + CREATE VIRTUAL TABLE techo USING echo(treal); + } + db close + sqlite3 db test.db + catchsql { + SELECT * FROM techo; + } +} {1 {no such module: echo}} +do_test vtab1-1.11 { + catchsql { + INSERT INTO techo VALUES(1, 2, 3); + } +} {1 {no such module: echo}} +do_test vtab1-1.12 { + catchsql { + UPDATE techo SET a = 10; + } +} {1 {no such module: echo}} +do_test vtab1-1.13 { + catchsql { + DELETE FROM techo; + } +} {1 {no such module: echo}} +do_test vtab1-1.14 { + catchsql { + PRAGMA table_info(techo) + } +} {1 {no such module: echo}} +do_test vtab1-1.15 { + catchsql { + DROP TABLE techo; + } +} {1 {no such module: echo}} + +register_echo_module [sqlite3_connection_pointer db] +register_echo_module [sqlite3_connection_pointer db] + +# Test an error message returned from a v-table constructor. +# +do_test vtab1-1.16 { + execsql { + DROP TABLE techo; + CREATE TABLE logmsg(log); + } + catchsql { + CREATE VIRTUAL TABLE techo USING echo(treal, logmsg); + } +} {1 {table 'logmsg' already exists}} + +do_test vtab1-1.17 { + execsql { + DROP TABLE treal; + DROP TABLE logmsg; + SELECT sql FROM sqlite_master; + } +} {} + +#---------------------------------------------------------------------- +# Test cases vtab1.2.* +# +# At this point, the database is completely empty. The echo module +# has already been registered. + +# If a single argument is passed to the echo module during table +# creation, it is assumed to be the name of a table in the same +# database. 
The echo module attempts to set the schema of the +# new virtual table to be the same as the existing database table. +# +do_test vtab1-2.1 { + execsql { + CREATE TABLE template(a, b, c); + } + execsql { PRAGMA table_info(template); } +} [list \ + 0 a {} 0 {} 0 \ + 1 b {} 0 {} 0 \ + 2 c {} 0 {} 0 \ +] +do_test vtab1-2.2 { + execsql { + CREATE VIRTUAL TABLE t1 USING echo(template); + } + execsql { PRAGMA table_info(t1); } +} [list \ + 0 a {} 0 {} 0 \ + 1 b {} 0 {} 0 \ + 2 c {} 0 {} 0 \ +] + +# Test that the database can be unloaded. This should invoke the xDisconnect() +# callback for the successfully create virtual table (t1). +# +do_test vtab1-2.3 { + set echo_module [list] + db close + set echo_module +} [list xDisconnect] + +# Re-open the database. This should not cause any virtual methods to +# be called. The invocation of xConnect() is delayed until the virtual +# table schema is first required by the compiler. +# +do_test vtab1-2.4 { + set echo_module [list] + sqlite3 db test.db + db cache size 0 + set echo_module +} {} + +# Try to query the virtual table schema. This should fail, as the +# echo module has not been registered with this database connection. +# +do_test vtab1.2.6 { + catchsql { PRAGMA table_info(t1); } +} {1 {no such module: echo}} + +# Register the module +register_echo_module [sqlite3_connection_pointer db] + +# Try to query the virtual table schema again. This time it should +# invoke the xConnect method and succeed. +# +do_test vtab1.2.7 { + execsql { PRAGMA table_info(t1); } +} [list \ + 0 a {} 0 {} 0 \ + 1 b {} 0 {} 0 \ + 2 c {} 0 {} 0 \ +] +do_test vtab1.2.8 { + set echo_module +} {xConnect echo main t1 template} + +# Drop table t1. This should cause the xDestroy (but not xDisconnect) method +# to be invoked. +do_test vtab1-2.5 { + set echo_module "" + execsql { + DROP TABLE t1; + } + set echo_module +} {xDestroy} + +do_test vtab1-2.6 { + execsql { + PRAGMA table_info(t1); + } +} {} +do_test vtab1-2.7 { + execsql { + SELECT sql FROM sqlite_master; + } +} [list {CREATE TABLE template(a, b, c)}] +# Clean up other test artifacts: +do_test vtab1-2.8 { + execsql { + DROP TABLE template; + SELECT sql FROM sqlite_master; + } +} [list] + +#---------------------------------------------------------------------- +# Test case vtab1-3 test table scans and the echo module's +# xBestIndex/xFilter handling of WHERE conditions. + +do_test vtab1-3.1 { + set echo_module "" + execsql { + CREATE TABLE treal(a INTEGER, b INTEGER, c); + CREATE INDEX treal_idx ON treal(b); + CREATE VIRTUAL TABLE t1 USING echo(treal); + } + set echo_module +} [list xCreate echo main t1 treal \ + xSync echo(treal) \ + xCommit echo(treal) \ +] + +# Test that a SELECT on t1 doesn't crash. No rows are returned +# because the underlying real table is currently empty. +# +do_test vtab1-3.2 { + execsql { + SELECT a, b, c FROM t1; + } +} {} + +# Put some data into the table treal. Then try a few simple SELECT +# statements on t1. 
+# +do_test vtab1-3.3 { + execsql { + INSERT INTO treal VALUES(1, 2, 3); + INSERT INTO treal VALUES(4, 5, 6); + SELECT * FROM t1; + } +} {1 2 3 4 5 6} +do_test vtab1-3.4 { + execsql { + SELECT a FROM t1; + } +} {1 4} +do_test vtab1-3.5 { + execsql { + SELECT rowid FROM t1; + } +} {1 2} +do_test vtab1-3.6 { + set echo_module "" + execsql { + SELECT * FROM t1; + } +} {1 2 3 4 5 6} +do_test vtab1-3.7 { + execsql { + SELECT rowid, * FROM t1; + } +} {1 1 2 3 2 4 5 6} +do_test vtab1-3.8 { + execsql { + SELECT a AS d, b AS e, c AS f FROM t1; + } +} {1 2 3 4 5 6} + +# Execute some SELECT statements with WHERE clauses on the t1 table. +# Then check the echo_module variable (written to by the module methods +# in test8.c) to make sure the xBestIndex() and xFilter() methods were +# called correctly. +# +do_test vtab1-3.8 { + set echo_module "" + execsql { + SELECT * FROM t1; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xFilter {SELECT rowid, * FROM 'treal'} ] +do_test vtab1-3.9 { + set echo_module "" + execsql { + SELECT * FROM t1 WHERE b = 5; + } +} {4 5 6} +do_test vtab1-3.10 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal' WHERE b = ?} \ + xFilter {SELECT rowid, * FROM 'treal' WHERE b = ?} 5 ] +do_test vtab1-3.10 { + set echo_module "" + execsql { + SELECT * FROM t1 WHERE b >= 5 AND b <= 10; + } +} {4 5 6} +do_test vtab1-3.11 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal' WHERE b >= ? AND b <= ?} \ + xFilter {SELECT rowid, * FROM 'treal' WHERE b >= ? AND b <= ?} 5 10 ] +do_test vtab1-3.12 { + set echo_module "" + execsql { + SELECT * FROM t1 WHERE b BETWEEN 2 AND 10; + } +} {1 2 3 4 5 6} +do_test vtab1-3.13 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal' WHERE b >= ? AND b <= ?} \ + xFilter {SELECT rowid, * FROM 'treal' WHERE b >= ? AND b <= ?} 2 10 ] + +# Add a function for the MATCH operator. Everything always matches! +#proc test_match {lhs rhs} { +# lappend ::echo_module MATCH $lhs $rhs +# return 1 +#} +#db function match test_match + +set echo_module "" +do_test vtab1-3.12 { + set echo_module "" + catchsql { + SELECT * FROM t1 WHERE a MATCH 'string'; + } +} {1 {unable to use function MATCH in the requested context}} +do_test vtab1-3.13 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xFilter {SELECT rowid, * FROM 'treal'}] +do_test vtab1-3.14 { + set echo_module "" + execsql { + SELECT * FROM t1 WHERE b MATCH 'string'; + } +} {} +do_test vtab1-3.15 { + set echo_module +} [list xBestIndex \ + {SELECT rowid, * FROM 'treal' WHERE b LIKE (SELECT '%'||?||'%')} \ + xFilter \ + {SELECT rowid, * FROM 'treal' WHERE b LIKE (SELECT '%'||?||'%')} \ + string ] + +#---------------------------------------------------------------------- +# Test case vtab1-3 test table scans and the echo module's +# xBestIndex/xFilter handling of ORDER BY clauses. + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. 
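+# For example, with the two rows inserted into treal above,
+#
+#   cksort { SELECT b FROM t1 ORDER BY b }
+#
+# is expected to return {2 5 nosort} when the echo module satisfies the
+# ORDER BY itself, and {2 5 sort} if SQLite has to add a sorter.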
+# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +do_test vtab1-4.1 { + set echo_module "" + cksort { + SELECT b FROM t1 ORDER BY b; + } +} {2 5 nosort} +do_test vtab1-4.2 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal' ORDER BY b ASC} \ + xFilter {SELECT rowid, * FROM 'treal' ORDER BY b ASC} ] +do_test vtab1-4.3 { + set echo_module "" + cksort { + SELECT b FROM t1 ORDER BY b DESC; + } +} {5 2 nosort} +do_test vtab1-4.4 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal' ORDER BY b DESC} \ + xFilter {SELECT rowid, * FROM 'treal' ORDER BY b DESC} ] +do_test vtab1-4.3 { + set echo_module "" + cksort { + SELECT b FROM t1 ORDER BY b||''; + } +} {2 5 sort} +do_test vtab1-4.4 { + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xFilter {SELECT rowid, * FROM 'treal'} ] + +execsql { + DROP TABLE t1; + DROP TABLE treal; +} + +#---------------------------------------------------------------------- +# Test cases vtab1-5 test SELECT queries that include joins on virtual +# tables. + +proc filter {log} { + set out [list] + for {set ii 0} {$ii < [llength $log]} {incr ii} { + if {[lindex $log $ii] eq "xFilter"} { + lappend out xFilter + lappend out [lindex $log [expr $ii+1]] + } + } + return $out +} + +do_test vtab1-5-1 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(d, e, f); + INSERT INTO t1 VALUES(1, 'red', 'green'); + INSERT INTO t1 VALUES(2, 'blue', 'black'); + INSERT INTO t2 VALUES(1, 'spades', 'clubs'); + INSERT INTO t2 VALUES(2, 'hearts', 'diamonds'); + CREATE VIRTUAL TABLE et1 USING echo(t1); + CREATE VIRTUAL TABLE et2 USING echo(t2); + } +} {} + +do_test vtab1-5-2 { + set echo_module "" + execsql { + SELECT * FROM et1, et2; + } +} [list \ + 1 red green 1 spades clubs \ + 1 red green 2 hearts diamonds \ + 2 blue black 1 spades clubs \ + 2 blue black 2 hearts diamonds \ +] +do_test vtab1-5-3 { + filter $echo_module +} [list \ + xFilter {SELECT rowid, * FROM 't1'} \ + xFilter {SELECT rowid, * FROM 't2'} \ + xFilter {SELECT rowid, * FROM 't2'} \ +] +do_test vtab1-5-4 { + set echo_module "" + execsql { + SELECT * FROM et1, et2 WHERE et2.d = 2; + } +} [list \ + 1 red green 2 hearts diamonds \ + 2 blue black 2 hearts diamonds \ +] +do_test vtab1-5-5 { + filter $echo_module +} [list \ + xFilter {SELECT rowid, * FROM 't1'} \ + xFilter {SELECT rowid, * FROM 't2'} \ + xFilter {SELECT rowid, * FROM 't2'} \ +] +do_test vtab1-5-6 { + execsql { + CREATE INDEX i1 ON t2(d); + } + + db close + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + + set ::echo_module "" + execsql { + SELECT * FROM et1, et2 WHERE et2.d = 2; + } +} [list \ + 1 red green 2 hearts diamonds \ + 2 blue black 2 hearts diamonds \ +] +do_test vtab1-5-7 { + filter $::echo_module +} [list \ + xFilter {SELECT rowid, * FROM 't2' WHERE d = ?} \ + xFilter {SELECT rowid, * FROM 't1'} \ +] + +execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE et1; + DROP TABLE et2; +} + +#---------------------------------------------------------------------- +# Test cases vtab1-6 test INSERT, UPDATE and DELETE operations +# on virtual tables. 
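+# Because the echo module forwards writes to its underlying real table,
+# each statement against techo below should also be visible in treal;
+# for instance, after the first INSERT both
+#
+#   execsql { SELECT * FROM techo }
+#   execsql { SELECT * FROM treal }
+#
+# are expected to return {1 2 3}.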
+do_test vtab1-6-1 { + execsql { SELECT sql FROM sqlite_master } +} {} +do_test vtab1-6-2 { + execsql { + CREATE TABLE treal(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE techo USING echo(treal); + SELECT name FROM sqlite_master WHERE type = 'table'; + } +} {treal techo} +do_test vtab1-6-3 { + execsql { + INSERT INTO techo VALUES(1, 2, 3); + SELECT * FROM techo; + } +} {1 2 3} +do_test vtab1-6-4 { + execsql { + UPDATE techo SET a = 5; + SELECT * FROM techo; + } +} {5 2 3} + +do_test vtab1-6-5 { + execsql { + UPDATE techo set a = a||b||c; + SELECT * FROM techo; + } +} {523 2 3} + +do_test vtab1-6-6 { + execsql { + UPDATE techo set rowid = 10; + SELECT rowid FROM techo; + } +} {10} + +do_test vtab1-6-7 { + execsql { + DELETE FROM techo; + SELECT * FROM techo; + } +} {} + + +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db +execsql { + CREATE TABLE techo(a PRIMARY KEY, b, c); +} db2 +proc check_echo_table {tn} { + set ::data1 [execsql {SELECT rowid, * FROM techo}] + set ::data2 [execsql {SELECT rowid, * FROM techo} db2] + do_test $tn { + string equal $::data1 $::data2 + } 1 +} +set tn 0 +foreach stmt [list \ + {INSERT INTO techo VALUES('abc', 'def', 'ghi')} \ + {INSERT INTO techo SELECT a||'.'||rowid, b, c FROM techo} \ + {INSERT INTO techo SELECT a||'x'||rowid, b, c FROM techo} \ + {INSERT INTO techo SELECT a||'y'||rowid, b, c FROM techo} \ + {DELETE FROM techo WHERE (oid % 3) = 0} \ + {UPDATE techo set rowid = 100 WHERE rowid = 1} \ + {INSERT INTO techo(a, b) VALUES('hello', 'world')} \ + {DELETE FROM techo} \ +] { + execsql $stmt + execsql $stmt db2 + check_echo_table vtab1-6.8.[incr tn] +} + +db2 close + + + +#---------------------------------------------------------------------- +# Test cases vtab1-7 tests that the value returned by +# sqlite3_last_insert_rowid() is set correctly when rows are inserted +# into virtual tables. +do_test vtab1.7-1 { + execsql { + CREATE TABLE real_abc(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE echo_abc USING echo(real_abc); + } +} {} +do_test vtab1.7-2 { + execsql { + INSERT INTO echo_abc VALUES(1, 2, 3); + SELECT last_insert_rowid(); + } +} {1} +do_test vtab1.7-3 { + execsql { + INSERT INTO echo_abc(rowid) VALUES(31427); + SELECT last_insert_rowid(); + } +} {31427} +do_test vtab1.7-4 { + execsql { + INSERT INTO echo_abc SELECT a||'.v2', b, c FROM echo_abc; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-5 { + execsql { + SELECT rowid, a, b, c FROM echo_abc + } +} [list 1 1 2 3 \ + 31427 {} {} {} \ + 31428 1.v2 2 3 \ + 31429 {} {} {} \ +] + +# Now test that DELETE and UPDATE operations do not modify the value. 
+do_test vtab1.7-6 { + execsql { + UPDATE echo_abc SET c = 5 WHERE b = 2; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-7 { + execsql { + UPDATE echo_abc SET rowid = 5 WHERE rowid = 1; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-8 { + execsql { + DELETE FROM echo_abc WHERE b = 2; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-9 { + execsql { + SELECT rowid, a, b, c FROM echo_abc + } +} [list 31427 {} {} {} \ + 31429 {} {} {} \ +] +do_test vtab1.7-10 { + execsql { + DELETE FROM echo_abc WHERE b = 2; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-11 { + execsql { + SELECT rowid, a, b, c FROM real_abc + } +} [list 31427 {} {} {} \ + 31429 {} {} {} \ +] +do_test vtab1.7-12 { + execsql { + DELETE FROM echo_abc; + SELECT last_insert_rowid(); + } +} {31429} +do_test vtab1.7-13 { + execsql { + SELECT rowid, a, b, c FROM real_abc + } +} {} + +do_test vtab1.8-1 { + set echo_module "" + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.e2 USING echo(real_abc); + } + set echo_module +} [list xCreate echo aux e2 real_abc \ + xSync echo(real_abc) \ + xCommit echo(real_abc) \ +] +do_test vtab1.8-2 { + execsql { + DROP TABLE aux.e2; + DROP TABLE treal; + DROP TABLE techo; + DROP TABLE echo_abc; + DROP TABLE real_abc; + } +} {} + +do_test vtab1.9-1 { + set echo_module "" + execsql { + CREATE TABLE r(a, b, c); + CREATE VIRTUAL TABLE e USING echo(r, e_log); + SELECT name FROM sqlite_master; + } +} {r e e_log} +do_test vtab1.9-2 { + execsql { + DROP TABLE e; + SELECT name FROM sqlite_master; + } +} {r} + +do_test vtab1.9-3 { + set echo_module "" + execsql { + CREATE VIRTUAL TABLE e USING echo(r, e_log, virtual 1 2 3 varchar(32)); + } + set echo_module +} [list \ + xCreate echo main e r e_log {virtual 1 2 3 varchar(32)} \ + xSync echo(r) \ + xCommit echo(r) \ +] + +do_test vtab1.10-1 { + execsql { + CREATE TABLE del(d); + CREATE VIRTUAL TABLE e2 USING echo(del); + } + db close + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + execsql { + DROP TABLE del; + } + catchsql { + SELECT * FROM e2; + } +} {1 {vtable constructor failed: e2}} +do_test vtab1.10-2 { + set rc [catch { + set ptr [sqlite3_connection_pointer db] + sqlite3_declare_vtab $ptr {CREATE TABLE abc(a, b, c)} + } msg] + list $rc $msg +} {1 {library routine called out of sequence}} +do_test vtab1.10-3 { + set ::echo_module_begin_fail r + catchsql { + INSERT INTO e VALUES(1, 2, 3); + } +} {1 {SQL logic error or missing database}} +do_test vtab1.10-4 { + catch {execsql { + EXPLAIN SELECT * FROM e WHERE rowid = 2; + EXPLAIN QUERY PLAN SELECT * FROM e WHERE rowid = 2 ORDER BY rowid; + }} +} {0} + +do_test vtab1.10-5 { + set echo_module "" + execsql { + SELECT * FROM e WHERE rowid||'' MATCH 'pattern'; + } + set echo_module +} [list \ + xBestIndex {SELECT rowid, * FROM 'r'} \ + xFilter {SELECT rowid, * FROM 'r'} \ +] +proc match_func {args} {return ""} +do_test vtab1.10-6 { + set echo_module "" + db function match match_func + execsql { + SELECT * FROM e WHERE match('pattern', rowid, 'pattern2'); + } + set echo_module +} [list \ + xBestIndex {SELECT rowid, * FROM 'r'} \ + xFilter {SELECT rowid, * FROM 'r'} \ +] + + +# Testing the xFindFunction interface +# +catch {rename ::echo_glob_overload {}} +do_test vtab1.11-1 { + execsql { + INSERT INTO r(a,b,c) VALUES(1,'?',99); + INSERT INTO r(a,b,c) VALUES(2,3,99); + SELECT a GLOB b FROM e + } +} {1 0} +proc ::echo_glob_overload {a b} { + return [list $b $a] +} +do_test vtab1.11-2 { + execsql { + SELECT a like 
'b' FROM e + } +} {0 0} +do_test vtab1.11-3 { + execsql { + SELECT a glob '2' FROM e + } +} {{1 2} {2 2}} +do_test vtab1.11-4 { + execsql { + SELECT glob('2',a) FROM e + } +} {0 1} +do_test vtab1.11-5 { + execsql { + SELECT glob(a,'2') FROM e + } +} {{2 1} {2 2}} + +#---------------------------------------------------------------------- +# Test the outcome if a constraint is encountered half-way through +# a multi-row INSERT that is inside a transaction +# +do_test vtab1.12-1 { + execsql { + CREATE TABLE b(a, b, c); + CREATE TABLE c(a UNIQUE, b, c); + INSERT INTO b VALUES(1, 'A', 'B'); + INSERT INTO b VALUES(2, 'C', 'D'); + INSERT INTO b VALUES(3, 'E', 'F'); + INSERT INTO c VALUES(3, 'G', 'H'); + CREATE VIRTUAL TABLE echo_c USING echo(c); + } +} {} + +# First test outside of a transaction. +do_test vtab1.12-2 { + catchsql { INSERT INTO echo_c SELECT * FROM b; } +} {1 {constraint failed}} +do_test vtab1.12-3 { + execsql { SELECT * FROM c } +} {3 G H} + +breakpoint + +# Now the real test - wrapped in a transaction. +do_test vtab1.12-4 { + execsql {BEGIN} + catchsql { INSERT INTO echo_c SELECT * FROM b; } +} {1 {constraint failed}} +do_test vtab1.12-5 { + execsql { SELECT * FROM c } +} {3 G H} +do_test vtab1.12-6 { + execsql { COMMIT } + execsql { SELECT * FROM c } +} {3 G H} + +unset -nocomplain echo_module_begin_fail +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab2.test new file mode 100644 index 0000000..641444a --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab2.test @@ -0,0 +1,90 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: vtab2.test,v 1.7 2007/02/14 09:19:37 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab||!schema_pragmas { + finish_test + return +} + +register_schema_module [sqlite3_connection_pointer db] +do_test vtab2-1.1 { + execsql { + CREATE VIRTUAL TABLE schema USING schema; + SELECT * FROM schema; + } +} [list \ + main schema 0 database {} 0 {} 0 \ + main schema 1 tablename {} 0 {} 0 \ + main schema 2 cid {} 0 {} 0 \ + main schema 3 name {} 0 {} 0 \ + main schema 4 type {} 0 {} 0 \ + main schema 5 not_null {} 0 {} 0 \ + main schema 6 dflt_value {} 0 {} 0 \ + main schema 7 pk {} 0 {} 0 \ +] + +# See ticket #2230. 
+# +do_test vtab2-1.2 { + execsql { + SELECT length(tablename) FROM schema GROUP by tablename; + } +} {6} +do_test vtab2-1.3 { + execsql { + SELECT tablename FROM schema GROUP by length(tablename); + } +} {schema} +do_test vtab2-1.4 { + execsql { + SELECT length(tablename) FROM schema GROUP by length(tablename); + } +} {6} + +register_tclvar_module [sqlite3_connection_pointer db] +do_test vtab2-2.1 { + set ::abc 123 + execsql { + CREATE VIRTUAL TABLE vars USING tclvar; + SELECT * FROM vars WHERE name='abc'; + } +} [list abc "" 123] +do_test vtab2-2.2 { + set A(1) 1 + set A(2) 4 + set A(3) 9 + execsql { + SELECT * FROM vars WHERE name='A'; + } +} [list A 1 1 A 2 4 A 3 9] +unset -nocomplain result +unset -nocomplain var +set result {} +foreach var [lsort [info vars tcl_*]] { + catch {lappend result $var [set $var]} +} +do_test vtab2-2.3 { + execsql { + SELECT name, value FROM vars + WHERE name MATCH 'tcl_*' AND arrayname = '' + ORDER BY name; + } +} $result +unset result +unset var + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab3.test new file mode 100644 index 0000000..2d7c679 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab3.test @@ -0,0 +1,142 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is the authorisation callback and virtual tables. +# +# $Id: vtab3.test,v 1.2 2006/06/20 11:01:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab||!auth { + finish_test + return +} + +set ::auth_fail 0 +set ::auth_log [list] +set ::auth_filter [list SQLITE_READ SQLITE_UPDATE SQLITE_SELECT SQLITE_PRAGMA] + +proc auth {code arg1 arg2 arg3 arg4} { + if {[lsearch $::auth_filter $code]>-1} { + return SQLITE_OK + } + lappend ::auth_log $code $arg1 $arg2 $arg3 $arg4 + incr ::auth_fail -1 + if {$::auth_fail == 0} { + return SQLITE_DENY + } + return SQLITE_OK +} + +do_test vtab3-1.1 { + execsql { + CREATE TABLE elephant( + name VARCHAR(32), + color VARCHAR(16), + age INTEGER, + UNIQUE(name, color) + ); + } +} {} + + +do_test vtab3-1.2 { + register_echo_module [sqlite3_connection_pointer db] + db authorizer ::auth + execsql { + CREATE VIRTUAL TABLE pachyderm USING echo(elephant); + } + set ::auth_log +} [list \ + SQLITE_INSERT sqlite_master {} main {} \ + SQLITE_CREATE_VTABLE pachyderm echo main {} \ +] + +do_test vtab3-1.3 { + set ::auth_log [list] + execsql { + DROP TABLE pachyderm; + } + set ::auth_log +} [list \ + SQLITE_DELETE sqlite_master {} main {} \ + SQLITE_DROP_VTABLE pachyderm echo main {} \ + SQLITE_DELETE pachyderm {} main {} \ + SQLITE_DELETE sqlite_master {} main {} \ +] + +do_test vtab3-1.4 { + set ::auth_fail 1 + catchsql { + CREATE VIRTUAL TABLE pachyderm USING echo(elephant); + } +} {1 {not authorized}} +do_test vtab3-1.5 { + execsql { + SELECT name FROM sqlite_master WHERE type = 'table'; + } +} {elephant} + +do_test vtab3-1.5 { + set ::auth_fail 2 + catchsql { + CREATE VIRTUAL TABLE pachyderm USING echo(elephant); + } +} {1 {not authorized}} +do_test vtab3-1.6 { + execsql { + SELECT name FROM sqlite_master WHERE type = 'table'; + } +} {elephant} + 
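+# With ::auth_fail set to 3 the countdown never reaches zero during the
+# CREATE VIRTUAL TABLE itself -- only two unfiltered authorizer
+# callbacks (SQLITE_INSERT on sqlite_master and SQLITE_CREATE_VTABLE)
+# are made -- so this time the statement is expected to succeed.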
+do_test vtab3-1.5 { + set ::auth_fail 3 + catchsql { + CREATE VIRTUAL TABLE pachyderm USING echo(elephant); + } +} {0 {}} +do_test vtab3-1.6 { + execsql { + SELECT name FROM sqlite_master WHERE type = 'table'; + } +} {elephant pachyderm} + +foreach i [list 1 2 3 4] { + set ::auth_fail $i + do_test vtab3-1.7.$i.1 { + set rc [catch { + execsql {DROP TABLE pachyderm;} + } msg] + if {$msg eq "authorization denied"} {set msg "not authorized"} + list $rc $msg + } {1 {not authorized}} + do_test vtab3-1.7.$i.2 { + execsql { + SELECT name FROM sqlite_master WHERE type = 'table'; + } + } {elephant pachyderm} +} +do_test vtab3-1.8.1 { + set ::auth_fail 0 + catchsql { + DROP TABLE pachyderm; + } +} {0 {}} +do_test vtab3-1.8.2 { + execsql { + SELECT name FROM sqlite_master WHERE type = 'table'; + } +} {elephant} + +finish_test + + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab4.test new file mode 100644 index 0000000..a8e3633 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab4.test @@ -0,0 +1,194 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus is on testing the following virtual table methods: +# +# xBegin +# xSync +# xCommit +# xRollback +# +# $Id: vtab4.test,v 1.2 2006/09/02 22:14:59 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +unset -nocomplain echo_module +unset -nocomplain echo_module_sync_fail + +ifcapable !vtab { + finish_test + return +} + +# Register the echo module +db cache size 0 +register_echo_module [sqlite3_connection_pointer db] + +do_test vtab4-1.1 { + execsql { + CREATE TABLE treal(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE techo USING echo(treal); + } +} {} + +# Test an INSERT, UPDATE and DELETE statement on the virtual table +# in an implicit transaction. Each should result in a single call +# to xBegin, xSync and xCommit. +# +do_test vtab4-1.2 { + set echo_module [list] + execsql { + INSERT INTO techo VALUES(1, 2, 3); + } + set echo_module +} {xBegin echo(treal) xSync echo(treal) xCommit echo(treal)} +do_test vtab4-1.3 { + set echo_module [list] + execsql { + UPDATE techo SET a = 2; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xBegin echo(treal) \ + xFilter {SELECT rowid, * FROM 'treal'} \ + xSync echo(treal) \ + xCommit echo(treal) \ +] +do_test vtab4-1.4 { + set echo_module [list] + execsql { + DELETE FROM techo; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xBegin echo(treal) \ + xFilter {SELECT rowid, * FROM 'treal'} \ + xSync echo(treal) \ + xCommit echo(treal) \ +] + +# Ensure xBegin is not called more than once in a single transaction. +# +do_test vtab4-2.1 { + set echo_module [list] + execsql { + BEGIN; + INSERT INTO techo VALUES(1, 2, 3); + INSERT INTO techo VALUES(4, 5, 6); + INSERT INTO techo VALUES(7, 8, 9); + COMMIT; + } + set echo_module +} {xBegin echo(treal) xSync echo(treal) xCommit echo(treal)} + +# Try a transaction with two virtual tables. 
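+# Each table should receive its own xBegin the first time a statement
+# writes to it inside the transaction, and then one xSync and one
+# xCommit apiece when the transaction commits, as the expected callback
+# list below verifies.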
+# +do_test vtab4-2.2 { + execsql { + CREATE TABLE sreal(a, b, c UNIQUE); + CREATE VIRTUAL TABLE secho USING echo(sreal); + } + set echo_module [list] + execsql { + BEGIN; + INSERT INTO secho SELECT * FROM techo; + DELETE FROM techo; + COMMIT; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'treal'} \ + xBegin echo(sreal) \ + xFilter {SELECT rowid, * FROM 'treal'} \ + xBestIndex {SELECT rowid, * FROM 'treal'} \ + xBegin echo(treal) \ + xFilter {SELECT rowid, * FROM 'treal'} \ + xSync echo(sreal) \ + xSync echo(treal) \ + xCommit echo(sreal) \ + xCommit echo(treal) \ +] +do_test vtab4-2.3 { + execsql { + SELECT * FROM secho; + } +} {1 2 3 4 5 6 7 8 9} +do_test vtab4-2.4 { + execsql { + SELECT * FROM techo; + } +} {} + +# Try an explicit ROLLBACK on a transaction with two open virtual tables. +do_test vtab4-2.5 { + set echo_module [list] + execsql { + BEGIN; + INSERT INTO techo SELECT * FROM secho; + DELETE FROM secho; + ROLLBACK; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'sreal'} \ + xBegin echo(treal) \ + xFilter {SELECT rowid, * FROM 'sreal'} \ + xBestIndex {SELECT rowid, * FROM 'sreal'} \ + xBegin echo(sreal) \ + xFilter {SELECT rowid, * FROM 'sreal'} \ + xRollback echo(treal) \ + xRollback echo(sreal) \ +] +do_test vtab4-2.6 { + execsql { + SELECT * FROM secho; + } +} {1 2 3 4 5 6 7 8 9} +do_test vtab4-2.7 { + execsql { + SELECT * FROM techo; + } +} {} + +do_test vtab4-3.1 { + set echo_module [list] + set echo_module_sync_fail treal + catchsql { + INSERT INTO techo VALUES(1, 2, 3); + } +} {1 {unknown error}} +do_test vtab4-3.2 { + set echo_module +} {xBegin echo(treal) xSync echo(treal) xRollback echo(treal)} + +breakpoint +do_test vtab4-3.3 { + set echo_module [list] + set echo_module_sync_fail sreal + catchsql { + BEGIN; + INSERT INTO techo SELECT * FROM secho; + DELETE FROM secho; + COMMIT; + } + set echo_module +} [list xBestIndex {SELECT rowid, * FROM 'sreal'} \ + xBegin echo(treal) \ + xFilter {SELECT rowid, * FROM 'sreal'} \ + xBestIndex {SELECT rowid, * FROM 'sreal'} \ + xBegin echo(sreal) \ + xFilter {SELECT rowid, * FROM 'sreal'} \ + xSync echo(treal) \ + xSync echo(sreal) \ + xRollback echo(treal) \ + xRollback echo(sreal) \ +] + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab5.test new file mode 100644 index 0000000..4fd678c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab5.test @@ -0,0 +1,153 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: vtab5.test,v 1.7 2007/06/27 15:53:35 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +# The following tests - vtab5-1.* - ensure that an INSERT, DELETE or UPDATE +# statement can be executed immediately after a CREATE or schema reload. The +# point here is testing that the parser always calls xConnect() before the +# schema of a virtual table is used. 
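+# (xCreate runs only when the CREATE VIRTUAL TABLE statement is first
+# executed; after the [db close]/[sqlite3 db test.db] cycles below the
+# table is re-attached through xConnect the first time its schema is
+# needed, which is exactly the path these tests exercise.)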
+# +register_echo_module [sqlite3_connection_pointer db] +do_test vtab5-1.1 { + execsql { + CREATE TABLE treal(a VARCHAR(16), b INTEGER, c FLOAT); + INSERT INTO treal VALUES('a', 'b', 'c'); + CREATE VIRTUAL TABLE techo USING echo(treal); + } +} {} +do_test vtab5.1.2 { + execsql { + SELECT * FROM techo; + } +} {a b c} +do_test vtab5.1.3 { + db close + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + execsql { + INSERT INTO techo VALUES('c', 'd', 'e'); + SELECT * FROM techo; + } +} {a b c c d e} +do_test vtab5.1.4 { + db close + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + execsql { + UPDATE techo SET a = 10; + SELECT * FROM techo; + } +} {10 b c 10 d e} +do_test vtab5.1.5 { + db close + sqlite3 db test.db + register_echo_module [sqlite3_connection_pointer db] + execsql { + DELETE FROM techo WHERE b > 'c'; + SELECT * FROM techo; + } +} {10 b c} +do_test vtab5.1.X { + execsql { + DROP TABLE techo; + DROP TABLE treal; + } +} {} + +# The following tests - vtab5-2.* - ensure that collation sequences +# assigned to virtual table columns via the "CREATE TABLE" statement +# passed to sqlite3_declare_vtab() are used correctly. +# +do_test vtab5.2.1 { + execsql { + CREATE TABLE strings(str COLLATE NOCASE); + INSERT INTO strings VALUES('abc1'); + INSERT INTO strings VALUES('Abc3'); + INSERT INTO strings VALUES('ABc2'); + INSERT INTO strings VALUES('aBc4'); + SELECT str FROM strings ORDER BY 1; + } +} {abc1 ABc2 Abc3 aBc4} +do_test vtab5.2.2 { + execsql { + CREATE VIRTUAL TABLE echo_strings USING echo(strings); + SELECT str FROM echo_strings ORDER BY 1; + } +} {abc1 ABc2 Abc3 aBc4} +do_test vtab5.2.3 { + execsql { + SELECT str||'' FROM echo_strings ORDER BY 1; + } +} {ABc2 Abc3 aBc4 abc1} + +# Test that it is impossible to create a triggger on a virtual table. +# +ifcapable trigger { + do_test vtab5.3.1 { + catchsql { + CREATE TRIGGER trig INSTEAD OF INSERT ON echo_strings BEGIN + SELECT 1, 2, 3; + END; + } + } {1 {cannot create triggers on virtual tables}} + do_test vtab5.3.2 { + catchsql { + CREATE TRIGGER trig AFTER INSERT ON echo_strings BEGIN + SELECT 1, 2, 3; + END; + } + } {1 {cannot create triggers on virtual tables}} + do_test vtab5.3.2 { + catchsql { + CREATE TRIGGER trig BEFORE INSERT ON echo_strings BEGIN + SELECT 1, 2, 3; + END; + } + } {1 {cannot create triggers on virtual tables}} +} + +# Test that it is impossible to create an index on a virtual table. +# +do_test vtab5.4.1 { + catchsql { + CREATE INDEX echo_strings_i ON echo_strings(str); + } +} {1 {virtual tables may not be indexed}} + +# Test that it is impossible to add a column to a virtual table. +# +do_test vtab5.4.2 { + catchsql { + ALTER TABLE echo_strings ADD COLUMN col2; + } +} {1 {virtual tables may not be altered}} + +# Test that it is impossible to rename a virtual table. +# UPDATE: It is now possible. +# +# do_test vtab5.4.3 { +# catchsql { +# ALTER TABLE echo_strings RENAME TO echo_strings2; +# } +# } {1 {virtual tables may not be altered}} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab6.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab6.test new file mode 100644 index 0000000..e89ab85 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab6.test @@ -0,0 +1,457 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for joins, including outer joins involving +# virtual tables. The test cases in this file are copied from the file +# join.test, and some of the comments still reflect that. +# +# $Id: vtab6.test,v 1.2 2006/06/28 18:18:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +register_echo_module [sqlite3_connection_pointer db] + +execsql { + CREATE TABLE real_t1(a,b,c); + CREATE TABLE real_t2(b,c,d); + CREATE TABLE real_t3(c,d,e); + CREATE TABLE real_t4(d,e,f); + CREATE TABLE real_t5(a INTEGER PRIMARY KEY); + CREATE TABLE real_t6(a INTEGER); + CREATE TABLE real_t7 (x, y); + CREATE TABLE real_t8 (a integer primary key, b); + CREATE TABLE real_t9(a INTEGER PRIMARY KEY, b); + CREATE TABLE real_t10(x INTEGER PRIMARY KEY, y); + CREATE TABLE real_t11(p INTEGER PRIMARY KEY, q); + CREATE TABLE real_t12(a,b); + CREATE TABLE real_t13(b,c); + CREATE TABLE real_t21(a,b,c); + CREATE TABLE real_t22(p,q); +} +foreach t [list t1 t2 t3 t4 t5 t6 t7 t8 t9 t10 t11 t12 t13 t21 t22] { + execsql "CREATE VIRTUAL TABLE $t USING echo(real_$t)" +} + +do_test vtab6-1.1 { + execsql { + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,3,4); + INSERT INTO t1 VALUES(3,4,5); + SELECT * FROM t1; + } +} {1 2 3 2 3 4 3 4 5} +do_test vtab6-1.2 { + execsql { + INSERT INTO t2 VALUES(1,2,3); + INSERT INTO t2 VALUES(2,3,4); + INSERT INTO t2 VALUES(3,4,5); + SELECT * FROM t2; + } +} {1 2 3 2 3 4 3 4 5} + +do_test vtab6-1.3 { + execsql2 { + SELECT * FROM t1 NATURAL JOIN t2; + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test vtab6-1.3.1 { + execsql2 { + SELECT * FROM t2 NATURAL JOIN t1; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test vtab6-1.3.2 { + execsql2 { + SELECT * FROM t2 AS x NATURAL JOIN t1; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test vtab6-1.3.3 { + execsql2 { + SELECT * FROM t2 NATURAL JOIN t1 AS y; + } +} {b 2 c 3 d 4 a 1 b 3 c 4 d 5 a 2} +do_test vtab6-1.3.4 { + execsql { + SELECT b FROM t1 NATURAL JOIN t2; + } +} {2 3} +do_test vtab6-1.4.1 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test vtab6-1.4.2 { + execsql2 { + SELECT * FROM t1 AS x INNER JOIN t2 USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test vtab6-1.4.3 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 AS y USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test vtab6-1.4.4 { + execsql2 { + SELECT * FROM t1 AS x INNER JOIN t2 AS y USING(b,c); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} +do_test vtab6-1.4.5 { + execsql { + SELECT b FROM t1 JOIN t2 USING(b); + } +} {2 3} +do_test vtab6-1.5 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(b); + } +} {a 1 b 2 c 3 c 3 d 4 a 2 b 3 c 4 c 4 d 5} +do_test vtab6-1.6 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(c); + } +} {a 1 b 2 c 3 b 2 d 4 a 2 b 3 c 4 b 3 d 5} +do_test vtab6-1.7 { + execsql2 { + SELECT * FROM t1 INNER JOIN t2 USING(c,b); + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5} + +do_test vtab6-1.8 { + execsql { + SELECT * FROM t1 NATURAL CROSS JOIN t2; + } +} {1 2 3 4 2 3 4 5} +do_test vtab6-1.9 { + execsql { + SELECT * FROM t1 CROSS JOIN t2 USING(b,c); + } +} {1 2 3 4 2 3 4 5} +do_test vtab6-1.10 { + execsql { + SELECT * FROM t1 NATURAL INNER JOIN t2; + } +} {1 2 3 4 2 3 4 5} +do_test vtab6-1.11 { + execsql { + SELECT * 
FROM t1 INNER JOIN t2 USING(b,c); + } +} {1 2 3 4 2 3 4 5} +do_test vtab6-1.12 { + execsql { + SELECT * FROM t1 natural inner join t2; + } +} {1 2 3 4 2 3 4 5} + +ifcapable subquery { +breakpoint + do_test vtab6-1.13 { + execsql2 { + SELECT * FROM t1 NATURAL JOIN + (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as t3 + } + } {a 1 b 2 c 3 d 4 e 5} + do_test vtab6-1.14 { + execsql2 { + SELECT * FROM (SELECT b as 'c', c as 'd', d as 'e' FROM t2) as 'tx' + NATURAL JOIN t1 + } + } {c 3 d 4 e 5 a 1 b 2} +} + +do_test vtab6-1.15 { + execsql { + INSERT INTO t3 VALUES(2,3,4); + INSERT INTO t3 VALUES(3,4,5); + INSERT INTO t3 VALUES(4,5,6); + SELECT * FROM t3; + } +} {2 3 4 3 4 5 4 5 6} +do_test vtab6-1.16 { + execsql { + SELECT * FROM t1 natural join t2 natural join t3; + } +} {1 2 3 4 5 2 3 4 5 6} +do_test vtab6-1.17 { + execsql2 { + SELECT * FROM t1 natural join t2 natural join t3; + } +} {a 1 b 2 c 3 d 4 e 5 a 2 b 3 c 4 d 5 e 6} +do_test vtab6-1.18 { + execsql { + INSERT INTO t4 VALUES(2,3,4); + INSERT INTO t4 VALUES(3,4,5); + INSERT INTO t4 VALUES(4,5,6); + SELECT * FROM t4; + } +} {2 3 4 3 4 5 4 5 6} +do_test vtab6-1.19.1 { + execsql { + SELECT * FROM t1 natural join t2 natural join t4; + } +} {1 2 3 4 5 6} +do_test vtab6-1.19.2 { + execsql2 { + SELECT * FROM t1 natural join t2 natural join t4; + } +} {a 1 b 2 c 3 d 4 e 5 f 6} +do_test vtab6-1.20 { + execsql { + SELECT * FROM t1 natural join t2 natural join t3 WHERE t1.a=1 + } +} {1 2 3 4 5} + +do_test vtab6-2.1 { + execsql { + SELECT * FROM t1 NATURAL LEFT JOIN t2; + } +} {1 2 3 4 2 3 4 5 3 4 5 {}} +do_test vtab6-2.2 { + execsql { + SELECT * FROM t2 NATURAL LEFT OUTER JOIN t1; + } +} {1 2 3 {} 2 3 4 1 3 4 5 2} +do_test vtab6-2.3 { + catchsql { + SELECT * FROM t1 NATURAL RIGHT OUTER JOIN t2; + } +} {1 {RIGHT and FULL OUTER JOINs are not currently supported}} +do_test vtab6-2.4 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d + } +} {1 2 3 {} {} {} 2 3 4 {} {} {} 3 4 5 1 2 3} +do_test vtab6-2.5 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t1.a>1 + } +} {2 3 4 {} {} {} 3 4 5 1 2 3} +do_test vtab6-2.6 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.d WHERE t2.b IS NULL OR t2.b>1 + } +} {1 2 3 {} {} {} 2 3 4 {} {} {}} + +do_test vtab6-3.1 { + catchsql { + SELECT * FROM t1 NATURAL JOIN t2 ON t1.a=t2.b; + } +} {1 {a NATURAL join may not have an ON or USING clause}} +do_test vtab6-3.2 { + catchsql { + SELECT * FROM t1 NATURAL JOIN t2 USING(b); + } +} {1 {a NATURAL join may not have an ON or USING clause}} +do_test vtab6-3.3 { + catchsql { + SELECT * FROM t1 JOIN t2 ON t1.a=t2.b USING(b); + } +} {1 {cannot have both ON and USING clauses in the same join}} +do_test vtab6-3.4 { + catchsql { + SELECT * FROM t1 JOIN t2 USING(a); + } +} {1 {cannot join using column a - column not present in both tables}} +do_test vtab6-3.5 { + catchsql { + SELECT * FROM t1 USING(a); + } +} {0 {1 2 3 2 3 4 3 4 5}} +do_test vtab6-3.6 { + catchsql { + SELECT * FROM t1 JOIN t2 ON t3.a=t2.b; + } +} {1 {no such column: t3.a}} +do_test vtab6-3.7 { + catchsql { + SELECT * FROM t1 INNER OUTER JOIN t2; + } +} {1 {unknown or unsupported join type: INNER OUTER}} +do_test vtab6-3.7 { + catchsql { + SELECT * FROM t1 LEFT BOGUS JOIN t2; + } +} {1 {unknown or unsupported join type: LEFT BOGUS}} + +do_test vtab6-4.1 { + execsql { + BEGIN; + INSERT INTO t6 VALUES(NULL); + INSERT INTO t6 VALUES(NULL); + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 
SELECT * FROM t6;
+ INSERT INTO t6 SELECT * FROM t6;
+ COMMIT;
+ }
+ execsql {
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test vtab6-4.2 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+do_test vtab6-4.4 {
+ execsql {
+ UPDATE t6 SET a='xyz';
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test vtab6-4.6 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+do_test vtab6-4.8 {
+ execsql {
+ UPDATE t6 SET a=1;
+ SELECT * FROM t6 NATURAL JOIN t5;
+ }
+} {}
+do_test vtab6-4.9 {
+ execsql {
+ SELECT * FROM t6, t5 WHERE t6.a<t5.a;
+ }
+} {}
+
+# A test for ticket #247.
+#
+do_test vtab6-7.1 {
+ execsql {
+ INSERT INTO t7 VALUES ("pa1", 1);
+ INSERT INTO t7 VALUES ("pa2", NULL);
+ INSERT INTO t7 VALUES ("pa3", NULL);
+ INSERT INTO t7 VALUES ("pa4", 2);
+ INSERT INTO t7 VALUES ("pa30", 131);
+ INSERT INTO t7 VALUES ("pa31", 130);
+ INSERT INTO t7 VALUES ("pa28", NULL);
+
+ INSERT INTO t8 VALUES (1, "pa1");
+ INSERT INTO t8 VALUES (2, "pa4");
+ INSERT INTO t8 VALUES (3, NULL);
+ INSERT INTO t8 VALUES (4, NULL);
+ INSERT INTO t8 VALUES (130, "pa31");
+ INSERT INTO t8 VALUES (131, "pa30");
+
+ SELECT coalesce(t8.a,999) from t7 LEFT JOIN t8 on y=a;
+ }
+} {1 999 999 2 131 130 999}
+
+# Make sure a left join where the right table is really a view that
+# is itself a join works right. Ticket #306.
+#
+ifcapable view {
+do_test vtab6-8.1 {
+ execsql {
+ BEGIN;
+ INSERT INTO t9 VALUES(1,11);
+ INSERT INTO t9 VALUES(2,22);
+ INSERT INTO t10 VALUES(1,2);
+ INSERT INTO t10 VALUES(3,3);
+ INSERT INTO t11 VALUES(2,111);
+ INSERT INTO t11 VALUES(3,333);
+ CREATE VIEW v10_11 AS SELECT x, q FROM t10, t11 WHERE t10.y=t11.p;
+ COMMIT;
+ SELECT * FROM t9 LEFT JOIN v10_11 ON( a=x );
+ }
+} {1 11 1 111 2 22 {} {}}
+ifcapable subquery {
+ do_test vtab6-8.2 {
+ execsql {
+ SELECT * FROM t9 LEFT JOIN (SELECT x, q FROM t10, t11 WHERE t10.y=t11.p)
+ ON( a=x);
+ }
+ } {1 11 1 111 2 22 {} {}}
+}
+do_test vtab6-8.3 {
+ execsql {
+ SELECT * FROM v10_11 LEFT JOIN t9 ON( a=x );
+ }
+} {1 111 1 11 3 333 {} {}}
+} ;# ifcapable view
+
+# Ticket #350 describes a scenario where LEFT OUTER JOIN does not
+# function correctly if the right table in the join is really
+# a subquery.
+#
+# To test the problem, we generate the same LEFT OUTER JOIN in two
+# separate selects, one using a subquery and the other calling
+# the table directly. Then connect the two SELECTs using an EXCEPT.
+# Both queries should generate the same results so the answer should
+# be an empty set.
+# +ifcapable compound { +do_test vtab6-9.1 { + execsql { + BEGIN; + INSERT INTO t12 VALUES(1,11); + INSERT INTO t12 VALUES(2,22); + INSERT INTO t13 VALUES(22,222); + COMMIT; + } +} {} + +ifcapable subquery { + do_test vtab6-9.1.1 { + execsql { + SELECT * FROM t12 NATURAL LEFT JOIN t13 + EXCEPT + SELECT * FROM t12 NATURAL LEFT JOIN (SELECT * FROM t13 WHERE b>0); + } + } {} +} +ifcapable view { + do_test vtab6-9.2 { + execsql { + CREATE VIEW v13 AS SELECT * FROM t13 WHERE b>0; + SELECT * FROM t12 NATURAL LEFT JOIN t13 + EXCEPT + SELECT * FROM t12 NATURAL LEFT JOIN v13; + } + } {} +} ;# ifcapable view +} ;# ifcapable compound + +ifcapable subquery { +do_test vtab6-10.1 { + execsql { + CREATE INDEX i22 ON real_t22(q); + SELECT a FROM t21 LEFT JOIN t22 ON b=p WHERE q= + (SELECT max(m.q) FROM t22 m JOIN t21 n ON n.b=m.p WHERE n.c=1); + } +} {} +} ;# ifcapable subquery + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab7.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab7.test new file mode 100644 index 0000000..5aeb66c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab7.test @@ -0,0 +1,199 @@ +# 2006 July 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of this test is reading and writing to the database from within a +# virtual table xSync() callback. +# +# $Id: vtab7.test,v 1.2 2006/07/26 16:22:16 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +# Register the echo module. Code inside the echo module appends elements +# to the global tcl list variable ::echo_module whenever SQLite invokes +# certain module callbacks. This includes the xSync(), xCommit() and +# xRollback() callbacks. For each of these callback, two elements are +# appended to ::echo_module, as follows: +# +# Module method Elements appended to ::echo_module +# ------------------------------------------------------- +# xSync() xSync echo($tablename) +# xCommit() xCommit echo($tablename) +# xRollback() xRollback echo($tablename) +# ------------------------------------------------------- +# +# In each case, $tablename is replaced by the name of the real table (not +# the echo table). By setting up a tcl trace on the ::echo_module variable, +# code in this file arranges for a Tcl script to be executed from within +# the echo module xSync() callback. +# +register_echo_module [sqlite3_connection_pointer db] +trace add variable ::echo_module write echo_module_trace + +# This Tcl proc is invoked whenever the ::echo_module variable is written. +# +proc echo_module_trace {args} { + # Filter out writes to ::echo_module that are not xSync, xCommit or + # xRollback callbacks. + if {[llength $::echo_module] < 2} return + set x [lindex $::echo_module end-1] + if {$x ne "xSync" && $x ne "xCommit" && $x ne "xRollback"} return + + regexp {^echo.(.*).$} [lindex $::echo_module end] dummy tablename + # puts "Ladies and gentlemen, an $x on $tablename!" + + if {[info exists ::callbacks($x,$tablename)]} { + eval $::callbacks($x,$tablename) + } +} + +# The following tests, vtab7-1.*, test that the trace callback on +# ::echo_module is providing the expected tcl callbacks. 
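For readers unfamiliar with Tcl variable traces, the mechanism the vtab7-1.* tests depend on can be reduced to a few lines; this is only an illustrative sketch (the names ::watched and log_write are not part of the test suite):

    proc log_write {name1 name2 op} {
        # name1 is the traced variable and op is "write"; fetch and report the new value.
        upvar #0 $name1 value
        puts "write to $name1: $value"
    }
    trace add variable ::watched write log_write
    set ::watched xSync   ;# the write fires log_write immediately

The harness uses exactly this facility, tracing ::echo_module so that a script runs from inside the xSync() callback.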
+do_test vtab7-1.1 { + execsql { + CREATE TABLE abc(a, b, c); + CREATE VIRTUAL TABLE abc2 USING echo(abc); + } +} {} + +do_test vtab7-1.2 { + set ::callbacks(xSync,abc) {incr ::counter} + set ::counter 0 + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + } + set ::counter +} {1} + +# Write to an existing database table from within an xSync callback. +do_test vtab7-2.1 { + set ::callbacks(xSync,abc) { + execsql {INSERT INTO log VALUES('xSync');} + } + execsql { + CREATE TABLE log(msg); + INSERT INTO abc2 VALUES(4, 5, 6); + SELECT * FROM log; + } +} {xSync} +do_test vtab7-2.3 { + execsql { + INSERT INTO abc2 VALUES(4, 5, 6); + SELECT * FROM log; + } +} {xSync xSync} +do_test vtab7-2.4 { + execsql { + INSERT INTO abc2 VALUES(4, 5, 6); + SELECT * FROM log; + } +} {xSync xSync xSync} + +# Create a database table from within xSync callback. +do_test vtab7-2.5 { + set ::callbacks(xSync,abc) { + execsql { CREATE TABLE newtab(d, e, f); } + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + SELECT name FROM sqlite_master ORDER BY name; + } +} {abc abc2 log newtab} + +# Drop a database table from within xSync callback. +do_test vtab7-2.6 { + set ::callbacks(xSync,abc) { + execsql { DROP TABLE newtab } + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + SELECT name FROM sqlite_master ORDER BY name; + } +} {abc abc2 log} + +# Write to an attached database from xSync(). +do_test vtab7-3.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS db2; + CREATE TABLE db2.stuff(description, shape, color); + } + set ::callbacks(xSync,abc) { + execsql { INSERT INTO db2.stuff VALUES('abc', 'square', 'green'); } + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + SELECT * from stuff; + } +} {abc square green} + +# UPDATE: The next test passes, but leaks memory. So leave it out. +# +# The following tests test that writing to the database from within +# the xCommit callback causes a misuse error. +# do_test vtab7-4.1 { +# unset -nocomplain ::callbacks(xSync,abc) +# set ::callbacks(xCommit,abc) { +# execsql { INSERT INTO log VALUES('hello') } +# } +# catchsql { +# INSERT INTO abc2 VALUES(1, 2, 3); +# } +# } {1 {library routine called out of sequence}} + +# These tests, vtab7-4.*, test that an SQLITE_LOCKED error is returned +# if an attempt to write to a virtual module table or create a new +# virtual table from within an xSync() callback. 
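The tests below share one pattern: the write attempted inside xSync() is wrapped in [catchsql], its result is parked in a global, and the check is made after the outer statement returns. A condensed sketch of that pattern, assuming echo tables abc2 and def2 like the ones created in vtab7-4.1 (::cb_result is an illustrative name):

    set ::cb_result {}
    set ::callbacks(xSync,abc) {
        # Runs inside xSync(); the nested write is expected to fail with SQLITE_LOCKED.
        set ::cb_result [catchsql { INSERT INTO def2 VALUES(1, 2, 3) }]
    }
    execsql { INSERT INTO abc2 VALUES(1, 2, 3) }  ;# triggers xSync on table "abc"
    # ::cb_result should now hold {1 {database table is locked}}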
+do_test vtab7-4.1 { + execsql { + CREATE TABLE def(d, e, f); + CREATE VIRTUAL TABLE def2 USING echo(def); + } + set ::callbacks(xSync,abc) { + set ::error [catchsql { INSERT INTO def2 VALUES(1, 2, 3) }] + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + } + set ::error +} {1 {database table is locked}} +do_test vtab7-4.2 { + set ::callbacks(xSync,abc) { + set ::error [catchsql { CREATE VIRTUAL TABLE def3 USING echo(def) }] + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + } + set ::error +} {1 {database table is locked}} + +do_test vtab7-4.3 { + set ::callbacks(xSync,abc) { + set ::error [catchsql { DROP TABLE def2 }] + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + SELECT name FROM sqlite_master ORDER BY name; + } + set ::error +} {1 {database table is locked}} + +trace remove variable ::echo_module write echo_module_trace +unset -nocomplain ::callbacks + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab8.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab8.test new file mode 100644 index 0000000..8e04a41 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab8.test @@ -0,0 +1,78 @@ +# 2006 August 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file inserting into virtual tables from a SELECT +# statement. +# +# $Id: vtab8.test,v 1.2 2007/03/02 08:12:23 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +register_echo_module [sqlite3_connection_pointer db] + +# See ticket #2244 +# +do_test vtab1.2244-1 { + execsql { + CREATE TABLE t2244(a, b); + CREATE VIRTUAL TABLE t2244e USING echo(t2244); + INSERT INTO t2244 VALUES('AA', 'BB'); + INSERT INTO t2244 VALUES('CC', 'DD'); + SELECT rowid, * FROM t2244e; + } +} {1 AA BB 2 CC DD} +do_test vtab1.2244-2 { + execsql { + SELECT * FROM t2244e WHERE rowid = 10; + } +} {} +do_test vtab1.2244-3 { + execsql { + UPDATE t2244e SET a = 'hello world' WHERE 0; + SELECT rowid, * FROM t2244e; + } +} {1 AA BB 2 CC DD} + +do_test vtab1-2250-2 { + execsql { + CREATE TABLE t2250(a, b); + INSERT INTO t2250 VALUES(10, 20); + CREATE VIRTUAL TABLE t2250e USING echo(t2250); + select max(rowid) from t2250; + select max(rowid) from t2250e; + } +} {1 1} + +# See ticket #2260. +# +do_test vtab1.2260-1 { + execsql { + CREATE TABLE t2260a_real(a, b); + CREATE TABLE t2260b_real(a, b); + + CREATE INDEX i2260 ON t2260a_real(a); + CREATE INDEX i2260x ON t2260b_real(a); + + CREATE VIRTUAL TABLE t2260a USING echo(t2260a_real); + CREATE VIRTUAL TABLE t2260b USING echo(t2260b_real); + + SELECT * FROM t2260a, t2260b WHERE t2260a.a = t2260b.a AND t2260a.a > 101; + } +} {} + +unset -nocomplain echo_module_begin_fail +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab9.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab9.test new file mode 100644 index 0000000..b1290eb --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab9.test @@ -0,0 +1,70 @@ +# 2006 August 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file inserting into virtual tables from a SELECT +# statement. +# +# $Id: vtab9.test,v 1.2 2007/04/16 15:06:26 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +do_test vtab9-1.1 { + register_echo_module [sqlite3_connection_pointer db] + execsql { + CREATE TABLE t0(a); + CREATE VIRTUAL TABLE t1 USING echo(t0); + INSERT INTO t1 SELECT 'hello'; + SELECT rowid, * FROM t1; + } +} {1 hello} + +do_test vtab9-1.2 { + execsql { + CREATE TABLE t2(a,b,c); + CREATE VIRTUAL TABLE t3 USING echo(t2); + CREATE TABLE d1(a,b,c); + INSERT INTO d1 VALUES(1,2,3); + INSERT INTO d1 VALUES('a','b','c'); + INSERT INTO d1 VALUES(NULL,'x',123.456); + INSERT INTO d1 VALUES(x'6869',123456789,-12345); + INSERT INTO t3(a,b,c) SELECT * FROM d1; + SELECT rowid, * FROM t3; + } +} {1 1 2 3 2 a b c 3 {} x 123.456 4 hi 123456789 -12345} + +# do_test vtab9-2.1 { +# execsql { +# CREATE TABLE t4(a); +# CREATE VIRTUAL TABLE t5 USING echo(t4); +# INSERT INTO t4 VALUES('hello'); +# SELECT rowid, a FROM t5; +# } +# } {1 hello} +# do_test vtab9-2.2 { +# execsql { +# INSERT INTO t5(rowid, a) VALUES(1, 'goodbye'); +# } +# } {1 hello} +# do_test vtab9-2.3 { +# execsql { +# REPLACE INTO t5(rowid, a) VALUES(1, 'goodbye'); +# SELECT * FROM t5; +# } +# } {1 goodbye} + +unset -nocomplain echo_module_begin_fail +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtabA.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtabA.test new file mode 100644 index 0000000..bd9e551 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtabA.test @@ -0,0 +1,135 @@ +# 2007 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is 'hidden' virtual table columns. +# +# $Id: vtabA.test,v 1.1 2007/06/26 10:38:54 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +proc get_decltype {table col} { + set STMT [sqlite3_prepare $::DB "SELECT $col FROM $table" -1 TAIL] + set decltype [sqlite3_column_decltype $STMT 0] + sqlite3_finalize $STMT + set decltype +} + +proc get_collist {table} { + set ret [list] + db eval "PRAGMA table_info($table)" { lappend ret $name } + set ret +} + +# Register the echo module +register_echo_module [sqlite3_connection_pointer db] + +# Create a virtual table with a 'hidden' column (column b). +# +do_test vtabA-1.1 { + execsql { CREATE TABLE t1(a, b HIDDEN VARCHAR, c INTEGER) } +} {} +do_test vtabA-1.2 { + execsql { CREATE VIRTUAL TABLE t1e USING echo(t1) } +} {} + +# Test that the hidden column is not listed by [PRAGMA table_info]. +# +do_test vtabA-1.3 { + execsql { PRAGMA table_info(t1e) } +} [list \ + 0 a {} 0 {} 0 \ + 1 c INTEGER 0 {} 0 \ +] + +# Test that the hidden column is not require in the default column +# list for an INSERT statement. 
+# +do_test vtabA-1.4 { + catchsql { + INSERT INTO t1e VALUES('value a', 'value c'); + } +} {0 {}} +do_test vtabA-1.5 { + execsql { + SELECT a, b, c FROM t1e; + } +} {{value a} {} {value c}} + +do_test vtabA-1.6 { + execsql { + SELECT * FROM t1e; + } +} {{value a} {value c}} + +# Test that the expansion of a '*' expression in the result set of +# a SELECT does not include the hidden column. +# +do_test vtabA-1.7 { + execsql { + INSERT INTO t1e SELECT * FROM t1e; + } +} {} +do_test vtabA-1.8 { + execsql { + SELECT * FROM t1e; + } +} {{value a} {value c} {value a} {value c}} + +# Test that the declaration type of the hidden column does not include +# the token "HIDDEN". +# +do_test vtabA-1.9 { + get_decltype t1e b +} {VARCHAR} +do_test vtabA-1.10 { + get_collist t1e +} {a c} + +#---------------------------------------------------------------------- +# These tests vtabA-2.* concentrate on testing that the HIDDEN token +# is detected and handled correctly in various declarations. +# +proc analyse_parse {columns decltype_list} { + db eval { DROP TABLE IF EXISTS t1e; } + db eval { DROP TABLE IF EXISTS t1; } + db eval " CREATE TABLE t1 $columns " + db eval { CREATE VIRTUAL TABLE t1e USING echo(t1) } + set ret [list [get_collist t1e]] + foreach c $decltype_list { + lappend ret [get_decltype t1e $c] + } + set ret +} + +do_test vtabA-2.1 { + analyse_parse {(a text, b integer hidden, c hidden)} {a b c} +} {a text integer {}} + +do_test vtabA-2.2 { + analyse_parse {(a hidden , b integerhidden, c hidden1)} {a b c} +} {{b c} {} integerhidden hidden1} + +do_test vtabA-2.3 { + analyse_parse {(a HiDden, b HIDDEN, c hidden)} {a b c} +} {{} {} {} {}} + +do_test vtabA-2.4 { + analyse_parse {(a whatelse can i hidden test, b HIDDEN hidden)} {a b} +} {{} {whatelse can i test} hidden} + +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_alter.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_alter.test new file mode 100644 index 0000000..a67660d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_alter.test @@ -0,0 +1,103 @@ +# 2007 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the ALTER TABLE ... RENAME TO +# command on virtual tables. +# +# $Id: vtab_alter.test,v 1.2 2007/09/01 18:24:55 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +# Register the echo module. +# +# This test uses a special feature of the echo module. If the name +# of the virtual table is a prefix of the name of the underlying +# real table (for example if the v-table is "tbl" and the real table +# is "tbl_base"), then the name of the real table is modified +# when an "ALTER TABLE ... RENAME TO" is executed on the v-table. +# For example: +# +# sqlite> CREATE TABLE t1_base(a, b, c); +# sqlite> CREATE VIRTUAL TABLE t1 USING(t1_base); +# sqlite> ALTER TABLE t1 RENAME TO t2; +# sqlite> SELECT tbl_name FROM sqlite_master; +# t2_base +# t2 +# +register_echo_module [sqlite3_connection_pointer db] + + +# Try to rename an echo table. Make sure nothing terrible happens. 
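A condensed sketch of the prefix-rename behaviour described in the file header, assuming the echo module is registered on the connection as above; the table names are illustrative and the sqlite_master contents shown are inferred from that description rather than captured output:

    execsql {
        CREATE TABLE r1_base(a, b, c);
        CREATE VIRTUAL TABLE r1 USING echo('*_base');
        ALTER TABLE r1 RENAME TO r2;
        SELECT tbl_name FROM sqlite_master ORDER BY tbl_name;
    }
    # Expected to list both r2 and r2_base: because the v-table name was a
    # prefix of the real table name, the echo module renames its backing
    # store along with the virtual table.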
+# +do_test vtab_alter-1.1 { + execsql { CREATE TABLE t1(a, b VARCHAR, c INTEGER) } +} {} +do_test vtab_alter-1.2 { + execsql { CREATE VIRTUAL TABLE t1echo USING echo(t1) } +} {} +do_test vtab_alter-1.3 { + catchsql { SELECT * FROM t1echo } +} {0 {}} +do_test vtab_alter-1.4 { + execsql { ALTER TABLE t1echo RENAME TO new } +} {} +do_test vtab_alter-1.5 { + catchsql { SELECT * FROM t1echo } +} {1 {no such table: t1echo}} +do_test vtab_alter-1.6 { + catchsql { SELECT * FROM new } +} {0 {}} + +# Try to rename an echo table that renames it's base table. Make +# sure nothing terrible happens. +# +do_test vtab_alter-2.1 { + execsql { + DROP TABLE new; + DROP TABLE t1; + CREATE TABLE t1_base(a, b, c); + CREATE VIRTUAL TABLE t1 USING echo('*_base'); + } +} {} +do_test vtab_alter-2.2 { + execsql { + INSERT INTO t1_base VALUES(1, 2, 3); + SELECT * FROM t1; + } +} {1 2 3} +do_test vtab_alter-2.3 { + execsql { ALTER TABLE t1 RENAME TO x } +} {} +do_test vtab_alter-2.4 { + execsql { SELECT * FROM x; } +} {1 2 3} +do_test vtab_alter-2.5 { + execsql { SELECT * FROM x_base; } +} {1 2 3} + +# Cause an error to occur when the echo module renames it's +# backing store table. +# +do_test vtab_alter-3.1 { + execsql { CREATE TABLE y_base(a, b, c) } + catchsql { ALTER TABLE x RENAME TO y } +} {1 {SQL logic error or missing database}} +do_test vtab_alter-3.2 { + execsql { SELECT * FROM x } +} {1 2 3} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_err.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_err.test new file mode 100644 index 0000000..068386e --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_err.test @@ -0,0 +1,71 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: vtab_err.test,v 1.8 2007/09/03 16:12:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + + + +unset -nocomplain echo_module_begin_fail +do_ioerr_test vtab_err-1 -tclprep { + register_echo_module [sqlite3_connection_pointer db] +} -sqlbody { + BEGIN; + CREATE TABLE r(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE e USING echo(r); + INSERT INTO e VALUES(1, 2, 3); + INSERT INTO e VALUES('a', 'b', 'c'); + UPDATE e SET c = 10; + DELETE FROM e WHERE a = 'a'; + COMMIT; + BEGIN; + CREATE TABLE r2(a, b, c); + INSERT INTO r2 SELECT * FROM e; + INSERT INTO e SELECT a||'x', b, c FROM r2; + COMMIT; +} + +ifcapable !memdebug { + puts "Skipping vtab_err-2 tests: not compiled with -DSQLITE_MEMDEBUG..." 
+ finish_test + return +} +source $testdir/malloc_common.tcl + + +do_malloc_test vtab_err-2 -tclprep { + register_echo_module [sqlite3_connection_pointer db] +} -sqlbody { + BEGIN; + CREATE TABLE r(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE e USING echo(r); + INSERT INTO e VALUES(1, 2, 3); + INSERT INTO e VALUES('a', 'b', 'c'); + UPDATE e SET c = 10; + DELETE FROM e WHERE a = 'a'; + COMMIT; + BEGIN; + CREATE TABLE r2(a, b, c); + INSERT INTO r2 SELECT * FROM e; + INSERT INTO e SELECT a||'x', b, c FROM r2; + COMMIT; +} + +sqlite3_memdebug_fail -1 + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_shared.test b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_shared.test new file mode 100644 index 0000000..04ba1ec --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vtab_shared.test @@ -0,0 +1,62 @@ +# 2007 April 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file tests interactions between the virtual table and +# shared-schema functionality. +# +# $Id: vtab_shared.test,v 1.1 2007/04/16 15:49:42 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab||!shared_cache { + finish_test + return +} + +db close +sqlite3_enable_shared_cache 1 +sqlite3 db test.db + +do_test vtab_shared-1.0 { + register_echo_module [sqlite3_connection_pointer db] + catchsql { + CREATE TABLE t0(a, b, c); + CREATE VIRTUAL TABLE t1 USING echo(t0); + } +} {1 {Cannot use virtual tables in shared-cache mode}} + +db close +sqlite3_enable_shared_cache 0 +sqlite3 db test.db + +do_test vtab_shared-1.1 { + register_echo_module [sqlite3_connection_pointer db] + catchsql { + CREATE VIRTUAL TABLE t1 USING echo(t0); + } +} {0 {}} + +db close +sqlite3_enable_shared_cache 1 +sqlite3 db test.db + +do_test vtab_shared-1.2 { + register_echo_module [sqlite3_connection_pointer db] + catchsql { + SELECT * FROM t1; + } +} [list 1 \ + {malformed database schema - Cannot use virtual tables in shared-cache mode}] + +db close +sqlite3_enable_shared_cache 0 +finish_test + diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/vx.txt b/libraries/sqlite/unix/sqlite-3.5.1/test/vx.txt new file mode 100644 index 0000000..095ba47 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/vx.txt @@ -0,0 +1,43 @@ +# 2006 June 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: vtab_err.test,v 1.8 2007/09/03 16:12:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +source $testdir/malloc_common.tcl + + +do_malloc_test vtab_err-2 -start 374 -tclprep { + register_echo_module [sqlite3_connection_pointer db] +} -sqlbody { + BEGIN; + CREATE TABLE r(a PRIMARY KEY, b, c); + CREATE VIRTUAL TABLE e USING echo(r); + INSERT INTO e VALUES(1, 2, 3); + INSERT INTO e VALUES('a', 'b', 'c'); + UPDATE e SET c = 10; + DELETE FROM e WHERE a = 'a'; + COMMIT; + BEGIN; + CREATE TABLE r2(a, b, c); + INSERT INTO r2 SELECT * FROM e; + INSERT INTO e SELECT a||'x', b, c FROM r2; + COMMIT; +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/where.test b/libraries/sqlite/unix/sqlite-3.5.1/test/where.test new file mode 100644 index 0000000..b69e006 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/where.test @@ -0,0 +1,1156 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the use of indices in WHERE clases. +# +# $Id: where.test,v 1.43 2007/06/25 16:29:34 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test where-1.0 { + execsql { + CREATE TABLE t1(w int, x int, y int); + CREATE TABLE t2(p int, q int, r int, s int); + } + for {set i 1} {$i<=100} {incr i} { + set w $i + set x [expr {int(log($i)/log(2))}] + set y [expr {$i*$i + 2*$i + 1}] + execsql "INSERT INTO t1 VALUES($w,$x,$y)" + } + + ifcapable subquery { + execsql { + INSERT INTO t2 SELECT 101-w, x, (SELECT max(y) FROM t1)+1-y, y FROM t1; + } + } else { + set maxy [execsql {select max(y) from t1}] + execsql " + INSERT INTO t2 SELECT 101-w, x, $maxy+1-y, y FROM t1; + " + } + + execsql { + CREATE INDEX i1w ON t1(w); + CREATE INDEX i1xy ON t1(x,y); + CREATE INDEX i2p ON t2(p); + CREATE INDEX i2r ON t2(r); + CREATE INDEX i2qs ON t2(q, s); + } +} {} + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# Verify that queries use an index. We are using the special variable +# "sqlite_search_count" which tallys the number of executions of MoveTo +# and Next operators in the VDBE. By verifing that the search count is +# small we can be assured that indices are being used properly. 
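Concretely, this is what [count] does for a single query; run inside the harness after the tables above are built, and with index i1w in place, the expected list is the {3 121 10 3} seen in where-1.1.1 below:

    set ::sqlite_search_count 0
    set res [execsql {SELECT x, y, w FROM t1 WHERE w=10}]
    lappend res $::sqlite_search_count
    # res is now {3 121 10 3}: the three selected values plus a search
    # count of 3, i.e. the index was used rather than a 100-row scan.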
+# +do_test where-1.1.1 { + count {SELECT x, y, w FROM t1 WHERE w=10} +} {3 121 10 3} +do_test where-1.1.2 { + set sqlite_query_plan +} {t1 i1w} +do_test where-1.1.3 { + count {SELECT x, y, w AS abc FROM t1 WHERE abc=10} +} {3 121 10 3} +do_test where-1.1.4 { + set sqlite_query_plan +} {t1 i1w} +do_test where-1.2.1 { + count {SELECT x, y, w FROM t1 WHERE w=11} +} {3 144 11 3} +do_test where-1.2.2 { + count {SELECT x, y, w AS abc FROM t1 WHERE abc=11} +} {3 144 11 3} +do_test where-1.3.1 { + count {SELECT x, y, w AS abc FROM t1 WHERE 11=w} +} {3 144 11 3} +do_test where-1.3.2 { + count {SELECT x, y, w AS abc FROM t1 WHERE 11=abc} +} {3 144 11 3} +do_test where-1.4.1 { + count {SELECT w, x, y FROM t1 WHERE 11=w AND x>2} +} {11 3 144 3} +do_test where-1.4.2 { + set sqlite_query_plan +} {t1 i1w} +do_test where-1.4.3 { + count {SELECT w AS a, x AS b, y FROM t1 WHERE 11=a AND b>2} +} {11 3 144 3} +do_test where-1.4.4 { + set sqlite_query_plan +} {t1 i1w} +do_test where-1.5 { + count {SELECT x, y FROM t1 WHERE y<200 AND w=11 AND x>2} +} {3 144 3} +do_test where-1.5.2 { + set sqlite_query_plan +} {t1 i1w} +do_test where-1.6 { + count {SELECT x, y FROM t1 WHERE y<200 AND x>2 AND w=11} +} {3 144 3} +do_test where-1.7 { + count {SELECT x, y FROM t1 WHERE w=11 AND y<200 AND x>2} +} {3 144 3} +do_test where-1.8 { + count {SELECT x, y FROM t1 WHERE w>10 AND y=144 AND x=3} +} {3 144 3} +do_test where-1.8.2 { + set sqlite_query_plan +} {t1 i1xy} +do_test where-1.8.3 { + count {SELECT x, y FROM t1 WHERE y=144 AND x=3} + set sqlite_query_plan +} {{} i1xy} +do_test where-1.9 { + count {SELECT x, y FROM t1 WHERE y=144 AND w>10 AND x=3} +} {3 144 3} +do_test where-1.10 { + count {SELECT x, y FROM t1 WHERE x=3 AND w>=10 AND y=121} +} {3 121 3} +do_test where-1.11 { + count {SELECT x, y FROM t1 WHERE x=3 AND y=100 AND w<10} +} {3 100 3} + +# New for SQLite version 2.1: Verify that that inequality constraints +# are used correctly. +# +do_test where-1.12 { + count {SELECT w FROM t1 WHERE x=3 AND y<100} +} {8 3} +do_test where-1.13 { + count {SELECT w FROM t1 WHERE x=3 AND 100>y} +} {8 3} +do_test where-1.14 { + count {SELECT w FROM t1 WHERE 3=x AND y<100} +} {8 3} +do_test where-1.15 { + count {SELECT w FROM t1 WHERE 3=x AND 100>y} +} {8 3} +do_test where-1.16 { + count {SELECT w FROM t1 WHERE x=3 AND y<=100} +} {8 9 5} +do_test where-1.17 { + count {SELECT w FROM t1 WHERE x=3 AND 100>=y} +} {8 9 5} +do_test where-1.18 { + count {SELECT w FROM t1 WHERE x=3 AND y>225} +} {15 3} +do_test where-1.19 { + count {SELECT w FROM t1 WHERE x=3 AND 225=225} +} {14 15 5} +do_test where-1.21 { + count {SELECT w FROM t1 WHERE x=3 AND 225<=y} +} {14 15 5} +do_test where-1.22 { + count {SELECT w FROM t1 WHERE x=3 AND y>121 AND y<196} +} {11 12 5} +do_test where-1.23 { + count {SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196} +} {10 11 12 13 9} +do_test where-1.24 { + count {SELECT w FROM t1 WHERE x=3 AND 121y} +} {11 12 5} +do_test where-1.25 { + count {SELECT w FROM t1 WHERE x=3 AND 121<=y AND 196>=y} +} {10 11 12 13 9} + +# Need to work on optimizing the BETWEEN operator. 
+# +# do_test where-1.26 { +# count {SELECT w FROM t1 WHERE x=3 AND y BETWEEN 121 AND 196} +# } {10 11 12 13 9} + +do_test where-1.27 { + count {SELECT w FROM t1 WHERE x=3 AND y+1==122} +} {10 17} + +do_test where-1.28 { + count {SELECT w FROM t1 WHERE x+1=4 AND y+1==122} +} {10 99} +do_test where-1.29 { + count {SELECT w FROM t1 WHERE y==121} +} {10 99} + + +do_test where-1.30 { + count {SELECT w FROM t1 WHERE w>97} +} {98 99 100 3} +do_test where-1.31 { + count {SELECT w FROM t1 WHERE w>=97} +} {97 98 99 100 4} +do_test where-1.33 { + count {SELECT w FROM t1 WHERE w==97} +} {97 2} +do_test where-1.33.1 { + count {SELECT w FROM t1 WHERE w<=97 AND w==97} +} {97 2} +do_test where-1.33.2 { + count {SELECT w FROM t1 WHERE w<98 AND w==97} +} {97 2} +do_test where-1.33.3 { + count {SELECT w FROM t1 WHERE w>=97 AND w==97} +} {97 2} +do_test where-1.33.4 { + count {SELECT w FROM t1 WHERE w>96 AND w==97} +} {97 2} +do_test where-1.33.5 { + count {SELECT w FROM t1 WHERE w==97 AND w==97} +} {97 2} +do_test where-1.34 { + count {SELECT w FROM t1 WHERE w+1==98} +} {97 99} +do_test where-1.35 { + count {SELECT w FROM t1 WHERE w<3} +} {1 2 2} +do_test where-1.36 { + count {SELECT w FROM t1 WHERE w<=3} +} {1 2 3 3} +do_test where-1.37 { + count {SELECT w FROM t1 WHERE w+1<=4 ORDER BY w} +} {1 2 3 99} + +do_test where-1.38 { + count {SELECT (w) FROM t1 WHERE (w)>(97)} +} {98 99 100 3} +do_test where-1.39 { + count {SELECT (w) FROM t1 WHERE (w)>=(97)} +} {97 98 99 100 4} +do_test where-1.40 { + count {SELECT (w) FROM t1 WHERE (w)==(97)} +} {97 2} +do_test where-1.41 { + count {SELECT (w) FROM t1 WHERE ((w)+(1))==(98)} +} {97 99} + + +# Do the same kind of thing except use a join as the data source. +# +do_test where-2.1 { + count { + SELECT w, p FROM t2, t1 + WHERE x=q AND y=s AND r=8977 + } +} {34 67 6} +do_test where-2.2 { + count { + SELECT w, p FROM t2, t1 + WHERE x=q AND s=y AND r=8977 + } +} {34 67 6} +do_test where-2.3 { + count { + SELECT w, p FROM t2, t1 + WHERE x=q AND s=y AND r=8977 AND w>10 + } +} {34 67 6} +do_test where-2.4 { + count { + SELECT w, p FROM t2, t1 + WHERE p<80 AND x=q AND s=y AND r=8977 AND w>10 + } +} {34 67 6} +do_test where-2.5 { + count { + SELECT w, p FROM t2, t1 + WHERE p<80 AND x=q AND 8977=r AND s=y AND w>10 + } +} {34 67 6} +do_test where-2.6 { + count { + SELECT w, p FROM t2, t1 + WHERE x=q AND p=77 AND s=y AND w>5 + } +} {24 77 6} +do_test where-2.7 { + count { + SELECT w, p FROM t1, t2 + WHERE x=q AND p>77 AND s=y AND w=5 + } +} {5 96 6} + +# Lets do a 3-way join. +# +do_test where-3.1 { + count { + SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C + WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=11 + } +} {11 90 11 8} +do_test where-3.2 { + count { + SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C + WHERE C.w=101-B.p AND B.r=10202-A.y AND A.w=12 + } +} {12 89 12 8} +do_test where-3.3 { + count { + SELECT A.w, B.p, C.w FROM t1 as A, t2 as B, t1 as C + WHERE A.w=15 AND B.p=C.w AND B.r=10202-A.y + } +} {15 86 86 8} + +# Test to see that the special case of a constant WHERE clause is +# handled. 
+# +do_test where-4.1 { + count { + SELECT * FROM t1 WHERE 0 + } +} {0} +do_test where-4.2 { + count { + SELECT * FROM t1 WHERE 1 LIMIT 1 + } +} {1 0 4 0} +do_test where-4.3 { + execsql { + SELECT 99 WHERE 0 + } +} {} +do_test where-4.4 { + execsql { + SELECT 99 WHERE 1 + } +} {99} +do_test where-4.5 { + execsql { + SELECT 99 WHERE 0.1 + } +} {99} +do_test where-4.6 { + execsql { + SELECT 99 WHERE 0.0 + } +} {} + +# Verify that IN operators in a WHERE clause are handled correctly. +# Omit these tests if the build is not capable of sub-queries. +# +ifcapable subquery { + do_test where-5.1 { + count { + SELECT * FROM t1 WHERE rowid IN (1,2,3,1234) order by 1; + } + } {1 0 4 2 1 9 3 1 16 4} + do_test where-5.2 { + count { + SELECT * FROM t1 WHERE rowid+0 IN (1,2,3,1234) order by 1; + } + } {1 0 4 2 1 9 3 1 16 199} + do_test where-5.3 { + count { + SELECT * FROM t1 WHERE w IN (-1,1,2,3) order by 1; + } + } {1 0 4 2 1 9 3 1 16 14} + do_test where-5.4 { + count { + SELECT * FROM t1 WHERE w+0 IN (-1,1,2,3) order by 1; + } + } {1 0 4 2 1 9 3 1 16 199} + do_test where-5.5 { + count { + SELECT * FROM t1 WHERE rowid IN + (select rowid from t1 where rowid IN (-1,2,4)) + ORDER BY 1; + } + } {2 1 9 4 2 25 3} + do_test where-5.6 { + count { + SELECT * FROM t1 WHERE rowid+0 IN + (select rowid from t1 where rowid IN (-1,2,4)) + ORDER BY 1; + } + } {2 1 9 4 2 25 201} + do_test where-5.7 { + count { + SELECT * FROM t1 WHERE w IN + (select rowid from t1 where rowid IN (-1,2,4)) + ORDER BY 1; + } + } {2 1 9 4 2 25 9} + do_test where-5.8 { + count { + SELECT * FROM t1 WHERE w+0 IN + (select rowid from t1 where rowid IN (-1,2,4)) + ORDER BY 1; + } + } {2 1 9 4 2 25 201} + do_test where-5.9 { + count { + SELECT * FROM t1 WHERE x IN (1,7) ORDER BY 1; + } + } {2 1 9 3 1 16 7} + do_test where-5.10 { + count { + SELECT * FROM t1 WHERE x+0 IN (1,7) ORDER BY 1; + } + } {2 1 9 3 1 16 199} + do_test where-5.11 { + count { + SELECT * FROM t1 WHERE y IN (6400,8100) ORDER BY 1; + } + } {79 6 6400 89 6 8100 199} + do_test where-5.12 { + count { + SELECT * FROM t1 WHERE x=6 AND y IN (6400,8100) ORDER BY 1; + } + } {79 6 6400 89 6 8100 7} + do_test where-5.13 { + count { + SELECT * FROM t1 WHERE x IN (1,7) AND y NOT IN (6400,8100) ORDER BY 1; + } + } {2 1 9 3 1 16 7} + do_test where-5.14 { + count { + SELECT * FROM t1 WHERE x IN (1,7) AND y IN (9,10) ORDER BY 1; + } + } {2 1 9 8} + do_test where-5.15 { + count { + SELECT * FROM t1 WHERE x IN (1,7) AND y IN (9,16) ORDER BY 1; + } + } {2 1 9 3 1 16 11} +} + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} +# Check out the logic that attempts to implement the ORDER BY clause +# using an index rather than by sorting. 
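For orientation, the first where-6.* results can be read as follows; nosort means the ORDER BY was satisfied by an index scan, sort means an OP_Sort was required (figures taken from where-6.2 and where-6.3 below):

    # Index t3a on t3(a) supplies the requested order directly:
    #   cksort {SELECT * FROM t3 ORDER BY a LIMIT 3}
    #     => 1 100 4 2 99 9 3 98 16 nosort
    # Ordering by an expression on the same column defeats the index:
    #   cksort {SELECT * FROM t3 ORDER BY a+1 LIMIT 3}
    #     => 1 100 4 2 99 9 3 98 16 sort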
+# +do_test where-6.1 { + execsql { + CREATE TABLE t3(a,b,c); + CREATE INDEX t3a ON t3(a); + CREATE INDEX t3bc ON t3(b,c); + CREATE INDEX t3acb ON t3(a,c,b); + INSERT INTO t3 SELECT w, 101-w, y FROM t1; + SELECT count(*), sum(a), sum(b), sum(c) FROM t3; + } +} {100 5050 5050 348550} +do_test where-6.2 { + cksort { + SELECT * FROM t3 ORDER BY a LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 nosort} +do_test where-6.3 { + cksort { + SELECT * FROM t3 ORDER BY a+1 LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 sort} +do_test where-6.4 { + cksort { + SELECT * FROM t3 WHERE a<10 ORDER BY a LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 nosort} +do_test where-6.5 { + cksort { + SELECT * FROM t3 WHERE a>0 AND a<10 ORDER BY a LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 nosort} +do_test where-6.6 { + cksort { + SELECT * FROM t3 WHERE a>0 ORDER BY a LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 nosort} +do_test where-6.7 { + cksort { + SELECT * FROM t3 WHERE b>0 ORDER BY a LIMIT 3 + } +} {1 100 4 2 99 9 3 98 16 nosort} +ifcapable subquery { + do_test where-6.8 { + cksort { + SELECT * FROM t3 WHERE a IN (3,5,7,1,9,4,2) ORDER BY a LIMIT 3 + } + } {1 100 4 2 99 9 3 98 16 sort} +} +do_test where-6.9.1 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.1.1 { + cksort { + SELECT * FROM t3 WHERE a>=1 AND a=1 AND c>0 ORDER BY a LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.1.2 { + cksort { + SELECT * FROM t3 WHERE a<2 AND a=1 AND c>0 ORDER BY a LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.2 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.3 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.4 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.5 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC, c DESC LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.6 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c DESC LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.7 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY c,a LIMIT 3 + } +} {1 100 4 sort} +do_test where-6.9.8 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a DESC, c ASC LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.9.9 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a ASC, c DESC LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.10 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.11 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.12 { + cksort { + SELECT * FROM t3 WHERE a=1 AND c>0 ORDER BY a,c,b LIMIT 3 + } +} {1 100 4 nosort} +do_test where-6.13 { + cksort { + SELECT * FROM t3 WHERE a>0 ORDER BY a DESC LIMIT 3 + } +} {100 1 10201 99 2 10000 98 3 9801 nosort} +do_test where-6.13.1 { + cksort { + SELECT * FROM t3 WHERE a>0 ORDER BY -a LIMIT 3 + } +} {100 1 10201 99 2 10000 98 3 9801 sort} +do_test where-6.14 { + cksort { + SELECT * FROM t3 ORDER BY b LIMIT 3 + } +} {100 1 10201 99 2 10000 98 3 9801 nosort} +do_test where-6.15 { + cksort { + SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t3.a LIMIT 3 + } +} {1 0 2 1 3 1 nosort} +do_test where-6.16 { + cksort { + SELECT t3.a, t1.x FROM t3, t1 WHERE t3.a=t1.w ORDER BY t1.x, t3.a LIMIT 3 + } +} {1 0 2 1 3 1 sort} +do_test where-6.19 { + cksort { + SELECT y FROM t1 ORDER BY w LIMIT 3; + } +} {4 9 
16 nosort} +do_test where-6.20 { + cksort { + SELECT y FROM t1 ORDER BY rowid LIMIT 3; + } +} {4 9 16 nosort} +do_test where-6.21 { + cksort { + SELECT y FROM t1 ORDER BY rowid, y LIMIT 3; + } +} {4 9 16 nosort} +do_test where-6.22 { + cksort { + SELECT y FROM t1 ORDER BY rowid, y DESC LIMIT 3; + } +} {4 9 16 nosort} +do_test where-6.23 { + cksort { + SELECT y FROM t1 WHERE y>4 ORDER BY rowid, w, x LIMIT 3; + } +} {9 16 25 nosort} +do_test where-6.24 { + cksort { + SELECT y FROM t1 WHERE y>=9 ORDER BY rowid, x DESC, w LIMIT 3; + } +} {9 16 25 nosort} +do_test where-6.25 { + cksort { + SELECT y FROM t1 WHERE y>4 AND y<25 ORDER BY rowid; + } +} {9 16 nosort} +do_test where-6.26 { + cksort { + SELECT y FROM t1 WHERE y>=4 AND y<=25 ORDER BY oid; + } +} {4 9 16 25 nosort} +do_test where-6.27 { + cksort { + SELECT y FROM t1 WHERE y<=25 ORDER BY _rowid_, w+y; + } +} {4 9 16 25 nosort} + + +# Tests for reverse-order sorting. +# +do_test where-7.1 { + cksort { + SELECT w FROM t1 WHERE x=3 ORDER BY y; + } +} {8 9 10 11 12 13 14 15 nosort} +do_test where-7.2 { + cksort { + SELECT w FROM t1 WHERE x=3 ORDER BY y DESC; + } +} {15 14 13 12 11 10 9 8 nosort} +do_test where-7.3 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>100 ORDER BY y LIMIT 3; + } +} {10 11 12 nosort} +do_test where-7.4 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>100 ORDER BY y DESC LIMIT 3; + } +} {15 14 13 nosort} +do_test where-7.5 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>121 ORDER BY y DESC; + } +} {15 14 13 12 11 nosort} +do_test where-7.6 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=121 ORDER BY y DESC; + } +} {15 14 13 12 11 10 nosort} +do_test where-7.7 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<196 ORDER BY y DESC; + } +} {12 11 10 nosort} +do_test where-7.8 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196 ORDER BY y DESC; + } +} {13 12 11 10 nosort} +do_test where-7.9 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>121 AND y<=196 ORDER BY y DESC; + } +} {13 12 11 nosort} +do_test where-7.10 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>100 AND y<196 ORDER BY y DESC; + } +} {12 11 10 nosort} +do_test where-7.11 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<196 ORDER BY y; + } +} {10 11 12 nosort} +do_test where-7.12 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=121 AND y<=196 ORDER BY y; + } +} {10 11 12 13 nosort} +do_test where-7.13 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>121 AND y<=196 ORDER BY y; + } +} {11 12 13 nosort} +do_test where-7.14 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>100 AND y<196 ORDER BY y; + } +} {10 11 12 nosort} +do_test where-7.15 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y<81 ORDER BY y; + } +} {nosort} +do_test where-7.16 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y<=81 ORDER BY y; + } +} {8 nosort} +do_test where-7.17 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>256 ORDER BY y; + } +} {nosort} +do_test where-7.18 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=256 ORDER BY y; + } +} {15 nosort} +do_test where-7.19 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y<81 ORDER BY y DESC; + } +} {nosort} +do_test where-7.20 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y<=81 ORDER BY y DESC; + } +} {8 nosort} +do_test where-7.21 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>256 ORDER BY y DESC; + } +} {nosort} +do_test where-7.22 { + cksort { + SELECT w FROM t1 WHERE x=3 AND y>=256 ORDER BY y DESC; + } +} {15 nosort} +do_test where-7.23 { + cksort { + SELECT w FROM t1 WHERE x=0 AND y<4 ORDER BY y; + } +} {nosort} 
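The descending cases in this block rely on SQLite walking an index backwards rather than sorting; where-7.1 and where-7.2 above show the same i1xy index serving both directions:

    #   cksort {SELECT w FROM t1 WHERE x=3 ORDER BY y}
    #     => 8 9 10 11 12 13 14 15 nosort
    #   cksort {SELECT w FROM t1 WHERE x=3 ORDER BY y DESC}
    #     => 15 14 13 12 11 10 9 8 nosort
    # No separate descending index is needed; the reverse scan of i1xy
    # produces the rows already in the requested order.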
+do_test where-7.24 { + cksort { + SELECT w FROM t1 WHERE x=0 AND y<=4 ORDER BY y; + } +} {1 nosort} +do_test where-7.25 { + cksort { + SELECT w FROM t1 WHERE x=6 AND y>10201 ORDER BY y; + } +} {nosort} +do_test where-7.26 { + cksort { + SELECT w FROM t1 WHERE x=6 AND y>=10201 ORDER BY y; + } +} {100 nosort} +do_test where-7.27 { + cksort { + SELECT w FROM t1 WHERE x=0 AND y<4 ORDER BY y DESC; + } +} {nosort} +do_test where-7.28 { + cksort { + SELECT w FROM t1 WHERE x=0 AND y<=4 ORDER BY y DESC; + } +} {1 nosort} +do_test where-7.29 { + cksort { + SELECT w FROM t1 WHERE x=6 AND y>10201 ORDER BY y DESC; + } +} {nosort} +do_test where-7.30 { + cksort { + SELECT w FROM t1 WHERE x=6 AND y>=10201 ORDER BY y DESC; + } +} {100 nosort} +do_test where-7.31 { + cksort { + SELECT y FROM t1 ORDER BY rowid DESC LIMIT 3 + } +} {10201 10000 9801 nosort} +do_test where-7.32 { + cksort { + SELECT y FROM t1 WHERE y<25 ORDER BY rowid DESC + } +} {16 9 4 nosort} +do_test where-7.33 { + cksort { + SELECT y FROM t1 WHERE y<=25 ORDER BY rowid DESC + } +} {25 16 9 4 nosort} +do_test where-7.34 { + cksort { + SELECT y FROM t1 WHERE y<25 AND y>4 ORDER BY rowid DESC, y DESC + } +} {16 9 nosort} +do_test where-7.35 { + cksort { + SELECT y FROM t1 WHERE y<25 AND y>=4 ORDER BY rowid DESC + } +} {16 9 4 nosort} + +do_test where-8.1 { + execsql { + CREATE TABLE t4 AS SELECT * FROM t1; + CREATE INDEX i4xy ON t4(x,y); + } + cksort { + SELECT w FROM t4 WHERE x=4 and y<1000 ORDER BY y DESC limit 3; + } +} {30 29 28 nosort} +do_test where-8.2 { + execsql { + DELETE FROM t4; + } + cksort { + SELECT w FROM t4 WHERE x=4 and y<1000 ORDER BY y DESC limit 3; + } +} {nosort} + +# Make sure searches with an index work with an empty table. +# +do_test where-9.1 { + execsql { + CREATE TABLE t5(x PRIMARY KEY); + SELECT * FROM t5 WHERE x<10; + } +} {} +do_test where-9.2 { + execsql { + SELECT * FROM t5 WHERE x<10 ORDER BY x DESC; + } +} {} +do_test where-9.3 { + execsql { + SELECT * FROM t5 WHERE x=10; + } +} {} + +do_test where-10.1 { + execsql { + SELECT 1 WHERE abs(random())<0 + } +} {} +do_test where-10.2 { + proc tclvar_func {vname} {return [set ::$vname]} + db function tclvar tclvar_func + set ::v1 0 + execsql { + SELECT count(*) FROM t1 WHERE tclvar('v1'); + } +} {0} +do_test where-10.3 { + set ::v1 1 + execsql { + SELECT count(*) FROM t1 WHERE tclvar('v1'); + } +} {100} +do_test where-10.4 { + set ::v1 1 + proc tclvar_func {vname} { + upvar #0 $vname v + set v [expr {!$v}] + return $v + } + execsql { + SELECT count(*) FROM t1 WHERE tclvar('v1'); + } +} {50} + +# Ticket #1376. The query below was causing a segfault. +# The problem was the age-old error of calling realloc() on an +# array while there are still pointers to individual elements of +# that array. 
+# +do_test where-11.1 { + execsql { + CREATE TABLE t99(Dte INT, X INT); + DELETE FROM t99 WHERE (Dte = 2451337) OR (Dte = 2451339) OR + (Dte BETWEEN 2451345 AND 2451347) OR (Dte = 2451351) OR + (Dte BETWEEN 2451355 AND 2451356) OR (Dte = 2451358) OR + (Dte = 2451362) OR (Dte = 2451365) OR (Dte = 2451367) OR + (Dte BETWEEN 2451372 AND 2451376) OR (Dte BETWEEN 2451382 AND 2451384) OR + (Dte = 2451387) OR (Dte BETWEEN 2451389 AND 2451391) OR + (Dte BETWEEN 2451393 AND 2451395) OR (Dte = 2451400) OR + (Dte = 2451402) OR (Dte = 2451404) OR (Dte BETWEEN 2451416 AND 2451418) OR + (Dte = 2451422) OR (Dte = 2451426) OR (Dte BETWEEN 2451445 AND 2451446) OR + (Dte = 2451456) OR (Dte = 2451458) OR (Dte BETWEEN 2451465 AND 2451467) OR + (Dte BETWEEN 2451469 AND 2451471) OR (Dte = 2451474) OR + (Dte BETWEEN 2451477 AND 2451501) OR (Dte BETWEEN 2451503 AND 2451509) OR + (Dte BETWEEN 2451511 AND 2451514) OR (Dte BETWEEN 2451518 AND 2451521) OR + (Dte BETWEEN 2451523 AND 2451531) OR (Dte BETWEEN 2451533 AND 2451537) OR + (Dte BETWEEN 2451539 AND 2451544) OR (Dte BETWEEN 2451546 AND 2451551) OR + (Dte BETWEEN 2451553 AND 2451555) OR (Dte = 2451557) OR + (Dte BETWEEN 2451559 AND 2451561) OR (Dte = 2451563) OR + (Dte BETWEEN 2451565 AND 2451566) OR (Dte BETWEEN 2451569 AND 2451571) OR + (Dte = 2451573) OR (Dte = 2451575) OR (Dte = 2451577) OR (Dte = 2451581) OR + (Dte BETWEEN 2451583 AND 2451586) OR (Dte BETWEEN 2451588 AND 2451592) OR + (Dte BETWEEN 2451596 AND 2451598) OR (Dte = 2451600) OR + (Dte BETWEEN 2451602 AND 2451603) OR (Dte = 2451606) OR (Dte = 2451611); + } +} {} + +# Ticket #2116: Make sure sorting by index works well with nn INTEGER PRIMARY +# KEY. +# +do_test where-12.1 { + execsql { + CREATE TABLE t6(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t6 VALUES(1,'one'); + INSERT INTO t6 VALUES(4,'four'); + CREATE INDEX t6i1 ON t6(b); + } + cksort { + SELECT * FROM t6 ORDER BY b; + } +} {4 four 1 one nosort} +do_test where-12.2 { + cksort { + SELECT * FROM t6 ORDER BY b, a; + } +} {4 four 1 one nosort} +do_test where-12.3 { + cksort { + SELECT * FROM t6 ORDER BY a; + } +} {1 one 4 four nosort} +do_test where-12.4 { + cksort { + SELECT * FROM t6 ORDER BY a, b; + } +} {1 one 4 four nosort} +do_test where-12.5 { + cksort { + SELECT * FROM t6 ORDER BY b DESC; + } +} {1 one 4 four nosort} +do_test where-12.6 { + cksort { + SELECT * FROM t6 ORDER BY b DESC, a DESC; + } +} {1 one 4 four nosort} +do_test where-12.7 { + cksort { + SELECT * FROM t6 ORDER BY b DESC, a ASC; + } +} {1 one 4 four sort} +do_test where-12.8 { + cksort { + SELECT * FROM t6 ORDER BY b ASC, a DESC; + } +} {4 four 1 one sort} +do_test where-12.9 { + cksort { + SELECT * FROM t6 ORDER BY a DESC; + } +} {4 four 1 one nosort} +do_test where-12.10 { + cksort { + SELECT * FROM t6 ORDER BY a DESC, b DESC; + } +} {4 four 1 one nosort} +do_test where-12.11 { + cksort { + SELECT * FROM t6 ORDER BY a DESC, b ASC; + } +} {4 four 1 one nosort} +do_test where-12.12 { + cksort { + SELECT * FROM t6 ORDER BY a ASC, b DESC; + } +} {1 one 4 four nosort} +do_test where-13.1 { + execsql { + CREATE TABLE t7(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t7 VALUES(1,'one'); + INSERT INTO t7 VALUES(4,'four'); + CREATE INDEX t7i1 ON t7(b); + } + cksort { + SELECT * FROM t7 ORDER BY b; + } +} {4 four 1 one nosort} +do_test where-13.2 { + cksort { + SELECT * FROM t7 ORDER BY b, a; + } +} {4 four 1 one nosort} +do_test where-13.3 { + cksort { + SELECT * FROM t7 ORDER BY a; + } +} {1 one 4 four nosort} +do_test where-13.4 { + cksort { + SELECT * FROM t7 ORDER BY 
a, b; + } +} {1 one 4 four nosort} +do_test where-13.5 { + cksort { + SELECT * FROM t7 ORDER BY b DESC; + } +} {1 one 4 four nosort} +do_test where-13.6 { + cksort { + SELECT * FROM t7 ORDER BY b DESC, a DESC; + } +} {1 one 4 four nosort} +do_test where-13.7 { + cksort { + SELECT * FROM t7 ORDER BY b DESC, a ASC; + } +} {1 one 4 four sort} +do_test where-13.8 { + cksort { + SELECT * FROM t7 ORDER BY b ASC, a DESC; + } +} {4 four 1 one sort} +do_test where-13.9 { + cksort { + SELECT * FROM t7 ORDER BY a DESC; + } +} {4 four 1 one nosort} +do_test where-13.10 { + cksort { + SELECT * FROM t7 ORDER BY a DESC, b DESC; + } +} {4 four 1 one nosort} +do_test where-13.11 { + cksort { + SELECT * FROM t7 ORDER BY a DESC, b ASC; + } +} {4 four 1 one nosort} +do_test where-13.12 { + cksort { + SELECT * FROM t7 ORDER BY a ASC, b DESC; + } +} {1 one 4 four nosort} + +# Ticket #2211. +# +# When optimizing out ORDER BY clauses, make sure that trailing terms +# of the ORDER BY clause do not reference other tables in a join. +# +do_test where-14.1 { + execsql { + CREATE TABLE t8(a INTEGER PRIMARY KEY, b TEXT UNIQUE); + INSERT INTO t8 VALUES(1,'one'); + INSERT INTO t8 VALUES(4,'four'); + } + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.a, y.b + } +} {1/4 1/1 4/4 4/1 sort} +do_test where-14.2 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.a, y.b DESC + } +} {1/1 1/4 4/1 4/4 sort} +do_test where-14.3 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.a, x.b + } +} {1/1 1/4 4/1 4/4 nosort} +do_test where-14.4 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.a, x.b DESC + } +} {1/1 1/4 4/1 4/4 nosort} +btree_breakpoint +do_test where-14.5 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||x.b + } +} {4/1 4/4 1/1 1/4 nosort} +do_test where-14.6 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||x.b DESC + } +} {4/1 4/4 1/1 1/4 nosort} +do_test where-14.7 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, y.a||y.b + } +} {4/1 4/4 1/1 1/4 sort} +do_test where-14.7.1 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a, y.a||y.b + } +} {4/1 4/4 1/1 1/4 sort} +do_test where-14.7.2 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a, x.a||x.b + } +} {4/1 4/4 1/1 1/4 nosort} +do_test where-14.8 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, y.a||y.b DESC + } +} {4/4 4/1 1/4 1/1 sort} +do_test where-14.9 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||y.b + } +} {4/4 4/1 1/4 1/1 sort} +do_test where-14.10 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||y.b DESC + } +} {4/1 4/4 1/1 1/4 sort} +do_test where-14.11 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, y.a||x.b + } +} {4/1 4/4 1/1 1/4 sort} +do_test where-14.12 { + cksort { + SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, y.a||x.b DESC + } +} {4/4 4/1 1/4 1/1 sort} + +# Ticket #2445. +# +# There was a crash that could occur when a where clause contains an +# alias for an expression in the result set, and that expression retrieves +# a column of the second or subsequent table in a join. 
+# +do_test where-15.1 { + execsql { + CREATE TEMP TABLE t1 (a, b, c, d, e); + CREATE TEMP TABLE t2 (f); + SELECT t1.e AS alias FROM t2, t1 WHERE alias = 1 ; + } +} {} + +integrity_check {where-99.0} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/where2.test b/libraries/sqlite/unix/sqlite-3.5.1/test/where2.test new file mode 100644 index 0000000..bff3ef6 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/where2.test @@ -0,0 +1,614 @@ +# 2005 July 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the use of indices in WHERE clauses +# based on recent changes to the optimizer. +# +# $Id: where2.test,v 1.12 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test where2-1.0 { + execsql { + BEGIN; + CREATE TABLE t1(w int, x int, y int, z int); + } + for {set i 1} {$i<=100} {incr i} { + set w $i + set x [expr {int(log($i)/log(2))}] + set y [expr {$i*$i + 2*$i + 1}] + set z [expr {$x+$y}] + ifcapable tclvar { + execsql {INSERT INTO t1 VALUES($::w,$::x,$::y,$::z)} + } else { + execsql {INSERT INTO t1 VALUES(:w,:x,:y,:z)} + } + } + execsql { + CREATE UNIQUE INDEX i1w ON t1(w); + CREATE INDEX i1xy ON t1(x,y); + CREATE INDEX i1zyx ON t1(z,y,x); + COMMIT; + } +} {} + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# This procedure executes the SQL. Then it appends to the result the +# "sort" or "nosort" keyword (as in the cksort procedure above) then +# it appends the ::sqlite_query_plan variable. +# +proc queryplan {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return [concat $data $::sqlite_query_plan] +} + + +# Prefer a UNIQUE index over another index. +# +do_test where2-1.1 { + queryplan { + SELECT * FROM t1 WHERE w=85 AND x=6 AND y=7396 + } +} {85 6 7396 7402 nosort t1 i1w} + +# Always prefer a rowid== constraint over any other index. +# +do_test where2-1.3 { + queryplan { + SELECT * FROM t1 WHERE w=85 AND x=6 AND y=7396 AND rowid=85 + } +} {85 6 7396 7402 nosort t1 *} + +# When constrained by a UNIQUE index, the ORDER BY clause is always ignored. 
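+# (A note on reading the queryplan results that follow: the returned list
+# is the query output, then "sort" or "nosort", then the contents of
+# ::sqlite_query_plan, which holds a table-name/index-name pair for each
+# table in the join.  An empty entry {} appears to mean the table is
+# scanned without an index and "*" appears to mean the rowid or INTEGER
+# PRIMARY KEY is used, so {85 6 7396 7402 nosort t1 i1w} above says the
+# row was found through index i1w and no separate sort pass was needed.)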
+# +do_test where2-2.1 { + queryplan { + SELECT * FROM t1 WHERE w=85 ORDER BY random(5); + } +} {85 6 7396 7402 nosort t1 i1w} +do_test where2-2.2 { + queryplan { + SELECT * FROM t1 WHERE x=6 AND y=7396 ORDER BY random(5); + } +} {85 6 7396 7402 sort t1 i1xy} +do_test where2-2.3 { + queryplan { + SELECT * FROM t1 WHERE rowid=85 AND x=6 AND y=7396 ORDER BY random(5); + } +} {85 6 7396 7402 nosort t1 *} + + +# Efficient handling of forward and reverse table scans. +# +do_test where2-3.1 { + queryplan { + SELECT * FROM t1 ORDER BY rowid LIMIT 2 + } +} {1 0 4 4 2 1 9 10 nosort t1 *} +do_test where2-3.2 { + queryplan { + SELECT * FROM t1 ORDER BY rowid DESC LIMIT 2 + } +} {100 6 10201 10207 99 6 10000 10006 nosort t1 *} + +# The IN operator can be used by indices at multiple layers +# +ifcapable subquery { + do_test where2-4.1 { + queryplan { + SELECT * FROM t1 WHERE z IN (10207,10006) AND y IN (10000,10201) + AND x>0 AND x<10 + ORDER BY w + } + } {99 6 10000 10006 100 6 10201 10207 sort t1 i1zyx} + do_test where2-4.2 { + queryplan { + SELECT * FROM t1 WHERE z IN (10207,10006) AND y=10000 + AND x>0 AND x<10 + ORDER BY w + } + } {99 6 10000 10006 sort t1 i1zyx} + do_test where2-4.3 { + queryplan { + SELECT * FROM t1 WHERE z=10006 AND y IN (10000,10201) + AND x>0 AND x<10 + ORDER BY w + } + } {99 6 10000 10006 sort t1 i1zyx} + ifcapable compound { + do_test where2-4.4 { + queryplan { + SELECT * FROM t1 WHERE z IN (SELECT 10207 UNION SELECT 10006) + AND y IN (10000,10201) + AND x>0 AND x<10 + ORDER BY w + } + } {99 6 10000 10006 100 6 10201 10207 sort t1 i1zyx} + do_test where2-4.5 { + queryplan { + SELECT * FROM t1 WHERE z IN (SELECT 10207 UNION SELECT 10006) + AND y IN (SELECT 10000 UNION SELECT 10201) + AND x>0 AND x<10 + ORDER BY w + } + } {99 6 10000 10006 100 6 10201 10207 sort t1 i1zyx} + } + do_test where2-4.6 { + queryplan { + SELECT * FROM t1 + WHERE x IN (1,2,3,4,5,6,7,8) + AND y IN (10000,10001,10002,10003,10004,10005) + ORDER BY 2 + } + } {99 6 10000 10006 sort t1 i1xy} + + # Duplicate entries on the RHS of an IN operator do not cause duplicate + # output rows. + # + do_test where2-4.6 { + queryplan { + SELECT * FROM t1 WHERE z IN (10207,10006,10006,10207) + ORDER BY w + } + } {99 6 10000 10006 100 6 10201 10207 sort t1 i1zyx} + ifcapable compound { + do_test where2-4.7 { + queryplan { + SELECT * FROM t1 WHERE z IN ( + SELECT 10207 UNION ALL SELECT 10006 + UNION ALL SELECT 10006 UNION ALL SELECT 10207) + ORDER BY w + } + } {99 6 10000 10006 100 6 10201 10207 sort t1 i1zyx} + } + +} ;# ifcapable subquery + +# The use of an IN operator disables the index as a sorter. +# +do_test where2-5.1 { + queryplan { + SELECT * FROM t1 WHERE w=99 ORDER BY w + } +} {99 6 10000 10006 nosort t1 i1w} + +ifcapable subquery { + do_test where2-5.2 { + queryplan { + SELECT * FROM t1 WHERE w IN (99) ORDER BY w + } + } {99 6 10000 10006 sort t1 i1w} +} + +# Verify that OR clauses get translated into IN operators.
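+# (Roughly speaking, a disjunction over a single column such as
+# "w=99 OR w=100" can be handled as if it had been written with IN, which
+# lets the i1w index drive the lookups instead of a full scan.  A sketch
+# of the equivalent form, not one of the numbered tests:
+#
+#   SELECT * FROM t1 WHERE w IN (99,100) ORDER BY +w;
+#
+# The tests below check this through the reported query plan, including
+# cases where a unary "+" on the column defeats the rewrite.)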
+# +set ::idx {} +ifcapable subquery {set ::idx i1w} +do_test where2-6.1.1 { + queryplan { + SELECT * FROM t1 WHERE w=99 OR w=100 ORDER BY +w + } +} [list 99 6 10000 10006 100 6 10201 10207 sort t1 $::idx] +do_test where2-6.1.2 { + queryplan { + SELECT * FROM t1 WHERE 99=w OR 100=w ORDER BY +w + } +} [list 99 6 10000 10006 100 6 10201 10207 sort t1 $::idx] +do_test where2-6.2 { + queryplan { + SELECT * FROM t1 WHERE w=99 OR w=100 OR 6=w ORDER BY +w + } +} [list 6 2 49 51 99 6 10000 10006 100 6 10201 10207 sort t1 $::idx] + +do_test where2-6.3 { + queryplan { + SELECT * FROM t1 WHERE w=99 OR w=100 OR 6=+w ORDER BY +w + } +} {6 2 49 51 99 6 10000 10006 100 6 10201 10207 sort t1 {}} +do_test where2-6.4 { + queryplan { + SELECT * FROM t1 WHERE w=99 OR +w=100 OR 6=w ORDER BY +w + } +} {6 2 49 51 99 6 10000 10006 100 6 10201 10207 sort t1 {}} + +set ::idx {} +ifcapable subquery {set ::idx i1zyx} +do_test where2-6.5 { + queryplan { + SELECT b.* FROM t1 a, t1 b + WHERE a.w=1 AND (a.y=b.z OR b.z=10) + ORDER BY +b.w + } +} [list 1 0 4 4 2 1 9 10 sort a i1w b $::idx] +do_test where2-6.6 { + queryplan { + SELECT b.* FROM t1 a, t1 b + WHERE a.w=1 AND (b.z=10 OR a.y=b.z OR b.z=10) + ORDER BY +b.w + } +} [list 1 0 4 4 2 1 9 10 sort a i1w b $::idx] + +# Ticket #2249. Make sure the OR optimization is not attempted if +# comparisons between columns of different affinities are needed. +# +do_test where2-6.7 { + execsql { + CREATE TABLE t2249a(a TEXT UNIQUE); + CREATE TABLE t2249b(b INTEGER); + INSERT INTO t2249a VALUES('0123'); + INSERT INTO t2249b VALUES(123); + } + queryplan { + -- Because a is type TEXT and b is type INTEGER, both a and b + -- will attempt to convert to NUMERIC before the comparison. + -- They will thus compare equal. + -- + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=b; + } +} {123 0123 nosort t2249b {} t2249a {}} +do_test where2-6.9 { + queryplan { + -- The + operator removes affinity from the rhs. No conversions + -- occur and the comparison is false. The result is an empty set. + -- + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b; + } +} {nosort t2249b {} {} sqlite_autoindex_t2249a_1} +do_test where2-6.9.2 { + # The same thing but with the expression flipped around. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE +b=a + } +} {nosort t2249b {} {} sqlite_autoindex_t2249a_1} +do_test where2-6.10 { + queryplan { + -- Use + on both sides of the comparison to disable indices + -- completely. Make sure we get the same result. + -- + SELECT * FROM t2249b CROSS JOIN t2249a WHERE +a=+b; + } +} {nosort t2249b {} t2249a {}} +do_test where2-6.11 { + # This will not attempt the OR optimization because of the a=b + # comparison. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=b OR a='hello'; + } +} {123 0123 nosort t2249b {} t2249a {}} +do_test where2-6.11.2 { + # Permutations of the expression terms. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE b=a OR a='hello'; + } +} {123 0123 nosort t2249b {} t2249a {}} +do_test where2-6.11.3 { + # Permutations of the expression terms. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE 'hello'=a OR b=a; + } +} {123 0123 nosort t2249b {} t2249a {}} +do_test where2-6.11.4 { + # Permutations of the expression terms. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a='hello' OR b=a; + } +} {123 0123 nosort t2249b {} t2249a {}} +ifcapable explain { + do_test where2-6.12 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. 
The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR a='hello'; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.12.2 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a='hello' OR +b=a; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} +} +ifcapable explain { + do_test where2-6.12.3 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE +b=a OR a='hello'; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.13 { + # The addition of +a on the second term disabled the OR optimization. + # But we should still get the same empty-set result as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR +a='hello'; + } + } {nosort t2249b {} t2249a {}} +} + +# Variations on the order of terms in a WHERE clause in order +# to make sure the OR optimizer can recognize them all. +do_test where2-6.20 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE x.a=y.a + } +} {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} +ifcapable explain { + do_test where2-6.21 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE x.a=y.a OR y.a='hello' + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.22 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a=x.a OR y.a='hello' + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.23 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a='hello' OR x.a=y.a + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} +} + +# Unique queries (queries that are guaranteed to return only a single +# row of result) do not call the sorter. But all tables must give +# a unique result. If any one table in the join does not give a unique +# result then sorting is necessary. +# +do_test where2-7.1 { + cksort { + create table t8(a unique, b, c); + insert into t8 values(1,2,3); + insert into t8 values(2,3,4); + create table t9(x,y); + insert into t9 values(2,4); + insert into t9 values(2,3); + select y from t8, t9 where a=1 order by a, y; + } +} {3 4 sort} +do_test where2-7.2 { + cksort { + select * from t8 where a=1 order by b, c + } +} {1 2 3 nosort} +do_test where2-7.3 { + cksort { + select * from t8, t9 where a=1 and y=3 order by b, x + } +} {1 2 3 2 3 sort} +do_test where2-7.4 { + cksort { + create unique index i9y on t9(y); + select * from t8, t9 where a=1 and y=3 order by b, x + } +} {1 2 3 2 3 nosort} + +# Ticket #1807. Using IN constraints on multiple columns of +# a multi-column index.
+# +ifcapable subquery { + do_test where2-8.1 { + execsql { + SELECT * FROM t1 WHERE x IN (20,21) AND y IN (1,2) + } + } {} + do_test where2-8.2 { + execsql { + SELECT * FROM t1 WHERE x IN (1,2) AND y IN (-5,-6) + } + } {} + execsql {CREATE TABLE tx AS SELECT * FROM t1} + do_test where2-8.3 { + execsql { + SELECT w FROM t1 + WHERE x IN (SELECT x FROM tx WHERE rowid<0) + AND +y IN (SELECT y FROM tx WHERE rowid=1) + } + } {} + do_test where2-8.4 { + execsql { + SELECT w FROM t1 + WHERE x IN (SELECT x FROM tx WHERE rowid=1) + AND y IN (SELECT y FROM tx WHERE rowid<0) + } + } {} + #set sqlite_where_trace 1 + do_test where2-8.5 { + execsql { + CREATE INDEX tx_xyz ON tx(x, y, z, w); + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 12 AND 14) + } + } {12 13 14} + do_test where2-8.6 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 12 AND 14) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {12 13 14} + do_test where2-8.7 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 12 AND 14) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {10 11 12 13 14 15} + do_test where2-8.8 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {10 11 12 13 14 15 16 17 18 19 20} + do_test where2-8.9 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 2 AND 4) + } + } {} + do_test where2-8.10 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 2 AND 4) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.11 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 2 AND 4) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.12 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN -4 AND -2) + } + } {} + do_test where2-8.13 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN -4 AND -2) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.14 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN -4 AND -2) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.15 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 200 AND 300) + } + } {} + do_test where2-8.16 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 200 AND 300) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test 
where2-8.17 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE w BETWEEN 200 AND 300) + AND y IN (SELECT y FROM t1 WHERE w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.18 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE +w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE +w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE +w BETWEEN 200 AND 300) + } + } {} + do_test where2-8.19 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE +w BETWEEN 10 AND 20) + AND y IN (SELECT y FROM t1 WHERE +w BETWEEN 200 AND 300) + AND z IN (SELECT z FROM t1 WHERE +w BETWEEN 10 AND 20) + } + } {} + do_test where2-8.20 { + execsql { + SELECT w FROM tx + WHERE x IN (SELECT x FROM t1 WHERE +w BETWEEN 200 AND 300) + AND y IN (SELECT y FROM t1 WHERE +w BETWEEN 10 AND 20) + AND z IN (SELECT z FROM t1 WHERE +w BETWEEN 10 AND 20) + } + } {} +} + +# Make sure WHERE clauses of the form A=1 AND (B=2 OR B=3) are optimized +# when we have an index on A and B. +# +ifcapable or_opt&&tclvar { + do_test where2-9.1 { + execsql { + BEGIN; + CREATE TABLE t10(a,b,c); + INSERT INTO t10 VALUES(1,1,1); + INSERT INTO t10 VALUES(1,2,2); + INSERT INTO t10 VALUES(1,3,3); + } + for {set i 4} {$i<=1000} {incr i} { + execsql {INSERT INTO t10 VALUES(1,$i,$i)} + } + execsql { + CREATE INDEX i10 ON t10(a,b); + COMMIT; + SELECT count(*) FROM t10; + } + } 1000 + do_test where2-9.2 { + count { + SELECT * FROM t10 WHERE a=1 AND (b=2 OR b=3) + } + } {1 2 2 1 3 3 7} +} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/where3.test b/libraries/sqlite/unix/sqlite-3.5.1/test/where3.test new file mode 100644 index 0000000..30e0976 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/where3.test @@ -0,0 +1,162 @@ +# 2006 January 31 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the join reordering optimization +# in cases that include a LEFT JOIN. +# +# $Id: where3.test,v 1.3 2006/12/16 16:25:17 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# The following is from ticket #1652. +# +# A comma join then a left outer join: A,B left join C. +# Arrange indices so that the B table is chosen to go first. +# Also put an index on C, but make sure that A is chosen before C. +# +do_test where3-1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(p, q); + CREATE TABLE t3(x, y); + + INSERT INTO t1 VALUES(111,'one'); + INSERT INTO t1 VALUES(222,'two'); + INSERT INTO t1 VALUES(333,'three'); + + INSERT INTO t2 VALUES(1,111); + INSERT INTO t2 VALUES(2,222); + INSERT INTO t2 VALUES(4,444); + CREATE INDEX t2i1 ON t2(p); + + INSERT INTO t3 VALUES(999,'nine'); + CREATE INDEX t3i1 ON t3(x); + + SELECT * FROM t1, t2 LEFT JOIN t3 ON q=x WHERE p=2 AND a=q; + } +} {222 two 2 222 {} {}} + +# Ticket #1830 +# +# This is similar to the above but with the LEFT JOIN on the +# other side. 
+# +do_test where3-1.2 { + execsql { + CREATE TABLE parent1(parent1key, child1key, Child2key, child3key); + CREATE TABLE child1 ( child1key NVARCHAR, value NVARCHAR ); + CREATE UNIQUE INDEX PKIDXChild1 ON child1 ( child1key ); + CREATE TABLE child2 ( child2key NVARCHAR, value NVARCHAR ); + + INSERT INTO parent1(parent1key,child1key,child2key) + VALUES ( 1, 'C1.1', 'C2.1' ); + INSERT INTO child1 ( child1key, value ) VALUES ( 'C1.1', 'Value for C1.1' ); + INSERT INTO child2 ( child2key, value ) VALUES ( 'C2.1', 'Value for C2.1' ); + + INSERT INTO parent1 ( parent1key, child1key, child2key ) + VALUES ( 2, 'C1.2', 'C2.2' ); + INSERT INTO child2 ( child2key, value ) VALUES ( 'C2.2', 'Value for C2.2' ); + + INSERT INTO parent1 ( parent1key, child1key, child2key ) + VALUES ( 3, 'C1.3', 'C2.3' ); + INSERT INTO child1 ( child1key, value ) VALUES ( 'C1.3', 'Value for C1.3' ); + INSERT INTO child2 ( child2key, value ) VALUES ( 'C2.3', 'Value for C2.3' ); + + SELECT parent1.parent1key, child1.value, child2.value + FROM parent1 + LEFT OUTER JOIN child1 ON child1.child1key = parent1.child1key + INNER JOIN child2 ON child2.child2key = parent1.child2key; + } +} {1 {Value for C1.1} {Value for C2.1} 2 {} {Value for C2.2} 3 {Value for C1.3} {Value for C2.3}} + +# This procedure executes the SQL. Then it appends +# the ::sqlite_query_plan variable. +# +proc queryplan {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + return [concat $data $::sqlite_query_plan] +} + + +# If you have a from clause of the form: A B C left join D +# then make sure the query optimizer is able to reorder the +# A B C part any way it wants. +# +# Following the fix to ticket #1652, there was a time when +# the C table would not reorder. So the following reorderings +# were possible: +# +# A B C left join D +# B A C left join D +# +# But these reorders were not allowed +# +# C A B left join D +# A C B left join D +# C B A left join D +# B C A left join D +# +# The following tests are here to verify that the latter four +# reorderings are allowed again.
+# +do_test where3-2.1 { + execsql { + CREATE TABLE tA(apk integer primary key, ax); + CREATE TABLE tB(bpk integer primary key, bx); + CREATE TABLE tC(cpk integer primary key, cx); + CREATE TABLE tD(dpk integer primary key, dx); + } + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=bx AND bpk=ax + } +} {tA {} tB * tC * tD *} +do_test where3-2.2 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=bx AND apk=bx + } +} {tB {} tA * tC * tD *} +do_test where3-2.3 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=bx AND apk=bx + } +} {tB {} tA * tC * tD *} +do_test where3-2.4 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE apk=cx AND bpk=ax + } +} {tC {} tA * tB * tD *} +do_test where3-2.5 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=ax AND bpk=cx + } +} {tA {} tC * tB * tD *} +do_test where3-2.5 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE bpk=cx AND apk=bx + } +} {tC {} tB * tA * tD *} +do_test where3-2.6 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=bx AND apk=cx + } +} {tB {} tC * tA * tD *} + + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/where4.test b/libraries/sqlite/unix/sqlite-3.5.1/test/where4.test new file mode 100644 index 0000000..7abbc1d --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/where4.test @@ -0,0 +1,270 @@ +# 2006 October 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the use of indices in WHERE clauses. +# This file was created when support for optimizing IS NULL phrases +# was added. And so the principal purpose of this file is to test +# that IS NULL phrases are correctly optimized. But you can never +# have too many tests, so some other tests are thrown in as well. +# +# $Id: where4.test,v 1.5 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !tclvar||!bloblit { + finish_test + return +} + +# Build some test data +# +do_test where4-1.0 { + execsql { + CREATE TABLE t1(w, x, y); + CREATE INDEX i1wxy ON t1(w,x,y); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(1,NULL,3); + INSERT INTO t1 VALUES('a','b','c'); + INSERT INTO t1 VALUES('a',NULL,'c'); + INSERT INTO t1 VALUES(X'78',x'79',x'7a'); + INSERT INTO t1 VALUES(X'78',NULL,X'7A'); + INSERT INTO t1 VALUES(NULL,NULL,NULL); + SELECT count(*) FROM t1; + } +} {7} + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# Verify that queries use an index. We are using the special variable +# "sqlite_search_count" which tallies the number of executions of MoveTo +# and Next operators in the VDBE. By verifying that the search count is +# small we can be assured that indices are being used properly.
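+# (For example, in the results below the final element is the value of
+# ::sqlite_search_count after the query: {7 2} means rowid 7 was found
+# with only two cursor operations, presumably through index i1wxy, while
+# the same query written against +w, which cannot use the index, reports
+# a noticeably larger count because every row has to be visited.)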
+# +do_test where4-1.1 { + count {SELECT rowid FROM t1 WHERE w IS NULL} +} {7 2} +do_test where4-1.2 { + count {SELECT rowid FROM t1 WHERE +w IS NULL} +} {7 6} +do_test where4-1.3 { + count {SELECT rowid FROM t1 WHERE w=1 AND x IS NULL} +} {2 2} +do_test where4-1.4 { + count {SELECT rowid FROM t1 WHERE w=1 AND +x IS NULL} +} {2 3} +do_test where4-1.5 { + count {SELECT rowid FROM t1 WHERE w=1 AND x>0} +} {1 2} +do_test where4-1.6 { + count {SELECT rowid FROM t1 WHERE w=1 AND x<9} +} {1 3} +do_test where4-1.7 { + count {SELECT rowid FROM t1 WHERE w=1 AND x IS NULL AND y=3} +} {2 2} +do_test where4-1.8 { + count {SELECT rowid FROM t1 WHERE w=1 AND x IS NULL AND y>2} +} {2 2} +do_test where4-1.9 { + count {SELECT rowid FROM t1 WHERE w='a' AND x IS NULL AND y='c'} +} {4 2} +do_test where4-1.10 { + count {SELECT rowid FROM t1 WHERE w=x'78' AND x IS NULL} +} {6 2} +do_test where4-1.11 { + count {SELECT rowid FROM t1 WHERE w=x'78' AND x IS NULL AND y=123} +} {1} +do_test where4-1.12 { + count {SELECT rowid FROM t1 WHERE w=x'78' AND x IS NULL AND y=x'7A'} +} {6 2} +do_test where4-1.13 { + count {SELECT rowid FROM t1 WHERE w IS NULL AND x IS NULL} +} {7 2} +do_test where4-1.14 { + count {SELECT rowid FROM t1 WHERE w IS NULL AND x IS NULL AND y IS NULL} +} {7 2} +do_test where4-1.15 { + count {SELECT rowid FROM t1 WHERE w IS NULL AND x IS NULL AND y<0} +} {2} +do_test where4-1.16 { + count {SELECT rowid FROM t1 WHERE w IS NULL AND x IS NULL AND y>=0} +} {1} + +do_test where4-2.1 { + execsql {SELECT rowid FROM t1 ORDER BY w, x, y} +} {7 2 1 4 3 6 5} +do_test where4-2.2 { + execsql {SELECT rowid FROM t1 ORDER BY w DESC, x, y} +} {6 5 4 3 2 1 7} +do_test where4-2.3 { + execsql {SELECT rowid FROM t1 ORDER BY w, x DESC, y} +} {7 1 2 3 4 5 6} + + +# Ticket #2177 +# +# Suppose you have a left join where the right table of the left +# join (the one that can be NULL) has an index on two columns. +# The first indexed column is used in the ON clause of the join. +# The second indexed column is used in the WHERE clause with an IS NULL +# constraint. It is not allowed to use the IS NULL optimization to +# optimize the query because the second column might be NULL because +# the right table did not match - something the index does not know +# about. +# +do_test where4-3.1 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + INSERT INTO t2 VALUES(3); + CREATE TABLE t3(x,y,UNIQUE(x,y)); + INSERT INTO t3 VALUES(1,11); + INSERT INTO t3 VALUES(2,NULL); + + SELECT * FROM t2 LEFT JOIN t3 ON a=x WHERE +y IS NULL; + } +} {2 2 {} 3 {} {}} +do_test where4-3.2 { + execsql { + SELECT * FROM t2 LEFT JOIN t3 ON a=x WHERE y IS NULL; + } +} {2 2 {} 3 {} {}} + +# Ticket #2189. Probably the same bug as #2177. 
+# +do_test where4-4.1 { + execsql { + CREATE TABLE test(col1 TEXT PRIMARY KEY); + INSERT INTO test(col1) values('a'); + INSERT INTO test(col1) values('b'); + INSERT INTO test(col1) values('c'); + CREATE TABLE test2(col1 TEXT PRIMARY KEY); + INSERT INTO test2(col1) values('a'); + INSERT INTO test2(col1) values('b'); + INSERT INTO test2(col1) values('c'); + SELECT * FROM test t1 LEFT OUTER JOIN test2 t2 ON t1.col1 = t2.col1 + WHERE +t2.col1 IS NULL; + } +} {} +do_test where4-4.2 { + execsql { + SELECT * FROM test t1 LEFT OUTER JOIN test2 t2 ON t1.col1 = t2.col1 + WHERE t2.col1 IS NULL; + } +} {} +do_test where4-4.3 { + execsql { + SELECT * FROM test t1 LEFT OUTER JOIN test2 t2 ON t1.col1 = t2.col1 + WHERE +t1.col1 IS NULL; + } +} {} +do_test where4-4.4 { + execsql { + SELECT * FROM test t1 LEFT OUTER JOIN test2 t2 ON t1.col1 = t2.col1 + WHERE t1.col1 IS NULL; + } +} {} + +# Ticket #2273. Problems with IN operators and NULLs. +# +do_test where4-5.1 { + execsql { + CREATE TABLE t4(x,y,z,PRIMARY KEY(x,y)); + } + execsql { + SELECT * + FROM t2 LEFT JOIN t4 b1 + LEFT JOIN t4 b2 ON b2.x=b1.x AND b2.y IN (b1.y); + } +} {1 {} {} {} {} {} {} 2 {} {} {} {} {} {} 3 {} {} {} {} {} {}} +do_test where4-5.2 { + execsql { + INSERT INTO t4 VALUES(1,1,11); + INSERT INTO t4 VALUES(1,2,12); + INSERT INTO t4 VALUES(1,3,13); + INSERT INTO t4 VALUES(2,2,22); + SELECT rowid FROM t4 WHERE x IN (1,9,2,5) AND y IN (1,3,NULL,2) AND z!=13; + } +} {1 2 4} +do_test where4-5.3 { + execsql { + SELECT rowid FROM t4 WHERE x IN (1,9,NULL,2) AND y IN (1,3,2) AND z!=13; + } +} {1 2 4} +do_test where4-6.1 { + execsql { + CREATE TABLE t5(a,b,c,d,e,f,UNIQUE(a,b,c,d,e,f)); + INSERT INTO t5 VALUES(1,1,1,1,1,11111); + INSERT INTO t5 VALUES(2,2,2,2,2,22222); + INSERT INTO t5 VALUES(1,2,3,4,5,12345); + INSERT INTO t5 VALUES(2,3,4,5,6,23456); + } + execsql { + SELECT rowid FROM t5 + WHERE a IN (1,9,2) AND b=2 AND c IN (1,2,3,4) AND d>0 + } +} {3 2} +do_test where4-6.2 { + execsql { + SELECT rowid FROM t5 + WHERE a IN (1,NULL,2) AND b=2 AND c IN (1,2,3,4) AND d>0 + } +} {3 2} +do_test where4-7.1 { + execsql { + CREATE TABLE t6(y,z,PRIMARY KEY(y,z)); + } + execsql { + SELECT * FROM t6 WHERE y=NULL AND z IN ('hello'); + } +} {} + +integrity_check {where4-99.0} + +do_test where4-7.1 { + execsql { + BEGIN; + CREATE TABLE t8(a, b, c, d); + CREATE INDEX t8_i ON t8(a, b, c); + CREATE TABLE t7(i); + + INSERT INTO t7 VALUES(1); + INSERT INTO t7 SELECT i*2 FROM t7; + INSERT INTO t7 SELECT i*2 FROM t7; + INSERT INTO t7 SELECT i*2 FROM t7; + INSERT INTO t7 SELECT i*2 FROM t7; + INSERT INTO t7 SELECT i*2 FROM t7; + INSERT INTO t7 SELECT i*2 FROM t7; + + COMMIT; + } +} {} + +# At one point the sub-select inside the aggregate sum() function in the +# following query was leaking a couple of stack entries. This query +# runs the SELECT in a loop enough times that an assert() fails. Or rather, +# did fail before the bug was fixed. +# +do_test where4-7.2 { + execsql { + SELECT sum(( + SELECT d FROM t8 WHERE a = i AND b = i AND c < NULL + )) FROM t7; + } +} {{}} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/where5.test b/libraries/sqlite/unix/sqlite-3.5.1/test/where5.test new file mode 100644 index 0000000..760224c --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/where5.test @@ -0,0 +1,288 @@ +# 2007 June 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing NULL comparisons in the WHERE clause. +# See ticket #2404. +# +# $Id: where5.test,v 1.2 2007/06/08 08:43:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test where5-1.0 { + execsql { + CREATE TABLE t1(x TEXT); + CREATE TABLE t2(x INTEGER); + CREATE TABLE t3(x INTEGER PRIMARY KEY); + INSERT INTO t1 VALUES(-1); + INSERT INTO t1 VALUES(0); + INSERT INTO t1 VALUES(1); + INSERT INTO t2 SELECT * FROM t1; + INSERT INTO t3 SELECT * FROM t2; + } + execsql { + SELECT * FROM t1 WHERE x<0 + } +} {-1} +do_test where5-1.1 { + execsql { + SELECT * FROM t1 WHERE x<=0 + } +} {-1 0} +do_test where5-1.2 { + execsql { + SELECT * FROM t1 WHERE x=0 + } +} {0} +do_test where5-1.3 { + execsql { + SELECT * FROM t1 WHERE x>=0 + } +} {0 1} +do_test where5-1.4 { + execsql { + SELECT * FROM t1 WHERE x>0 + } +} {1} +do_test where5-1.5 { + execsql { + SELECT * FROM t1 WHERE x<>0 + } +} {-1 1} +do_test where5-1.6 { + execsql { + SELECT * FROM t1 WHERE x=NULL + } +} {} +do_test where5-1.10 { + execsql { + SELECT * FROM t1 WHERE x>NULL + } +} {} +do_test where5-1.11 { + execsql { + SELECT * FROM t1 WHERE x!=NULL + } +} {} +do_test where5-1.12 { + execsql { + SELECT * FROM t1 WHERE x IS NULL + } +} {} +do_test where5-1.13 { + execsql { + SELECT * FROM t1 WHERE x IS NOT NULL + } +} {-1 0 1} + + +do_test where5-2.0 { + execsql { + SELECT * FROM t2 WHERE x<0 + } +} {-1} +do_test where5-2.1 { + execsql { + SELECT * FROM t2 WHERE x<=0 + } +} {-1 0} +do_test where5-2.2 { + execsql { + SELECT * FROM t2 WHERE x=0 + } +} {0} +do_test where5-2.3 { + execsql { + SELECT * FROM t2 WHERE x>=0 + } +} {0 1} +do_test where5-2.4 { + execsql { + SELECT * FROM t2 WHERE x>0 + } +} {1} +do_test where5-2.5 { + execsql { + SELECT * FROM t2 WHERE x<>0 + } +} {-1 1} +do_test where5-2.6 { + execsql { + SELECT * FROM t2 WHERE x=NULL + } +} {} +do_test where5-2.10 { + execsql { + SELECT * FROM t2 WHERE x>NULL + } +} {} +do_test where5-2.11 { + execsql { + SELECT * FROM t2 WHERE x!=NULL + } +} {} +do_test where5-2.12 { + execsql { + SELECT * FROM t2 WHERE x IS NULL + } +} {} +do_test where5-2.13 { + execsql { + SELECT * FROM t2 WHERE x IS NOT NULL + } +} {-1 0 1} + + +do_test where5-3.0 { + execsql { + SELECT * FROM t3 WHERE x<0 + } +} {-1} +do_test where5-3.1 { + execsql { + SELECT * FROM t3 WHERE x<=0 + } +} {-1 0} +do_test where5-3.2 { + execsql { + SELECT * FROM t3 WHERE x=0 + } +} {0} +do_test where5-3.3 { + execsql { + SELECT * FROM t3 WHERE x>=0 + } +} {0 1} +do_test where5-3.4 { + execsql { + SELECT * FROM t3 WHERE x>0 + } +} {1} +do_test where5-3.5 { + execsql { + SELECT * FROM t3 WHERE x<>0 + } +} {-1 1} +do_test where5-3.6 { + execsql { + SELECT * FROM t3 WHERE x=NULL + } +} {} +do_test where5-3.10 { + execsql { + SELECT * FROM t3 WHERE x>NULL + } +} {} +do_test where5-3.11 { + execsql { + SELECT * FROM t3 WHERE x!=NULL + } +} {} +do_test where5-3.12 { + execsql { + SELECT * FROM t3 WHERE x IS NULL + } +} {} +do_test where5-3.13 { + execsql { + SELECT * FROM t3 WHERE x IS NOT NULL + } +} {-1 0 1} + +do_test where5-4.0 { + execsql { + SELECT x<NULL FROM t3 + } +} {{} {} {}} +do_test where5-4.4 { + execsql { + SELECT x>=NULL FROM t3 + } +} {{} {} {}} +do_test where5-4.5 { + execsql { + SELECT x!=NULL FROM t3 + } +} {{} {} {}} +do_test where5-4.6 { +
execsql { + SELECT x IS NULL FROM t3 + } +} {0 0 0} +do_test where5-4.7 { + execsql { + SELECT x IS NOT NULL FROM t3 + } +} {1 1 1} + +finish_test diff --git a/libraries/sqlite/unix/sqlite-3.5.1/test/zeroblob.test b/libraries/sqlite/unix/sqlite-3.5.1/test/zeroblob.test new file mode 100644 index 0000000..04a6f63 --- /dev/null +++ b/libraries/sqlite/unix/sqlite-3.5.1/test/zeroblob.test @@ -0,0 +1,213 @@ +# 2007 May 02 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing of the zero-filled blob functionality +# including the sqlite3_bind_zeroblob(), sqlite3_result_zeroblob(), +# and the built-in zeroblob() SQL function. +# +# $Id: zeroblob.test,v 1.10 2007/09/12 17:01:45 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !incrblob { + finish_test + return +} + +# When zeroblob() is used for the last field of a column, then the +# content of the zeroblob is never instantiated on the VDBE stack. +# But it does get inserted into the database correctly. +# +do_test zeroblob-1.1 { + execsql { + CREATE TABLE t1(a,b,c,d); + } + set ::sqlite3_max_blobsize 0 + execsql { + INSERT INTO t1 VALUES(2,3,4,zeroblob(10000)); + } + set ::sqlite3_max_blobsize +} {10} +do_test zeroblob-1.2 { + execsql { + SELECT length(d) FROM t1 + } +} {10000} + +# If a non-NULL column follows the zeroblob, then the content of +# the zeroblob must be instantiated. +# +do_test zeroblob-1.3 { + set ::sqlite3_max_blobsize 0 + execsql { + INSERT INTO t1 VALUES(3,4,zeroblob(10000),5); + } + set ::sqlite3_max_blobsize +} {10010} +do_test zeroblob-1.4 { + execsql { + SELECT length(c), length(d) FROM t1 + } +} {1 10000 10000 1} + +# Multiple zeroblobs can appear at the end of record. No instantiation +# of the blob content occurs on the stack. +# +do_test zeroblob-1.5 { + set ::sqlite3_max_blobsize 0 + execsql { + INSERT INTO t1 VALUES(4,5,zeroblob(10000),zeroblob(10000)); + } + set ::sqlite3_max_blobsize +} {11} +do_test zeroblob-1.6 { + execsql { + SELECT length(c), length(d) FROM t1 + } +} {1 10000 10000 1 10000 10000} + +# NULLs can follow the zeroblob() or be intermixed with zeroblobs and +# no instantiation of the zeroblobs occurs on the stack. +# +do_test zeroblob-1.7 { + set ::sqlite3_max_blobsize 0 + execsql { + INSERT INTO t1 VALUES(5,zeroblob(10000),NULL,zeroblob(10000)); + } + set ::sqlite3_max_blobsize +} {10} +do_test zeroblob-1.8 { + execsql { + SELECT length(b), length(d) FROM t1 WHERE a=5 + } +} {10000 10000} + +# Comparisons against zeroblobs work. +# +do_test zeroblob-2.1 { + execsql { + SELECT a FROM t1 WHERE b=zeroblob(10000) + } +} {5} + +# Comparisons against zeroblobs work even when indexed. 
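+# (For reference, zeroblob(N) behaves like a blob literal of N zero
+# bytes, so a comparison along the following lines is expected to be
+# true; this is an illustrative query, not one of the numbered tests:
+#
+#   SELECT zeroblob(4) = X'00000000';   -- 1
+#
+# That is why the b=zeroblob(10000) lookups above and below match the row
+# whose b column was filled by zeroblob(10000), with or without an index.)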
+# +do_test zeroblob-2.2 { + execsql { + CREATE INDEX i1_1 ON t1(b); + SELECT a FROM t1 WHERE b=zeroblob(10000); + } +} {5} + +# DISTINCT works for zeroblobs +# +ifcapable bloblit&&subquery&&compound { + do_test zeroblob-3.1 { + execsql { + SELECT count(DISTINCT a) FROM ( + SELECT x'00000000000000000000' AS a + UNION ALL + SELECT zeroblob(10) AS a + ) + } + } {1} +} + +# Concatenation works with zeroblob +# +ifcapable bloblit { + do_test zeroblob-4.1 { + execsql { + SELECT hex(zeroblob(2) || x'61') + } + } {000061} +} + +# Check various CAST(...) operations on zeroblob. +# +do_test zeroblob-5.1 { + execsql { + SELECT CAST (zeroblob(100) AS REAL); + } +} {0.0} +do_test zeroblob-5.2 { + execsql { + SELECT CAST (zeroblob(100) AS INTEGER); + } +} {0} +do_test zeroblob-5.3 { + execsql { + SELECT CAST (zeroblob(100) AS TEXT); + } +} {{}} +do_test zeroblob-5.4 { + execsql { + SELECT CAST(zeroblob(100) AS BLOB); + } +} [execsql {SELECT zeroblob(100)}] + + +# Check for malicious use of zeroblob. Make sure nothing crashes. +# +do_test zeroblob-6.1.1 { + execsql {select zeroblob(-1)} +} {{}} +do_test zeroblob-6.1.2 { + execsql {select zeroblob(-10)} +} {{}} +do_test zeroblob-6.1.3 { + execsql {select zeroblob(-100)} +} {{}} +do_test zeroblob-6.2 { + execsql {select length(zeroblob(-1))} +} {0} +do_test zeroblob-6.3 { + execsql {select zeroblob(-1)|1} +} {1} +do_test zeroblob-6.4 { + catchsql {select length(zeroblob(2147483648))} +} {1 {string or blob too big}} +do_test zeroblob-6.5 { + catchsql {select zeroblob(2147483648)} +} {1 {string or blob too big}} +do_test zeroblob-6.6 { + execsql {select hex(zeroblob(-1))} +} {{}} +do_test zeroblob-6.7 { + execsql {select typeof(zeroblob(-1))} +} {blob} + +# Test bind_zeroblob() +# +do_test zeroblob-7.1 { + set ::STMT [sqlite3_prepare $::DB "SELECT length(?)" -1 DUMMY] + sqlite3_bind_zeroblob $::STMT 1 450 + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test zeroblob-7.2 { + sqlite3_column_int $::STMT 0 +} {450} +do_test zeroblob-7.3 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +# Test that MakeRecord can handle a value with some real content +# and a zero-blob tail. +# +do_test zeroblob-8.1 { + llength [execsql { + SELECT 'hello' AS a, zeroblob(10) as b from t1 ORDER BY a, b; + }] +} {8} + + +finish_test -- cgit v1.1