/* fts2 has a design flaw which can lead to database corruption (see
** below).  It is recommended not to use it any longer, instead use
** fts3 (or higher).  If you believe that your use of fts2 is safe,
** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS.
*/
#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \
        && !defined(SQLITE_ENABLE_BROKEN_FTS2)
#error fts2 has a design flaw and has been deprecated.
#endif
/* The flaw is that fts2 uses the content table's unaliased rowid as
** the unique docid.  fts2 embeds the rowid in the index it builds,
** and expects the rowid to not change.  The SQLite VACUUM operation
** will renumber such rowids, thereby breaking fts2.  If you are using
** fts2 in a system which has disabled VACUUM, then you can continue
** to use it safely.  Note that PRAGMA auto_vacuum does NOT disable
** VACUUM, though systems using auto_vacuum are unlikely to invoke
** VACUUM.
**
** Unlike fts1, which is safe across VACUUM if you never delete
** documents, fts2 has a second exposure to this flaw, in the segments
** table.  So fts2 should be considered unsafe across VACUUM in all
** cases.
*/

/*
** 2006 Oct 10
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
******************************************************************************
**
** This is an SQLite module implementing full-text search.
*/

/*
** The code in this file is only compiled if:
**
**     * The FTS2 module is being built as an extension
**       (in which case SQLITE_CORE is not defined), or
**
**     * The FTS2 module is being built into the core of
**       SQLite (in which case SQLITE_ENABLE_FTS2 is defined).
*/

/* TODO(shess) Consider exporting this comment to an HTML file or the
** wiki.
*/
/* The full-text index is stored in a series of b+tree (-like)
** structures called segments which map terms to doclists.  The
** structures are like b+trees in layout, but are constructed from the
** bottom up in optimal fashion and are not updatable.  Since trees
** are built from the bottom up, things will be described from the
** bottom up.
**
**
**** Varints ****
** The basic unit of encoding is a variable-length integer called a
** varint.  We encode variable-length integers in little-endian order
** using seven bits per byte as follows:
**
** KEY:
**         A = 0xxxxxxx    7 bits of data and one flag bit
**         B = 1xxxxxxx    7 bits of data and one flag bit
**
**  7 bits - A
** 14 bits - BA
** 21 bits - BBA
** and so on.
**
** This is identical to how sqlite encodes varints (see util.c).
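**
** For illustration (an added example, not from the original comment):
** the value 1000 is binary 111_1101000; the low seven bits 1101000
** (0x68) are emitted first with the flag bit set, giving 0xE8, then
** the remaining bits 0000111 (0x07) with the flag bit clear, so the
** complete varint is the two bytes 0xE8 0x07.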
**
**
**** Document lists ****
** A doclist (document list) holds a docid-sorted list of hits for a
** given term.  Doclists hold docids, and can optionally associate
** token positions and offsets with docids.
**
** A DL_POSITIONS_OFFSETS doclist is stored like this:
**
** array {
**   varint docid;
**   array {                (position list for column 0)
**     varint position;     (delta from previous position plus POS_BASE)
**     varint startOffset;  (delta from previous startOffset)
**     varint endOffset;    (delta from startOffset)
**   }
**   array {
**     varint POS_COLUMN;   (marks start of position list for new column)
**     varint column;       (index of new column)
**     array {
**       varint position;   (delta from previous position plus POS_BASE)
**       varint startOffset;(delta from previous startOffset)
**       varint endOffset;  (delta from startOffset)
**     }
**   }
**   varint POS_END;        (marks end of positions for this document.)
** }
**
** Here, array { X } means zero or more occurrences of X, adjacent in
** memory.  A "position" is an index of a token in the token stream
** generated by the tokenizer, while an "offset" is a byte offset,
** both based at 0.  Note that POS_END and POS_COLUMN occur in the
** same logical place as the position element, and act as sentinels
** ending a position list array.
**
** A DL_POSITIONS doclist omits the startOffset and endOffset
** information.  A DL_DOCIDS doclist omits both the position and
** offset information, becoming an array of varint-encoded docids.
**
** On-disk data is stored as type DL_DEFAULT, so we don't serialize
** the type.  Due to how deletion is implemented in the segmentation
** system, on-disk doclists MUST store at least positions.
**
**
**** Segment leaf nodes ****
** Segment leaf nodes store terms and doclists, ordered by term.  Leaf
** nodes are written using LeafWriter, and read using LeafReader (to
** iterate through a single leaf node's data) and LeavesReader (to
** iterate through a segment's entire leaf layer).  Leaf nodes have
** the format:
**
** varint iHeight;             (height from leaf level, always 0)
** varint nTerm;               (length of first term)
** char pTerm[nTerm];          (content of first term)
** varint nDoclist;            (length of term's associated doclist)
** char pDoclist[nDoclist];    (content of doclist)
** array {
**                             (further terms are delta-encoded)
**   varint nPrefix;           (length of prefix shared with previous term)
**   varint nSuffix;           (length of unshared suffix)
**   char pTermSuffix[nSuffix];(unshared suffix of next term)
**   varint nDoclist;          (length of term's associated doclist)
**   char pDoclist[nDoclist];  (content of doclist)
** }
**
** Here, array { X } means zero or more occurrences of X, adjacent in
** memory.
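**
** For illustration (an added example, not from the original comment):
** if the previous term is "apple" and the next term is "apply", the
** next term is encoded as nPrefix=4, nSuffix=1, pTermSuffix="y".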
**
** Leaf nodes are broken into blocks which are stored contiguously in
** the %_segments table in sorted order.  This means that when the end
** of a node is reached, the next term is in the node with the next
** greater node id.
**
** New data is spilled to a new leaf node when the current node
** exceeds LEAF_MAX bytes (default 2048).  New data which itself is
** larger than STANDALONE_MIN (default 1024) is placed in a standalone
** node (a leaf node with a single term and doclist).  The goal of
** these settings is to pack together groups of small doclists while
** making it efficient to directly access large doclists.  The
** assumption is that large doclists represent terms which are more
** likely to be query targets.
**
** TODO(shess) It may be useful for blocking decisions to be more
** dynamic.  For instance, it may make more sense to have a 2.5k leaf
** node rather than splitting into 2k and .5k nodes.  My intuition is
** that this might extend through 2x or 4x the pagesize.
**
**
**** Segment interior nodes ****
** Segment interior nodes store blockids for subtree nodes and terms
** to describe what data is stored by each subtree.  Interior nodes
** are written using InteriorWriter, and read using InteriorReader.
** InteriorWriters are created as needed when SegmentWriter creates
** new leaf nodes, or when an interior node itself grows too big and
** must be split.  The format of interior nodes:
**
** varint iHeight;           (height from leaf level, always >0)
** varint iBlockid;          (block id of node's leftmost subtree)
** optional {
**   varint nTerm;           (length of first term)
**   char pTerm[nTerm];      (content of first term)
**   array {
**                                (further terms are delta-encoded)
**     varint nPrefix;            (length of shared prefix with previous term)
**     varint nSuffix;            (length of unshared suffix)
**     char pTermSuffix[nSuffix]; (unshared suffix of next term)
**   }
** }
**
** Here, optional { X } means an optional element, while array { X }
** means zero or more occurrences of X, adjacent in memory.
**
** An interior node encodes n terms separating n+1 subtrees.  The
** subtree blocks are contiguous, so only the first subtree's blockid
** is encoded.  The subtree at iBlockid will contain all terms less
** than the first term encoded (or all terms if no term is encoded).
** Otherwise, for terms greater than or equal to pTerm[i] but less
** than pTerm[i+1], the subtree for that term will be rooted at
** iBlockid+i.  Interior nodes only store enough term data to
** distinguish adjacent children (if the rightmost term of the left
** child is "something", and the leftmost term of the right child is
** "wicked", only "w" is stored).
**
** New data is spilled to a new interior node at the same height when
** the current node exceeds INTERIOR_MAX bytes (default 2048).
** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing
** interior nodes and making the tree too skinny.  The interior nodes
** at a given height are naturally tracked by interior nodes at
** height+1, and so on.
**
**
**** Segment directory ****
** The segment directory in table %_segdir stores meta-information for
** merging and deleting segments, and also the root node of the
** segment's tree.
**
** The root node is the top node of the segment's tree after encoding
** the entire segment, restricted to ROOT_MAX bytes (default 1024).
** This could be either a leaf node or an interior node.  If the top
** node requires more than ROOT_MAX bytes, it is flushed to %_segments
** and a new root interior node is generated (which should always fit
** within ROOT_MAX because it only needs space for 2 varints, the
** height and the blockid of the previous root).
**
** The meta-information in the segment directory is:
**
**   level            - segment level (see below)
**   idx              - index within level
**                    - (level,idx uniquely identify a segment)
**   start_block      - first leaf node
**   leaves_end_block - last leaf node
**   end_block        - last block (including interior nodes)
**   root             - contents of root node
**
** If the root node is a leaf node, then start_block,
** leaves_end_block, and end_block are all 0.
**
**
**** Segment merging ****
** To amortize update costs, segments are grouped into levels and
** merged in batches.  Each increase in level represents exponentially
** more documents.
**
** New documents (actually, document updates) are tokenized and
** written individually (using LeafWriter) to a level 0 segment, with
** incrementing idx.  When idx reaches MERGE_COUNT (default 16), all
** level 0 segments are merged into a single level 1 segment.  Level 1
** is populated like level 0, and eventually MERGE_COUNT level 1
** segments are merged to a single level 2 segment (representing
** MERGE_COUNT^2 updates), and so on.
**
** A segment merge traverses all segments at a given level in
** parallel, performing a straightforward sorted merge.  Since segment
** leaf nodes are written into the %_segments table in order, this
** merge traverses the underlying sqlite disk structures efficiently.
** After the merge, all segment blocks from the merged level are
** deleted.
**
** MERGE_COUNT controls how often we merge segments.  16 seems to be
** somewhat of a sweet spot for insertion performance.  32 and 64 show
** very similar performance numbers to 16 on insertion, though they're
** a tiny bit slower (perhaps due to more overhead in merge-time
** sorting).  8 is about 20% slower than 16, 4 about 50% slower than
** 16, 2 about 66% slower than 16.
**
** At query time, high MERGE_COUNT increases the number of segments
** which need to be scanned and merged.  For instance, with 100k docs
** inserted:
**
**    MERGE_COUNT   segments
**       16           25
**        8           12
**        4           10
**        2            6
**
** This appears to have only a moderate impact on queries for very
** frequent terms (which are somewhat dominated by segment merge
** costs), and infrequent and non-existent terms still seem to be fast
** even with many segments.
**
** TODO(shess) That said, it would be nice to have a better query-side
** argument for MERGE_COUNT of 16.  Also, it is possible/likely that
** optimizations to things like doclist merging will swing the sweet
** spot around.
**
**
**
**** Handling of deletions and updates ****
** Since we're using a segmented structure, with no docid-oriented
** index into the term index, we clearly cannot simply update the term
** index when a document is deleted or updated.  For deletions, we
** write an empty doclist (varint(docid) varint(POS_END)); for updates
** we simply write the new doclist.  Segment merges overwrite older
** data for a particular docid with newer data, so deletes or updates
** will eventually overtake the earlier data and knock it out.  The
** query logic likewise merges doclists so that newer data knocks out
** older data.
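**
** For illustration (an added example, not from the original comment):
** deleting the document with docid 42 appends the two-byte doclist
** entry 0x2A 0x00, i.e. varint(42) followed by varint(POS_END).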
**
** TODO(shess) Provide a VACUUM type operation to clear out all
** deletions and duplications.  This would basically be a forced merge
** into a single segment.
*/

#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)

#if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE)
# define SQLITE_CORE 1
#endif

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>

#include "fts2.h"
#include "fts2_hash.h"
#include "fts2_tokenizer.h"
#include "sqlite3.h"
#include "sqlite3ext.h"
SQLITE_EXTENSION_INIT1


/* TODO(shess) MAN, this thing needs some refactoring.  At minimum, it
** would be nice to order the file better, perhaps something along the
** lines of:
**
**  - utility functions
**  - table setup functions
**  - table update functions
**  - table query functions
**
** Put the query functions last because they're likely to reference
** typedefs or functions from the table update section.
*/

#if 0
# define TRACE(A)  printf A; fflush(stdout)
#else
# define TRACE(A)
#endif

/* It is not safe to call isspace(), tolower(), or isalnum() on
** hi-bit-set characters.  This is the same solution used in the
** tokenizer.
*/
/* TODO(shess) The snippet-generation code should be using the
** tokenizer-generated tokens rather than doing its own local
** tokenization.
*/
/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */
static int safe_isspace(char c){
  return (c&0x80)==0 ? isspace(c) : 0;
}
static int safe_tolower(char c){
  return (c&0x80)==0 ? tolower(c) : c;
}
static int safe_isalnum(char c){
  return (c&0x80)==0 ? isalnum(c) : 0;
}

typedef enum DocListType {
  DL_DOCIDS,              /* docids only */
  DL_POSITIONS,           /* docids + positions */
  DL_POSITIONS_OFFSETS    /* docids + positions + offsets */
} DocListType;

/*
** By default, only positions and not offsets are stored in the doclists.
** To change this so that offsets are stored too, compile with
**
**          -DDL_DEFAULT=DL_POSITIONS_OFFSETS
**
** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted
** into (no deletes or updates).
*/
#ifndef DL_DEFAULT
# define DL_DEFAULT DL_POSITIONS
#endif

enum {
  POS_END = 0,        /* end of this position list */
  POS_COLUMN,         /* followed by new column number */
  POS_BASE
};

/* MERGE_COUNT controls how often we merge segments (see comment at
** top of file).
*/
#define MERGE_COUNT 16

/* utility functions */

/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single
** record to prevent errors of the form:
**
** my_function(SomeType *b){
**   memset(b, '\0', sizeof(b));  // sizeof(b)!=sizeof(*b)
** }
*/
/* TODO(shess) Obvious candidates for a header file. */
#define CLEAR(b) memset(b, '\0', sizeof(*(b)))

#ifndef NDEBUG
# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b)))
#else
# define SCRAMBLE(b)
#endif

/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */
#define VARINT_MAX 10

/* Write a 64-bit variable-length integer to memory starting at p[0].
 * The length of data written will be between 1 and VARINT_MAX bytes.
 * The number of bytes written is returned. */
static int putVarint(char *p, sqlite_int64 v){
  unsigned char *q = (unsigned char *) p;
  sqlite_uint64 vu = v;
  do{
    *q++ = (unsigned char) ((vu & 0x7f) | 0x80);
    vu >>= 7;
  }while( vu!=0 );
  q[-1] &= 0x7f;  /* turn off high bit in final byte */
  assert( q - (unsigned char *)p <= VARINT_MAX );
  return (int) (q - (unsigned char *)p);
}

/* Read a 64-bit variable-length integer from memory starting at p[0].
 * Return the number of bytes read, or 0 on error.
 * The value is stored in *v. */
static int getVarint(const char *p, sqlite_int64 *v){
  const unsigned char *q = (const unsigned char *) p;
  sqlite_uint64 x = 0, y = 1;
  while( (*q & 0x80) == 0x80 ){
    x += y * (*q++ & 0x7f);
    y <<= 7;
    if( q - (unsigned char *)p >= VARINT_MAX ){  /* bad data */
      assert( 0 );
      return 0;
    }
  }
  x += y * (*q++);
  *v = (sqlite_int64) x;
  return (int) (q - (unsigned char *)p);
}

static int getVarint32(const char *p, int *pi){
  sqlite_int64 i;
  int ret = getVarint(p, &i);
  *pi = (int) i;
  assert( *pi==i );
  return ret;
}
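
/* A minimal round-trip sketch (an added example, not in the original
** code), showing the varint helpers together:
**
**   char buf[VARINT_MAX];
**   sqlite_int64 v;
**   int nWrote = putVarint(buf, 1000);   // writes 0xE8 0x07, returns 2
**   int nRead = getVarint(buf, &v);      // reads it back, returns 2
**   assert( nWrote==nRead && v==1000 );
*/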

/*******************************************************************/
/* DataBuffer is used to collect data into a buffer in piecemeal
** fashion.  It implements the usual distinction between amount of
** data currently stored (nData) and buffer capacity (nCapacity).
**
** dataBufferInit - create a buffer with given initial capacity.
** dataBufferReset - forget buffer's data, retaining capacity.
** dataBufferDestroy - free buffer's data.
** dataBufferSwap - swap contents of two buffers.
** dataBufferExpand - expand capacity without adding data.
** dataBufferAppend - append data.
** dataBufferAppend2 - append two pieces of data at once.
** dataBufferReplace - replace buffer's data.
*/
typedef struct DataBuffer {
  char *pData;          /* Pointer to malloc'ed buffer. */
  int nCapacity;        /* Size of pData buffer. */
  int nData;            /* End of data loaded into pData. */
} DataBuffer;

static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){
  assert( nCapacity>=0 );
  pBuffer->nData = 0;
  pBuffer->nCapacity = nCapacity;
  pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity);
}
static void dataBufferReset(DataBuffer *pBuffer){
  pBuffer->nData = 0;
}
static void dataBufferDestroy(DataBuffer *pBuffer){
  if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData);
  SCRAMBLE(pBuffer);
}
static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){
  DataBuffer tmp = *pBuffer1;
  *pBuffer1 = *pBuffer2;
  *pBuffer2 = tmp;
}
static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){
  assert( nAddCapacity>0 );
  /* TODO(shess) Consider expanding more aggressively.  Note that the
  ** underlying malloc implementation may take care of such things for
  ** us already.
  */
  if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){
    pBuffer->nCapacity = pBuffer->nData+nAddCapacity;
    pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity);
  }
}
static void dataBufferAppend(DataBuffer *pBuffer,
                             const char *pSource, int nSource){
  assert( nSource>0 && pSource!=NULL );
  dataBufferExpand(pBuffer, nSource);
  memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource);
  pBuffer->nData += nSource;
}
static void dataBufferAppend2(DataBuffer *pBuffer,
                              const char *pSource1, int nSource1,
                              const char *pSource2, int nSource2){
  assert( nSource1>0 && pSource1!=NULL );
  assert( nSource2>0 && pSource2!=NULL );
  dataBufferExpand(pBuffer, nSource1+nSource2);
  memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1);
  memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2);
  pBuffer->nData += nSource1+nSource2;
}
static void dataBufferReplace(DataBuffer *pBuffer,
                              const char *pSource, int nSource){
  dataBufferReset(pBuffer);
  dataBufferAppend(pBuffer, pSource, nSource);
}

/* StringBuffer is a null-terminated version of DataBuffer. */
typedef struct StringBuffer {
  DataBuffer b;            /* Includes null terminator. */
} StringBuffer;

static void initStringBuffer(StringBuffer *sb){
  dataBufferInit(&sb->b, 100);
  dataBufferReplace(&sb->b, "", 1);
}
static int stringBufferLength(StringBuffer *sb){
  return sb->b.nData-1;
}
static char *stringBufferData(StringBuffer *sb){
  return sb->b.pData;
}
static void stringBufferDestroy(StringBuffer *sb){
  dataBufferDestroy(&sb->b);
}

static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){
  assert( sb->b.nData>0 );
  if( nFrom>0 ){
    sb->b.nData--;
    dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1);
  }
}
static void append(StringBuffer *sb, const char *zFrom){
  nappend(sb, zFrom, strlen(zFrom));
}

/* Append a list of strings separated by commas. */
static void appendList(StringBuffer *sb, int nString, char **azString){
  int i;
  for(i=0; i<nString; ++i){
    if( i>0 ) append(sb, ", ");
    append(sb, azString[i]);
  }
}

static int endsInWhiteSpace(StringBuffer *p){
  return stringBufferLength(p)>0 &&
    safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]);
}

/* If the StringBuffer ends in something other than white space, add a
** single space character to the end.
*/
static void appendWhiteSpace(StringBuffer *p){
  if( stringBufferLength(p)==0 ) return;
  if( !endsInWhiteSpace(p) ) append(p, " ");
}

/* Remove white space from the end of the StringBuffer */
static void trimWhiteSpace(StringBuffer *p){
  while( endsInWhiteSpace(p) ){
    p->b.pData[--p->b.nData-1] = '\0';
  }
}

/*******************************************************************/
/* DLReader is used to read document elements from a doclist.  The
** current docid is cached, so dlrDocid() is fast.  DLReader does not
** own the doclist buffer.
**
** dlrAtEnd - true if there's no more data to read.
** dlrDocid - docid of current document.
** dlrDocData - doclist data for current document (including docid).
** dlrDocDataBytes - length of same.
** dlrAllDataBytes - length of all remaining data.
** dlrPosData - position data for current document.
** dlrPosDataLen - length of pos data for current document (incl POS_END).
** dlrStep - step to the next document.
** dlrInit - initialize for doclist of given type against given data.
** dlrDestroy - clean up.
**
** Expected usage is something like:
**
**   DLReader reader;
**   dlrInit(&reader, DL_DEFAULT, pData, nData);
**   while( !dlrAtEnd(&reader) ){
**     // calls to dlrDocid() and kin.
**     dlrStep(&reader);
**   }
**   dlrDestroy(&reader);
*/
typedef struct DLReader {
  DocListType iType;
  const char *pData;
  int nData;

  sqlite_int64 iDocid;
  int nElement;
} DLReader;

static int dlrAtEnd(DLReader *pReader){
  assert( pReader->nData>=0 );
  return pReader->nData==0;
}
static sqlite_int64 dlrDocid(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->iDocid;
}
static const char *dlrDocData(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->pData;
}
static int dlrDocDataBytes(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->nElement;
}
static int dlrAllDataBytes(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );
  return pReader->nData;
}
/* TODO(shess) Consider adding a field to track iDocid varint length
** to make these two functions faster.  This might matter (a tiny bit)
** for queries.
*/
static const char *dlrPosData(DLReader *pReader){
  sqlite_int64 iDummy;
  int n = getVarint(pReader->pData, &iDummy);
  assert( !dlrAtEnd(pReader) );
  return pReader->pData+n;
}
static int dlrPosDataLen(DLReader *pReader){
  sqlite_int64 iDummy;
  int n = getVarint(pReader->pData, &iDummy);
  assert( !dlrAtEnd(pReader) );
  return pReader->nElement-n;
}
static void dlrStep(DLReader *pReader){
  assert( !dlrAtEnd(pReader) );

  /* Skip past current doclist element. */
  assert( pReader->nElement<=pReader->nData );
  pReader->pData += pReader->nElement;
  pReader->nData -= pReader->nElement;

  /* If there is more data, read the next doclist element. */
  if( pReader->nData!=0 ){
    sqlite_int64 iDocidDelta;
    int iDummy, n = getVarint(pReader->pData, &iDocidDelta);
    pReader->iDocid += iDocidDelta;
    if( pReader->iType>=DL_POSITIONS ){
      assert( n<pReader->nData );
      while( 1 ){
        n += getVarint32(pReader->pData+n, &iDummy);
        assert( n<=pReader->nData );
        if( iDummy==POS_END ) break;
        if( iDummy==POS_COLUMN ){
          n += getVarint32(pReader->pData+n, &iDummy);
          assert( n<pReader->nData );
        }else if( pReader->iType==DL_POSITIONS_OFFSETS ){
          n += getVarint32(pReader->pData+n, &iDummy);
          n += getVarint32(pReader->pData+n, &iDummy);
          assert( n<pReader->nData );
        }
      }
    }
    pReader->nElement = n;
    assert( pReader->nElement<=pReader->nData );
  }
}
static void dlrInit(DLReader *pReader, DocListType iType,
                    const char *pData, int nData){
  assert( pData!=NULL && nData!=0 );
  pReader->iType = iType;
  pReader->pData = pData;
  pReader->nData = nData;
  pReader->nElement = 0;
  pReader->iDocid = 0;

  /* Load the first element's data.  There must be a first element. */
  dlrStep(pReader);
}
static void dlrDestroy(DLReader *pReader){
  SCRAMBLE(pReader);
}

#ifndef NDEBUG
/* Verify that the doclist can be validly decoded.  Also returns the
** last docid found because it is convenient in other assertions for
** DLWriter.
*/
static void docListValidate(DocListType iType, const char *pData, int nData,
                            sqlite_int64 *pLastDocid){
  sqlite_int64 iPrevDocid = 0;
  assert( nData>0 );
  assert( pData!=0 );
  assert( pData+nData>pData );
  while( nData!=0 ){
    sqlite_int64 iDocidDelta;
    int n = getVarint(pData, &iDocidDelta);
    iPrevDocid += iDocidDelta;
    if( iType>DL_DOCIDS ){
      int iDummy;
      while( 1 ){
        n += getVarint32(pData+n, &iDummy);
        if( iDummy==POS_END ) break;
        if( iDummy==POS_COLUMN ){
          n += getVarint32(pData+n, &iDummy);
        }else if( iType>DL_POSITIONS ){
          n += getVarint32(pData+n, &iDummy);
          n += getVarint32(pData+n, &iDummy);
        }
        assert( n<=nData );
      }
    }
    assert( n<=nData );
    pData += n;
    nData -= n;
  }
  if( pLastDocid ) *pLastDocid = iPrevDocid;
}
#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o)
#else
#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 )
#endif

/*******************************************************************/
/* DLWriter is used to write doclist data to a DataBuffer.  DLWriter
** always appends to the buffer and does not own it.
**
** dlwInit - initialize to write a given type doclist to a buffer.
** dlwDestroy - clear the writer's memory.  Does not free buffer.
** dlwAppend - append raw doclist data to buffer.
** dlwCopy - copy next doclist from reader to writer.
** dlwAdd - construct doclist element and append to buffer.
**     Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter).
*/
typedef struct DLWriter {
  DocListType iType;
  DataBuffer *b;
  sqlite_int64 iPrevDocid;
#ifndef NDEBUG
  int has_iPrevDocid;
#endif
} DLWriter;

static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){
  pWriter->b = b;
  pWriter->iType = iType;
  pWriter->iPrevDocid = 0;
#ifndef NDEBUG
  pWriter->has_iPrevDocid = 0;
#endif
}
static void dlwDestroy(DLWriter *pWriter){
  SCRAMBLE(pWriter);
}
/* iFirstDocid is the first docid in the doclist in pData.  It is
** needed because pData may point within a larger doclist, in which
** case the first item would be delta-encoded.
**
** iLastDocid is the final docid in the doclist in pData.  It is
** needed to create the new iPrevDocid for future delta-encoding.  The
** code could decode the passed doclist to recreate iLastDocid, but
** the only current user (docListMerge) already has decoded this
** information.
*/
/* TODO(shess) This has become just a helper for docListMerge.
** Consider a refactor to make this cleaner.
*/
static void dlwAppend(DLWriter *pWriter,
                      const char *pData, int nData,
                      sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){
  sqlite_int64 iDocid = 0;
  char c[VARINT_MAX];
  int nFirstOld, nFirstNew;     /* Old and new varint len of first docid. */
#ifndef NDEBUG
  sqlite_int64 iLastDocidDelta;
#endif

  /* Recode the initial docid as delta from iPrevDocid. */
  nFirstOld = getVarint(pData, &iDocid);
  assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) );
  nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid);

  /* Verify that the incoming doclist is valid AND that it ends with
  ** the expected docid.  This is essential because we'll trust this
  ** docid in future delta-encoding.
  */
  ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta);
  assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta );

  /* Append recoded initial docid and everything else.  Rest of docids
  ** should have been delta-encoded from previous initial docid.
  */
  if( nFirstOld<nData ){
    dataBufferAppend2(pWriter->b, c, nFirstNew,
                      pData+nFirstOld, nData-nFirstOld);
  }else{
    dataBufferAppend(pWriter->b, c, nFirstNew);
  }
  pWriter->iPrevDocid = iLastDocid;
}
static void dlwCopy(DLWriter *pWriter, DLReader *pReader){
  dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader),
            dlrDocid(pReader), dlrDocid(pReader));
}
static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){
  char c[VARINT_MAX];
  int n = putVarint(c, iDocid-pWriter->iPrevDocid);

  /* Docids must ascend. */
  assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid );
  assert( pWriter->iType==DL_DOCIDS );

  dataBufferAppend(pWriter->b, c, n);
  pWriter->iPrevDocid = iDocid;
#ifndef NDEBUG
  pWriter->has_iPrevDocid = 1;
#endif
}

/*******************************************************************/
/* PLReader is used to read data from a document's position list.  As
** the caller steps through the list, data is cached so that varints
** only need to be decoded once.
**
** plrInit, plrDestroy - create/destroy a reader.
** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors
** plrAtEnd - at end of stream, only call plrDestroy once true.
** plrStep - step to the next element.
*/
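/* For illustration (an added example, not in the original code): in a
** DL_POSITIONS doclist, a position list holding positions 3 and 5 in
** column 0 is encoded as the varints 5, 4, 0; that is, 3+POS_BASE,
** then the delta (5-3)+POS_BASE, then POS_END.
*/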
typedef struct PLReader {
  /* These refer to the next position's data.  nData will reach 0 when
  ** reading the last position, so plrStep() signals EOF by setting
  ** pData to NULL.
  */
  const char *pData;
  int nData;

  DocListType iType;
  int iColumn;         /* the last column read */
  int iPosition;       /* the last position read */
  int iStartOffset;    /* the last start offset read */
  int iEndOffset;      /* the last end offset read */
} PLReader;

static int plrAtEnd(PLReader *pReader){
  return pReader->pData==NULL;
}
static int plrColumn(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iColumn;
}
static int plrPosition(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iPosition;
}
static int plrStartOffset(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iStartOffset;
}
static int plrEndOffset(PLReader *pReader){
  assert( !plrAtEnd(pReader) );
  return pReader->iEndOffset;
}
static void plrStep(PLReader *pReader){
  int i, n;

  assert( !plrAtEnd(pReader) );

  if( pReader->nData==0 ){
    pReader->pData = NULL;
    return;
  }

  n = getVarint32(pReader->pData, &i);
  if( i==POS_COLUMN ){
    n += getVarint32(pReader->pData+n, &pReader->iColumn);
    pReader->iPosition = 0;
    pReader->iStartOffset = 0;
    n += getVarint32(pReader->pData+n, &i);
  }
  /* Should never see adjacent column changes. */
  assert( i!=POS_COLUMN );

  if( i==POS_END ){
    pReader->nData = 0;
    pReader->pData = NULL;
    return;
  }

  pReader->iPosition += i-POS_BASE;
  if( pReader->iType==DL_POSITIONS_OFFSETS ){
    n += getVarint32(pReader->pData+n, &i);
    pReader->iStartOffset += i;
    n += getVarint32(pReader->pData+n, &i);
    pReader->iEndOffset = pReader->iStartOffset+i;
  }
  assert( n<=pReader->nData );
  pReader->pData += n;
  pReader->nData -= n;
}

static void plrInit(PLReader *pReader, DLReader *pDLReader){
  pReader->pData = dlrPosData(pDLReader);
  pReader->nData = dlrPosDataLen(pDLReader);
  pReader->iType = pDLReader->iType;
  pReader->iColumn = 0;
  pReader->iPosition = 0;
  pReader->iStartOffset = 0;
  pReader->iEndOffset = 0;
  plrStep(pReader);
}
static void plrDestroy(PLReader *pReader){
  SCRAMBLE(pReader);
}

/*******************************************************************/
/* PLWriter is used in constructing a document's position list.  As a
** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op.
** PLWriter writes to the associated DLWriter's buffer.
**
** plwInit - init for writing a document's poslist.
** plwDestroy - clear a writer.
** plwAdd - append position and offset information.
** plwCopy - copy next position's data from reader to writer.
** plwTerminate - add any necessary doclist terminator.
**
** Calling plwAdd() after plwTerminate() may result in a corrupt
** doclist.
*/
/* TODO(shess) Until we've written the second item, we can cache the
** first item's information.  Then we'd have three states:
**
** - initialized with docid, no positions.
** - docid and one position.
** - docid and multiple positions.
**
** Only the last state needs to actually write to dlw->b, which would
** be an improvement in the DLCollector case.
*/
typedef struct PLWriter {
  DLWriter *dlw;

  int iColumn;    /* the last column written */
  int iPos;       /* the last position written */
  int iOffset;    /* the last start offset written */
} PLWriter;

/* TODO(shess) In the case where the parent is reading these values
** from a PLReader, we could optimize to a copy if that PLReader has
** the same type as pWriter.
*/
static void plwAdd(PLWriter *pWriter, int iColumn, int iPos,
                   int iStartOffset, int iEndOffset){
  /* Worst-case space for POS_COLUMN, iColumn, iPosDelta,
  ** iStartOffsetDelta, and iEndOffsetDelta.
  */
  char c[5*VARINT_MAX];
  int n = 0;

  /* Ban plwAdd() after plwTerminate(). */
  assert( pWriter->iPos!=-1 );

  if( pWriter->dlw->iType==DL_DOCIDS ) return;

  if( iColumn!=pWriter->iColumn ){
    n += putVarint(c+n, POS_COLUMN);
    n += putVarint(c+n, iColumn);
    pWriter->iColumn = iColumn;
    pWriter->iPos = 0;
    pWriter->iOffset = 0;
  }
  assert( iPos>=pWriter->iPos );
  n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos));
  pWriter->iPos = iPos;
  if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){
    assert( iStartOffset>=pWriter->iOffset );
    n += putVarint(c+n, iStartOffset-pWriter->iOffset);
    pWriter->iOffset = iStartOffset;
    assert( iEndOffset>=iStartOffset );
    n += putVarint(c+n, iEndOffset-iStartOffset);
  }
  dataBufferAppend(pWriter->dlw->b, c, n);
}
static void plwCopy(PLWriter *pWriter, PLReader *pReader){
  plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader),
         plrStartOffset(pReader), plrEndOffset(pReader));
}
static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){
  char c[VARINT_MAX];
  int n;

  pWriter->dlw = dlw;

  /* Docids must ascend. */
  assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid );
  n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid);
  dataBufferAppend(pWriter->dlw->b, c, n);
  pWriter->dlw->iPrevDocid = iDocid;
#ifndef NDEBUG
  pWriter->dlw->has_iPrevDocid = 1;
#endif

  pWriter->iColumn = 0;
  pWriter->iPos = 0;
  pWriter->iOffset = 0;
}
/* TODO(shess) Should plwDestroy() also terminate the doclist?  But
** then plwDestroy() would no longer be just a destructor, it would
** also be doing work, which isn't consistent with the overall idiom.
** Another option would be for plwAdd() to always append any necessary
** terminator, so that the output is always correct.  But that would
** add incremental work to the common case with the only benefit being
** API elegance.  Punt for now.
*/
static void plwTerminate(PLWriter *pWriter){
  if( pWriter->dlw->iType>DL_DOCIDS ){
    char c[VARINT_MAX];
    int n = putVarint(c, POS_END);
    dataBufferAppend(pWriter->dlw->b, c, n);
  }
#ifndef NDEBUG
  /* Mark as terminated for assert in plwAdd(). */
  pWriter->iPos = -1;
#endif
}
static void plwDestroy(PLWriter *pWriter){
  SCRAMBLE(pWriter);
}

/*******************************************************************/
/* DLCollector wraps PLWriter and DLWriter to provide a
** dynamically-allocated doclist area to use during tokenization.
**
** dlcNew - malloc up and initialize a collector.
** dlcDelete - destroy a collector and all contained items.
** dlcAddPos - append position and offset information.
** dlcAddDoclist - add the collected doclist to the given buffer.
** dlcNext - terminate the current document and open another.
*/
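/* A minimal usage sketch (an added example, not in the original
** code): collect the hits for one document, then flush the finished
** doclist into a DataBuffer b:
**
**   DLCollector *pCollector = dlcNew(iDocid, DL_DEFAULT);
**   dlcAddPos(pCollector, 0, iPos, iStartOffset, iEndOffset);
**   dlcAddDoclist(pCollector, &b);   // appends the terminated doclist
**   dlcDelete(pCollector);
*/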
typedef struct DLCollector {
  DataBuffer b;
  DLWriter dlw;
  PLWriter plw;
} DLCollector;

/* TODO(shess) This could also be done by calling plwTerminate() and
** dataBufferAppend().  I tried that, expecting nominal performance
** differences, but it seemed to pretty reliably be worth 1% to code
** it this way.  I suspect it is the incremental malloc overhead (some
** percentage of the plwTerminate() calls will cause a realloc), so
** this might be worth revisiting if the DataBuffer implementation
** changes.
*/
static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){
  if( pCollector->dlw.iType>DL_DOCIDS ){
    char c[VARINT_MAX];
    int n = putVarint(c, POS_END);
    dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n);
  }else{
    dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData);
  }
}
static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){
  plwTerminate(&pCollector->plw);
  plwDestroy(&pCollector->plw);
  plwInit(&pCollector->plw, &pCollector->dlw, iDocid);
}
static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos,
                      int iStartOffset, int iEndOffset){
  plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset);
}

static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){
  DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector));
  dataBufferInit(&pCollector->b, 0);
  dlwInit(&pCollector->dlw, iType, &pCollector->b);
  plwInit(&pCollector->plw, &pCollector->dlw, iDocid);
  return pCollector;
}
static void dlcDelete(DLCollector *pCollector){
  plwDestroy(&pCollector->plw);
  dlwDestroy(&pCollector->dlw);
  dataBufferDestroy(&pCollector->b);
  SCRAMBLE(pCollector);
  sqlite3_free(pCollector);
}


/* Copy the doclist data of iType in pData/nData into *out, trimming
** unnecessary data as we go.  Only columns matching iColumn are
** copied, all columns copied if iColumn is -1.  Elements with no
** matching columns are dropped.  The output is an iOutType doclist.
*/
/* NOTE(shess) This code is only valid after all doclists are merged.
** If this is run before merges, then doclist items which represent
** deletion will be trimmed, and will thus not effect a deletion
** during the merge.
*/
static void docListTrim(DocListType iType, const char *pData, int nData,
                        int iColumn, DocListType iOutType, DataBuffer *out){
  DLReader dlReader;
  DLWriter dlWriter;

  assert( iOutType<=iType );

  dlrInit(&dlReader, iType, pData, nData);
  dlwInit(&dlWriter, iOutType, out);

  while( !dlrAtEnd(&dlReader) ){
    PLReader plReader;
    PLWriter plWriter;
    int match = 0;

    plrInit(&plReader, &dlReader);

    while( !plrAtEnd(&plReader) ){
      if( iColumn==-1 || plrColumn(&plReader)==iColumn ){
        if( !match ){
          plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader));
          match = 1;
        }
        plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader),
               plrStartOffset(&plReader), plrEndOffset(&plReader));
      }
      plrStep(&plReader);
    }
    if( match ){
      plwTerminate(&plWriter);
      plwDestroy(&plWriter);
    }

    plrDestroy(&plReader);
    dlrStep(&dlReader);
  }
  dlwDestroy(&dlWriter);
  dlrDestroy(&dlReader);
}

/* Used by docListMerge() to keep doclists in ascending order by
** docid, then ascending order by age (so the newest comes first).
*/
typedef struct OrderedDLReader {
  DLReader *pReader;

  /* TODO(shess) If we assume that docListMerge pReaders is ordered by
  ** age (which we do), then we could use pReader comparisons to break
  ** ties.
  */
  int idx;
} OrderedDLReader;

/* Order eof to end, then by docid asc, idx desc. */
static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){
  if( dlrAtEnd(r1->pReader) ){
    if( dlrAtEnd(r2->pReader) ) return 0;  /* Both atEnd(). */
    return 1;                              /* Only r1 atEnd(). */
  }
  if( dlrAtEnd(r2->pReader) ) return -1;   /* Only r2 atEnd(). */

  if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1;
  if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1;

  /* Descending on idx. */
  return r2->idx-r1->idx;
}

/* Bubble p[0] to appropriate place in p[1..n-1].  Assumes that
** p[1..n-1] is already sorted.
*/
/* TODO(shess) Is this frequent enough to warrant a binary search?
** Before implementing that, instrument the code to check.  In most
** current usage, I expect that p[0] will be less than p[1] a very
** high proportion of the time.
*/
static void orderedDLReaderReorder(OrderedDLReader *p, int n){
  while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){
    OrderedDLReader tmp = p[0];
    p[0] = p[1];
    p[1] = tmp;
    n--;
    p++;
  }
}

/* Given an array of doclist readers, merge their doclist elements
** into out in sorted order (by docid), dropping elements from older
** readers when there is a duplicate docid.  pReaders is assumed to be
** ordered by age, oldest first.
*/
/* TODO(shess) nReaders must be <= MERGE_COUNT.  This should probably
** be fixed.
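**
** For illustration (an added example, not in the original comment):
** merging an older reader holding docids {1,2,3} with a newer reader
** holding {2,4} yields {1,2,3,4}, with docid 2's data taken from the
** newer reader.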
*/
static void docListMerge(DataBuffer *out,
                         DLReader *pReaders, int nReaders){
  OrderedDLReader readers[MERGE_COUNT];
  DLWriter writer;
  int i, n;
  const char *pStart = 0;
  int nStart = 0;
  sqlite_int64 iFirstDocid = 0, iLastDocid = 0;

  assert( nReaders>0 );
  if( nReaders==1 ){
    dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders));
    return;
  }

  assert( nReaders<=MERGE_COUNT );
  n = 0;
  for(i=0; i<nReaders; i++){
    assert( pReaders[i].iType==pReaders[0].iType );
    readers[i].pReader = pReaders+i;
    readers[i].idx = i;
    n += dlrAllDataBytes(&pReaders[i]);
  }
  /* Conservatively size output to sum of inputs.  Output should end
  ** up strictly smaller than input.
  */
  dataBufferExpand(out, n);

  /* Get the readers into sorted order. */
  while( i-->0 ){
    orderedDLReaderReorder(readers+i, nReaders-i);
  }

  dlwInit(&writer, pReaders[0].iType, out);
  while( !dlrAtEnd(readers[0].pReader) ){
    sqlite_int64 iDocid = dlrDocid(readers[0].pReader);

    /* If this is a continuation of the current buffer to copy, extend
    ** that buffer.  memcpy() seems to be more efficient if it has a
    ** lot of data to copy.
    */
    if( dlrDocData(readers[0].pReader)==pStart+nStart ){
      nStart += dlrDocDataBytes(readers[0].pReader);
    }else{
      if( pStart!=0 ){
        dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
      }
      pStart = dlrDocData(readers[0].pReader);
      nStart = dlrDocDataBytes(readers[0].pReader);
      iFirstDocid = iDocid;
    }
    iLastDocid = iDocid;
    dlrStep(readers[0].pReader);

    /* Drop all of the older elements with the same docid. */
    for(i=1; i<nReaders &&
             !dlrAtEnd(readers[i].pReader) &&
             dlrDocid(readers[i].pReader)==iDocid; i++){
      dlrStep(readers[i].pReader);
    }

    /* Get the readers back into order. */
    while( i-->0 ){
      orderedDLReaderReorder(readers+i, nReaders-i);
    }
  }

  /* Copy over any remaining elements. */
  if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
  dlwDestroy(&writer);
}

/* Helper function for posListUnion().  Compares the current position
** between left and right, returning as standard C idiom of <0 if
** left<right, >0 if left>right, and 0 if left==right.  "End" always
** compares greater.
*/
static int posListCmp(PLReader *pLeft, PLReader *pRight){
  assert( pLeft->iType==pRight->iType );
  if( pLeft->iType==DL_DOCIDS ) return 0;

  if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1;
  if( plrAtEnd(pRight) ) return -1;

  if( plrColumn(pLeft)<plrColumn(pRight) ) return -1;
  if( plrColumn(pLeft)>plrColumn(pRight) ) return 1;

  if( plrPosition(pLeft)<plrPosition(pRight) ) return -1;
  if( plrPosition(pLeft)>plrPosition(pRight) ) return 1;
  if( pLeft->iType==DL_POSITIONS ) return 0;

  if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1;
  if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1;

  if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1;
  if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1;

  return 0;
}

/* Write the union of position lists in pLeft and pRight to pOut.
** "Union" in this case meaning "All unique position tuples".  Should
** work with any doclist type, though both inputs and the output
** should be the same type.
*/
static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){
  PLReader left, right;
  PLWriter writer;

  assert( dlrDocid(pLeft)==dlrDocid(pRight) );
  assert( pLeft->iType==pRight->iType );
  assert( pLeft->iType==pOut->iType );

  plrInit(&left, pLeft);
  plrInit(&right, pRight);
  plwInit(&writer, pOut, dlrDocid(pLeft));

  while( !plrAtEnd(&left) || !plrAtEnd(&right) ){
    int c = posListCmp(&left, &right);
    if( c<0 ){
      plwCopy(&writer, &left);
      plrStep(&left);
    }else if( c>0 ){
      plwCopy(&writer, &right);
      plrStep(&right);
    }else{
      plwCopy(&writer, &left);
      plrStep(&left);
      plrStep(&right);
    }
  }

  plwTerminate(&writer);
  plwDestroy(&writer);
  plrDestroy(&left);
  plrDestroy(&right);
}

/* Write the union of doclists in pLeft and pRight to pOut.  For
** docids in common between the inputs, the union of the position
** lists is written.  Inputs and outputs are always type DL_DEFAULT.
*/
static void docListUnion(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 ){
    if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight);
    return;
  }
  if( nRight==0 ){
    dataBufferAppend(pOut, pLeft, nLeft);
    return;
  }

  dlrInit(&left, DL_DEFAULT, pLeft, nLeft);
  dlrInit(&right, DL_DEFAULT, pRight, nRight);
  dlwInit(&writer, DL_DEFAULT, pOut);

  while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){
    if( dlrAtEnd(&right) ){
      dlwCopy(&writer, &left);
      dlrStep(&left);
    }else if( dlrAtEnd(&left) ){
      dlwCopy(&writer, &right);
      dlrStep(&right);
    }else if( dlrDocid(&left)<dlrDocid(&right) ){
      dlwCopy(&writer, &left);
      dlrStep(&left);
    }else if( dlrDocid(&left)>dlrDocid(&right) ){
      dlwCopy(&writer, &right);
      dlrStep(&right);
    }else{
      posListUnion(&left, &right, &writer);
      dlrStep(&left);
      dlrStep(&right);
    }
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

/* pLeft and pRight are DLReaders positioned to the same docid.
**
** If there are no instances in pLeft or pRight where the position
** of pLeft is one less than the position of pRight, then this
** routine adds nothing to pOut.
**
** If there are one or more instances where positions from pLeft
** are exactly one less than positions from pRight, then add a new
** document record to pOut.  If pOut wants to hold positions, then
** include the positions from pRight that are one more than a
** position in pLeft.  In other words: pRight.iPos==pLeft.iPos+1.
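**
** For illustration (an added example, not in the original comment):
** if pLeft has positions {10, 40} and pRight has positions {11, 25}
** in the same column, only the pair (10,11) satisfies
** pRight.iPos==pLeft.iPos+1, so the output record holds position 11.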
*/
static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight,
                               DLWriter *pOut){
  PLReader left, right;
  PLWriter writer;
  int match = 0;

  assert( dlrDocid(pLeft)==dlrDocid(pRight) );
  assert( pOut->iType!=DL_POSITIONS_OFFSETS );

  plrInit(&left, pLeft);
  plrInit(&right, pRight);

  while( !plrAtEnd(&left) && !plrAtEnd(&right) ){
    if( plrColumn(&left)<plrColumn(&right) ){
      plrStep(&left);
    }else if( plrColumn(&left)>plrColumn(&right) ){
      plrStep(&right);
    }else if( plrPosition(&left)+1<plrPosition(&right) ){
      plrStep(&left);
    }else if( plrPosition(&left)+1>plrPosition(&right) ){
      plrStep(&right);
    }else{
      if( !match ){
        plwInit(&writer, pOut, dlrDocid(pLeft));
        match = 1;
      }
      plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0);
      plrStep(&left);
      plrStep(&right);
    }
  }

  if( match ){
    plwTerminate(&writer);
    plwDestroy(&writer);
  }

  plrDestroy(&left);
  plrDestroy(&right);
}

/* We have two doclists with positions: pLeft and pRight.
** Write the phrase intersection of these two doclists into pOut.
**
** A phrase intersection means that two documents only match
** if pLeft.iPos+1==pRight.iPos.
**
** iType controls the type of data written to pOut.  If iType is
** DL_POSITIONS, the positions are those from pRight.
*/
static void docListPhraseMerge(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DocListType iType,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 || nRight==0 ) return;

  assert( iType!=DL_POSITIONS_OFFSETS );

  dlrInit(&left, DL_POSITIONS, pLeft, nLeft);
  dlrInit(&right, DL_POSITIONS, pRight, nRight);
  dlwInit(&writer, iType, pOut);

  while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){
    if( dlrDocid(&left)<dlrDocid(&right) ){
      dlrStep(&left);
    }else if( dlrDocid(&right)<dlrDocid(&left) ){
      dlrStep(&right);
    }else{
      posListPhraseMerge(&left, &right, &writer);
      dlrStep(&left);
      dlrStep(&right);
    }
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

/* We have two DL_DOCIDS doclists: pLeft and pRight.
** Write the intersection of these two doclists into pOut as a
** DL_DOCIDS doclist.
*/
static void docListAndMerge(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 || nRight==0 ) return;

  dlrInit(&left, DL_DOCIDS, pLeft, nLeft);
  dlrInit(&right, DL_DOCIDS, pRight, nRight);
  dlwInit(&writer, DL_DOCIDS, pOut);

  while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){
    if( dlrDocid(&left)<dlrDocid(&right) ){
      dlrStep(&left);
    }else if( dlrDocid(&right)<dlrDocid(&left) ){
      dlrStep(&right);
    }else{
      dlwAdd(&writer, dlrDocid(&left));
      dlrStep(&left);
      dlrStep(&right);
    }
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

/* We have two DL_DOCIDS doclists: pLeft and pRight.
** Write the union of these two doclists into pOut as a
** DL_DOCIDS doclist.
*/
static void docListOrMerge(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 ){
    if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight);
    return;
  }
  if( nRight==0 ){
    dataBufferAppend(pOut, pLeft, nLeft);
    return;
  }

  dlrInit(&left, DL_DOCIDS, pLeft, nLeft);
  dlrInit(&right, DL_DOCIDS, pRight, nRight);
  dlwInit(&writer, DL_DOCIDS, pOut);

  while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){
    if( dlrAtEnd(&right) ){
      dlwAdd(&writer, dlrDocid(&left));
      dlrStep(&left);
    }else if( dlrAtEnd(&left) ){
      dlwAdd(&writer, dlrDocid(&right));
      dlrStep(&right);
    }else if( dlrDocid(&left)<dlrDocid(&right) ){
      dlwAdd(&writer, dlrDocid(&left));
      dlrStep(&left);
    }else if( dlrDocid(&right)<dlrDocid(&left) ){
      dlwAdd(&writer, dlrDocid(&right));
      dlrStep(&right);
    }else{
      dlwAdd(&writer, dlrDocid(&left));
      dlrStep(&left);
      dlrStep(&right);
    }
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

/* We have two DL_DOCIDS doclists: pLeft and pRight.
** Write into pOut a DL_DOCIDS doclist containing all documents that
** occur in pLeft but not in pRight.
*/
static void docListExceptMerge(
  const char *pLeft, int nLeft,
  const char *pRight, int nRight,
  DataBuffer *pOut      /* Write the combined doclist here */
){
  DLReader left, right;
  DLWriter writer;

  if( nLeft==0 ) return;
  if( nRight==0 ){
    dataBufferAppend(pOut, pLeft, nLeft);
    return;
  }

  dlrInit(&left, DL_DOCIDS, pLeft, nLeft);
  dlrInit(&right, DL_DOCIDS, pRight, nRight);
  dlwInit(&writer, DL_DOCIDS, pOut);

  while( !dlrAtEnd(&left) ){
    while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){
      dlrStep(&right);
    }
    if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){
      dlwAdd(&writer, dlrDocid(&left));
    }
    dlrStep(&left);
  }

  dlrDestroy(&left);
  dlrDestroy(&right);
  dlwDestroy(&writer);
}

static char *string_dup_n(const char *s, int n){
  char *str = sqlite3_malloc(n + 1);
  memcpy(str, s, n);
  str[n] = '\0';
  return str;
}

/* Duplicate a string; the caller must free() the returned string.
 * (We don't use strdup() since it is not part of the standard C library and
 * may not be available everywhere.) */
static char *string_dup(const char *s){
  return string_dup_n(s, strlen(s));
}

/* Format a string, replacing each occurrence of the % character with
 * zDb.zName.  This may be more convenient than sqlite_mprintf()
 * when one string is used repeatedly in a format string.
 * The caller must free() the returned string.
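 *
 * For example (an added illustration, not in the original comment):
 * string_format("delete from %_content", "main", "t") returns the
 * string "delete from main.t_content".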
*/
01634 static char *string_format(const char *zFormat,
01635                            const char *zDb, const char *zName){
01636   const char *p;
01637   size_t len = 0;
01638   size_t nDb = strlen(zDb);
01639   size_t nName = strlen(zName);
01640   size_t nFullTableName = nDb+1+nName;
01641   char *result;
01642   char *r;
01643 
01644   /* first compute length needed */
01645   for(p = zFormat ; *p ; ++p){
01646     len += (*p=='%' ? nFullTableName : 1);
01647   }
01648   len += 1;  /* for null terminator */
01649 
01650   r = result = sqlite3_malloc(len);
01651   for(p = zFormat; *p; ++p){
01652     if( *p=='%' ){
01653       memcpy(r, zDb, nDb);
01654       r += nDb;
01655       *r++ = '.';
01656       memcpy(r, zName, nName);
01657       r += nName;
01658     } else {
01659       *r++ = *p;
01660     }
01661   }
01662   *r++ = '\0';
01663   assert( r == result + len );
01664   return result;
01665 }
01666 
01667 static int sql_exec(sqlite3 *db, const char *zDb, const char *zName,
01668                     const char *zFormat){
01669   char *zCommand = string_format(zFormat, zDb, zName);
01670   int rc;
01671   TRACE(("FTS2 sql: %s\n", zCommand));
01672   rc = sqlite3_exec(db, zCommand, NULL, 0, NULL);
01673   sqlite3_free(zCommand);
01674   return rc;
01675 }
01676 
01677 static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName,
01678                        sqlite3_stmt **ppStmt, const char *zFormat){
01679   char *zCommand = string_format(zFormat, zDb, zName);
01680   int rc;
01681   TRACE(("FTS2 prepare: %s\n", zCommand));
01682   rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL);
01683   sqlite3_free(zCommand);
01684   return rc;
01685 }
01686 
01687 /* end utility functions */
01688 
01689 /* Forward reference */
01690 typedef struct fulltext_vtab fulltext_vtab;
01691 
01692 /* A single term in a query is represented by an instance of
01693 ** the following structure.
01694 */
01695 typedef struct QueryTerm {
01696   short int nPhrase; /* How many following terms are part of the same phrase */
01697   short int iPhrase; /* This is the i-th term of a phrase. */
01698   short int iColumn; /* Column of the index that must match this term */
01699   signed char isOr;  /* this term is preceded by "OR" */
01700   signed char isNot; /* this term is preceded by "-" */
01701   signed char isPrefix; /* this term is followed by "*" */
01702   char *pTerm;       /* text of the term.  '\000' terminated.  malloced */
01703   int nTerm;         /* Number of bytes in pTerm[] */
01704 } QueryTerm;
01705 
01706 
01707 /* A query string is parsed into a Query structure.
01708  *
01709  * We could, in theory, allow query strings to be complicated
01710  * nested expressions with precedence determined by parentheses.
01711  * But none of the major search engines do this.  (Perhaps the
01712  * feeling is that a parenthesized expression is too complex an
01713  * idea for the average user to grasp.)  Taking our lead from
01714  * the major search engines, we will allow queries to be a list
01715  * of terms (with an implied AND operator) or phrases in double-quotes,
01716  * with a single optional "-" before each non-phrase term to designate
01717  * negation and an optional OR connector.
01718  *
01719  * OR binds more tightly than the implied AND, which is what the
01720  * major search engines seem to do.  So, for example:
01721  *
01722  *   [one two OR three] ==> one AND (two OR three)
01723  *   [one OR two three] ==> (one OR two) AND three
01724  *
01725  * A "-" before a term matches all entries that lack that term.
01726  * The "-" must occur immediately before the term with no intervening
01727  * space.  This is how the search engines do it.
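 *
 * For example (an illustrative sketch of the resulting structure,
 * not part of the original sources): the query string
 *
 *   [sqlite -vacuum "full text"]
 *
 * parses into four QueryTerms: "sqlite", "vacuum" with isNot set,
 * and the phrase pair "full" (marked with nPhrase==1) followed by
 * "text".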
01728 * 01729 * A NOT term cannot be the right-hand operand of an OR. If this 01730 * occurs in the query string, the NOT is ignored: 01731 * 01732 * [one OR -two] ==> one OR two 01733 * 01734 */ 01735 typedef struct Query { 01736 fulltext_vtab *pFts; /* The full text index */ 01737 int nTerms; /* Number of terms in the query */ 01738 QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ 01739 int nextIsOr; /* Set the isOr flag on the next inserted term */ 01740 int nextColumn; /* Next word parsed must be in this column */ 01741 int dfltColumn; /* The default column */ 01742 } Query; 01743 01744 01745 /* 01746 ** An instance of the following structure keeps track of generated 01747 ** matching-word offset information and snippets. 01748 */ 01749 typedef struct Snippet { 01750 int nMatch; /* Total number of matches */ 01751 int nAlloc; /* Space allocated for aMatch[] */ 01752 struct snippetMatch { /* One entry for each matching term */ 01753 char snStatus; /* Status flag for use while constructing snippets */ 01754 short int iCol; /* The column that contains the match */ 01755 short int iTerm; /* The index in Query.pTerms[] of the matching term */ 01756 short int nByte; /* Number of bytes in the term */ 01757 int iStart; /* The offset to the first character of the term */ 01758 } *aMatch; /* Points to space obtained from malloc */ 01759 char *zOffset; /* Text rendering of aMatch[] */ 01760 int nOffset; /* strlen(zOffset) */ 01761 char *zSnippet; /* Snippet text */ 01762 int nSnippet; /* strlen(zSnippet) */ 01763 } Snippet; 01764 01765 01766 typedef enum QueryType { 01767 QUERY_GENERIC, /* table scan */ 01768 QUERY_ROWID, /* lookup by rowid */ 01769 QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ 01770 } QueryType; 01771 01772 typedef enum fulltext_statement { 01773 CONTENT_INSERT_STMT, 01774 CONTENT_SELECT_STMT, 01775 CONTENT_UPDATE_STMT, 01776 CONTENT_DELETE_STMT, 01777 CONTENT_EXISTS_STMT, 01778 01779 BLOCK_INSERT_STMT, 01780 BLOCK_SELECT_STMT, 01781 BLOCK_DELETE_STMT, 01782 BLOCK_DELETE_ALL_STMT, 01783 01784 SEGDIR_MAX_INDEX_STMT, 01785 SEGDIR_SET_STMT, 01786 SEGDIR_SELECT_LEVEL_STMT, 01787 SEGDIR_SPAN_STMT, 01788 SEGDIR_DELETE_STMT, 01789 SEGDIR_SELECT_SEGMENT_STMT, 01790 SEGDIR_SELECT_ALL_STMT, 01791 SEGDIR_DELETE_ALL_STMT, 01792 SEGDIR_COUNT_STMT, 01793 01794 MAX_STMT /* Always at end! */ 01795 } fulltext_statement; 01796 01797 /* These must exactly match the enum above. */ 01798 /* TODO(shess): Is there some risk that a statement will be used in two 01799 ** cursors at once, e.g. if a query joins a virtual table to itself? 01800 ** If so perhaps we should move some of these to the cursor object. 01801 */ 01802 static const char *const fulltext_zStatement[MAX_STMT] = { 01803 /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ 01804 /* CONTENT_SELECT */ "select * from %_content where rowid = ?", 01805 /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ 01806 /* CONTENT_DELETE */ "delete from %_content where rowid = ?", 01807 /* CONTENT_EXISTS */ "select rowid from %_content limit 1", 01808 01809 /* BLOCK_INSERT */ "insert into %_segments values (?)", 01810 /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", 01811 /* BLOCK_DELETE */ "delete from %_segments where rowid between ? 
and ?", 01812 /* BLOCK_DELETE_ALL */ "delete from %_segments", 01813 01814 /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", 01815 /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", 01816 /* SEGDIR_SELECT_LEVEL */ 01817 "select start_block, leaves_end_block, root from %_segdir " 01818 " where level = ? order by idx", 01819 /* SEGDIR_SPAN */ 01820 "select min(start_block), max(end_block) from %_segdir " 01821 " where level = ? and start_block <> 0", 01822 /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", 01823 01824 /* NOTE(shess): The first three results of the following two 01825 ** statements must match. 01826 */ 01827 /* SEGDIR_SELECT_SEGMENT */ 01828 "select start_block, leaves_end_block, root from %_segdir " 01829 " where level = ? and idx = ?", 01830 /* SEGDIR_SELECT_ALL */ 01831 "select start_block, leaves_end_block, root from %_segdir " 01832 " order by level desc, idx asc", 01833 /* SEGDIR_DELETE_ALL */ "delete from %_segdir", 01834 /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", 01835 }; 01836 01837 /* 01838 ** A connection to a fulltext index is an instance of the following 01839 ** structure. The xCreate and xConnect methods create an instance 01840 ** of this structure and xDestroy and xDisconnect free that instance. 01841 ** All other methods receive a pointer to the structure as one of their 01842 ** arguments. 01843 */ 01844 struct fulltext_vtab { 01845 sqlite3_vtab base; /* Base class used by SQLite core */ 01846 sqlite3 *db; /* The database connection */ 01847 const char *zDb; /* logical database name */ 01848 const char *zName; /* virtual table name */ 01849 int nColumn; /* number of columns in virtual table */ 01850 char **azColumn; /* column names. malloced */ 01851 char **azContentColumn; /* column names in content table; malloced */ 01852 sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ 01853 01854 /* Precompiled statements which we keep as long as the table is 01855 ** open. 01856 */ 01857 sqlite3_stmt *pFulltextStatements[MAX_STMT]; 01858 01859 /* Precompiled statements used for segment merges. We run a 01860 ** separate select across the leaf level of each tree being merged. 01861 */ 01862 sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; 01863 /* The statement used to prepare pLeafSelectStmts. */ 01864 #define LEAF_SELECT \ 01865 "select block from %_segments where rowid between ? and ? order by rowid" 01866 01867 /* These buffer pending index updates during transactions. 01868 ** nPendingData estimates the memory size of the pending data. It 01869 ** doesn't include the hash-bucket overhead, nor any malloc 01870 ** overhead. When nPendingData exceeds kPendingThreshold, the 01871 ** buffer is flushed even before the transaction closes. 01872 ** pendingTerms stores the data, and is only valid when nPendingData 01873 ** is >=0 (nPendingData<0 means pendingTerms has not been 01874 ** initialized). iPrevDocid is the last docid written, used to make 01875 ** certain we're inserting in sorted order. 01876 */ 01877 int nPendingData; 01878 #define kPendingThreshold (1*1024*1024) 01879 sqlite_int64 iPrevDocid; 01880 fts2Hash pendingTerms; 01881 }; 01882 01883 /* 01884 ** When the core wants to do a query, it create a cursor using a 01885 ** call to xOpen. This structure is an instance of a cursor. It 01886 ** is destroyed by xClose. 
01887 */
01888 typedef struct fulltext_cursor {
01889   sqlite3_vtab_cursor base;        /* Base class used by SQLite core */
01890   QueryType iCursorType;           /* Copy of sqlite3_index_info.idxNum */
01891   sqlite3_stmt *pStmt;             /* Prepared statement in use by the cursor */
01892   int eof;                         /* True if at End Of Results */
01893   Query q;                         /* Parsed query string */
01894   Snippet snippet;                 /* Cached snippet for the current row */
01895   int iColumn;                     /* Column being searched */
01896   DataBuffer result;               /* Doclist results from fulltextQuery */
01897   DLReader reader;                 /* Result reader if result not empty */
01898 } fulltext_cursor;
01899 
01900 static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){
01901   return (fulltext_vtab *) c->base.pVtab;
01902 }
01903 
01904 static const sqlite3_module fts2Module;   /* forward declaration */
01905 
01906 /* Return a dynamically generated statement of the form
01907  *   insert into %_content (rowid, ...) values (?, ...)
01908  */
01909 static const char *contentInsertStatement(fulltext_vtab *v){
01910   StringBuffer sb;
01911   int i;
01912 
01913   initStringBuffer(&sb);
01914   append(&sb, "insert into %_content (rowid, ");
01915   appendList(&sb, v->nColumn, v->azContentColumn);
01916   append(&sb, ") values (?");
01917   for(i=0; i<v->nColumn; ++i)
01918     append(&sb, ", ?");
01919   append(&sb, ")");
01920   return stringBufferData(&sb);
01921 }
01922 
01923 /* Return a dynamically generated statement of the form
01924  *   update %_content set [col_0] = ?, [col_1] = ?, ...
01925  *                    where rowid = ?
01926  */
01927 static const char *contentUpdateStatement(fulltext_vtab *v){
01928   StringBuffer sb;
01929   int i;
01930 
01931   initStringBuffer(&sb);
01932   append(&sb, "update %_content set ");
01933   for(i=0; i<v->nColumn; ++i) {
01934     if( i>0 ){
01935       append(&sb, ", ");
01936     }
01937     append(&sb, v->azContentColumn[i]);
01938     append(&sb, " = ?");
01939   }
01940   append(&sb, " where rowid = ?");
01941   return stringBufferData(&sb);
01942 }
01943 
01944 /* Puts a freshly-prepared statement determined by iStmt in *ppStmt.
01945 ** If the indicated statement has never been prepared, it is prepared
01946 ** and cached, otherwise the cached version is reset.
01947 */
01948 static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt,
01949                              sqlite3_stmt **ppStmt){
01950   assert( iStmt<MAX_STMT );
01951   if( v->pFulltextStatements[iStmt]==NULL ){
01952     const char *zStmt;
01953     int rc;
01954     switch( iStmt ){
01955       case CONTENT_INSERT_STMT:
01956         zStmt = contentInsertStatement(v); break;
01957       case CONTENT_UPDATE_STMT:
01958         zStmt = contentUpdateStatement(v); break;
01959       default:
01960         zStmt = fulltext_zStatement[iStmt];
01961     }
01962     rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt],
01963                      zStmt);
01964     if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt);
01965     if( rc!=SQLITE_OK ) return rc;
01966   } else {
01967     int rc = sqlite3_reset(v->pFulltextStatements[iStmt]);
01968     if( rc!=SQLITE_OK ) return rc;
01969   }
01970 
01971   *ppStmt = v->pFulltextStatements[iStmt];
01972   return SQLITE_OK;
01973 }
01974 
01975 /* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK.  Any
01976 ** other result, including an unexpected SQLITE_ROW, is returned to
01977 ** the caller unchanged, and so is treated as an error.  Useful for
01978 ** statements like UPDATE, where we expect no results.
01979 */
01980 static int sql_single_step(sqlite3_stmt *s){
01981   int rc = sqlite3_step(s);
01982   return (rc==SQLITE_DONE) ? SQLITE_OK : rc;
01983 }
01984 
01985 /* Like sql_get_statement(), but for special replicated LEAF_SELECT
01986 ** statements.
idx -1 is a special case for an uncached version of 01986 ** the statement (used in the optimize implementation). 01987 */ 01988 /* TODO(shess) Write version for generic statements and then share 01989 ** that between the cached-statement functions. 01990 */ 01991 static int sql_get_leaf_statement(fulltext_vtab *v, int idx, 01992 sqlite3_stmt **ppStmt){ 01993 assert( idx>=-1 && idx<MERGE_COUNT ); 01994 if( idx==-1 ){ 01995 return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); 01996 }else if( v->pLeafSelectStmts[idx]==NULL ){ 01997 int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], 01998 LEAF_SELECT); 01999 if( rc!=SQLITE_OK ) return rc; 02000 }else{ 02001 int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); 02002 if( rc!=SQLITE_OK ) return rc; 02003 } 02004 02005 *ppStmt = v->pLeafSelectStmts[idx]; 02006 return SQLITE_OK; 02007 } 02008 02009 /* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ 02010 static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, 02011 sqlite3_value **pValues){ 02012 sqlite3_stmt *s; 02013 int i; 02014 int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); 02015 if( rc!=SQLITE_OK ) return rc; 02016 02017 rc = sqlite3_bind_value(s, 1, rowid); 02018 if( rc!=SQLITE_OK ) return rc; 02019 02020 for(i=0; i<v->nColumn; ++i){ 02021 rc = sqlite3_bind_value(s, 2+i, pValues[i]); 02022 if( rc!=SQLITE_OK ) return rc; 02023 } 02024 02025 return sql_single_step(s); 02026 } 02027 02028 /* update %_content set col0 = pValues[0], col1 = pValues[1], ... 02029 * where rowid = [iRowid] */ 02030 static int content_update(fulltext_vtab *v, sqlite3_value **pValues, 02031 sqlite_int64 iRowid){ 02032 sqlite3_stmt *s; 02033 int i; 02034 int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); 02035 if( rc!=SQLITE_OK ) return rc; 02036 02037 for(i=0; i<v->nColumn; ++i){ 02038 rc = sqlite3_bind_value(s, 1+i, pValues[i]); 02039 if( rc!=SQLITE_OK ) return rc; 02040 } 02041 02042 rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); 02043 if( rc!=SQLITE_OK ) return rc; 02044 02045 return sql_single_step(s); 02046 } 02047 02048 static void freeStringArray(int nString, const char **pString){ 02049 int i; 02050 02051 for (i=0 ; i < nString ; ++i) { 02052 if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); 02053 } 02054 sqlite3_free((void *) pString); 02055 } 02056 02057 /* select * from %_content where rowid = [iRow] 02058 * The caller must delete the returned array and all strings in it. 02059 * null fields will be NULL in the returned array. 02060 * 02061 * TODO: Perhaps we should return pointer/length strings here for consistency 02062 * with other code which uses pointer/length. */ 02063 static int content_select(fulltext_vtab *v, sqlite_int64 iRow, 02064 const char ***pValues){ 02065 sqlite3_stmt *s; 02066 const char **values; 02067 int i; 02068 int rc; 02069 02070 *pValues = NULL; 02071 02072 rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); 02073 if( rc!=SQLITE_OK ) return rc; 02074 02075 rc = sqlite3_bind_int64(s, 1, iRow); 02076 if( rc!=SQLITE_OK ) return rc; 02077 02078 rc = sqlite3_step(s); 02079 if( rc!=SQLITE_ROW ) return rc; 02080 02081 values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); 02082 for(i=0; i<v->nColumn; ++i){ 02083 if( sqlite3_column_type(s, i)==SQLITE_NULL ){ 02084 values[i] = NULL; 02085 }else{ 02086 values[i] = string_dup((char*)sqlite3_column_text(s, i)); 02087 } 02088 } 02089 02090 /* We expect only one row. 
We must execute another sqlite3_step() 02091 * to complete the iteration; otherwise the table will remain locked. */ 02092 rc = sqlite3_step(s); 02093 if( rc==SQLITE_DONE ){ 02094 *pValues = values; 02095 return SQLITE_OK; 02096 } 02097 02098 freeStringArray(v->nColumn, values); 02099 return rc; 02100 } 02101 02102 /* delete from %_content where rowid = [iRow ] */ 02103 static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ 02104 sqlite3_stmt *s; 02105 int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); 02106 if( rc!=SQLITE_OK ) return rc; 02107 02108 rc = sqlite3_bind_int64(s, 1, iRow); 02109 if( rc!=SQLITE_OK ) return rc; 02110 02111 return sql_single_step(s); 02112 } 02113 02114 /* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if 02115 ** no rows exist, and any error in case of failure. 02116 */ 02117 static int content_exists(fulltext_vtab *v){ 02118 sqlite3_stmt *s; 02119 int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); 02120 if( rc!=SQLITE_OK ) return rc; 02121 02122 rc = sqlite3_step(s); 02123 if( rc!=SQLITE_ROW ) return rc; 02124 02125 /* We expect only one row. We must execute another sqlite3_step() 02126 * to complete the iteration; otherwise the table will remain locked. */ 02127 rc = sqlite3_step(s); 02128 if( rc==SQLITE_DONE ) return SQLITE_ROW; 02129 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02130 return rc; 02131 } 02132 02133 /* insert into %_segments values ([pData]) 02134 ** returns assigned rowid in *piBlockid 02135 */ 02136 static int block_insert(fulltext_vtab *v, const char *pData, int nData, 02137 sqlite_int64 *piBlockid){ 02138 sqlite3_stmt *s; 02139 int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); 02140 if( rc!=SQLITE_OK ) return rc; 02141 02142 rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); 02143 if( rc!=SQLITE_OK ) return rc; 02144 02145 rc = sqlite3_step(s); 02146 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02147 if( rc!=SQLITE_DONE ) return rc; 02148 02149 *piBlockid = sqlite3_last_insert_rowid(v->db); 02150 return SQLITE_OK; 02151 } 02152 02153 /* delete from %_segments 02154 ** where rowid between [iStartBlockid] and [iEndBlockid] 02155 ** 02156 ** Deletes the range of blocks, inclusive, used to delete the blocks 02157 ** which form a segment. 02158 */ 02159 static int block_delete(fulltext_vtab *v, 02160 sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ 02161 sqlite3_stmt *s; 02162 int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); 02163 if( rc!=SQLITE_OK ) return rc; 02164 02165 rc = sqlite3_bind_int64(s, 1, iStartBlockid); 02166 if( rc!=SQLITE_OK ) return rc; 02167 02168 rc = sqlite3_bind_int64(s, 2, iEndBlockid); 02169 if( rc!=SQLITE_OK ) return rc; 02170 02171 return sql_single_step(s); 02172 } 02173 02174 /* Returns SQLITE_ROW with *pidx set to the maximum segment idx found 02175 ** at iLevel. Returns SQLITE_DONE if there are no segments at 02176 ** iLevel. Otherwise returns an error. 02177 */ 02178 static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ 02179 sqlite3_stmt *s; 02180 int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); 02181 if( rc!=SQLITE_OK ) return rc; 02182 02183 rc = sqlite3_bind_int(s, 1, iLevel); 02184 if( rc!=SQLITE_OK ) return rc; 02185 02186 rc = sqlite3_step(s); 02187 /* Should always get at least one row due to how max() works. */ 02188 if( rc==SQLITE_DONE ) return SQLITE_DONE; 02189 if( rc!=SQLITE_ROW ) return rc; 02190 02191 /* NULL means that there were no inputs to max(). 
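** (Illustratively: "select max(idx) from %_segdir where level = ?"
** always returns one row, but that row holds NULL when no segments
** exist at the requested level; the code below maps that case to
** SQLITE_DONE.)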
*/ 02192 if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ 02193 rc = sqlite3_step(s); 02194 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02195 return rc; 02196 } 02197 02198 *pidx = sqlite3_column_int(s, 0); 02199 02200 /* We expect only one row. We must execute another sqlite3_step() 02201 * to complete the iteration; otherwise the table will remain locked. */ 02202 rc = sqlite3_step(s); 02203 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02204 if( rc!=SQLITE_DONE ) return rc; 02205 return SQLITE_ROW; 02206 } 02207 02208 /* insert into %_segdir values ( 02209 ** [iLevel], [idx], 02210 ** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], 02211 ** [pRootData] 02212 ** ) 02213 */ 02214 static int segdir_set(fulltext_vtab *v, int iLevel, int idx, 02215 sqlite_int64 iStartBlockid, 02216 sqlite_int64 iLeavesEndBlockid, 02217 sqlite_int64 iEndBlockid, 02218 const char *pRootData, int nRootData){ 02219 sqlite3_stmt *s; 02220 int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); 02221 if( rc!=SQLITE_OK ) return rc; 02222 02223 rc = sqlite3_bind_int(s, 1, iLevel); 02224 if( rc!=SQLITE_OK ) return rc; 02225 02226 rc = sqlite3_bind_int(s, 2, idx); 02227 if( rc!=SQLITE_OK ) return rc; 02228 02229 rc = sqlite3_bind_int64(s, 3, iStartBlockid); 02230 if( rc!=SQLITE_OK ) return rc; 02231 02232 rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); 02233 if( rc!=SQLITE_OK ) return rc; 02234 02235 rc = sqlite3_bind_int64(s, 5, iEndBlockid); 02236 if( rc!=SQLITE_OK ) return rc; 02237 02238 rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); 02239 if( rc!=SQLITE_OK ) return rc; 02240 02241 return sql_single_step(s); 02242 } 02243 02244 /* Queries %_segdir for the block span of the segments in level 02245 ** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, 02246 ** SQLITE_ROW if there are blocks, else an error. 02247 */ 02248 static int segdir_span(fulltext_vtab *v, int iLevel, 02249 sqlite_int64 *piStartBlockid, 02250 sqlite_int64 *piEndBlockid){ 02251 sqlite3_stmt *s; 02252 int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); 02253 if( rc!=SQLITE_OK ) return rc; 02254 02255 rc = sqlite3_bind_int(s, 1, iLevel); 02256 if( rc!=SQLITE_OK ) return rc; 02257 02258 rc = sqlite3_step(s); 02259 if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ 02260 if( rc!=SQLITE_ROW ) return rc; 02261 02262 /* This happens if all segments at this level are entirely inline. */ 02263 if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ 02264 /* We expect only one row. We must execute another sqlite3_step() 02265 * to complete the iteration; otherwise the table will remain locked. */ 02266 int rc2 = sqlite3_step(s); 02267 if( rc2==SQLITE_ROW ) return SQLITE_ERROR; 02268 return rc2; 02269 } 02270 02271 *piStartBlockid = sqlite3_column_int64(s, 0); 02272 *piEndBlockid = sqlite3_column_int64(s, 1); 02273 02274 /* We expect only one row. We must execute another sqlite3_step() 02275 * to complete the iteration; otherwise the table will remain locked. */ 02276 rc = sqlite3_step(s); 02277 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02278 if( rc!=SQLITE_DONE ) return rc; 02279 return SQLITE_ROW; 02280 } 02281 02282 /* Delete the segment blocks and segment directory records for all 02283 ** segments at iLevel. 
02284 */ 02285 static int segdir_delete(fulltext_vtab *v, int iLevel){ 02286 sqlite3_stmt *s; 02287 sqlite_int64 iStartBlockid, iEndBlockid; 02288 int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); 02289 if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; 02290 02291 if( rc==SQLITE_ROW ){ 02292 rc = block_delete(v, iStartBlockid, iEndBlockid); 02293 if( rc!=SQLITE_OK ) return rc; 02294 } 02295 02296 /* Delete the segment directory itself. */ 02297 rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); 02298 if( rc!=SQLITE_OK ) return rc; 02299 02300 rc = sqlite3_bind_int64(s, 1, iLevel); 02301 if( rc!=SQLITE_OK ) return rc; 02302 02303 return sql_single_step(s); 02304 } 02305 02306 /* Delete entire fts index, SQLITE_OK on success, relevant error on 02307 ** failure. 02308 */ 02309 static int segdir_delete_all(fulltext_vtab *v){ 02310 sqlite3_stmt *s; 02311 int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); 02312 if( rc!=SQLITE_OK ) return rc; 02313 02314 rc = sql_single_step(s); 02315 if( rc!=SQLITE_OK ) return rc; 02316 02317 rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); 02318 if( rc!=SQLITE_OK ) return rc; 02319 02320 return sql_single_step(s); 02321 } 02322 02323 /* Returns SQLITE_OK with *pnSegments set to the number of entries in 02324 ** %_segdir and *piMaxLevel set to the highest level which has a 02325 ** segment. Otherwise returns the SQLite error which caused failure. 02326 */ 02327 static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ 02328 sqlite3_stmt *s; 02329 int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); 02330 if( rc!=SQLITE_OK ) return rc; 02331 02332 rc = sqlite3_step(s); 02333 /* TODO(shess): This case should not be possible? Should stronger 02334 ** measures be taken if it happens? 02335 */ 02336 if( rc==SQLITE_DONE ){ 02337 *pnSegments = 0; 02338 *piMaxLevel = 0; 02339 return SQLITE_OK; 02340 } 02341 if( rc!=SQLITE_ROW ) return rc; 02342 02343 *pnSegments = sqlite3_column_int(s, 0); 02344 *piMaxLevel = sqlite3_column_int(s, 1); 02345 02346 /* We expect only one row. We must execute another sqlite3_step() 02347 * to complete the iteration; otherwise the table will remain locked. */ 02348 rc = sqlite3_step(s); 02349 if( rc==SQLITE_DONE ) return SQLITE_OK; 02350 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 02351 return rc; 02352 } 02353 02354 /* TODO(shess) clearPendingTerms() is far down the file because 02355 ** writeZeroSegment() is far down the file because LeafWriter is far 02356 ** down the file. Consider refactoring the code to move the non-vtab 02357 ** code above the vtab code so that we don't need this forward 02358 ** reference. 02359 */ 02360 static int clearPendingTerms(fulltext_vtab *v); 02361 02362 /* 02363 ** Free the memory used to contain a fulltext_vtab structure. 
02364 */
02365 static void fulltext_vtab_destroy(fulltext_vtab *v){
02366   int iStmt, i;
02367 
02368   TRACE(("FTS2 Destroy %p\n", v));
02369   for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){
02370     if( v->pFulltextStatements[iStmt]!=NULL ){
02371       sqlite3_finalize(v->pFulltextStatements[iStmt]);
02372       v->pFulltextStatements[iStmt] = NULL;
02373     }
02374   }
02375 
02376   for( i=0; i<MERGE_COUNT; i++ ){
02377     if( v->pLeafSelectStmts[i]!=NULL ){
02378       sqlite3_finalize(v->pLeafSelectStmts[i]);
02379       v->pLeafSelectStmts[i] = NULL;
02380     }
02381   }
02382 
02383   if( v->pTokenizer!=NULL ){
02384     v->pTokenizer->pModule->xDestroy(v->pTokenizer);
02385     v->pTokenizer = NULL;
02386   }
02387 
02388   clearPendingTerms(v);
02389 
02390   sqlite3_free(v->azColumn);
02391   for(i = 0; i < v->nColumn; ++i) {
02392     sqlite3_free(v->azContentColumn[i]);
02393   }
02394   sqlite3_free(v->azContentColumn);
02395   sqlite3_free(v);
02396 }
02397 
02398 /*
02399 ** Token types for parsing the arguments to xConnect or xCreate.
02400 */
02401 #define TOKEN_EOF         0    /* End of file */
02402 #define TOKEN_SPACE       1    /* Any kind of whitespace */
02403 #define TOKEN_ID          2    /* An identifier */
02404 #define TOKEN_STRING      3    /* A string literal */
02405 #define TOKEN_PUNCT       4    /* A single punctuation character */
02406 
02407 /*
02408 ** If X is a character that can be used in an identifier then
02409 ** IdChar(X) will be true.  Otherwise it is false.
02410 **
02411 ** Any character with the high-order bit set (i.e. outside the 7-bit
02412 ** ASCII range) is allowed in an identifier.  For a 7-bit character X,
02413 ** isIdChar[X-0x20] must be 1.
02414 **
02415 ** Ticket #1066: the SQL standard does not allow '$' in the
02416 ** middle of identifiers.  But many SQL implementations do.
02417 ** SQLite will allow '$' in identifiers for compatibility.
02418 ** But the feature is undocumented.
02419 */
02420 static const char isIdChar[] = {
02421 /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
02422     0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  /* 2x */
02423     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,  /* 3x */
02424     0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 4x */
02425     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,  /* 5x */
02426     0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 6x */
02427     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,  /* 7x */
02428 };
02429 #define IdChar(C)  (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20]))
02430 
02431 
02432 /*
02433 ** Return the length of the token that begins at z[0].
02434 ** Store the token type in *tokenType before returning.
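**
** For example (illustrative): with z=="tokenize simple", the first
** call returns 8 and sets *tokenType to TOKEN_ID ("tokenize"); on the
** remainder it returns 1 with TOKEN_SPACE, then 6 with TOKEN_ID.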
02435 */
02436 static int getToken(const char *z, int *tokenType){
02437   int i, c;
02438   switch( *z ){
02439     case 0: {
02440       *tokenType = TOKEN_EOF;
02441       return 0;
02442     }
02443     case ' ': case '\t': case '\n': case '\f': case '\r': {
02444       for(i=1; safe_isspace(z[i]); i++){}
02445       *tokenType = TOKEN_SPACE;
02446       return i;
02447     }
02448     case '`':
02449     case '\'':
02450     case '"': {
02451       int delim = z[0];
02452       for(i=1; (c=z[i])!=0; i++){
02453         if( c==delim ){
02454           if( z[i+1]==delim ){
02455             i++;
02456           }else{
02457             break;
02458           }
02459         }
02460       }
02461       *tokenType = TOKEN_STRING;
02462       return i + (c!=0);
02463     }
02464     case '[': {
02465       for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){}
02466       *tokenType = TOKEN_ID;
02467       return i;
02468     }
02469     default: {
02470       if( !IdChar(*z) ){
02471         break;
02472       }
02473       for(i=1; IdChar(z[i]); i++){}
02474       *tokenType = TOKEN_ID;
02475       return i;
02476     }
02477   }
02478   *tokenType = TOKEN_PUNCT;
02479   return 1;
02480 }
02481 
02482 /*
02483 ** A token extracted from a string is an instance of the following
02484 ** structure.
02485 */
02486 typedef struct Token {
02487   const char *z;       /* Pointer to token text.  Not '\000' terminated */
02488   short int n;         /* Length of the token text in bytes. */
02489 } Token;
02490 
02491 /*
02492 ** Given an input string (which is really one of the argv[] parameters
02493 ** passed into xConnect or xCreate) split the string up into tokens.
02494 ** Return an array of pointers to '\000' terminated strings, one string
02495 ** for each non-whitespace token.
02496 **
02497 ** The returned array is terminated by a single NULL pointer.
02498 **
02499 ** Space to hold the returned array is obtained from a single
02500 ** malloc and should be freed by passing the return value to sqlite3_free().
02501 ** The individual strings within the token list are all a part of
02502 ** the single memory allocation and will all be freed at once.
02503 */
02504 static char **tokenizeString(const char *z, int *pnToken){
02505   int nToken = 0;
02506   Token *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) );
02507   int n = 1;
02508   int e, i;
02509   int totalSize = 0;
02510   char **azToken;
02511   char *zCopy;
02512   while( n>0 ){
02513     n = getToken(z, &e);
02514     if( e!=TOKEN_SPACE ){
02515       aToken[nToken].z = z;
02516       aToken[nToken].n = n;
02517       nToken++;
02518       totalSize += n+1;
02519     }
02520     z += n;
02521   }
02522   azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize );
02523   zCopy = (char*)&azToken[nToken];
02524   nToken--;
02525   for(i=0; i<nToken; i++){
02526     azToken[i] = zCopy;
02527     n = aToken[i].n;
02528     memcpy(zCopy, aToken[i].z, n);
02529     zCopy[n] = 0;
02530     zCopy += n+1;
02531   }
02532   azToken[nToken] = 0;
02533   sqlite3_free(aToken);
02534   *pnToken = nToken;
02535   return azToken;
02536 }
02537 
02538 /*
02539 ** Convert an SQL-style quoted string into a normal string by removing
02540 ** the quote characters.  The conversion is done in-place.  If the
02541 ** input does not begin with a quote character, then this routine
02542 ** is a no-op.
02543 **
02544 ** Examples:
02545 **
02546 **     "abc"   becomes   abc
02547 **     'xyz'   becomes   xyz
02548 **     [pqr]   becomes   pqr
02549 **     `mno`   becomes   mno
02550 */
02551 static void dequoteString(char *z){
02552   int quote;
02553   int i, j;
02554   if( z==0 ) return;
02555   quote = z[0];
02556   switch( quote ){
02557     case '\'':  break;
02558     case '"':   break;
02559     case '`':   break;                /* For MySQL compatibility */
02560     case '[':   quote = ']';  break;  /* For MS SqlServer compatibility */
02561     default:    return;
02562   }
02563   for(i=1, j=0; z[i]; i++){
02564     if( z[i]==quote ){
02565       if( z[i+1]==quote ){
02566         z[j++] = quote;
02567         i++;
02568       }else{
02569         z[j++] = 0;
02570         break;
02571       }
02572     }else{
02573       z[j++] = z[i];
02574     }
02575   }
02576 }
02577 
02578 /*
02579 ** The input azIn is a NULL-terminated list of tokens.  Remove the first
02580 ** token and all punctuation tokens.  Remove the quotes from
02581 ** around string literal tokens.
02582 **
02583 ** Example:
02584 **
02585 **     input:      tokenize chinese ( 'simplifed' , 'mixed' )
02586 **     output:     chinese simplifed mixed
02587 **
02588 ** Another example:
02589 **
02590 **     input:      delimiters ( '[' , ']' , '...' )
02591 **     output:     [ ] ...
02592 */
02593 static void tokenListToIdList(char **azIn){
02594   int i, j;
02595   if( azIn ){
02596     for(i=0, j=-1; azIn[i]; i++){
02597       if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){
02598         dequoteString(azIn[i]);
02599         if( j>=0 ){
02600           azIn[j] = azIn[i];
02601         }
02602         j++;
02603       }
02604     }
02605     azIn[j] = 0;
02606   }
02607 }
02608 
02609 
02610 /*
02611 ** Find the first alphanumeric token in the string zIn.  Null-terminate
02612 ** this token.  Remove any quotation marks.  Return a pointer to
02613 ** the result.
02614 */
02615 static char *firstToken(char *zIn, char **pzTail){
02616   int n, ttype;
02617   while(1){
02618     n = getToken(zIn, &ttype);
02619     if( ttype==TOKEN_SPACE ){
02620       zIn += n;
02621     }else if( ttype==TOKEN_EOF ){
02622       *pzTail = zIn;
02623       return 0;
02624     }else{
02625       zIn[n] = 0;
02626       *pzTail = &zIn[1];
02627       dequoteString(zIn);
02628       return zIn;
02629     }
02630   }
02631   /*NOTREACHED*/
02632 }
02633 
02634 /* Return true if...
02635 **
02636 **   *  s begins with the string t, ignoring case
02637 **   *  s is longer than t
02638 **   *  The first character of s beyond t is neither alphanumeric nor '_'
02639 **
02640 ** Ignore leading space in *s.
02641 **
02642 ** To put it another way, return true if the first token of
02643 ** s[] is t[].
02644 */
02645 static int startsWith(const char *s, const char *t){
02646   while( safe_isspace(*s) ){ s++; }
02647   while( *t ){
02648     if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0;
02649   }
02650   return *s!='_' && !safe_isalnum(*s);
02651 }
02652 
02653 /*
02654 ** An instance of this structure defines the "spec" of a
02655 ** full text index.  This structure is populated by parseSpec
02656 ** and used by fulltextConnect and fulltextCreate.
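**
** For example (an illustrative sketch of the parsed result): for
**
**     CREATE VIRTUAL TABLE email USING fts2(subject, body)
**
** parseSpec() produces zName=="email", nColumn==2,
** azColumn=={"subject", "body"},
** azContentColumn=={"c0subject", "c1body"}, and
** azTokenizer=={"simple"} (the default tokenizer).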
02657 */ 02658 typedef struct TableSpec { 02659 const char *zDb; /* Logical database name */ 02660 const char *zName; /* Name of the full-text index */ 02661 int nColumn; /* Number of columns to be indexed */ 02662 char **azColumn; /* Original names of columns to be indexed */ 02663 char **azContentColumn; /* Column names for %_content */ 02664 char **azTokenizer; /* Name of tokenizer and its arguments */ 02665 } TableSpec; 02666 02667 /* 02668 ** Reclaim all of the memory used by a TableSpec 02669 */ 02670 static void clearTableSpec(TableSpec *p) { 02671 sqlite3_free(p->azColumn); 02672 sqlite3_free(p->azContentColumn); 02673 sqlite3_free(p->azTokenizer); 02674 } 02675 02676 /* Parse a CREATE VIRTUAL TABLE statement, which looks like this: 02677 * 02678 * CREATE VIRTUAL TABLE email 02679 * USING fts2(subject, body, tokenize mytokenizer(myarg)) 02680 * 02681 * We return parsed information in a TableSpec structure. 02682 * 02683 */ 02684 static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, 02685 char**pzErr){ 02686 int i, n; 02687 char *z, *zDummy; 02688 char **azArg; 02689 const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ 02690 02691 assert( argc>=3 ); 02692 /* Current interface: 02693 ** argv[0] - module name 02694 ** argv[1] - database name 02695 ** argv[2] - table name 02696 ** argv[3..] - columns, optionally followed by tokenizer specification 02697 ** and snippet delimiters specification. 02698 */ 02699 02700 /* Make a copy of the complete argv[][] array in a single allocation. 02701 ** The argv[][] array is read-only and transient. We can write to the 02702 ** copy in order to modify things and the copy is persistent. 02703 */ 02704 CLEAR(pSpec); 02705 for(i=n=0; i<argc; i++){ 02706 n += strlen(argv[i]) + 1; 02707 } 02708 azArg = sqlite3_malloc( sizeof(char*)*argc + n ); 02709 if( azArg==0 ){ 02710 return SQLITE_NOMEM; 02711 } 02712 z = (char*)&azArg[argc]; 02713 for(i=0; i<argc; i++){ 02714 azArg[i] = z; 02715 strcpy(z, argv[i]); 02716 z += strlen(z)+1; 02717 } 02718 02719 /* Identify the column names and the tokenizer and delimiter arguments 02720 ** in the argv[][] array. 02721 */ 02722 pSpec->zDb = azArg[1]; 02723 pSpec->zName = azArg[2]; 02724 pSpec->nColumn = 0; 02725 pSpec->azColumn = azArg; 02726 zTokenizer = "tokenize simple"; 02727 for(i=3; i<argc; ++i){ 02728 if( startsWith(azArg[i],"tokenize") ){ 02729 zTokenizer = azArg[i]; 02730 }else{ 02731 z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); 02732 pSpec->nColumn++; 02733 } 02734 } 02735 if( pSpec->nColumn==0 ){ 02736 azArg[0] = "content"; 02737 pSpec->nColumn = 1; 02738 } 02739 02740 /* 02741 ** Construct the list of content column names. 02742 ** 02743 ** Each content column name will be of the form cNNAAAA 02744 ** where NN is the column number and AAAA is the sanitized 02745 ** column name. "sanitized" means that special characters are 02746 ** converted to "_". The cNN prefix guarantees that all column 02747 ** names are unique. 02748 ** 02749 ** The AAAA suffix is not strictly necessary. It is included 02750 ** for the convenience of people who might examine the generated 02751 ** %_content table and wonder what the columns are used for. 
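**
** (Illustrative: a column declared as [e-mail] would become content
** column "c0e_mail" when it is column number 0.)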
02752 */ 02753 pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); 02754 if( pSpec->azContentColumn==0 ){ 02755 clearTableSpec(pSpec); 02756 return SQLITE_NOMEM; 02757 } 02758 for(i=0; i<pSpec->nColumn; i++){ 02759 char *p; 02760 pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); 02761 for (p = pSpec->azContentColumn[i]; *p ; ++p) { 02762 if( !safe_isalnum(*p) ) *p = '_'; 02763 } 02764 } 02765 02766 /* 02767 ** Parse the tokenizer specification string. 02768 */ 02769 pSpec->azTokenizer = tokenizeString(zTokenizer, &n); 02770 tokenListToIdList(pSpec->azTokenizer); 02771 02772 return SQLITE_OK; 02773 } 02774 02775 /* 02776 ** Generate a CREATE TABLE statement that describes the schema of 02777 ** the virtual table. Return a pointer to this schema string. 02778 ** 02779 ** Space is obtained from sqlite3_mprintf() and should be freed 02780 ** using sqlite3_free(). 02781 */ 02782 static char *fulltextSchema( 02783 int nColumn, /* Number of columns */ 02784 const char *const* azColumn, /* List of columns */ 02785 const char *zTableName /* Name of the table */ 02786 ){ 02787 int i; 02788 char *zSchema, *zNext; 02789 const char *zSep = "("; 02790 zSchema = sqlite3_mprintf("CREATE TABLE x"); 02791 for(i=0; i<nColumn; i++){ 02792 zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); 02793 sqlite3_free(zSchema); 02794 zSchema = zNext; 02795 zSep = ","; 02796 } 02797 zNext = sqlite3_mprintf("%s,%Q)", zSchema, zTableName); 02798 sqlite3_free(zSchema); 02799 return zNext; 02800 } 02801 02802 /* 02803 ** Build a new sqlite3_vtab structure that will describe the 02804 ** fulltext index defined by spec. 02805 */ 02806 static int constructVtab( 02807 sqlite3 *db, /* The SQLite database connection */ 02808 fts2Hash *pHash, /* Hash table containing tokenizers */ 02809 TableSpec *spec, /* Parsed spec information from parseSpec() */ 02810 sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ 02811 char **pzErr /* Write any error message here */ 02812 ){ 02813 int rc; 02814 int n; 02815 fulltext_vtab *v = 0; 02816 const sqlite3_tokenizer_module *m = NULL; 02817 char *schema; 02818 02819 char const *zTok; /* Name of tokenizer to use for this fts table */ 02820 int nTok; /* Length of zTok, including nul terminator */ 02821 02822 v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); 02823 if( v==0 ) return SQLITE_NOMEM; 02824 CLEAR(v); 02825 /* sqlite will initialize v->base */ 02826 v->db = db; 02827 v->zDb = spec->zDb; /* Freed when azColumn is freed */ 02828 v->zName = spec->zName; /* Freed when azColumn is freed */ 02829 v->nColumn = spec->nColumn; 02830 v->azContentColumn = spec->azContentColumn; 02831 spec->azContentColumn = 0; 02832 v->azColumn = spec->azColumn; 02833 spec->azColumn = 0; 02834 02835 if( spec->azTokenizer==0 ){ 02836 return SQLITE_NOMEM; 02837 } 02838 02839 zTok = spec->azTokenizer[0]; 02840 if( !zTok ){ 02841 zTok = "simple"; 02842 } 02843 nTok = strlen(zTok)+1; 02844 02845 m = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zTok, nTok); 02846 if( !m ){ 02847 *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); 02848 rc = SQLITE_ERROR; 02849 goto err; 02850 } 02851 02852 for(n=0; spec->azTokenizer[n]; n++){} 02853 if( n ){ 02854 rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], 02855 &v->pTokenizer); 02856 }else{ 02857 rc = m->xCreate(0, 0, &v->pTokenizer); 02858 } 02859 if( rc!=SQLITE_OK ) goto err; 02860 v->pTokenizer->pModule = m; 02861 02862 /* TODO: verify the existence of 
backing tables foo_content, foo_term */ 02863 02864 schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, 02865 spec->zName); 02866 rc = sqlite3_declare_vtab(db, schema); 02867 sqlite3_free(schema); 02868 if( rc!=SQLITE_OK ) goto err; 02869 02870 memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); 02871 02872 /* Indicate that the buffer is not live. */ 02873 v->nPendingData = -1; 02874 02875 *ppVTab = &v->base; 02876 TRACE(("FTS2 Connect %p\n", v)); 02877 02878 return rc; 02879 02880 err: 02881 fulltext_vtab_destroy(v); 02882 return rc; 02883 } 02884 02885 static int fulltextConnect( 02886 sqlite3 *db, 02887 void *pAux, 02888 int argc, const char *const*argv, 02889 sqlite3_vtab **ppVTab, 02890 char **pzErr 02891 ){ 02892 TableSpec spec; 02893 int rc = parseSpec(&spec, argc, argv, pzErr); 02894 if( rc!=SQLITE_OK ) return rc; 02895 02896 rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); 02897 clearTableSpec(&spec); 02898 return rc; 02899 } 02900 02901 /* The %_content table holds the text of each document, with 02902 ** the rowid used as the docid. 02903 */ 02904 /* TODO(shess) This comment needs elaboration to match the updated 02905 ** code. Work it into the top-of-file comment at that time. 02906 */ 02907 static int fulltextCreate(sqlite3 *db, void *pAux, 02908 int argc, const char * const *argv, 02909 sqlite3_vtab **ppVTab, char **pzErr){ 02910 int rc; 02911 TableSpec spec; 02912 StringBuffer schema; 02913 TRACE(("FTS2 Create\n")); 02914 02915 rc = parseSpec(&spec, argc, argv, pzErr); 02916 if( rc!=SQLITE_OK ) return rc; 02917 02918 initStringBuffer(&schema); 02919 append(&schema, "CREATE TABLE %_content("); 02920 appendList(&schema, spec.nColumn, spec.azContentColumn); 02921 append(&schema, ")"); 02922 rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); 02923 stringBufferDestroy(&schema); 02924 if( rc!=SQLITE_OK ) goto out; 02925 02926 rc = sql_exec(db, spec.zDb, spec.zName, 02927 "create table %_segments(block blob);"); 02928 if( rc!=SQLITE_OK ) goto out; 02929 02930 rc = sql_exec(db, spec.zDb, spec.zName, 02931 "create table %_segdir(" 02932 " level integer," 02933 " idx integer," 02934 " start_block integer," 02935 " leaves_end_block integer," 02936 " end_block integer," 02937 " root blob," 02938 " primary key(level, idx)" 02939 ");"); 02940 if( rc!=SQLITE_OK ) goto out; 02941 02942 rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); 02943 02944 out: 02945 clearTableSpec(&spec); 02946 return rc; 02947 } 02948 02949 /* Decide how to handle an SQL query. */ 02950 static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ 02951 int i; 02952 TRACE(("FTS2 BestIndex\n")); 02953 02954 for(i=0; i<pInfo->nConstraint; ++i){ 02955 const struct sqlite3_index_constraint *pConstraint; 02956 pConstraint = &pInfo->aConstraint[i]; 02957 if( pConstraint->usable ) { 02958 if( pConstraint->iColumn==-1 && 02959 pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ 02960 pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ 02961 TRACE(("FTS2 QUERY_ROWID\n")); 02962 } else if( pConstraint->iColumn>=0 && 02963 pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ 02964 /* full-text search */ 02965 pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; 02966 TRACE(("FTS2 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); 02967 } else continue; 02968 02969 pInfo->aConstraintUsage[i].argvIndex = 1; 02970 pInfo->aConstraintUsage[i].omit = 1; 02971 02972 /* An arbitrary value for now. 
02973 * TODO: Perhaps rowid matches should be considered cheaper than 02974 * full-text searches. */ 02975 pInfo->estimatedCost = 1.0; 02976 02977 return SQLITE_OK; 02978 } 02979 } 02980 pInfo->idxNum = QUERY_GENERIC; 02981 return SQLITE_OK; 02982 } 02983 02984 static int fulltextDisconnect(sqlite3_vtab *pVTab){ 02985 TRACE(("FTS2 Disconnect %p\n", pVTab)); 02986 fulltext_vtab_destroy((fulltext_vtab *)pVTab); 02987 return SQLITE_OK; 02988 } 02989 02990 static int fulltextDestroy(sqlite3_vtab *pVTab){ 02991 fulltext_vtab *v = (fulltext_vtab *)pVTab; 02992 int rc; 02993 02994 TRACE(("FTS2 Destroy %p\n", pVTab)); 02995 rc = sql_exec(v->db, v->zDb, v->zName, 02996 "drop table if exists %_content;" 02997 "drop table if exists %_segments;" 02998 "drop table if exists %_segdir;" 02999 ); 03000 if( rc!=SQLITE_OK ) return rc; 03001 03002 fulltext_vtab_destroy((fulltext_vtab *)pVTab); 03003 return SQLITE_OK; 03004 } 03005 03006 static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ 03007 fulltext_cursor *c; 03008 03009 c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); 03010 if( c ){ 03011 memset(c, 0, sizeof(fulltext_cursor)); 03012 /* sqlite will initialize c->base */ 03013 *ppCursor = &c->base; 03014 TRACE(("FTS2 Open %p: %p\n", pVTab, c)); 03015 return SQLITE_OK; 03016 }else{ 03017 return SQLITE_NOMEM; 03018 } 03019 } 03020 03021 03022 /* Free all of the dynamically allocated memory held by *q 03023 */ 03024 static void queryClear(Query *q){ 03025 int i; 03026 for(i = 0; i < q->nTerms; ++i){ 03027 sqlite3_free(q->pTerms[i].pTerm); 03028 } 03029 sqlite3_free(q->pTerms); 03030 CLEAR(q); 03031 } 03032 03033 /* Free all of the dynamically allocated memory held by the 03034 ** Snippet 03035 */ 03036 static void snippetClear(Snippet *p){ 03037 sqlite3_free(p->aMatch); 03038 sqlite3_free(p->zOffset); 03039 sqlite3_free(p->zSnippet); 03040 CLEAR(p); 03041 } 03042 /* 03043 ** Append a single entry to the p->aMatch[] log. 03044 */ 03045 static void snippetAppendMatch( 03046 Snippet *p, /* Append the entry to this snippet */ 03047 int iCol, int iTerm, /* The column and query term */ 03048 int iStart, int nByte /* Offset and size of the match */ 03049 ){ 03050 int i; 03051 struct snippetMatch *pMatch; 03052 if( p->nMatch+1>=p->nAlloc ){ 03053 p->nAlloc = p->nAlloc*2 + 10; 03054 p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); 03055 if( p->aMatch==0 ){ 03056 p->nMatch = 0; 03057 p->nAlloc = 0; 03058 return; 03059 } 03060 } 03061 i = p->nMatch++; 03062 pMatch = &p->aMatch[i]; 03063 pMatch->iCol = iCol; 03064 pMatch->iTerm = iTerm; 03065 pMatch->iStart = iStart; 03066 pMatch->nByte = nByte; 03067 } 03068 03069 /* 03070 ** Sizing information for the circular buffer used in snippetOffsetsOfColumn() 03071 */ 03072 #define FTS2_ROTOR_SZ (32) 03073 #define FTS2_ROTOR_MASK (FTS2_ROTOR_SZ-1) 03074 03075 /* 03076 ** Add entries to pSnippet->aMatch[] for every match that occurs against 03077 ** document zDoc[0..nDoc-1] which is stored in column iColumn. 
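**
** (A sketch of the phrase logic below: bit i of "match" is set when
** query term i matches the current token; prevMatch is the previous
** token's match mask shifted left by one, so a term with iPhrase>1 is
** accepted only if the preceding term of its phrase matched the
** immediately preceding token.)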
03078 */ 03079 static void snippetOffsetsOfColumn( 03080 Query *pQuery, 03081 Snippet *pSnippet, 03082 int iColumn, 03083 const char *zDoc, 03084 int nDoc 03085 ){ 03086 const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ 03087 sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ 03088 sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ 03089 fulltext_vtab *pVtab; /* The full text index */ 03090 int nColumn; /* Number of columns in the index */ 03091 const QueryTerm *aTerm; /* Query string terms */ 03092 int nTerm; /* Number of query string terms */ 03093 int i, j; /* Loop counters */ 03094 int rc; /* Return code */ 03095 unsigned int match, prevMatch; /* Phrase search bitmasks */ 03096 const char *zToken; /* Next token from the tokenizer */ 03097 int nToken; /* Size of zToken */ 03098 int iBegin, iEnd, iPos; /* Offsets of beginning and end */ 03099 03100 /* The following variables keep a circular buffer of the last 03101 ** few tokens */ 03102 unsigned int iRotor = 0; /* Index of current token */ 03103 int iRotorBegin[FTS2_ROTOR_SZ]; /* Beginning offset of token */ 03104 int iRotorLen[FTS2_ROTOR_SZ]; /* Length of token */ 03105 03106 pVtab = pQuery->pFts; 03107 nColumn = pVtab->nColumn; 03108 pTokenizer = pVtab->pTokenizer; 03109 pTModule = pTokenizer->pModule; 03110 rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); 03111 if( rc ) return; 03112 pTCursor->pTokenizer = pTokenizer; 03113 aTerm = pQuery->pTerms; 03114 nTerm = pQuery->nTerms; 03115 if( nTerm>=FTS2_ROTOR_SZ ){ 03116 nTerm = FTS2_ROTOR_SZ - 1; 03117 } 03118 prevMatch = 0; 03119 while(1){ 03120 rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); 03121 if( rc ) break; 03122 iRotorBegin[iRotor&FTS2_ROTOR_MASK] = iBegin; 03123 iRotorLen[iRotor&FTS2_ROTOR_MASK] = iEnd-iBegin; 03124 match = 0; 03125 for(i=0; i<nTerm; i++){ 03126 int iCol; 03127 iCol = aTerm[i].iColumn; 03128 if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue; 03129 if( aTerm[i].nTerm>nToken ) continue; 03130 if( !aTerm[i].isPrefix && aTerm[i].nTerm<nToken ) continue; 03131 assert( aTerm[i].nTerm<=nToken ); 03132 if( memcmp(aTerm[i].pTerm, zToken, aTerm[i].nTerm) ) continue; 03133 if( aTerm[i].iPhrase>1 && (prevMatch & (1<<i))==0 ) continue; 03134 match |= 1<<i; 03135 if( i==nTerm-1 || aTerm[i+1].iPhrase==1 ){ 03136 for(j=aTerm[i].iPhrase-1; j>=0; j--){ 03137 int k = (iRotor-j) & FTS2_ROTOR_MASK; 03138 snippetAppendMatch(pSnippet, iColumn, i-j, 03139 iRotorBegin[k], iRotorLen[k]); 03140 } 03141 } 03142 } 03143 prevMatch = match<<1; 03144 iRotor++; 03145 } 03146 pTModule->xClose(pTCursor); 03147 } 03148 03149 03150 /* 03151 ** Compute all offsets for the current row of the query. 03152 ** If the offsets have already been computed, this routine is a no-op. 
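** (It is also a no-op when the parsed query has no terms, since
** there is then nothing to match.)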
03153 */ 03154 static void snippetAllOffsets(fulltext_cursor *p){ 03155 int nColumn; 03156 int iColumn, i; 03157 int iFirst, iLast; 03158 fulltext_vtab *pFts; 03159 03160 if( p->snippet.nMatch ) return; 03161 if( p->q.nTerms==0 ) return; 03162 pFts = p->q.pFts; 03163 nColumn = pFts->nColumn; 03164 iColumn = (p->iCursorType - QUERY_FULLTEXT); 03165 if( iColumn<0 || iColumn>=nColumn ){ 03166 iFirst = 0; 03167 iLast = nColumn-1; 03168 }else{ 03169 iFirst = iColumn; 03170 iLast = iColumn; 03171 } 03172 for(i=iFirst; i<=iLast; i++){ 03173 const char *zDoc; 03174 int nDoc; 03175 zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); 03176 nDoc = sqlite3_column_bytes(p->pStmt, i+1); 03177 snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); 03178 } 03179 } 03180 03181 /* 03182 ** Convert the information in the aMatch[] array of the snippet 03183 ** into the string zOffset[0..nOffset-1]. 03184 */ 03185 static void snippetOffsetText(Snippet *p){ 03186 int i; 03187 int cnt = 0; 03188 StringBuffer sb; 03189 char zBuf[200]; 03190 if( p->zOffset ) return; 03191 initStringBuffer(&sb); 03192 for(i=0; i<p->nMatch; i++){ 03193 struct snippetMatch *pMatch = &p->aMatch[i]; 03194 zBuf[0] = ' '; 03195 sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", 03196 pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); 03197 append(&sb, zBuf); 03198 cnt++; 03199 } 03200 p->zOffset = stringBufferData(&sb); 03201 p->nOffset = stringBufferLength(&sb); 03202 } 03203 03204 /* 03205 ** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set 03206 ** of matching words some of which might be in zDoc. zDoc is column 03207 ** number iCol. 03208 ** 03209 ** iBreak is suggested spot in zDoc where we could begin or end an 03210 ** excerpt. Return a value similar to iBreak but possibly adjusted 03211 ** to be a little left or right so that the break point is better. 03212 */ 03213 static int wordBoundary( 03214 int iBreak, /* The suggested break point */ 03215 const char *zDoc, /* Document text */ 03216 int nDoc, /* Number of bytes in zDoc[] */ 03217 struct snippetMatch *aMatch, /* Matching words */ 03218 int nMatch, /* Number of entries in aMatch[] */ 03219 int iCol /* The column number for zDoc[] */ 03220 ){ 03221 int i; 03222 if( iBreak<=10 ){ 03223 return 0; 03224 } 03225 if( iBreak>=nDoc-10 ){ 03226 return nDoc; 03227 } 03228 for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){} 03229 while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; } 03230 if( i<nMatch ){ 03231 if( aMatch[i].iStart<iBreak+10 ){ 03232 return aMatch[i].iStart; 03233 } 03234 if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ 03235 return aMatch[i-1].iStart; 03236 } 03237 } 03238 for(i=1; i<=10; i++){ 03239 if( safe_isspace(zDoc[iBreak-i]) ){ 03240 return iBreak - i + 1; 03241 } 03242 if( safe_isspace(zDoc[iBreak+i]) ){ 03243 return iBreak + i + 1; 03244 } 03245 } 03246 return iBreak; 03247 } 03248 03249 03250 03251 /* 03252 ** Allowed values for Snippet.aMatch[].snStatus 03253 */ 03254 #define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ 03255 #define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ 03256 03257 /* 03258 ** Generate the text of a snippet. 
03259 */ 03260 static void snippetText( 03261 fulltext_cursor *pCursor, /* The cursor we need the snippet for */ 03262 const char *zStartMark, /* Markup to appear before each match */ 03263 const char *zEndMark, /* Markup to appear after each match */ 03264 const char *zEllipsis /* Ellipsis mark */ 03265 ){ 03266 int i, j; 03267 struct snippetMatch *aMatch; 03268 int nMatch; 03269 int nDesired; 03270 StringBuffer sb; 03271 int tailCol; 03272 int tailOffset; 03273 int iCol; 03274 int nDoc; 03275 const char *zDoc; 03276 int iStart, iEnd; 03277 int tailEllipsis = 0; 03278 int iMatch; 03279 03280 03281 sqlite3_free(pCursor->snippet.zSnippet); 03282 pCursor->snippet.zSnippet = 0; 03283 aMatch = pCursor->snippet.aMatch; 03284 nMatch = pCursor->snippet.nMatch; 03285 initStringBuffer(&sb); 03286 03287 for(i=0; i<nMatch; i++){ 03288 aMatch[i].snStatus = SNIPPET_IGNORE; 03289 } 03290 nDesired = 0; 03291 for(i=0; i<pCursor->q.nTerms; i++){ 03292 for(j=0; j<nMatch; j++){ 03293 if( aMatch[j].iTerm==i ){ 03294 aMatch[j].snStatus = SNIPPET_DESIRED; 03295 nDesired++; 03296 break; 03297 } 03298 } 03299 } 03300 03301 iMatch = 0; 03302 tailCol = -1; 03303 tailOffset = 0; 03304 for(i=0; i<nMatch && nDesired>0; i++){ 03305 if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; 03306 nDesired--; 03307 iCol = aMatch[i].iCol; 03308 zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); 03309 nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); 03310 iStart = aMatch[i].iStart - 40; 03311 iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); 03312 if( iStart<=10 ){ 03313 iStart = 0; 03314 } 03315 if( iCol==tailCol && iStart<=tailOffset+20 ){ 03316 iStart = tailOffset; 03317 } 03318 if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ 03319 trimWhiteSpace(&sb); 03320 appendWhiteSpace(&sb); 03321 append(&sb, zEllipsis); 03322 appendWhiteSpace(&sb); 03323 } 03324 iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; 03325 iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); 03326 if( iEnd>=nDoc-10 ){ 03327 iEnd = nDoc; 03328 tailEllipsis = 0; 03329 }else{ 03330 tailEllipsis = 1; 03331 } 03332 while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; } 03333 while( iStart<iEnd ){ 03334 while( iMatch<nMatch && aMatch[iMatch].iStart<iStart 03335 && aMatch[iMatch].iCol<=iCol ){ 03336 iMatch++; 03337 } 03338 if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd 03339 && aMatch[iMatch].iCol==iCol ){ 03340 nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart); 03341 iStart = aMatch[iMatch].iStart; 03342 append(&sb, zStartMark); 03343 nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte); 03344 append(&sb, zEndMark); 03345 iStart += aMatch[iMatch].nByte; 03346 for(j=iMatch+1; j<nMatch; j++){ 03347 if( aMatch[j].iTerm==aMatch[iMatch].iTerm 03348 && aMatch[j].snStatus==SNIPPET_DESIRED ){ 03349 nDesired--; 03350 aMatch[j].snStatus = SNIPPET_IGNORE; 03351 } 03352 } 03353 }else{ 03354 nappend(&sb, &zDoc[iStart], iEnd - iStart); 03355 iStart = iEnd; 03356 } 03357 } 03358 tailCol = iCol; 03359 tailOffset = iEnd; 03360 } 03361 trimWhiteSpace(&sb); 03362 if( tailEllipsis ){ 03363 appendWhiteSpace(&sb); 03364 append(&sb, zEllipsis); 03365 } 03366 pCursor->snippet.zSnippet = stringBufferData(&sb); 03367 pCursor->snippet.nSnippet = stringBufferLength(&sb); 03368 } 03369 03370 03371 /* 03372 ** Close the cursor. For additional information see the documentation 03373 ** on the xClose method of the virtual table interface. 
03374 */ 03375 static int fulltextClose(sqlite3_vtab_cursor *pCursor){ 03376 fulltext_cursor *c = (fulltext_cursor *) pCursor; 03377 TRACE(("FTS2 Close %p\n", c)); 03378 sqlite3_finalize(c->pStmt); 03379 queryClear(&c->q); 03380 snippetClear(&c->snippet); 03381 if( c->result.nData!=0 ) dlrDestroy(&c->reader); 03382 dataBufferDestroy(&c->result); 03383 sqlite3_free(c); 03384 return SQLITE_OK; 03385 } 03386 03387 static int fulltextNext(sqlite3_vtab_cursor *pCursor){ 03388 fulltext_cursor *c = (fulltext_cursor *) pCursor; 03389 int rc; 03390 03391 TRACE(("FTS2 Next %p\n", pCursor)); 03392 snippetClear(&c->snippet); 03393 if( c->iCursorType < QUERY_FULLTEXT ){ 03394 /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ 03395 rc = sqlite3_step(c->pStmt); 03396 switch( rc ){ 03397 case SQLITE_ROW: 03398 c->eof = 0; 03399 return SQLITE_OK; 03400 case SQLITE_DONE: 03401 c->eof = 1; 03402 return SQLITE_OK; 03403 default: 03404 c->eof = 1; 03405 return rc; 03406 } 03407 } else { /* full-text query */ 03408 rc = sqlite3_reset(c->pStmt); 03409 if( rc!=SQLITE_OK ) return rc; 03410 03411 if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ 03412 c->eof = 1; 03413 return SQLITE_OK; 03414 } 03415 rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); 03416 dlrStep(&c->reader); 03417 if( rc!=SQLITE_OK ) return rc; 03418 /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ 03419 rc = sqlite3_step(c->pStmt); 03420 if( rc==SQLITE_ROW ){ /* the case we expect */ 03421 c->eof = 0; 03422 return SQLITE_OK; 03423 } 03424 /* an error occurred; abort */ 03425 return rc==SQLITE_DONE ? SQLITE_ERROR : rc; 03426 } 03427 } 03428 03429 03430 /* TODO(shess) If we pushed LeafReader to the top of the file, or to 03431 ** another file, term_select() could be pushed above 03432 ** docListOfTerm(). 03433 */ 03434 static int termSelect(fulltext_vtab *v, int iColumn, 03435 const char *pTerm, int nTerm, int isPrefix, 03436 DocListType iType, DataBuffer *out); 03437 03438 /* Return a DocList corresponding to the query term *pTerm. If *pTerm 03439 ** is the first term of a phrase query, go ahead and evaluate the phrase 03440 ** query and return the doclist for the entire phrase query. 03441 ** 03442 ** The resulting DL_DOCIDS doclist is stored in pResult, which is 03443 ** overwritten. 03444 */ 03445 static int docListOfTerm( 03446 fulltext_vtab *v, /* The full text index */ 03447 int iColumn, /* column to restrict to. No restriction if >=nColumn */ 03448 QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ 03449 DataBuffer *pResult /* Write the result here */ 03450 ){ 03451 DataBuffer left, right, new; 03452 int i, rc; 03453 03454 /* No phrase search if no position info. */ 03455 assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); 03456 03457 /* This code should never be called with buffered updates. */ 03458 assert( v->nPendingData<0 ); 03459 03460 dataBufferInit(&left, 0); 03461 rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, 03462 0<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS, &left); 03463 if( rc ) return rc; 03464 for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ 03465 dataBufferInit(&right, 0); 03466 rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, 03467 pQTerm[i].isPrefix, DL_POSITIONS, &right); 03468 if( rc ){ 03469 dataBufferDestroy(&left); 03470 return rc; 03471 } 03472 dataBufferInit(&new, 0); 03473 docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, 03474 i<pQTerm->nPhrase ? 
                                      DL_POSITIONS : DL_DOCIDS, &new);
    dataBufferDestroy(&left);
    dataBufferDestroy(&right);
    left = new;
  }
  *pResult = left;
  return SQLITE_OK;
}

/* Add a new term pTerm[0..nTerm-1] to the query *q.
*/
static void queryAdd(Query *q, const char *pTerm, int nTerm){
  QueryTerm *t;
  QueryTerm *pNew;

  /* Grow the term array through a temporary so that the existing
  ** terms are not leaked if the reallocation fails; queryClear() can
  ** still free them.
  */
  pNew = sqlite3_realloc(q->pTerms, (q->nTerms+1) * sizeof(q->pTerms[0]));
  if( pNew==0 ) return;
  q->pTerms = pNew;
  ++q->nTerms;
  t = &q->pTerms[q->nTerms - 1];
  CLEAR(t);
  t->pTerm = sqlite3_malloc(nTerm+1);
  if( t->pTerm==0 ){
    /* Drop the half-constructed term on allocation failure. */
    --q->nTerms;
    return;
  }
  memcpy(t->pTerm, pTerm, nTerm);
  t->pTerm[nTerm] = 0;
  t->nTerm = nTerm;
  t->isOr = q->nextIsOr;
  t->isPrefix = 0;
  q->nextIsOr = 0;
  t->iColumn = q->nextColumn;
  q->nextColumn = q->dfltColumn;
}

/*
** Check to see if the string zToken[0..nToken-1] matches any
** column name in the virtual table.  If it does,
** return the zero-indexed column number.  If not, return -1.
*/
static int checkColumnSpecifier(
  fulltext_vtab *pVtab,    /* The virtual table */
  const char *zToken,      /* Text of the token */
  int nToken               /* Number of characters in the token */
){
  int i;
  for(i=0; i<pVtab->nColumn; i++){
    if( memcmp(pVtab->azColumn[i], zToken, nToken)==0
        && pVtab->azColumn[i][nToken]==0 ){
      return i;
    }
  }
  return -1;
}

/*
** Parse the text at pSegment[0..nSegment-1].  Add additional terms
** to the query being assembled in pQuery.
**
** inPhrase is true if pSegment[0..nSegment-1] is contained within
** double-quotes.  If inPhrase is true, then the first term
** is marked with the number of terms in the phrase less one, and
** OR and "-" syntax is ignored.  If inPhrase is false, then every
** term found is marked with nPhrase=0 and OR and "-" syntax is
** significant.
*/
static int tokenizeSegment(
  sqlite3_tokenizer *pTokenizer,       /* The tokenizer to use */
  const char *pSegment, int nSegment,  /* Query expression being parsed */
  int inPhrase,                        /* True if within "..."
*/ 03540 Query *pQuery /* Append results here */ 03541 ){ 03542 const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; 03543 sqlite3_tokenizer_cursor *pCursor; 03544 int firstIndex = pQuery->nTerms; 03545 int iCol; 03546 int nTerm = 1; 03547 03548 int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); 03549 if( rc!=SQLITE_OK ) return rc; 03550 pCursor->pTokenizer = pTokenizer; 03551 03552 while( 1 ){ 03553 const char *pToken; 03554 int nToken, iBegin, iEnd, iPos; 03555 03556 rc = pModule->xNext(pCursor, 03557 &pToken, &nToken, 03558 &iBegin, &iEnd, &iPos); 03559 if( rc!=SQLITE_OK ) break; 03560 if( !inPhrase && 03561 pSegment[iEnd]==':' && 03562 (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ 03563 pQuery->nextColumn = iCol; 03564 continue; 03565 } 03566 if( !inPhrase && pQuery->nTerms>0 && nToken==2 03567 && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ 03568 pQuery->nextIsOr = 1; 03569 continue; 03570 } 03571 queryAdd(pQuery, pToken, nToken); 03572 if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ 03573 pQuery->pTerms[pQuery->nTerms-1].isNot = 1; 03574 } 03575 if( iEnd<nSegment && pSegment[iEnd]=='*' ){ 03576 pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1; 03577 } 03578 pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; 03579 if( inPhrase ){ 03580 nTerm++; 03581 } 03582 } 03583 03584 if( inPhrase && pQuery->nTerms>firstIndex ){ 03585 pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; 03586 } 03587 03588 return pModule->xClose(pCursor); 03589 } 03590 03591 /* Parse a query string, yielding a Query object pQuery. 03592 ** 03593 ** The calling function will need to queryClear() to clean up 03594 ** the dynamically allocated memory held by pQuery. 03595 */ 03596 static int parseQuery( 03597 fulltext_vtab *v, /* The fulltext index */ 03598 const char *zInput, /* Input text of the query string */ 03599 int nInput, /* Size of the input text */ 03600 int dfltColumn, /* Default column of the index to match against */ 03601 Query *pQuery /* Write the parse results here. */ 03602 ){ 03603 int iInput, inPhrase = 0; 03604 03605 if( zInput==0 ) nInput = 0; 03606 if( nInput<0 ) nInput = strlen(zInput); 03607 pQuery->nTerms = 0; 03608 pQuery->pTerms = NULL; 03609 pQuery->nextIsOr = 0; 03610 pQuery->nextColumn = dfltColumn; 03611 pQuery->dfltColumn = dfltColumn; 03612 pQuery->pFts = v; 03613 03614 for(iInput=0; iInput<nInput; ++iInput){ 03615 int i; 03616 for(i=iInput; i<nInput && zInput[i]!='"'; ++i){} 03617 if( i>iInput ){ 03618 tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, 03619 pQuery); 03620 } 03621 iInput = i; 03622 if( i<nInput ){ 03623 assert( zInput[i]=='"' ); 03624 inPhrase = !inPhrase; 03625 } 03626 } 03627 03628 if( inPhrase ){ 03629 /* unmatched quote */ 03630 queryClear(pQuery); 03631 return SQLITE_ERROR; 03632 } 03633 return SQLITE_OK; 03634 } 03635 03636 /* TODO(shess) Refactor the code to remove this forward decl. */ 03637 static int flushPendingTerms(fulltext_vtab *v); 03638 03639 /* Perform a full-text query using the search expression in 03640 ** zInput[0..nInput-1]. Return a list of matching documents 03641 ** in pResult. 03642 ** 03643 ** Queries must match column iColumn. Or if iColumn>=nColumn 03644 ** they are allowed to match against any column. 
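**
** For illustration only (the column name is hypothetical), the
** grammar handled by parseQuery() and tokenizeSegment() above accepts
** expressions such as:
**
**   sqlite database          (implicit AND of the two terms)
**   sqlite OR mysql          (OR joins the adjacent terms)
**   database -mysql          (exclude documents containing "mysql")
**   "full text search"       (phrase; OR and "-" are ignored inside)
**   title:sqlite             (restrict the term to column "title")
**   data*                    (prefix match)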
03645 */ 03646 static int fulltextQuery( 03647 fulltext_vtab *v, /* The full text index */ 03648 int iColumn, /* Match against this column by default */ 03649 const char *zInput, /* The query string */ 03650 int nInput, /* Number of bytes in zInput[] */ 03651 DataBuffer *pResult, /* Write the result doclist here */ 03652 Query *pQuery /* Put parsed query string here */ 03653 ){ 03654 int i, iNext, rc; 03655 DataBuffer left, right, or, new; 03656 int nNot = 0; 03657 QueryTerm *aTerm; 03658 03659 /* TODO(shess) Instead of flushing pendingTerms, we could query for 03660 ** the relevant term and merge the doclist into what we receive from 03661 ** the database. Wait and see if this is a common issue, first. 03662 ** 03663 ** A good reason not to flush is to not generate update-related 03664 ** error codes from here. 03665 */ 03666 03667 /* Flush any buffered updates before executing the query. */ 03668 rc = flushPendingTerms(v); 03669 if( rc!=SQLITE_OK ) return rc; 03670 03671 /* TODO(shess) I think that the queryClear() calls below are not 03672 ** necessary, because fulltextClose() already clears the query. 03673 */ 03674 rc = parseQuery(v, zInput, nInput, iColumn, pQuery); 03675 if( rc!=SQLITE_OK ) return rc; 03676 03677 /* Empty or NULL queries return no results. */ 03678 if( pQuery->nTerms==0 ){ 03679 dataBufferInit(pResult, 0); 03680 return SQLITE_OK; 03681 } 03682 03683 /* Merge AND terms. */ 03684 /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */ 03685 aTerm = pQuery->pTerms; 03686 for(i = 0; i<pQuery->nTerms; i=iNext){ 03687 if( aTerm[i].isNot ){ 03688 /* Handle all NOT terms in a separate pass */ 03689 nNot++; 03690 iNext = i + aTerm[i].nPhrase+1; 03691 continue; 03692 } 03693 iNext = i + aTerm[i].nPhrase + 1; 03694 rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); 03695 if( rc ){ 03696 if( i!=nNot ) dataBufferDestroy(&left); 03697 queryClear(pQuery); 03698 return rc; 03699 } 03700 while( iNext<pQuery->nTerms && aTerm[iNext].isOr ){ 03701 rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); 03702 iNext += aTerm[iNext].nPhrase + 1; 03703 if( rc ){ 03704 if( i!=nNot ) dataBufferDestroy(&left); 03705 dataBufferDestroy(&right); 03706 queryClear(pQuery); 03707 return rc; 03708 } 03709 dataBufferInit(&new, 0); 03710 docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); 03711 dataBufferDestroy(&right); 03712 dataBufferDestroy(&or); 03713 right = new; 03714 } 03715 if( i==nNot ){ /* first term processed. */ 03716 left = right; 03717 }else{ 03718 dataBufferInit(&new, 0); 03719 docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); 03720 dataBufferDestroy(&right); 03721 dataBufferDestroy(&left); 03722 left = new; 03723 } 03724 } 03725 03726 if( nNot==pQuery->nTerms ){ 03727 /* We do not yet know how to handle a query of only NOT terms */ 03728 return SQLITE_ERROR; 03729 } 03730 03731 /* Do the EXCEPT terms */ 03732 for(i=0; i<pQuery->nTerms; i += aTerm[i].nPhrase + 1){ 03733 if( !aTerm[i].isNot ) continue; 03734 rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); 03735 if( rc ){ 03736 queryClear(pQuery); 03737 dataBufferDestroy(&left); 03738 return rc; 03739 } 03740 dataBufferInit(&new, 0); 03741 docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); 03742 dataBufferDestroy(&right); 03743 dataBufferDestroy(&left); 03744 left = new; 03745 } 03746 03747 *pResult = left; 03748 return rc; 03749 } 03750 03751 /* 03752 ** This is the xFilter interface for the virtual table. 
See 03753 ** the virtual table xFilter method documentation for additional 03754 ** information. 03755 ** 03756 ** If idxNum==QUERY_GENERIC then do a full table scan against 03757 ** the %_content table. 03758 ** 03759 ** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry 03760 ** in the %_content table. 03761 ** 03762 ** If idxNum>=QUERY_FULLTEXT then use the full text index. The 03763 ** column on the left-hand side of the MATCH operator is column 03764 ** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand 03765 ** side of the MATCH operator. 03766 */ 03767 /* TODO(shess) Upgrade the cursor initialization and destruction to 03768 ** account for fulltextFilter() being called multiple times on the 03769 ** same cursor. The current solution is very fragile. Apply fix to 03770 ** fts2 as appropriate. 03771 */ 03772 static int fulltextFilter( 03773 sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ 03774 int idxNum, const char *idxStr, /* Which indexing scheme to use */ 03775 int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ 03776 ){ 03777 fulltext_cursor *c = (fulltext_cursor *) pCursor; 03778 fulltext_vtab *v = cursor_vtab(c); 03779 int rc; 03780 03781 TRACE(("FTS2 Filter %p\n",pCursor)); 03782 03783 /* If the cursor has a statement that was not prepared according to 03784 ** idxNum, clear it. I believe all calls to fulltextFilter with a 03785 ** given cursor will have the same idxNum , but in this case it's 03786 ** easy to be safe. 03787 */ 03788 if( c->pStmt && c->iCursorType!=idxNum ){ 03789 sqlite3_finalize(c->pStmt); 03790 c->pStmt = NULL; 03791 } 03792 03793 /* Get a fresh statement appropriate to idxNum. */ 03794 /* TODO(shess): Add a prepared-statement cache in the vt structure. 03795 ** The cache must handle multiple open cursors. Easier to cache the 03796 ** statement variants at the vt to reduce malloc/realloc/free here. 03797 ** Or we could have a StringBuffer variant which allowed stack 03798 ** construction for small values. 03799 */ 03800 if( !c->pStmt ){ 03801 char *zSql = sqlite3_mprintf("select rowid, * from %%_content %s", 03802 idxNum==QUERY_GENERIC ? "" : "where rowid=?"); 03803 rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); 03804 sqlite3_free(zSql); 03805 if( rc!=SQLITE_OK ) return rc; 03806 c->iCursorType = idxNum; 03807 }else{ 03808 sqlite3_reset(c->pStmt); 03809 assert( c->iCursorType==idxNum ); 03810 } 03811 03812 switch( idxNum ){ 03813 case QUERY_GENERIC: 03814 break; 03815 03816 case QUERY_ROWID: 03817 rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); 03818 if( rc!=SQLITE_OK ) return rc; 03819 break; 03820 03821 default: /* full-text search */ 03822 { 03823 const char *zQuery = (const char *)sqlite3_value_text(argv[0]); 03824 assert( idxNum<=QUERY_FULLTEXT+v->nColumn); 03825 assert( argc==1 ); 03826 queryClear(&c->q); 03827 if( c->result.nData!=0 ){ 03828 /* This case happens if the same cursor is used repeatedly. */ 03829 dlrDestroy(&c->reader); 03830 dataBufferReset(&c->result); 03831 }else{ 03832 dataBufferInit(&c->result, 0); 03833 } 03834 rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); 03835 if( rc!=SQLITE_OK ) return rc; 03836 if( c->result.nData!=0 ){ 03837 dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); 03838 } 03839 break; 03840 } 03841 } 03842 03843 return fulltextNext(pCursor); 03844 } 03845 03846 /* This is the xEof method of the virtual table. 
** The SQLite core calls this routine to find out if it has reached the
** end of a query's result set.
*/
static int fulltextEof(sqlite3_vtab_cursor *pCursor){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  return c->eof;
}

/* This is the xColumn method of the virtual table.  The SQLite
** core calls this method during a query when it needs the value
** of a column from the virtual table.  This method needs to use
** one of the sqlite3_result_*() routines to store the requested
** value back in pContext.
*/
static int fulltextColumn(sqlite3_vtab_cursor *pCursor,
                          sqlite3_context *pContext, int idxCol){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;
  fulltext_vtab *v = cursor_vtab(c);

  if( idxCol<v->nColumn ){
    sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1);
    sqlite3_result_value(pContext, pVal);
  }else if( idxCol==v->nColumn ){
    /* The extra column whose name is the same as the table.
    ** Return a blob which is a pointer to the cursor
    */
    sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT);
  }
  return SQLITE_OK;
}

/* This is the xRowid method.  The SQLite core calls this routine to
** retrieve the rowid for the current row of the result set.  The
** rowid should be written to *pRowid.
*/
static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
  fulltext_cursor *c = (fulltext_cursor *) pCursor;

  *pRowid = sqlite3_column_int64(c->pStmt, 0);
  return SQLITE_OK;
}

/* Add all terms in [zText] to pendingTerms table.  If [iColumn] >= 0,
** we also store positions and offsets in the hash table using that
** column number.
*/
static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid,
                      const char *zText, int iColumn){
  sqlite3_tokenizer *pTokenizer = v->pTokenizer;
  sqlite3_tokenizer_cursor *pCursor;
  const char *pToken;
  int nTokenBytes;
  int iStartOffset, iEndOffset, iPosition;
  int rc;

  rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor);
  if( rc!=SQLITE_OK ) return rc;

  pCursor->pTokenizer = pTokenizer;
  while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor,
                                                   &pToken, &nTokenBytes,
                                                   &iStartOffset, &iEndOffset,
                                                   &iPosition)) ){
    DLCollector *p;
    int nData;                   /* Size of doclist before our update. */

    /* Positions can't be negative; we use -1 as a terminator
    ** internally.  Token can't be NULL or empty. */
    if( iPosition<0 || pToken==NULL || nTokenBytes==0 ){
      rc = SQLITE_ERROR;
      break;
    }

    p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes);
    if( p==NULL ){
      nData = 0;
      p = dlcNew(iDocid, DL_DEFAULT);
      fts2HashInsert(&v->pendingTerms, pToken, nTokenBytes, p);

      /* Overhead for our hash table entry, the key, and the value. */
      v->nPendingData += sizeof(struct fts2HashElem)+sizeof(*p)+nTokenBytes;
    }else{
      nData = p->b.nData;
      if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid);
    }
    if( iColumn>=0 ){
      dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset);
    }

    /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */
    v->nPendingData += p->b.nData-nData;
  }

  /* TODO(shess) Check return?
  ** Should this be able to cause errors at this point?  Actually, same
  ** question about sqlite3_finalize(), though one could argue that
  ** failure there means that the data is not durable.  *ponder*
  */
  pTokenizer->pModule->xClose(pCursor);
  if( SQLITE_DONE==rc ) return SQLITE_OK;
  return rc;
}

/* Add doclists for all terms in [pValues] to pendingTerms table. */
static int insertTerms(fulltext_vtab *v, sqlite_int64 iRowid,
                       sqlite3_value **pValues){
  int i;
  for(i = 0; i < v->nColumn ; ++i){
    char *zText = (char*)sqlite3_value_text(pValues[i]);
    int rc = buildTerms(v, iRowid, zText, i);
    if( rc!=SQLITE_OK ) return rc;
  }
  return SQLITE_OK;
}

/* Add empty doclists for all terms in the given row's content to
** pendingTerms.
*/
static int deleteTerms(fulltext_vtab *v, sqlite_int64 iRowid){
  const char **pValues;
  int i, rc;

  /* TODO(shess) Should we allow such tables at all? */
  if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR;

  rc = content_select(v, iRowid, &pValues);
  if( rc!=SQLITE_OK ) return rc;

  for(i = 0 ; i < v->nColumn; ++i) {
    rc = buildTerms(v, iRowid, pValues[i], -1);
    if( rc!=SQLITE_OK ) break;
  }

  freeStringArray(v->nColumn, pValues);
  return rc;   /* Propagate any error from buildTerms() above. */
}

/* TODO(shess) Refactor the code to remove this forward decl. */
static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid);

/* Insert a row into the %_content table; set *piRowid to be the ID of the
** new row.  Add doclists for terms to pendingTerms.
*/
static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid,
                        sqlite3_value **pValues, sqlite_int64 *piRowid){
  int rc;

  rc = content_insert(v, pRequestRowid, pValues);  /* execute an SQL INSERT */
  if( rc!=SQLITE_OK ) return rc;

  *piRowid = sqlite3_last_insert_rowid(v->db);
  rc = initPendingTerms(v, *piRowid);
  if( rc!=SQLITE_OK ) return rc;

  return insertTerms(v, *piRowid, pValues);
}

/* Delete a row from the %_content table; add empty doclists for terms
** to pendingTerms.
*/
static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){
  int rc = initPendingTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  rc = deleteTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  return content_delete(v, iRow);  /* execute an SQL DELETE */
}

/* Update a row in the %_content table; add delete doclists to
** pendingTerms for old terms not in the new data, add insert doclists
** to pendingTerms for terms in the new data.
*/
static int index_update(fulltext_vtab *v, sqlite_int64 iRow,
                        sqlite3_value **pValues){
  int rc = initPendingTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  /* Generate an empty doclist for each term that previously appeared in this
  ** row. */
  rc = deleteTerms(v, iRow);
  if( rc!=SQLITE_OK ) return rc;

  rc = content_update(v, pValues, iRow);  /* execute an SQL UPDATE */
  if( rc!=SQLITE_OK ) return rc;

  /* Now add positions for terms which appear in the updated row.
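  ** The net effect, sketched informally: after an UPDATE the pending
  ** buffer holds an empty doclist for every term of the old row (a
  ** deletion marker) plus a positioned doclist for every term of the
  ** new row.  When segments are merged, the newer doclist for a docid
  ** supersedes the older data, which is why the on-disk doclist
  ** format always carries position information.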
*/ 04035 return insertTerms(v, iRow, pValues); 04036 } 04037 04038 /*******************************************************************/ 04039 /* InteriorWriter is used to collect terms and block references into 04040 ** interior nodes in %_segments. See commentary at top of file for 04041 ** format. 04042 */ 04043 04044 /* How large interior nodes can grow. */ 04045 #define INTERIOR_MAX 2048 04046 04047 /* Minimum number of terms per interior node (except the root). This 04048 ** prevents large terms from making the tree too skinny - must be >0 04049 ** so that the tree always makes progress. Note that the min tree 04050 ** fanout will be INTERIOR_MIN_TERMS+1. 04051 */ 04052 #define INTERIOR_MIN_TERMS 7 04053 #if INTERIOR_MIN_TERMS<1 04054 # error INTERIOR_MIN_TERMS must be greater than 0. 04055 #endif 04056 04057 /* ROOT_MAX controls how much data is stored inline in the segment 04058 ** directory. 04059 */ 04060 /* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's 04061 ** only here so that interiorWriterRootInfo() and leafWriterRootInfo() 04062 ** can both see it, but if the caller passed it in, we wouldn't even 04063 ** need a define. 04064 */ 04065 #define ROOT_MAX 1024 04066 #if ROOT_MAX<VARINT_MAX*2 04067 # error ROOT_MAX must have enough space for a header. 04068 #endif 04069 04070 /* InteriorBlock stores a linked-list of interior blocks while a lower 04071 ** layer is being constructed. 04072 */ 04073 typedef struct InteriorBlock { 04074 DataBuffer term; /* Leftmost term in block's subtree. */ 04075 DataBuffer data; /* Accumulated data for the block. */ 04076 struct InteriorBlock *next; 04077 } InteriorBlock; 04078 04079 static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock, 04080 const char *pTerm, int nTerm){ 04081 InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock)); 04082 char c[VARINT_MAX+VARINT_MAX]; 04083 int n; 04084 04085 if( block ){ 04086 memset(block, 0, sizeof(*block)); 04087 dataBufferInit(&block->term, 0); 04088 dataBufferReplace(&block->term, pTerm, nTerm); 04089 04090 n = putVarint(c, iHeight); 04091 n += putVarint(c+n, iChildBlock); 04092 dataBufferInit(&block->data, INTERIOR_MAX); 04093 dataBufferReplace(&block->data, c, n); 04094 } 04095 return block; 04096 } 04097 04098 #ifndef NDEBUG 04099 /* Verify that the data is readable as an interior node. */ 04100 static void interiorBlockValidate(InteriorBlock *pBlock){ 04101 const char *pData = pBlock->data.pData; 04102 int nData = pBlock->data.nData; 04103 int n, iDummy; 04104 sqlite_int64 iBlockid; 04105 04106 assert( nData>0 ); 04107 assert( pData!=0 ); 04108 assert( pData+nData>pData ); 04109 04110 /* Must lead with height of node as a varint(n), n>0 */ 04111 n = getVarint32(pData, &iDummy); 04112 assert( n>0 ); 04113 assert( iDummy>0 ); 04114 assert( n<nData ); 04115 pData += n; 04116 nData -= n; 04117 04118 /* Must contain iBlockid. */ 04119 n = getVarint(pData, &iBlockid); 04120 assert( n>0 ); 04121 assert( n<=nData ); 04122 pData += n; 04123 nData -= n; 04124 04125 /* Zero or more terms of positive length */ 04126 if( nData!=0 ){ 04127 /* First term is not delta-encoded. */ 04128 n = getVarint32(pData, &iDummy); 04129 assert( n>0 ); 04130 assert( iDummy>0 ); 04131 assert( n+iDummy>0); 04132 assert( n+iDummy<=nData ); 04133 pData += n+iDummy; 04134 nData -= n+iDummy; 04135 04136 /* Following terms delta-encoded. */ 04137 while( nData!=0 ){ 04138 /* Length of shared prefix. 
*/ 04139 n = getVarint32(pData, &iDummy); 04140 assert( n>0 ); 04141 assert( iDummy>=0 ); 04142 assert( n<nData ); 04143 pData += n; 04144 nData -= n; 04145 04146 /* Length and data of distinct suffix. */ 04147 n = getVarint32(pData, &iDummy); 04148 assert( n>0 ); 04149 assert( iDummy>0 ); 04150 assert( n+iDummy>0); 04151 assert( n+iDummy<=nData ); 04152 pData += n+iDummy; 04153 nData -= n+iDummy; 04154 } 04155 } 04156 } 04157 #define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) 04158 #else 04159 #define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) 04160 #endif 04161 04162 typedef struct InteriorWriter { 04163 int iHeight; /* from 0 at leaves. */ 04164 InteriorBlock *first, *last; 04165 struct InteriorWriter *parentWriter; 04166 04167 DataBuffer term; /* Last term written to block "last". */ 04168 sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ 04169 #ifndef NDEBUG 04170 sqlite_int64 iLastChildBlock; /* for consistency checks. */ 04171 #endif 04172 } InteriorWriter; 04173 04174 /* Initialize an interior node where pTerm[nTerm] marks the leftmost 04175 ** term in the tree. iChildBlock is the leftmost child block at the 04176 ** next level down the tree. 04177 */ 04178 static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, 04179 sqlite_int64 iChildBlock, 04180 InteriorWriter *pWriter){ 04181 InteriorBlock *block; 04182 assert( iHeight>0 ); 04183 CLEAR(pWriter); 04184 04185 pWriter->iHeight = iHeight; 04186 pWriter->iOpeningChildBlock = iChildBlock; 04187 #ifndef NDEBUG 04188 pWriter->iLastChildBlock = iChildBlock; 04189 #endif 04190 block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); 04191 pWriter->last = pWriter->first = block; 04192 ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); 04193 dataBufferInit(&pWriter->term, 0); 04194 } 04195 04196 /* Append the child node rooted at iChildBlock to the interior node, 04197 ** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. 04198 */ 04199 static void interiorWriterAppend(InteriorWriter *pWriter, 04200 const char *pTerm, int nTerm, 04201 sqlite_int64 iChildBlock){ 04202 char c[VARINT_MAX+VARINT_MAX]; 04203 int n, nPrefix = 0; 04204 04205 ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); 04206 04207 /* The first term written into an interior node is actually 04208 ** associated with the second child added (the first child was added 04209 ** in interiorWriterInit, or in the if clause at the bottom of this 04210 ** function). That term gets encoded straight up, with nPrefix left 04211 ** at 0. 04212 */ 04213 if( pWriter->term.nData==0 ){ 04214 n = putVarint(c, nTerm); 04215 }else{ 04216 while( nPrefix<pWriter->term.nData && 04217 pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ 04218 nPrefix++; 04219 } 04220 04221 n = putVarint(c, nPrefix); 04222 n += putVarint(c+n, nTerm-nPrefix); 04223 } 04224 04225 #ifndef NDEBUG 04226 pWriter->iLastChildBlock++; 04227 #endif 04228 assert( pWriter->iLastChildBlock==iChildBlock ); 04229 04230 /* Overflow to a new block if the new term makes the current block 04231 ** too big, and the current block already has enough terms. 
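  ** For example (hypothetical numbers): with INTERIOR_MAX at 2048, a
  ** block holding 2040 bytes of encoded terms spills to a fresh block
  ** rather than append 30 more bytes, but only if it already spans
  ** more than INTERIOR_MIN_TERMS children; otherwise it is allowed to
  ** grow past INTERIOR_MAX so that very long terms cannot stall the
  ** tree's progress.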
  */
  if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX &&
      iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){
    pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock,
                                           pTerm, nTerm);
    pWriter->last = pWriter->last->next;
    pWriter->iOpeningChildBlock = iChildBlock;
    dataBufferReset(&pWriter->term);
  }else{
    dataBufferAppend2(&pWriter->last->data, c, n,
                      pTerm+nPrefix, nTerm-nPrefix);
    dataBufferReplace(&pWriter->term, pTerm, nTerm);
  }
  ASSERT_VALID_INTERIOR_BLOCK(pWriter->last);
}

/* Free the space used by pWriter, including the linked-list of
** InteriorBlocks, and parentWriter, if present.
*/
static int interiorWriterDestroy(InteriorWriter *pWriter){
  InteriorBlock *block = pWriter->first;

  while( block!=NULL ){
    InteriorBlock *b = block;
    block = block->next;
    dataBufferDestroy(&b->term);
    dataBufferDestroy(&b->data);
    sqlite3_free(b);
  }
  if( pWriter->parentWriter!=NULL ){
    interiorWriterDestroy(pWriter->parentWriter);
    sqlite3_free(pWriter->parentWriter);
  }
  dataBufferDestroy(&pWriter->term);
  SCRAMBLE(pWriter);
  return SQLITE_OK;
}

/* If pWriter can fit entirely in ROOT_MAX, return it as the root info
** directly, leaving *piEndBlockid unchanged.  Otherwise, flush
** pWriter to %_segments, building a new layer of interior nodes, and
** recursively ask for their root info.
*/
static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter,
                                  char **ppRootInfo, int *pnRootInfo,
                                  sqlite_int64 *piEndBlockid){
  InteriorBlock *block = pWriter->first;
  sqlite_int64 iBlockid = 0;
  int rc;

  /* If we can fit the segment inline */
  if( block==pWriter->last && block->data.nData<ROOT_MAX ){
    *ppRootInfo = block->data.pData;
    *pnRootInfo = block->data.nData;
    return SQLITE_OK;
  }

  /* Flush the first block to %_segments, and create a new level of
  ** interior node.
  */
  ASSERT_VALID_INTERIOR_BLOCK(block);
  rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
  if( rc!=SQLITE_OK ) return rc;
  *piEndBlockid = iBlockid;

  pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter));
  interiorWriterInit(pWriter->iHeight+1,
                     block->term.pData, block->term.nData,
                     iBlockid, pWriter->parentWriter);

  /* Flush additional blocks and append to the higher interior
  ** node.
  */
  for(block=block->next; block!=NULL; block=block->next){
    ASSERT_VALID_INTERIOR_BLOCK(block);
    rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
    if( rc!=SQLITE_OK ) return rc;
    *piEndBlockid = iBlockid;

    interiorWriterAppend(pWriter->parentWriter,
                         block->term.pData, block->term.nData, iBlockid);
  }

  /* Parent node gets the chance to be the root. */
  return interiorWriterRootInfo(v, pWriter->parentWriter,
                                ppRootInfo, pnRootInfo, piEndBlockid);
}

/****************************************************************/
/* InteriorReader is used to read off the data from an interior node
** (see comment at top of file for the format).
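**
** As a reader's sketch (reconstructed from interiorWriterInit() and
** interiorWriterAppend() above, not a normative spec), the data
** consumed here is laid out roughly as:
**
**   varint iHeight;             (height above leaves, always >0)
**   varint iBlockid;            (leftmost child blockid)
**   optional {
**     varint nTerm;             (length of first term)
**     char pTerm[nTerm];        (content of first term)
**     array {
**       varint nPrefix;         (length of prefix shared with previous)
**       varint nSuffix;         (length of unshared suffix)
**       char pTermSuffix[nSuffix];
**     }
**   }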
04323 */ 04324 typedef struct InteriorReader { 04325 const char *pData; 04326 int nData; 04327 04328 DataBuffer term; /* previous term, for decoding term delta. */ 04329 04330 sqlite_int64 iBlockid; 04331 } InteriorReader; 04332 04333 static void interiorReaderDestroy(InteriorReader *pReader){ 04334 dataBufferDestroy(&pReader->term); 04335 SCRAMBLE(pReader); 04336 } 04337 04338 /* TODO(shess) The assertions are great, but what if we're in NDEBUG 04339 ** and the blob is empty or otherwise contains suspect data? 04340 */ 04341 static void interiorReaderInit(const char *pData, int nData, 04342 InteriorReader *pReader){ 04343 int n, nTerm; 04344 04345 /* Require at least the leading flag byte */ 04346 assert( nData>0 ); 04347 assert( pData[0]!='\0' ); 04348 04349 CLEAR(pReader); 04350 04351 /* Decode the base blockid, and set the cursor to the first term. */ 04352 n = getVarint(pData+1, &pReader->iBlockid); 04353 assert( 1+n<=nData ); 04354 pReader->pData = pData+1+n; 04355 pReader->nData = nData-(1+n); 04356 04357 /* A single-child interior node (such as when a leaf node was too 04358 ** large for the segment directory) won't have any terms. 04359 ** Otherwise, decode the first term. 04360 */ 04361 if( pReader->nData==0 ){ 04362 dataBufferInit(&pReader->term, 0); 04363 }else{ 04364 n = getVarint32(pReader->pData, &nTerm); 04365 dataBufferInit(&pReader->term, nTerm); 04366 dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); 04367 assert( n+nTerm<=pReader->nData ); 04368 pReader->pData += n+nTerm; 04369 pReader->nData -= n+nTerm; 04370 } 04371 } 04372 04373 static int interiorReaderAtEnd(InteriorReader *pReader){ 04374 return pReader->term.nData==0; 04375 } 04376 04377 static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ 04378 return pReader->iBlockid; 04379 } 04380 04381 static int interiorReaderTermBytes(InteriorReader *pReader){ 04382 assert( !interiorReaderAtEnd(pReader) ); 04383 return pReader->term.nData; 04384 } 04385 static const char *interiorReaderTerm(InteriorReader *pReader){ 04386 assert( !interiorReaderAtEnd(pReader) ); 04387 return pReader->term.pData; 04388 } 04389 04390 /* Step forward to the next term in the node. */ 04391 static void interiorReaderStep(InteriorReader *pReader){ 04392 assert( !interiorReaderAtEnd(pReader) ); 04393 04394 /* If the last term has been read, signal eof, else construct the 04395 ** next term. 04396 */ 04397 if( pReader->nData==0 ){ 04398 dataBufferReset(&pReader->term); 04399 }else{ 04400 int n, nPrefix, nSuffix; 04401 04402 n = getVarint32(pReader->pData, &nPrefix); 04403 n += getVarint32(pReader->pData+n, &nSuffix); 04404 04405 /* Truncate the current term and append suffix data. */ 04406 pReader->term.nData = nPrefix; 04407 dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); 04408 04409 assert( n+nSuffix<=pReader->nData ); 04410 pReader->pData += n+nSuffix; 04411 pReader->nData -= n+nSuffix; 04412 } 04413 pReader->iBlockid++; 04414 } 04415 04416 /* Compare the current term to pTerm[nTerm], returning strcmp-style 04417 ** results. If isPrefix, equality means equal through nTerm bytes. 04418 */ 04419 static int interiorReaderTermCmp(InteriorReader *pReader, 04420 const char *pTerm, int nTerm, int isPrefix){ 04421 const char *pReaderTerm = interiorReaderTerm(pReader); 04422 int nReaderTerm = interiorReaderTermBytes(pReader); 04423 int c, n = nReaderTerm<nTerm ? 
nReaderTerm : nTerm; 04424 04425 if( n==0 ){ 04426 if( nReaderTerm>0 ) return -1; 04427 if( nTerm>0 ) return 1; 04428 return 0; 04429 } 04430 04431 c = memcmp(pReaderTerm, pTerm, n); 04432 if( c!=0 ) return c; 04433 if( isPrefix && n==nTerm ) return 0; 04434 return nReaderTerm - nTerm; 04435 } 04436 04437 /****************************************************************/ 04438 /* LeafWriter is used to collect terms and associated doclist data 04439 ** into leaf blocks in %_segments (see top of file for format info). 04440 ** Expected usage is: 04441 ** 04442 ** LeafWriter writer; 04443 ** leafWriterInit(0, 0, &writer); 04444 ** while( sorted_terms_left_to_process ){ 04445 ** // data is doclist data for that term. 04446 ** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); 04447 ** if( rc!=SQLITE_OK ) goto err; 04448 ** } 04449 ** rc = leafWriterFinalize(v, &writer); 04450 **err: 04451 ** leafWriterDestroy(&writer); 04452 ** return rc; 04453 ** 04454 ** leafWriterStep() may write a collected leaf out to %_segments. 04455 ** leafWriterFinalize() finishes writing any buffered data and stores 04456 ** a root node in %_segdir. leafWriterDestroy() frees all buffers and 04457 ** InteriorWriters allocated as part of writing this segment. 04458 ** 04459 ** TODO(shess) Document leafWriterStepMerge(). 04460 */ 04461 04462 /* Put terms with data this big in their own block. */ 04463 #define STANDALONE_MIN 1024 04464 04465 /* Keep leaf blocks below this size. */ 04466 #define LEAF_MAX 2048 04467 04468 typedef struct LeafWriter { 04469 int iLevel; 04470 int idx; 04471 sqlite_int64 iStartBlockid; /* needed to create the root info */ 04472 sqlite_int64 iEndBlockid; /* when we're done writing. */ 04473 04474 DataBuffer term; /* previous encoded term */ 04475 DataBuffer data; /* encoding buffer */ 04476 04477 /* bytes of first term in the current node which distinguishes that 04478 ** term from the last term of the previous node. 04479 */ 04480 int nTermDistinct; 04481 04482 InteriorWriter parentWriter; /* if we overflow */ 04483 int has_parent; 04484 } LeafWriter; 04485 04486 static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ 04487 CLEAR(pWriter); 04488 pWriter->iLevel = iLevel; 04489 pWriter->idx = idx; 04490 04491 dataBufferInit(&pWriter->term, 32); 04492 04493 /* Start out with a reasonably sized block, though it can grow. */ 04494 dataBufferInit(&pWriter->data, LEAF_MAX); 04495 } 04496 04497 #ifndef NDEBUG 04498 /* Verify that the data is readable as a leaf node. */ 04499 static void leafNodeValidate(const char *pData, int nData){ 04500 int n, iDummy; 04501 04502 if( nData==0 ) return; 04503 assert( nData>0 ); 04504 assert( pData!=0 ); 04505 assert( pData+nData>pData ); 04506 04507 /* Must lead with a varint(0) */ 04508 n = getVarint32(pData, &iDummy); 04509 assert( iDummy==0 ); 04510 assert( n>0 ); 04511 assert( n<nData ); 04512 pData += n; 04513 nData -= n; 04514 04515 /* Leading term length and data must fit in buffer. */ 04516 n = getVarint32(pData, &iDummy); 04517 assert( n>0 ); 04518 assert( iDummy>0 ); 04519 assert( n+iDummy>0 ); 04520 assert( n+iDummy<nData ); 04521 pData += n+iDummy; 04522 nData -= n+iDummy; 04523 04524 /* Leading term's doclist length and data must fit. 
*/ 04525 n = getVarint32(pData, &iDummy); 04526 assert( n>0 ); 04527 assert( iDummy>0 ); 04528 assert( n+iDummy>0 ); 04529 assert( n+iDummy<=nData ); 04530 ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); 04531 pData += n+iDummy; 04532 nData -= n+iDummy; 04533 04534 /* Verify that trailing terms and doclists also are readable. */ 04535 while( nData!=0 ){ 04536 n = getVarint32(pData, &iDummy); 04537 assert( n>0 ); 04538 assert( iDummy>=0 ); 04539 assert( n<nData ); 04540 pData += n; 04541 nData -= n; 04542 n = getVarint32(pData, &iDummy); 04543 assert( n>0 ); 04544 assert( iDummy>0 ); 04545 assert( n+iDummy>0 ); 04546 assert( n+iDummy<nData ); 04547 pData += n+iDummy; 04548 nData -= n+iDummy; 04549 04550 n = getVarint32(pData, &iDummy); 04551 assert( n>0 ); 04552 assert( iDummy>0 ); 04553 assert( n+iDummy>0 ); 04554 assert( n+iDummy<=nData ); 04555 ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); 04556 pData += n+iDummy; 04557 nData -= n+iDummy; 04558 } 04559 } 04560 #define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) 04561 #else 04562 #define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) 04563 #endif 04564 04565 /* Flush the current leaf node to %_segments, and adding the resulting 04566 ** blockid and the starting term to the interior node which will 04567 ** contain it. 04568 */ 04569 static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, 04570 int iData, int nData){ 04571 sqlite_int64 iBlockid = 0; 04572 const char *pStartingTerm; 04573 int nStartingTerm, rc, n; 04574 04575 /* Must have the leading varint(0) flag, plus at least some 04576 ** valid-looking data. 04577 */ 04578 assert( nData>2 ); 04579 assert( iData>=0 ); 04580 assert( iData+nData<=pWriter->data.nData ); 04581 ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); 04582 04583 rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); 04584 if( rc!=SQLITE_OK ) return rc; 04585 assert( iBlockid!=0 ); 04586 04587 /* Reconstruct the first term in the leaf for purposes of building 04588 ** the interior node. 04589 */ 04590 n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm); 04591 pStartingTerm = pWriter->data.pData+iData+1+n; 04592 assert( pWriter->data.nData>iData+1+n+nStartingTerm ); 04593 assert( pWriter->nTermDistinct>0 ); 04594 assert( pWriter->nTermDistinct<=nStartingTerm ); 04595 nStartingTerm = pWriter->nTermDistinct; 04596 04597 if( pWriter->has_parent ){ 04598 interiorWriterAppend(&pWriter->parentWriter, 04599 pStartingTerm, nStartingTerm, iBlockid); 04600 }else{ 04601 interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, 04602 &pWriter->parentWriter); 04603 pWriter->has_parent = 1; 04604 } 04605 04606 /* Track the span of this segment's leaf nodes. */ 04607 if( pWriter->iEndBlockid==0 ){ 04608 pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; 04609 }else{ 04610 pWriter->iEndBlockid++; 04611 assert( iBlockid==pWriter->iEndBlockid ); 04612 } 04613 04614 return SQLITE_OK; 04615 } 04616 static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ 04617 int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); 04618 if( rc!=SQLITE_OK ) return rc; 04619 04620 /* Re-initialize the output buffer. */ 04621 dataBufferReset(&pWriter->data); 04622 04623 return SQLITE_OK; 04624 } 04625 04626 /* Fetch the root info for the segment. If the entire leaf fits 04627 ** within ROOT_MAX, then it will be returned directly, otherwise it 04628 ** will be flushed and the root info will be returned from the 04629 ** interior node. 
** *piEndBlockid is set to the blockid of the last interior or leaf
** node written to disk (0 if none are written at all).
*/
static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter,
                              char **ppRootInfo, int *pnRootInfo,
                              sqlite_int64 *piEndBlockid){
  /* we can fit the segment entirely inline */
  if( !pWriter->has_parent && pWriter->data.nData<ROOT_MAX ){
    *ppRootInfo = pWriter->data.pData;
    *pnRootInfo = pWriter->data.nData;
    *piEndBlockid = 0;
    return SQLITE_OK;
  }

  /* Flush remaining leaf data. */
  if( pWriter->data.nData>0 ){
    int rc = leafWriterFlush(v, pWriter);
    if( rc!=SQLITE_OK ) return rc;
  }

  /* We must have flushed a leaf at some point. */
  assert( pWriter->has_parent );

  /* Tentatively set the end leaf blockid as the end blockid.  If the
  ** interior node can be returned inline, this will be the final
  ** blockid, otherwise it will be overwritten by
  ** interiorWriterRootInfo().
  */
  *piEndBlockid = pWriter->iEndBlockid;

  return interiorWriterRootInfo(v, &pWriter->parentWriter,
                                ppRootInfo, pnRootInfo, piEndBlockid);
}

/* Collect the rootInfo data and store it into the segment directory.
** This has the effect of flushing the segment's leaf data to
** %_segments, and also flushing any interior nodes to %_segments.
*/
static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){
  sqlite_int64 iEndBlockid;
  char *pRootInfo;
  int rc, nRootInfo;

  rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid);
  if( rc!=SQLITE_OK ) return rc;

  /* Don't bother storing an entirely empty segment. */
  if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK;

  return segdir_set(v, pWriter->iLevel, pWriter->idx,
                    pWriter->iStartBlockid, pWriter->iEndBlockid,
                    iEndBlockid, pRootInfo, nRootInfo);
}

static void leafWriterDestroy(LeafWriter *pWriter){
  if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter);
  dataBufferDestroy(&pWriter->term);
  dataBufferDestroy(&pWriter->data);
}

/* Encode a term into the leafWriter, delta-encoding as appropriate.
** Returns the length of the new term which distinguishes it from the
** previous term, which can be used to set nTermDistinct when a node
** boundary is crossed.
*/
static int leafWriterEncodeTerm(LeafWriter *pWriter,
                                const char *pTerm, int nTerm){
  char c[VARINT_MAX+VARINT_MAX];
  int n, nPrefix = 0;

  assert( nTerm>0 );
  while( nPrefix<pWriter->term.nData &&
         pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){
    nPrefix++;
    /* Failing this implies that the terms weren't in order.
*/ 04705 assert( nPrefix<nTerm ); 04706 } 04707 04708 if( pWriter->data.nData==0 ){ 04709 /* Encode the node header and leading term as: 04710 ** varint(0) 04711 ** varint(nTerm) 04712 ** char pTerm[nTerm] 04713 */ 04714 n = putVarint(c, '\0'); 04715 n += putVarint(c+n, nTerm); 04716 dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); 04717 }else{ 04718 /* Delta-encode the term as: 04719 ** varint(nPrefix) 04720 ** varint(nSuffix) 04721 ** char pTermSuffix[nSuffix] 04722 */ 04723 n = putVarint(c, nPrefix); 04724 n += putVarint(c+n, nTerm-nPrefix); 04725 dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); 04726 } 04727 dataBufferReplace(&pWriter->term, pTerm, nTerm); 04728 04729 return nPrefix+1; 04730 } 04731 04732 /* Used to avoid a memmove when a large amount of doclist data is in 04733 ** the buffer. This constructs a node and term header before 04734 ** iDoclistData and flushes the resulting complete node using 04735 ** leafWriterInternalFlush(). 04736 */ 04737 static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, 04738 const char *pTerm, int nTerm, 04739 int iDoclistData){ 04740 char c[VARINT_MAX+VARINT_MAX]; 04741 int iData, n = putVarint(c, 0); 04742 n += putVarint(c+n, nTerm); 04743 04744 /* There should always be room for the header. Even if pTerm shared 04745 ** a substantial prefix with the previous term, the entire prefix 04746 ** could be constructed from earlier data in the doclist, so there 04747 ** should be room. 04748 */ 04749 assert( iDoclistData>=n+nTerm ); 04750 04751 iData = iDoclistData-(n+nTerm); 04752 memcpy(pWriter->data.pData+iData, c, n); 04753 memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); 04754 04755 return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); 04756 } 04757 04758 /* Push pTerm[nTerm] along with the doclist data to the leaf layer of 04759 ** %_segments. 04760 */ 04761 static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, 04762 const char *pTerm, int nTerm, 04763 DLReader *pReaders, int nReaders){ 04764 char c[VARINT_MAX+VARINT_MAX]; 04765 int iTermData = pWriter->data.nData, iDoclistData; 04766 int i, nData, n, nActualData, nActual, rc, nTermDistinct; 04767 04768 ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); 04769 nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); 04770 04771 /* Remember nTermDistinct if opening a new node. */ 04772 if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; 04773 04774 iDoclistData = pWriter->data.nData; 04775 04776 /* Estimate the length of the merged doclist so we can leave space 04777 ** to encode it. 04778 */ 04779 for(i=0, nData=0; i<nReaders; i++){ 04780 nData += dlrAllDataBytes(&pReaders[i]); 04781 } 04782 n = putVarint(c, nData); 04783 dataBufferAppend(&pWriter->data, c, n); 04784 04785 docListMerge(&pWriter->data, pReaders, nReaders); 04786 ASSERT_VALID_DOCLIST(DL_DEFAULT, 04787 pWriter->data.pData+iDoclistData+n, 04788 pWriter->data.nData-iDoclistData-n, NULL); 04789 04790 /* The actual amount of doclist data at this point could be smaller 04791 ** than the length we encoded. Additionally, the space required to 04792 ** encode this length could be smaller. For small doclists, this is 04793 ** not a big deal, we can just use memmove() to adjust things. 
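  ** A hypothetical worked case: if the readers report 200 bytes of
  ** doclist data in total, the putVarint() above reserves two bytes
  ** for the length.  If the merge actually emits, say, 90 bytes, the
  ** true length fits in one byte, so the doclist is slid down one
  ** byte and the corrected varint is rewritten in front of it.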
  */
  nActualData = pWriter->data.nData-(iDoclistData+n);
  nActual = putVarint(c, nActualData);
  assert( nActualData<=nData );
  assert( nActual<=n );

  /* If the new doclist is big enough to force a standalone leaf
  ** node, we can immediately flush it inline without doing the
  ** memmove().
  */
  /* TODO(shess) This test matches leafWriterStep(), which does this
  ** test before it knows the cost to varint-encode the term and
  ** doclist lengths.  At some point, change to
  ** pWriter->data.nData-iTermData>STANDALONE_MIN.
  */
  if( nTerm+nActualData>STANDALONE_MIN ){
    /* Push leaf node from before this term. */
    if( iTermData>0 ){
      rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
      if( rc!=SQLITE_OK ) return rc;

      pWriter->nTermDistinct = nTermDistinct;
    }

    /* Fix the encoded doclist length. */
    iDoclistData += n - nActual;
    memcpy(pWriter->data.pData+iDoclistData, c, nActual);

    /* Push the standalone leaf node. */
    rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData);
    if( rc!=SQLITE_OK ) return rc;

    /* Leave the node empty. */
    dataBufferReset(&pWriter->data);

    return rc;
  }

  /* At this point, we know that the doclist was small, so do the
  ** memmove if indicated.
  */
  if( nActual<n ){
    memmove(pWriter->data.pData+iDoclistData+nActual,
            pWriter->data.pData+iDoclistData+n,
            pWriter->data.nData-(iDoclistData+n));
    pWriter->data.nData -= n-nActual;
  }

  /* Replace written length with actual length. */
  memcpy(pWriter->data.pData+iDoclistData, c, nActual);

  /* If the node is too large, break things up. */
  /* TODO(shess) This test matches leafWriterStep(), which does this
  ** test before it knows the cost to varint-encode the term and
  ** doclist lengths.  At some point, change to
  ** pWriter->data.nData>LEAF_MAX.
  */
  if( iTermData+nTerm+nActualData>LEAF_MAX ){
    /* Flush out the leading data as a node */
    rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
    if( rc!=SQLITE_OK ) return rc;

    pWriter->nTermDistinct = nTermDistinct;

    /* Rebuild header using the current term */
    n = putVarint(pWriter->data.pData, 0);
    n += putVarint(pWriter->data.pData+n, nTerm);
    memcpy(pWriter->data.pData+n, pTerm, nTerm);
    n += nTerm;

    /* There should always be room, because the previous encoding
    ** included all data necessary to construct the term.
    */
    assert( n<iDoclistData );
    /* So long as STANDALONE_MIN is half or less of LEAF_MAX, the
    ** following memcpy() is safe (as opposed to needing a memmove).
    */
    assert( 2*STANDALONE_MIN<=LEAF_MAX );
    assert( n+pWriter->data.nData-iDoclistData<iDoclistData );
    memcpy(pWriter->data.pData+n,
           pWriter->data.pData+iDoclistData,
           pWriter->data.nData-iDoclistData);
    pWriter->data.nData -= iDoclistData-n;
  }
  ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData);

  return SQLITE_OK;
}

/* Push pTerm[nTerm] along with the doclist data to the leaf layer of
** %_segments.
*/
/* TODO(shess) Revise writeZeroSegment() so that doclists are
** constructed directly in pWriter->data.
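** For scale (using the constants above, not measurements): a term
** whose doclist exceeds STANDALONE_MIN (1024) bytes is flushed as its
** own leaf by the standalone path in leafWriterStepMerge(), while
** smaller doclists accumulate in the buffer until the node would pass
** LEAF_MAX (2048) bytes and is split.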
04888 */ 04889 static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, 04890 const char *pTerm, int nTerm, 04891 const char *pData, int nData){ 04892 int rc; 04893 DLReader reader; 04894 04895 dlrInit(&reader, DL_DEFAULT, pData, nData); 04896 rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); 04897 dlrDestroy(&reader); 04898 04899 return rc; 04900 } 04901 04902 04903 /****************************************************************/ 04904 /* LeafReader is used to iterate over an individual leaf node. */ 04905 typedef struct LeafReader { 04906 DataBuffer term; /* copy of current term. */ 04907 04908 const char *pData; /* data for current term. */ 04909 int nData; 04910 } LeafReader; 04911 04912 static void leafReaderDestroy(LeafReader *pReader){ 04913 dataBufferDestroy(&pReader->term); 04914 SCRAMBLE(pReader); 04915 } 04916 04917 static int leafReaderAtEnd(LeafReader *pReader){ 04918 return pReader->nData<=0; 04919 } 04920 04921 /* Access the current term. */ 04922 static int leafReaderTermBytes(LeafReader *pReader){ 04923 return pReader->term.nData; 04924 } 04925 static const char *leafReaderTerm(LeafReader *pReader){ 04926 assert( pReader->term.nData>0 ); 04927 return pReader->term.pData; 04928 } 04929 04930 /* Access the doclist data for the current term. */ 04931 static int leafReaderDataBytes(LeafReader *pReader){ 04932 int nData; 04933 assert( pReader->term.nData>0 ); 04934 getVarint32(pReader->pData, &nData); 04935 return nData; 04936 } 04937 static const char *leafReaderData(LeafReader *pReader){ 04938 int n, nData; 04939 assert( pReader->term.nData>0 ); 04940 n = getVarint32(pReader->pData, &nData); 04941 return pReader->pData+n; 04942 } 04943 04944 static void leafReaderInit(const char *pData, int nData, 04945 LeafReader *pReader){ 04946 int nTerm, n; 04947 04948 assert( nData>0 ); 04949 assert( pData[0]=='\0' ); 04950 04951 CLEAR(pReader); 04952 04953 /* Read the first term, skipping the header byte. */ 04954 n = getVarint32(pData+1, &nTerm); 04955 dataBufferInit(&pReader->term, nTerm); 04956 dataBufferReplace(&pReader->term, pData+1+n, nTerm); 04957 04958 /* Position after the first term. */ 04959 assert( 1+n+nTerm<nData ); 04960 pReader->pData = pData+1+n+nTerm; 04961 pReader->nData = nData-1-n-nTerm; 04962 } 04963 04964 /* Step the reader forward to the next term. */ 04965 static void leafReaderStep(LeafReader *pReader){ 04966 int n, nData, nPrefix, nSuffix; 04967 assert( !leafReaderAtEnd(pReader) ); 04968 04969 /* Skip previous entry's data block. */ 04970 n = getVarint32(pReader->pData, &nData); 04971 assert( n+nData<=pReader->nData ); 04972 pReader->pData += n+nData; 04973 pReader->nData -= n+nData; 04974 04975 if( !leafReaderAtEnd(pReader) ){ 04976 /* Construct the new term using a prefix from the old term plus a 04977 ** suffix from the leaf data. 04978 */ 04979 n = getVarint32(pReader->pData, &nPrefix); 04980 n += getVarint32(pReader->pData+n, &nSuffix); 04981 assert( n+nSuffix<pReader->nData ); 04982 pReader->term.nData = nPrefix; 04983 dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); 04984 04985 pReader->pData += n+nSuffix; 04986 pReader->nData -= n+nSuffix; 04987 } 04988 } 04989 04990 /* strcmp-style comparison of pReader's current term against pTerm. 04991 ** If isPrefix, equality means equal through nTerm bytes. 04992 */ 04993 static int leafReaderTermCmp(LeafReader *pReader, 04994 const char *pTerm, int nTerm, int isPrefix){ 04995 int c, n = pReader->term.nData<nTerm ? 
                                 pReader->term.nData : nTerm;
  if( n==0 ){
    if( pReader->term.nData>0 ) return -1;
    if( nTerm>0 ) return 1;
    return 0;
  }

  c = memcmp(pReader->term.pData, pTerm, n);
  if( c!=0 ) return c;
  if( isPrefix && n==nTerm ) return 0;
  return pReader->term.nData - nTerm;
}


/****************************************************************/
/* LeavesReader wraps LeafReader to allow iterating over the entire
** leaf layer of the tree.
*/
typedef struct LeavesReader {
  int idx;                  /* Index within the segment. */

  sqlite3_stmt *pStmt;      /* Statement we're streaming leaves from. */
  int eof;                  /* we've seen SQLITE_DONE from pStmt. */

  LeafReader leafReader;    /* reader for the current leaf. */
  DataBuffer rootData;      /* root data for inline. */
} LeavesReader;

/* Access the current term. */
static int leavesReaderTermBytes(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderTermBytes(&pReader->leafReader);
}
static const char *leavesReaderTerm(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderTerm(&pReader->leafReader);
}

/* Access the doclist data for the current term. */
static int leavesReaderDataBytes(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderDataBytes(&pReader->leafReader);
}
static const char *leavesReaderData(LeavesReader *pReader){
  assert( !pReader->eof );
  return leafReaderData(&pReader->leafReader);
}

static int leavesReaderAtEnd(LeavesReader *pReader){
  return pReader->eof;
}

/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus
** leaving the statement handle open, which locks the table.
*/
/* TODO(shess) This "solution" is not satisfactory.  Really, there
** should be a check-in function for all statement handles which
** arranges to call sqlite3_reset().  This most likely will require
** modification to control flow all over the place, though, so for now
** just punt.
**
** Note that the current system assumes that segment merges will run
** to completion, which is why this particular problem hasn't arisen
** in this case.  Probably a brittle assumption.
*/
static int leavesReaderReset(LeavesReader *pReader){
  return sqlite3_reset(pReader->pStmt);
}

static void leavesReaderDestroy(LeavesReader *pReader){
  /* If idx is -1, that means we're using a non-cached statement
  ** handle in the optimize() case, so we need to release it.
  */
  if( pReader->pStmt!=NULL && pReader->idx==-1 ){
    sqlite3_finalize(pReader->pStmt);
  }
  leafReaderDestroy(&pReader->leafReader);
  dataBufferDestroy(&pReader->rootData);
  SCRAMBLE(pReader);
}

/* Initialize pReader with the given root data (if iStartBlockid==0
** the leaf data was entirely contained in the root), or from the
** stream of blocks between iStartBlockid and iEndBlockid, inclusive.
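**
** Schematically, the two cases handled below look like (segdir values
** hypothetical):
**
**   iStartBlockid==0: the root blob is itself the one leaf node; no
**                     statement against %_segments is opened at all.
**   iStartBlockid>0:  a statement selecting blocks iStartBlockid
**                     through iEndBlockid is stepped, and the first
**                     returned blob seeds the embedded LeafReader.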
*/ 05080 static int leavesReaderInit(fulltext_vtab *v, 05081 int idx, 05082 sqlite_int64 iStartBlockid, 05083 sqlite_int64 iEndBlockid, 05084 const char *pRootData, int nRootData, 05085 LeavesReader *pReader){ 05086 CLEAR(pReader); 05087 pReader->idx = idx; 05088 05089 dataBufferInit(&pReader->rootData, 0); 05090 if( iStartBlockid==0 ){ 05091 /* Entire leaf level fit in root data. */ 05092 dataBufferReplace(&pReader->rootData, pRootData, nRootData); 05093 leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, 05094 &pReader->leafReader); 05095 }else{ 05096 sqlite3_stmt *s; 05097 int rc = sql_get_leaf_statement(v, idx, &s); 05098 if( rc!=SQLITE_OK ) return rc; 05099 05100 rc = sqlite3_bind_int64(s, 1, iStartBlockid); 05101 if( rc!=SQLITE_OK ) return rc; 05102 05103 rc = sqlite3_bind_int64(s, 2, iEndBlockid); 05104 if( rc!=SQLITE_OK ) return rc; 05105 05106 rc = sqlite3_step(s); 05107 if( rc==SQLITE_DONE ){ 05108 pReader->eof = 1; 05109 return SQLITE_OK; 05110 } 05111 if( rc!=SQLITE_ROW ) return rc; 05112 05113 pReader->pStmt = s; 05114 leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), 05115 sqlite3_column_bytes(pReader->pStmt, 0), 05116 &pReader->leafReader); 05117 } 05118 return SQLITE_OK; 05119 } 05120 05121 /* Step the current leaf forward to the next term. If we reach the 05122 ** end of the current leaf, step forward to the next leaf block. 05123 */ 05124 static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ 05125 assert( !leavesReaderAtEnd(pReader) ); 05126 leafReaderStep(&pReader->leafReader); 05127 05128 if( leafReaderAtEnd(&pReader->leafReader) ){ 05129 int rc; 05130 if( pReader->rootData.pData ){ 05131 pReader->eof = 1; 05132 return SQLITE_OK; 05133 } 05134 rc = sqlite3_step(pReader->pStmt); 05135 if( rc!=SQLITE_ROW ){ 05136 pReader->eof = 1; 05137 return rc==SQLITE_DONE ? SQLITE_OK : rc; 05138 } 05139 leafReaderDestroy(&pReader->leafReader); 05140 leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), 05141 sqlite3_column_bytes(pReader->pStmt, 0), 05142 &pReader->leafReader); 05143 } 05144 return SQLITE_OK; 05145 } 05146 05147 /* Order LeavesReaders by their term, ignoring idx. Readers at eof 05148 ** always sort to the end. 05149 */ 05150 static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ 05151 if( leavesReaderAtEnd(lr1) ){ 05152 if( leavesReaderAtEnd(lr2) ) return 0; 05153 return 1; 05154 } 05155 if( leavesReaderAtEnd(lr2) ) return -1; 05156 05157 return leafReaderTermCmp(&lr1->leafReader, 05158 leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), 05159 0); 05160 } 05161 05162 /* Similar to leavesReaderTermCmp(), with additional ordering by idx 05163 ** so that older segments sort before newer segments. 05164 */ 05165 static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ 05166 int c = leavesReaderTermCmp(lr1, lr2); 05167 if( c!=0 ) return c; 05168 return lr1->idx-lr2->idx; 05169 } 05170 05171 /* Assume that pLr[1]..pLr[nLr-1] are sorted. Bubble pLr[0] into its 05172 ** sorted position. 05173 */ 05174 static void leavesReaderReorder(LeavesReader *pLr, int nLr){ 05175 while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ 05176 LeavesReader tmp = pLr[0]; 05177 pLr[0] = pLr[1]; 05178 pLr[1] = tmp; 05179 nLr--; 05180 pLr++; 05181 } 05182 } 05183 05184 /* Initializes pReaders with the segments from level iLevel, returning 05185 ** the number of segments in *piReaders. Leaves pReaders in sorted 05186 ** order.
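**
** The final loop below calls leavesReaderReorder() on ever-longer
** tails of the array, which amounts to an insertion sort. The same
** idea over plain ints (illustrative sketch only, not part of the
** module):
**
**   static void sortTail(int *a, int n){
**     int i = n;
**     while( i-- ){
**       int j = i;
**       while( j+1<n && a[j]>a[j+1] ){
**         int t = a[j];
**         a[j] = a[j+1];
**         a[j+1] = t;
**         j++;
**       }
**     }
**   }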
*/ 05188 static int leavesReadersInit(fulltext_vtab *v, int iLevel, 05189 LeavesReader *pReaders, int *piReaders){ 05190 sqlite3_stmt *s; 05191 int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); 05192 if( rc!=SQLITE_OK ) return rc; 05193 05194 rc = sqlite3_bind_int(s, 1, iLevel); 05195 if( rc!=SQLITE_OK ) return rc; 05196 05197 i = 0; 05198 while( (rc = sqlite3_step(s))==SQLITE_ROW ){ 05199 sqlite_int64 iStart = sqlite3_column_int64(s, 0); 05200 sqlite_int64 iEnd = sqlite3_column_int64(s, 1); 05201 const char *pRootData = sqlite3_column_blob(s, 2); 05202 int nRootData = sqlite3_column_bytes(s, 2); 05203 05204 assert( i<MERGE_COUNT ); 05205 rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData, 05206 &pReaders[i]); 05207 if( rc!=SQLITE_OK ) break; 05208 05209 i++; 05210 } 05211 if( rc!=SQLITE_DONE ){ 05212 while( i-->0 ){ 05213 leavesReaderDestroy(&pReaders[i]); 05214 } 05215 return rc; 05216 } 05217 05218 *piReaders = i; 05219 05220 /* Leave our results sorted by term, then age. */ 05221 while( i-- ){ 05222 leavesReaderReorder(pReaders+i, *piReaders-i); 05223 } 05224 return SQLITE_OK; 05225 } 05226 05227 /* Merge doclists from pReaders[0..nReaders-1] into a single doclist, 05228 ** which is written to pWriter. Assumes pReaders is ordered oldest to 05229 ** newest. 05230 */ 05231 /* TODO(shess) Consider putting this inline in segmentMerge(). */ 05232 static int leavesReadersMerge(fulltext_vtab *v, 05233 LeavesReader *pReaders, int nReaders, 05234 LeafWriter *pWriter){ 05235 DLReader dlReaders[MERGE_COUNT]; 05236 const char *pTerm = leavesReaderTerm(pReaders); 05237 int i, nTerm = leavesReaderTermBytes(pReaders); 05238 05239 assert( nReaders<=MERGE_COUNT ); 05240 05241 for(i=0; i<nReaders; i++){ 05242 dlrInit(&dlReaders[i], DL_DEFAULT, 05243 leavesReaderData(pReaders+i), 05244 leavesReaderDataBytes(pReaders+i)); 05245 } 05246 05247 return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders); 05248 } 05249 05250 /* Forward ref due to mutual recursion with segdirNextIndex(). */ 05251 static int segmentMerge(fulltext_vtab *v, int iLevel); 05252 05253 /* Put the next available index at iLevel into *pidx. If iLevel 05254 ** already has MERGE_COUNT segments, they are merged to a higher 05255 ** level to make room. 05256 */ 05257 static int segdirNextIndex(fulltext_vtab *v, int iLevel, int *pidx){ 05258 int rc = segdir_max_index(v, iLevel, pidx); 05259 if( rc==SQLITE_DONE ){ /* No segments at iLevel. */ 05260 *pidx = 0; 05261 }else if( rc==SQLITE_ROW ){ 05262 if( *pidx==(MERGE_COUNT-1) ){ 05263 rc = segmentMerge(v, iLevel); 05264 if( rc!=SQLITE_OK ) return rc; 05265 *pidx = 0; 05266 }else{ 05267 (*pidx)++; 05268 } 05269 }else{ 05270 return rc; 05271 } 05272 return SQLITE_OK; 05273 } 05274 05275 /* Merge MERGE_COUNT segments at iLevel into a new segment at 05276 ** iLevel+1. If iLevel+1 is already full of segments, those will be 05277 ** merged to make room. 05278 */ 05279 static int segmentMerge(fulltext_vtab *v, int iLevel){ 05280 LeafWriter writer; 05281 LeavesReader lrs[MERGE_COUNT]; 05282 int i, rc, idx = 0; 05283 05284 /* Determine the next available segment index at the next level, 05285 ** merging as necessary. 05286 */ 05287 rc = segdirNextIndex(v, iLevel+1, &idx); 05288 if( rc!=SQLITE_OK ) return rc; 05289 05290 /* TODO(shess) This assumes that we'll always see exactly 05291 ** MERGE_COUNT segments to merge at a given level. That will be 05292 ** broken if we allow the developer to request preemptive or 05293 ** deferred merging.
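**
** Worked arithmetic for the level scheme (illustrative, and assuming
** merges always run to completion): with MERGE_COUNT==16, one
** level-N segment holds roughly 16^N level-0 flushes, so a level
** holds at most 16 segments and F flushes need only about log16(F)
** levels - around five levels for a million flushes.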
*/ 05295 memset(&lrs, '\0', sizeof(lrs)); 05296 rc = leavesReadersInit(v, iLevel, lrs, &i); 05297 if( rc!=SQLITE_OK ) return rc; 05298 assert( i==MERGE_COUNT ); 05299 05300 leafWriterInit(iLevel+1, idx, &writer); 05301 05302 /* Since leavesReaderReorder() pushes readers at eof to the end, 05303 ** when the first reader is empty, all will be empty. 05304 */ 05305 while( !leavesReaderAtEnd(lrs) ){ 05306 /* Figure out how many readers share their next term. */ 05307 for(i=1; i<MERGE_COUNT && !leavesReaderAtEnd(lrs+i); i++){ 05308 if( 0!=leavesReaderTermCmp(lrs, lrs+i) ) break; 05309 } 05310 05311 rc = leavesReadersMerge(v, lrs, i, &writer); 05312 if( rc!=SQLITE_OK ) goto err; 05313 05314 /* Step forward those that were merged. */ 05315 while( i-->0 ){ 05316 rc = leavesReaderStep(v, lrs+i); 05317 if( rc!=SQLITE_OK ) goto err; 05318 05319 /* Reorder by term, then by age. */ 05320 leavesReaderReorder(lrs+i, MERGE_COUNT-i); 05321 } 05322 } 05323 05324 for(i=0; i<MERGE_COUNT; i++){ 05325 leavesReaderDestroy(&lrs[i]); 05326 } 05327 05328 rc = leafWriterFinalize(v, &writer); 05329 leafWriterDestroy(&writer); 05330 if( rc!=SQLITE_OK ) return rc; 05331 05332 /* Delete the merged segment data. */ 05333 return segdir_delete(v, iLevel); 05334 05335 err: 05336 for(i=0; i<MERGE_COUNT; i++){ 05337 leavesReaderDestroy(&lrs[i]); 05338 } 05339 leafWriterDestroy(&writer); 05340 return rc; 05341 } 05342 05343 /* Accumulate the union of *acc and *pData into *acc. */ 05344 static void docListAccumulateUnion(DataBuffer *acc, 05345 const char *pData, int nData) { 05346 DataBuffer tmp = *acc; 05347 dataBufferInit(acc, tmp.nData+nData); 05348 docListUnion(tmp.pData, tmp.nData, pData, nData, acc); 05349 dataBufferDestroy(&tmp); 05350 } 05351 05352 /* TODO(shess) It might be interesting to explore different merge 05353 ** strategies here. For instance, since this is a sorted merge, we 05354 ** could easily merge many doclists in parallel. With some 05355 ** comprehension of the storage format, we could merge all of the 05356 ** doclists within a leaf node directly from the leaf node's storage. 05357 ** It may be worthwhile to merge smaller doclists before larger 05358 ** doclists, since they can be traversed more quickly - but the 05359 ** results may have less overlap, making them more expensive in a 05360 ** different way. 05361 */ 05362 05363 /* Scan pReader for pTerm/nTerm, and merge the term's doclist over 05364 ** *out (any doclists with duplicate docids overwrite those in *out). 05365 ** Internal function for loadSegmentLeaf(). 05366 */ 05367 static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader, 05368 const char *pTerm, int nTerm, int isPrefix, 05369 DataBuffer *out){ 05370 /* doclist data is accumulated into pBuffers much as one increments a 05371 ** binary number. If index 0 is empty, the data is 05372 ** stored there. If there is data there, it is merged and the 05373 ** results carried into position 1, with further merge-and-carry 05374 ** until an empty position is found. 05375 */ 05376 DataBuffer *pBuffers = NULL; 05377 int nBuffers = 0, nMaxBuffers = 0, rc; 05378 05379 assert( nTerm>0 ); 05380 05381 for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); 05382 rc=leavesReaderStep(v, pReader)){ 05383 /* TODO(shess) Really want leavesReaderTermCmp(), but that name is 05384 ** already taken to compare the terms of two LeavesReaders. Think 05385 ** on a better name. [Meanwhile, break encapsulation rather than 05386 ** use a confusing name.]
05387 */ 05388 int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); 05389 if( c>0 ) break; /* Past any possible matches. */ 05390 if( c==0 ){ 05391 const char *pData = leavesReaderData(pReader); 05392 int iBuffer, nData = leavesReaderDataBytes(pReader); 05393 05394 /* Find the first empty buffer. */ 05395 for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ 05396 if( 0==pBuffers[iBuffer].nData ) break; 05397 } 05398 05399 /* Out of buffers, add an empty one. */ 05400 if( iBuffer==nBuffers ){ 05401 if( nBuffers==nMaxBuffers ){ 05402 DataBuffer *p; 05403 nMaxBuffers += 20; 05404 05405 /* Manual realloc so we can handle NULL appropriately. */ 05406 p = sqlite3_malloc(nMaxBuffers*sizeof(*pBuffers)); 05407 if( p==NULL ){ 05408 rc = SQLITE_NOMEM; 05409 break; 05410 } 05411 05412 if( nBuffers>0 ){ 05413 assert(pBuffers!=NULL); 05414 memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); 05415 sqlite3_free(pBuffers); 05416 } 05417 pBuffers = p; 05418 } 05419 dataBufferInit(&(pBuffers[nBuffers]), 0); 05420 nBuffers++; 05421 } 05422 05423 /* At this point, must have an empty at iBuffer. */ 05424 assert(iBuffer<nBuffers && pBuffers[iBuffer].nData==0); 05425 05426 /* If empty was first buffer, no need for merge logic. */ 05427 if( iBuffer==0 ){ 05428 dataBufferReplace(&(pBuffers[0]), pData, nData); 05429 }else{ 05430 /* pAcc is the empty buffer the merged data will end up in. */ 05431 DataBuffer *pAcc = &(pBuffers[iBuffer]); 05432 DataBuffer *p = &(pBuffers[0]); 05433 05434 /* Handle position 0 specially to avoid need to prime pAcc 05435 ** with pData/nData. 05436 */ 05437 dataBufferSwap(p, pAcc); 05438 docListAccumulateUnion(pAcc, pData, nData); 05439 05440 /* Accumulate remaining doclists into pAcc. */ 05441 for(++p; p<pAcc; ++p){ 05442 docListAccumulateUnion(pAcc, p->pData, p->nData); 05443 05444 /* dataBufferReset() could allow a large doclist to blow up 05445 ** our memory requirements. 05446 */ 05447 if( p->nCapacity<1024 ){ 05448 dataBufferReset(p); 05449 }else{ 05450 dataBufferDestroy(p); 05451 dataBufferInit(p, 0); 05452 } 05453 } 05454 } 05455 } 05456 } 05457 05458 /* Union all the doclists together into *out. */ 05459 /* TODO(shess) What if *out is big? Sigh. */ 05460 if( rc==SQLITE_OK && nBuffers>0 ){ 05461 int iBuffer; 05462 for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ 05463 if( pBuffers[iBuffer].nData>0 ){ 05464 if( out->nData==0 ){ 05465 dataBufferSwap(out, &(pBuffers[iBuffer])); 05466 }else{ 05467 docListAccumulateUnion(out, pBuffers[iBuffer].pData, 05468 pBuffers[iBuffer].nData); 05469 } 05470 } 05471 } 05472 } 05473 05474 while( nBuffers-- ){ 05475 dataBufferDestroy(&(pBuffers[nBuffers])); 05476 } 05477 if( pBuffers!=NULL ) sqlite3_free(pBuffers); 05478 05479 return rc; 05480 } 05481 05482 /* Call loadSegmentLeavesInt() with pData/nData as input. */ 05483 static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, 05484 const char *pTerm, int nTerm, int isPrefix, 05485 DataBuffer *out){ 05486 LeavesReader reader; 05487 int rc; 05488 05489 assert( nData>1 ); 05490 assert( *pData=='\0' ); 05491 rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); 05492 if( rc!=SQLITE_OK ) return rc; 05493 05494 rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); 05495 leavesReaderReset(&reader); 05496 leavesReaderDestroy(&reader); 05497 return rc; 05498 } 05499 05500 /* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to 05501 ** iEndLeaf (inclusive) as input, and merge the resulting doclist into 05502 ** out. 
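**
** Both this function and loadSegmentLeaf() above funnel into
** loadSegmentLeavesInt(), whose buffer scheme mirrors incrementing a
** binary counter: after k matching doclists have been consumed,
** buffer i is non-empty exactly where bit i of k is set. An
** illustrative trace for doclists d1..d5 of comparable size:
**
**   after d1: [ d1, -,     -        ]
**   after d2: [ -,  d1+d2, -        ]
**   after d3: [ d3, d1+d2, -        ]
**   after d4: [ -,  -,     d1+..+d4 ]
**   after d5: [ d5, -,     d1+..+d4 ]
**
** so each docid takes part in only O(log k) union operations.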
05503 */ 05504 static int loadSegmentLeaves(fulltext_vtab *v, 05505 sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, 05506 const char *pTerm, int nTerm, int isPrefix, 05507 DataBuffer *out){ 05508 int rc; 05509 LeavesReader reader; 05510 05511 assert( iStartLeaf<=iEndLeaf ); 05512 rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); 05513 if( rc!=SQLITE_OK ) return rc; 05514 05515 rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); 05516 leavesReaderReset(&reader); 05517 leavesReaderDestroy(&reader); 05518 return rc; 05519 } 05520 05521 /* Taking pData/nData as an interior node, find the sequence of child 05522 ** nodes which could include pTerm/nTerm/isPrefix. Note that the 05523 ** interior node terms logically come between the blocks, so there is 05524 ** one more blockid than there are terms (that block contains terms >= 05525 ** the last interior-node term). 05526 */ 05527 /* TODO(shess) The calling code may already know that the end child is 05528 ** not worth calculating, because the end may be in a later sibling 05529 ** node. Consider whether breaking symmetry is worthwhile. I suspect 05530 ** it is not worthwhile. 05531 */ 05532 static void getChildrenContaining(const char *pData, int nData, 05533 const char *pTerm, int nTerm, int isPrefix, 05534 sqlite_int64 *piStartChild, 05535 sqlite_int64 *piEndChild){ 05536 InteriorReader reader; 05537 05538 assert( nData>1 ); 05539 assert( *pData!='\0' ); 05540 interiorReaderInit(pData, nData, &reader); 05541 05542 /* Scan for the first child which could contain pTerm/nTerm. */ 05543 while( !interiorReaderAtEnd(&reader) ){ 05544 if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; 05545 interiorReaderStep(&reader); 05546 } 05547 *piStartChild = interiorReaderCurrentBlockid(&reader); 05548 05549 /* Keep scanning to find a term greater than our term, using prefix 05550 ** comparison if indicated. If isPrefix is false, this will be the 05551 ** same blockid as the starting block. 05552 */ 05553 while( !interiorReaderAtEnd(&reader) ){ 05554 if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; 05555 interiorReaderStep(&reader); 05556 } 05557 *piEndChild = interiorReaderCurrentBlockid(&reader); 05558 05559 interiorReaderDestroy(&reader); 05560 05561 /* Children must ascend, and if !prefix, both must be the same. */ 05562 assert( *piEndChild>=*piStartChild ); 05563 assert( isPrefix || *piStartChild==*piEndChild ); 05564 } 05565 05566 /* Read block at iBlockid and pass it with other params to 05567 ** getChildrenContaining(). 05568 */ 05569 static int loadAndGetChildrenContaining( 05570 fulltext_vtab *v, 05571 sqlite_int64 iBlockid, 05572 const char *pTerm, int nTerm, int isPrefix, 05573 sqlite_int64 *piStartChild, sqlite_int64 *piEndChild 05574 ){ 05575 sqlite3_stmt *s = NULL; 05576 int rc; 05577 05578 assert( iBlockid!=0 ); 05579 assert( pTerm!=NULL ); 05580 assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ 05581 assert( piStartChild!=NULL ); 05582 assert( piEndChild!=NULL ); 05583 05584 rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); 05585 if( rc!=SQLITE_OK ) return rc; 05586 05587 rc = sqlite3_bind_int64(s, 1, iBlockid); 05588 if( rc!=SQLITE_OK ) return rc; 05589 05590 rc = sqlite3_step(s); 05591 if( rc==SQLITE_DONE ) return SQLITE_ERROR; 05592 if( rc!=SQLITE_ROW ) return rc; 05593 05594 getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), 05595 pTerm, nTerm, isPrefix, piStartChild, piEndChild); 05596 05597 /* We expect only one row. 
We must execute another sqlite3_step() 05598 * to complete the iteration; otherwise the table will remain 05599 * locked. */ 05600 rc = sqlite3_step(s); 05601 if( rc==SQLITE_ROW ) return SQLITE_ERROR; 05602 if( rc!=SQLITE_DONE ) return rc; 05603 05604 return SQLITE_OK; 05605 } 05606 05607 /* Traverse the tree represented by pData[nData] looking for 05608 ** pTerm[nTerm], placing its doclist into *out. This is internal to 05609 ** loadSegment() to make error-handling cleaner. 05610 */ 05611 static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, 05612 sqlite_int64 iLeavesEnd, 05613 const char *pTerm, int nTerm, int isPrefix, 05614 DataBuffer *out){ 05615 /* Special case where root is a leaf. */ 05616 if( *pData=='\0' ){ 05617 return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); 05618 }else{ 05619 int rc; 05620 sqlite_int64 iStartChild, iEndChild; 05621 05622 /* Process pData as an interior node, then loop down the tree 05623 ** until we find the set of leaf nodes to scan for the term. 05624 */ 05625 getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, 05626 &iStartChild, &iEndChild); 05627 while( iStartChild>iLeavesEnd ){ 05628 sqlite_int64 iNextStart, iNextEnd; 05629 rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, 05630 &iNextStart, &iNextEnd); 05631 if( rc!=SQLITE_OK ) return rc; 05632 05633 /* If we've branched, follow the end branch, too. */ 05634 if( iStartChild!=iEndChild ){ 05635 sqlite_int64 iDummy; 05636 rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, 05637 &iDummy, &iNextEnd); 05638 if( rc!=SQLITE_OK ) return rc; 05639 } 05640 05641 assert( iNextStart<=iNextEnd ); 05642 iStartChild = iNextStart; 05643 iEndChild = iNextEnd; 05644 } 05645 assert( iStartChild<=iLeavesEnd ); 05646 assert( iEndChild<=iLeavesEnd ); 05647 05648 /* Scan through the leaf segments for doclists. */ 05649 return loadSegmentLeaves(v, iStartChild, iEndChild, 05650 pTerm, nTerm, isPrefix, out); 05651 } 05652 } 05653 05654 /* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then 05655 ** merge its doclist over *out (any duplicate doclists read from the 05656 ** segment rooted at pData will overwrite those in *out). 05657 */ 05658 /* TODO(shess) Consider changing this to determine the depth of the 05659 ** leaves using either the first characters of interior nodes (when 05660 ** ==1, we're one level above the leaves), or the first character of 05661 ** the root (which will describe the height of the tree directly). 05662 ** Either feels somewhat tricky to me. 05663 */ 05664 /* TODO(shess) The current merge is likely to be slow for large 05665 ** doclists (though it should process from newest/smallest to 05666 ** oldest/largest, so it may not be that bad). It might be useful to 05667 ** modify things to allow for N-way merging. This could either be 05668 ** within a segment, with pairwise merges across segments, or across 05669 ** all segments at once. 05670 */ 05671 static int loadSegment(fulltext_vtab *v, const char *pData, int nData, 05672 sqlite_int64 iLeavesEnd, 05673 const char *pTerm, int nTerm, int isPrefix, 05674 DataBuffer *out){ 05675 DataBuffer result; 05676 int rc; 05677 05678 assert( nData>1 ); 05679 05680 /* This code should never be called with buffered updates. 
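** Pending terms live only in memory, where a segment read could not
** see them, so callers must flush before querying; the assert below
** enforces that.
**
** To picture the descent in loadSegmentInt() above (hypothetical
** blockids, not taken from a real database): in a three-level segment
** whose leaves occupy blocks 1..100 (iLeavesEnd==100), the root might
** narrow a prefix query to interior children [112, 115]; both exceed
** iLeavesEnd, so each end of the range is loaded and narrowed again,
** perhaps to leaves [40, 57], where the leaf scan finally runs.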
*/ 05681 assert( v->nPendingData<0 ); 05682 05683 dataBufferInit(&result, 0); 05684 rc = loadSegmentInt(v, pData, nData, iLeavesEnd, 05685 pTerm, nTerm, isPrefix, &result); 05686 if( rc==SQLITE_OK && result.nData>0 ){ 05687 if( out->nData==0 ){ 05688 DataBuffer tmp = *out; 05689 *out = result; 05690 result = tmp; 05691 }else{ 05692 DataBuffer merged; 05693 DLReader readers[2]; 05694 05695 dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); 05696 dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); 05697 dataBufferInit(&merged, out->nData+result.nData); 05698 docListMerge(&merged, readers, 2); 05699 dataBufferDestroy(out); 05700 *out = merged; 05701 dlrDestroy(&readers[0]); 05702 dlrDestroy(&readers[1]); 05703 } 05704 } 05705 dataBufferDestroy(&result); 05706 return rc; 05707 } 05708 05709 /* Scan the database and merge together the posting lists for the term 05710 ** into *out. 05711 */ 05712 static int termSelect(fulltext_vtab *v, int iColumn, 05713 const char *pTerm, int nTerm, int isPrefix, 05714 DocListType iType, DataBuffer *out){ 05715 DataBuffer doclist; 05716 sqlite3_stmt *s; 05717 int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); 05718 if( rc!=SQLITE_OK ) return rc; 05719 05720 /* This code should never be called with buffered updates. */ 05721 assert( v->nPendingData<0 ); 05722 05723 dataBufferInit(&doclist, 0); 05724 05725 /* Traverse the segments from oldest to newest so that newer doclist 05726 ** elements for given docids overwrite older elements. 05727 */ 05728 while( (rc = sqlite3_step(s))==SQLITE_ROW ){ 05729 const char *pData = sqlite3_column_blob(s, 2); 05730 const int nData = sqlite3_column_bytes(s, 2); 05731 const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); 05732 rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, 05733 &doclist); 05734 if( rc!=SQLITE_OK ) goto err; 05735 } 05736 if( rc==SQLITE_DONE ){ 05737 if( doclist.nData!=0 ){ 05738 /* TODO(shess) The old term_select_all() code applied the column 05739 ** restrict as we merged segments, leading to smaller buffers. 05740 ** This is probably worthwhile to bring back, once the new storage 05741 ** system is checked in. 05742 */ 05743 if( iColumn==v->nColumn) iColumn = -1; 05744 docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, 05745 iColumn, iType, out); 05746 } 05747 rc = SQLITE_OK; 05748 } 05749 05750 err: 05751 dataBufferDestroy(&doclist); 05752 return rc; 05753 } 05754 05755 /****************************************************************/ 05756 /* Used to hold hashtable data for sorting. */ 05757 typedef struct TermData { 05758 const char *pTerm; 05759 int nTerm; 05760 DLCollector *pCollector; 05761 } TermData; 05762 05763 /* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 05764 ** for equal, >0 for greater-than). 05765 */ 05766 static int termDataCmp(const void *av, const void *bv){ 05767 const TermData *a = (const TermData *)av; 05768 const TermData *b = (const TermData *)bv; 05769 int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm; 05770 int c = memcmp(a->pTerm, b->pTerm, n); 05771 if( c!=0 ) return c; 05772 return a->nTerm-b->nTerm; 05773 } 05774 05775 /* Order pTerms data by term, then write a new level 0 segment using 05776 ** LeafWriter. 05777 */ 05778 static int writeZeroSegment(fulltext_vtab *v, fts2Hash *pTerms){ 05779 fts2HashElem *e; 05780 int idx, rc, i, n; 05781 TermData *pData; 05782 LeafWriter writer; 05783 DataBuffer dl; 05784 05785 /* Determine the next index at level 0, merging as necessary. 
*/ 05786 rc = segdirNextIndex(v, 0, &idx); 05787 if( rc!=SQLITE_OK ) return rc; 05788 05789 n = fts2HashCount(pTerms); 05790 pData = sqlite3_malloc(n*sizeof(TermData)); if( pData==NULL ) return SQLITE_NOMEM; 05791 05792 for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){ 05793 assert( i<n ); 05794 pData[i].pTerm = fts2HashKey(e); 05795 pData[i].nTerm = fts2HashKeysize(e); 05796 pData[i].pCollector = fts2HashData(e); 05797 } 05798 assert( i==n ); 05799 05800 /* TODO(shess) Should we allow user-defined collation sequences, 05801 ** here? I think we only need that once we support prefix searches. 05802 */ 05803 if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp); 05804 05805 /* TODO(shess) Refactor so that we can write directly to the segment 05806 ** DataBuffer, as happens for segment merges. 05807 */ 05808 leafWriterInit(0, idx, &writer); 05809 dataBufferInit(&dl, 0); 05810 for(i=0; i<n; i++){ 05811 dataBufferReset(&dl); 05812 dlcAddDoclist(pData[i].pCollector, &dl); 05813 rc = leafWriterStep(v, &writer, 05814 pData[i].pTerm, pData[i].nTerm, dl.pData, dl.nData); 05815 if( rc!=SQLITE_OK ) goto err; 05816 } 05817 rc = leafWriterFinalize(v, &writer); 05818 05819 err: 05820 dataBufferDestroy(&dl); 05821 sqlite3_free(pData); 05822 leafWriterDestroy(&writer); 05823 return rc; 05824 } 05825 05826 /* If pendingTerms has data, free it. */ 05827 static int clearPendingTerms(fulltext_vtab *v){ 05828 if( v->nPendingData>=0 ){ 05829 fts2HashElem *e; 05830 for(e=fts2HashFirst(&v->pendingTerms); e; e=fts2HashNext(e)){ 05831 dlcDelete(fts2HashData(e)); 05832 } 05833 fts2HashClear(&v->pendingTerms); 05834 v->nPendingData = -1; 05835 } 05836 return SQLITE_OK; 05837 } 05838 05839 /* If pendingTerms has data, flush it to a level-zero segment, and 05840 ** free it. 05841 */ 05842 static int flushPendingTerms(fulltext_vtab *v){ 05843 if( v->nPendingData>=0 ){ 05844 int rc = writeZeroSegment(v, &v->pendingTerms); 05845 if( rc==SQLITE_OK ) clearPendingTerms(v); 05846 return rc; 05847 } 05848 return SQLITE_OK; 05849 } 05850 05851 /* If pendingTerms is "too big", or docid is out of order, flush it. 05852 ** Regardless, be certain that pendingTerms is initialized for use. 05853 */ 05854 static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ 05855 /* TODO(shess) Explore whether partially flushing the buffer on 05856 ** forced-flush would provide better performance. I suspect that if 05857 ** we ordered the doclists by size and flushed the largest until the 05858 ** buffer was half empty, that would let the less frequent terms 05859 ** generate longer doclists. 05860 */ 05861 if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ 05862 int rc = flushPendingTerms(v); 05863 if( rc!=SQLITE_OK ) return rc; 05864 } 05865 if( v->nPendingData<0 ){ 05866 fts2HashInit(&v->pendingTerms, FTS2_HASH_STRING, 1); 05867 v->nPendingData = 0; 05868 } 05869 v->iPrevDocid = iDocid; 05870 return SQLITE_OK; 05871 } 05872 05873 /* This function implements the xUpdate callback; it is the top-level entry 05874 * point for inserting, deleting or updating a row in a full-text table. */ 05875 static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, 05876 sqlite_int64 *pRowid){ 05877 fulltext_vtab *v = (fulltext_vtab *) pVtab; 05878 int rc; 05879 05880 TRACE(("FTS2 Update %p\n", pVtab)); 05881 05882 if( nArg<2 ){ 05883 rc = index_delete(v, sqlite3_value_int64(ppArg[0])); 05884 if( rc==SQLITE_OK ){ 05885 /* If we just deleted the last row in the table, clear out the 05886 ** index data.
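**
** (Once the content table is empty no document can ever match, so
** dropping the segment data outright is safe, and cheaper than
** waiting for merges to collect the dead doclists.)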
05887 */ 05888 rc = content_exists(v); 05889 if( rc==SQLITE_ROW ){ 05890 rc = SQLITE_OK; 05891 }else if( rc==SQLITE_DONE ){ 05892 /* Clear the pending terms so we don't flush a useless level-0 05893 ** segment when the transaction closes. 05894 */ 05895 rc = clearPendingTerms(v); 05896 if( rc==SQLITE_OK ){ 05897 rc = segdir_delete_all(v); 05898 } 05899 } 05900 } 05901 } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ 05902 /* An update: 05903 * ppArg[0] = old rowid 05904 * ppArg[1] = new rowid 05905 * ppArg[2..2+v->nColumn-1] = values 05906 * ppArg[2+v->nColumn] = value for magic column (we ignore this) 05907 */ 05908 sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); 05909 if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || 05910 sqlite3_value_int64(ppArg[1]) != rowid ){ 05911 rc = SQLITE_ERROR; /* we don't allow changing the rowid */ 05912 } else { 05913 assert( nArg==2+v->nColumn+1); 05914 rc = index_update(v, rowid, &ppArg[2]); 05915 } 05916 } else { 05917 /* An insert: 05918 * ppArg[1] = requested rowid 05919 * ppArg[2..2+v->nColumn-1] = values 05920 * ppArg[2+v->nColumn] = value for magic column (we ignore this) 05921 */ 05922 assert( nArg==2+v->nColumn+1); 05923 rc = index_insert(v, ppArg[1], &ppArg[2], pRowid); 05924 } 05925 05926 return rc; 05927 } 05928 05929 static int fulltextSync(sqlite3_vtab *pVtab){ 05930 TRACE(("FTS2 xSync()\n")); 05931 return flushPendingTerms((fulltext_vtab *)pVtab); 05932 } 05933 05934 static int fulltextBegin(sqlite3_vtab *pVtab){ 05935 fulltext_vtab *v = (fulltext_vtab *) pVtab; 05936 TRACE(("FTS2 xBegin()\n")); 05937 05938 /* Any buffered updates should have been cleared by the previous 05939 ** transaction. 05940 */ 05941 assert( v->nPendingData<0 ); 05942 return clearPendingTerms(v); 05943 } 05944 05945 static int fulltextCommit(sqlite3_vtab *pVtab){ 05946 fulltext_vtab *v = (fulltext_vtab *) pVtab; 05947 TRACE(("FTS2 xCommit()\n")); 05948 05949 /* Buffered updates should have been cleared by fulltextSync(). 
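**
** (SQLite invokes xSync() on each virtual table before xCommit(), so
** flushPendingTerms() has already run by the time we get here;
** nPendingData<0 is the "buffer not in use" sentinel that
** clearPendingTerms() sets.)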
*/ 05950 assert( v->nPendingData<0 ); 05951 return clearPendingTerms(v); 05952 } 05953 05954 static int fulltextRollback(sqlite3_vtab *pVtab){ 05955 TRACE(("FTS2 xRollback()\n")); 05956 return clearPendingTerms((fulltext_vtab *)pVtab); 05957 } 05958 05959 /* 05960 ** Implementation of the snippet() function for FTS2 05961 */ 05962 static void snippetFunc( 05963 sqlite3_context *pContext, 05964 int argc, 05965 sqlite3_value **argv 05966 ){ 05967 fulltext_cursor *pCursor; 05968 if( argc<1 ) return; 05969 if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || 05970 sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ 05971 sqlite3_result_error(pContext, "illegal first argument to snippet",-1); 05972 }else{ 05973 const char *zStart = "<b>"; 05974 const char *zEnd = "</b>"; 05975 const char *zEllipsis = "<b>...</b>"; 05976 memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); 05977 if( argc>=2 ){ 05978 zStart = (const char*)sqlite3_value_text(argv[1]); 05979 if( argc>=3 ){ 05980 zEnd = (const char*)sqlite3_value_text(argv[2]); 05981 if( argc>=4 ){ 05982 zEllipsis = (const char*)sqlite3_value_text(argv[3]); 05983 } 05984 } 05985 } 05986 snippetAllOffsets(pCursor); 05987 snippetText(pCursor, zStart, zEnd, zEllipsis); 05988 sqlite3_result_text(pContext, pCursor->snippet.zSnippet, 05989 pCursor->snippet.nSnippet, SQLITE_STATIC); 05990 } 05991 } 05992 05993 /* 05994 ** Implementation of the offsets() function for FTS2 05995 */ 05996 static void snippetOffsetsFunc( 05997 sqlite3_context *pContext, 05998 int argc, 05999 sqlite3_value **argv 06000 ){ 06001 fulltext_cursor *pCursor; 06002 if( argc<1 ) return; 06003 if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || 06004 sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ 06005 sqlite3_result_error(pContext, "illegal first argument to offsets",-1); 06006 }else{ 06007 memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); 06008 snippetAllOffsets(pCursor); 06009 snippetOffsetText(&pCursor->snippet); 06010 sqlite3_result_text(pContext, 06011 pCursor->snippet.zOffset, pCursor->snippet.nOffset, 06012 SQLITE_STATIC); 06013 } 06014 } 06015 06016 /* OptLeavesReader is nearly identical to LeavesReader, except that 06017 ** where LeavesReader is geared towards the merging of complete 06018 ** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader 06019 ** is geared towards implementation of the optimize() function, and 06020 ** can merge all segments simultaneously. This version may be 06021 ** somewhat less efficient than LeavesReader because it merges into an 06022 ** accumulator rather than doing an N-way merge, but since segment 06023 ** size grows exponentially (so segment count logarithmically) this is 06024 ** probably not an immediate problem. 06025 */ 06026 /* TODO(shess): Prove that assertion, or extend the merge code to 06027 ** merge in tree fashion (like the prefix-searching code does). 06028 */ 06029 /* TODO(shess): OptLeavesReader and LeavesReader could probably be 06030 ** merged with little or no loss of performance for LeavesReader. The 06031 ** merged code would need to handle >MERGE_COUNT segments, and would 06032 ** also need to be able to optionally optimize away deletes. 06033 */ 06034 typedef struct OptLeavesReader { 06035 /* Segment number, to order readers by age.
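** Unlike LeavesReader.idx, which never exceeds MERGE_COUNT-1, this is
** an open-ended counter assigned in the order the segments were read
** (oldest first), so any number of readers can be ordered by age.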
*/ 06036 int segment; 06037 LeavesReader reader; 06038 } OptLeavesReader; 06039 06040 static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ 06041 return leavesReaderAtEnd(&pReader->reader); 06042 } 06043 static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ 06044 return leavesReaderTermBytes(&pReader->reader); 06045 } 06046 static const char *optLeavesReaderData(OptLeavesReader *pReader){ 06047 return leavesReaderData(&pReader->reader); 06048 } 06049 static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ 06050 return leavesReaderDataBytes(&pReader->reader); 06051 } 06052 static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ 06053 return leavesReaderTerm(&pReader->reader); 06054 } 06055 static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ 06056 return leavesReaderStep(v, &pReader->reader); 06057 } 06058 static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ 06059 return leavesReaderTermCmp(&lr1->reader, &lr2->reader); 06060 } 06061 /* Order by term ascending, segment ascending (oldest to newest), with 06062 ** exhausted readers to the end. 06063 */ 06064 static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ 06065 int c = optLeavesReaderTermCmp(lr1, lr2); 06066 if( c!=0 ) return c; 06067 return lr1->segment-lr2->segment; 06068 } 06069 /* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that 06070 ** pLr[1..nLr-1] is already sorted. 06071 */ 06072 static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ 06073 while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ 06074 OptLeavesReader tmp = pLr[0]; 06075 pLr[0] = pLr[1]; 06076 pLr[1] = tmp; 06077 nLr--; 06078 pLr++; 06079 } 06080 } 06081 06082 /* optimize() helper function. Put the readers in order and iterate 06083 ** through them, merging doclists for matching terms into pWriter. 06084 ** Returns SQLITE_OK on success, or the SQLite error code which 06085 ** prevented success. 06086 */ 06087 static int optimizeInternal(fulltext_vtab *v, 06088 OptLeavesReader *readers, int nReaders, 06089 LeafWriter *pWriter){ 06090 int i, rc = SQLITE_OK; 06091 DataBuffer doclist, merged, tmp; 06092 06093 /* Order the readers. */ 06094 i = nReaders; 06095 while( i-- > 0 ){ 06096 optLeavesReaderReorder(&readers[i], nReaders-i); 06097 } 06098 06099 dataBufferInit(&doclist, LEAF_MAX); 06100 dataBufferInit(&merged, LEAF_MAX); 06101 06102 /* Exhausted readers bubble to the end, so when the first reader is 06103 ** at eof, all are at eof. 06104 */ 06105 while( !optLeavesReaderAtEnd(&readers[0]) ){ 06106 06107 /* Figure out how many readers share the next term. */ 06108 for(i=1; i<nReaders && !optLeavesReaderAtEnd(&readers[i]); i++){ 06109 if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break; 06110 } 06111 06112 /* Special-case for no merge. */ 06113 if( i==1 ){ 06114 /* Trim deletions from the doclist. */ 06115 dataBufferReset(&merged); 06116 docListTrim(DL_DEFAULT, 06117 optLeavesReaderData(&readers[0]), 06118 optLeavesReaderDataBytes(&readers[0]), 06119 -1, DL_DEFAULT, &merged); 06120 }else{ 06121 DLReader dlReaders[MERGE_COUNT]; 06122 int iReader, nReaders; 06123 06124 /* Prime the pipeline with the first reader's doclist. After 06125 ** one pass index 0 will reference the accumulated doclist. 06126 */ 06127 dlrInit(&dlReaders[0], DL_DEFAULT, 06128 optLeavesReaderData(&readers[0]), 06129 optLeavesReaderDataBytes(&readers[0])); 06130 iReader = 1; 06131 06132 assert( iReader<i ); /* Must execute the loop at least once. 
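**
** (Illustrative arithmetic: with MERGE_COUNT==16 and i==40 readers
** sharing the current term, pass one merges readers 0..15 into the
** accumulator, pass two merges the accumulator with readers 16..30,
** and pass three the accumulator with readers 31..39.)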
*/ 06133 while( iReader<i ){ 06134 /* Merge up to MERGE_COUNT inputs per pass. */ 06135 for( nReaders=1; iReader<i && nReaders<MERGE_COUNT; 06136 iReader++, nReaders++ ){ 06137 dlrInit(&dlReaders[nReaders], DL_DEFAULT, 06138 optLeavesReaderData(&readers[iReader]), 06139 optLeavesReaderDataBytes(&readers[iReader])); 06140 } 06141 06142 /* Merge doclists and swap result into accumulator. */ 06143 dataBufferReset(&merged); 06144 docListMerge(&merged, dlReaders, nReaders); 06145 tmp = merged; 06146 merged = doclist; 06147 doclist = tmp; 06148 06149 while( nReaders-- > 0 ){ 06150 dlrDestroy(&dlReaders[nReaders]); 06151 } 06152 06153 /* Accumulated doclist to reader 0 for next pass. */ 06154 dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); 06155 } 06156 06157 /* Destroy reader that was left in the pipeline. */ 06158 dlrDestroy(&dlReaders[0]); 06159 06160 /* Trim deletions from the doclist. */ 06161 dataBufferReset(&merged); 06162 docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, 06163 -1, DL_DEFAULT, &merged); 06164 } 06165 06166 /* Only pass doclists with hits (skip if all hits deleted). */ 06167 if( merged.nData>0 ){ 06168 rc = leafWriterStep(v, pWriter, 06169 optLeavesReaderTerm(&readers[0]), 06170 optLeavesReaderTermBytes(&readers[0]), 06171 merged.pData, merged.nData); 06172 if( rc!=SQLITE_OK ) goto err; 06173 } 06174 06175 /* Step merged readers to next term and reorder. */ 06176 while( i-- > 0 ){ 06177 rc = optLeavesReaderStep(v, &readers[i]); 06178 if( rc!=SQLITE_OK ) goto err; 06179 06180 optLeavesReaderReorder(&readers[i], nReaders-i); 06181 } 06182 } 06183 06184 err: 06185 dataBufferDestroy(&doclist); 06186 dataBufferDestroy(&merged); 06187 return rc; 06188 } 06189 06190 /* Implement the optimize() function for FTS2. optimize(t) merges all 06191 ** segments in the fts index into a single segment. 't' is the magic 06192 ** table-named column. 06193 */ 06194 static void optimizeFunc(sqlite3_context *pContext, 06195 int argc, sqlite3_value **argv){ 06196 fulltext_cursor *pCursor; 06197 if( argc>1 ){ 06198 sqlite3_result_error(pContext, "excess arguments to optimize()",-1); 06199 }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || 06200 sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ 06201 sqlite3_result_error(pContext, "illegal first argument to optimize",-1); 06202 }else{ 06203 fulltext_vtab *v; 06204 int i, rc, iMaxLevel; 06205 OptLeavesReader *readers; 06206 int nReaders; 06207 LeafWriter writer; 06208 sqlite3_stmt *s; 06209 06210 memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); 06211 v = cursor_vtab(pCursor); 06212 06213 /* Flush any buffered updates before optimizing. */ 06214 rc = flushPendingTerms(v); 06215 if( rc!=SQLITE_OK ) goto err; 06216 06217 rc = segdir_count(v, &nReaders, &iMaxLevel); 06218 if( rc!=SQLITE_OK ) goto err; 06219 if( nReaders==0 || nReaders==1 ){ 06220 sqlite3_result_text(pContext, "Index already optimal", -1, 06221 SQLITE_STATIC); 06222 return; 06223 } 06224 06225 rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); 06226 if( rc!=SQLITE_OK ) goto err; 06227 06228 readers = sqlite3_malloc(nReaders*sizeof(readers[0])); 06229 if( readers==NULL ) goto err; 06230 06231 /* Note that there will already be a segment at this position 06232 ** until we call segdir_delete() on iMaxLevel.
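** The clash is only apparent: the new segment's %_segdir row is not
** written until leafWriterFinalize(), which runs after the
** segdir_delete() loop below has removed every old segment, including
** whatever occupies (iMaxLevel, 0).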
06233 */ 06234 leafWriterInit(iMaxLevel, 0, &writer); 06235 06236 i = 0; 06237 while( (rc = sqlite3_step(s))==SQLITE_ROW ){ 06238 sqlite_int64 iStart = sqlite3_column_int64(s, 0); 06239 sqlite_int64 iEnd = sqlite3_column_int64(s, 1); 06240 const char *pRootData = sqlite3_column_blob(s, 2); 06241 int nRootData = sqlite3_column_bytes(s, 2); 06242 06243 assert( i<nReaders ); 06244 rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData, 06245 &readers[i].reader); 06246 if( rc!=SQLITE_OK ) break; 06247 06248 readers[i].segment = i; 06249 i++; 06250 } 06251 06252 /* If we managed to successfully read them all, optimize them. */ 06253 if( rc==SQLITE_DONE ){ 06254 assert( i==nReaders ); 06255 rc = optimizeInternal(v, readers, nReaders, &writer); 06256 } 06257 06258 while( i-- > 0 ){ 06259 leavesReaderDestroy(&readers[i].reader); 06260 } 06261 sqlite3_free(readers); 06262 06263 /* If we've successfully gotten to here, delete the old segments 06264 ** and flush the interior structure of the new segment. 06265 */ 06266 if( rc==SQLITE_OK ){ 06267 for( i=0; i<=iMaxLevel; i++ ){ 06268 rc = segdir_delete(v, i); 06269 if( rc!=SQLITE_OK ) break; 06270 } 06271 06272 if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); 06273 } 06274 06275 leafWriterDestroy(&writer); 06276 06277 if( rc!=SQLITE_OK ) goto err; 06278 06279 sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); 06280 return; 06281 06282 /* TODO(shess): Error-handling needs to be improved along the 06283 ** lines of the dump_ functions. 06284 */ 06285 err: 06286 { 06287 char buf[512]; 06288 sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", 06289 sqlite3_errmsg(sqlite3_context_db_handle(pContext))); 06290 sqlite3_result_error(pContext, buf, -1); 06291 } 06292 } 06293 } 06294 06295 #ifdef SQLITE_TEST 06296 /* Generate an error of the form "<prefix>: <msg>". If msg is NULL, 06297 ** pull the error from the context's db handle. 06298 */ 06299 static void generateError(sqlite3_context *pContext, 06300 const char *prefix, const char *msg){ 06301 char buf[512]; 06302 if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); 06303 sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); 06304 sqlite3_result_error(pContext, buf, -1); 06305 } 06306 06307 /* Helper function to collect the set of terms in the segment into 06308 ** pTerms. The segment is defined by the leaf nodes between 06309 ** iStartBlockid and iEndBlockid, inclusive, or by the contents of 06310 ** pRootData if iStartBlockid is 0 (in which case the entire segment 06311 ** fit in a leaf). 06312 */ 06313 static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, 06314 fts2Hash *pTerms){ 06315 const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); 06316 const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); 06317 const char *pRootData = sqlite3_column_blob(s, 2); 06318 const int nRootData = sqlite3_column_bytes(s, 2); 06319 LeavesReader reader; 06320 int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, 06321 pRootData, nRootData, &reader); 06322 if( rc!=SQLITE_OK ) return rc; 06323 06324 while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ 06325 const char *pTerm = leavesReaderTerm(&reader); 06326 const int nTerm = leavesReaderTermBytes(&reader); 06327 void *oldValue = sqlite3Fts2HashFind(pTerms, pTerm, nTerm); 06328 void *newValue = (void *)((char *)oldValue+1); 06329 06330 /* From the comment before sqlite3Fts2HashInsert in fts2_hash.c, 06331 ** the data value passed is returned in case of malloc failure.
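**
** The hash values here are occurrence counts smuggled through void
** pointers: oldValue is NULL (zero) for an unseen term, so newValue
** is always the previous count plus one and can never equal what was
** already stored - the insert can only return newValue when its
** allocation fails.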
06332 */ 06333 if( newValue==sqlite3Fts2HashInsert(pTerms, pTerm, nTerm, newValue) ){ 06334 rc = SQLITE_NOMEM; 06335 }else{ 06336 rc = leavesReaderStep(v, &reader); 06337 } 06338 } 06339 06340 leavesReaderDestroy(&reader); 06341 return rc; 06342 } 06343 06344 /* Helper function to build the result string for dump_terms(). */ 06345 static int generateTermsResult(sqlite3_context *pContext, fts2Hash *pTerms){ 06346 int iTerm, nTerms, nResultBytes, iByte; 06347 char *result; 06348 TermData *pData; 06349 fts2HashElem *e; 06350 06351 /* Iterate pTerms to generate an array of terms in pData for 06352 ** sorting. 06353 */ 06354 nTerms = fts2HashCount(pTerms); 06355 assert( nTerms>0 ); 06356 pData = sqlite3_malloc(nTerms*sizeof(TermData)); 06357 if( pData==NULL ) return SQLITE_NOMEM; 06358 06359 nResultBytes = 0; 06360 for(iTerm = 0, e = fts2HashFirst(pTerms); e; iTerm++, e = fts2HashNext(e)){ 06361 nResultBytes += fts2HashKeysize(e)+1; /* Term plus trailing space */ 06362 assert( iTerm<nTerms ); 06363 pData[iTerm].pTerm = fts2HashKey(e); 06364 pData[iTerm].nTerm = fts2HashKeysize(e); 06365 pData[iTerm].pCollector = fts2HashData(e); /* unused */ 06366 } 06367 assert( iTerm==nTerms ); 06368 06369 assert( nResultBytes>0 ); /* nTerms>0, nResultBytes must be, too. */ 06370 result = sqlite3_malloc(nResultBytes); 06371 if( result==NULL ){ 06372 sqlite3_free(pData); 06373 return SQLITE_NOMEM; 06374 } 06375 06376 if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); 06377 06378 /* Read the terms in order to build the result. */ 06379 iByte = 0; 06380 for(iTerm=0; iTerm<nTerms; ++iTerm){ 06381 memcpy(result+iByte, pData[iTerm].pTerm, pData[iTerm].nTerm); 06382 iByte += pData[iTerm].nTerm; 06383 result[iByte++] = ' '; 06384 } 06385 assert( iByte==nResultBytes ); 06386 assert( result[nResultBytes-1]==' ' ); 06387 result[nResultBytes-1] = '\0'; 06388 06389 /* Passes ownership of result to pContext. */ 06390 sqlite3_result_text(pContext, result, nResultBytes-1, sqlite3_free); 06391 sqlite3_free(pData); 06392 return SQLITE_OK; 06393 } 06394 06395 /* Implements dump_terms() for use in inspecting the fts2 index from 06396 ** tests. TEXT result containing the ordered list of terms joined by 06397 ** spaces. dump_terms(t, level, idx) dumps the terms for the segment 06398 ** specified by level, idx (in %_segdir), while dump_terms(t) dumps 06399 ** all terms in the index. In both cases t is the fts table's magic 06400 ** table-named column. 06401 */ 06402 static void dumpTermsFunc( 06403 sqlite3_context *pContext, 06404 int argc, sqlite3_value **argv 06405 ){ 06406 fulltext_cursor *pCursor; 06407 if( argc!=3 && argc!=1 ){ 06408 generateError(pContext, "dump_terms", "incorrect arguments"); 06409 }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || 06410 sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ 06411 generateError(pContext, "dump_terms", "illegal first argument"); 06412 }else{ 06413 fulltext_vtab *v; 06414 fts2Hash terms; 06415 sqlite3_stmt *s = NULL; 06416 int rc; 06417 06418 memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); 06419 v = cursor_vtab(pCursor); 06420 06421 /* If passed only the cursor column, get all segments. Otherwise 06422 ** get the segment described by the following two arguments.
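**
** Illustrative SQL (SQLITE_TEST builds only; t is a hypothetical fts2
** table):
**
**   SELECT dump_terms(t) FROM t LIMIT 1;        -- whole index
**   SELECT dump_terms(t, 0, 0) FROM t LIMIT 1;  -- level 0, segment 0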
06423 */ 06424 if( argc==1 ){ 06425 rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); 06426 }else{ 06427 rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); 06428 if( rc==SQLITE_OK ){ 06429 rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[1])); 06430 if( rc==SQLITE_OK ){ 06431 rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[2])); 06432 } 06433 } 06434 } 06435 06436 if( rc!=SQLITE_OK ){ 06437 generateError(pContext, "dump_terms", NULL); 06438 return; 06439 } 06440 06441 /* Collect the terms for each segment. */ 06442 sqlite3Fts2HashInit(&terms, FTS2_HASH_STRING, 1); 06443 while( (rc = sqlite3_step(s))==SQLITE_ROW ){ 06444 rc = collectSegmentTerms(v, s, &terms); 06445 if( rc!=SQLITE_OK ) break; 06446 } 06447 06448 if( rc!=SQLITE_DONE ){ 06449 sqlite3_reset(s); 06450 generateError(pContext, "dump_terms", NULL); 06451 }else{ 06452 const int nTerms = fts2HashCount(&terms); 06453 if( nTerms>0 ){ 06454 rc = generateTermsResult(pContext, &terms); 06455 if( rc==SQLITE_NOMEM ){ 06456 generateError(pContext, "dump_terms", "out of memory"); 06457 }else{ 06458 assert( rc==SQLITE_OK ); 06459 } 06460 }else if( argc==3 ){ 06461 /* The specific segment asked for could not be found. */ 06462 generateError(pContext, "dump_terms", "segment not found"); 06463 }else{ 06464 /* No segments found. */ 06465 /* TODO(shess): It should be impossible to reach this. This 06466 ** case can only happen for an empty table, in which case 06467 ** SQLite has no rows to call this function on. 06468 */ 06469 sqlite3_result_null(pContext); 06470 } 06471 } 06472 sqlite3Fts2HashClear(&terms); 06473 } 06474 } 06475 06476 /* Expand the DL_DEFAULT doclist in pData into a text result in 06477 ** pContext. 06478 */ 06479 static void createDoclistResult(sqlite3_context *pContext, 06480 const char *pData, int nData){ 06481 DataBuffer dump; 06482 DLReader dlReader; 06483 06484 assert( pData!=NULL && nData>0 ); 06485 06486 dataBufferInit(&dump, 0); 06487 dlrInit(&dlReader, DL_DEFAULT, pData, nData); 06488 for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ 06489 char buf[256]; 06490 PLReader plReader; 06491 06492 plrInit(&plReader, &dlReader); 06493 if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ 06494 sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); 06495 dataBufferAppend(&dump, buf, strlen(buf)); 06496 }else{ 06497 int iColumn = plrColumn(&plReader); 06498 06499 sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", 06500 dlrDocid(&dlReader), iColumn); 06501 dataBufferAppend(&dump, buf, strlen(buf)); 06502 06503 for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ 06504 if( plrColumn(&plReader)!=iColumn ){ 06505 iColumn = plrColumn(&plReader); 06506 sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); 06507 assert( dump.nData>0 ); 06508 dump.nData--; /* Overwrite trailing space. */ 06509 assert( dump.pData[dump.nData]==' '); 06510 dataBufferAppend(&dump, buf, strlen(buf)); 06511 } 06512 if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ 06513 sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", 06514 plrPosition(&plReader), 06515 plrStartOffset(&plReader), plrEndOffset(&plReader)); 06516 }else if( DL_DEFAULT==DL_POSITIONS ){ 06517 sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); 06518 }else{ 06519 assert( NULL=="Unhandled DL_DEFAULT value"); 06520 } 06521 dataBufferAppend(&dump, buf, strlen(buf)); 06522 } 06523 plrDestroy(&plReader); 06524 06525 assert( dump.nData>0 ); 06526 dump.nData--; /* Overwrite trailing space. 
*/ 06527 assert( dump.pData[dump.nData]==' '); 06528 dataBufferAppend(&dump, "]] ", 3); 06529 } 06530 } 06531 dlrDestroy(&dlReader); 06532 06533 assert( dump.nData>0 ); 06534 dump.nData--; /* Overwrite trailing space. */ 06535 assert( dump.pData[dump.nData]==' '); 06536 dump.pData[dump.nData] = '\0'; 06537 assert( dump.nData>0 ); 06538 06539 /* Passes ownership of dump's buffer to pContext. */ 06540 sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); 06541 dump.pData = NULL; 06542 dump.nData = dump.nCapacity = 0; 06543 } 06544 06545 /* Implements dump_doclist() for use in inspecting the fts2 index from 06546 ** tests. TEXT result containing a string representation of the 06547 ** doclist for the indicated term. dump_doclist(t, term, level, idx) 06548 ** dumps the doclist for term from the segment specified by level, idx 06549 ** (in %_segdir), while dump_doclist(t, term) dumps the logical 06550 ** doclist for the term across all segments. The per-segment doclist 06551 ** can contain deletions, while the full-index doclist will not 06552 ** (deletions are omitted). 06553 ** 06554 ** Result formats differ with the setting of DL_DEFAULT. Examples: 06555 ** 06556 ** DL_DOCIDS: [1] [3] [7] 06557 ** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] 06558 ** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] 06559 ** 06560 ** In each case the number after the outer '[' is the docid. In the 06561 ** latter two cases, the number before the inner '[' is the column 06562 ** associated with the values within. For DL_POSITIONS the numbers 06563 ** within are the positions, for DL_POSITIONS_OFFSETS they are the 06564 ** position, the start offset, and the end offset. 06565 */ 06566 static void dumpDoclistFunc( 06567 sqlite3_context *pContext, 06568 int argc, sqlite3_value **argv 06569 ){ 06570 fulltext_cursor *pCursor; 06571 if( argc!=2 && argc!=4 ){ 06572 generateError(pContext, "dump_doclist", "incorrect arguments"); 06573 }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || 06574 sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ 06575 generateError(pContext, "dump_doclist", "illegal first argument"); 06576 }else if( sqlite3_value_text(argv[1])==NULL || 06577 sqlite3_value_text(argv[1])[0]=='\0' ){ 06578 generateError(pContext, "dump_doclist", "empty second argument"); 06579 }else{ 06580 const char *pTerm = (const char *)sqlite3_value_text(argv[1]); 06581 const int nTerm = strlen(pTerm); 06582 fulltext_vtab *v; 06583 int rc; 06584 DataBuffer doclist; 06585 06586 memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); 06587 v = cursor_vtab(pCursor); 06588 06589 dataBufferInit(&doclist, 0); 06590 06591 /* termSelect() yields the same logical doclist that queries are 06592 ** run against. 06593 */ 06594 if( argc==2 ){ 06595 rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); 06596 }else{ 06597 sqlite3_stmt *s = NULL; 06598 06599 /* Get our specific segment's information. */ 06600 rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); 06601 if( rc==SQLITE_OK ){ 06602 rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); 06603 if( rc==SQLITE_OK ){ 06604 rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); 06605 } 06606 } 06607 06608 if( rc==SQLITE_OK ){ 06609 rc = sqlite3_step(s); 06610 06611 if( rc==SQLITE_DONE ){ 06612 dataBufferDestroy(&doclist); 06613 generateError(pContext, "dump_doclist", "segment not found"); 06614 return; 06615 } 06616 06617 /* Found a segment, load it into doclist.
*/ 06618 if( rc==SQLITE_ROW ){ 06619 const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); 06620 const char *pData = sqlite3_column_blob(s, 2); 06621 const int nData = sqlite3_column_bytes(s, 2); 06622 06623 /* loadSegment() is used by termSelect() to load each 06624 ** segment's data. 06625 */ 06626 rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, 06627 &doclist); 06628 if( rc==SQLITE_OK ){ 06629 rc = sqlite3_step(s); 06630 06631 /* Should not have more than one matching segment. */ 06632 if( rc!=SQLITE_DONE ){ 06633 sqlite3_reset(s); 06634 dataBufferDestroy(&doclist); 06635 generateError(pContext, "dump_doclist", "invalid segdir"); 06636 return; 06637 } 06638 rc = SQLITE_OK; 06639 } 06640 } 06641 } 06642 06643 sqlite3_reset(s); 06644 } 06645 06646 if( rc==SQLITE_OK ){ 06647 if( doclist.nData>0 ){ 06648 createDoclistResult(pContext, doclist.pData, doclist.nData); 06649 }else{ 06650 /* TODO(shess): This can happen if the term is not present, or 06651 ** if all instances of the term have been deleted and this is 06652 ** an all-index dump. It may be interesting to distinguish 06653 ** these cases. 06654 */ 06655 sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); 06656 } 06657 }else if( rc==SQLITE_NOMEM ){ 06658 /* Handle out-of-memory cases specially because if they are 06659 ** generated in fts2 code they may not be reflected in the db 06660 ** handle. 06661 */ 06662 /* TODO(shess): Handle this more comprehensively. 06663 ** sqlite3ErrStr() has what I need, but is internal. 06664 */ 06665 generateError(pContext, "dump_doclist", "out of memory"); 06666 }else{ 06667 generateError(pContext, "dump_doclist", NULL); 06668 } 06669 06670 dataBufferDestroy(&doclist); 06671 } 06672 } 06673 #endif 06674 06675 /* 06676 ** This routine implements the xFindFunction method for the FTS2 06677 ** virtual table. 06678 */ 06679 static int fulltextFindFunction( 06680 sqlite3_vtab *pVtab, 06681 int nArg, 06682 const char *zName, 06683 void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), 06684 void **ppArg 06685 ){ 06686 if( strcmp(zName,"snippet")==0 ){ 06687 *pxFunc = snippetFunc; 06688 return 1; 06689 }else if( strcmp(zName,"offsets")==0 ){ 06690 *pxFunc = snippetOffsetsFunc; 06691 return 1; 06692 }else if( strcmp(zName,"optimize")==0 ){ 06693 *pxFunc = optimizeFunc; 06694 return 1; 06695 #ifdef SQLITE_TEST 06696 /* NOTE(shess): These functions are present only for testing 06697 ** purposes. No particular effort is made to optimize their 06698 ** execution or how they build their results. 06699 */ 06700 }else if( strcmp(zName,"dump_terms")==0 ){ 06701 /* fprintf(stderr, "Found dump_terms\n"); */ 06702 *pxFunc = dumpTermsFunc; 06703 return 1; 06704 }else if( strcmp(zName,"dump_doclist")==0 ){ 06705 /* fprintf(stderr, "Found dump_doclist\n"); */ 06706 *pxFunc = dumpDoclistFunc; 06707 return 1; 06708 #endif 06709 } 06710 return 0; 06711 } 06712 06713 /* 06714 ** Rename an fts2 table. 
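** The shadow tables (%_content, %_segments, %_segdir) embed the fts2
** table's name, so all three must be renamed alongside it; zName is
** the new name supplied by ALTER TABLE ... RENAME TO.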
06715 */ 06716 static int fulltextRename( 06717 sqlite3_vtab *pVtab, 06718 const char *zName 06719 ){ 06720 fulltext_vtab *p = (fulltext_vtab *)pVtab; 06721 int rc = SQLITE_NOMEM; 06722 char *zSql = sqlite3_mprintf( 06723 "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" 06724 "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" 06725 "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" 06726 , p->zDb, p->zName, zName 06727 , p->zDb, p->zName, zName 06728 , p->zDb, p->zName, zName 06729 ); 06730 if( zSql ){ 06731 rc = sqlite3_exec(p->db, zSql, 0, 0, 0); 06732 sqlite3_free(zSql); 06733 } 06734 return rc; 06735 } 06736 06737 static const sqlite3_module fts2Module = { 06738 /* iVersion */ 0, 06739 /* xCreate */ fulltextCreate, 06740 /* xConnect */ fulltextConnect, 06741 /* xBestIndex */ fulltextBestIndex, 06742 /* xDisconnect */ fulltextDisconnect, 06743 /* xDestroy */ fulltextDestroy, 06744 /* xOpen */ fulltextOpen, 06745 /* xClose */ fulltextClose, 06746 /* xFilter */ fulltextFilter, 06747 /* xNext */ fulltextNext, 06748 /* xEof */ fulltextEof, 06749 /* xColumn */ fulltextColumn, 06750 /* xRowid */ fulltextRowid, 06751 /* xUpdate */ fulltextUpdate, 06752 /* xBegin */ fulltextBegin, 06753 /* xSync */ fulltextSync, 06754 /* xCommit */ fulltextCommit, 06755 /* xRollback */ fulltextRollback, 06756 /* xFindFunction */ fulltextFindFunction, 06757 /* xRename */ fulltextRename, 06758 }; 06759 06760 static void hashDestroy(void *p){ 06761 fts2Hash *pHash = (fts2Hash *)p; 06762 sqlite3Fts2HashClear(pHash); 06763 sqlite3_free(pHash); 06764 } 06765 06766 /* 06767 ** The fts2 built-in tokenizers - "simple" and "porter" - are implemented 06768 ** in files fts2_tokenizer1.c and fts2_porter.c respectively. The following 06769 ** forward declarations are for functions declared in these files 06770 ** used to retrieve the respective implementations. 06771 ** 06772 ** Calling sqlite3Fts2SimpleTokenizerModule() sets the value pointed 06773 ** to by the argument to point at the "simple" tokenizer implementation. 06774 ** Function ...PorterTokenizerModule() sets *pModule to point to the 06775 ** porter tokenizer/stemmer implementation. 06776 */ 06777 void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); 06778 void sqlite3Fts2PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); 06779 void sqlite3Fts2IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); 06780 06781 int sqlite3Fts2InitHashTable(sqlite3 *, fts2Hash *, const char *); 06782 06783 /* 06784 ** Initialise the fts2 extension. If this extension is built as part 06785 ** of the sqlite library, then this function is called directly by 06786 ** SQLite. If fts2 is built as a dynamically loadable extension, this 06787 ** function is called by the sqlite3_extension_init() entry point. 06788 */ 06789 int sqlite3Fts2Init(sqlite3 *db){ 06790 int rc = SQLITE_OK; 06791 fts2Hash *pHash = 0; 06792 const sqlite3_tokenizer_module *pSimple = 0; 06793 const sqlite3_tokenizer_module *pPorter = 0; 06794 const sqlite3_tokenizer_module *pIcu = 0; 06795 06796 sqlite3Fts2SimpleTokenizerModule(&pSimple); 06797 sqlite3Fts2PorterTokenizerModule(&pPorter); 06798 #ifdef SQLITE_ENABLE_ICU 06799 sqlite3Fts2IcuTokenizerModule(&pIcu); 06800 #endif 06801 06802 /* Allocate and initialise the hash-table used to store tokenizers.
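** Each hash entry maps a tokenizer name to its
** sqlite3_tokenizer_module. A registered tokenizer is selected at
** table-creation time, e.g. (illustrative SQL, hypothetical table):
**
**   CREATE VIRTUAL TABLE papers USING fts2(body, tokenize porter);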
*/ 06803 pHash = sqlite3_malloc(sizeof(fts2Hash)); 06804 if( !pHash ){ 06805 rc = SQLITE_NOMEM; 06806 }else{ 06807 sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); 06808 } 06809 06810 /* Load the built-in tokenizers into the hash table */ 06811 if( rc==SQLITE_OK ){ 06812 if( sqlite3Fts2HashInsert(pHash, "simple", 7, (void *)pSimple) 06813 || sqlite3Fts2HashInsert(pHash, "porter", 7, (void *)pPorter) 06814 || (pIcu && sqlite3Fts2HashInsert(pHash, "icu", 4, (void *)pIcu)) 06815 ){ 06816 rc = SQLITE_NOMEM; 06817 } 06818 } 06819 06820 /* Create the virtual table wrapper around the hash-table and overload 06821 ** the scalar functions. If this is successful, register the 06822 ** module with sqlite. 06823 */ 06824 if( SQLITE_OK==rc 06825 && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer")) 06826 && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) 06827 && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) 06828 && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) 06829 #ifdef SQLITE_TEST 06830 && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) 06831 && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) 06832 #endif 06833 ){ 06834 return sqlite3_create_module_v2( 06835 db, "fts2", &fts2Module, (void *)pHash, hashDestroy 06836 ); 06837 } 06838 06839 /* An error has occurred. Delete the hash table and return the error code. */ 06840 assert( rc!=SQLITE_OK ); 06841 if( pHash ){ 06842 sqlite3Fts2HashClear(pHash); 06843 sqlite3_free(pHash); 06844 } 06845 return rc; 06846 } 06847 06848 #if !SQLITE_CORE 06849 int sqlite3_extension_init( 06850 sqlite3 *db, 06851 char **pzErrMsg, 06852 const sqlite3_api_routines *pApi 06853 ){ 06854 SQLITE_EXTENSION_INIT2(pApi) 06855 return sqlite3Fts2Init(db); 06856 } 06857 #endif 06858 06859 #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */
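/* Usage sketch for the functions registered above (illustrative SQL;
** table and column names are hypothetical):
**
**   CREATE VIRTUAL TABLE t USING fts2(content);
**   INSERT INTO t(content) VALUES('full text search for sqlite');
**   SELECT snippet(t, '<b>', '</b>', '...') FROM t WHERE t MATCH 'text';
**   SELECT offsets(t) FROM t WHERE t MATCH 'text';
**   SELECT optimize(t) FROM t LIMIT 1;
**
** snippet() and offsets() format the matches of the current row;
** optimize(t) merges all segments into one, returning "Index
** optimized" (or "Index already optimal").
*/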