/*-------------------------------------------------------------------------
 *
 * ts_typanalyze.c
 *    functions for gathering statistics from tsvector columns
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *    src/backend/tsearch/ts_typanalyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "commands/vacuum.h"
#include "tsearch/ts_type.h"
#include "utils/builtins.h"
#include "utils/hashutils.h"


/* A hash key for lexemes */
typedef struct
{
    char       *lexeme;         /* lexeme (not NULL terminated!) */
    int         length;         /* its length in bytes */
} LexemeHashKey;

/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
    LexemeHashKey key;          /* This is 'e' from the LC algorithm. */
    int         frequency;      /* This is 'f'. */
    int         delta;          /* And this is 'delta'. */
} TrackItem;

static void compute_tsvector_stats(VacAttrStats *stats,
                                   AnalyzeAttrFetchFunc fetchfunc,
                                   int samplerows,
                                   double totalrows);
static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current);
static uint32 lexeme_hash(const void *key, Size keysize);
static int  lexeme_match(const void *key1, const void *key2, Size keysize);
static int  lexeme_compare(const void *key1, const void *key2);
static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2);
static int  trackitem_compare_lexemes(const void *e1, const void *e2);


/*
 * ts_typanalyze -- a custom typanalyze function for tsvector columns
 */
Datum
ts_typanalyze(PG_FUNCTION_ARGS)
{
    VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
    Form_pg_attribute attr = stats->attr;

    /* If the attstattarget column is negative, use the default value */
    /* NB: it is okay to scribble on stats->attr since it's a copy */
    if (attr->attstattarget < 0)
        attr->attstattarget = default_statistics_target;

    stats->compute_stats = compute_tsvector_stats;
    /* see comment about the choice of minrows in commands/analyze.c */
    stats->minrows = 300 * attr->attstattarget;

    PG_RETURN_BOOL(true);
}

/*
 * compute_tsvector_stats() -- compute statistics for a tsvector column
 *
 * This function computes statistics that are useful for determining @@
 * operations' selectivity, along with the fraction of non-null rows and
 * average width.
 *
 * Instead of finding the most common values, as we do for most datatypes,
 * we're looking for the most common lexemes. This is more useful, because
 * there most probably won't be any two rows with the same tsvector and thus
 * the notion of a MCV is a bit bogus with this datatype. With a list of the
 * most common lexemes we can do a better job at figuring out @@ selectivity.
 *
 * For the same reasons we assume that tsvector columns are unique when
 * determining the number of distinct values.
 *
 * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 * frequency counts over data streams" by G. S. Manku and R. Motwani, in
 * Proceedings of the 28th International Conference on Very Large Data Bases,
 * Hong Kong, China, August 2002, section 4.2. The paper is available at
 * http://www.vldb.org/conf/2002/S10P03.pdf
 *
 * The Lossy Counting (aka LC) algorithm goes like this:
 * Let s be the threshold frequency for an item (the minimum frequency we
 * are interested in) and epsilon the error margin for the frequency. Let D
 * be a set of triples (e, f, delta), where e is an element value, f is that
 * element's frequency (actually, its current occurrence count) and delta is
 * the maximum error in f. We start with D empty and process the elements in
 * batches of size w. (The batch size is also known as "bucket size" and is
 * equal to 1/epsilon.) Let the current batch number be b_current, starting
 * with 1. For each element e we either increment its f count, if it's
 * already in D, or insert a new triple into D with values (e, 1, b_current
 * - 1). After processing each batch we prune D, by removing from it all
 * elements with f + delta <= b_current. After the algorithm finishes we
 * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 * where N is the total number of elements in the input. We emit the
 * remaining elements with estimated frequency f/N. The LC paper proves
 * that this algorithm finds all elements with true frequency at least s,
 * and that no frequency is overestimated, nor underestimated by more than
 * epsilon. Furthermore, given reasonable assumptions about the input
 * distribution, the required table size is no more than about 7 times w.
 * (An illustrative, self-contained sketch of this loop appears after
 * compute_tsvector_stats, below.)
 *
 * We set s to be the estimated frequency of the K'th word in a natural
 * language's frequency table, where K is the target number of entries in
 * the MCELEM array plus an arbitrary constant, meant to reflect the fact
 * that the most common words in any language would usually be stopwords
 * so we will not actually see them in the input. We assume that the
 * distribution of word frequencies (including the stopwords) follows Zipf's
 * law with an exponent of 1.
 *
 * Assuming Zipfian distribution, the frequency of the K'th word is equal
 * to 1/(K * H(W)) where H(n) is the harmonic number 1 + 1/2 + ... + 1/n and
 * W is the number of words in the language. Putting W as one million, we
 * have H(W) ~= ln(10^6) + 0.577 ~= 14.4, so 1/H(W) ~= 0.07 and the K'th
 * word's frequency is roughly 0.07/K. Assuming the top 10 words are
 * stopwords gives s = 0.07/(K + 10). We set epsilon = s/10, which gives
 * bucket width w = (K + 10)/0.007 and maximum expected hashtable size of
 * about 1000 * (K + 10).
 *
 * Note: in the above discussion, s, epsilon, and f/N are in terms of a
 * lexeme's frequency as a fraction of all lexemes seen in the input.
 * However, what we actually want to store in the finished pg_statistic
 * entry is each lexeme's frequency as a fraction of all rows that it occurs
 * in. Assuming that the input tsvectors are correctly constructed, no
 * lexeme occurs more than once per tsvector, so the final count f is a
 * correct estimate of the number of input tsvectors it occurs in, and we
 * need only change the divisor from N to nonnull_cnt to get the number we
 * want.
 */
static void
compute_tsvector_stats(VacAttrStats *stats,
                       AnalyzeAttrFetchFunc fetchfunc,
                       int samplerows,
                       double totalrows)
{
    int         num_mcelem;
    int         null_cnt = 0;
    double      total_width = 0;

    /* This is D from the LC algorithm. */
    HTAB       *lexemes_tab;
    HASHCTL     hash_ctl;
    HASH_SEQ_STATUS scan_status;

    /* This is the current bucket number from the LC algorithm */
    int         b_current;

    /* This is 'w' from the LC algorithm */
    int         bucket_width;
    int         vector_no,
                lexeme_no;
    LexemeHashKey hash_key;
    TrackItem  *item;

    /*
     * We want statistics_target * 10 lexemes in the MCELEM array. This
     * multiplier is pretty arbitrary, but is meant to reflect the fact that
     * the number of individual lexeme values tracked in pg_statistic ought
     * to be more than the number of values for a simple scalar column.
     */
    num_mcelem = stats->attr->attstattarget * 10;

    /*
     * We set bucket width equal to (num_mcelem + 10) / 0.007 as per the
     * comment above.
     */
    bucket_width = (num_mcelem + 10) * 1000 / 7;
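
    /*
     * As a worked example (not affecting the code): with the default
     * statistics target of 100, num_mcelem = 1000 and bucket_width =
     * 1010 * 1000 / 7 = 144285, i.e. the hashtable is pruned about once
     * every 144285 lexemes.
     */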

    /*
     * Create the hashtable. It will be in local memory, so we don't need to
     * worry about overflowing the initial size. Also we don't need to pay
     * any attention to locking and memory management.
     */
    MemSet(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(LexemeHashKey);
    hash_ctl.entrysize = sizeof(TrackItem);
    hash_ctl.hash = lexeme_hash;
    hash_ctl.match = lexeme_match;
    hash_ctl.hcxt = CurrentMemoryContext;
    lexemes_tab = hash_create("Analyzed lexemes table",
                              num_mcelem,
                              &hash_ctl,
                              HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

    /* Initialize counters. */
    b_current = 1;
    lexeme_no = 0;

    /* Loop over the tsvectors. */
    for (vector_no = 0; vector_no < samplerows; vector_no++)
    {
        Datum       value;
        bool        isnull;
        TSVector    vector;
        WordEntry  *curentryptr;
        char       *lexemesptr;
        int         j;

        vacuum_delay_point();

        value = fetchfunc(stats, vector_no, &isnull);

        /*
         * Check for null/nonnull.
         */
        if (isnull)
        {
            null_cnt++;
            continue;
        }

        /*
         * Add up widths for average-width calculation. Since it's a
         * tsvector, we know it's varlena. As in the regular
         * compute_minimal_stats function, we use the toasted width for this
         * calculation.
         */
        total_width += VARSIZE_ANY(DatumGetPointer(value));

        /*
         * Now detoast the tsvector if needed.
         */
        vector = DatumGetTSVector(value);

        /*
         * We loop through the lexemes in the tsvector and add them to our
         * tracking hashtable.
         */
        lexemesptr = STRPTR(vector);
        curentryptr = ARRPTR(vector);
        for (j = 0; j < vector->size; j++)
        {
            bool        found;

            /*
             * Construct a hash key. The key points into the (detoasted)
             * tsvector value at this point, but if a new entry is created,
             * we make a copy of it. This way we can free the tsvector value
             * once we've processed all its lexemes.
             */
            hash_key.lexeme = lexemesptr + curentryptr->pos;
            hash_key.length = curentryptr->len;

            /* Lookup current lexeme in hashtable, adding it if new */
            item = (TrackItem *) hash_search(lexemes_tab,
                                             (const void *) &hash_key,
                                             HASH_ENTER, &found);

            if (found)
            {
                /* The lexeme is already on the tracking list */
                item->frequency++;
            }
            else
            {
                /* Initialize new tracking list element */
                item->frequency = 1;
                item->delta = b_current - 1;

                item->key.lexeme = palloc(hash_key.length);
                memcpy(item->key.lexeme, hash_key.lexeme, hash_key.length);
            }

            /* lexeme_no is the number of elements processed (ie N) */
            lexeme_no++;

            /* We prune the D structure after processing each bucket */
            if (lexeme_no % bucket_width == 0)
            {
                prune_lexemes_hashtable(lexemes_tab, b_current);
                b_current++;
            }

            /* Advance to the next WordEntry in the tsvector */
            curentryptr++;
        }

        /* If the vector was toasted, free the detoasted copy. */
        if (TSVectorGetDatum(vector) != value)
            pfree(vector);
    }

    /* We can only compute real stats if we found some non-null values. */
    if (null_cnt < samplerows)
    {
        int         nonnull_cnt = samplerows - null_cnt;
        int         i;
        TrackItem **sort_table;
        int         track_len;
        int         cutoff_freq;
        int         minfreq,
                    maxfreq;

        stats->stats_valid = true;
        /* Do the simple null-frac and average width stats */
        stats->stanullfrac = (double) null_cnt / (double) samplerows;
        stats->stawidth = total_width / (double) nonnull_cnt;

        /* Assume it's a unique column (see notes above) */
        stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
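
        /*
         * (Negative stadistinct means "multiply by row count": e.g. with
         * stanullfrac = 0.1, stadistinct = -0.9 says we expect about
         * 0.9 * totalrows distinct non-null values.)
         */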

        /*
         * Construct an array of the interesting hashtable items, that is,
         * those meeting the cutoff frequency (s - epsilon)*N. Also identify
         * the minimum and maximum frequencies among these items.
         *
         * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
         * frequency is 9*N / bucket_width.
         */
        cutoff_freq = 9 * lexeme_no / bucket_width;
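
        /*
         * For example, under the default-target numbers above (bucket_width
         * = 144285), a sample containing N = 1000000 lexemes would give
         * cutoff_freq = 9000000 / 144285 = 62, so only lexemes seen at
         * least 63 times would be kept.
         */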

        i = hash_get_num_entries(lexemes_tab);  /* surely enough space */
        sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

        hash_seq_init(&scan_status, lexemes_tab);
        track_len = 0;
        minfreq = lexeme_no;
        maxfreq = 0;
        while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
        {
            if (item->frequency > cutoff_freq)
            {
                sort_table[track_len++] = item;
                minfreq = Min(minfreq, item->frequency);
                maxfreq = Max(maxfreq, item->frequency);
            }
        }
        Assert(track_len <= i);

        /* emit some statistics for debug purposes */
        elog(DEBUG3, "tsvector_stats: target # mces = %d, bucket width = %d, "
             "# lexemes = %d, hashtable size = %d, usable entries = %d",
             num_mcelem, bucket_width, lexeme_no, i, track_len);

        /*
         * If we obtained more lexemes than we really want, get rid of those
         * with least frequencies. The easiest way is to qsort the array into
         * descending frequency order and truncate the array.
         */
        if (num_mcelem < track_len)
        {
            qsort(sort_table, track_len, sizeof(TrackItem *),
                  trackitem_compare_frequencies_desc);
            /* reset minfreq to the smallest frequency we're keeping */
            minfreq = sort_table[num_mcelem - 1]->frequency;
        }
        else
            num_mcelem = track_len;

        /* Generate MCELEM slot entry */
        if (num_mcelem > 0)
        {
            MemoryContext old_context;
            Datum      *mcelem_values;
            float4     *mcelem_freqs;

            /*
             * We want to store statistics sorted on the lexeme value using
             * first length, then byte-for-byte comparison. The reason for
             * doing length comparison first is that we don't care about the
             * ordering so long as it's consistent, and comparing lengths
             * first gives us a chance to avoid a strncmp() call.
             *
             * This is different from what we do with scalar statistics --
             * they get sorted on frequencies. The rationale is that we
             * usually search through most common elements looking for a
             * specific value, so we can grab its frequency. When values are
             * presorted we can employ binary search for that. See
             * ts_selfuncs.c for a real usage scenario.
             */
            qsort(sort_table, num_mcelem, sizeof(TrackItem *),
                  trackitem_compare_lexemes);

            /* Must copy the target values into anl_context */
            old_context = MemoryContextSwitchTo(stats->anl_context);

            /*
             * We sorted statistics on the lexeme value, but we want to be
             * able to find out the minimal and maximal frequency without
             * going through all the values. We keep those two extra
             * frequencies in two extra cells in mcelem_freqs.
             *
             * (Note: the MCELEM statistics slot definition allows for a
             * third extra number containing the frequency of nulls, but we
             * don't create that for a tsvector column, since null elements
             * aren't possible.)
             */
            mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
            mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4));
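
            /*
             * The layout we build below is therefore
             *
             *   mcelem_freqs = { f_1/n, ..., f_K/n, minfreq/n, maxfreq/n }
             *
             * where n = nonnull_cnt and the f_i are parallel to the
             * length-then-bytewise-sorted mcelem_values.
             */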

            /*
             * See comments above about use of nonnull_cnt as the divisor
             * for the final frequency estimates.
             */
            for (i = 0; i < num_mcelem; i++)
            {
                TrackItem  *item = sort_table[i];

                mcelem_values[i] =
                    PointerGetDatum(cstring_to_text_with_len(item->key.lexeme,
                                                             item->key.length));
                mcelem_freqs[i] = (double) item->frequency / (double) nonnull_cnt;
            }
            mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
            mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt;
            MemoryContextSwitchTo(old_context);

            stats->stakind[0] = STATISTIC_KIND_MCELEM;
            stats->staop[0] = TextEqualOperator;
            stats->stacoll[0] = DEFAULT_COLLATION_OID;
            stats->stanumbers[0] = mcelem_freqs;
            /* See above comment about two extra frequency fields */
            stats->numnumbers[0] = num_mcelem + 2;
            stats->stavalues[0] = mcelem_values;
            stats->numvalues[0] = num_mcelem;
            /* We are storing text values */
            stats->statypid[0] = TEXTOID;
            stats->statyplen[0] = -1;   /* typlen, -1 for varlena */
            stats->statypbyval[0] = false;
            stats->statypalign[0] = 'i';
        }
    }
    else
    {
        /* We found only nulls; assume the column is entirely null */
        stats->stats_valid = true;
        stats->stanullfrac = 1.0;
        stats->stawidth = 0;    /* "unknown" */
        stats->stadistinct = 0.0;   /* "unknown" */
    }

    /*
     * We don't need to bother cleaning up any of our temporary palloc's. The
     * hashtable should also go away, as it used a child memory context.
     */
}
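
/*
 * The following is a minimal, self-contained sketch of the Lossy Counting
 * loop described in the big comment above, included for illustration only:
 * it is compiled only if LC_EXAMPLE is defined, and the names (LC_EXAMPLE,
 * LCExampleEntry, lc_example_count) are hypothetical, not part of
 * PostgreSQL. A fixed-size, linearly scanned array stands in for the
 * dynahash table used by the real code; the insertion rule (delta =
 * b_current - 1) and the pruning rule (drop when f + delta <= b_current)
 * are the same as above.
 */
#ifdef LC_EXAMPLE

#define LC_EXAMPLE_TABLE_SIZE 1024

typedef struct
{
    int         value;          /* 'e' from the LC algorithm */
    int         frequency;      /* 'f' */
    int         delta;          /* 'delta' */
    bool        used;           /* is this slot occupied? */
} LCExampleEntry;

/*
 * Count elements[0..n-1], pruning after every bucket_width inputs.
 * "table" must point to LC_EXAMPLE_TABLE_SIZE zero-initialized entries.
 * Returns the final bucket number, so the caller can apply the
 * (s - epsilon) * N cutoff afterwards, as compute_tsvector_stats does.
 */
static int
lc_example_count(const int *elements, int n, int bucket_width,
                 LCExampleEntry *table)
{
    int         b_current = 1;
    int         i,
                j;

    for (i = 0; i < n; i++)
    {
        LCExampleEntry *free_slot = NULL;
        bool        found = false;

        /* linear scan stands in for the real hash lookup */
        for (j = 0; j < LC_EXAMPLE_TABLE_SIZE; j++)
        {
            if (table[j].used && table[j].value == elements[i])
            {
                table[j].frequency++;   /* existing triple: bump f */
                found = true;
                break;
            }
            if (!table[j].used && free_slot == NULL)
                free_slot = &table[j];
        }
        if (!found && free_slot != NULL)
        {
            /* insert a new triple (e, 1, b_current - 1) */
            free_slot->value = elements[i];
            free_slot->frequency = 1;
            free_slot->delta = b_current - 1;
            free_slot->used = true;
        }

        /* prune at every bucket boundary, as in the real code */
        if ((i + 1) % bucket_width == 0)
        {
            for (j = 0; j < LC_EXAMPLE_TABLE_SIZE; j++)
            {
                if (table[j].used &&
                    table[j].frequency + table[j].delta <= b_current)
                    table[j].used = false;
            }
            b_current++;
        }
    }
    return b_current;
}

#endif                          /* LC_EXAMPLE */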

/*
 * A function to prune the D structure from the Lossy Counting algorithm.
 * Consult compute_tsvector_stats() for wider explanation.
 */
static void
prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
{
    HASH_SEQ_STATUS scan_status;
    TrackItem  *item;

    hash_seq_init(&scan_status, lexemes_tab);
    while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
    {
        if (item->frequency + item->delta <= b_current)
        {
            char       *lexeme = item->key.lexeme;

            if (hash_search(lexemes_tab, (const void *) &item->key,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "hash table corrupted");
            pfree(lexeme);
        }
    }
}

/*
 * Hash functions for lexemes. They are strings, but not NULL terminated,
 * so we need a special hash function.
 */
static uint32
lexeme_hash(const void *key, Size keysize)
{
    const LexemeHashKey *l = (const LexemeHashKey *) key;

    return DatumGetUInt32(hash_any((const unsigned char *) l->lexeme,
                                   l->length));
}

/*
 * Matching function for lexemes, to be used in hashtable lookups.
 */
static int
lexeme_match(const void *key1, const void *key2, Size keysize)
{
    /* The keysize parameter is superfluous, the keys store their lengths */
    return lexeme_compare(key1, key2);
}

/*
 * Comparison function for lexemes. Note that this sorts first on length,
 * then byte-for-byte, so e.g. "zz" sorts before "aaa"; the ordering only
 * needs to be self-consistent (see comments in compute_tsvector_stats).
 */
static int
lexeme_compare(const void *key1, const void *key2)
{
    const LexemeHashKey *d1 = (const LexemeHashKey *) key1;
    const LexemeHashKey *d2 = (const LexemeHashKey *) key2;

    /* First, compare by length */
    if (d1->length > d2->length)
        return 1;
    else if (d1->length < d2->length)
        return -1;
    /* Lengths are equal, do a byte-by-byte comparison */
    return strncmp(d1->lexeme, d2->lexeme, d1->length);
}

/*
 * qsort() comparator for sorting TrackItems on frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return (*t2)->frequency - (*t1)->frequency;
}

/*
 * qsort() comparator for sorting TrackItems on lexemes
 */
static int
trackitem_compare_lexemes(const void *e1, const void *e2)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return lexeme_compare(&(*t1)->key, &(*t2)->key);
}