/** @license 2019 Neil Edelman, distributed under the terms of the
 [MIT License](https://opensource.org/licenses/MIT).

 @abstract Source ; examples .

 @subtitle Hash table

 ![Example of table.](../web/table.png)

 `table` implements a set or map of `entry` as a hash table. It must be
 supplied a `hash_fn` and either an `is_equal_fn` or an `inverse_hash_fn`.

 @param[TABLE_NAME, TABLE_KEY]
 A name that satisfies `C` naming conventions when mangled and a valid `key`
 type associated therewith; required. The private names are prefixed in a
 manner to avoid collisions.

 @param[TABLE_HASH, TABLE_IS_EQUAL, TABLE_INVERSE]
 `TABLE_HASH`, and either `TABLE_IS_EQUAL` or `TABLE_INVERSE`, but not both,
 are required. Functions satisfying `hash_fn`, and `is_equal_fn` or
 `inverse_hash_fn`.

 @param[TABLE_VALUE]
 An optional type that is the payload of the key, thus making this a map or
 associative array. (If the key is part of an aggregate pointer, it will be
 more efficient and robust to use a set with a type conversion instead of
 storing related pointers in a map.)

 @param[TABLE_UINT]
 This is `uint`, the unsigned type of the hash of the key given by `hash_fn`;
 defaults to `size_t`.

 @param[TABLE_EXPECT_TRAIT]
 Do not un-define certain variables for subsequent inclusion in a trait.

 @param[TABLE_DEFAULT_NAME, TABLE_DEFAULT]
 Default trait; a name that satisfies `C` naming conventions when mangled and
 a `value` used as the default in the corresponding `get` function. There can
 be multiple defaults, but only one can omit `TABLE_DEFAULT_NAME`.

 @param[TABLE_TO_STRING_NAME, TABLE_TO_STRING]
 To string trait; a name that satisfies `C` naming conventions when mangled
 and a function implementing `to_string_fn`. There can be multiple to string
 traits, but only one can omit `TABLE_TO_STRING_NAME`.

 @std C89 */

#if !defined(TABLE_NAME) || !defined(TABLE_KEY) || !defined(TABLE_HASH) \
	|| !(defined(TABLE_IS_EQUAL) ^ defined(TABLE_INVERSE))
#error Name TABLE_NAME, tag type TABLE_KEY, functions TABLE_HASH, and \
	TABLE_IS_EQUAL or TABLE_INVERSE (but not both) undefined.
#endif
#if defined(TABLE_DEFAULT_NAME) || defined(TABLE_DEFAULT)
#define TABLE_DEFAULT_TRAIT 1
#else
#define TABLE_DEFAULT_TRAIT 0
#endif
#if defined(TABLE_TO_STRING_NAME) || defined(TABLE_TO_STRING)
#define TABLE_TO_STRING_TRAIT 1
#else
#define TABLE_TO_STRING_TRAIT 0
#endif
#define TABLE_TRAITS TABLE_DEFAULT_TRAIT + TABLE_TO_STRING_TRAIT
#if TABLE_TRAITS > 1
#error Only one trait per include is allowed; use TABLE_EXPECT_TRAIT.
#endif
#if defined(TABLE_DEFAULT_NAME) && !defined(TABLE_DEFAULT)
#error TABLE_DEFAULT_NAME requires TABLE_DEFAULT.
#endif
#if defined(TABLE_TO_STRING_NAME) && !defined(TABLE_TO_STRING)
#error TABLE_TO_STRING_NAME requires TABLE_TO_STRING.
#endif

#ifndef TABLE_H /* */

#if TABLE_TRAITS == 0 /* */

#ifdef TABLE_VALUE /* */

/** @return Key from `e`. */
static PN_(key) PN_(entry_key)(PN_(entry) e) {
#ifdef TABLE_VALUE
	return e.key;
#else
	return e;
#endif
}

/* Address is hash modulo size of table. Any occupied buckets at the head of
 the linked structure are closed, that is, the address equals the index.
 Other, open buckets that have the same address are stored in otherwise
 vacant buckets and linked from the closed bucket by `next`. */
struct PN_(bucket) {
	PN_(uint) next; /* Bucket index, including `TABLE_NULL` and `TABLE_END`. */
	PN_(uint) hash;
#ifndef TABLE_INVERSE
	PN_(key) key;
#endif
#ifdef TABLE_VALUE
	PN_(value) value;
#endif
};
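/* Illustrative sketch, not part of the original header: with capacity 8,
 suppose three occupied keys `A`, `B`, and `C` hash to the same address 3
 (see `to_bucket` below). One of them occupies the closed bucket 3; the other
 two are open and live in otherwise-vacant buckets taken from the top of the
 table, say 7 and 6, chained by `next`:

	index  0     1     2     3     4     5     6     7
	key    -     -     -     C     -     -     B     A
	next   NULL  NULL  NULL  6     NULL  NULL  7     END

 A query whose hash also addresses 3 starts at bucket 3 and follows `next`
 until an equal key is found or `TABLE_END` is reached. The indices 7 and 6
 are invented for the example; only the chaining scheme comes from the
 structure above. */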
/** Gets the key of an occupied `bucket`. */
static PN_(key) PN_(bucket_key)(const struct PN_(bucket) *const bucket) {
	assert(bucket && bucket->next != TABLE_NULL);
#ifdef TABLE_INVERSE
	return PN_(inverse_hash)(bucket->hash);
#else
	return bucket->key;
#endif
}

/** Gets the value of an occupied `bucket`, which might be the same as the
 key. */
static PN_(value) PN_(bucket_value)(const struct PN_(bucket) *const bucket) {
	assert(bucket && bucket->next != TABLE_NULL);
#ifdef TABLE_VALUE
	return bucket->value;
#else
	return PN_(bucket_key)(bucket);
#endif
}

/** Fills `entry`, a public structure, with the information of `bucket`. */
static void PN_(to_entry)(const struct PN_(bucket) *const bucket,
	PN_(entry) *const entry) {
	assert(bucket && entry);
#ifdef TABLE_VALUE /* entry { key key; value value; } */
	entry->key = PN_(bucket_key)(bucket);
	memcpy(&entry->value, &bucket->value, sizeof bucket->value);
#else /* entry key */
	*entry = PN_(bucket_key)(bucket);
#endif
}

/** Returns true if the `replace` replaces the `original`. */
typedef int (*PN_(policy_fn))(PN_(key) original, PN_(key) replace);

/** To initialize, see `table`, `TABLE_IDLE`, `{0}` (`C99`), or being
 `static`. The fields should be treated as read-only; any modification is
 liable to cause the table to go into an invalid state.

 ![States.](../web/states.png) */
struct N_(table) { /* "Padding size," good. */
	struct PN_(bucket) *buckets; /* Each bucket has zero/one key specified by
		`next`. */
	/* `size <= capacity`; size is not needed but convenient and allows
	 short-circuiting. `top` is the index of the top of the collision stack;
	 it is updated lazily, so the MSB stores whether the stored top is a step
	 ahead, thereby hysteresis. */
	PN_(uint) log_capacity, size, top;
};

/** The capacity of a non-idle `table` is always a power-of-two. */
static PN_(uint) PN_(capacity)(const struct N_(table) *const table)
	{ return assert(table && table->buckets && table->log_capacity >= 3),
	(PN_(uint))((PN_(uint))1 << table->log_capacity); }

/** @return Indexes the first closed bucket in the set of buckets with the
 same address from non-idle `table` given the `hash`. If the bucket is empty,
 it will have `next = TABLE_NULL` or its own `to_bucket` not equal to the
 index. */
static PN_(uint) PN_(to_bucket)(const struct N_(table) *const table,
	const PN_(uint) hash) { return hash & (PN_(capacity)(table) - 1); }

/** @return Search for the previous link in the bucket to `b` in `table`, if
 it exists, (by restarting and going through the list.)
 @order \O(`bucket size`) */
static struct PN_(bucket) *PN_(prev)(const struct N_(table) *const table,
	const PN_(uint) b) {
	const struct PN_(bucket) *const bucket = table->buckets + b;
	PN_(uint) to_next = TABLE_NULL, next;
	assert(table && bucket->next != TABLE_NULL);
	/* Note that this does not check for corrupted tables; would get assert. */
	for(next = PN_(to_bucket)(table, bucket->hash);
		/* assert(next < capacity), */ next != b;
		to_next = next, next = table->buckets[next].next);
	return to_next != TABLE_NULL ? table->buckets + to_next : 0;
}

/* */

/** `TABLE_INVERSE` is injective, so in that case we only compare hashes.
 @return Whether `a` and `b` are equal. */
static int PN_(equal_buckets)(PN_(ckey) a, PN_(ckey) b) {
#ifdef TABLE_INVERSE
	return (void)a, (void)b, 1;
#else
	return PN_(equal)(a, b);
#endif
}
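/* Worked example, not from the original: because the capacity is always a
 power of two, `to_bucket` reduces a hash with a mask instead of `%`. With
 `log_capacity = 3` the capacity is 8 and the mask is 7, so a hash of 42
 (binary 101010) gives address 42 & 7 = 2; after one doubling the mask is 15
 and the same hash gives 42 & 15 = 10, that is, only the newly-added bit can
 move an entry to a different address. */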
/** `table` will be searched linearly for `key` which has `hash`.
 @fixme Move to front like splay trees? */
static struct PN_(bucket) *PN_(query)(struct N_(table) *const table,
	PN_(ckey) key, const PN_(uint) hash) {
	struct PN_(bucket) *bucket1;
	PN_(uint) head, b0 = TABLE_NULL, b1, b2;
	assert(table && table->buckets && table->log_capacity);
	bucket1 = table->buckets + (head = b1 = PN_(to_bucket)(table, hash));
	/* Not the start of a bucket: empty or in the collision stack. */
	if((b2 = bucket1->next) == TABLE_NULL
		|| PN_(in_stack_range)(table, b1)
		&& b1 != PN_(to_bucket)(table, bucket1->hash)) return 0;
	while(hash != bucket1->hash
		|| !PN_(equal_buckets)(key, PN_(bucket_key)(bucket1))) {
		if(b2 == TABLE_END) return 0;
		bucket1 = table->buckets + (b0 = b1, b1 = b2);
		assert(b1 < PN_(capacity)(table) && PN_(in_stack_range)(table, b1)
			&& b1 != TABLE_NULL);
		b2 = bucket1->next;
	}
#ifdef TABLE_DONT_SPLAY /* */
}
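/* Illustrative trace, not from the original: querying a key whose hash
 addresses bucket 3 in the layout sketched earlier starts at
 `bucket1 = buckets + 3`. While the hashes differ or the keys are unequal, it
 follows `next` to bucket 6, then 7, and gives up on `TABLE_END`; if bucket 3
 had been empty (`next == TABLE_NULL`), or merely borrowed by another
 address's collision stack, it returns null immediately. */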
/** Ensures that `table` has enough buckets to fill `n` more than the size.
 May invalidate pointers into the table and re-arrange the order.
 @return Success; otherwise, `errno` will be set. @throws[realloc]
 @throws[ERANGE] Tried allocating more than can fit in half of `uint`, or
 `realloc` doesn't follow
 [POSIX](https://pubs.opengroup.org/onlinepubs/009695399/functions/realloc.html). */
static int PN_(buffer)(struct N_(table) *const table, const PN_(uint) n) {
	struct PN_(bucket) *buckets;
	const PN_(uint) log_c0 = table->log_capacity,
		c0 = log_c0 ? (PN_(uint))((PN_(uint))1 << log_c0) : 0;
	PN_(uint) log_c1, c1, size1, i, wait, mask;
	assert(table && table->size <= TABLE_HIGH
		&& (!table->buckets && !table->size && !log_c0 && !c0
		|| table->buckets && table->size <= c0 && log_c0 >= 3));
	/* Can we satisfy `n` growth from the buffer? */
	if(TABLE_M1 - table->size < n || TABLE_HIGH < (size1 = table->size + n))
		return errno = ERANGE, 0;
	if(table->buckets) log_c1 = log_c0, c1 = c0 ? c0 : 1;
	else log_c1 = 3, c1 = 8;
	while(c1 < size1) log_c1++, c1 <<= 1;
	if(log_c0 == log_c1) return 1;

	/* Otherwise, need to allocate more. */
	if(!(buckets = realloc(table->buckets, sizeof *buckets * c1)))
		{ if(!errno) errno = ERANGE; return 0; }
	table->top = (c1 - 1) | TABLE_HIGH; /* No stack. */
	table->buckets = buckets, table->log_capacity = log_c1;

	/* Initialize new values. Mask to identify the added bits. */
	{ struct PN_(bucket) *e = buckets + c0, *const e_end = buckets + c1;
		for( ; e < e_end; e++) e->next = TABLE_NULL; }
	mask = (PN_(uint))((((PN_(uint))1 << log_c0) - 1)
		^ (((PN_(uint))1 << log_c1) - 1));

	/* Rehash most closed buckets in the lower half. Create a waiting
	 linked-stack by borrowing `next`. */
	wait = TABLE_END;
	for(i = 0; i < c0; i++) {
		struct PN_(bucket) *idx, *go;
		PN_(uint) g, hash;
		idx = table->buckets + i;
		if(idx->next == TABLE_NULL) continue;
		g = PN_(to_bucket)(table, hash = idx->hash);
		/* It's a power-of-two size, so, like consistent hashing, a closed
		 bucket will remain where it is with probability
		 `old capacity / new capacity`. */
		if(i == g) { idx->next = TABLE_END; continue; }
		if((go = table->buckets + g)->next == TABLE_NULL) {
			/* Priority is given to the first closed bucket; simpler later. */
			struct PN_(bucket) *head;
			PN_(uint) h = g & ~mask;
			assert(h <= g);
			if(h < g && i < h
				&& (head = table->buckets + h,
				assert(head->next != TABLE_NULL),
				PN_(to_bucket)(table, head->hash) == g)) {
				memcpy(go, head, sizeof *head);
				go->next = TABLE_END, head->next = TABLE_NULL;
				/* Fall-through -- the bucket still needs to be put on wait. */
			} else {
				/* If the new bucket is available and this bucket is first. */
				memcpy(go, idx, sizeof *idx);
				go->next = TABLE_END, idx->next = TABLE_NULL;
				continue;
			}
		}
		idx->next = wait, wait = i; /* Push for next sweep. */
	}

	/* Search the waiting stack for buckets that moved concurrently. */
	{ PN_(uint) prev = TABLE_END, w = wait; while(w != TABLE_END) {
		struct PN_(bucket) *waiting = table->buckets + w;
		PN_(uint) cl = PN_(to_bucket)(table, waiting->hash);
		struct PN_(bucket) *const closed = table->buckets + cl;
		assert(cl != w);
		if(closed->next == TABLE_NULL) {
			memcpy(closed, waiting, sizeof *waiting), closed->next = TABLE_END;
			if(prev != TABLE_END) table->buckets[prev].next = waiting->next;
			if(wait == w) wait = waiting->next; /* First, modify head. */
			w = waiting->next, waiting->next = TABLE_NULL;
		} else {
			assert(closed->next == TABLE_END); /* Not in the wait stack. */
			prev = w, w = waiting->next;
		}
	}}

	/* Rebuild the top stack at the high numbers from the waiting at low. */
	while(wait != TABLE_END) {
		struct PN_(bucket) *const waiting = table->buckets + wait;
		PN_(uint) h = PN_(to_bucket)(table, waiting->hash);
		struct PN_(bucket) *const head = table->buckets + h;
		struct PN_(bucket) *top;
		assert(h != wait && head->next != TABLE_NULL);
		PN_(grow_stack)(table), top = table->buckets + table->top;
		memcpy(top, waiting, sizeof *waiting);
		top->next = head->next, head->next = table->top;
		wait = waiting->next, waiting->next = TABLE_NULL; /* Pop. */
	}
	return 1;
}

/** Replace the `key` and `hash` of `bucket`. Don't touch `next`. */
static void PN_(replace_key)(struct PN_(bucket) *const bucket,
	const PN_(key) key, const PN_(uint) hash) {
	(void)key;
	bucket->hash = hash;
#ifndef TABLE_INVERSE
	memcpy(&bucket->key, &key, sizeof key);
#endif
}

/** Replace the entire `entry` and `hash` of `bucket`. Don't touch `next`. */
static void PN_(replace_entry)(struct PN_(bucket) *const bucket,
	const PN_(entry) entry, const PN_(uint) hash) {
	PN_(replace_key)(bucket, PN_(entry_key)(entry), hash);
#ifdef TABLE_VALUE
	memcpy(&bucket->value, &entry.value, sizeof(entry.value));
#endif
}

/** Evicts the spot where `hash` goes in `table`. This results in a space in
 the table. */
static struct PN_(bucket) *PN_(evict)(struct N_(table) *const table,
	const PN_(uint) hash) {
	PN_(uint) i;
	struct PN_(bucket) *bucket;
	if(!PN_(buffer)(table, 1)) return 0; /* Amortized. */
	bucket = table->buckets + (i = PN_(to_bucket)(table, hash)); /* Closed. */
	if(bucket->next != TABLE_NULL) { /* Occupied. */
		int in_stack = PN_(to_bucket)(table, bucket->hash) != i;
		PN_(move_to_top)(table, i);
		bucket->next = in_stack ? TABLE_END : table->top;
	} else { /* Unoccupied. */
		bucket->next = TABLE_END;
	}
	table->size++;
	return bucket;
}

/** Put `entry` in `table`. On a collision, the existing entry is displaced
 only if `policy` exists and returns true; the displaced entry is copied to
 `eject`, if non-null. @return A `table_result`. @throws[malloc]
 @order Amortized \O(max bucket length); the key to another bucket may have
 to be moved to the top; the table might be full and have to be resized. */
static enum table_result PN_(put)(struct N_(table) *const table,
	PN_(entry) entry, PN_(entry) *eject, const PN_(policy_fn) policy) {
	struct PN_(bucket) *bucket;
	const PN_(key) key = PN_(entry_key)(entry);
	const PN_(uint) hash = PN_(hash)(key);
	enum table_result result;
	assert(table);
	if(table->buckets && (bucket = PN_(query)(table, key, hash))) {
		if(!policy || !policy(PN_(bucket_key)(bucket), key))
			return TABLE_YIELD;
		if(eject) PN_(to_entry)(bucket, eject);
		result = TABLE_REPLACE;
	} else {
		if(!(bucket = PN_(evict)(table, hash))) return TABLE_ERROR;
		result = TABLE_UNIQUE;
	}
	PN_(replace_entry)(bucket, entry, hash);
	return result;
}

#ifdef TABLE_VALUE /* */
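/* Usage sketch, not part of the original header. Under the parameters
 documented at the top, an instantiation might look like the following; the
 file name `table.h`, the exact hash and equality signatures, the leading `&`
 on the function macros, and the `zodiac_table` names the macros expand to
 are assumptions for illustration, since the generated typedefs are not shown
 in this excerpt (the usual string and allocation includes are also assumed):

	static size_t zodiac_hash(const char *const s) {
		size_t h = 5381;
		const char *p;
		for(p = s; *p; p++) h = h * 33 + (unsigned char)*p;
		return h;
	}
	static int zodiac_is_equal(const char *const a, const char *const b)
		{ return !strcmp(a, b); }
	#define TABLE_NAME zodiac
	#define TABLE_KEY char *
	#define TABLE_HASH &zodiac_hash
	#define TABLE_IS_EQUAL &zodiac_is_equal
	#include "table.h"

 This would instantiate `struct zodiac_table` and the `zodiac_table_*`
 functions declared below. */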
/** Initialises `table` to idle. @order \Theta(1) @allow */
static void N_(table)(struct N_(table) *const table) {
	assert(table);
	table->buckets = 0;
	table->log_capacity = 0; table->size = 0; table->top = 0;
}

/** Destroys `table` and returns it to idle. @allow */
static void N_(table_)(struct N_(table) *const table)
	{ assert(table), free(table->buckets); N_(table)(table); }

/** Reserve at least `n` more empty buckets in `table`. This may cause the
 capacity to increase and invalidates any pointers to data in the table.
 @return Success.
 @throws[ERANGE] The request was unsatisfiable. @throws[realloc] @allow */
static int N_(table_buffer)(struct N_(table) *const table, const PN_(uint) n)
	{ return assert(table), PN_(buffer)(table, n); }

/** Clears and removes all buckets from `table`. The capacity and memory of
 `table` are preserved, but all previous values are un-associated. (The load
 factor will be less until it reaches its previous size.)
 @order \Theta(`table.capacity`) @allow */
static void N_(table_clear)(struct N_(table) *const table) {
	struct PN_(bucket) *b, *b_end;
	assert(table);
	if(!table->buckets) { assert(!table->log_capacity); return; }
	assert(table->log_capacity);
	for(b = table->buckets, b_end = b + PN_(capacity)(table); b < b_end; b++)
		b->next = TABLE_NULL;
	table->size = 0;
	table->top = (PN_(capacity)(table) - 1) & TABLE_HIGH;
}

/** @return Whether `key` is in `table` (which can be null). @allow */
static int N_(table_is)(struct N_(table) *const table, const PN_(key) key)
	{ return table && table->buckets
	? !!PN_(query)(table, key, PN_(hash)(key)) : 0; }

/** @param[result] If null, behaves like `table_is`; otherwise, an `entry`
 which gets filled on true.
 @return Whether `key` is in `table` (which can be null). @allow */
static int N_(table_query)(struct N_(table) *const table, const PN_(key) key,
	PN_(entry) *const result) {
	struct PN_(bucket) *bucket;
	if(!table || !table->buckets
		|| !(bucket = PN_(query)(table, key, PN_(hash)(key)))) return 0;
	if(result) PN_(to_entry)(bucket, result);
	return 1;
}

/** @return The value associated with `key` in `table` (which can be null).
 If no such value exists, `default_value` is returned.
 @order Average \O(1); worst \O(n). @allow */
static PN_(value) N_(table_get_or)(struct N_(table) *const table,
	const PN_(key) key, PN_(value) default_value) {
	struct PN_(bucket) *bucket;
	return table && table->buckets
		&& (bucket = PN_(query)(table, key, PN_(hash)(key)))
		? PN_(bucket_value)(bucket) : default_value;
}

/** Puts `entry` in `table` only if absent.
 @return One of: `TABLE_ERROR`, the table is not modified; `TABLE_YIELD`, not
 modified if there is another entry with the same key; `TABLE_UNIQUE`, put an
 entry in the table.
 @throws[realloc, ERANGE] On `TABLE_ERROR`.
 @order Average amortised \O(1); worst \O(n). @allow */
static enum table_result N_(table_try)(struct N_(table) *const table,
	PN_(entry) entry) { return PN_(put)(table, entry, 0, 0); }

/** Callback in `table_replace`.
 @return `original` and `replace` ignored; true.
 @implements `policy_fn` */
static int PN_(always_replace)(const PN_(key) original,
	const PN_(key) replace)
	{ return (void)original, (void)replace, 1; }
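/* Usage sketch, not part of the original header, continuing the illustrative
 `zodiac` instantiation above (names are assumptions). Without `TABLE_VALUE`,
 the entry is just the key, so lookup and insertion read:

	struct zodiac_table signs = {0};
	if(zodiac_table_try(&signs, "Aries") != TABLE_ERROR) {
		assert(zodiac_table_is(&signs, "Aries"));
		assert(!zodiac_table_is(&signs, "Taurus"));
	}
	zodiac_table_(&signs);

 With a `TABLE_VALUE`, `zodiac_table_get_or` would instead return the payload
 associated with the key, or the supplied default when the key is absent. */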
/** Puts `entry` in `table`.
 @return One of: `TABLE_ERROR`, the table is not modified; `TABLE_REPLACE`,
 the `entry` is put in the table, and, if non-null, `eject` will be filled;
 `TABLE_UNIQUE`, on a unique entry.
 @throws[realloc, ERANGE] On `TABLE_ERROR`.
 @order Average amortised \O(1); worst \O(n). @allow */
static enum table_result N_(table_replace)(struct N_(table) *const table,
	PN_(entry) entry, PN_(entry) *eject)
	{ return PN_(put)(table, entry, eject, &PN_(always_replace)); }

/** Puts `entry` in `table` only if absent or if calling `policy` returns
 true.
 @return One of: `TABLE_ERROR`, the table is not modified; `TABLE_REPLACE`,
 if `policy` is non-null and returns true, in which case, if non-null,
 `eject` will be filled; `TABLE_YIELD`, if `policy` is null or returns false;
 `TABLE_UNIQUE`, on a unique entry.
 @throws[realloc, ERANGE] On `TABLE_ERROR`.
 @order Average amortised \O(1); worst \O(n). @allow */
static enum table_result N_(table_update)(struct N_(table) *const table,
	PN_(entry) entry, PN_(entry) *eject, const PN_(policy_fn) policy)
	{ return PN_(put)(table, entry, eject, policy); }

#ifdef TABLE_VALUE /* */

/** Removes `key` from `table` (which could be null).
 @return Whether that `key` was in `table`.
 @order Average \O(1), (hash distributes elements uniformly); worst \O(n).
 @allow */
static int N_(table_remove)(struct N_(table) *const table,
	const PN_(key) key) {
	struct PN_(bucket) *current;
	PN_(uint) crnt, prv = TABLE_NULL, nxt, hash = PN_(hash)(key);
	if(!table || !table->size) return 0;
	assert(table->buckets);
	/* Find the item and keep track of the previous. */
	current = table->buckets + (crnt = PN_(to_bucket)(table, hash));
	if((nxt = current->next) == TABLE_NULL
		|| PN_(in_stack_range)(table, crnt)
		&& crnt != PN_(to_bucket)(table, current->hash)) return 0;
	while(hash != current->hash
		|| !PN_(equal_buckets)(key, PN_(bucket_key)(current))) {
		if(nxt == TABLE_END) return 0;
		prv = crnt, current = table->buckets + (crnt = nxt);
		assert(crnt < PN_(capacity)(table) && PN_(in_stack_range)(table, crnt)
			&& crnt != TABLE_NULL);
		nxt = current->next;
	}
	if(prv != TABLE_NULL) { /* Open entry. */
		struct PN_(bucket) *previous = table->buckets + prv;
		previous->next = current->next;
	} else if(current->next != TABLE_END) { /* Head closed entry and others. */
		struct PN_(bucket) *const second
			= table->buckets + (crnt = current->next);
		assert(current->next < PN_(capacity)(table));
		memcpy(current, second, sizeof *second);
		current = second;
	}
	current->next = TABLE_NULL, table->size--, PN_(shrink_stack)(table, crnt);
	return 1;
}
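/* Usage sketch, not part of the original header: a `policy_fn` decides
 whether an incoming entry displaces one already present. Continuing the
 illustrative `zodiac` instantiation (names are assumptions), a policy that
 keeps whichever key string compares smaller might read:

	static int keep_smaller(char *const original, char *const replace)
		{ return strcmp(replace, original) < 0; }

	char *ejected = 0;
	if(zodiac_table_update(&signs, "Aries", &ejected, &keep_smaller)
		== TABLE_ERROR) perror("signs");

 `table_replace` is the special case whose policy always returns true, and
 `table_try` is the case with no policy at all. */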
/* */

/** ![States](../web/it.png)

 Adding, deleting, successfully looking up entries, or any modification of
 the table's topology invalidates the iterator. Iteration is usually not in
 any particular order. The asymptotic runtime of iterating through the whole
 table is proportional to the capacity. */
struct N_(table_iterator);
struct N_(table_iterator) {
	struct PN_(iterator) it;
	struct N_(table) *modify;
	union { PN_(uint) prev; void *do_not_warn; } _;
};

/** Loads `table` (can be null) into `it`. @allow */
static void N_(table_begin)(struct N_(table_iterator) *const it,
	struct N_(table) *const table) {
	PN_(begin)(&it->it, table);
	/* Stupid: I want to have `table_iterator_remove`, so I need a
	 non-constant value. The value in to string is constant. */
	it->modify = table;
	it->_.prev = TABLE_NULL;
}

/** Advances `prev` to keep up with `b` in `it`. (Stupid: changing back to
 offset.) */
static void PN_(advance)(struct N_(table_iterator) *const it,
	const struct PN_(bucket) *const b)
	{ it->_.prev = (PN_(uint))(b - it->it.table->buckets); }

/** Advances `it`.
 @param[entry] If non-null, the entry is filled with the next element only if
 it has a next. @return Whether it had a next element. @allow */
static int N_(table_next)(struct N_(table_iterator) *const it,
	PN_(entry) *entry) {
	const struct PN_(bucket) *b = PN_(next)(&it->it);
	if(!b) return 0;
	PN_(advance)(it, b);
	if(entry) PN_(to_entry)(b, entry);
	return 1;
}

/** Especially for tables that can have zero as a valid value, this is used
 to differentiate between zero and null.
 @return Whether the table specified to `it` in `table_begin` has a next
 element. @order Amortized on the capacity, \O(1). @allow */
static int N_(table_has_next)(struct N_(table_iterator) *const it) {
	assert(it);
	return it->it.table && it->it.table->buckets && PN_(skip)(&it->it);
}

#ifdef TABLE_VALUE /* */

/** Removes the entry at `it`. Whereas `table_remove` invalidates the
 iterator, this corrects the single iterator `it`.
 @return Success, or false if there was no longer an entry at the iterator's
 position. @allow */
static int N_(table_iterator_remove)(struct N_(table_iterator) *const it) {
	struct N_(table) *table;
	PN_(uint) b = it->_.prev;
	struct PN_(bucket) *previous = 0, *current;
	PN_(uint) prv = TABLE_NULL, crnt;
	assert(it);
	if(b == TABLE_NULL) return 0;
	table = it->modify;
	assert(table && table == it->it.table
		&& table->buckets && b < PN_(capacity)(table));
	/* Egregious code reuse. :[ */
	current = table->buckets + b, assert(current->next != TABLE_NULL);
	crnt = PN_(to_bucket)(table, current->hash);
	while(crnt != b) assert(crnt < PN_(capacity)(table)),
		crnt = (previous = table->buckets + (prv = crnt))->next;
	if(prv != TABLE_NULL) { /* Open entry. */
		previous->next = current->next;
	} else if(current->next != TABLE_END) { /* Head closed entry and others. */
		const PN_(uint) scnd = current->next;
		struct PN_(bucket) *const second = table->buckets + scnd;
		assert(scnd < PN_(capacity)(table));
		memcpy(current, second, sizeof *second);
		if(crnt < scnd) it->it._.b = it->_.prev; /* Iterate new entry. */
		crnt = scnd; current = second;
	}
	current->next = TABLE_NULL, table->size--, PN_(shrink_stack)(table, crnt);
	it->_.prev = TABLE_NULL;
	return 1;
}

/* */

static void PN_(unused_base_coda)(void);
static void PN_(unused_base)(void) {
	PN_(entry) e; PN_(key) k; PN_(value) v;
	memset(&e, 0, sizeof e); memset(&k, 0, sizeof k); memset(&v, 0, sizeof v);
	N_(table)(0); N_(table_)(0); N_(table_buffer)(0, 0); N_(table_clear)(0);
	N_(table_is)(0, k); N_(table_query)(0, k, 0); N_(table_get_or)(0, k, v);
	N_(table_try)(0, e); N_(table_replace)(0, e, 0);
	N_(table_update)(0, e, 0, 0); N_(table_remove)(0, 0);
	N_(table_begin)(0, 0); N_(table_next)(0, 0); N_(table_has_next)(0);
	N_(table_iterator_remove)(0);
	PN_(unused_base_coda)();
#ifdef TABLE_VALUE
	N_(table_compute)(0, k, 0); N_(table_next_key)(0);
	N_(table_next_value)(0);
#endif
}
static void PN_(unused_base_coda)(void) { PN_(unused_base)(); }

#elif defined(TABLE_DEFAULT) /* base --> */

#undef TSZ_
#undef SZ_
#undef TABLE_TO_STRING
#ifdef TABLE_TO_STRING_NAME
#undef TABLE_TO_STRING_NAME
#endif
#endif /* traits --> */

#ifdef TABLE_EXPECT_TRAIT /* */
#endif /* !trait --> */

#undef TABLE_DEFAULT_TRAIT
#undef TABLE_TO_STRING_TRAIT
#undef TABLE_TRAITS
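/* Usage sketch, not part of the original header, continuing the illustrative
 `zodiac` instantiation (names are assumptions). Without `TABLE_VALUE`, the
 entry passed to `table_next` is just the key:

	struct zodiac_table_iterator it;
	char *sign;
	zodiac_table_begin(&it, &signs);
	while(zodiac_table_next(&it, &sign))
		if(!strcmp(sign, "Aries")) zodiac_table_iterator_remove(&it);

 Any other modification of the table during the loop would invalidate `it`,
 as documented above. */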