  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  return (void *) (uword) (rv + alloc_arena (h));
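  /* Note (illustrative, not from the original listing): the arena is a
     simple bump allocator.  alloc_arena_next(h) is a byte offset into a
     preallocated region whose base address is alloc_arena(h); running past
     alloc_arena_size(h) is treated as fatal rather than recoverable.  With
     hypothetical 128-byte requests, successive return values are base+0,
     base+128, base+256, and so on. */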
void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)

  alloc_arena_next (h) = 0;
  alloc_arena_size (h) = h->memory_size;

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);
void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)

  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = a->memory_size;

  h->fmt_fn = a->fmt_fn;

  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  if (a->dont_add_to_all_bihash_list == 0)

  if (a->instantiate_immediately)
    BV (clib_bihash_instantiate) (h);
void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)

  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));

  a->nbuckets = nbuckets;

  BV (clib_bihash_init2) (a);
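/* Illustrative sketch, not part of bihash_template.c: typical use of the
   init wrapper through a concrete template instance.  The table name, the
   sizing numbers and the "demo_table" identifier are hypothetical; the
   _8_8 variant is assumed to be available via <vppinfra/bihash_8_8.h>. */
#include <vppinfra/bihash_8_8.h>

static clib_bihash_8_8_t demo_table;

static void
demo_table_setup (void)
{
  /* 2048 buckets, 32 MB arena.  Because instantiation is deferred, the
     arena is not actually allocated until the first add (or until
     instantiate_immediately is set via clib_bihash_init2_args). */
  clib_bihash_init_8_8 (&demo_table, "demo table",
			2048 /* nbuckets */, 32ULL << 20 /* memory size */);
}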
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

void BV (clib_bihash_master_init_svm)
  ASSERT (memory_size < (1ULL << 32));

  if (ftruncate (fd, memory_size) < 0)

  mmap_addr = mmap (0, memory_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

  if (mmap_addr == MAP_FAILED)

  h->sh = (void *) mmap_addr;

  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;

  bucket_size = nbuckets * sizeof (h->buckets[0]);

  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =

				       BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;

  h->sh->freelists_as_u64 =

  h->freelists = (void *) (freelist_vh->vector_data);
void BV (clib_bihash_slave_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)

  BVT (clib_bihash_shared_header) * sh;

  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0);
  if (mmap_addr == MAP_FAILED)

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  mmap_addr = mmap (0, memory_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

  if (mmap_addr == MAP_FAILED)

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;

  h->name = (u8 *) name;

  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);
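/* Illustrative sketch, not from the original file: how the two SVM init
   calls pair up across processes.  It assumes a template built with
   BIHASH_32_64_SVM, that the master signature is
   (h, name, nbuckets, memory_size), and that the memfd created by the
   master (h->memfd) is handed to the peer process out of band, e.g. over a
   SCM_RIGHTS unix-domain socket (not shown).  The _8_8 names are only for
   illustration. */
static clib_bihash_8_8_t shared_table;

static void
master_side_setup (void)
{
  /* Creates the memfd, maps the arena, builds the shared header. */
  clib_bihash_master_init_svm_8_8 (&shared_table, "shared table",
				   2048 /* nbuckets */, 64ULL << 20);
  /* ... hand shared_table.memfd to the peer process ... */
}

static void
slave_side_setup (int received_fd)
{
  /* Maps the 4 KB shared header first to learn the arena size, then
     remaps the whole arena read/write, as shown in the code above. */
  clib_bihash_slave_init_svm_8_8 (&shared_table, "shared table",
				  received_fd);
}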
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
					 format_function_t * fmt_fn)
void BV (clib_bihash_free) (BVT (clib_bihash) * h)

    goto never_initialized;

#if BIHASH_32_64_SVM == 0

  (void) close (h->memfd);

  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
  ASSERT (h->alloc_lock[0]);

  h->freelists[log2_pages] = rv->next_free_as_u64;
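  /* Note (illustrative, not from the original listing): these lines are
     from BV (value_alloc).  Key/value pages are handed out in power-of-two
     bundles and recycled through h->freelists, indexed by log2_pages: a
     4-page bundle lives on h->freelists[2].  The pop above roughly pairs
     with a push in BV (value_free) along the lines of:

	 v->next_free_as_u64 = h->freelists[log2_pages];
	 h->freelists[log2_pages] = BV (clib_bihash_get_offset) (h, v);
  */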
  ASSERT (h->alloc_lock[0]);
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));

  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))

  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)

	(h, sizeof (working_copy[0]) * (1 << b->log2_pages));
  h->working_copy_lengths[thread_index] = b->log2_pages;
  h->working_copies[thread_index] = working_copy;

  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
				   1ULL << b->log2_pages);

  working_bucket.as_u64 = b->as_u64;

  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
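  /* Note (illustrative, not from the original listing): this fragment is
     from BV (make_working_copy).  The writer saves the live bucket header
     in h->saved_bucket, copies the bucket's key/value pages into a
     per-thread working copy (grown here when the bucket has more pages
     than the copy last used), and then swaps in a bucket header that
     points at the copy, so the split/rehash can proceed off to the side. */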
static BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)

  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)

  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

  new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v = &new_values[new_hash];

  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))

	     sizeof (new_v->kvp[j]));

  BV (value_free) (h, new_values, new_log2_pages);
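  /* Note (illustrative, not from the original listing): on a split, each
     key is redistributed by the hash bits just above the bucket-index
     bits.  Hypothetical numbers: with 2048 buckets (log2_nbuckets == 11)
     and new_log2_pages == 2, a hash of 0x0003ABCD selects bucket
     0x0003ABCD & 0x7FF == 0x3CD, and (0x0003ABCD >> 11) & 0x3 == 1 picks
     which of the 4 new pages the entry lands on. */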
static BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)

  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < old_length; i++)

  for (; j < new_length; j++)

  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

  if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))

	     sizeof (new_values->kvp[j]));

  BV (value_free) (h, new_values, new_log2_pages);
static inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)

  BVT (clib_bihash_bucket) * b, tmp_b;

  u32 new_log2_pages, old_log2_pages;

  int mark_bucket_linear;

  BV (clib_bihash_alloc_lock) (h);
  if (h->instantiated == 0)
    BV (clib_bihash_instantiate) (h);
  BV (clib_bihash_alloc_unlock) (h);

  hash = BV (clib_bihash_hash) (add_v);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  hash >>= h->log2_nbuckets;
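  /* Note (illustrative, not from the original listing): the low
     log2_nbuckets bits of the 64-bit hash select the bucket; the shift
     leaves the remaining bits to select a key/value page inside the
     bucket (hash & ((1 << b->log2_pages) - 1) below), so bucket choice
     and page choice use disjoint portions of the same hash. */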
  BV (clib_bihash_lock_bucket) (b);

  if (BV (clib_bihash_bucket_is_empty) (b))

  BV (clib_bihash_unlock_bucket) (b);

  BV (clib_bihash_alloc_lock) (h);
  v = BV (value_alloc) (h, 0);
  BV (clib_bihash_alloc_unlock) (h);

  b->as_u64 = tmp_b.as_u64;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (b->linear_search)
    limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))

  BV (clib_bihash_unlock_bucket) (b);

  BV (clib_bihash_unlock_bucket) (b);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_is_free) (&(v->kvp[i])))

		   &add_v->value, sizeof (add_v->value));

		   sizeof (add_v->key));

  BV (clib_bihash_unlock_bucket) (b);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);

  for (i = 0; i < limit; i++)

  if (is_stale_cb (&(v->kvp[i]), arg))

  BV (clib_bihash_unlock_bucket) (b);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))

  clib_memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));

  BV (clib_bihash_unlock_bucket) (b);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
  tmp_b.as_u64 = b->as_u64;

  BV (clib_bihash_alloc_lock) (h);

  BV (value_free) (h, v, tmp_b.log2_pages);
  BV (clib_bihash_alloc_unlock) (h);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,

  BV (clib_bihash_unlock_bucket) (b);

  BV (clib_bihash_alloc_lock) (h);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);

  working_copy = h->working_copies[thread_index];

  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);

  mark_bucket_linear = 1;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);

  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,

  new_hash = BV (clib_bihash_hash) (add_v);

  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v += mark_bucket_linear ? 0 : new_hash;

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))

  BV (value_free) (h, save_new_v, new_log2_pages);

  tmp_b.log2_pages = new_log2_pages;

  tmp_b.linear_search = mark_bucket_linear;
  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);

  b->as_u64 = tmp_b.as_u64;

  BV (value_free) (h, v, h->saved_bucket.log2_pages);
  BV (clib_bihash_alloc_unlock) (h);
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)

  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
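/* Illustrative sketch, not part of the file: adding and deleting through a
   concrete instance (the _8_8 names, demo_table, and the key/value numbers
   are the same hypothetical ones used in the earlier init sketch). */
static void
demo_add_then_delete (void)
{
  clib_bihash_kv_8_8_t kv;

  kv.key = 0x1122334455667788ULL;	/* hypothetical key */
  kv.value = 42;
  clib_bihash_add_del_8_8 (&demo_table, &kv, 1 /* is_add */);

  /* Deleting only needs the key; the value field is ignored. */
  clib_bihash_add_del_8_8 (&demo_table, &kv, 0 /* is_add */);
}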
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)

  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
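/* Illustrative sketch, not part of the file: a stale-entry predicate for
   add_or_overwrite_stale.  Storing a timestamp in kv->value and the
   60-tick threshold are purely hypothetical conventions. */
static int
demo_kvp_is_stale (clib_bihash_kv_8_8_t * kv, void *arg)
{
  u64 now = *(u64 *) arg;

  return (now - kv->value) > 60;
}

static void
demo_refresh (clib_bihash_kv_8_8_t * kv, u64 now)
{
  /* Adds *kv, or overwrites an existing slot whose contents the callback
     reports as stale, rather than splitting a full bucket. */
  clib_bihash_add_or_overwrite_stale_8_8 (&demo_table, kv,
					  demo_kvp_is_stale, &now);
}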
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)

  BVT (clib_bihash_bucket) * b;

  hash = BV (clib_bihash_hash) (search_key);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  if (BV (clib_bihash_bucket_is_empty) (b))

  volatile BVT (clib_bihash_bucket) * bv = b;

  hash >>= h->log2_nbuckets;

  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;

  limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)

  if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
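/* Illustrative sketch, not part of the file: a lookup through the same
   hypothetical _8_8 instance.  clib_bihash_search returns 0 on a hit and a
   nonzero value when the key is absent. */
static u64
demo_lookup (u64 key)
{
  clib_bihash_kv_8_8_t search_kv, result_kv;

  search_kv.key = key;
  if (clib_bihash_search_8_8 (&demo_table, &search_kv, &result_kv) == 0)
    return result_kv.value;	/* found */

  return ~0ULL;			/* hypothetical "not found" sentinel */
}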
u8 *BV (format_bihash) (u8 * s, va_list * args)

  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;

  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

  return format (s, "[empty, uninitialized]");

  for (i = 0; i < h->nbuckets; i++)

  if (BV (clib_bihash_bucket_is_empty) (b))

  s = format (s, "[%d]: empty\n", i);

  if (b->linear_search)

  s = format (s, "[%d]: heap offset %lld, len %d, linear %d\n", i,
	      b->offset, (1 << b->log2_pages), b->linear_search);

  for (j = 0; j < (1 << b->log2_pages); j++)

  if (BV (clib_bihash_is_free) (&v->kvp[k]))

  s = format (s, "    %d: empty\n", j * BIHASH_KVP_PER_PAGE + k);

  s = format (s, "    %d: %U\n", j * BIHASH_KVP_PER_PAGE + k,
	      h->fmt_fn, &(v->kvp[k]), verbose);

  s = format (s, "    %d: %U\n", j * BIHASH_KVP_PER_PAGE + k,
	      BV (format_bihash_kvp), &(v->kvp[k]));

  s = format (s, "    %lld active elements %lld active buckets\n",
	      active_elements, active_buckets);

  for (i = 0; i < vec_len (h->freelists); i++)

  u64 free_elt_as_u64 = h->freelists[i];

  while (free_elt_as_u64)

  free_elt_as_u64 = free_elt->next_free_as_u64;

  if (nfree || verbose)
    s = format (s, "    [len %d] %u free elts\n", 1 << i, nfree);

  s = format (s, "    %lld linear search buckets\n", linear_buckets);
  used_bytes = alloc_arena_next (h);
  s = format (s,
	      "    arena: base %llx, next %llx\n"
	      "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
	      alloc_arena (h), alloc_arena_next (h),
	      used_bytes, used_bytes >> 20,
	      alloc_arena_size (h), alloc_arena_size (h) >> 20);
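/* Illustrative sketch, not part of the file: printing a table through the
   format machinery.  The optional per-KVP formatter is invoked via "%U"
   with a pointer to the KVP and the verbose flag, matching the h->fmt_fn
   call in format_bihash above; all demo_* names are hypothetical. */
static u8 *
demo_format_kvp (u8 * s, va_list * args)
{
  clib_bihash_kv_8_8_t *kv = va_arg (*args, clib_bihash_kv_8_8_t *);
  int verbose = va_arg (*args, int);

  s = format (s, "key %llx value %lld", kv->key, kv->value);
  if (verbose)
    s = format (s, " (0x%llx)", kv->value);
  return s;
}

static void
demo_dump_table (void)
{
  clib_bihash_set_kvp_format_fn_8_8 (&demo_table, demo_format_kvp);
  fformat (stdout, "%U\n", format_bihash_8_8, &demo_table, 1 /* verbose */);
}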
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)

  BVT (clib_bihash_bucket) * b;

  for (i = 0; i < h->nbuckets; i++)

  if (BV (clib_bihash_bucket_is_empty) (b))

  for (j = 0; j < (1 << b->log2_pages); j++)

  if (BV (clib_bihash_is_free) (&v->kvp[k]))

  if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))

  if (BV (clib_bihash_bucket_is_empty) (b))
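/* Illustrative sketch, not part of the file: counting entries with the
   walker.  The callback returns BIHASH_WALK_CONTINUE to keep going or
   BIHASH_WALK_STOP to abort, as checked in the loop above; it should not
   add or delete entries while the walk is in progress. */
static int
demo_count_cb (clib_bihash_kv_8_8_t * kv, void *ctx)
{
  u64 *count = ctx;

  (*count)++;
  return BIHASH_WALK_CONTINUE;
}

static u64
demo_count_entries (void)
{
  u64 n_entries = 0;

  clib_bihash_foreach_key_value_pair_8_8 (&demo_table, demo_count_cb,
					  &n_entries);
  return n_entries;
}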