  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (rv >= alloc_arena_size (h))
    os_out_of_memory ();

  return (void *) (uword) (rv + alloc_arena (h));
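/* Excerpt from BV (clib_bihash_init): force the bucket count to a power of
   two, record the name and log2 bucket count, and reset the arena allocator
   before carving out the bucket array. */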
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  ASSERT (memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  alloc_arena_next (h) = 0;

  bucket_size = nbuckets * sizeof (h->buckets[0]);

#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

/* Shared-memory (memfd) variant: the master creates and maps the segment,
   then publishes bucket, lock and freelist offsets in the shared header so
   other processes can attach. */
void BV (clib_bihash_master_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  ASSERT (memory_size < (1ULL << 32));

  /* create and size the memfd segment (error handling elided) */
  if (ftruncate (fd, memory_size) < 0)
    { /* ... */ }

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    { /* ... */ }

  h->sh = (void *) mmap_addr;

  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;

  bucket_size = nbuckets * sizeof (h->buckets[0]);

  h->alloc_lock[0] = 0;
  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);

  freelist_vh = BV (alloc_aligned) (h, sizeof (vec_header_t) +
                                    BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);
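/* BV (clib_bihash_slave_init_svm): attach to an existing memfd table. Map
   the first page read-only to learn the arena size from the shared header,
   then remap the whole arena read-write. */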
void BV (clib_bihash_slave_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  BVT (clib_bihash_shared_header) * sh;

  /* trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    { /* ... */ }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;
  memory_size = sh->alloc_arena_size;
  munmap (mmap_addr, 4096);

  /* actual mapping, at the full size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    { /* ... */ }

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;

  h->name = (u8 *) name;
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);
}
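/* BV (clib_bihash_set_kvp_format_fn) installs the user kv formatter used by
   format_bihash below; the fragment after it is from BV (clib_bihash_free),
   which closes the memfd in shared-memory builds. */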
void BV (clib_bihash_set_kvp_format_fn)
  (BVT (clib_bihash) * h, format_function_t * fmt_fn)

#if BIHASH_32_64_SVM == 0
  /* ... */
#else
  (void) close (h->memfd);
#endif
  ASSERT (h->alloc_lock[0]);
  /* ... */
  h->freelists[log2_pages] = rv->next_free_as_u64;

  /* ... */
  ASSERT (h->alloc_lock[0]);
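/* BV (make_working_copy), excerpted below: snapshot a bucket's value pages
   into a per-thread working copy so the bucket can be rebuilt off to the
   side and swapped back in with a single 64-bit store. */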
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    { /* ... grow the per-thread working-copy vectors ... */ }

  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;
    }

  /* ... copy the bucket's value pages into the working copy ... */
  working_bucket.as_u64 = b->as_u64;
  /* ... point working_bucket at the copy ... */
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
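/* BV (split_and_rehash): move entries from the old value pages into a page
   array twice as large, using the next new_log2_pages hash bits to pick each
   entry's new home page. */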
static BVT (clib_bihash_value) * BV (split_and_rehash)
  (BVT (clib_bihash) * h, BVT (clib_bihash_value) * old_values,
   u32 old_log2_pages, u32 new_log2_pages)
{
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      /* unused slot in the old pages? skip it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
        { /* ... */ }

      /* rehash the entry onto its new home page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash >>= h->log2_nbuckets;
      new_hash &= (1 << new_log2_pages) - 1;
      new_v = &new_values[new_hash];

      /* take the first free slot on the new home page */
      if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
        clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                          sizeof (new_v->kvp[j]));

      /* no free slot on the new page: give the pages back, caller retries */
      BV (value_free) (h, new_values, new_log2_pages);
    }
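/* BV (split_and_rehash_linear): fallback used when an even split fails; pack
   the old entries into the new pages in order, and the caller then marks the
   bucket for linear search. */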
static BVT (clib_bihash_value) * BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h, BVT (clib_bihash_value) * old_values,
   u32 old_log2_pages, u32 new_log2_pages)
{
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  /* across the old value array */
  for (i = 0; i < old_length; i++)
    {
      /* find the next free slot in the new (linear-scan) pages */
      for (; j < new_length; j++)
        {
          /* unused old entry? skip it */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
            { /* ... */ }

          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
            clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                              sizeof (new_values->kvp[j]));
          /* ... */
        }
      /* should never happen: give the new pages back */
      BV (value_free) (h, new_values, new_log2_pages);
    }
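/* BV (clib_bihash_add_del_inline): the add/delete workhorse. Simple adds,
   overwrites and deletes happen in place under the per-bucket lock; a full
   bucket is copied, split and swapped back in under the allocator lock. */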
static inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  u32 new_log2_pages, old_log2_pages;
  int mark_bucket_linear;

  hash = BV (clib_bihash_hash) (add_v);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  hash >>= h->log2_nbuckets;

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
        {
          BV (clib_bihash_unlock_bucket) (b);
          return (-1);
        }

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      /* ... copy add_v into the fresh page, point tmp_b at it ... */
      b->as_u64 = tmp_b.as_u64;
      BV (clib_bihash_unlock_bucket) (b);
      return (0);
    }
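  /* Bucket already has pages: locate the home page (or page 0 for
     linear-search buckets) and try to update in place. */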
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (b->linear_search)
    limit <<= b->log2_pages;
  /* is_add: first see whether the key is already present */
  for (i = 0; i < limit; i++)
    {
      if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
        {
          /* ... overwrite in place ... */
          BV (clib_bihash_unlock_bucket) (b);
          return (0);
        }
    }
  /* ...then look for an empty slot */
  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(v->kvp[i])))
        {
          /* value first, then key, so readers never match a stale value */
          clib_memcpy_fast (&(v->kvp[i].value),
                            &add_v->value, sizeof (add_v->value));
          clib_memcpy_fast (&(v->kvp[i]), &add_v->key, sizeof (add_v->key));
          BV (clib_bihash_unlock_bucket) (b);
          return (0);
        }
    }
  /* is_add with a stale callback: overwrite the first stale entry */
  for (i = 0; i < limit; i++)
    {
      if (is_stale_cb (&(v->kvp[i]), arg))
        {
          /* ... overwrite the stale kv ... */
          BV (clib_bihash_unlock_bucket) (b);
          return (0);
        }
    }

  /* is_add == 0: delete */
  for (i = 0; i < limit; i++)
    {
      if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
        {
          /* found the key: kill it */
          clib_memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
          /* if other entries remain, just unlock and return */
          BV (clib_bihash_unlock_bucket) (b);
          /* otherwise free the bucket's backing pages */
          tmp_b.as_u64 = b->as_u64;
          BV (clib_bihash_alloc_lock) (h);
          BV (value_free) (h, v, tmp_b.log2_pages);
          BV (clib_bihash_alloc_unlock) (h);
        }
    }
  /* key not found */
  BV (clib_bihash_unlock_bucket) (b);
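  /* None of the in-place paths worked: the bucket is full and must be
     split into a larger page array. */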
  BV (clib_bihash_alloc_lock) (h);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;

  working_copy = h->working_copies[thread_index];

  /* ... split_and_rehash the working copy; if that keeps failing,
     fall back to packed pages searched linearly: */
  mark_bucket_linear = 1;

  /* try to add the new entry to the rebuilt pages */
  new_hash = BV (clib_bihash_hash) (add_v);
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v += mark_bucket_linear ? 0 : new_hash;

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
        { /* ... copy add_v in and stop ... */ }
    }

  /* still no room: give the new pages back and retry the split */
  BV (value_free) (h, save_new_v, new_log2_pages);

  /* build the new bucket header and swap it into place */
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.linear_search = mark_bucket_linear;
  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);

  b->as_u64 = tmp_b.as_u64;

  /* free the old bucket's pages */
  BV (value_free) (h, v, h->saved_bucket.log2_pages);
  BV (clib_bihash_alloc_unlock) (h);
  return (0);
}
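/* Public entry points: thin wrappers around BV (clib_bihash_add_del_inline). */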
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
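/* BV (clib_bihash_search): reader-side lookup. Readers do not take the
   bucket lock; they pick the bucket from the low hash bits and probe its
   kvp pages, scanning every page when the bucket is in linear-search mode. */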
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  BVT (clib_bihash_bucket) * b;

  hash = BV (clib_bihash_hash) (search_key);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  if (BV (clib_bihash_bucket_is_empty) (b))
    return -1;

  /* a writer may hold the bucket lock: spin on a volatile view of the
     bucket until the lock bit clears */
  volatile BVT (clib_bihash_bucket) * bv = b;
  /* ... */

  hash >>= h->log2_nbuckets;

  /* ... */
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (b->linear_search)
    limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
        {
          *valuep = v->kvp[i];
          return 0;
        }
    }
  return -1;
}
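/* BV (format_bihash): "%U" formatter for a whole table. Prints per-bucket
   detail when verbose, then summary counters, freelist occupancy and arena
   usage. */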
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

  for (i = 0; i < h->nbuckets; i++)
    {
      if (BV (clib_bihash_bucket_is_empty) (b))
        {
          if (verbose > 1)
            s = format (s, "[%d]: empty\n", i);
          continue;
        }

      /* ... count active buckets and elements ... */
      if (b->linear_search)
        linear_buckets++;

      if (verbose)
        {
          s = format (s, "[%d]: heap offset %lld, len %d, linear %d\n", i,
                      b->offset, (1 << b->log2_pages), b->linear_search);

          for (j = 0; j < (1 << b->log2_pages); j++)
            {
              for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
                {
                  if (BV (clib_bihash_is_free) (&v->kvp[k]))
                    {
                      s = format (s, " %d: empty\n",
                                  j * BIHASH_KVP_PER_PAGE + k);
                      continue;
                    }
                  if (h->fmt_fn)
                    s = format (s, " %d: %U\n",
                                j * BIHASH_KVP_PER_PAGE + k,
                                h->fmt_fn, &(v->kvp[k]));
                  else
                    s = format (s, " %d: %U\n",
                                j * BIHASH_KVP_PER_PAGE + k,
                                BV (format_bihash_kvp), &(v->kvp[k]));
                }
            }
        }
    }

  s = format (s, " %lld active elements %lld active buckets\n",
              active_elements, active_buckets);

  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
        {
          nfree++;
          /* ... */
          free_elt_as_u64 = free_elt->next_free_as_u64;
        }

      if (nfree || verbose)
        s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, " %lld linear search buckets\n", linear_buckets);

  used_bytes = alloc_arena_next (h);
  s = format (s,
              " arena: base %llx, next %llx\n"
              " used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
              alloc_arena (h), alloc_arena_next (h),
              used_bytes, used_bytes >> 20,
              alloc_arena_size (h), alloc_arena_size (h) >> 20);
  return s;
}
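/* BV (clib_bihash_foreach_key_value_pair): visit every active (key,value)
   pair, re-checking the bucket after each callback in case the callback
   deleted its last entry. */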
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h, void *callback, void *arg)
{
  BVT (clib_bihash_bucket) * b;
  void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;

  for (i = 0; i < h->nbuckets; i++)
    {
      if (BV (clib_bihash_bucket_is_empty) (b))
        continue;

      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                continue;

              (*fp) (&v->kvp[k], arg);

              /* the callback may have deleted the bucket's last entry */
              if (BV (clib_bihash_bucket_is_empty) (b))
                { /* ... stop scanning this bucket ... */ }
            }
        }
    }
}