#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
#define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
#endif

/* In BV (alloc_aligned) (h, nbytes), heap-backed (BIHASH_USE_HEAP) path: */
      BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;

      /* Enough space left in the current chunk? */
      if (chunk && chunk->bytes_left >= nbytes)
	{
	  rv = chunk->next_alloc;
	  chunk->bytes_left -= nbytes;
	  chunk->next_alloc += nbytes;
	  return rv;
	}

      /* Oversize request: allocate a dedicated chunk, linked in second
	 place so the current chunk keeps filling up */
      if (nbytes >= chunk_sz)
	{
	  /* ... chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk)) ... */
	  rv = (u8 *) (chunk + 1);
	  chunk->next = h->chunks->next;
	  chunk->prev = h->chunks;
	  h->chunks->next = chunk;
	  if (chunk->next)
	    chunk->next->prev = chunk;
	  return rv;
	}

      /* Otherwise start a fresh chunk and carve this allocation from it */
      /* ... chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk)) ... */
      chunk->size = chunk_sz;
      chunk->bytes_left = chunk_sz;
      chunk->next_alloc = (u8 *) (chunk + 1);
      chunk->next = h->chunks;
      if (chunk->next)
	chunk->next->prev = chunk;
      rv = chunk->next_alloc;
      chunk->bytes_left -= nbytes;
      chunk->next_alloc += nbytes;
      return rv;

  /* mmap-arena path (BIHASH_USE_HEAP == 0): */
  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;
  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  if (alloc_arena_next (h) > alloc_arena_mapped (h))
    {
      void *base, *rv;
      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
			     BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);

      /* Grow by at least 25% of the already-mapped arena */
      if (alloc_arena_mapped (h) >> 2 > alloc)
	alloc = alloc_arena_mapped (h) >> 2;

      /* Round the allocation to the hugepage size */
      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);

      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));

      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);

      /* Fallback: maybe we can still allocate normal pages */
      if (rv == MAP_FAILED || mlock (base, alloc) != 0)
	rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

      if (rv == MAP_FAILED)
	os_out_of_memory ();

      alloc_arena_mapped (h) += alloc;
    }

  return (void *) (uword) (rv + alloc_arena (h));
}
static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
{
  uword bucket_size;

  alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
					 BIHASH_LOG2_HUGEPAGE_SIZE);
  if (alloc_arena (h) == ~0)
    os_out_of_memory ();
  alloc_arena_next (h) = 0;
  alloc_arena_size (h) = h->memory_size;
  alloc_arena_mapped (h) = 0;

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    bucket_size +=
      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    {
      int i;
      BVT (clib_bihash_bucket) * b;

      b = h->buckets;

      for (i = 0; i < h->nbuckets; i++)
	{
	  /* Mark all bucket-level elements free */
	  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
			  sizeof (BVT (clib_bihash_kv)));

	  /* Compute the next bucket start address */
	  b = (void *) (((uword) b) + sizeof (*b) +
			(BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv))));
	}
    }
  CLIB_MEMORY_STORE_BARRIER ();
  h->instantiated = 1;
}
void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
{
  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = a->memory_size;
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = a->kvp_fmt_fn;

  /* The max table size without playing the alignment card is 64 Gbytes */
  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  /* Add this hash table to the list of all tables */
  if (a->dont_add_to_all_bihash_list == 0)
    {
      /* ... vec_add1 (clib_all_bihashes, (void *) h) on its own heap ... */
    }

  /* Set up the alloc lock now, so the first add can be made thread-safe */
  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
					  CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

#if BIHASH_LAZY_INSTANTIATE
  if (a->instantiate_immediately)
#endif
    BV (clib_bihash_instantiate) (h);
}
void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));
  a->h = h;
  a->name = name;
  a->nbuckets = nbuckets;
  a->memory_size = memory_size;
  BV (clib_bihash_init2) (a);
}
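/*
 * Usage sketch (illustrative, not part of the template): a concrete
 * instantiation such as clib_bihash_8_8 (bihash_8_8.h) expands the
 * BV()/BVT() macros to names like clib_bihash_8_8_t, clib_bihash_init_8_8,
 * clib_bihash_add_del_8_8 and clib_bihash_search_8_8, with the template
 * compiled once for that key/value size elsewhere in the build.  The table
 * name and sizes below are arbitrary example values.
 */
#include <vppinfra/bihash_8_8.h>

static void
bihash_usage_example (void)
{
  clib_bihash_8_8_t table;
  clib_bihash_kv_8_8_t kv, result;

  /* 1024 buckets, 32 MB arena; the arena is mapped lazily on the first add */
  clib_bihash_init_8_8 (&table, "example table", 1024 /* nbuckets */,
			32 << 20 /* memory_size */);

  kv.key = 0x0123456789abcdefULL;
  kv.value = 42;
  clib_bihash_add_del_8_8 (&table, &kv, 1 /* is_add */);

  result.key = kv.key;
  if (clib_bihash_search_8_8 (&table, &result, &result) == 0)
    {
      /* result.value now holds 42 */
    }

  clib_bihash_add_del_8_8 (&table, &kv, 0 /* is_add = 0: delete */);
  clib_bihash_free_8_8 (&table);
}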
#if BIHASH_32_64_SVM
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif

void BV (clib_bihash_initiator_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  ASSERT (memory_size < (1ULL << 32));

  /* Set up for memfd sharing: memfd_create (name, MFD_ALLOW_SEALING),
     then size the segment */
  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  mmap_addr = mmap (0, memory_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */);

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;
  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  freelist_vh =
    BV (alloc_aligned) (h, sizeof (vec_header_t) +
			BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
  h->instantiated = 1;
}
void BV (clib_bihash_responder_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  u8 *mmap_addr;
  u64 memory_size;
  BVT (clib_bihash_shared_header) * sh;

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */);
  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("trial mmap failed");
      ASSERT (0);
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;

  memory_size = sh->alloc_arena_size;

  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */);

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;
  h->memfd = -1;

  h->name = (u8 *) name;
  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);

  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
}
#endif /* BIHASH_32_64_SVM */
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
					 format_function_t * kvp_fmt_fn)
{
  h->kvp_fmt_fn = kvp_fmt_fn;
}

int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h)
{
  return (h->instantiated != 0);
}
/* In BV (clib_bihash_free) (h): */
  if (PREDICT_FALSE (h->instantiated == 0))
    goto never_initialized;
  /* ... free the heap chunks, working copies and freelists ... */
  BVT (clib_bihash_alloc_chunk) * next, *chunk;
#if BIHASH_32_64_SVM == 0
  vec_free (h->freelists);
#else
  (void) close (h->memfd);
#endif
  clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
		    alloc_arena_size (h));
never_initialized:
  /* Remove this table from clib_all_bihashes; warn if it is missing */
  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
		(u64) (uword) h);
/* In BV (value_alloc) (h, log2_pages): pop from the per-size freelist */
  ASSERT (h->alloc_lock[0]);
  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
  h->freelists[log2_pages] = rv->next_free_as_u64;
/* In BV (value_free) (h, v, log2_pages): */
  ASSERT (h->alloc_lock[0]);

  /* Large allocations occupy their own chunk: unlink it and free it */
  BVT (clib_bihash_alloc_chunk) * c;
  c = (BVT (clib_bihash_alloc_chunk) *) v - 1;

  if (c->prev)
    c->prev->next = c->next;
  else
    h->chunks = c->next;

  if (c->next)
    c->next->prev = c->prev;
static inline void
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      vec_validate (h->working_copies, thread_index);
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
    }

  /*
   * working_copies are per-cpu so that near-simultaneous updates from
   * multiple threads do not result in sporadic, spurious lookup failures.
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      working_copy = BV (alloc_aligned)
	(h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
				       1ULL << b->log2_pages);
    }

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
}
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
	continue;

      /* Rehash the item onto its new home page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
      new_v = &new_values[new_hash];

      /* Across the new home page: take the first empty slot */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
	{
	  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
	    {
	      clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
				sizeof (new_v->kvp[j]));
	      goto doublebreak;
	    }
	}
      /* Collision: tell the caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }

  return new_values;
}
static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  j = 0;
  /* Across the old value array, packing live entries linearly */
  for (i = 0; i < old_length; i++)
    {
      for (; j < new_length; j++)
	{
	  /* Old value not in use? Forget it */
	  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
	    goto doublebreak;

	  /* New slot free? Copy the old value and move along */
	  if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
	    {
	      clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
				sizeof (new_values->kvp[j]));
	      j++;
	      goto doublebreak;
	    }
	}
      /* This should never happen... */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }
  return new_values;
}
static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  int i, limit;
  u64 new_hash;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;
  int resplit_once;

  /* Lets the common (unsplit, non-linear) case skip the page indexing */
  static const BVT (clib_bihash_bucket) mask = {
    .linear_search = 1,
    .log2_pages = -1
  };
#if BIHASH_LAZY_INSTANTIATE
  /*
   * Create the table (is_add = 1, 2), or flunk the request now (is_add = 0).
   * Use the alloc_lock to protect the instantiate operation.
   */
  if (PREDICT_FALSE (h->instantiated == 0))
    {
      if (is_add == 0)
	return (-1);

      BV (clib_bihash_alloc_lock) (h);
      if (h->instantiated == 0)
	BV (clib_bihash_instantiate) (h);
      BV (clib_bihash_alloc_unlock) (h);
    }
#else
  /* Debug image: make sure the table has been instantiated */
  ASSERT (h->instantiated != 0);
#endif

  b = BV (clib_bihash_get_bucket) (h, hash);

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
	{
	  BV (clib_bihash_unlock_bucket) (b);
	  return (-1);
	}

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      *v->kvp = *add_v;
      tmp_b.as_u64 = 0;		/* clears the bucket lock */
      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
      tmp_b.refcnt = 1;
      CLIB_MEMORY_STORE_BARRIER ();

      b->as_u64 = tmp_b.as_u64;	/* unlocks the bucket */
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

      return (0);
    }
  /* WARNING: we're still looking at the live copy... */
  limit = BIHASH_KVP_PER_PAGE;
  v = BV (clib_bihash_get_value) (h, b->offset);

  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
    {
      if (PREDICT_FALSE (b->linear_search))
	limit <<= b->log2_pages;
      else
	v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
    }

  if (is_add)
    {
      /* Replace an existing key? */
      for (i = 0; i < limit; i++)
	{
	  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
	    {
	      /* Add but do not overwrite? */
	      if (is_add == 2)
		{
		  BV (clib_bihash_unlock_bucket) (b);
		  return (-2);
		}
	      clib_memcpy_fast (&(v->kvp[i].value),
				&add_v->value, sizeof (add_v->value));
	      BV (clib_bihash_unlock_bucket) (b);
	      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
	      return (0);
	    }
	}
      /* Otherwise, look for an empty slot */
      for (i = 0; i < limit; i++)
	{
	  if (BV (clib_bihash_is_free) (&(v->kvp[i])))
	    {
	      /* Copy the value first, so a racing reader never sees a
		 matching key with a half-written value */
	      clib_memcpy_fast (&(v->kvp[i].value),
				&add_v->value, sizeof (add_v->value));
	      CLIB_MEMORY_STORE_BARRIER ();
	      clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
				sizeof (add_v->key));
	      b->refcnt++;
	      BV (clib_bihash_unlock_bucket) (b);
	      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
	      return (0);
	    }
	}
      /* Out of space: look for stale data to overwrite */
      if (is_stale_cb)
	{
	  for (i = 0; i < limit; i++)
	    {
	      if (is_stale_cb (&(v->kvp[i]), arg))
		{
		  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
		  CLIB_MEMORY_STORE_BARRIER ();
		  BV (clib_bihash_unlock_bucket) (b);
		  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
		  return (0);
		}
	    }
	}
      /* Out of space in this bucket: fall through and split it */
    }
  else				/* delete case */
    {
      for (i = 0; i < limit; i++)
	{
	  /* Found the key? Kill it... */
	  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
	    {
	      clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
	      /* Is the bucket still in use? */
	      if (PREDICT_TRUE (b->refcnt > 1))
		{
		  b->refcnt--;
		  /* Switch back to the bucket-level kvp array? */
		  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
		      && b->log2_pages > 0)
		    {
		      tmp_b.as_u64 = b->as_u64;
		      b->offset = BV (clib_bihash_get_offset)
			(h, (void *) (b + 1));
		      b->linear_search = 0;
		      b->log2_pages = 0;
		      /* Clean up the bucket-level kvp array */
		      clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
				      sizeof (BVT (clib_bihash_kv)));
		      CLIB_MEMORY_STORE_BARRIER ();
		      BV (clib_bihash_unlock_bucket) (b);
		      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
		      goto free_backing_store;
		    }

		  CLIB_MEMORY_STORE_BARRIER ();
		  BV (clib_bihash_unlock_bucket) (b);
		  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
		  return (0);
		}
	      else		/* last entry: free the backing pages */
		{
		  /* Save the old bucket; we need log2_pages to free it */
		  tmp_b.as_u64 = b->as_u64;

		  /* Kill and unlock the bucket */
		  b->as_u64 = 0;

		free_backing_store:
		  BV (clib_bihash_alloc_lock) (h);
		  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
		  BV (value_free) (h, v, tmp_b.log2_pages);
		  BV (clib_bihash_alloc_unlock) (h);
		  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
						   1);
		  return (0);
		}
	    }
	}
      /* Not found... */
      BV (clib_bihash_unlock_bucket) (b);
      return (-3);
    }
  /* Move readers to a (locked) temp copy of the bucket */
  BV (clib_bihash_alloc_lock) (h);
  BV (make_working_copy) (h, b);

  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);

  working_copy = h->working_copies[thread_index];
  resplit_once = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);

  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
				 new_log2_pages);
  if (new_v == 0)
    {
    try_resplit:
      resplit_once = 1;
      new_log2_pages++;
      /* Try re-splitting. If that fails, fall back to linear search */
      new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
				     new_log2_pages);
      if (new_v == 0)
	{
	mark_linear:
	  new_log2_pages--;
	  /* Pinned collisions: use a linear-search bucket */
	  new_v = BV (split_and_rehash_linear) (h, working_copy,
						old_log2_pages,
						new_log2_pages);
	  mark_bucket_linear = 1;
	  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
	}
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
				       old_log2_pages + 1);
    }
  /* Try to add the new entry */
  save_new_v = new_v;
  new_hash = BV (clib_bihash_hash) (add_v);
  limit = BIHASH_KVP_PER_PAGE;
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  else
    new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
	{
	  clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
	  goto expand_ok;
	}
    }

  /* No room even after the split: free and try again */
  BV (value_free) (h, save_new_v, new_log2_pages);
  if (resplit_once)
    goto mark_linear;
  else
    goto try_resplit;

expand_ok:
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
  tmp_b.linear_search = mark_bucket_linear;
#if BIHASH_KVP_AT_BUCKET_LEVEL
  /* Compensate for the permanent refcount bump at the bucket level */
  if (new_log2_pages > 0)
#endif
    tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);
  tmp_b.lock = 0;
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = tmp_b.as_u64;

#if BIHASH_KVP_AT_BUCKET_LEVEL
  if (h->saved_bucket.log2_pages > 0)
    {
#endif
      /* Free the old bucket, except at the bucket level if so configured */
      v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
      BV (value_free) (h, v, h->saved_bucket.log2_pages);
#if BIHASH_KVP_AT_BUCKET_LEVEL
    }
#endif

  BV (clib_bihash_alloc_unlock) (h);
  return (0);
}
static_always_inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
						    is_stale_cb, arg);
}

int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}
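/*
 * Sketch of the is_add conventions wired through the code above (again using
 * the assumed clib_bihash_8_8 instantiation): 1 adds or overwrites, 2 adds
 * only if the key is absent, 0 deletes.  Return values follow the paths
 * shown earlier in this file: 0 on success, negative on failure (for
 * example -2 when is_add == 2 finds the key already present).
 */
static int
bihash_add_del_example (clib_bihash_8_8_t * t, clib_bihash_kv_8_8_t * kv)
{
  /* Add, overwriting the value if the key already exists */
  if (clib_bihash_add_del_8_8 (t, kv, 1 /* is_add */) < 0)
    return -1;

  /* Add only if not already present; expected to fail here */
  (void) clib_bihash_add_del_8_8 (t, kv, 2 /* is_add */);

  /* Delete; fails if the key cannot be found */
  return clib_bihash_add_del_8_8 (t, kv, 0 /* is_add */);
}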
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
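/*
 * Sketch of a stale-entry callback for the function above (clib_bihash_8_8
 * names assumed).  The callback returns non-zero when a slot may be
 * recycled; treating the value field as an expiry timestamp is purely an
 * example convention.
 */
static int
kv_is_expired (clib_bihash_kv_8_8_t * kv, void *arg)
{
  u64 now = *(u64 *) arg;
  return (kv->value < now);	/* stale: safe to overwrite */
}

static void
add_with_reclaim (clib_bihash_8_8_t * t, clib_bihash_kv_8_8_t * kv, u64 now)
{
  /* Behaves like is_add = 1, but may overwrite a slot the callback
     declares stale when the bucket page is otherwise full */
  clib_bihash_add_or_overwrite_stale_8_8 (t, kv, kv_is_expired, &now);
}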
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table '%s'\n", h->name ? h->name : (u8 *) "(unnamed)");
#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return format (s, "    empty, uninitialized");
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
	{
	  if (verbose > 1)
	    s = format (s, "[%d]: empty\n", i);
	  continue;
	}

      active_buckets++;
      if (b->linear_search)
	linear_buckets++;

      if (verbose)
	s = format
	  (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
	   b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
	{
	  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
	    {
	      if (BV (clib_bihash_is_free) (&v->kvp[k]))
		{
		  if (verbose > 1)
		    s = format (s, "    %d: empty\n",
				j * BIHASH_KVP_PER_PAGE + k);
		  continue;
		}
	      if (verbose)
		{
		  if (h->kvp_fmt_fn)
		    s = format (s, "    %d: %U\n",
				j * BIHASH_KVP_PER_PAGE + k,
				h->kvp_fmt_fn, &(v->kvp[k]), verbose);
		  else
		    s = format (s, "    %d: %U\n",
				j * BIHASH_KVP_PER_PAGE + k,
				BV (format_bihash_kvp), &(v->kvp[k]));
		}
	      active_elements++;
	    }
	  v++;
	}
    }
  s = format (s, "    %lld active elements %lld active buckets\n",
	      active_elements, active_buckets);
  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
	{
	  free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
	  nfree++;
	  free_elt_as_u64 = free_elt->next_free_as_u64;
	}

      if (nfree || verbose)
	s = format (s, "       [len %d] %u free elts\n", 1 << i, nfree);
    }
  s = format (s, "    %lld linear search buckets\n", linear_buckets);

  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * c = h->chunks;
      uword bytes_left = 0, total_size = 0, n_chunks = 0;

      while (c)
	{
	  bytes_left += c->bytes_left;
	  total_size += c->size;
	  n_chunks += 1;
	  c = c->next;
	}
      s = format (s,
		  "    heap: %u chunk(s) allocated\n"
		  "          bytes: used %U, scrap %U\n", n_chunks,
		  format_memory_size, total_size,
		  format_memory_size, bytes_left);
    }
  else
    {
      u64 used_bytes = alloc_arena_next (h);
      s = format (s,
		  "    arena: base %llx, next %llx\n"
		  "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
		  alloc_arena (h), alloc_arena_next (h),
		  used_bytes, used_bytes >> 20,
		  alloc_arena_size (h), alloc_arena_size (h) >> 20);
    }
  return s;
}
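/*
 * Sketch of driving the formatter above through the usual %U mechanism
 * (clib_bihash_8_8 names assumed, as in the earlier examples).
 */
static void
print_table_summary (clib_bihash_8_8_t * t)
{
  u8 *s = format (0, "%U", format_bihash_8_8, t, 0 /* verbose */);
  fformat (stdout, "%v\n", s);
  vec_free (s);
}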
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
{
  int i, j, k;
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return;
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
	continue;

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
	{
	  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
	    {
	      if (BV (clib_bihash_is_free) (&v->kvp[k]))
		continue;

	      if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
		return;
	      /* The callback may have freed the last entry in the bucket */
	      if (BV (clib_bihash_bucket_is_empty) (b))
		goto doublebreak;
	    }
	  v++;
	}
    doublebreak:;
    }
}
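/*
 * Sketch of a foreach walk callback (clib_bihash_8_8 names assumed).
 * Returning BIHASH_WALK_STOP ends the walk early, as the loop above shows;
 * otherwise return BIHASH_WALK_CONTINUE.
 */
static int
count_entries_cb (clib_bihash_kv_8_8_t * kv, void *arg)
{
  u64 *count = arg;
  (*count)++;
  return BIHASH_WALK_CONTINUE;
}

static u64
count_entries (clib_bihash_8_8_t * t)
{
  u64 count = 0;
  clib_bihash_foreach_key_value_pair_8_8 (t, count_entries_cb, &count);
  return count;
}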