FD.io VPP  v21.01.1
Vector Packet Processing
bihash_template.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
17 
18 #ifndef MAP_HUGE_SHIFT
19 #define MAP_HUGE_SHIFT 26
20 #endif
21 
22 #ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
23 #define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
24 #endif
25 
26 static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
27 {
28  uword rv;
29 
30  /* Round to an even number of cache lines */
31  nbytes = round_pow2 (nbytes, CLIB_CACHE_LINE_BYTES);
32 
33  if (BIHASH_USE_HEAP)
34  {
35  void *rv, *oldheap;
36  uword page_sz = sizeof (BVT (clib_bihash_value));
37  uword chunk_sz = round_pow2 (page_sz << BIIHASH_MIN_ALLOC_LOG2_PAGES,
39 
40  BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;
41 
42  /* if there is enough space in the currenrt chunk */
43  if (chunk && chunk->bytes_left >= nbytes)
44  {
45  rv = chunk->next_alloc;
46  chunk->bytes_left -= nbytes;
47  chunk->next_alloc += nbytes;
48  return rv;
49  }
50 
51  /* requested allocation is bigger than chunk size */
52  if (nbytes >= chunk_sz)
53  {
54  oldheap = clib_mem_set_heap (h->heap);
55  chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk),
57  clib_mem_set_heap (oldheap);
58  clib_memset_u8 (chunk, 0, sizeof (*chunk));
59  chunk->size = nbytes;
60  rv = (u8 *) (chunk + 1);
61  if (h->chunks)
62  {
63  /* take 2nd place in the list */
64  chunk->next = h->chunks->next;
65  chunk->prev = h->chunks;
66  h->chunks->next = chunk;
67  if (chunk->next)
68  chunk->next->prev = chunk;
69  }
70  else
71  h->chunks = chunk;
72 
73  return rv;
74  }
75 
76  oldheap = clib_mem_set_heap (h->heap);
77  chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk),
79  clib_mem_set_heap (oldheap);
80  chunk->size = chunk_sz;
81  chunk->bytes_left = chunk_sz;
82  chunk->next_alloc = (u8 *) (chunk + 1);
83  chunk->next = h->chunks;
84  chunk->prev = 0;
85  if (chunk->next)
86  chunk->next->prev = chunk;
87  h->chunks = chunk;
88  rv = chunk->next_alloc;
89  chunk->bytes_left -= nbytes;
90  chunk->next_alloc += nbytes;
91  return rv;
92  }
93 
94  rv = alloc_arena_next (h);
95  alloc_arena_next (h) += nbytes;
96 
97  if (alloc_arena_next (h) > alloc_arena_size (h))
99 
100  if (alloc_arena_next (h) > alloc_arena_mapped (h))
101  {
102  void *base, *rv;
103  uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
104  int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
105  int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
106  BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);
107 
108  /* new allocation is 25% of existing one */
109  if (alloc_arena_mapped (h) >> 2 > alloc)
110  alloc = alloc_arena_mapped (h) >> 2;
111 
112  /* round allocation to page size */
113  alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);
114 
115  base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));
116 
117  rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
118 
119  /* fallback - maybe we are still able to allocate normal pages */
120  if (rv == MAP_FAILED || mlock (base, alloc) != 0)
121  rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
122 
123  if (rv == MAP_FAILED)
124  os_out_of_memory ();
125 
126  alloc_arena_mapped (h) += alloc;
127  }
128 
129  return (void *) (uword) (rv + alloc_arena (h));
130 }
131 
132 static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
133 {
134  uword bucket_size;
135 
136  if (BIHASH_USE_HEAP)
137  {
138  h->heap = clib_mem_get_heap ();
139  h->chunks = 0;
140  alloc_arena (h) = (uword) clib_mem_get_heap_base (h->heap);
141  }
142  else
143  {
144  alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
145  BIHASH_LOG2_HUGEPAGE_SIZE);
146  if (alloc_arena (h) == ~0)
147  os_out_of_memory ();
148  alloc_arena_next (h) = 0;
149  alloc_arena_size (h) = h->memory_size;
150  alloc_arena_mapped (h) = 0;
151  }
152 
153  bucket_size = h->nbuckets * sizeof (h->buckets[0]);
154 
156  bucket_size +=
157  h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));
158 
159  h->buckets = BV (alloc_aligned) (h, bucket_size);
160  clib_memset_u8 (h->buckets, 0, bucket_size);
161 
163  {
164  int i;
165  BVT (clib_bihash_bucket) * b;
166 
167  b = h->buckets;
168 
169  for (i = 0; i < h->nbuckets; i++)
170  {
171  b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
172  b->refcnt = 1;
173  /* Mark all elements free */
174  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
175  sizeof (BVT (clib_bihash_kv)));
176 
177  /* Compute next bucket start address */
178  b = (void *) (((uword) b) + sizeof (*b) +
179  (BIHASH_KVP_PER_PAGE *
180  sizeof (BVT (clib_bihash_kv))));
181  }
182  }
184  h->instantiated = 1;
185 }
186 
187 void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
188 {
189  int i;
190  void *oldheap;
191  BVT (clib_bihash) * h = a->h;
192 
193  a->nbuckets = 1 << (max_log2 (a->nbuckets));
194 
195  h->name = (u8 *) a->name;
196  h->nbuckets = a->nbuckets;
197  h->log2_nbuckets = max_log2 (a->nbuckets);
198  h->memory_size = BIHASH_USE_HEAP ? 0 : a->memory_size;
199  h->instantiated = 0;
200  h->fmt_fn = BV (format_bihash);
201  h->kvp_fmt_fn = a->kvp_fmt_fn;
202 
203  alloc_arena (h) = 0;
204 
205  /*
206  * Make sure the requested size is rational. The max table
207  * size without playing the alignment card is 64 Gbytes.
208  * If someone starts complaining that's not enough, we can shift
209  * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
210  */
211  if (BIHASH_USE_HEAP)
212  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));
213 
214  /* Add this hash table to the list */
215  if (a->dont_add_to_all_bihash_list == 0)
216  {
217  for (i = 0; i < vec_len (clib_all_bihashes); i++)
218  if (clib_all_bihashes[i] == h)
219  goto do_lock;
220  oldheap = clib_all_bihash_set_heap ();
221  vec_add1 (clib_all_bihashes, (void *) h);
222  clib_mem_set_heap (oldheap);
223  }
224 
225 do_lock:
226  if (h->alloc_lock)
227  clib_mem_free ((void *) h->alloc_lock);
228 
229  /*
230  * Set up the lock now, so we can use it to make the first add
231  * thread-safe
232  */
235  h->alloc_lock[0] = 0;
236 
237 #if BIHASH_LAZY_INSTANTIATE
238  if (a->instantiate_immediately)
239 #endif
240  BV (clib_bihash_instantiate) (h);
241 }
242 
243 void BV (clib_bihash_init)
244  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
245 {
246  BVT (clib_bihash_init2_args) _a, *a = &_a;
247 
248  memset (a, 0, sizeof (*a));
249 
250  a->h = h;
251  a->name = name;
252  a->nbuckets = nbuckets;
253  a->memory_size = memory_size;
254 
255  BV (clib_bihash_init2) (a);
256 }
257 
258 #if BIHASH_32_64_SVM
259 #if !defined (MFD_ALLOW_SEALING)
260 #define MFD_ALLOW_SEALING 0x0002U
261 #endif
262 
/**
 * Create a bihash table backed by a shared memfd segment (initiator /
 * creator side). The responder process maps the same fd via
 * BV (clib_bihash_responder_init_svm).
 *
 * All cross-process pointers are stored as arena-relative u64 offsets in
 * the shared header (h->sh) so both processes can use them regardless of
 * mapping address.
 *
 * On memfd/ftruncate failure this warns and returns, leaving *h
 * partially initialized — callers are expected to treat that as fatal.
 */
void BV (clib_bihash_initiator_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  uword bucket_size;
  u8 *mmap_addr;
  vec_header_t *freelist_vh;
  int fd;

  /* SVM mode is incompatible with heap-backed tables */
  ASSERT (BIHASH_USE_HEAP == 0);

  /* Offsets are stored in 32 bits-ish fields; cap the segment size */
  ASSERT (memory_size < (1ULL << 32));
  /* Set up for memfd sharing */
  if ((fd = memfd_create (name, MFD_ALLOW_SEALING)) == -1)
    {
      clib_unix_warning ("memfd_create");
      return;
    }

  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  /* Not mission-critical, complain and continue */
  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
    clib_unix_warning ("fcntl (F_ADD_SEALS)");

  mmap_addr = mmap (0, memory_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      ASSERT (0);
    }

  /* Shared header lives at the base of the segment */
  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  /* Arena allocations start one cache line in, past the shared header */
  alloc_arena (h) = (u64) (uword) mmap_addr;
  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
  alloc_arena_size (h) = memory_size;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);
  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  /* Fixed-length freelist vector, also inside the shared segment */
  freelist_vh =
    BV (alloc_aligned) (h,
			sizeof (vec_header_t) +
			BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
  h->instantiated = 1;
}
335 
336 void BV (clib_bihash_responder_init_svm)
337  (BVT (clib_bihash) * h, char *name, int fd)
338 {
339  u8 *mmap_addr;
341  BVT (clib_bihash_shared_header) * sh;
342 
343  ASSERT (BIHASH_USE_HEAP == 0);
344 
345  /* Trial mapping, to learn the segment size */
346  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
347  if (mmap_addr == MAP_FAILED)
348  {
349  clib_unix_warning ("trial mmap failed");
350  ASSERT (0);
351  }
352 
353  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;
354 
355  memory_size = sh->alloc_arena_size;
356 
357  munmap (mmap_addr, 4096);
358 
359  /* Actual mapping, at the required size */
360  mmap_addr = mmap (0, memory_size,
361  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );
362 
363  if (mmap_addr == MAP_FAILED)
364  {
365  clib_unix_warning ("mmap failed");
366  ASSERT (0);
367  }
368 
369  (void) close (fd);
370 
371  h->sh = (void *) mmap_addr;
372  alloc_arena (h) = (u64) (uword) mmap_addr;
373  h->memfd = -1;
374 
375  h->name = (u8 *) name;
376  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
377  h->nbuckets = h->sh->nbuckets;
378  h->log2_nbuckets = max_log2 (h->nbuckets);
379 
380  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
381  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
382  h->fmt_fn = BV (format_bihash);
383  h->kvp_fmt_fn = NULL;
384 }
385 #endif /* BIHASH_32_64_SVM */
386 
/** Install a custom format function for this table's (key,value) pairs,
 *  used by format_bihash when printing entries. */
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
					 format_function_t * kvp_fmt_fn)
{
  h->kvp_fmt_fn = kvp_fmt_fn;
}
392 
/** Return nonzero iff the table's backing store has been created
 *  (i.e. instantiation — possibly lazy — has happened). */
int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h)
{
  return (h->instantiated != 0);
}
397 
398 void BV (clib_bihash_free) (BVT (clib_bihash) * h)
399 {
400  int i;
401 
402  if (PREDICT_FALSE (h->instantiated == 0))
403  goto never_initialized;
404 
405  h->instantiated = 0;
406 
407  if (BIHASH_USE_HEAP)
408  {
409  BVT (clib_bihash_alloc_chunk) * next, *chunk;
410  void *oldheap = clib_mem_set_heap (h->heap);
411 
412  chunk = h->chunks;
413  while (chunk)
414  {
415  next = chunk->next;
416  clib_mem_free (chunk);
417  chunk = next;
418  }
419  clib_mem_set_heap (oldheap);
420  }
421 
422  vec_free (h->working_copies);
423  vec_free (h->working_copy_lengths);
424 #if BIHASH_32_64_SVM == 0
425  vec_free (h->freelists);
426 #else
427  if (h->memfd > 0)
428  (void) close (h->memfd);
429 #endif
430  if (BIHASH_USE_HEAP == 0)
431  clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
432  alloc_arena_size (h));
433 never_initialized:
434  clib_memset_u8 (h, 0, sizeof (*h));
435  for (i = 0; i < vec_len (clib_all_bihashes); i++)
436  {
437  if ((void *) h == clib_all_bihashes[i])
438  {
440  return;
441  }
442  }
443  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
444  (u64) (uword) h);
445 }
446 
447 static
449 BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
450 {
451  BVT (clib_bihash_value) * rv = 0;
452 
453  ASSERT (h->alloc_lock[0]);
454 
455 #if BIHASH_32_64_SVM
456  ASSERT (log2_pages < vec_len (h->freelists));
457 #endif
458 
459  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
460  {
461  vec_validate_init_empty (h->freelists, log2_pages, 0);
462  rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
463  goto initialize;
464  }
465  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
466  h->freelists[log2_pages] = rv->next_free_as_u64;
467 
468 initialize:
469  ASSERT (rv);
470  /*
471  * Latest gcc complains that the length arg is zero
472  * if we replace (1<<log2_pages) with vec_len(rv).
473  * No clue.
474  */
475  clib_memset_u8 (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
476  return rv;
477 }
478 
/**
 * Return a value page array of 2^log2_pages pages to the allocator.
 * Must be called with the alloc lock held.
 *
 * In heap mode, allocations at or above the chunk size were made as a
 * dedicated chunk, so they can be unlinked from the chunk list and given
 * straight back to the heap; everything else goes on the per-size
 * freelist for reuse by value_alloc.
 */
static void
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
		 u32 log2_pages)
{
  ASSERT (h->alloc_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  if (BIHASH_USE_HEAP && log2_pages >= BIIHASH_MIN_ALLOC_LOG2_PAGES)
    {
      /* allocations bigger or equal to chunk size always contain single
       * alloc and they can be given back to heap */
      void *oldheap;
      BVT (clib_bihash_alloc_chunk) * c;
      /* The chunk header immediately precedes the value array */
      c = (BVT (clib_bihash_alloc_chunk) *) v - 1;

      /* Unlink the chunk from the doubly-linked list */
      if (c->prev)
	c->prev->next = c->next;
      else
	h->chunks = c->next;

      if (c->next)
	c->next->prev = c->prev;

      oldheap = clib_mem_set_heap (h->heap);
      clib_mem_free (c);
      clib_mem_set_heap (oldheap);
      return;
    }

  /* Debug builds: poison freed memory to catch use-after-free */
  if (CLIB_DEBUG > 0)
    clib_memset_u8 (v, 0xFE, sizeof (*v) * (1 << log2_pages));

  /* Push onto the per-size freelist (stored as arena offsets) */
  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
}
515 
516 static inline void
517 BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
518 {
519  BVT (clib_bihash_value) * v;
520  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
521  BVT (clib_bihash_value) * working_copy;
522  u32 thread_index = os_get_thread_index ();
523  int log2_working_copy_length;
524 
525  ASSERT (h->alloc_lock[0]);
526 
527  if (thread_index >= vec_len (h->working_copies))
528  {
529  vec_validate (h->working_copies, thread_index);
530  vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
531  }
532 
533  /*
534  * working_copies are per-cpu so that near-simultaneous
535  * updates from multiple threads will not result in sporadic, spurious
536  * lookup failures.
537  */
538  working_copy = h->working_copies[thread_index];
539  log2_working_copy_length = h->working_copy_lengths[thread_index];
540 
541  h->saved_bucket.as_u64 = b->as_u64;
542 
543  if (b->log2_pages > log2_working_copy_length)
544  {
545  /*
546  * It's not worth the bookkeeping to free working copies
547  * if (working_copy)
548  * clib_mem_free (working_copy);
549  */
550  working_copy = BV (alloc_aligned)
551  (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
552  h->working_copy_lengths[thread_index] = b->log2_pages;
553  h->working_copies[thread_index] = working_copy;
554 
555  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
556  1ULL << b->log2_pages);
557  }
558 
559  v = BV (clib_bihash_get_value) (h, b->offset);
560 
561  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
562  working_bucket.as_u64 = b->as_u64;
563  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
565  b->as_u64 = working_bucket.as_u64;
566  h->working_copies[thread_index] = working_copy;
567 }
568 
569 static
571 BV (split_and_rehash)
572  (BVT (clib_bihash) * h,
573  BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
574  u32 new_log2_pages)
575 {
576  BVT (clib_bihash_value) * new_values, *new_v;
577  int i, j, length_in_kvs;
578 
579  ASSERT (h->alloc_lock[0]);
580 
581  new_values = BV (value_alloc) (h, new_log2_pages);
582  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
583 
584  for (i = 0; i < length_in_kvs; i++)
585  {
586  u64 new_hash;
587 
588  /* Entry not in use? Forget it */
589  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
590  continue;
591 
592  /* rehash the item onto its new home-page */
593  new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
594  new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
595  new_v = &new_values[new_hash];
596 
597  /* Across the new home-page */
598  for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
599  {
600  /* Empty slot */
601  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
602  {
603  clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
604  sizeof (new_v->kvp[j]));
605  goto doublebreak;
606  }
607  }
608  /* Crap. Tell caller to try again */
609  BV (value_free) (h, new_values, new_log2_pages);
610  return 0;
611  doublebreak:;
612  }
613 
614  return new_values;
615 }
616 
617 static
620  (BVT (clib_bihash) * h,
621  BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
622  u32 new_log2_pages)
623 {
624  BVT (clib_bihash_value) * new_values;
625  int i, j, new_length, old_length;
626 
627  ASSERT (h->alloc_lock[0]);
628 
629  new_values = BV (value_alloc) (h, new_log2_pages);
630  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
631  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
632 
633  j = 0;
634  /* Across the old value array */
635  for (i = 0; i < old_length; i++)
636  {
637  /* Find a free slot in the new linear scan bucket */
638  for (; j < new_length; j++)
639  {
640  /* Old value not in use? Forget it. */
641  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
642  goto doublebreak;
643 
644  /* New value should never be in use */
645  if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
646  {
647  /* Copy the old value and move along */
648  clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
649  sizeof (new_values->kvp[j]));
650  j++;
651  goto doublebreak;
652  }
653  }
654  /* This should never happen... */
655  clib_warning ("BUG: linear rehash failed!");
656  BV (value_free) (h, new_values, new_log2_pages);
657  return 0;
658 
659  doublebreak:;
660  }
661  return new_values;
662 }
663 
664 static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
665  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
666  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
667 {
668  BVT (clib_bihash_bucket) * b, tmp_b;
669  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
670  int i, limit;
671  u64 new_hash;
672  u32 new_log2_pages, old_log2_pages;
673  u32 thread_index = os_get_thread_index ();
674  int mark_bucket_linear;
675  int resplit_once;
676 
677  /* *INDENT-OFF* */
678  static const BVT (clib_bihash_bucket) mask = {
679  .linear_search = 1,
680  .log2_pages = -1
681  };
682  /* *INDENT-ON* */
683 
684 #if BIHASH_LAZY_INSTANTIATE
685  /*
686  * Create the table (is_add=1,2), or flunk the request now (is_add=0)
687  * Use the alloc_lock to protect the instantiate operation.
688  */
689  if (PREDICT_FALSE (h->instantiated == 0))
690  {
691  if (is_add == 0)
692  return (-1);
693 
694  BV (clib_bihash_alloc_lock) (h);
695  if (h->instantiated == 0)
696  BV (clib_bihash_instantiate) (h);
697  BV (clib_bihash_alloc_unlock) (h);
698  }
699 #else
700  /* Debug image: make sure the table has been instantiated */
701  ASSERT (h->instantiated != 0);
702 #endif
703 
704  b = BV (clib_bihash_get_bucket) (h, hash);
705 
706  BV (clib_bihash_lock_bucket) (b);
707 
708  /* First elt in the bucket? */
709  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
710  {
711  if (is_add == 0)
712  {
713  BV (clib_bihash_unlock_bucket) (b);
714  return (-1);
715  }
716 
717  BV (clib_bihash_alloc_lock) (h);
718  v = BV (value_alloc) (h, 0);
719  BV (clib_bihash_alloc_unlock) (h);
720 
721  *v->kvp = *add_v;
722  tmp_b.as_u64 = 0; /* clears bucket lock */
723  tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
724  tmp_b.refcnt = 1;
726 
727  b->as_u64 = tmp_b.as_u64; /* unlocks the bucket */
728  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);
729 
730  return (0);
731  }
732 
733  /* WARNING: we're still looking at the live copy... */
734  limit = BIHASH_KVP_PER_PAGE;
735  v = BV (clib_bihash_get_value) (h, b->offset);
736 
737  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
738  {
739  if (PREDICT_FALSE (b->linear_search))
740  limit <<= b->log2_pages;
741  else
742  v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
743  }
744 
745  if (is_add)
746  {
747  /*
748  * Because reader threads are looking at live data,
749  * we have to be extra careful. Readers do NOT hold the
750  * bucket lock. We need to be SLOWER than a search, past the
751  * point where readers CHECK the bucket lock.
752  */
753 
754  /*
755  * For obvious (in hindsight) reasons, see if we're supposed to
756  * replace an existing key, then look for an empty slot.
757  */
758  for (i = 0; i < limit; i++)
759  {
760  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
761  {
762  /* Add but do not overwrite? */
763  if (is_add == 2)
764  {
765  BV (clib_bihash_unlock_bucket) (b);
766  return (-2);
767  }
768 
769  clib_memcpy_fast (&(v->kvp[i].value),
770  &add_v->value, sizeof (add_v->value));
771  BV (clib_bihash_unlock_bucket) (b);
772  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
773  return (0);
774  }
775  }
776  /*
777  * Look for an empty slot. If found, use it
778  */
779  for (i = 0; i < limit; i++)
780  {
781  if (BV (clib_bihash_is_free) (&(v->kvp[i])))
782  {
783  /*
784  * Copy the value first, so that if a reader manages
785  * to match the new key, the value will be right...
786  */
787  clib_memcpy_fast (&(v->kvp[i].value),
788  &add_v->value, sizeof (add_v->value));
789  CLIB_MEMORY_STORE_BARRIER (); /* Make sure the value has settled */
790  clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
791  sizeof (add_v->key));
792  b->refcnt++;
793  ASSERT (b->refcnt > 0);
794  BV (clib_bihash_unlock_bucket) (b);
795  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
796  return (0);
797  }
798  }
799  /* look for stale data to overwrite */
800  if (is_stale_cb)
801  {
802  for (i = 0; i < limit; i++)
803  {
804  if (is_stale_cb (&(v->kvp[i]), arg))
805  {
806  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
808  BV (clib_bihash_unlock_bucket) (b);
809  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
810  return (0);
811  }
812  }
813  }
814  /* Out of space in this bucket, split the bucket... */
815  }
816  else /* delete case */
817  {
818  for (i = 0; i < limit; i++)
819  {
820  /* Found the key? Kill it... */
821  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
822  {
823  clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
824  /* Is the bucket empty? */
825  if (PREDICT_TRUE (b->refcnt > 1))
826  {
827  b->refcnt--;
828  /* Switch back to the bucket-level kvp array? */
829  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
830  && b->log2_pages > 0)
831  {
832  tmp_b.as_u64 = b->as_u64;
833  b->offset = BV (clib_bihash_get_offset)
834  (h, (void *) (b + 1));
835  b->linear_search = 0;
836  b->log2_pages = 0;
837  /* Clean up the bucket-level kvp array */
838  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
839  sizeof (BVT (clib_bihash_kv)));
841  BV (clib_bihash_unlock_bucket) (b);
842  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
843  goto free_backing_store;
844  }
845 
847  BV (clib_bihash_unlock_bucket) (b);
848  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
849  return (0);
850  }
851  else /* yes, free it */
852  {
853  /* Save old bucket value, need log2_pages to free it */
854  tmp_b.as_u64 = b->as_u64;
855 
856  /* Kill and unlock the bucket */
857  b->as_u64 = 0;
858 
859  free_backing_store:
860  /* And free the backing storage */
861  BV (clib_bihash_alloc_lock) (h);
862  /* Note: v currently points into the middle of the bucket */
863  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
864  BV (value_free) (h, v, tmp_b.log2_pages);
865  BV (clib_bihash_alloc_unlock) (h);
866  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
867  1);
868  return (0);
869  }
870  }
871  }
872  /* Not found... */
873  BV (clib_bihash_unlock_bucket) (b);
874  return (-3);
875  }
876 
877  /* Move readers to a (locked) temp copy of the bucket */
878  BV (clib_bihash_alloc_lock) (h);
879  BV (make_working_copy) (h, b);
880 
881  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
882 
883  old_log2_pages = h->saved_bucket.log2_pages;
884  new_log2_pages = old_log2_pages + 1;
885  mark_bucket_linear = 0;
886  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
887  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);
888 
889  working_copy = h->working_copies[thread_index];
890  resplit_once = 0;
891  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);
892 
893  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
894  new_log2_pages);
895  if (new_v == 0)
896  {
897  try_resplit:
898  resplit_once = 1;
899  new_log2_pages++;
900  /* Try re-splitting. If that fails, fall back to linear search */
901  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
902  new_log2_pages);
903  if (new_v == 0)
904  {
905  mark_linear:
906  new_log2_pages--;
907  /* pinned collisions, use linear search */
908  new_v =
909  BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
910  new_log2_pages);
911  mark_bucket_linear = 1;
912  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
913  }
914  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
915  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
916  old_log2_pages + 1);
917  }
918 
919  /* Try to add the new entry */
920  save_new_v = new_v;
921  new_hash = BV (clib_bihash_hash) (add_v);
922  limit = BIHASH_KVP_PER_PAGE;
923  if (mark_bucket_linear)
924  limit <<= new_log2_pages;
925  else
926  new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
927 
928  for (i = 0; i < limit; i++)
929  {
930  if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
931  {
932  clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
933  goto expand_ok;
934  }
935  }
936 
937  /* Crap. Try again */
938  BV (value_free) (h, save_new_v, new_log2_pages);
939  /*
940  * If we've already doubled the size of the bucket once,
941  * fall back to linear search now.
942  */
943  if (resplit_once)
944  goto mark_linear;
945  else
946  goto try_resplit;
947 
948 expand_ok:
949  tmp_b.log2_pages = new_log2_pages;
950  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
951  tmp_b.linear_search = mark_bucket_linear;
952 #if BIHASH_KVP_AT_BUCKET_LEVEL
953  /* Compensate for permanent refcount bump at the bucket level */
954  if (new_log2_pages > 0)
955 #endif
956  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
957  ASSERT (tmp_b.refcnt > 0);
958  tmp_b.lock = 0;
960  b->as_u64 = tmp_b.as_u64;
961 
962 #if BIHASH_KVP_AT_BUCKET_LEVEL
963  if (h->saved_bucket.log2_pages > 0)
964  {
965 #endif
966 
967  /* free the old bucket, except at the bucket level if so configured */
968  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
969  BV (value_free) (h, v, h->saved_bucket.log2_pages);
970 
971 #if BIHASH_KVP_AT_BUCKET_LEVEL
972  }
973 #endif
974 
975 
976  BV (clib_bihash_alloc_unlock) (h);
977  return (0);
978 }
979 
980 static_always_inline int BV (clib_bihash_add_del_inline)
981  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
982  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
983 {
984  u64 hash = BV (clib_bihash_hash) (add_v);
985  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
986  is_stale_cb, arg);
987 }
988 
/** Public add/delete entry point: no stale-entry callback.
 *  See clib_bihash_add_del_inline_with_hash for return codes. */
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}
994 
/** Add a pair; when the target page is full, stale_callback may nominate
 *  an existing entry to overwrite instead of splitting the bucket. */
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
1001 
/** Non-inline lookup wrapper: copy the value for search_key into *valuep.
 *  Returns 0 on hit; delegates to the inline search implementation. */
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}
1008 
/**
 * Format function for the table (va_args: table pointer, int verbose).
 * verbose = 0: summary counters only; 1: per-bucket and per-entry lines;
 * >1: also prints empty buckets/slots. Walks live data without locks, so
 * output may be momentarily inconsistent under concurrent updates.
 */
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table '%s'\n", h->name ? h->name : (u8 *) "(unnamed)");

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return format (s, "    empty, uninitialized");
#endif

  /* Per-bucket walk: count and (verbosely) print entries */
  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
	{
	  if (verbose > 1)
	    s = format (s, "[%d]: empty\n", i);
	  continue;
	}

      active_buckets++;

      if (b->linear_search)
	linear_buckets++;

      if (verbose)
	{
	  s = format
	    (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
	     b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
	}

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
	{
	  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
	    {
	      if (BV (clib_bihash_is_free) (&v->kvp[k]))
		{
		  if (verbose > 1)
		    s = format (s, "    %d: empty\n",
				j * BIHASH_KVP_PER_PAGE + k);
		  continue;
		}
	      if (verbose)
		{
		  /* Prefer the user-supplied kvp formatter when set */
		  if (h->kvp_fmt_fn)
		    {
		      s = format (s, "    %d: %U\n",
				  j * BIHASH_KVP_PER_PAGE + k,
				  h->kvp_fmt_fn, &(v->kvp[k]), verbose);
		    }
		  else
		    {
		      s = format (s, "    %d: %U\n",
				  j * BIHASH_KVP_PER_PAGE + k,
				  BV (format_bihash_kvp), &(v->kvp[k]));
		    }
		}
	      active_elements++;
	    }
	  v++;
	}
    }

  s = format (s, "    %lld active elements %lld active buckets\n",
	      active_elements, active_buckets);
  s = format (s, "    %d free lists\n", vec_len (h->freelists));

  /* Walk each per-size freelist and count recyclable page arrays */
  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
	{
	  free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
	  nfree++;
	  free_elt_as_u64 = free_elt->next_free_as_u64;
	}

      if (nfree || verbose)
	s = format (s, "       [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, "    %lld linear search buckets\n", linear_buckets);
  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * c = h->chunks;
      uword bytes_left = 0, total_size = 0, n_chunks = 0;

      /* Sum chunk-list usage for the heap-backed case */
      while (c)
	{
	  bytes_left += c->bytes_left;
	  total_size += c->size;
	  n_chunks += 1;
	  c = c->next;
	}
      s = format (s,
		  "    heap: %u chunk(s) allocated\n"
		  "          bytes: used %U, scrap %U\n", n_chunks,
		  format_memory_size, total_size,
		  format_memory_size, bytes_left);
    }
  else
    {
      u64 used_bytes = alloc_arena_next (h);
      s = format (s,
		  "    arena: base %llx, next %llx\n"
		  "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
		  alloc_arena (h), alloc_arena_next (h),
		  used_bytes, used_bytes >> 20,
		  alloc_arena_size (h), alloc_arena_size (h) >> 20);
    }
  return s;
}
1134 
1136  (BVT (clib_bihash) * h,
1137  BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
1138 {
1139  int i, j, k;
1140  BVT (clib_bihash_bucket) * b;
1141  BVT (clib_bihash_value) * v;
1142 
1143 
1144 #if BIHASH_LAZY_INSTANTIATE
1145  if (PREDICT_FALSE (h->instantiated == 0))
1146  return;
1147 #endif
1148 
1149  for (i = 0; i < h->nbuckets; i++)
1150  {
1151  b = BV (clib_bihash_get_bucket) (h, i);
1152  if (BV (clib_bihash_bucket_is_empty) (b))
1153  continue;
1154 
1155  v = BV (clib_bihash_get_value) (h, b->offset);
1156  for (j = 0; j < (1 << b->log2_pages); j++)
1157  {
1158  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
1159  {
1160  if (BV (clib_bihash_is_free) (&v->kvp[k]))
1161  continue;
1162 
1163  if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
1164  return;
1165  /*
1166  * In case the callback deletes the last entry in the bucket...
1167  */
1168  if (BV (clib_bihash_bucket_is_empty) (b))
1169  goto doublebreak;
1170  }
1171  v++;
1172  }
1173  doublebreak:
1174  ;
1175  }
1176 }
1177 
1178 /** @endcond */
1179 
1180 /*
1181  * fd.io coding-style-patch-verification: ON
1182  *
1183  * Local Variables:
1184  * eval: (c-set-style "gnu")
1185  * End:
1186  */
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
#define BIHASH_KVP_PER_PAGE
Definition: bihash_16_8.h:25
a
Definition: bitmap.h:544
void clib_bihash_free(clib_bihash *h)
Destroy a bounded index extensible hash table.
#define PREDICT_TRUE(x)
Definition: clib.h:122
unsigned long u64
Definition: types.h:89
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:136
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
static clib_mem_heap_t * clib_mem_set_heap(clib_mem_heap_t *heap)
Definition: mem.h:365
static int memfd_create(const char *name, unsigned int flags)
Definition: syscall.h:52
void os_out_of_memory(void)
Definition: unix-misc.c:219
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
u16 mask
Definition: flow_types.api:52
unsigned char u8
Definition: types.h:56
__clib_export void ** clib_all_bihashes
u8 *() format_function_t(u8 *s, va_list *args)
Definition: format.h:48
#define MFD_ALLOW_SEALING
Definition: main.c:104
#define static_always_inline
Definition: clib.h:109
int clib_bihash_add_del(clib_bihash *h, clib_bihash_kv *add_v, int is_add)
Add or delete a (key,value) pair from a bi-hash table.
description fragment has unexpected format
Definition: map.api:433
u8 * format_memory_size(u8 *s, va_list *va)
Definition: std-formats.c:209
const cJSON *const b
Definition: cJSON.h:255
unsigned int u32
Definition: types.h:88
static uword clib_bihash_get_offset(clib_bihash *h, void *v)
Get clib mheap offset given a pointer.
int(* clib_bihash_foreach_key_value_pair_cb)(clib_bihash_kv *kv, void *ctx)
Definition: bihash_doc.h:175
void clib_bihash_foreach_key_value_pair(clib_bihash *h, clib_bihash_foreach_key_value_pair_cb *callback, void *arg)
Visit active (key,value) pairs in a bi-hash table.
u64 memory_size
Definition: vhost_user.h:105
vec_header_t h
Definition: buffer.c:322
static vnet_classify_entry_t * split_and_rehash_linear(vnet_classify_table_t *t, vnet_classify_entry_t *old_values, u32 old_log2_pages, u32 new_log2_pages)
#define PREDICT_FALSE(x)
Definition: clib.h:121
#define F_ADD_SEALS
Definition: mem.c:41
#define MAP_HUGE_SHIFT
Definition: ip4_mtrie.c:791
void clib_bihash_init(clib_bihash *h, char *name, u32 nbuckets, uword memory_size)
initialize a bounded index extensible hash table
BVT(clib_bihash)
Definition: l2_fib.c:1019
static vnet_classify_entry_t * split_and_rehash(vnet_classify_table_t *t, vnet_classify_entry_t *old_values, u32 old_log2_pages, u32 new_log2_pages)
svmdb_client_t * c
#define BIHASH_KVP_AT_BUCKET_LEVEL
Definition: bihash_16_8.h:26
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
#define vec_free(V)
Free vector's memory (no header).
Definition: vec.h:380
#define clib_warning(format, args...)
Definition: error.h:59
static uword round_pow2(uword x, uword pow2)
Definition: clib.h:265
u32 len
Number of elements in vector (NOT its allocated length).
Definition: vec_bootstrap.h:57
string name[64]
Definition: ip.api:44
void * clib_mem_get_heap_base(clib_mem_heap_t *heap)
Definition: mem_dlmalloc.c:583
static clib_mem_heap_t * clib_mem_get_heap(void)
Definition: mem.h:359
uword clib_mem_vm_reserve(uword start, uword size, clib_mem_page_sz_t log2_page_sz)
Definition: mem.c:332
static void make_working_copy(vnet_classify_table_t *t, vnet_classify_bucket_t *b)
#define ASSERT(truth)
int clib_bihash_search_inline_2(clib_bihash *h, clib_bihash_kv *search_key, clib_bihash_kv *valuep)
Search a bi-hash table.
#define vec_delete(V, N, M)
Delete N elements starting at element M.
Definition: vec.h:854
static void clib_mem_free(void *p)
Definition: mem.h:311
u8 log2_pages
Definition: bihash_doc.h:62
vector header structure
Definition: vec_bootstrap.h:55
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
Definition: string.h:424
static uword extract_bits(uword x, int start, int count)
Definition: clib.h:313
#define BIHASH_USE_HEAP
Definition: bihash_16_8.h:29
template key/value backing page structure
Definition: bihash_doc.h:44
static void clib_mem_vm_free(void *addr, uword size)
Definition: mem.h:443
u8 vector_data[0]
Vector data .
Definition: vec_bootstrap.h:60
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
Definition: clib.h:209
u64 uword
Definition: types.h:112
#define clib_unix_warning(format, args...)
Definition: error.h:68
static_always_inline uword os_get_thread_index(void)
Definition: os.h:63
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:261
__clib_export clib_mem_heap_t * clib_all_bihash_set_heap(void)
static void * clib_bihash_get_value(clib_bihash *h, uword offset)
Get pointer to value page given its clib mheap offset.
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
Definition: vec.h:556
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static void * alloc_aligned(uword size, uword log2_align, void **ptr_to_free)
Definition: test_vec.h:191
#define F_SEAL_SHRINK
Definition: mem.c:45