/*
 * FD.io VPP v21.10.1-2-g0a485f517 — Vector Packet Processing
 * bihash_template.c (bounded-index extensible hash template)
 */
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
17 
18 #ifndef MAP_HUGE_SHIFT
19 #define MAP_HUGE_SHIFT 26
20 #endif
21 
22 #ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
23 #define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
24 #endif
25 
26 #ifndef BIHASH_USE_HEAP
27 #define BIHASH_USE_HEAP 1
28 #endif
29 
30 static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
31 {
32  uword rv;
33 
34  /* Round to an even number of cache lines */
35  nbytes = round_pow2 (nbytes, CLIB_CACHE_LINE_BYTES);
36 
37  if (BIHASH_USE_HEAP)
38  {
39  void *rv, *oldheap;
40  uword page_sz = sizeof (BVT (clib_bihash_value));
41  uword chunk_sz = round_pow2 (page_sz << BIIHASH_MIN_ALLOC_LOG2_PAGES,
43 
44  BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;
45 
46  /* if there is enough space in the currenrt chunk */
47  if (chunk && chunk->bytes_left >= nbytes)
48  {
49  rv = chunk->next_alloc;
50  chunk->bytes_left -= nbytes;
51  chunk->next_alloc += nbytes;
52  return rv;
53  }
54 
55  /* requested allocation is bigger than chunk size */
56  if (nbytes >= chunk_sz)
57  {
58  oldheap = clib_mem_set_heap (h->heap);
59  chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk),
61  clib_mem_set_heap (oldheap);
62  clib_memset_u8 (chunk, 0, sizeof (*chunk));
63  chunk->size = nbytes;
64  rv = (u8 *) (chunk + 1);
65  if (h->chunks)
66  {
67  /* take 2nd place in the list */
68  chunk->next = h->chunks->next;
69  chunk->prev = h->chunks;
70  h->chunks->next = chunk;
71  if (chunk->next)
72  chunk->next->prev = chunk;
73  }
74  else
75  h->chunks = chunk;
76 
77  return rv;
78  }
79 
80  oldheap = clib_mem_set_heap (h->heap);
81  chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk),
83  clib_mem_set_heap (oldheap);
84  chunk->size = chunk_sz;
85  chunk->bytes_left = chunk_sz;
86  chunk->next_alloc = (u8 *) (chunk + 1);
87  chunk->next = h->chunks;
88  chunk->prev = 0;
89  if (chunk->next)
90  chunk->next->prev = chunk;
91  h->chunks = chunk;
92  rv = chunk->next_alloc;
93  chunk->bytes_left -= nbytes;
94  chunk->next_alloc += nbytes;
95  return rv;
96  }
97 
98  rv = alloc_arena_next (h);
99  alloc_arena_next (h) += nbytes;
100 
101  if (alloc_arena_next (h) > alloc_arena_size (h))
102  os_out_of_memory ();
103 
104  if (alloc_arena_next (h) > alloc_arena_mapped (h))
105  {
106  void *base, *rv;
107  uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
108  int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
109  int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
110  BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);
111 
112  /* new allocation is 25% of existing one */
113  if (alloc_arena_mapped (h) >> 2 > alloc)
114  alloc = alloc_arena_mapped (h) >> 2;
115 
116  /* round allocation to page size */
117  alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);
118 
119  base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));
120 
121  rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
122 
123  /* fallback - maybe we are still able to allocate normal pages */
124  if (rv == MAP_FAILED || mlock (base, alloc) != 0)
125  rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
126 
127  if (rv == MAP_FAILED)
128  os_out_of_memory ();
129 
130  alloc_arena_mapped (h) += alloc;
131  }
132 
133  return (void *) (uword) (rv + alloc_arena (h));
134 }
135 
136 static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
137 {
138  uword bucket_size;
139 
140  if (BIHASH_USE_HEAP)
141  {
142  h->heap = clib_mem_get_heap ();
143  h->chunks = 0;
144  alloc_arena (h) = (uword) clib_mem_get_heap_base (h->heap);
145  }
146  else
147  {
148  alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
149  BIHASH_LOG2_HUGEPAGE_SIZE);
150  if (alloc_arena (h) == ~0)
151  os_out_of_memory ();
152  alloc_arena_next (h) = 0;
153  alloc_arena_size (h) = h->memory_size;
154  alloc_arena_mapped (h) = 0;
155  }
156 
157  bucket_size = h->nbuckets * sizeof (h->buckets[0]);
158 
160  bucket_size +=
161  h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));
162 
163  h->buckets = BV (alloc_aligned) (h, bucket_size);
164  clib_memset_u8 (h->buckets, 0, bucket_size);
165 
167  {
168  int i;
169  BVT (clib_bihash_bucket) * b;
170 
171  b = h->buckets;
172 
173  for (i = 0; i < h->nbuckets; i++)
174  {
175  b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
176  b->refcnt = 1;
177  /* Mark all elements free */
178  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
179  sizeof (BVT (clib_bihash_kv)));
180 
181  /* Compute next bucket start address */
182  b = (void *) (((uword) b) + sizeof (*b) +
184  sizeof (BVT (clib_bihash_kv))));
185  }
186  }
188  h->instantiated = 1;
189 }
190 
191 void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
192 {
193  int i;
194  void *oldheap;
195  BVT (clib_bihash) * h = a->h;
196 
197  a->nbuckets = 1 << (max_log2 (a->nbuckets));
198 
199  h->name = (u8 *) a->name;
200  h->nbuckets = a->nbuckets;
201  h->log2_nbuckets = max_log2 (a->nbuckets);
202  h->memory_size = BIHASH_USE_HEAP ? 0 : a->memory_size;
203  h->instantiated = 0;
204  h->dont_add_to_all_bihash_list = a->dont_add_to_all_bihash_list;
205  h->fmt_fn = BV (format_bihash);
206  h->kvp_fmt_fn = a->kvp_fmt_fn;
207 
208  alloc_arena (h) = 0;
209 
210  /*
211  * Make sure the requested size is rational. The max table
212  * size without playing the alignment card is 64 Gbytes.
213  * If someone starts complaining that's not enough, we can shift
214  * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
215  */
216  if (BIHASH_USE_HEAP)
217  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));
218 
219  /* Add this hash table to the list */
220  if (a->dont_add_to_all_bihash_list == 0)
221  {
222  for (i = 0; i < vec_len (clib_all_bihashes); i++)
223  if (clib_all_bihashes[i] == h)
224  goto do_lock;
225  oldheap = clib_all_bihash_set_heap ();
226  vec_add1 (clib_all_bihashes, (void *) h);
227  clib_mem_set_heap (oldheap);
228  }
229 
230 do_lock:
231  if (h->alloc_lock)
232  clib_mem_free ((void *) h->alloc_lock);
233 
234  /*
235  * Set up the lock now, so we can use it to make the first add
236  * thread-safe
237  */
240  h->alloc_lock[0] = 0;
241 
242 #if BIHASH_LAZY_INSTANTIATE
243  if (a->instantiate_immediately)
244 #endif
245  BV (clib_bihash_instantiate) (h);
246 }
247 
248 void BV (clib_bihash_init)
249  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
250 {
251  BVT (clib_bihash_init2_args) _a, *a = &_a;
252 
253  memset (a, 0, sizeof (*a));
254 
255  a->h = h;
256  a->name = name;
257  a->nbuckets = nbuckets;
258  a->memory_size = memory_size;
259 
260  BV (clib_bihash_init2) (a);
261 }
262 
263 #if BIHASH_32_64_SVM
264 #if !defined (MFD_ALLOW_SEALING)
265 #define MFD_ALLOW_SEALING 0x0002U
266 #endif
267 
268 void BV (clib_bihash_initiator_init_svm)
269  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
270 {
271  uword bucket_size;
272  u8 *mmap_addr;
273  vec_header_t *freelist_vh;
274  int fd;
275 
276  ASSERT (BIHASH_USE_HEAP == 0);
277 
278  ASSERT (memory_size < (1ULL << 32));
279  /* Set up for memfd sharing */
281  {
282  clib_unix_warning ("memfd_create");
283  return;
284  }
285 
286  if (ftruncate (fd, memory_size) < 0)
287  {
288  clib_unix_warning ("ftruncate");
289  return;
290  }
291 
292  /* Not mission-critical, complain and continue */
293  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
294  clib_unix_warning ("fcntl (F_ADD_SEALS)");
295 
296  mmap_addr = mmap (0, memory_size,
297  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );
298 
299  if (mmap_addr == MAP_FAILED)
300  {
301  clib_unix_warning ("mmap failed");
302  ASSERT (0);
303  }
304 
305  h->sh = (void *) mmap_addr;
306  h->memfd = fd;
307  nbuckets = 1 << (max_log2 (nbuckets));
308 
309  h->name = (u8 *) name;
310  h->sh->nbuckets = h->nbuckets = nbuckets;
311  h->log2_nbuckets = max_log2 (nbuckets);
312 
313  alloc_arena (h) = (u64) (uword) mmap_addr;
314  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
315  alloc_arena_size (h) = memory_size;
316 
317  bucket_size = nbuckets * sizeof (h->buckets[0]);
318  h->buckets = BV (alloc_aligned) (h, bucket_size);
319  clib_memset_u8 (h->buckets, 0, bucket_size);
320  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);
321 
322  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
323  h->alloc_lock[0] = 0;
324 
325  h->sh->alloc_lock_as_u64 =
326  (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
327  freelist_vh =
328  BV (alloc_aligned) (h,
329  sizeof (vec_header_t) +
330  BIHASH_FREELIST_LENGTH * sizeof (u64));
331  freelist_vh->len = BIHASH_FREELIST_LENGTH;
332  h->sh->freelists_as_u64 =
333  (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
334  h->freelists = (void *) (freelist_vh->vector_data);
335 
336  h->fmt_fn = BV (format_bihash);
337  h->kvp_fmt_fn = NULL;
338  h->instantiated = 1;
339 }
340 
341 void BV (clib_bihash_responder_init_svm)
342  (BVT (clib_bihash) * h, char *name, int fd)
343 {
344  u8 *mmap_addr;
346  BVT (clib_bihash_shared_header) * sh;
347 
348  ASSERT (BIHASH_USE_HEAP == 0);
349 
350  /* Trial mapping, to learn the segment size */
351  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
352  if (mmap_addr == MAP_FAILED)
353  {
354  clib_unix_warning ("trial mmap failed");
355  ASSERT (0);
356  }
357 
358  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;
359 
360  memory_size = sh->alloc_arena_size;
361 
362  munmap (mmap_addr, 4096);
363 
364  /* Actual mapping, at the required size */
365  mmap_addr = mmap (0, memory_size,
366  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );
367 
368  if (mmap_addr == MAP_FAILED)
369  {
370  clib_unix_warning ("mmap failed");
371  ASSERT (0);
372  }
373 
374  (void) close (fd);
375 
376  h->sh = (void *) mmap_addr;
377  alloc_arena (h) = (u64) (uword) mmap_addr;
378  h->memfd = -1;
379 
380  h->name = (u8 *) name;
381  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
382  h->nbuckets = h->sh->nbuckets;
383  h->log2_nbuckets = max_log2 (h->nbuckets);
384 
385  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
386  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
387  h->fmt_fn = BV (format_bihash);
388  h->kvp_fmt_fn = NULL;
389 }
390 #endif /* BIHASH_32_64_SVM */
391 
392 void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
393  format_function_t * kvp_fmt_fn)
394 {
395  h->kvp_fmt_fn = kvp_fmt_fn;
396 }
397 
398 int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h)
399 {
400  return (h->instantiated != 0);
401 }
402 
403 void BV (clib_bihash_free) (BVT (clib_bihash) * h)
404 {
405  int i;
406 
407  if (PREDICT_FALSE (h->instantiated == 0))
408  goto never_initialized;
409 
410  h->instantiated = 0;
411 
412  if (BIHASH_USE_HEAP)
413  {
414  BVT (clib_bihash_alloc_chunk) * next, *chunk;
415  void *oldheap = clib_mem_set_heap (h->heap);
416 
417  chunk = h->chunks;
418  while (chunk)
419  {
420  next = chunk->next;
421  clib_mem_free (chunk);
422  chunk = next;
423  }
424  clib_mem_set_heap (oldheap);
425  }
426 
427  vec_free (h->working_copies);
428  vec_free (h->working_copy_lengths);
429 #if BIHASH_32_64_SVM == 0
430  vec_free (h->freelists);
431 #else
432  if (h->memfd > 0)
433  (void) close (h->memfd);
434 #endif
435  if (BIHASH_USE_HEAP == 0)
436  clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
437  alloc_arena_size (h));
438 never_initialized:
439  if (h->dont_add_to_all_bihash_list)
440  {
441  clib_memset_u8 (h, 0, sizeof (*h));
442  return;
443  }
444  clib_memset_u8 (h, 0, sizeof (*h));
445  for (i = 0; i < vec_len (clib_all_bihashes); i++)
446  {
447  if ((void *) h == clib_all_bihashes[i])
448  {
450  return;
451  }
452  }
453  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
454  (u64) (uword) h);
455 }
456 
457 static
459 BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
460 {
461  BVT (clib_bihash_value) * rv = 0;
462 
463  ASSERT (h->alloc_lock[0]);
464 
465 #if BIHASH_32_64_SVM
466  ASSERT (log2_pages < vec_len (h->freelists));
467 #endif
468 
469  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
470  {
471  vec_validate_init_empty (h->freelists, log2_pages, 0);
472  rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
473  goto initialize;
474  }
475  rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
476  h->freelists[log2_pages] = rv->next_free_as_u64;
477 
478 initialize:
479  ASSERT (rv);
480  /*
481  * Latest gcc complains that the length arg is zero
482  * if we replace (1<<log2_pages) with vec_len(rv).
483  * No clue.
484  */
485  clib_memset_u8 (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
486  return rv;
487 }
488 
489 static void
490 BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
491  u32 log2_pages)
492 {
493  ASSERT (h->alloc_lock[0]);
494 
495  ASSERT (vec_len (h->freelists) > log2_pages);
496 
497  if (BIHASH_USE_HEAP && log2_pages >= BIIHASH_MIN_ALLOC_LOG2_PAGES)
498  {
499  /* allocations bigger or equal to chunk size always contain single
500  * alloc and they can be given back to heap */
501  void *oldheap;
502  BVT (clib_bihash_alloc_chunk) * c;
503  c = (BVT (clib_bihash_alloc_chunk) *) v - 1;
504 
505  if (c->prev)
506  c->prev->next = c->next;
507  else
508  h->chunks = c->next;
509 
510  if (c->next)
511  c->next->prev = c->prev;
512 
513  oldheap = clib_mem_set_heap (h->heap);
514  clib_mem_free (c);
515  clib_mem_set_heap (oldheap);
516  return;
517  }
518 
519  if (CLIB_DEBUG > 0)
520  clib_memset_u8 (v, 0xFE, sizeof (*v) * (1 << log2_pages));
521 
522  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
523  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
524 }
525 
526 static inline void
527 BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
528 {
529  BVT (clib_bihash_value) * v;
530  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
531  BVT (clib_bihash_value) * working_copy;
533  int log2_working_copy_length;
534 
535  ASSERT (h->alloc_lock[0]);
536 
537  if (thread_index >= vec_len (h->working_copies))
538  {
539  vec_validate (h->working_copies, thread_index);
540  vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
541  }
542 
543  /*
544  * working_copies are per-cpu so that near-simultaneous
545  * updates from multiple threads will not result in sporadic, spurious
546  * lookup failures.
547  */
548  working_copy = h->working_copies[thread_index];
549  log2_working_copy_length = h->working_copy_lengths[thread_index];
550 
551  h->saved_bucket.as_u64 = b->as_u64;
552 
553  if (b->log2_pages > log2_working_copy_length)
554  {
555  /*
556  * It's not worth the bookkeeping to free working copies
557  * if (working_copy)
558  * clib_mem_free (working_copy);
559  */
560  working_copy = BV (alloc_aligned)
561  (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
562  h->working_copy_lengths[thread_index] = b->log2_pages;
563  h->working_copies[thread_index] = working_copy;
564 
565  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
566  1ULL << b->log2_pages);
567  }
568 
569  v = BV (clib_bihash_get_value) (h, b->offset);
570 
571  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
572  working_bucket.as_u64 = b->as_u64;
573  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
575  b->as_u64 = working_bucket.as_u64;
576  h->working_copies[thread_index] = working_copy;
577 }
578 
579 static
581 BV (split_and_rehash)
582  (BVT (clib_bihash) * h,
583  BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
584  u32 new_log2_pages)
585 {
586  BVT (clib_bihash_value) * new_values, *new_v;
587  int i, j, length_in_kvs;
588 
589  ASSERT (h->alloc_lock[0]);
590 
591  new_values = BV (value_alloc) (h, new_log2_pages);
592  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
593 
594  for (i = 0; i < length_in_kvs; i++)
595  {
596  u64 new_hash;
597 
598  /* Entry not in use? Forget it */
599  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
600  continue;
601 
602  /* rehash the item onto its new home-page */
603  new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
604  new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
605  new_v = &new_values[new_hash];
606 
607  /* Across the new home-page */
608  for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
609  {
610  /* Empty slot */
611  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
612  {
613  clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
614  sizeof (new_v->kvp[j]));
615  goto doublebreak;
616  }
617  }
618  /* Crap. Tell caller to try again */
619  BV (value_free) (h, new_values, new_log2_pages);
620  return 0;
621  doublebreak:;
622  }
623 
624  return new_values;
625 }
626 
627 static
630  (BVT (clib_bihash) * h,
631  BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
632  u32 new_log2_pages)
633 {
634  BVT (clib_bihash_value) * new_values;
635  int i, j, new_length, old_length;
636 
637  ASSERT (h->alloc_lock[0]);
638 
639  new_values = BV (value_alloc) (h, new_log2_pages);
640  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
641  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
642 
643  j = 0;
644  /* Across the old value array */
645  for (i = 0; i < old_length; i++)
646  {
647  /* Find a free slot in the new linear scan bucket */
648  for (; j < new_length; j++)
649  {
650  /* Old value not in use? Forget it. */
651  if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
652  goto doublebreak;
653 
654  /* New value should never be in use */
655  if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
656  {
657  /* Copy the old value and move along */
658  clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
659  sizeof (new_values->kvp[j]));
660  j++;
661  goto doublebreak;
662  }
663  }
664  /* This should never happen... */
665  clib_warning ("BUG: linear rehash failed!");
666  BV (value_free) (h, new_values, new_log2_pages);
667  return 0;
668 
669  doublebreak:;
670  }
671  return new_values;
672 }
673 
674 static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
675  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
676  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
677 {
678  BVT (clib_bihash_bucket) * b, tmp_b;
679  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
680  int i, limit;
681  u64 new_hash;
682  u32 new_log2_pages, old_log2_pages;
684  int mark_bucket_linear;
685  int resplit_once;
686 
687  /* *INDENT-OFF* */
688  static const BVT (clib_bihash_bucket) mask = {
689  .linear_search = 1,
690  .log2_pages = -1
691  };
692  /* *INDENT-ON* */
693 
694 #if BIHASH_LAZY_INSTANTIATE
695  /*
696  * Create the table (is_add=1,2), or flunk the request now (is_add=0)
697  * Use the alloc_lock to protect the instantiate operation.
698  */
699  if (PREDICT_FALSE (h->instantiated == 0))
700  {
701  if (is_add == 0)
702  return (-1);
703 
704  BV (clib_bihash_alloc_lock) (h);
705  if (h->instantiated == 0)
706  BV (clib_bihash_instantiate) (h);
707  BV (clib_bihash_alloc_unlock) (h);
708  }
709 #else
710  /* Debug image: make sure the table has been instantiated */
711  ASSERT (h->instantiated != 0);
712 #endif
713 
714  b = BV (clib_bihash_get_bucket) (h, hash);
715 
716  BV (clib_bihash_lock_bucket) (b);
717 
718  /* First elt in the bucket? */
719  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
720  {
721  if (is_add == 0)
722  {
723  BV (clib_bihash_unlock_bucket) (b);
724  return (-1);
725  }
726 
727  BV (clib_bihash_alloc_lock) (h);
728  v = BV (value_alloc) (h, 0);
729  BV (clib_bihash_alloc_unlock) (h);
730 
731  *v->kvp = *add_v;
732  tmp_b.as_u64 = 0; /* clears bucket lock */
733  tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
734  tmp_b.refcnt = 1;
736 
737  b->as_u64 = tmp_b.as_u64; /* unlocks the bucket */
738  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);
739 
740  return (0);
741  }
742 
743  /* WARNING: we're still looking at the live copy... */
744  limit = BIHASH_KVP_PER_PAGE;
745  v = BV (clib_bihash_get_value) (h, b->offset);
746 
747  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
748  {
749  if (PREDICT_FALSE (b->linear_search))
750  limit <<= b->log2_pages;
751  else
752  v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
753  }
754 
755  if (is_add)
756  {
757  /*
758  * Because reader threads are looking at live data,
759  * we have to be extra careful. Readers do NOT hold the
760  * bucket lock. We need to be SLOWER than a search, past the
761  * point where readers CHECK the bucket lock.
762  */
763 
764  /*
765  * For obvious (in hindsight) reasons, see if we're supposed to
766  * replace an existing key, then look for an empty slot.
767  */
768  for (i = 0; i < limit; i++)
769  {
770  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
771  {
772  /* Add but do not overwrite? */
773  if (is_add == 2)
774  {
775  BV (clib_bihash_unlock_bucket) (b);
776  return (-2);
777  }
778 
779  clib_memcpy_fast (&(v->kvp[i].value),
780  &add_v->value, sizeof (add_v->value));
781  BV (clib_bihash_unlock_bucket) (b);
782  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
783  return (0);
784  }
785  }
786  /*
787  * Look for an empty slot. If found, use it
788  */
789  for (i = 0; i < limit; i++)
790  {
791  if (BV (clib_bihash_is_free) (&(v->kvp[i])))
792  {
793  /*
794  * Copy the value first, so that if a reader manages
795  * to match the new key, the value will be right...
796  */
797  clib_memcpy_fast (&(v->kvp[i].value),
798  &add_v->value, sizeof (add_v->value));
799  CLIB_MEMORY_STORE_BARRIER (); /* Make sure the value has settled */
800  clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
801  sizeof (add_v->key));
802  b->refcnt++;
803  ASSERT (b->refcnt > 0);
804  BV (clib_bihash_unlock_bucket) (b);
805  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
806  return (0);
807  }
808  }
809  /* look for stale data to overwrite */
810  if (is_stale_cb)
811  {
812  for (i = 0; i < limit; i++)
813  {
814  if (is_stale_cb (&(v->kvp[i]), arg))
815  {
816  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
818  BV (clib_bihash_unlock_bucket) (b);
819  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
820  return (0);
821  }
822  }
823  }
824  /* Out of space in this bucket, split the bucket... */
825  }
826  else /* delete case */
827  {
828  for (i = 0; i < limit; i++)
829  {
830  /* Found the key? Kill it... */
831  if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
832  {
833  clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
834  /* Is the bucket empty? */
835  if (PREDICT_TRUE (b->refcnt > 1))
836  {
837  b->refcnt--;
838  /* Switch back to the bucket-level kvp array? */
839  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
840  && b->log2_pages > 0)
841  {
842  tmp_b.as_u64 = b->as_u64;
843  b->offset = BV (clib_bihash_get_offset)
844  (h, (void *) (b + 1));
845  b->linear_search = 0;
846  b->log2_pages = 0;
847  /* Clean up the bucket-level kvp array */
848  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
849  sizeof (BVT (clib_bihash_kv)));
851  BV (clib_bihash_unlock_bucket) (b);
852  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
853  goto free_backing_store;
854  }
855 
857  BV (clib_bihash_unlock_bucket) (b);
858  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
859  return (0);
860  }
861  else /* yes, free it */
862  {
863  /* Save old bucket value, need log2_pages to free it */
864  tmp_b.as_u64 = b->as_u64;
865 
866  /* Kill and unlock the bucket */
867  b->as_u64 = 0;
868 
869  free_backing_store:
870  /* And free the backing storage */
871  BV (clib_bihash_alloc_lock) (h);
872  /* Note: v currently points into the middle of the bucket */
873  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
874  BV (value_free) (h, v, tmp_b.log2_pages);
875  BV (clib_bihash_alloc_unlock) (h);
876  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
877  1);
878  return (0);
879  }
880  }
881  }
882  /* Not found... */
883  BV (clib_bihash_unlock_bucket) (b);
884  return (-3);
885  }
886 
887  /* Move readers to a (locked) temp copy of the bucket */
888  BV (clib_bihash_alloc_lock) (h);
889  BV (make_working_copy) (h, b);
890 
891  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
892 
893  old_log2_pages = h->saved_bucket.log2_pages;
894  new_log2_pages = old_log2_pages + 1;
895  mark_bucket_linear = 0;
896  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
897  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);
898 
899  working_copy = h->working_copies[thread_index];
900  resplit_once = 0;
901  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);
902 
903  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
904  new_log2_pages);
905  if (new_v == 0)
906  {
907  try_resplit:
908  resplit_once = 1;
909  new_log2_pages++;
910  /* Try re-splitting. If that fails, fall back to linear search */
911  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
912  new_log2_pages);
913  if (new_v == 0)
914  {
915  mark_linear:
916  new_log2_pages--;
917  /* pinned collisions, use linear search */
918  new_v =
919  BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
920  new_log2_pages);
921  mark_bucket_linear = 1;
922  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
923  }
924  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
925  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
926  old_log2_pages + 1);
927  }
928 
929  /* Try to add the new entry */
930  save_new_v = new_v;
931  new_hash = BV (clib_bihash_hash) (add_v);
932  limit = BIHASH_KVP_PER_PAGE;
933  if (mark_bucket_linear)
934  limit <<= new_log2_pages;
935  else
936  new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
937 
938  for (i = 0; i < limit; i++)
939  {
940  if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
941  {
942  clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
943  goto expand_ok;
944  }
945  }
946 
947  /* Crap. Try again */
948  BV (value_free) (h, save_new_v, new_log2_pages);
949  /*
950  * If we've already doubled the size of the bucket once,
951  * fall back to linear search now.
952  */
953  if (resplit_once)
954  goto mark_linear;
955  else
956  goto try_resplit;
957 
958 expand_ok:
959  tmp_b.log2_pages = new_log2_pages;
960  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
961  tmp_b.linear_search = mark_bucket_linear;
962 #if BIHASH_KVP_AT_BUCKET_LEVEL
963  /* Compensate for permanent refcount bump at the bucket level */
964  if (new_log2_pages > 0)
965 #endif
966  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
967  ASSERT (tmp_b.refcnt > 0);
968  tmp_b.lock = 0;
970  b->as_u64 = tmp_b.as_u64;
971 
972 #if BIHASH_KVP_AT_BUCKET_LEVEL
973  if (h->saved_bucket.log2_pages > 0)
974  {
975 #endif
976 
977  /* free the old bucket, except at the bucket level if so configured */
978  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
979  BV (value_free) (h, v, h->saved_bucket.log2_pages);
980 
981 #if BIHASH_KVP_AT_BUCKET_LEVEL
982  }
983 #endif
984 
985 
986  BV (clib_bihash_alloc_unlock) (h);
987  return (0);
988 }
989 
990 static_always_inline int BV (clib_bihash_add_del_inline)
991  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
992  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
993 {
994  u64 hash = BV (clib_bihash_hash) (add_v);
995  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
996  is_stale_cb, arg);
997 }
998 
999 int BV (clib_bihash_add_del)
1000  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
1001 {
1002  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
1003 }
1004 
1005 int BV (clib_bihash_add_or_overwrite_stale)
1006  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
1007  int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
1008 {
1009  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
1010 }
1011 
1012 int BV (clib_bihash_search)
1013  (BVT (clib_bihash) * h,
1014  BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
1015 {
1016  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
1017 }
1018 
1019 u8 *BV (format_bihash) (u8 * s, va_list * args)
1020 {
1021  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
1022  int verbose = va_arg (*args, int);
1023  BVT (clib_bihash_bucket) * b;
1024  BVT (clib_bihash_value) * v;
1025  int i, j, k;
1026  u64 active_elements = 0;
1027  u64 active_buckets = 0;
1028  u64 linear_buckets = 0;
1029 
1030  s = format (s, "Hash table '%s'\n", h->name ? h->name : (u8 *) "(unnamed)");
1031 
1032 #if BIHASH_LAZY_INSTANTIATE
1033  if (PREDICT_FALSE (h->instantiated == 0))
1034  return format (s, " empty, uninitialized");
1035 #endif
1036 
1037  for (i = 0; i < h->nbuckets; i++)
1038  {
1039  b = BV (clib_bihash_get_bucket) (h, i);
1040  if (BV (clib_bihash_bucket_is_empty) (b))
1041  {
1042  if (verbose > 1)
1043  s = format (s, "[%d]: empty\n", i);
1044  continue;
1045  }
1046 
1047  active_buckets++;
1048 
1049  if (b->linear_search)
1050  linear_buckets++;
1051 
1052  if (verbose)
1053  {
1054  s = format
1055  (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
1056  b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
1057  }
1058 
1059  v = BV (clib_bihash_get_value) (h, b->offset);
1060  for (j = 0; j < (1 << b->log2_pages); j++)
1061  {
1062  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
1063  {
1064  if (BV (clib_bihash_is_free) (&v->kvp[k]))
1065  {
1066  if (verbose > 1)
1067  s = format (s, " %d: empty\n",
1068  j * BIHASH_KVP_PER_PAGE + k);
1069  continue;
1070  }
1071  if (verbose)
1072  {
1073  if (h->kvp_fmt_fn)
1074  {
1075  s = format (s, " %d: %U\n",
1076  j * BIHASH_KVP_PER_PAGE + k,
1077  h->kvp_fmt_fn, &(v->kvp[k]), verbose);
1078  }
1079  else
1080  {
1081  s = format (s, " %d: %U\n",
1082  j * BIHASH_KVP_PER_PAGE + k,
1083  BV (format_bihash_kvp), &(v->kvp[k]));
1084  }
1085  }
1086  active_elements++;
1087  }
1088  v++;
1089  }
1090  }
1091 
1092  s = format (s, " %lld active elements %lld active buckets\n",
1093  active_elements, active_buckets);
1094  s = format (s, " %d free lists\n", vec_len (h->freelists));
1095 
1096  for (i = 0; i < vec_len (h->freelists); i++)
1097  {
1098  u32 nfree = 0;
1099  BVT (clib_bihash_value) * free_elt;
1100  u64 free_elt_as_u64 = h->freelists[i];
1101 
1102  while (free_elt_as_u64)
1103  {
1104  free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
1105  nfree++;
1106  free_elt_as_u64 = free_elt->next_free_as_u64;
1107  }
1108 
1109  if (nfree || verbose)
1110  s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
1111  }
1112 
1113  s = format (s, " %lld linear search buckets\n", linear_buckets);
1114  if (BIHASH_USE_HEAP)
1115  {
1116  BVT (clib_bihash_alloc_chunk) * c = h->chunks;
1117  uword bytes_left = 0, total_size = 0, n_chunks = 0;
1118 
1119  while (c)
1120  {
1121  bytes_left += c->bytes_left;
1122  total_size += c->size;
1123  n_chunks += 1;
1124  c = c->next;
1125  }
1126  s = format (s,
1127  " heap: %u chunk(s) allocated\n"
1128  " bytes: used %U, scrap %U\n", n_chunks,
1129  format_memory_size, total_size,
1130  format_memory_size, bytes_left);
1131  }
1132  else
1133  {
1134  u64 used_bytes = alloc_arena_next (h);
1135  s = format (s,
1136  " arena: base %llx, next %llx\n"
1137  " used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
1138  alloc_arena (h), alloc_arena_next (h),
1139  used_bytes, used_bytes >> 20,
1140  alloc_arena_size (h), alloc_arena_size (h) >> 20);
1141  }
1142  return s;
1143 }
1144 
1146  (BVT (clib_bihash) * h,
1147  BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
1148 {
1149  int i, j, k;
1150  BVT (clib_bihash_bucket) * b;
1151  BVT (clib_bihash_value) * v;
1152 
1153 
1154 #if BIHASH_LAZY_INSTANTIATE
1155  if (PREDICT_FALSE (h->instantiated == 0))
1156  return;
1157 #endif
1158 
1159  for (i = 0; i < h->nbuckets; i++)
1160  {
1161  b = BV (clib_bihash_get_bucket) (h, i);
1162  if (BV (clib_bihash_bucket_is_empty) (b))
1163  continue;
1164 
1165  v = BV (clib_bihash_get_value) (h, b->offset);
1166  for (j = 0; j < (1 << b->log2_pages); j++)
1167  {
1168  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
1169  {
1170  if (BV (clib_bihash_is_free) (&v->kvp[k]))
1171  continue;
1172 
1173  if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
1174  return;
1175  /*
1176  * In case the callback deletes the last entry in the bucket...
1177  */
1178  if (BV (clib_bihash_bucket_is_empty) (b))
1179  goto doublebreak;
1180  }
1181  v++;
1182  }
1183  doublebreak:
1184  ;
1185  }
1186 }
1187 
1188 /** @endcond */
1189 
1190 /*
1191  * fd.io coding-style-patch-verification: ON
1192  *
1193  * Local Variables:
1194  * eval: (c-set-style "gnu")
1195  * End:
1196  */
/*
 * Doxygen symbol-index residue from the documentation page this file was
 * extracted from — NOT part of the original source file:
 *
 * thread_index: u32 thread_index — nat44_ei_hairpinning.c:495
 * clib_mem_get_heap: static clib_mem_heap_t *clib_mem_get_heap(void) — mem.h:362
 * clib_mem_get_heap_base: void *clib_mem_get_heap_base(clib_mem_heap_t *heap) — mem_dlmalloc.c:586
 * F_SEAL_SHRINK: #define F_SEAL_SHRINK — mem.c:44
 * BIHASH_KVP_PER_PAGE: #define BIHASH_KVP_PER_PAGE — bihash_16_8.h:25
 * name: string name[64] — fib.api:25
 * next: u16 *next — nat44_ei_out2in.c:718
 * clib_mem_free: static void clib_mem_free(void *p) — mem.h:314
 * os_out_of_memory: void os_out_of_memory(void) — unix-misc.c:219
 * vec_delete: #define vec_delete(V, N, M) — delete N elements starting at M — vec.h:875
 * clib_bihash_get_value: static void *clib_bihash_get_value(clib_bihash *h, uword offset)
 * clib_memcpy_fast: static_always_inline void *clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n) — string.h:92
 * memory_size: u64 memory_size — vhost_user.h:124
 * h: h — flowhash_template.h:372
 * clib_unix_warning: #define clib_unix_warning(format, args...) — error.h:68
 * clib_all_bihash_set_heap: __clib_export clib_mem_heap_t *clib_all_bihash_set_heap(void) — bihash_all_vector.c:23
 * clib_bihash_search_inline_2: int clib_bihash_search_inline_2(clib_bihash *h, clib_bihash_kv *search_key, clib_bihash_kv *valuep)
 * clib_mem_vm_create_fd: __clib_export int clib_mem_vm_create_fd(clib_mem_page_sz_t log2_page_size, char *fmt, ...) — mem.c:231
 * clib_bihash_get_offset: static uword clib_bihash_get_offset(clib_bihash *h, void *v)
 * max_log2: static uword max_log2(uword x) — clib.h:223
 * split_and_rehash_linear: static vnet_classify_entry_t *split_and_rehash_linear(...) — vnet_classify.c:334
 * round_pow2: static uword round_pow2(uword x, uword pow2) — clib.h:279
 */
alloc_aligned
static void * alloc_aligned(uword size, uword log2_align, void **ptr_to_free)
Definition: test_vec.h:191
vec_len
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
Definition: vec_bootstrap.h:142
CLIB_MEMORY_STORE_BARRIER
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:140
clib_bihash_init
void clib_bihash_init(clib_bihash *h, char *name, u32 nbuckets, uword memory_size)
initialize a bounded index extensible hash table
vec_add1
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:606
clib_all_bihashes
__clib_export void ** clib_all_bihashes
Definition: bihash_all_vector.c:19
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
c
svmdb_client_t * c
Definition: vpp_get_metrics.c:48
static_always_inline
#define static_always_inline
Definition: clib.h:112
uword
u64 uword
Definition: types.h:112
clib_bihash_free
void clib_bihash_free(clib_bihash *h)
Destroy a bounded index extensible hash table.
F_ADD_SEALS
#define F_ADD_SEALS
Definition: mem.c:40
vec_header_t::vector_data
u8 vector_data[0]
Vector data .
Definition: vec_bootstrap.h:60
BVT
BVT(clib_bihash)
The table of adjacencies indexed by the rewrite string.
Definition: l2_fib.c:1065
mask
vl_api_pnat_mask_t mask
Definition: pnat.api:45
vec_validate
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment)
Definition: vec.h:523
clib_mem_vm_free
static void clib_mem_vm_free(void *addr, uword size)
Definition: mem.h:446
log2_pages
u8 log2_pages
Definition: bihash_doc.h:62
clib_mem_vm_reserve
uword clib_mem_vm_reserve(uword start, uword size, clib_mem_page_sz_t log2_page_sz)
Definition: mem.c:299
CLIB_CACHE_LINE_BYTES
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:58
format_memory_size
u8 * format_memory_size(u8 *s, va_list *va)
Definition: std-formats.c:209
clib_memset_u8
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
Definition: string.h:441
os_get_thread_index
static_always_inline uword os_get_thread_index(void)
Definition: os.h:63
BIHASH_KVP_AT_BUCKET_LEVEL
#define BIHASH_KVP_AT_BUCKET_LEVEL
Definition: bihash_16_8.h:26
format_function_t
u8 *() format_function_t(u8 *s, va_list *args)
Definition: format.h:48
vec_free
#define vec_free(V)
Free vector's memory (no header).
Definition: vec.h:395
clib_bihash_value
template key/value backing page structure
Definition: bihash_doc.h:44
u64
unsigned long u64
Definition: types.h:89
split_and_rehash
static vnet_classify_entry_t * split_and_rehash(vnet_classify_table_t *t, vnet_classify_entry_t *old_values, u32 old_log2_pages, u32 new_log2_pages)
Definition: vnet_classify.c:283
format
description fragment has unexpected format
Definition: map.api:433
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
vec_validate_init_empty
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header,...
Definition: vec.h:570
u32
unsigned int u32
Definition: types.h:88
extract_bits
static uword extract_bits(uword x, int start, int count)
Definition: clib.h:327
CLIB_MEM_PAGE_SZ_DEFAULT
@ CLIB_MEM_PAGE_SZ_DEFAULT
Definition: mem.h:60
make_working_copy
static void make_working_copy(vnet_classify_table_t *t, vnet_classify_bucket_t *b)
Definition: vnet_classify.c:228
BIHASH_USE_HEAP
#define BIHASH_USE_HEAP
Definition: bihash_16_8.h:29
clib_bihash_add_del
int clib_bihash_add_del(clib_bihash *h, clib_bihash_kv *add_v, int is_add)
Add or delete a (key,value) pair from a bi-hash table.
vec_header_t
vector header structure
Definition: vec_bootstrap.h:55
clib_bihash_foreach_key_value_pair
void clib_bihash_foreach_key_value_pair(clib_bihash *h, clib_bihash_foreach_key_value_pair_cb *callback, void *arg)
Visit active (key,value) pairs in a bi-hash table.
b
vlib_buffer_t ** b
Definition: nat44_ei_out2in.c:717
u8
unsigned char u8
Definition: types.h:56
a
a
Definition: bitmap.h:525
i
int i
Definition: flowhash_template.h:376
clib_warning
#define clib_warning(format, args...)
Definition: error.h:59
clib_mem_alloc_aligned
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:264
clib_bihash_foreach_key_value_pair_cb
int(* clib_bihash_foreach_key_value_pair_cb)(clib_bihash_kv *kv, void *ctx)
Definition: bihash_doc.h:175
rv
int __clib_unused rv
Definition: application.c:491
MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT
Definition: ip4_mtrie.c:888
PREDICT_TRUE
#define PREDICT_TRUE(x)
Definition: clib.h:125
vec_header_t::len
u32 len
Number of elements in vector (NOT its allocated length).
Definition: vec_bootstrap.h:57
clib_mem_set_heap
static clib_mem_heap_t * clib_mem_set_heap(clib_mem_heap_t *heap)
Definition: mem.h:368