FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Source listing: lbhash.h (load-balancer plugin hash table)
#ifndef LB_PLUGIN_LB_LBHASH_H_
#define LB_PLUGIN_LB_LBHASH_H_

#if defined (__SSE4_2__)
#include <immintrin.h>
#endif

#define LBHASH_ENTRY_PER_BUCKET 4

#define LB_HASH_DO_NOT_USE_SSE_BUCKETS 0

#define lb_hash_nbuckets(h) (((h)->buckets_mask) + 1)
#define lb_hash_size(h) ((h)->buckets_mask + LBHASH_ENTRY_PER_BUCKET)

#define lb_hash_foreach_bucket(h, bucket) \
  for (bucket = (h)->buckets; \
       bucket < (h)->buckets + lb_hash_nbuckets(h); \
       bucket++)

#define lb_hash_foreach_entry(h, bucket, i) \
  lb_hash_foreach_bucket(h, bucket) \
    for (i = 0; i < LBHASH_ENTRY_PER_BUCKET; i++)

#define lb_hash_foreach_valid_entry(h, bucket, i, now) \
  lb_hash_foreach_entry(h, bucket, i) \
    if (!clib_u32_loop_gt((now), bucket->timeout[i]))
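As a usage illustration of these iteration macros: the declaration list at the end of this page gives the signature of lb_hash_elts, and the body below is a sketch of how the macros compose to count live entries, not the verbatim implementation.

static_always_inline
u32 lb_hash_elts(lb_hash_t *h, u32 time_now)
{
  u32 tot = 0;
  lb_hash_bucket_t *bucket;
  u32 i;
  /* visits only entries whose timeout has not yet passed */
  lb_hash_foreach_valid_entry(h, bucket, i, time_now)
    {
      tot++;
    }
  return tot;
}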
static_always_inline
lb_hash_t *lb_hash_alloc(u32 buckets, u32 timeout)
{
  lb_hash_t *h;
  /* allocation and zero-initialization are elided on this page
   * (see vec_alloc_aligned and clib_memset in the declaration list below);
   * buckets must be a power of two */
  h->buckets_mask = (buckets - 1);
  h->timeout = timeout;
  return h;
}
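A quick note on the masking above, with illustrative numbers that are not from this file:

/* With buckets = 1024 (a power of two, see is_pow2 in the declaration list),
 *   h->buckets_mask = 1023 = 0x3ff
 *   bucket index    = hash & 0x3ff, which equals hash % 1024
 * so the bucket lookup needs no division. */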
static_always_inline
void lb_hash_get(lb_hash_t *ht, u32 hash, u32 vip, u32 time_now,
                 u32 *available_index, u32 *found_value)
{
  lb_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
  *found_value = ~0;      /* ~0 means "not found" */
  *available_index = ~0;  /* ~0 means "no free or expired slot" */
#if __SSE4_2__ && LB_HASH_DO_NOT_USE_SSE_BUCKETS == 0
  u32 bitmask, found_index;
  __m128i mask;

  /* mask lane i is all-ones when entry i has not timed out yet */
  mask = _mm_cmpgt_epi32(_mm_loadu_si128 ((__m128i *) bucket->timeout),
                         _mm_set1_epi32 (time_now));
  /* each 32-bit lane contributes 4 bits to the movemask, hence the /4 below */
  bitmask = (~_mm_movemask_epi8(mask)) & 0xffff;
  *available_index = (bitmask) ? __builtin_ctz(bitmask) / 4 : *available_index;

  /* narrow the mask to lanes whose stored hash matches */
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->hash),
                           _mm_set1_epi32 (hash)));
  /* ... and whose stored vip matches */
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->vip),
                           _mm_set1_epi32 (vip)));

  bitmask = _mm_movemask_epi8(mask);
  found_index = (bitmask) ? __builtin_ctzll(bitmask) / 4 : 0;
  *found_value = (bitmask) ? bucket->value[found_index] : *found_value;
#else
  u32 i;
  for (i = 0; i < LBHASH_ENTRY_PER_BUCKET; i++)
    {
      u8 cmp = (bucket->hash[i] == hash && bucket->vip[i] == vip);
      u8 timeouted = clib_u32_loop_gt(time_now, bucket->timeout[i]);
      *found_value = (cmp || timeouted) ? *found_value : bucket->value[i];
      *available_index = (timeouted && (*available_index == ~0)) ? i : *available_index;
      if (!timeouted && cmp)
        break;
    }
#endif
}
static_always_inline
u32 lb_hash_available_value(lb_hash_t *h, u32 hash, u32 available_index)
{
  return h->buckets[hash & h->buckets_mask].value[available_index];
}
static_always_inline
void lb_hash_put(lb_hash_t *h, u32 hash, u32 value, u32 vip,
                 u32 available_index, u32 time_now)
{
  lb_hash_bucket_t *bucket = &h->buckets[hash & h->buckets_mask];
  bucket->hash[available_index] = hash;
  bucket->value[available_index] = value;
  bucket->timeout[available_index] = time_now + h->timeout;
  bucket->vip[available_index] = vip;
}

#endif /* LB_PLUGIN_LB_LBHASH_H_ */
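For orientation, a minimal usage sketch of the API above; the call sequence and the ~0 sentinels follow the definitions on this page, while the surrounding variables (h, hash, vip, time_now, new_value) are illustrative assumptions:

/* Look up a (hash, vip) pair and install a new value on a miss. */
u32 available_index, found_value;
lb_hash_prefetch_bucket(h, hash);   /* warm the bucket's cache line */
lb_hash_get(h, hash, vip, time_now, &available_index, &found_value);
if (found_value == ~0 && available_index != ~0)
  {
    /* the slot may still hold a stale value from an expired entry */
    u32 previous = lb_hash_available_value(h, hash, available_index);
    (void) previous;
    lb_hash_put(h, hash, new_value, vip, available_index, time_now);
  }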
Definitions referenced on this page:

File brief: vppinfra already includes tons of different hash tables.

lb_hash_bucket_t fields:
    u32 hash[LBHASH_ENTRY_PER_BUCKET]
    u32 timeout[LBHASH_ENTRY_PER_BUCKET]
    u32 vip[LBHASH_ENTRY_PER_BUCKET]
    u32 value[LBHASH_ENTRY_PER_BUCKET]

lb_hash_t fields:
    lb_hash_bucket_t buckets[]

Functions:
    static_always_inline lb_hash_t *lb_hash_alloc(u32 buckets, u32 timeout)
    static_always_inline void lb_hash_free(lb_hash_t *h)
    static_always_inline void lb_hash_prefetch_bucket(lb_hash_t *ht, u32 hash)
    static_always_inline void lb_hash_get(lb_hash_t *ht, u32 hash, u32 vip, u32 time_now, u32 *available_index, u32 *found_value)
    static_always_inline u32 lb_hash_available_value(lb_hash_t *h, u32 hash, u32 available_index)
    static_always_inline void lb_hash_put(lb_hash_t *h, u32 hash, u32 value, u32 vip, u32 available_index, u32 time_now)
    static_always_inline u32 lb_hash_elts(lb_hash_t *h, u32 time_now)
    static uword is_pow2(uword x)

Other referenced calls:
    clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)

Macros:
    #define LBHASH_ENTRY_PER_BUCKET
    #define lb_hash_foreach_valid_entry(h, bucket, i, now)
    #define clib_u32_loop_gt(a, b) : 32 bits integer comparison for running values.
    #define static_always_inline
    #define CLIB_CACHE_LINE_BYTES
    #define CLIB_CACHE_LINE_ALIGN_MARK(mark)
    #define CLIB_PREFETCH(addr, size, type)
    #define vec_alloc_aligned(V, N, A) : Allocate space for N more elements (no header, given alignment).
    #define vec_free(V) : Free vector's memory (no header).
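The clib_u32_loop_gt entry above shows only the macro name; its body is not on this page. Its description, "32 bits integer comparison for running values", matches the usual wraparound comparison, sketched here as an assumption:

/* Assumed shape of a wraparound "greater than" for running u32 values:
 * a counts as newer than b when the unsigned difference (a - b) is below
 * 2^31, so 1 compares greater than 0 and also greater than 0xffffffff. */
#define u32_loop_gt_sketch(a, b) (((u32)(a)) - ((u32)(b)) < 0x7fffffff)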