#ifndef KP_PLUGIN_KP_KPHASH_H_
#define KP_PLUGIN_KP_KPHASH_H_

#include <vnet/vnet.h>
#include <vppinfra/xxhash.h>
#include <vppinfra/crc32.h>

/* Number of entries per bucket. */
#define KPHASH_ENTRY_PER_BUCKET 4

#define KP_HASH_DO_NOT_USE_SSE_BUCKETS 0

/* 32 bits integer comparison for running values (stays valid across wrap-around). */
#define clib_u32_loop_gt(a, b) (((u32)(a)) - ((u32)(b)) < 0x7fffffff)

#define kp_hash_nbuckets(h) (((h)->buckets_mask) + 1)
#define kp_hash_size(h) ((h)->buckets_mask + KPHASH_ENTRY_PER_BUCKET)

#define kp_hash_foreach_bucket(h, bucket) \
  for (bucket = (h)->buckets; \
       bucket < (h)->buckets + kp_hash_nbuckets(h); \
       bucket++)

#define kp_hash_foreach_entry(h, bucket, i) \
  kp_hash_foreach_bucket(h, bucket) \
    for (i = 0; i < KPHASH_ENTRY_PER_BUCKET; i++)

#define kp_hash_foreach_valid_entry(h, bucket, i, now) \
  kp_hash_foreach_entry(h, bucket, i) \
    if (!clib_u32_loop_gt((now), bucket->timeout[i]))

static_always_inline
u32 kp_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
{
#ifdef clib_crc32c_uses_intrinsics
  u64 key[5];
  key[0] = k0;
  key[1] = k1;
  key[2] = k2;
  key[3] = k3;
  key[4] = k4;
  return clib_crc32c ((u8 *) key, 40);
#else
  u64 tmp = k0 ^ k1 ^ k2 ^ k3 ^ k4;
  return (u32) clib_xxhash (tmp);
#endif
}
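The clib_u32_loop_gt macro compares two "running" 32-bit values, such as the per-entry timeouts above, so that the ordering stays meaningful after the counter wraps past 2^32. A small, hypothetical standalone check of that property (not part of the header; the macro is copied in so the snippet compiles on its own):

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;
/* Same definition as above: a is "after" b when their unsigned
 * difference is less than half the 32-bit range. */
#define clib_u32_loop_gt(a, b) (((u32)(a)) - ((u32)(b)) < 0x7fffffff)

int main(void)
{
  assert(clib_u32_loop_gt(10u, 5u));            /* ordinary case: 10 is after 5 */
  assert(!clib_u32_loop_gt(5u, 10u));
  /* wrap-around: 3 is "after" 0xfffffffe even though it is numerically smaller */
  assert(clib_u32_loop_gt(3u, 0xfffffffeu));
  return 0;
}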
static_always_inline
void kp_hash_get(kp_hash_t *ht, u32 hash, u32 vip, u32 time_now,
                 u32 *available_index, u32 *found_value)
{
  kp_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
  *found_value = ~0;
  *available_index = ~0;
#if __SSE4_2__ && KP_HASH_DO_NOT_USE_SSE_BUCKETS == 0
  u32 bitmask, found_index;
  __m128i mask;

  /* mask[*] = timeout[*] > now */
  mask = _mm_cmpgt_epi32(_mm_loadu_si128 ((__m128i *) bucket->timeout),
                         _mm_set1_epi32 (time_now));
  /* bitmask[*] = !(timeout[*] > now) */
  bitmask = (~_mm_movemask_epi8(mask)) & 0xffff;
  /* Get first index with timeout[*] <= now, if any. */
  *available_index = (bitmask) ? __builtin_ctz(bitmask) / 4 : *available_index;

  /* mask[*] = (timeout[*] > now) && (hash[*] == hash) */
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->hash),
                           _mm_set1_epi32 (hash)));

  /* mask[*] = (timeout[*] > now) && (hash[*] == hash) && (vip[*] == vip) */
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->vip),
                           _mm_set1_epi32 (vip)));

  /* Get a bitmask from the mask and the first matching index, if any. */
  bitmask = _mm_movemask_epi8(mask);
  found_index = (bitmask) ? __builtin_ctzll(bitmask) / 4 : 0;
  *found_value = (bitmask) ? bucket->value[found_index] : *found_value;
#else
  u32 i;
  for (i = 0; i < KPHASH_ENTRY_PER_BUCKET; i++) {
      u8 cmp = (bucket->hash[i] == hash && bucket->vip[i] == vip);
      u8 timeouted = clib_u32_loop_gt(time_now, bucket->timeout[i]);
      *found_value = (cmp || timeouted) ? *found_value : bucket->value[i];
      *available_index = (timeouted && (*available_index == ~0)) ?
                             i : *available_index;
  }
#endif
}
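In the SSE path above, lane indices are recovered from byte-level movemasks: _mm_movemask_epi8 produces one bit per byte, so a matching 32-bit lane contributes four consecutive set bits, and the trailing-zero count divided by four gives the lane number. A hypothetical standalone check of that arithmetic (the mask value is hand-built rather than produced by the intrinsics; __builtin_ctz is the same GCC/Clang builtin the header uses):

#include <assert.h>

int main(void)
{
  /* Suppose lane 2 of a 4 x 32-bit vector compared equal: bytes 8..11 set. */
  unsigned bitmask = 0x0f00;
  unsigned lane = __builtin_ctz(bitmask) / 4;   /* ctz == 8, 8 / 4 == 2 */
  assert(lane == 2);
  return 0;
}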
static_always_inline
void kp_hash_put(kp_hash_t *h, u32 hash, u32 value, u32 vip,
                 u32 available_index, u32 time_now)
{
  kp_hash_bucket_t *bucket = &h->buckets[hash & h->buckets_mask];
  bucket->hash[available_index] = hash;
  bucket->value[available_index] = value;
  bucket->timeout[available_index] = time_now + h->timeout;
  bucket->vip[available_index] = vip;
}
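Taken together, kp_hash_get and kp_hash_put give a probe-then-insert protocol: a miss leaves found_value at its initial ~0, and a writable slot (free or timed out) is reported through available_index. A minimal usage sketch under those assumptions; ht, vip_index, now, backend, pick_backend and the k0..k4 flow key stand for caller-side state and are not names from this header:

u32 hash = kp_hash_hash (k0, k1, k2, k3, k4);   /* 5 x u64 flow key */
u32 available_index, value;

kp_hash_prefetch_bucket (ht, hash);             /* warm the bucket's cache line */
kp_hash_get (ht, hash, vip_index, now, &available_index, &value);

if (value != ~0) {
    backend = value;                            /* hit: reuse the stored value */
} else if (available_index != ~0) {
    backend = pick_backend (vip_index);         /* placeholder selection policy */
    kp_hash_put (ht, hash, backend, vip_index, available_index, now);
}
/* else: all entries in the bucket are live, so the caller falls back
 * to a stateless choice for this packet. */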
Description
    vppinfra already includes tons of different hash tables.

Macros
    #define KPHASH_ENTRY_PER_BUCKET
    #define clib_u32_loop_gt(a, b)
        32 bits integer comparison for running values.
    #define kp_hash_foreach_valid_entry(h, bucket, i, now)

Referenced vppinfra macros
    #define CLIB_CACHE_LINE_ALIGN_MARK(mark)
    #define CLIB_CACHE_LINE_BYTES
    #define CLIB_PREFETCH(addr, size, type)
    #define static_always_inline
    #define vec_alloc_aligned(V, N, A)
        Allocate space for N more elements (no header, given alignment).
    #define vec_free(V)
        Free vector's memory (no header).

Data Fields
    u32 hash[KPHASH_ENTRY_PER_BUCKET]
    u32 timeout[KPHASH_ENTRY_PER_BUCKET]
    u32 vip[KPHASH_ENTRY_PER_BUCKET]
    u32 value[KPHASH_ENTRY_PER_BUCKET]
    kp_hash_bucket_t buckets[]

Functions
    static u64 clib_xxhash(u64 key)
    static uword is_pow2(uword x)
    static_always_inline kp_hash_t * kp_hash_alloc(u32 buckets, u32 timeout)
    static_always_inline void kp_hash_free(kp_hash_t *h)
    static_always_inline u32 kp_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
    static_always_inline void kp_hash_prefetch_bucket(kp_hash_t *ht, u32 hash)
    static_always_inline void kp_hash_get(kp_hash_t *ht, u32 hash, u32 vip, u32 time_now, u32 *available_index, u32 *found_value)
    static_always_inline u32 kp_hash_available_value(kp_hash_t *h, u32 hash, u32 available_index)
    static_always_inline void kp_hash_put(kp_hash_t *h, u32 hash, u32 value, u32 vip, u32 available_index, u32 time_now)
    static_always_inline u32 kp_hash_elts(kp_hash_t *h, u32 time_now)
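For completeness, a hedged sketch of the table lifecycle implied by the listing: allocation with a power-of-two bucket count (the is_pow2 reference suggests kp_hash_alloc rejects anything else), iteration over live entries with kp_hash_foreach_valid_entry, and release with kp_hash_free. The bucket count, timeout and now values are illustrative, and the include path is an assumption:

#include <kubeproxy/kphash.h>   /* assumed include path for this header */

static u32 count_live_entries (kp_hash_t *ht, u32 now)
{
  kp_hash_bucket_t *bucket;
  u32 i, live = 0;
  /* Visits only entries whose timeout has not yet passed, which is
   * essentially what kp_hash_elts() is declared to compute. */
  kp_hash_foreach_valid_entry (ht, bucket, i, now)
    live++;
  return live;
}

static void example (u32 now)
{
  kp_hash_t *ht = kp_hash_alloc (1024 /* buckets, power of two */,
                                 40   /* timeout, caller's clock units */);
  if (!ht)
    return;
  u32 live = count_live_entries (ht, now);   /* expected to match kp_hash_elts(ht, now) */
  (void) live;
  kp_hash_free (ht);
}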