29 #ifndef LB_PLUGIN_LB_LBHASH_H_ 30 #define LB_PLUGIN_LB_LBHASH_H_ 34 #define LBHASH_ENTRY_PER_BUCKET_LOG2 2 35 #define LBHASH_ENTRY_PER_BUCKET (1 << LBHASH_ENTRY_PER_BUCKET_LOG2) 36 #define LBHASH_ENTRY_PER_BUCKET_MASK (LBHASH_ENTRY_PER_BUCKET - 1) 50 #define lb_hash_nbuckets(h) (((h)->buckets_mask >> LBHASH_ENTRY_PER_BUCKET_LOG2) + 1) 51 #define lb_hash_size(h) ((h)->buckets_mask + LBHASH_ENTRY_PER_BUCKET) 53 #define lb_hash_foreach_entry(h, e) \ 54 for (e = (h)->entries; e < h->entries + lb_hash_size(h); e++) 56 #define lb_hash_foreach_valid_entry(h, e, now) \ 57 lb_hash_foreach_entry(h, e) \ 58 if (!clib_u32_loop_gt((now), (e)->last_seen + (h)->timeout)) 88 __asm__
volatile(
"crc32l %[data], %[value];" 89 : [value]
"+r" (value)
90 : [data]
"rm" (data));
100 value = lb_hash_crc_u32 (dp[0], value);
101 value = lb_hash_crc_u32 (dp[1], value);
102 value = lb_hash_crc_u32 (dp[2], value);
103 value = lb_hash_crc_u32 (dp[3], value);
104 value = lb_hash_crc_u32 (dp[4], value);
105 value = lb_hash_crc_u32 (dp[5], value);
106 value = lb_hash_crc_u32 (dp[6], value);
107 value = lb_hash_crc_u32 (dp[7], value);
108 value = lb_hash_crc_u32 (dp[8], value);
109 value = lb_hash_crc_u32 (dp[9], value);
116 u64 tmp = k[0] ^ k[1] ^ k[2] ^ k[3] ^ k[4];
129 *available_index = ~0;
134 (e[
i].
key[0] ^ k[0]) |
135 (e[i].key[1] ^ k[1]) |
136 (e[
i].
key[2] ^ k[2]) |
137 (e[i].key[3] ^ k[3]) |
138 (e[
i].
key[4] ^ k[4]);
142 *value = (cmp || timeouted)?*value:e[i].value;
143 e[
i].
last_seen = (cmp || timeouted)?e[i].last_seen:time_now;
144 *available_index = (timeouted && (*available_index == ~0))?(&e[
i] - h->
entries):*available_index;
sll srl srl sll sra u16x4 i
#define LBHASH_ENTRY_PER_BUCKET
static_always_inline lb_hash_t * lb_hash_alloc(u32 buckets, u32 timeout)
#define clib_u32_loop_gt(a, b)
32-bit integer comparison for running (wrapping) values.
static u64 clib_xxhash(u64 key)
static_always_inline u32 lb_hash_put(lb_hash_t *h, u64 k[5], u32 value, u32 available_index, u32 time_now)
#define static_always_inline
lb_hash_entry_t entries[]
#define lb_hash_foreach_valid_entry(h, e, now)
static_always_inline void lb_hash_get(lb_hash_t *h, u64 k[5], u32 hash, u32 time_now, u32 *available_index, u32 *value)
#define vec_alloc_aligned(V, N, A)
Allocate space for N more elements (no header, given alignment)
#define CLIB_PREFETCH(addr, size, type)
#define vec_free(V)
Free vector's memory (no header).
static_always_inline u32 lb_hash_available_value(lb_hash_t *h, u32 available_index)
static_always_inline void lb_hash_free(lb_hash_t *h)
static uword is_pow2(uword x)
static_always_inline u32 lb_hash_hash(u64 k[5])
#define LBHASH_ENTRY_PER_BUCKET_LOG2
vppinfra already includes tons of different hash tables.
#define CLIB_CACHE_LINE_BYTES
static_always_inline u32 lb_hash_elts(lb_hash_t *h, u32 time_now)