#ifndef __included_ip4_forward_h__
#define __included_ip4_forward_h__

static uword
ip4_lookup_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame,
                   int lookup_for_responses_to_locally_received_packets)
  u32 n_left_from, n_left_to_next, *from, *to_next;

  while (n_left_from > 0)
#if (CLIB_N_PREFETCHES >= 8)
      while (n_left_from >= 8 && n_left_to_next >= 4)
          u32 pi0, pi1, pi2, pi3, lb_index0, lb_index1, lb_index2, lb_index3;
          u32 hash_c0, hash_c1, hash_c2, hash_c3;
          const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          pi2 = to_next[2] = from[2];
          pi3 = to_next[3] = from[3];
          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (lookup_for_responses_to_locally_received_packets)

          ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
          vlib_increment_combined_counter (cm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter (cm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, p1));
          vlib_increment_combined_counter (cm, thread_index, lb_index2, 1, vlib_buffer_length_in_chain (vm, p2));
          vlib_increment_combined_counter (cm, thread_index, lb_index3, 1, vlib_buffer_length_in_chain (vm, p3));

          vlib_validate_buffer_enqueue_x4 (vm, node, next, to_next, n_left_to_next,
                                           pi0, pi1, pi2, pi3, next0, next1, next2, next3);
#elif (CLIB_N_PREFETCHES >= 4)
      while (n_left_from >= 4 && n_left_to_next >= 2)
          u32 pi0, pi1, lb_index0, lb_index1;
          u32 hash_c0, hash_c1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (lookup_for_responses_to_locally_received_packets)

          ASSERT (lb_index0 && lb_index1);
          vlib_increment_combined_counter (cm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter (cm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, p1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next, to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
      while (n_left_from > 0 && n_left_to_next > 0)
          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (!lookup_for_responses_to_locally_received_packets)

          if (lookup_for_responses_to_locally_received_packets)
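The guards above are where the two lookup modes diverge: in the normal case the load-balance index comes from the mtrie leaf, while for responses to locally received packets it is taken from the adjacency already stored on the buffer at RX. A minimal single-packet sketch of that choice, assuming the local names p0, leaf0, lb_index0 and lb0 used in this excerpt; it is an illustration, not the verbatim source.

          if (lookup_for_responses_to_locally_received_packets)
            /* reuse the adjacency recorded when the packet was received */
            lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
          else
            /* the mtrie walk has produced a leaf; extract the LB index from it */
            lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);

          ASSERT (lb_index0);
          lb0 = load_balance_get (lb_index0);

The symbols this function references are listed below with their briefs.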
u16 lb_n_buckets
number of buckets in the load-balance.
vlib_combined_counter_main_t lbm_to_counters
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
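A short sketch of how this counter is fed on the lookup path: cm is assumed to point at the to-counters held in load_balance_main (listed further below), and every forwarded packet adds one packet plus its chained byte length under the index of the chosen load-balance.

  /* assumed local names: cm, thread_index, lb_index0, p0 */
  vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;

  vlib_increment_combined_counter (cm, thread_index, lb_index0,
                                   1 /* n_packets */,
                                   vlib_buffer_length_in_chain (vm, p0));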
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step(const ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t current_leaf, const ip4_address_t *dst_address, u32 dst_address_byte_index)
Lookup step.
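VPP's IPv4 mtrie uses a 16-8-8 stride, so a full lookup is a fixed three-step walk: step one covers the first two bytes of the destination address, and two further steps consume bytes 2 and 3. A sketch reusing the mtrie0, leaf0, dst_addr0 and p0 names from the excerpt; an illustration, not the verbatim source.

          /* fetch the mtrie of the FIB the buffer was assigned to */
          mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
          leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
          leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
          leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);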
static u32 ip4_compute_flow_hash(const ip4_header_t *ip, flow_hash_config_t flow_hash_config)
flow_hash_config_t lb_hash_config
the hash config to use when selecting a bucket.
static const dpo_id_t * load_balance_get_fwd_bucket(const load_balance_t *lb, u16 bucket)
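Bucket selection, sketched with the names used above: a load-balance with more than one bucket hashes the flow using its own lb_hash_config and masks the hash with lb_n_buckets_minus_1; since lb_n_buckets is asserted to be a power of two (see is_pow2 below), the mask is equivalent to a modulo. A single-bucket load-balance skips the hash entirely.

          if (lb0->lb_n_buckets > 1)
            {
              /* multipath: hash the flow, mask into the bucket array */
              hash_c0 = ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
              dpo0 = load_balance_get_fwd_bucket
                (lb0, (u16) (hash_c0 & lb0->lb_n_buckets_minus_1));
            }
          else
            dpo0 = load_balance_get_bucket_i (lb0, 0);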
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
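The enqueue is speculative: buffer indices are copied into to_next up front (the pi0..pi3 assignments in the excerpt) on the assumption that they follow the node's current next index, and this macro afterwards moves any buffer whose computed nextN disagrees. A sketch of the surrounding frame handling, with next assumed to hold the speculated next-node index:

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          /* ... per-packet lookup fills pi0..pi3 and next0..next3 ... */
          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;

          vlib_validate_buffer_enqueue_x4 (vm, node, next, to_next, n_left_to_next,
                                           pi0, pi1, pi2, pi3,
                                           next0, next1, next2, next3);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }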
u32 * fib_index_by_sw_if_index
Table index indexed by software interface.
static uword ip4_lookup_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int lookup_for_responses_to_locally_received_packets)
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
ip_lookup_next_t
An adjacency is a representation of an attached L3 peer.
u16 lb_n_buckets_minus_1
number of buckets in the load-balance - 1.
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
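Inside the quad loop the next four buffers are prefetched while the current four are processed. A sketch of that pattern built from the helpers documented here; the p4..p7 names are assumptions.

          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            /* buffer metadata first, then enough packet data for the IPv4 header,
               one iteration ahead of use */
            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            vlib_prefetch_buffer_header (p6, LOAD);
            vlib_prefetch_buffer_header (p7, LOAD);

            CLIB_PREFETCH (p4->data, sizeof (ip4_header_t), LOAD);
            CLIB_PREFETCH (p5->data, sizeof (ip4_header_t), LOAD);
            CLIB_PREFETCH (p6->data, sizeof (ip4_header_t), LOAD);
            CLIB_PREFETCH (p7->data, sizeof (ip4_header_t), LOAD);
          }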
static u32 ip4_fib_mtrie_leaf_get_adj_index(ip4_fib_mtrie_leaf_t n)
From the stored slot value extract the LB index value.
dpo_id_t
The identity of a DPO is a combination of its type and its instance number/index of objects of that type.
static void ip_lookup_set_buffer_fib_index(u32 *fib_index_by_sw_if_index, vlib_buffer_t *b)
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
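Per packet, the index is translated to a buffer, the buffer's FIB index is derived from its RX interface, and the IPv4 header is read at the current data pointer. A sketch with the assumed names p0, ip0, dst_addr0 and im (a pointer to ip4_main):

          p0 = vlib_get_buffer (vm, pi0);
          ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
          ip0 = vlib_buffer_get_current (p0);
          dst_addr0 = &ip0->dst_address;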
load_balance_main_t load_balance_main
The one instance of load-balance main.
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
ip4_fib_mtrie_t mtrie
Mtrie for fast lookups.
#define CLIB_PREFETCH(addr, size, type)
static ip4_fib_t * ip4_fib_get(u32 index)
Get the FIB at the given index.
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
u16 cached_next_index
Next frame index that vector arguments were last enqueued to, the last time this node ran.
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step_one(const ip4_fib_mtrie_t *m, const ip4_address_t *dst_address)
Lookup step number 1.
static load_balance_t * load_balance_get(index_t lbi)
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
static uword is_pow2(uword x)
index_t dpoi_index
the index of objects of that type
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
vlib_buffer_t
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
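The loop setup at the top of the function pulls the incoming buffer indices out of the frame and starts speculating on the node's cached next index; a minimal sketch:

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;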
vlib_combined_counter_main_t
A collection of combined counters.
ip4_main_t ip4_main
Global ip4 main structure.
u16 flags
Copy of main node flags.
u16 dpoi_next_node
The next VLIB node to follow.
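Once a bucket's DPO has been chosen, its two fields drive the rest of the node: dpoi_next_node becomes the packet's next graph node, and dpoi_index is stashed on the buffer as the TX adjacency. A two-line sketch with the dpo0, next0 and p0 names from the excerpt:

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;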
#define VLIB_NODE_FLAG_TRACE
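At the tail of the function, tracing is applied to the whole frame when the node carries this flag; a sketch (returning the vector count is an assumption about the surrounding code):

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  return frame->n_vectors;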
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.