FD.io VPP  v18.04-17-g3a0d853
node.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <lb/lb.h>

#include <vnet/gre/packet.h>
#include <lb/lbhash.h>

#define foreach_lb_error \
 _(NONE, "no error") \
 _(PROTO_NOT_SUPPORTED, "protocol not supported")

typedef enum {
#define _(sym,str) LB_ERROR_##sym,
  foreach_lb_error
#undef _
  LB_N_ERROR,
} lb_error_t;

static char *lb_error_strings[] = {
#define _(sym,string) string,
  foreach_lb_error
#undef _
};
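/*
 * A sketch of what the foreach_lb_error X-macro expands to above:
 *   enum values: LB_ERROR_NONE, LB_ERROR_PROTO_NOT_SUPPORTED, LB_N_ERROR
 *   strings:     "no error", "protocol not supported"
 */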

typedef struct {
  u32 vip_index;
  u32 as_index;
} lb_trace_t;

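/*
 * Packet trace formatter: prints the VIP and AS recorded at capture time,
 * noting when either object has been freed since the packet was captured.
 */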
u8 *
format_lb_trace (u8 * s, va_list * args)
{
  lb_main_t *lbm = &lb_main;
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  lb_trace_t *t = va_arg (*args, lb_trace_t *);
  if (pool_is_free_index(lbm->vips, t->vip_index)) {
    s = format(s, "lb vip[%d]: This VIP was freed since capture\n", t->vip_index);
  } else {
    s = format(s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip, &lbm->vips[t->vip_index]);
  }
  if (pool_is_free_index(lbm->ass, t->as_index)) {
    s = format(s, "lb as[%d]: This AS was freed since capture\n", t->as_index);
  } else {
    s = format(s, "lb as[%d]: %U\n", t->as_index, format_lb_as, &lbm->ass[t->as_index]);
  }
  return s;
}

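/*
 * Return this thread's sticky (flow affinity) hash table, rebuilding it when
 * the configured bucket count has changed and refreshing its timeout.
 */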
lb_hash_t *lb_get_sticky_table(u32 thread_index)
{
  lb_main_t *lbm = &lb_main;
  lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
  //Check if size changed
  if (PREDICT_FALSE(sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
    {
      //Dereference everything in there
      lb_hash_bucket_t *b;
      u32 i;
      lb_hash_foreach_entry(sticky_ht, b, i) {
        vlib_refcount_add(&lbm->as_refcount, thread_index, b->value[i], -1);
        vlib_refcount_add(&lbm->as_refcount, thread_index, 0, 1);
      }

      lb_hash_free(sticky_ht);
      sticky_ht = NULL;
    }

  //Create if necessary
  if (PREDICT_FALSE(sticky_ht == NULL)) {
    lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
    sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
    clib_warning("Regenerated sticky table %p", sticky_ht);
  }

  ASSERT(sticky_ht);

  //Update timeout
  sticky_ht->timeout = lbm->flow_timeout;
  return sticky_ht;
}

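/*
 * Flow-hash port extraction for protocols other than TCP/UDP: no L4 ports
 * are available, so both helpers below contribute 0 to the hash.
 */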
u64
lb_node_get_other_ports4(ip4_header_t *ip40)
{
  return 0;
}

u64
lb_node_get_other_ports6(ip6_header_t *ip60)
{
  return 0;
}

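/*
 * Compute the per-flow hash over the packet's source/destination addresses
 * and L4 ports (IPv4 or IPv6, selected at compile time via is_input_v4).
 */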
static_always_inline u32
lb_node_get_hash(vlib_buffer_t *p, u8 is_input_v4)
{
  u32 hash;
  if (is_input_v4)
    {
      ip4_header_t *ip40;
      u64 ports;
      ip40 = vlib_buffer_get_current (p);
      if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP ||
                        ip40->protocol == IP_PROTOCOL_UDP))
        ports = ((u64)((udp_header_t *)(ip40 + 1))->src_port << 16) |
                ((u64)((udp_header_t *)(ip40 + 1))->dst_port);
      else
        ports = lb_node_get_other_ports4(ip40);

      hash = lb_hash_hash(*((u64 *)&ip40->address_pair), ports,
                          0, 0, 0);
    }
  else
    {
      ip6_header_t *ip60;
      ip60 = vlib_buffer_get_current (p);
      u64 ports;
      if (PREDICT_TRUE (ip60->protocol == IP_PROTOCOL_TCP ||
                        ip60->protocol == IP_PROTOCOL_UDP))
        ports = ((u64)((udp_header_t *)(ip60 + 1))->src_port << 16) |
                ((u64)((udp_header_t *)(ip60 + 1))->dst_port);
      else
        ports = lb_node_get_other_ports6(ip60);

      hash = lb_hash_hash(ip60->src_address.as_u64[0],
                          ip60->src_address.as_u64[1],
                          ip60->dst_address.as_u64[0],
                          ip60->dst_address.as_u64[1],
                          ports);
    }
  return hash;
}

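/*
 * Main load-balancing node function, specialized at compile time for the
 * input address family and encapsulation. For each packet: look up (or
 * create) the sticky-table entry selecting an application server, bump the
 * relevant VIP counter, encapsulate (GRE4/GRE6) or rewrite (L3DSR), and
 * enqueue the packet towards the selected AS's DPO.
 */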
static_always_inline uword
lb_node_fn (vlib_main_t * vm,
            vlib_node_runtime_t * node, vlib_frame_t * frame,
            u8 is_input_v4, //Compile-time parameter stating whether input is v4 (or v6)
            lb_encap_type_t encap_type) //Compile-time parameter selecting GRE4, GRE6 or L3DSR encap
{
  lb_main_t *lbm = &lb_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 lb_time = lb_hash_time_now(vm);

  lb_hash_t *sticky_ht = lb_get_sticky_table(thread_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  u32 nexthash0 = 0;
  if (PREDICT_TRUE(n_left_from > 0))
    nexthash0 = lb_node_get_hash(vlib_get_buffer (vm, from[0]), is_input_v4);

  while (n_left_from > 0)
  {
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0)
    {
      u32 pi0;
      vlib_buffer_t *p0;
      lb_vip_t *vip0;
      u32 asindex0;
      u16 len0;
      u32 available_index0;
      u8 counter = 0;
      u32 hash0 = nexthash0;

      if (PREDICT_TRUE(n_left_from > 1))
      {
        vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
        //Compute next hash and prefetch bucket
        nexthash0 = lb_node_get_hash(p1, is_input_v4);
        lb_hash_prefetch_bucket(sticky_ht, nexthash0);
        //Prefetch for encap, next
        CLIB_PREFETCH (vlib_buffer_get_current(p1) - 64, 64, STORE);
      }

      if (PREDICT_TRUE(n_left_from > 2))
      {
        vlib_buffer_t *p2;
        p2 = vlib_get_buffer(vm, from[2]);
        /* prefetch packet header and data */
        vlib_prefetch_buffer_header(p2, STORE);
        CLIB_PREFETCH (vlib_buffer_get_current(p2), 64, STORE);
      }

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;

      p0 = vlib_get_buffer (vm, pi0);
      vip0 = pool_elt_at_index (lbm->vips,
                                vnet_buffer (p0)->ip.adj_index[VLIB_TX]);

      if (is_input_v4)
      {
        ip4_header_t *ip40;
        ip40 = vlib_buffer_get_current (p0);
        len0 = clib_net_to_host_u16(ip40->length);
      }
      else
      {
        ip6_header_t *ip60;
        ip60 = vlib_buffer_get_current (p0);
        len0 = clib_net_to_host_u16(ip60->payload_length) + sizeof(ip6_header_t);
      }

      lb_hash_get(sticky_ht, hash0, vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                  lb_time, &available_index0, &asindex0);

      if (PREDICT_TRUE(asindex0 != ~0))
      {
        //Found an existing entry
        counter = LB_VIP_COUNTER_NEXT_PACKET;
      }
      else if (PREDICT_TRUE(available_index0 != ~0))
      {
        //There is an available slot for a new flow
        asindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
        counter = LB_VIP_COUNTER_FIRST_PACKET;
        counter = (asindex0 == 0)?LB_VIP_COUNTER_NO_SERVER:counter;

        //TODO: There are race conditions with as0 and vip0 manipulation.
        //Configuration may be changed, vectors resized, etc...

        //Dereference previously used
        vlib_refcount_add(&lbm->as_refcount, thread_index,
                          lb_hash_available_value(sticky_ht, hash0, available_index0), -1);
        vlib_refcount_add(&lbm->as_refcount, thread_index,
                          asindex0, 1);

        //Add sticky entry
        //Note that when there is no AS configured, an entry is added anyway,
        //but having no configured AS is not something that should happen.
        lb_hash_put(sticky_ht, hash0, asindex0,
                    vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                    available_index0, lb_time);
      }
      else
      {
        //Could not store new entry in the table
        asindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
        counter = LB_VIP_COUNTER_UNTRACKED_PACKET;
      }

      vlib_increment_simple_counter(&lbm->vip_counters[counter],
                                    thread_index,
                                    vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                                    1);

      //Now let's encap
      if ( (encap_type == LB_ENCAP_TYPE_GRE4)
           || (encap_type == LB_ENCAP_TYPE_GRE6) )
      {
        gre_header_t *gre0;
        if (encap_type == LB_ENCAP_TYPE_GRE4) /* encap GRE4*/
        {
          ip4_header_t *ip40;
          vlib_buffer_advance(p0, - sizeof(ip4_header_t) - sizeof(gre_header_t));
          ip40 = vlib_buffer_get_current(p0);
          gre0 = (gre_header_t *)(ip40 + 1);
          ip40->src_address = lbm->ip4_src_address;
          ip40->dst_address = lbm->ass[asindex0].address.ip4;
          ip40->ip_version_and_header_length = 0x45;
          ip40->ttl = 128;
          ip40->fragment_id = 0;
          ip40->flags_and_fragment_offset = 0;
          ip40->length = clib_host_to_net_u16(len0 + sizeof(gre_header_t) + sizeof(ip4_header_t));
          ip40->protocol = IP_PROTOCOL_GRE;
          ip40->checksum = ip4_header_checksum (ip40);
        }
        else /* encap GRE6*/
        {
          ip6_header_t *ip60;
          vlib_buffer_advance(p0, - sizeof(ip6_header_t) - sizeof(gre_header_t));
          ip60 = vlib_buffer_get_current(p0);
          gre0 = (gre_header_t *)(ip60 + 1);
          ip60->dst_address = lbm->ass[asindex0].address.ip6;
          ip60->src_address = lbm->ip6_src_address;
          ip60->hop_limit = 128;
          ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (0x6<<28);
          ip60->payload_length = clib_host_to_net_u16(len0 + sizeof(gre_header_t));
          ip60->protocol = IP_PROTOCOL_GRE;
        }

        gre0->flags_and_version = 0;
        gre0->protocol = (is_input_v4)?
            clib_host_to_net_u16(0x0800):
            clib_host_to_net_u16(0x86DD);
      } else if (encap_type == LB_ENCAP_TYPE_L3DSR) /* encap L3DSR*/
      {
        ip4_header_t *ip40;
        tcp_header_t *th0;

        ip40 = vlib_buffer_get_current(p0);
        ip40->dst_address = lbm->ass[asindex0].address.ip4;
        /* Get and rewrite DSCP bits */
        ip40->tos = (u8)((vip0->dscp & 0x3F)<<2);
        ip40->checksum = ip4_header_checksum (ip40);
        /* Recompute L4 checksum after rewriting the destination IP */
        th0 = ip4_next_header(ip40);
        th0->checksum = 0;
        th0->checksum = ip4_tcp_udp_compute_checksum(vm, p0, ip40);
      }

      if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
      {
        lb_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
        tr->as_index = asindex0;
        tr->vip_index = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
      }

      //Enqueue to next
      //Note that this is going to error if asindex0 == 0
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbm->ass[asindex0].dpo.dpoi_index;
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       n_left_to_next, pi0,
                                       lbm->ass[asindex0].dpo.dpoi_next_node);
    }
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  }

  return frame->n_vectors;
}

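/*
 * Per-variant entry points: thin wrappers instantiating lb_node_fn for each
 * (input address family, encapsulation) combination.
 */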
static uword
lb6_gre6_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return lb_node_fn(vm, node, frame, 0, LB_ENCAP_TYPE_GRE6);
}

static uword
lb6_gre4_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return lb_node_fn(vm, node, frame, 0, LB_ENCAP_TYPE_GRE4);
}

static uword
lb4_gre6_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return lb_node_fn(vm, node, frame, 1, LB_ENCAP_TYPE_GRE6);
}

static uword
lb4_gre4_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return lb_node_fn(vm, node, frame, 1, LB_ENCAP_TYPE_GRE4);
}

static uword
lb4_l3dsr_node_fn (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return lb_node_fn(vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR);
}

VLIB_REGISTER_NODE (lb6_gre6_node) =
{
  .function = lb6_gre6_node_fn,
  .name = "lb6-gre6",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,

  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,

  .n_next_nodes = LB_N_NEXT,
  .next_nodes =
  {
    [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb6_gre4_node) =
{
  .function = lb6_gre4_node_fn,
  .name = "lb6-gre4",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,

  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,

  .n_next_nodes = LB_N_NEXT,
  .next_nodes =
  {
    [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre6_node) =
{
  .function = lb4_gre6_node_fn,
  .name = "lb4-gre6",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,

  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,

  .n_next_nodes = LB_N_NEXT,
  .next_nodes =
  {
    [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_gre4_node) =
{
  .function = lb4_gre4_node_fn,
  .name = "lb4-gre4",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,

  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,

  .n_next_nodes = LB_N_NEXT,
  .next_nodes =
  {
    [LB_NEXT_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (lb4_l3dsr_node) =
{
  .function = lb4_l3dsr_node_fn,
  .name = "lb4-l3dsr",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,

  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,

  .n_next_nodes = LB_N_NEXT,
  .next_nodes =
  {
    [LB_NEXT_DROP] = "error-drop"
  },
};