FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
udp_encap_node.c
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/udp/udp_encap.h>

typedef struct udp4_encap_trace_t_
{
  udp_header_t udp;
  ip4_header_t ip;
} udp4_encap_trace_t;

typedef struct udp6_encap_trace_t_
{
  udp_header_t udp;
  ip6_header_t ip;
} udp6_encap_trace_t;

/* Stats for each UDP encap object, defined in udp_encap.c */
extern vlib_combined_counter_main_t udp_encap_counters;

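/*
 * Packet trace format functions: display the IP and UDP headers that
 * were painted onto the traced buffer.
 */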
static u8 *
format_udp4_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  udp4_encap_trace_t *t;

  t = va_arg (*args, udp4_encap_trace_t *);

  s = format (s, "%U\n %U",
              format_ip4_header, &t->ip, sizeof (t->ip),
              format_udp_header, &t->udp, sizeof (t->udp));
  return (s);
}

static u8 *
format_udp6_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  udp6_encap_trace_t *t;

  t = va_arg (*args, udp6_encap_trace_t *);

  s = format (s, "%U\n %U",
              format_ip6_header, &t->ip, sizeof (t->ip),
              format_udp_header, &t->udp, sizeof (t->udp));
  return (s);
}
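
/*
 * Shared worker for the udp4-encap and udp6-encap nodes.  Buffers arrive
 * with the UDP encap object's index in the TX adjacency slot; the object's
 * pre-computed IP/UDP headers are painted onto each packet, per-object
 * packet/byte counters are bumped, and the buffer is forwarded to the next
 * node given by the encap object's stacked DPO.  The usual VPP dual-loop
 * pattern is used: two buffers per iteration with buffer-header prefetch of
 * the following pair, then a single-buffer loop for the remainder.
 */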
always_inline uword
udp_encap_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame, int is_encap_v6)
{
  vlib_combined_counter_main_t *cm = &udp_encap_counters;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  u32 thread_index = vlib_get_thread_index ();

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          udp_encap_t *ue0, *ue1;
          u32 bi0, next0, uei0;
          u32 bi1, next1, uei1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];

          vlib_increment_combined_counter (cm, thread_index, uei0, 1,
                                           vlib_buffer_length_in_chain (vm,
                                                                        b0));
          vlib_increment_combined_counter (cm, thread_index, uei1, 1,
                                           vlib_buffer_length_in_chain (vm,
                                                                        b1));

          /* Rewrite packet header and update lengths. */
          ue0 = udp_encap_get (uei0);
          ue1 = udp_encap_get (uei1);

          /* Paint */
          if (is_encap_v6)
            {
              const u8 n_bytes =
                sizeof (udp_header_t) + sizeof (ip6_header_t);
              ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
                                (u8 *) & ue1->ue_hdrs, n_bytes, 0);
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp6_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
                  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp6_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof (*tr));
                  tr->udp = ue1->ue_hdrs.ip6.ue_udp;
                  tr->ip = ue1->ue_hdrs.ip6.ue_ip6;
                }
            }
          else
            {
              const u8 n_bytes =
                sizeof (udp_header_t) + sizeof (ip4_header_t);

              ip_udp_encap_two (vm, b0, b1,
                                (u8 *) & ue0->ue_hdrs,
                                (u8 *) & ue1->ue_hdrs, n_bytes, 1);

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp4_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
                  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp4_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof (*tr));
                  tr->udp = ue1->ue_hdrs.ip4.ue_udp;
                  tr->ip = ue1->ue_hdrs.ip4.ue_ip4;
                }
            }

          next0 = ue0->ue_dpo.dpoi_next_node;
          next1 = ue1->ue_dpo.dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = ue1->ue_dpo.dpoi_index;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

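      /* Single-buffer loop for the remainder of the frame. */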
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0, uei0;
          vlib_buffer_t *b0;
          udp_encap_t *ue0;

          bi0 = to_next[0] = from[0];

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

          /* Rewrite packet header and update lengths. */
          ue0 = udp_encap_get (uei0);

          vlib_increment_combined_counter (cm, thread_index, uei0, 1,
                                           vlib_buffer_length_in_chain (vm,
                                                                        b0));

          /* Paint */
          if (is_encap_v6)
            {
              const u8 n_bytes =
                sizeof (udp_header_t) + sizeof (ip6_header_t);
              ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
                                0);

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp6_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
                  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
                }
            }
          else
            {
              const u8 n_bytes =
                sizeof (udp_header_t) + sizeof (ip4_header_t);

              ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
                                1);

              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  udp4_encap_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
                  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
                }
            }

          next0 = ue0->ue_dpo.dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
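
/*
 * Thin wrappers that instantiate the shared inline for IPv4
 * (is_encap_v6 = 0) and IPv6 (is_encap_v6 = 1) encapsulation.
 */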
static uword
udp4_encap (vlib_main_t * vm,
            vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return udp_encap_inline (vm, node, frame, 0);
}

static uword
udp6_encap (vlib_main_t * vm,
            vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return udp_encap_inline (vm, node, frame, 1);
}

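/*
 * Graph node registrations.  Neither node declares static next nodes
 * (.n_next_nodes = 0); the per-packet next node is taken from the
 * encap object's stacked DPO.
 */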
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (udp4_encap_node) = {
  .function = udp4_encap,
  .name = "udp4-encap",
  .vector_size = sizeof (u32),

  .format_trace = format_udp4_encap_trace,

  .n_next_nodes = 0,
};
VLIB_NODE_FUNCTION_MULTIARCH (udp4_encap_node, udp4_encap);

VLIB_REGISTER_NODE (udp6_encap_node) = {
  .function = udp6_encap,
  .name = "udp6-encap",
  .vector_size = sizeof (u32),

  .format_trace = format_udp6_encap_trace,

  .n_next_nodes = 0,
};
VLIB_NODE_FUNCTION_MULTIARCH (udp6_encap_node, udp6_encap);
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */