FD.io VPP v16.06
Vector Packet Processing
replication.c
/*
 * replication.c : packet replication
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/replication.h>


replication_main_t replication_main;


replication_context_t *
replication_prep (vlib_main_t * vm,
                  vlib_buffer_t * b0,
                  u32 recycle_node_index,
                  u32 l2_packet)
{
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  uword cpu_number = vm->cpu_index;
  ip4_header_t * ip;
  u32 ctx_id;

  // Allocate a context, reserve context 0
  if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
    pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);

  pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
  ctx_id = ctx - rm->contexts[cpu_number];

  // Save state from vlib buffer
  ctx->saved_clone_count = b0->clone_count;
  ctx->saved_free_list_index = b0->free_list_index;
  ctx->current_data = b0->current_data;

  // Set up vlib buffer hooks
  b0->clone_count = ctx_id;
  b0->free_list_index = rm->recycle_list_index;

  // Save feature state
  ctx->recycle_node_index = recycle_node_index;

  // Save vnet state
  clib_memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));

  // Save packet contents
  ctx->l2_packet = l2_packet;
  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
  if (l2_packet) {
    // Save ethernet header
    ctx->l2_header[0] = ((u64 *)ip)[0];
    ctx->l2_header[1] = ((u64 *)ip)[1];
    ctx->l2_header[2] = ((u64 *)ip)[2];
    // set ip to the true ip header
    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
  }

  // Copy L3 fields.
  // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is
  // in the first two bytes of both the ip4 and ip6 headers.
  ctx->ip_tos = *((u16 *)(ip));

  // Save the ip4 checksum as well. We just blindly save the corresponding two
  // bytes even for ip6 packets.
  ctx->ip4_checksum = ip->checksum;

  return ctx;
}


replication_context_t *
replication_recycle (vlib_main_t * vm,
                     vlib_buffer_t * b0,
                     u32 is_last)
{
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  uword cpu_number = vm->cpu_index;
  ip4_header_t * ip;

  // Get access to the replication context
  ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);

  // Restore vnet buffer state
  clib_memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));

  // Restore the packet start (current_data) and length
  vlib_buffer_advance (b0, ctx->current_data - b0->current_data);

  // Restore packet contents
  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
  if (ctx->l2_packet) {
    // Restore ethernet header
    ((u64 *)ip)[0] = ctx->l2_header[0];
    ((u64 *)ip)[1] = ctx->l2_header[1];
    ((u64 *)ip)[2] = ctx->l2_header[2];
    // set ip to the true ip header
    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
  }

  // Restore L3 fields
  *((u16 *)(ip)) = ctx->ip_tos;
  ip->checksum = ctx->ip4_checksum;

  if (is_last) {
    // This is the last replication in the list.
    // Restore original buffer free functionality.
    b0->clone_count = ctx->saved_clone_count;
    b0->free_list_index = ctx->saved_free_list_index;

    // Free context back to its pool
    pool_put (rm->contexts[cpu_number], ctx);
  }

  return ctx;
}


/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void replication_recycle_callback (vlib_main_t * vm,
                                          vlib_buffer_free_list_t * fl)
{
  vlib_frame_t * f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 * from;
  u32 * to_next = 0;
  u32 bi0, pi0;
  vlib_buffer_t * b0;
  int i;
  replication_main_t * rm = &replication_main;
  replication_context_t * ctx;
  u32 feature_node_index = 0;
  uword cpu_number = vm->cpu_index;

  // All buffers in the list are destined to the same recycle node.
  // Pull the recycle node index from the first buffer.
  // Note: this could be sped up if the node index were stuffed into
  // the freelist itself.
  if (vec_len (fl->aligned_buffers) > 0) {
    bi0 = fl->aligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number],
                             b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  } else if (vec_len (fl->unaligned_buffers) > 0) {
    bi0 = fl->unaligned_buffers[0];
    b0 = vlib_get_buffer (vm, bi0);
    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
    feature_node_index = ctx->recycle_node_index;
  }

  /* aligned, unaligned buffers */
  for (i = 0; i < 2; i++)
    {
      if (i == 0)
        {
          from = fl->aligned_buffers;
          n_left_from = vec_len (from);
        }
      else
        {
          from = fl->unaligned_buffers;
          n_left_from = vec_len (from);
        }

      while (n_left_from > 0)
        {
          if (PREDICT_FALSE(n_left_to_next == 0))
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, feature_node_index, f);
                }

              f = vlib_get_frame_to_node (vm, feature_node_index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }

          bi0 = from[0];
          if (PREDICT_TRUE(n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index(vm, pi0, LOAD);
            }

          b0 = vlib_get_buffer (vm, bi0);

          // Mark that this buffer was just recycled
          b0->flags |= VLIB_BUFFER_IS_RECYCLED;

          // If buffer is traced, mark frame as traced
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            f->flags |= VLIB_FRAME_TRACE;

          to_next[0] = bi0;

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }

  vec_reset_length (fl->aligned_buffers);
  vec_reset_length (fl->unaligned_buffers);

  if (f)
    {
      ASSERT(n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, feature_node_index, f);
    }
}


clib_error_t *replication_init (vlib_main_t * vm)
{
  replication_main_t * rm = &replication_main;
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * fl;
  __attribute__((unused)) replication_context_t * ctx;
  vlib_thread_main_t * tm = vlib_get_thread_main();

  rm->vlib_main = vm;
  rm->vnet_main = vnet_get_main();
  rm->recycle_list_index =
      vlib_buffer_create_free_list (vm, 1024 /* fictional */,
                                    "replication-recycle");

  fl = pool_elt_at_index (bm->buffer_free_list_pool,
                          rm->recycle_list_index);

  fl->buffers_added_to_freelist_function = replication_recycle_callback;

  // Verify the replication context is the expected size
  ASSERT(sizeof(replication_context_t) == 128); // 2 cache lines

  vec_validate (rm->contexts, tm->n_vlib_mains - 1);
  return 0;
}

VLIB_INIT_FUNCTION (replication_init);
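
Usage note (not part of replication.c): replication_prep() is intended to be called by a replicating graph node when it sends the first copy of a packet, and replication_recycle() each time the same buffer is re-sent, with is_last set on the final copy so the saved buffer state is restored and the context returned to its pool. The sketch below illustrates that calling pattern; the function name, the node index argument, and the is_first_copy/is_last_copy flags are hypothetical placeholders, and the dual-loop machinery of a real VPP node is omitted.

/*
 * Illustrative sketch only -- not part of replication.c.  Shows how a
 * replicating node might drive the API above; "example_node_index" and
 * the two flags are hypothetical.
 */
#include <vlib/vlib.h>
#include <vnet/replication.h>

static inline void
example_replicate_copy (vlib_main_t * vm, vlib_buffer_t * b0,
                        u32 example_node_index,
                        int is_first_copy, int is_last_copy)
{
  replication_context_t * ctx;

  if (is_first_copy)
    {
      // First copy: save the buffer/vnet state and point the buffer at the
      // "replication-recycle" free list, so freeing it lands in the
      // recycle callback instead of a normal free.
      ctx = replication_prep (vm, b0, example_node_index, 1 /* l2_packet */);
    }
  else
    {
      // Later copies: restore the saved state onto the same buffer.  On the
      // last copy the context is released and the original free list index
      // restored, so the buffer is freed normally afterwards.
      ctx = replication_recycle (vm, b0, is_last_copy);
    }

  (void) ctx;  // a real node would now enqueue b0 to its next node
}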