FD.io VPP  v18.07.1-19-g511ce25
Vector Packet Processing
threads.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vlib_threads_h
#define included_vlib_threads_h

#include <vlib/main.h>
#include <linux/sched.h>

/*
 * To enable detailed tracing of barrier usage, including call stacks and
 * timings, define BARRIER_TRACING here or in the relevant TAGS. If also
 * built with CLIB_DEBUG, timings will _not_ be representative of normal
 * code execution.
 */

// #define BARRIER_TRACING 1

/*
 * Two options for barrier tracing output: syslog & elog.
 */

// #define BARRIER_TRACING_ELOG 1

extern vlib_main_t **vlib_mains;

void vlib_set_thread_name (char *name);

/* arg is actually a vlib_worker_thread_t * */
typedef void (vlib_thread_function_t) (void *arg);

typedef struct vlib_thread_registration_
{
  /* constructor generated list of thread registrations */
  struct vlib_thread_registration_ *next;

  /* config parameters */
  char *name;
  char *short_name;
  vlib_thread_function_t *function;
  uword mheap_size;
  int fixed_count;
  u32 count;
  int no_data_structure_clone;
  u32 frame_queue_nelts;

  /* All threads of this type run on pthreads */
  int use_pthreads;
  u32 first_index;
  uword *coremask;
} vlib_thread_registration_t;
/*
 * Frames have their cpu / vlib_main_t index in the low-order N bits.
 * Make VLIB_MAX_CPUS a power-of-two, please...
 */

#ifndef VLIB_MAX_CPUS
#define VLIB_MAX_CPUS 256
#endif

#if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
#error Please increase number of per-cpu mheaps
#endif

#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0xff, max */
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
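/*
 * Illustrative sketch (not part of this header): with VLIB_MAX_CPUS == 256,
 * VLIB_CPU_MASK is 0xff, so a value carrying a cpu / vlib_main_t index in
 * its low-order bits can be split as follows (variable names here are
 * hypothetical):
 *
 *   u32 cpu_index = combined & VLIB_CPU_MASK;
 *   u32 offset    = combined & VLIB_OFFSET_MASK;
 */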
#define VLIB_LOG2_THREAD_STACK_SIZE (21)
#define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
typedef enum
{
  VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME,
} vlib_frame_queue_msg_type_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 valid;
  u32 msg_type;
  u32 n_vectors;
  u32 last_n_vectors;

  /* 256 * 4 = 1024 bytes, even mult of cache line size */
  u32 buffer_index[VLIB_FRAME_SIZE];
}
vlib_frame_queue_elt_t;
typedef struct
{
  /* First cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *wait_at_barrier;
  volatile u32 *workers_at_barrier;

  /* Second Cache Line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  void *thread_mheap;
  u8 *thread_stack;
  void (*thread_function) (void *);
  void *thread_function_arg;
  i64 recursion_level;
  elog_track_t elog_track;
  u32 instance_id;
  vlib_thread_registration_t *registration;
  u8 *name;
  u64 barrier_sync_count;
#ifdef BARRIER_TRACING
  const char *barrier_caller;
  const char *barrier_context;
#endif
  volatile u32 *node_reforks_required;

  long lwp;
  int lcore_id;
  pthread_t thread_id;
} vlib_worker_thread_t;

extern vlib_worker_thread_t *vlib_worker_threads;
typedef struct
{
  /* enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u64 tail;
  u64 enqueues;
  u64 enqueue_ticks;
  u64 enqueue_vectors;
  u32 enqueue_full_events;

  /* dequeue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  volatile u64 head;
  u64 dequeues;
  u64 dequeue_ticks;
  u64 dequeue_vectors;
  u64 trace;
  u64 vector_threshold;

  /* dequeue hint to enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  volatile u64 head_hint;

  /* read-only, constant, shared */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline3);
  vlib_frame_queue_elt_t *elts;
  u32 nelts;
}
vlib_frame_queue_t;

typedef struct
{
  u32 node_index;
  vlib_frame_queue_t **vlib_frame_queues;

  /* for frame queue tracing */
  frame_queue_trace_t *frame_queue_traces;
  frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;
typedef struct
{
  uword node_index;
  uword type_opaque;
  uword data;
} vlib_process_signal_event_mt_args_t;

/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);

int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
			      u32 frame_queue_index, vlib_frame_t * frame,
			      vlib_frame_queue_msg_type_t type);

int
vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm);

void vlib_worker_thread_node_runtime_update (void);

void vlib_create_worker_threads (vlib_main_t * vm, int n,
				 void (*thread_function) (void *));

void vlib_worker_thread_init (vlib_worker_thread_t * w);
u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);
/* Check for a barrier sync request every 30ms */
#define BARRIER_SYNC_DELAY (0.030000)

#if CLIB_DEBUG > 0
/* long barrier timeout, for gdb... */
#define BARRIER_SYNC_TIMEOUT (600.1)
#else
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif

#ifdef BARRIER_TRACING
#define vlib_worker_thread_barrier_sync(X) {vlib_worker_threads[0].barrier_caller=__FUNCTION__;vlib_worker_thread_barrier_sync_int(X);}
#else
#define vlib_worker_thread_barrier_sync(X) vlib_worker_thread_barrier_sync_int(X)
#endif
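/*
 * Illustrative usage sketch (not part of this header): control-plane code
 * typically parks the workers at the barrier, updates shared state, then
 * releases them:
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... update shared data structures ...
 *   vlib_worker_thread_barrier_release (vm);
 */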
void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
void vlib_worker_thread_node_refork (void);

static_always_inline uword
vlib_get_thread_index (void)
{
  return __os_thread_index;
}

always_inline void
vlib_smp_unsafe_warning (void)
{
  if (CLIB_DEBUG > 0)
    {
      if (vlib_get_thread_index ())
	fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
    }
}
typedef enum
{
  VLIB_WORKER_THREAD_FORK_FIXUP_ILLEGAL = 0,
  VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX,
} vlib_fork_fixup_t;

void vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which);
#define foreach_vlib_main(body)                         \
do {                                                    \
  vlib_main_t ** __vlib_mains = 0, *this_vlib_main;     \
  int ii;                                               \
                                                        \
  for (ii = 0; ii < vec_len (vlib_mains); ii++)         \
    {                                                   \
      this_vlib_main = vlib_mains[ii];                  \
      ASSERT (ii == 0 ||                                \
	      this_vlib_main->parked_at_barrier == 1);  \
      if (this_vlib_main)                               \
        vec_add1 (__vlib_mains, this_vlib_main);        \
    }                                                   \
                                                        \
  for (ii = 0; ii < vec_len (__vlib_mains); ii++)       \
    {                                                   \
      this_vlib_main = __vlib_mains[ii];                \
      /* body uses this_vlib_main... */                 \
      (body);                                           \
    }                                                   \
  vec_free (__vlib_mains);                              \
} while (0);
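/*
 * Illustrative usage sketch (not part of this header): the double
 * parentheses let the body contain commas; the flag written below is
 * hypothetical.
 *
 *   foreach_vlib_main (({
 *     this_vlib_main->some_per_thread_flag = 1;
 *   }));
 */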
#define foreach_sched_policy \
  _(SCHED_OTHER, OTHER, "other") \
  _(SCHED_BATCH, BATCH, "batch") \
  _(SCHED_IDLE, IDLE, "idle") \
  _(SCHED_FIFO, FIFO, "fifo") \
  _(SCHED_RR, RR, "rr")

typedef enum
{
#define _(v,f,s) SCHED_POLICY_##f = v,
  foreach_sched_policy
#undef _
    SCHED_POLICY_N,
} sched_policy_t;
typedef struct
{
  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
					  unsigned lcore_id);
  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
} vlib_thread_callbacks_t;
typedef struct
{
  /* Link list of registrations, built by constructors */
  vlib_thread_registration_t *next;

  /* Vector of registrations, w/ non-data-structure clones at the top */
  vlib_thread_registration_t **registrations;

  uword *thread_registrations_by_name;

  vlib_worker_thread_t *worker_threads;

  /*
   * Launch all threads as pthreads,
   * not eal_rte_launch (strict affinity) threads
   */
  int use_pthreads;

  /* Number of vlib_main / vnet_main clones */
  u32 n_vlib_mains;

  /* Number of thread stacks to create */
  u32 n_thread_stacks;

  /* Number of pthreads */
  u32 n_pthreads;

  /* Number of threads */
  u32 n_threads;

  /* Number of cores to skip, must match the core mask */
  u32 skip_cores;

  /* Thread prefix name */
  u8 *thread_prefix;

  /* main thread lcore */
  u32 main_lcore;

  /* Bitmap of available CPU cores */
  uword *cpu_core_bitmap;

  /* Bitmap of available CPU sockets (NUMA nodes) */
  uword *cpu_socket_bitmap;

  /* Worker handoff queues */
  vlib_frame_queue_main_t *frame_queue_mains;

  /* worker thread initialization barrier */
  volatile u32 worker_thread_release;

  /* scheduling policy */
  u32 sched_policy;

  /* scheduling policy priority */
  u32 sched_priority;

  /* callbacks */
  vlib_thread_callbacks_t cb;
  int extern_thread_mgmt;
} vlib_thread_main_t;

extern vlib_thread_main_t vlib_thread_main;
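/*
 * For context (an assumption, not defined in this header): fields such as
 * main_lcore, skip_cores and the worker count are normally populated from
 * the "cpu" section of startup.conf, e.g.
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-5
 *   }
 */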
#include <vlib/global_funcs.h>

#define VLIB_REGISTER_THREAD(x,...)                     \
  __VA_ARGS__ vlib_thread_registration_t x;             \
static void __vlib_add_thread_registration_##x (void)   \
  __attribute__((__constructor__)) ;                    \
static void __vlib_add_thread_registration_##x (void)   \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  x.next = tm->next;                                    \
  tm->next = &x;                                        \
}                                                       \
static void __vlib_rm_thread_registration_##x (void)    \
  __attribute__((__destructor__)) ;                     \
static void __vlib_rm_thread_registration_##x (void)    \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
}                                                       \
__VA_ARGS__ vlib_thread_registration_t x
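/*
 * Illustrative usage sketch, modeled on the "workers" registration in
 * threads.c; the exact initializer values here are an assumption:
 *
 *   VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
 *     .name = "workers",
 *     .short_name = "wk",
 *     .function = vlib_worker_thread_fn,
 *   };
 */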
static inline u32
vlib_num_workers ()
{
  return vlib_thread_main.n_vlib_mains - 1;
}

static inline u32
vlib_get_worker_thread_index (u32 worker_index)
{
  return worker_index + 1;
}

static inline u32
vlib_get_worker_index (u32 thread_index)
{
  return thread_index - 1;
}

static inline u32
vlib_get_current_worker_index ()
{
  return vlib_get_thread_index () - 1;
}
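/*
 * Illustrative sketch (not part of this header): with N workers, thread
 * indices run 0..N (0 is the main thread) and worker indices run 0..N-1,
 * so the two helpers above are inverses of each other for worker threads:
 *
 *   u32 ti = vlib_get_worker_thread_index (2);   // 3
 *   u32 wi = vlib_get_worker_index (ti);         // 2
 */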
static inline void
vlib_worker_thread_barrier_check (void)
{
  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
    {
      vlib_main_t *vm = vlib_get_main ();
      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
      if (CLIB_DEBUG > 0)
	{
	  vm = vlib_get_main ();
	  vm->parked_at_barrier = 1;
	}
      while (*vlib_worker_threads->wait_at_barrier)
	;

      /*
       * Recompute the offset from thread-0 time.
       * Note that vlib_time_now adds vm->time_offset, so
       * clear it first. Save the resulting idea of "now", to
       * see how well we're doing. See show_clock_command_fn(...)
       */
      {
	f64 now;
	vm->time_offset = 0.0;
	now = vlib_time_now (vm);
	vm->time_offset = vlib_global_main.time_last_barrier_release - now;
	vm->time_last_barrier_release = vlib_time_now (vm);
      }

      if (CLIB_DEBUG > 0)
	vm->parked_at_barrier = 0;
      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);

      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
	{
	  vlib_worker_thread_node_refork ();
	  clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
			       -1);
	  while (*vlib_worker_threads->node_reforks_required)
	    ;
	}
    }
}
static inline vlib_main_t *
vlib_get_worker_vlib_main (u32 worker_index)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
  vm = vlib_mains[worker_index + 1];
  ASSERT (vm);
  return vm;
}
static inline void
vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER ();
  hf->valid = 1;
}
static inline vlib_frame_queue_elt_t *
vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  u64 new_tail;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
    vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}
static inline vlib_frame_queue_t *
is_vlib_frame_queue_congested (u32 frame_queue_index,
			       u32 index,
			       u32 queue_hi_thresh,
			       vlib_frame_queue_t **
			       handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);

  fq = handoff_queue_by_worker_index[index];
  if (fq != (vlib_frame_queue_t *) (~0))
    return fq;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
    {
      /* a valid entry in the array will indicate the queue has reached
       * the specified threshold and is congested
       */
      handoff_queue_by_worker_index[index] = fq;
      fq->enqueue_full_events++;
      return fq;
    }

  return NULL;
}
static inline vlib_frame_queue_elt_t *
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
				   u32 vlib_worker_index,
				   vlib_frame_queue_elt_t **
				   handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
    return handoff_queue_elt_by_worker_index[vlib_worker_index];

  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);

  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

  return elt;
}
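/*
 * Illustrative sketch (not part of this header): a typical handoff path
 * grabs (or reuses) the per-worker queue element, appends buffer indices,
 * and hands the element off once it fills; variable names here are
 * hypothetical.
 *
 *   vlib_frame_queue_elt_t *hf =
 *     vlib_get_worker_handoff_queue_elt (fq_index, next_worker_index,
 *                                        handoff_queue_elt_by_worker_index);
 *   hf->buffer_index[hf->n_vectors++] = bi0;
 *   if (hf->n_vectors == VLIB_FRAME_SIZE)
 *     vlib_put_frame_queue_elt (hf);
 */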
u8 *vlib_thread_stack_init (uword thread_index);
int vlib_thread_cb_register (struct vlib_main_t *vm,
			     vlib_thread_callbacks_t * cb);
extern void *rpc_call_main_thread_cb_fn;

void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
				     args);
void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
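/*
 * Illustrative sketch (not part of this header): vlib_rpc_call_main_thread
 * hands a copy of 'args' (of 'size' bytes) to 'function', which then runs
 * in the main thread's context. The callback and argument struct below are
 * hypothetical.
 *
 *   static void my_rpc_cb (my_args_t * a) { ... runs on thread 0 ... }
 *
 *   my_args_t a = { .sw_if_index = 1 };
 *   vlib_rpc_call_main_thread (my_rpc_cb, (u8 *) &a, sizeof (a));
 */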
#endif /* included_vlib_threads_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */