FD.io VPP v20.05.1-6-gf53edbc3b
threads.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vlib_threads_h
#define included_vlib_threads_h

#include <vlib/main.h>
#include <linux/sched.h>

extern vlib_main_t **vlib_mains;

void vlib_set_thread_name (char *name);

/* arg is actually a vlib_worker_thread_t * */
typedef void (vlib_thread_function_t) (void *arg);

typedef struct vlib_thread_registration_
{
  /* constructor generated list of thread registrations */
  struct vlib_thread_registration_ *next;

  /* config parameters */
  char *name;
  char *short_name;
  vlib_thread_function_t *function;
  uword mheap_size;
  int fixed_count;
  u32 count;
  int no_data_structure_clone;
  u32 frame_queue_nelts;

  /* All threads of this type run on pthreads */
  int use_pthreads;
  u32 first_index;
  uword *coremask;
} vlib_thread_registration_t;

/*
 * Frames have their cpu / vlib_main_t index in the low-order N bits
 * Make VLIB_MAX_CPUS a power-of-two, please...
 */

#ifndef VLIB_MAX_CPUS
#define VLIB_MAX_CPUS 256
#endif

#if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
#error Please increase number of per-cpu mheaps
#endif

#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0xff with the default 256 */
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
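
/*
 * Editor's sketch (not part of the original header): with the default
 * VLIB_MAX_CPUS of 256, VLIB_CPU_MASK is 0xff and VLIB_OFFSET_MASK is
 * ~0xff, so the owning thread / vlib_main_t index and the remaining
 * offset bits can be recovered with plain mask arithmetic:
 *
 *   u32 thread_index = packed_value & VLIB_CPU_MASK;
 *   u32 offset       = packed_value & VLIB_OFFSET_MASK;
 */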

#define VLIB_LOG2_THREAD_STACK_SIZE (21)
#define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
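
/* Editor's note: 1 << 21 bytes, i.e. 2 MB of stack per vlib thread. */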

typedef enum
{
  VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME,
} vlib_frame_queue_msg_type_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 valid;
  u32 msg_type;
  u32 n_vectors;
  u32 last_n_vectors;

  /* 256 * 4 = 1024 bytes, even mult of cache line size */
  u32 buffer_index[VLIB_FRAME_SIZE];
}
vlib_frame_queue_elt_t;

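/*
 * Editor's sketch (not part of the original header): 'valid' is the
 * producer/consumer handshake for a ring slot. The enqueuing thread fills
 * msg_type, n_vectors and buffer_index[], then publishes with valid = 1
 * behind a memory barrier (see vlib_put_frame_queue_elt below); the
 * dequeuing thread consumes the element and clears valid so the slot can
 * be reused.
 */
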
typedef struct
{
  /* First cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *wait_at_barrier;
  volatile u32 *workers_at_barrier;

  /* Second Cache Line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  void *thread_mheap;
  u8 *thread_stack;
  void (*thread_function) (void *);
  void *thread_function_arg;
  i64 recursion_level;
  elog_track_t elog_track;
  u32 instance_id;
  vlib_thread_registration_t *registration;
  u8 *name;
  u64 barrier_sync_count;
  u8 barrier_elog_enabled;
  const char *barrier_caller;
  const char *barrier_context;
  volatile u32 *node_reforks_required;

  long lwp;
  int cpu_id;
  int core_id;
  int numa_id;
  pthread_t thread_id;
} vlib_worker_thread_t;

extern vlib_worker_thread_t *vlib_worker_threads;

typedef struct
{
  /* enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u64 tail;
  u64 enqueues;
  u64 enqueue_ticks;
  u64 enqueue_vectors;
  u32 enqueue_full_events;

  /* dequeue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  volatile u64 head;
  u64 dequeues;
  u64 dequeue_ticks;
  u64 dequeue_vectors;
  u64 trace;
  u64 vector_threshold;

  /* dequeue hint to enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  volatile u64 head_hint;

  /* read-only, constant, shared */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline3);
  vlib_frame_queue_elt_t *elts;
  u32 nelts;
}
vlib_frame_queue_t;

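/*
 * Editor's sketch (not part of the original header): enqueuing threads
 * advance 'tail', the owning thread advances 'head', and 'head_hint' is a
 * lazily-updated copy of head kept on its own cache line so producers can
 * test for space without bouncing the consumer's line. With nelts a power
 * of two, sequence number s maps to slot:
 *
 *   vlib_frame_queue_elt_t *elt = fq->elts + (s & (fq->nelts - 1));
 *
 * and the ring is full when tail >= head_hint + nelts (compare
 * vlib_get_frame_queue_elt below).
 */
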
typedef struct
{
  vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index;
  vlib_frame_queue_t **congested_handoff_queue_by_thread_index;
} vlib_frame_queue_per_thread_data_t;

typedef struct
{
  u32 node_index;
  u32 frame_queue_nelts;
  u32 queue_hi_thresh;

  vlib_frame_queue_t **vlib_frame_queues;
  vlib_frame_queue_per_thread_data_t *per_thread_data;

  /* for frame queue tracing */
  frame_queue_trace_t *frame_queue_traces;
  frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;

typedef struct
{
  uword node_index;
  uword type_opaque;
  uword data;
} vlib_process_signal_event_mt_args_t;

/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);

int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
                              u32 frame_queue_index, vlib_frame_t * frame,
                              vlib_frame_queue_msg_type_t type);

int
vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm);

void vlib_worker_thread_node_runtime_update (void);

void vlib_create_worker_threads (vlib_main_t * vm, int n,
                                 void (*thread_function) (void *));

void vlib_worker_thread_init (vlib_worker_thread_t * w);
u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);

/* Check for a barrier sync request every 30ms */
#define BARRIER_SYNC_DELAY (0.030000)

#if CLIB_DEBUG > 0
/* long barrier timeout, for gdb... */
#define BARRIER_SYNC_TIMEOUT (600.1)
#else
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif

#define vlib_worker_thread_barrier_sync(X) {vlib_worker_thread_barrier_sync_int(X, __FUNCTION__);}

void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm,
                                          const char *func_name);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
u8 vlib_worker_thread_barrier_held (void);
void vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm);
void vlib_worker_thread_node_refork (void);
/**
 * Wait until each of the workers has been once around the track
 */
void vlib_worker_wait_one_loop (void);
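
/*
 * Editor's sketch (not part of the original header): the usual pattern on
 * the main thread. The macro passes __FUNCTION__ so the caller shows up
 * in barrier traces; workers spin in vlib_worker_thread_barrier_check ()
 * until the release:
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   (mutate shared data structures; all workers are parked here)
 *   vlib_worker_thread_barrier_release (vm);
 */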

static_always_inline uword
vlib_get_thread_index (void)
{
  return __os_thread_index;
}

always_inline void
vlib_smp_unsafe_warning (void)
{
  if (CLIB_DEBUG > 0)
    {
      if (vlib_get_thread_index ())
        fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
    }
}

typedef enum
{
  VLIB_WORKER_THREAD_FORK_FIXUP_ILLEGAL = 0,
  VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX,
} vlib_fork_fixup_t;

void vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which);

#define foreach_vlib_main(body)                         \
do {                                                    \
  vlib_main_t ** __vlib_mains = 0, *this_vlib_main;     \
  int ii;                                               \
                                                        \
  for (ii = 0; ii < vec_len (vlib_mains); ii++)         \
    {                                                   \
      this_vlib_main = vlib_mains[ii];                  \
      ASSERT (ii == 0 ||                                \
              this_vlib_main->parked_at_barrier == 1);  \
      if (this_vlib_main)                               \
        vec_add1 (__vlib_mains, this_vlib_main);        \
    }                                                   \
                                                        \
  for (ii = 0; ii < vec_len (__vlib_mains); ii++)       \
    {                                                   \
      this_vlib_main = __vlib_mains[ii];                \
      /* body uses this_vlib_main... */                 \
      (body);                                           \
    }                                                   \
  vec_free (__vlib_mains);                              \
} while (0);
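
/*
 * Editor's sketch (not part of the original header): 'body' runs once per
 * vlib_main_t with this_vlib_main bound to it, e.g.
 *
 *   foreach_vlib_main (({
 *     vlib_node_set_state (this_vlib_main, my_node.index,
 *                          VLIB_NODE_STATE_POLLING);
 *   }));
 *
 * (my_node is only an illustrative registration; any statement that
 * references this_vlib_main works.)
 */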

#define foreach_sched_policy \
  _(SCHED_OTHER, OTHER, "other") \
  _(SCHED_BATCH, BATCH, "batch") \
  _(SCHED_IDLE, IDLE, "idle") \
  _(SCHED_FIFO, FIFO, "fifo") \
  _(SCHED_RR, RR, "rr")

typedef enum
{
#define _(v,f,s) SCHED_POLICY_##f = v,
  foreach_sched_policy
#undef _
    SCHED_POLICY_N,
} sched_policy_t;

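/*
 * Editor's note (not part of the original header): the _() X-macro expands
 * foreach_sched_policy so each member equals the matching kernel policy,
 * e.g. SCHED_POLICY_RR = SCHED_RR, which lets a sched_policy value be
 * passed straight through to sched_setscheduler(2).
 */
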
typedef struct
{
  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
                                          unsigned cpu_id);
  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 cpu);
} vlib_thread_callbacks_t;

typedef struct
{
  /* Linked list of registrations, built by constructors */
  vlib_thread_registration_t *next;

  /* Vector of registrations, w/ non-data-structure clones at the top */
  vlib_thread_registration_t **registrations;

  uword *thread_registrations_by_name;

  vlib_worker_thread_t *worker_threads;

  /*
   * Launch all threads as pthreads,
   * not eal_rte_launch (strict affinity) threads
   */
  int use_pthreads;

  /* Number of vlib_main / vnet_main clones */
  u32 n_vlib_mains;

  /* Number of thread stacks to create */
  u32 n_thread_stacks;

  /* Number of pthreads */
  u32 n_pthreads;

  /* Number of threads */
  u32 n_threads;

  /* Number of cores to skip, must match the core mask */
  u32 skip_cores;

  /* Thread prefix name */
  u8 *thread_prefix;

  /* main thread lcore */
  u32 main_lcore;

  /* Bitmap of available CPU cores */
  uword *cpu_core_bitmap;

  /* Bitmap of available CPU sockets (NUMA nodes) */
  uword *cpu_socket_bitmap;

  /* Worker handoff queues */
  vlib_frame_queue_main_t *frame_queue_mains;

  /* worker thread initialization barrier */
  volatile u32 worker_thread_release;

  /* scheduling policy */
  u32 sched_policy;

  /* scheduling policy priority */
  u32 sched_priority;

  /* callbacks */
  vlib_thread_callbacks_t cb;
  int extern_thread_mgmt;

  /* NUMA-bound heap size */
  uword numa_heap_size;

} vlib_thread_main_t;

extern vlib_thread_main_t vlib_thread_main;

#include <vlib/global_funcs.h>

#define VLIB_REGISTER_THREAD(x,...)                     \
  __VA_ARGS__ vlib_thread_registration_t x;             \
static void __vlib_add_thread_registration_##x (void)  \
  __attribute__((__constructor__)) ;                    \
static void __vlib_add_thread_registration_##x (void)  \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  x.next = tm->next;                                    \
  tm->next = &x;                                        \
}                                                       \
static void __vlib_rm_thread_registration_##x (void)   \
  __attribute__((__destructor__)) ;                     \
static void __vlib_rm_thread_registration_##x (void)   \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
}                                                       \
__VA_ARGS__ vlib_thread_registration_t x

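/*
 * Editor's sketch (not part of the original header): the constructor above
 * links the registration onto vlib_thread_main.next at program load; the
 * destructor unlinks it. The canonical use is the "workers" registration
 * in threads.c:
 *
 *   VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
 *     .name = "workers",
 *     .short_name = "wk",
 *     .function = vlib_worker_thread_fn,
 *   };
 */
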
static inline u32
vlib_num_workers ()
{
  return vlib_thread_main.n_vlib_mains - 1;
}

static inline u32
vlib_get_worker_thread_index (u32 worker_index)
{
  return worker_index + 1;
}

static inline u32
vlib_get_worker_index (u32 thread_index)
{
  return thread_index - 1;
}

static inline u32
vlib_get_current_worker_index ()
{
  return vlib_get_thread_index () - 1;
}

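/*
 * Editor's note (not part of the original header): thread 0 is the main
 * thread, so worker w runs as thread w + 1 and the helpers above are
 * inverses of one another:
 *
 *   vlib_get_worker_thread_index (vlib_get_worker_index (t)) == t, t > 0
 *   vlib_num_workers () == vlib_thread_main.n_vlib_mains - 1
 */
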
static inline void
vlib_worker_thread_barrier_check (void)
{
  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
    {
      vlib_main_t *vm = vlib_get_main ();
      u32 thread_index = vm->thread_index;
      f64 t = vlib_time_now (vm);

      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
        {
          vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
          /* *INDENT-OFF* */
          ELOG_TYPE_DECLARE (e) = {
            .format = "barrier-wait-thread-%d",
            .format_args = "i4",
          };
          /* *INDENT-ON* */

          struct
          {
            u32 thread_index;
          } __clib_packed *ed;

          ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                w->elog_track);
          ed->thread_index = thread_index;
        }

      if (CLIB_DEBUG > 0)
        {
          vm = vlib_get_main ();
          vm->parked_at_barrier = 1;
        }
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
      while (*vlib_worker_threads->wait_at_barrier)
        ;

      /*
       * Recompute the offset from thread-0 time.
       * Note that vlib_time_now adds vm->time_offset, so
       * clear it first. Save the resulting idea of "now", to
       * see how well we're doing. See show_clock_command_fn(...)
       */
      {
        f64 now;
        vm->time_offset = 0.0;
        now = vlib_time_now (vm);
        vm->time_offset = vlib_global_main.time_last_barrier_release - now;
        vm->time_last_barrier_release = vlib_time_now (vm);
      }

      if (CLIB_DEBUG > 0)
        vm->parked_at_barrier = 0;
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);

      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
        {
          if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
            {
              t = vlib_time_now (vm) - t;
              vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
              /* *INDENT-OFF* */
              ELOG_TYPE_DECLARE (e) = {
                .format = "barrier-refork-thread-%d",
                .format_args = "i4",
              };
              /* *INDENT-ON* */

              struct
              {
                u32 thread_index;
              } __clib_packed *ed;

              ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                    w->elog_track);
              ed->thread_index = thread_index;
            }

          vlib_worker_thread_node_refork ();
          clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
                                 -1);
          while (*vlib_worker_threads->node_reforks_required)
            ;
        }
      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
        {
          t = vlib_time_now (vm) - t;
          vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
          /* *INDENT-OFF* */
          ELOG_TYPE_DECLARE (e) = {
            .format = "barrier-released-thread-%d: %dus",
            .format_args = "i4i4",
          };
          /* *INDENT-ON* */

          struct
          {
            u32 thread_index;
            u32 duration;
          } __clib_packed *ed;

          ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
                                w->elog_track);
          ed->thread_index = thread_index;
          ed->duration = (int) (1000000.0 * t);
        }
    }
}

static inline vlib_main_t *
vlib_get_worker_vlib_main (u32 worker_index)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
  vm = vlib_mains[worker_index + 1];
  ASSERT (vm);
  return vm;
}

static inline u8
vlib_thread_is_main_w_barrier (void)
{
  return (!vlib_num_workers ()
          || ((vlib_get_thread_index () == 0
               && vlib_worker_threads->wait_at_barrier[0])));
}

static inline void
vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER ();
  hf->valid = 1;
}
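
/*
 * Editor's sketch (not part of the original header): CLIB_MEMORY_BARRIER
 * orders the element's contents before the valid flag, so a consumer that
 * sees valid == 1 also sees the finished element. Producers therefore
 * fill first, publish last:
 *
 *   elt->buffer_index[elt->n_vectors++] = buffer_index;
 *   vlib_put_frame_queue_elt (elt);
 */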

static inline vlib_frame_queue_elt_t *
vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  u64 new_tail;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  new_tail = clib_atomic_add_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
    vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}

static inline vlib_frame_queue_t *
is_vlib_frame_queue_congested (u32 frame_queue_index,
                               u32 index,
                               u32 queue_hi_thresh,
                               vlib_frame_queue_t **
                               handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);

  fq = handoff_queue_by_worker_index[index];
  if (fq != (vlib_frame_queue_t *) (~0))
    return fq;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
    {
      /* a valid entry in the array will indicate the queue has reached
       * the specified threshold and is congested
       */
      handoff_queue_by_worker_index[index] = fq;
      fq->enqueue_full_events++;
      return fq;
    }

  return NULL;
}

static inline vlib_frame_queue_elt_t *
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
                                   u32 vlib_worker_index,
                                   vlib_frame_queue_elt_t **
                                   handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
    return handoff_queue_elt_by_worker_index[vlib_worker_index];

  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);

  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

  return elt;
}
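
/*
 * Editor's sketch (not part of the original header): a typical handoff
 * path caches one element per destination worker and publishes a full
 * frame before grabbing a fresh element:
 *
 *   vlib_frame_queue_elt_t *elt =
 *     vlib_get_worker_handoff_queue_elt (fq_index, next_worker,
 *                                        handoff_elt_by_worker);
 *   elt->buffer_index[elt->n_vectors++] = bi;
 *   if (elt->n_vectors == VLIB_FRAME_SIZE)
 *     {
 *       vlib_put_frame_queue_elt (elt);
 *       handoff_elt_by_worker[next_worker] = 0;
 *     }
 *
 * (fq_index, next_worker, bi and handoff_elt_by_worker are the caller's
 * state; they are illustrative names, not declarations from this header.)
 */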

u8 *vlib_thread_stack_init (uword thread_index);
int vlib_thread_cb_register (struct vlib_main_t *vm,
                             vlib_thread_callbacks_t * cb);
extern void *rpc_call_main_thread_cb_fn;

void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
                                     args);
void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id);


#endif /* included_vlib_threads_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */