s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
#define foreach_session_queue_error            \
  _(TX, "Packets transmitted")                 \
  _(TIMER, "Timer events")                     \
  _(NO_BUFFER, "Out of buffers")

#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
#define _(sym,string) string,

u32 left_from_seg, u32 * left_to_snd0,
u16 * n_bufs, u32 * tx_offset, u16 deq_per_buf,
u32 chain_bi0, to_deq;
u16 len_to_deq0, n_bytes_read;

to_deq = left_from_seg;
for (j = 1; j < n_bufs_per_seg; j++)

len_to_deq0 = clib_min (to_deq, deq_per_buf);

chain_bi0 = smm->tx_buffers[thread_index][*n_bufs];
_vec_len (smm->tx_buffers[thread_index]) = *n_bufs;

n_bytes_read = svm_fifo_peek (fifo, *tx_offset, len_to_deq0, data0);
*tx_offset += n_bytes_read;

ASSERT (n_bytes_read == len_to_deq0);

to_deq -= n_bytes_read;

*left_to_snd0 -= left_from_seg;
session_fifo_event_t * e0,
int *n_tx_packets, u8 peek_data)

u32 left_to_snd0, max_len_to_snd0, len_to_deq0, snd_space0;
u32 n_bufs_per_evt, n_frames_per_evt, n_bufs_per_frame;
u32 next_index, next0, *to_next, n_left_to_next, bi0;
u32 tx_offset = 0, max_dequeue0, n_bytes_per_seg, left_for_seg;
u16 snd_mss0, n_bufs_per_seg, n_bufs;
u32 n_bytes_per_buf, deq_per_buf, deq_per_first_buf;
u32 buffers_allocated, buffers_allocated_this_call;
next_index = next0 = smm->session_type_to_next[s0->session_type];

tc0 = transport_vft->get_connection (s0->connection_index, thread_index);

snd_mss0 = transport_vft->send_mss (tc0);
snd_space0 = transport_vft->send_space (tc0);

if (snd_space0 == 0 || snd_mss0 == 0)

vec_add1 (smm->pending_event_vector[thread_index], *e0);

tx_offset = transport_vft->tx_fifo_offset (tc0);

max_dequeue0 -= tx_offset;

if (max_dequeue0 == 0)

if (max_dequeue0 < snd_space0)

max_len_to_snd0 = (max_dequeue0 > snd_mss0) ?
  max_dequeue0 - max_dequeue0 % snd_mss0 : max_dequeue0;

max_len_to_snd0 = snd_space0;
n_bufs_per_seg = ceil ((double) n_bytes_per_seg / n_bytes_per_buf);
n_bufs_per_evt = ceil ((double) max_len_to_snd0 / n_bytes_per_seg);

deq_per_buf = clib_min (snd_mss0, n_bytes_per_buf);

n_bufs = vec_len (smm->tx_buffers[thread_index]);
left_to_snd0 = max_len_to_snd0;
for (i = 0; i < n_frames_per_evt; i++)

n_bufs + n_bufs_per_frame - 1);
buffers_allocated = 0;

buffers_allocated += buffers_allocated_this_call;

while (buffers_allocated_this_call > 0
       && ((buffers_allocated + n_bufs < n_bufs_per_frame)));

n_bufs += buffers_allocated;
_vec_len (smm->tx_buffers[thread_index]) = n_bufs;

vec_add1 (smm->pending_event_vector[thread_index], *e0);

ASSERT (n_bufs >= n_bufs_per_frame);
while (left_to_snd0 && n_left_to_next)

bi0 = smm->tx_buffers[thread_index][--n_bufs];
_vec_len (smm->tx_buffers[thread_index]) = n_bufs;

b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;

len_to_deq0 = clib_min (left_to_snd0, deq_per_first_buf);

if (n_bytes_read <= 0)

tx_offset += n_bytes_read;

if (n_bytes_read <= 0)

left_to_snd0 -= n_bytes_read;
*n_tx_packets = *n_tx_packets + 1;

left_for_seg = clib_min (snd_mss0 - n_bytes_read, left_to_snd0);

s0->server_tx_fifo, b0, bi0, n_bufs_per_seg, left_for_seg,
&left_to_snd0, &n_bufs, &tx_offset, deq_per_buf, peek_data);

transport_vft->push_header (tc0, b0);

ed->data[0] = e0->event_type;
ed->data[1] = max_dequeue0;
ed->data[2] = len_to_deq0;
ed->data[3] = left_to_snd0;

to_next, n_left_to_next,

if (max_len_to_snd0 < max_dequeue0)

vec_add1 (smm->pending_event_vector[thread_index], *e0);

vec_add1 (smm->pending_event_vector[thread_index], *e0);
_vec_len (smm->tx_buffers[thread_index]) += 1;
session_fifo_event_t * e0,

session_fifo_event_t * e0,

session_fifo_event_t _e, *e = &_e;

q = smm->vpp_event_queues[my_thread_index];

for (i = 0; i < q->cursize; i++)

headp = (i8 *) (&q->data[0] + q->elsize * index);

switch (e->event_type)

fformat (stdout, "[%04d] TX session %d\n", i, s0->session_index);

fformat (stdout, "[%04d] disconnect session %d\n", i,

fformat (stdout, "[%04d] builtin_rx %d\n", i, s0->session_index);

fformat (stdout, "[%04d] RPC call %llx with %llx\n",
         i, (u64) (e->rpc_args.fp), (u64) (e->rpc_args.arg));

fformat (stdout, "[%04d] unhandled event type %d\n",

if (index == q->maxsize)
switch (e->event_type)

if (s->server_rx_fifo == f || s->server_tx_fifo == f)

session_fifo_event_t *pending_event_vector, *evt;
int i, index, found = 0;

thread_index = f->master_thread_index;

q = smm->vpp_event_queues[thread_index];

for (i = 0; i < q->cursize; i++)

headp = (i8 *) (&q->data[0] + q->elsize * index);

if (++index == q->maxsize)

pending_event_vector = smm->pending_event_vector[thread_index];
session_fifo_event_t *my_pending_event_vector, *pending_disconnects, *e;
session_fifo_event_t *my_fifo_events;
u32 n_to_dequeue, n_events;

int n_tx_packets = 0;

q = smm->vpp_event_queues[my_thread_index];

my_fifo_events = smm->free_event_vector[my_thread_index];

n_to_dequeue = q->cursize;
my_pending_event_vector = smm->pending_event_vector[my_thread_index];
pending_disconnects = smm->pending_disconnects[my_thread_index];

if (!n_to_dequeue && !vec_len (my_pending_event_vector)
    && !vec_len (pending_disconnects))

if (0 && vec_len (my_pending_event_vector) >= 100)

if (pthread_mutex_trylock (&q->mutex))

for (i = 0; i < n_to_dequeue; i++)

if (q->cursize < (q->maxsize / 8))
  (void) pthread_cond_broadcast (&q->condvar);
pthread_mutex_unlock (&q->mutex);

vec_append (my_fifo_events, my_pending_event_vector);
vec_append (my_fifo_events, smm->pending_disconnects[my_thread_index]);

_vec_len (my_pending_event_vector) = 0;
smm->pending_event_vector[my_thread_index] = my_pending_event_vector;
_vec_len (smm->pending_disconnects[my_thread_index]) = 0;

n_events = vec_len (my_fifo_events);
for (i = 0; i < n_events; i++)

session_fifo_event_t *e0;

e0 = &my_fifo_events[i];

switch (e0->event_type)

vec_add1 (smm->pending_event_vector[my_thread_index], *e0);

rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0,

SESSION_QUEUE_ERROR_NO_BUFFER, 1);

vec_add1 (smm->pending_disconnects[my_thread_index], *e0);

app->cb_fns.builtin_server_rx_callback (s0);

fp = e0->rpc_args.fp;
(*fp) (e0->rpc_args.arg);

clib_warning ("unhandled event type %d", e0->event_type);

_vec_len (my_fifo_events) = 0;
smm->free_event_vector[my_thread_index] = my_fifo_events;

SESSION_QUEUE_ERROR_TX, n_tx_packets);

.name = "session-queue",

.state = VLIB_NODE_STATE_DISABLED,
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
vlib_main_t vlib_global_main
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
struct _transport_connection transport_connection_t
static f64 vlib_time_now(vlib_main_t *vm)
struct _transport_proto_vft transport_proto_vft_t
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
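As a quick illustration of the vector API used throughout this node, a minimal, self-contained sketch (variable names are illustrative only):
#include <vppinfra/vec.h>

u32 *v = 0;                   /* clib vectors start out as NULL pointers */
vec_add1 (v, 42);             /* append one element, growing the vector */
vec_add1 (v, 43);
ASSERT (vec_len (v) == 2);    /* vec_len is NULL tolerant */
vec_free (v);                 /* release the storage when done */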
struct _vlib_node_registration vlib_node_registration_t
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
static clib_error_t * session_queue_exit(vlib_main_t *vm)
static uword session_queue_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
void session_node_enable_disable(u8 is_en)
vlib_main_t ** vlib_mains
void transport_update_time(f64 time_now, u8 thread_index)
#define vlib_worker_thread_barrier_sync(X)
struct _svm_fifo svm_fifo_t
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
static int session_tx_fifo_read_and_snd_i(vlib_main_t *vm, vlib_node_runtime_t *node, session_manager_main_t *smm, session_fifo_event_t *e0, stream_session_t *s0, u32 thread_index, int *n_tx_packets, u8 peek_data)
void dump_thread_0_event_queue(void)
#define VLIB_BUFFER_NEXT_PRESENT
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
struct _stream_session_t stream_session_t
static char * session_queue_error_strings[]
transport_proto_vft_t * transport_protocol_get_vft(transport_proto_t transport_proto)
Get transport virtual function table.
static transport_proto_t session_get_transport_proto(stream_session_t *s)
static session_manager_main_t * vnet_get_session_manager_main()
u16 current_length
Nbytes between current data and the end of this buffer.
static void * vlib_buffer_make_headroom(vlib_buffer_t *b, u8 size)
Make head room, typically for packet headers.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
int session_tx_fifo_peek_and_snd(vlib_main_t *vm, vlib_node_runtime_t *node, session_manager_main_t *smm, session_fifo_event_t *e0, stream_session_t *s0, u32 thread_index, int *n_tx_pkts)
static u32 vlib_buffer_free_list_buffer_size(vlib_main_t *vm, u32 free_list_index)
static void svm_fifo_unset_event(svm_fifo_t *f)
Unsets fifo event flag.
int unix_shared_memory_queue_sub_raw(unix_shared_memory_queue_t *q, u8 *elem)
static stream_session_t * session_get_if_valid(u64 si, u32 thread_index)
u32 node_index
Node index.
struct _session_manager_main session_manager_main_t
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
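For reference, this is how the node's TX counter (declared via foreach_session_queue_error) is bumped at the end of a dispatch pass; a sketch consistent with the fragment near the end of the listing:
/* account for the packets sent in this frame under the TX error counter */
vlib_node_increment_counter (vm, session_queue_node.index,
                             SESSION_QUEUE_ERROR_TX, n_tx_packets);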
static u8 svm_fifo_set_event(svm_fifo_t *f)
Sets fifo event flag.
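A hedged sketch of the set/unset handshake these two helpers enable, assuming svm_fifo_set_event returns nonzero only when the flag was previously clear:
/* producer: enqueue a new IO event only if the fifo was not already flagged */
if (svm_fifo_set_event (f))
  {
    /* post one event for this fifo on the vpp event queue */
  }

/* consumer: clear the flag before draining, so a concurrent enqueue
   re-arms the notification */
svm_fifo_unset_event (f);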
#define SESSION_EVT_DBG(_evt, _args...)
The fine-grained event logger allows lightweight, thread-safe event logging at minimum cost...
#define VLIB_MAIN_LOOP_EXIT_FUNCTION(x)
#define clib_warning(format, args...)
#define clib_memcpy(a, b, c)
struct _application application_t
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
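Together with vlib_get_next_frame and vlib_validate_buffer_enqueue_x1 above, this follows the standard VPP enqueue pattern; a condensed sketch assuming a single next index (bi0 is a buffer index obtained elsewhere):
u32 *to_next, n_left_to_next, next0 = next_index;

vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_to_next > 0 /* && work remains */)
  {
    to_next[0] = bi0;              /* hand the buffer to the next frame */
    to_next += 1;
    n_left_to_next -= 1;
    /* fix up the enqueue if next0 ends up differing from next_index */
    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                     n_left_to_next, bi0, next0);
  }
vlib_put_next_frame (vm, node, next_index, n_left_to_next);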
#define foreach_session_queue_error
static u8 session_node_cmp_event(session_fifo_event_t *e, svm_fifo_t *f)
u32 next_buffer
Next buffer for this linked-list of buffers.
#define vec_append(v1, v2)
Append v2 after v1.
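The listing uses vec_append together with _vec_len to drain the per-thread pending vectors without freeing their storage; in sketch form:
/* move pending events into this cycle's work vector */
vec_append (my_fifo_events, my_pending_event_vector);

/* keep the allocation, but mark the pending vector empty for reuse */
_vec_len (my_pending_event_vector) = 0;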
void stream_session_disconnect(stream_session_t *s)
Disconnect session and propagate to transport.
static stream_session_t * session_event_get_session(session_fifo_event_t *e, u8 thread_index)
static u8 * format_session_queue_trace(u8 *s, va_list *args)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
int session_tx_fifo_dequeue_and_snd(vlib_main_t *vm, vlib_node_runtime_t *node, session_manager_main_t *smm, session_fifo_event_t *e0, stream_session_t *s0, u32 thread_index, int *n_tx_pkts)
static stream_session_t * session_get_from_handle(u64 handle)
enum _transport_proto transport_proto_t
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
application_t * application_get(u32 index)
vlib_node_registration_t session_queue_node
(constructor) VLIB_REGISTER_NODE (session_queue_node)
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
#define VLIB_REGISTER_NODE(x,...)
#define vec_foreach(var, vec)
Vector iterator.
static void session_tx_fifo_chain_tail(session_manager_main_t *smm, vlib_main_t *vm, u8 thread_index, svm_fifo_t *fifo, vlib_buffer_t *b0, u32 bi0, u8 n_bufs_per_seg, u32 left_from_seg, u32 *left_to_snd0, u16 *n_bufs, u32 *tx_offset, u16 deq_per_buf, u8 peek_data)
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
u8 session_node_lookup_fifo_event(svm_fifo_t *f, session_fifo_event_t *e)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
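A simplified, single-shot sketch of the buffer-cache growth the fragments above perform, using vlib_buffer_alloc for brevity (the listing shows the real code retrying in a do/while until a frame's worth of buffers is available):
/* grow the per-thread tx buffer cache, then trim to what was really allocated */
vec_validate (smm->tx_buffers[thread_index],
              n_bufs + n_bufs_per_frame - 1);
buffers_allocated =
  vlib_buffer_alloc (vm, &smm->tx_buffers[thread_index][n_bufs],
                     n_bufs_per_frame);
n_bufs += buffers_allocated;
_vec_len (smm->tx_buffers[thread_index]) = n_bufs;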
int svm_fifo_peek(svm_fifo_t *f, u32 relative_offset, u32 max_bytes, u8 *copy_here)
int svm_fifo_dequeue_nowait(svm_fifo_t *f, u32 max_bytes, u8 *copy_here)
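The peek_data flag threaded through the tx functions selects between these two calls; a hedged sketch of the difference (tx_offset only advances in the peek case, since peeked bytes remain in the fifo until acknowledged):
if (peek_data)
  /* connection-oriented transports: leave data in the fifo for retransmit */
  n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, tx_offset,
                                len_to_deq0, data0);
else
  /* datagram-style transports: consume the bytes immediately */
  n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo,
                                          len_to_deq0, data0);

if (peek_data && n_bytes_read > 0)
  tx_offset += n_bytes_read;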
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
struct _unix_shared_memory_queue unix_shared_memory_queue_t