/* format_session_queue_trace: render one session-queue trace record */
  s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
              t->session_index, t->server_thread_index);
#define foreach_session_queue_error             \
  _(TX, "Packets transmitted")                  \
  _(TIMER, "Timer events")                      \
  _(NO_BUFFER, "Out of buffers")

#define _(sym,str) SESSION_QUEUE_ERROR_##sym,   /* error enum entries */
#define _(sym,string) string,                   /* error string entries */

/* session_tx_trace_frame (vm, node, next_index, to_next, n_segs, s, n_trace):
 * trace the first clib_min (n_trace, n_segs) buffers of the outgoing frame */
  for (i = 0; i < clib_min (n_trace, n_segs); i++)
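/* Illustration (not from the original file): how the foreach_session_queue_error
 * X-macro above typically expands into the node's error enum and the matching
 * string table. Only the macro itself appears in the listing; the enum/array
 * wrapper shown here is the usual VLIB pattern and is an assumption. */
typedef enum
{
#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
  foreach_session_queue_error
#undef _
  SESSION_QUEUE_N_ERROR,
} session_queue_error_t;

static char *session_queue_error_strings[] = {
#define _(sym,string) string,
  foreach_session_queue_error
#undef _
};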
/* session_tx_fifo_chain_tail: build the buffer chain for data that does not
 * fit in the first buffer of the segment */
  u32 chain_bi0, to_deq, left_from_seg;
  u16 len_to_deq, n_bytes_read;
  ...
  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  ...
  to_deq = left_from_seg;
  ...
  /* next chained buffer comes from the per-thread pre-allocated cache */
  chain_bi0 = smm->tx_buffers[ctx->s->thread_index][*n_bufs];
  _vec_len (smm->tx_buffers[ctx->s->thread_index]) = *n_bufs;
  ...
  /* datagram sessions consume data relative to the per-datagram header */
  session_dgram_hdr_t *hdr = &ctx->hdr;
  deq_now = clib_min (hdr->data_length - hdr->data_offset, len_to_deq);
  ...
  ASSERT (n_bytes_read > 0);
  hdr->data_offset += n_bytes_read;
  if (hdr->data_offset == hdr->data_length)
  ...
  /* stream sessions */
  ASSERT (n_bytes_read == len_to_deq);
  ...
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
  ...
  to_deq -= n_bytes_read;
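/* Illustration (simplified mock, not the real vlib_buffer_t layout): the
 * bookkeeping the chaining code above maintains. The head buffer carries
 * VLIB_BUFFER_TOTAL_LENGTH_VALID and an accumulated
 * total_length_not_including_first_buffer; every buffer that gains a successor
 * gets VLIB_BUFFER_NEXT_PRESENT and a next_buffer index. All names here are
 * mock stand-ins. */
#include <stdint.h>

#define MOCK_NEXT_PRESENT        (1 << 0)
#define MOCK_TOTAL_LENGTH_VALID  (1 << 1)

typedef struct
{
  uint32_t flags;
  uint32_t next_buffer;         /* index of the next buffer in the chain */
  uint16_t current_length;      /* bytes of payload in this buffer */
  uint32_t total_length_not_including_first_buffer;
} mock_buffer_t;

static void
mock_chain_append (mock_buffer_t * bufs, uint32_t head_bi, uint32_t prev_bi,
                   uint32_t new_bi, uint16_t bytes_in_new)
{
  bufs[head_bi].flags |= MOCK_TOTAL_LENGTH_VALID;
  bufs[head_bi].total_length_not_including_first_buffer += bytes_in_new;
  bufs[prev_bi].flags |= MOCK_NEXT_PRESENT;
  bufs[prev_bi].next_buffer = new_bi;
  bufs[new_bi].current_length = bytes_in_new;
}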
/* session_output_try_get_buffers (vm, smm, thread_index, n_bufs, wanted):
 * top up the per-thread tx buffer cache to at least 'wanted' entries */
  ...
  _vec_len (smm->tx_buffers[thread_index]) = *n_bufs;
/* session_tx_fill_buffer: copy the next chunk of fifo data into buffer b */
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  ...
  ASSERT (n_bytes_read > 0);
  ...
  /* datagram sessions consume data relative to the per-datagram header */
  session_dgram_hdr_t *hdr = &ctx->hdr;
  ...
  ASSERT (hdr->data_length > hdr->data_offset);
  deq_now = clib_min (hdr->data_length - hdr->data_offset, len_to_deq);
  ...
  ASSERT (n_bytes_read > 0);
  /* connection-less transports take the destination from the datagram header */
  ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
  ctx->tc->rmt_port = hdr->rmt_port;
  ...
  hdr->data_offset += n_bytes_read;
  if (hdr->data_offset == hdr->data_length)
  ...
  ASSERT (n_bytes_read > 0);
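/* Illustration (simplified mock, not the real svm_fifo API): the datagram
 * accounting used above. Each datagram sits in the fifo as a fixed-size header
 * followed by data_length payload bytes; data_offset tracks how much of the
 * current datagram has already been copied out. All names here are mock
 * stand-ins. */
#include <stdint.h>
#include <string.h>

typedef struct
{
  uint32_t data_length;         /* total payload bytes of this datagram */
  uint32_t data_offset;         /* payload bytes already consumed */
} mock_dgram_hdr_t;

#define mock_min(a, b) ((a) < (b) ? (a) : (b))

/* Copy up to 'chunk' payload bytes into 'dst'; return 1 when the datagram is
 * fully consumed and its header can be dropped from the fifo. */
static int
mock_dgram_read (mock_dgram_hdr_t * hdr, const uint8_t * payload,
                 uint8_t * dst, uint32_t chunk, uint32_t * n_read)
{
  uint32_t deq_now = mock_min (hdr->data_length - hdr->data_offset, chunk);
  memcpy (dst, payload + hdr->data_offset, deq_now);
  hdr->data_offset += deq_now;
  *n_read = deq_now;
  return hdr->data_offset == hdr->data_length;
}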
  ed->data[0] = FIFO_EVENT_APP_TX;
  ed->data[1] = ctx->max_dequeue;
  ed->data[2] = len_to_deq;
  ed->data[3] = ctx->left_to_snd;
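/* Illustration (assumed context; the listing above only shows the ed->data[]
 * stores): a debug record like this is typically obtained from vppinfra's
 * event logger before being filled in. ELOG_TYPE_DECLARE and ELOG_DATA are the
 * standard elog macros; the format string and track choice here are
 * assumptions, not the file's actual debug code. */
  ELOG_TYPE_DECLARE (e) =
  {
    .format = "deq-node: evt %d max-deq %d chunk %d left %d",
    .format_args = "i4i4i4i4",
  };
  struct { u32 data[4]; } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->data[0] = FIFO_EVENT_APP_TX;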
/* session_tx_get_transport: look up the transport connection through the
 * transport's virtual function table */
  return ctx->transport_vft->get_connection (ctx->s->connection_index,
                                             ctx->s->thread_index);
  ...
  return ctx->transport_vft->get_listener (ctx->s->connection_index);
  ...
  return ctx->transport_vft->get_connection (ctx->s->connection_index,
                                             ctx->s->thread_index);
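/* Illustration (simplified; get_connection/get_listener are the members shown
 * above, everything else is a mock stand-in): the virtual-function-table
 * dispatch used by session_tx_get_transport. Each transport registers a table
 * of function pointers and the session layer calls through it without knowing
 * which transport it is talking to. */
#include <stdint.h>

typedef struct mock_connection { uint32_t c_index; } mock_connection_t;

typedef struct
{
  mock_connection_t *(*get_connection) (uint32_t conn_index, uint32_t thread_index);
  mock_connection_t *(*get_listener) (uint32_t conn_index);
} mock_transport_vft_t;

static mock_connection_t *
mock_tx_get_transport (mock_transport_vft_t * vft, uint32_t conn_index,
                       uint32_t thread_index, int is_listener)
{
  if (is_listener)
    return vft->get_listener (conn_index);
  return vft->get_connection (conn_index, thread_index);
}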
/* session_tx_set_dequeue_params (vm, ctx, max_segs, peek_data): compute how
 * much fifo data to dequeue and how many buffers/segments that will take */
  u32 n_bytes_per_buf, n_bytes_per_seg;
/* session_tx_fifo_read_and_snd_i (vm, node, e, s, n_tx_packets, peek_data):
 * main tx routine; on any resource shortage the event is requeued on the
 * per-thread pending vector and retried on a later dispatch */
  u32 next_index, next0, next1, *to_next, n_left_to_next;
  u32 thread_index = s->thread_index, n_left, pbi;
  ...
  vec_add1 (smm->pending_event_vector[thread_index], *e);
  ...
  next_index = smm->session_type_to_next[s->session_type];
  next0 = next1 = next_index;
  ...
  vec_add1 (smm->pending_event_vector[thread_index], *e);
  ...
  n_bufs = vec_len (smm->tx_buffers[thread_index]);
  if (n_bufs < n_bufs_needed)
    ...
      vec_add1 (smm->pending_event_vector[thread_index], *e);
  ...
  /* dual loop: prefetch the buffers two iterations ahead */
  pbi = smm->tx_buffers[thread_index][n_bufs - 3];
  ...
  pbi = smm->tx_buffers[thread_index][n_bufs - 4];
  ...
  to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs];
  to_next[1] = bi1 = smm->tx_buffers[thread_index][--n_bufs];
  ...
  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                   n_left_to_next, bi0, bi1, next0, next1);
  ...
  /* single loop */
  to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs];
  ...
  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                   n_left_to_next, bi0, next0);
  ...
  _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
  ...
  vec_add1 (smm->pending_event_vector[thread_index], *e);
  ...
  vec_add1 (smm->pending_event_vector[thread_index], *e);
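/* Illustration (plain array instead of a VPP vector; all names are mock
 * stand-ins): the per-thread tx buffer cache pattern used above. Buffers are
 * pre-allocated in bulk, popped from the tail with tx_buffers[--n_bufs], and
 * the vector length is trimmed back to n_bufs once the frame has been built. */
#include <stdint.h>

typedef struct
{
  uint32_t tx_buffers[256];     /* cached, pre-allocated buffer indices */
  uint16_t n_bufs;              /* how many of them are still unused */
} mock_buffer_cache_t;

/* Pop one cached buffer index, or return -1 if the cache needs a refill */
static int
mock_cache_pop (mock_buffer_cache_t * c, uint32_t * bi)
{
  if (c->n_bufs == 0)
    return -1;                  /* caller refills, or requeues the tx event */
  *bi = c->tx_buffers[--c->n_bufs];
  return 0;
}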
/* session_tx_fifo_peek_and_snd, _dequeue_and_snd and _dequeue_internal all
 * share the (vm, node, e0, s0, n_tx_pkts) signature; the built-in application
 * path simply invokes the application's tx callback: */
  return app->cb_fns.builtin_app_tx_callback (s0);
/* dump_thread_0_event_queue: debug helper; walks the main-thread event queue
 * in place and prints every pending event */
  session_fifo_event_t _e, *e = &_e;
  ...
  q = smm->vpp_event_queues[my_thread_index];
  ...
  for (i = 0; i < q->cursize; i++)
    {
      headp = (i8 *) (&q->data[0] + q->elsize * index);
      ...
      switch (e->event_type)
        {
          ...
          fformat (stdout, "[%04d] TX session %d\n", i, s0->session_index);
          ...
          fformat (stdout, "[%04d] disconnect session %d\n", i,
                   s0->session_index);
          ...
          fformat (stdout, "[%04d] builtin_rx %d\n", i, s0->session_index);
          ...
          fformat (stdout, "[%04d] RPC call %llx with %llx\n",
                   i, (u64) (e->rpc_args.fp), (u64) (e->rpc_args.arg));
          ...
          fformat (stdout, "[%04d] unhandled event type %d\n",
                   i, e->event_type);
        }
      ...
      if (index == q->maxsize)
/* session_node_cmp_event: does event e refer to fifo f? */
  switch (e->event_type)
  ...
  if (s->server_rx_fifo == f || s->server_tx_fifo == f)
/* session_node_lookup_fifo_event: search both the thread's event queue and its
 * pending vectors for an event that references fifo f */
  session_fifo_event_t *pending_event_vector, *evt;
  int i, index, found = 0;
  ...
  thread_index = f->master_thread_index;
  ...
  q = smm->vpp_event_queues[thread_index];
  ...
  for (i = 0; i < q->cursize; i++)
    {
      headp = (i8 *) (&q->data[0] + q->elsize * index);
      ...
      if (++index == q->maxsize)
        ...
    }
  pending_event_vector = smm->pending_event_vector[thread_index];
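/* Illustration (simplified mock of an svm_queue-style ring; all names are
 * stand-ins): the non-destructive scan used by the two functions above.
 * Elements are elsize bytes each; the walk starts at the queue head and wraps
 * when the index reaches maxsize, without consuming anything. */
#include <stdint.h>

typedef struct
{
  uint8_t *data;                /* maxsize * elsize bytes of storage */
  uint32_t elsize;              /* size of one queued element */
  uint32_t head;                /* index of the oldest element */
  uint32_t cursize;             /* number of queued elements */
  uint32_t maxsize;             /* ring capacity */
} mock_queue_t;

static void
mock_queue_scan (mock_queue_t * q, void (*visit) (void *elem, void *arg),
                 void *arg)
{
  uint32_t i, index = q->head;
  for (i = 0; i < q->cursize; i++)
    {
      uint8_t *headp = &q->data[0] + q->elsize * index;
      visit (headp, arg);
      if (++index == q->maxsize)
        index = 0;              /* wrap around */
    }
}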
/* session_queue_node_fn: the session-queue input node. Drains the thread's
 * event queue plus any previously deferred events, then dispatches each one. */
  session_fifo_event_t *my_pending_event_vector, *e;
  session_fifo_event_t *my_fifo_events;
  u32 n_to_dequeue, n_events;
  ...
  int n_tx_packets = 0;
  ...
  q = smm->vpp_event_queues[thread_index];
  ...
  my_fifo_events = smm->free_event_vector[thread_index];
  ...
  n_to_dequeue = q->cursize;
  my_pending_event_vector = smm->pending_event_vector[thread_index];
  ...
  /* nothing to do? */
  if (!n_to_dequeue && !vec_len (my_pending_event_vector)
      && !vec_len (smm->pending_disconnects[thread_index]))
    ...
  if (0 && vec_len (my_pending_event_vector) >= 100)
    ...
  /* grab the queue mutex without blocking; try again next dispatch if busy */
  if (pthread_mutex_trylock (&q->mutex))
    ...
  for (i = 0; i < n_to_dequeue; i++)
    ...
  /* wake up producers that may be waiting on a previously full queue */
  if (q->cursize < (q->maxsize / 8))
    (void) pthread_cond_broadcast (&q->condvar);
  pthread_mutex_unlock (&q->mutex);
  ...
  vec_append (my_fifo_events, my_pending_event_vector);
  vec_append (my_fifo_events, smm->pending_disconnects[thread_index]);
  ...
  _vec_len (my_pending_event_vector) = 0;
  smm->pending_event_vector[thread_index] = my_pending_event_vector;
  _vec_len (smm->pending_disconnects[thread_index]) = 0;
  ...
  n_events = vec_len (my_fifo_events);
  for (i = 0; i < n_events; i++)
    {
      session_fifo_event_t *e0;
      ...
      e0 = &my_fifo_events[i];
      switch (e0->event_type)
        {
          ...
          vec_add1 (smm->pending_event_vector[thread_index], *e0);
          ...
          rv = (smm->session_tx_fns[s0->session_type]) (vm, node, e0, s0,
                                                        &n_tx_packets);
          ...
          vlib_node_increment_counter (vm, node->node_index,
                                       SESSION_QUEUE_ERROR_NO_BUFFER, 1);
          ...
          /* disconnects are deferred so in-flight tx events drain first */
          vec_add1 (smm->pending_disconnects[thread_index], *e0);
          ...
          vec_add1 (smm->pending_disconnects[thread_index], *e0);
          ...
          app->cb_fns.builtin_app_rx_callback (s0);
          ...
          fp = e0->rpc_args.fp;
          (*fp) (e0->rpc_args.arg);
          ...
        default:
          clib_warning ("unhandled event type %d", e0->event_type);
        }
    }
  ...
  _vec_len (my_fifo_events) = 0;
  smm->free_event_vector[thread_index] = my_fifo_events;
  ...
  vlib_node_increment_counter (vm, node->node_index,
                               SESSION_QUEUE_ERROR_TX, n_tx_packets);
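/* Illustration (plain pthreads, simplified mock of the svm_queue consumer
 * side; all names are stand-ins): the locking pattern used above. The polling
 * node must never block, so it uses trylock; after draining it wakes any
 * producers that were blocked on a nearly full queue. */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
  pthread_mutex_t mutex;
  pthread_cond_t condvar;
  uint32_t cursize, maxsize, elsize, head;
  uint8_t *data;
} mock_svm_queue_t;

/* Copy out everything currently queued; returns the number of elements taken */
static uint32_t
mock_queue_drain (mock_svm_queue_t * q, uint8_t * out)
{
  uint32_t i, n;
  if (pthread_mutex_trylock (&q->mutex))
    return 0;                   /* producer holds the lock: retry next dispatch */
  n = q->cursize;
  for (i = 0; i < n; i++)
    {
      memcpy (out + i * q->elsize, q->data + q->head * q->elsize, q->elsize);
      q->head = (q->head + 1) == q->maxsize ? 0 : q->head + 1;
      q->cursize--;
    }
  if (q->cursize < q->maxsize / 8)
    (void) pthread_cond_broadcast (&q->condvar); /* unblock producers */
  pthread_mutex_unlock (&q->mutex);
  return n;
}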
/* VLIB_REGISTER_NODE (session_queue_node): registered disabled and enabled
 * later via session_node_enable_disable() */
  .name = "session-queue",
  ...
  .state = VLIB_NODE_STATE_DISABLED,
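/* Illustration (assumed field values; only .name and .state are visible in the
 * listing above): how an input node of this kind is typically registered, with
 * the error strings and trace formatter defined earlier wired in. */
VLIB_REGISTER_NODE (session_queue_node) =
{
  .function = session_queue_node_fn,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = ARRAY_LEN (session_queue_error_strings),
  .error_strings = session_queue_error_strings,
  .state = VLIB_NODE_STATE_DISABLED,
};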
/* session_queue_process: process node; waits up to 'timeout' seconds for
 * events such as SESSION_Q_PROCESS_FLUSH_FRAMES or SESSION_Q_PROCESS_STOP */
  f64 now, timeout = 1.0;
  uword *event_data = 0;
/* VLIB_REGISTER_NODE (session_queue_process_node) */
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
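/* Illustration (a sketch of the usual VPP process-node idiom; the listing only
 * shows the locals above and the symbols vlib_process_wait_for_event_or_clock,
 * vlib_process_get_events, vlib_time_now, transport_update_time,
 * vec_reset_length, SESSION_Q_PROCESS_FLUSH_FRAMES and SESSION_Q_PROCESS_STOP,
 * so the control flow here is an assumption): */
static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                       vlib_frame_t * f)
{
  f64 now, timeout = 1.0;
  uword *event_data = 0;
  uword event_type;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      now = vlib_time_now (vm);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
        {
        case SESSION_Q_PROCESS_FLUSH_FRAMES:
          /* flush outstanding frames by updating transport time */
          transport_update_time (now, 0);
          break;
        case SESSION_Q_PROCESS_STOP:
          timeout = 100000.0;   /* effectively park the process */
          break;
        case ~0:                /* clock expired, no explicit event */
          transport_update_time (now, 0);
          break;
        }
      vec_reset_length (event_data);
    }
  return 0;
}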
Symbols referenced above (from the generated cross-reference index):

vlib_main_t vlib_global_main
static void session_tx_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 *to_next, u16 n_segs, stream_session_t *s, u32 n_trace)
void ip_copy(ip46_address_t *dst, ip46_address_t *src, u8 is_ip4)
#define SESSION_Q_PROCESS_FLUSH_FRAMES
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt) - Suspend a cooperative multi-tasking thread; waits for an event or for the indicated number of seconds.
#define SESSION_CONN_HDR_LEN
struct _transport_connection transport_connection_t
static int session_output_try_get_buffers(vlib_main_t *vm, session_manager_main_t *smm, u32 thread_index, u16 *n_bufs, u32 wanted)
int session_tx_fifo_dequeue_internal(vlib_main_t *vm, vlib_node_runtime_t *node, session_fifo_event_t *e0, stream_session_t *s0, int *n_tx_pkts)
session_manager_main_t session_manager_main
static f64 vlib_time_now(vlib_main_t *vm)
#define vec_add1(V, E) - Add 1 element to end of vector (unspecified alignment).
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *data, u32 len)
#define vec_add2(V, P, N) - Add N elements to end of vector V, return pointer to new elements in P.
static clib_error_t * session_queue_exit(vlib_main_t *vm)
static uword session_queue_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
#define vec_validate_aligned(V, I, A) - Make sure vector is long enough for given index (no header, specified alignment).
static stream_session_t * session_get_from_handle(session_handle_t handle)
void session_node_enable_disable(u8 is_en)
vlib_main_t ** vlib_mains
static void session_tx_fill_buffer(vlib_main_t *vm, session_tx_context_t *ctx, vlib_buffer_t *b, u16 *n_bufs, u8 peek_data)
void transport_update_time(f64 time_now, u8 thread_index)
#define vec_reset_length(v) - Reset vector length to zero; NULL-pointer tolerant.
#define vlib_worker_thread_barrier_sync(X)
struct _svm_fifo svm_fifo_t
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
void dump_thread_0_event_queue(void)
i16 current_data - Signed offset in data[], pre_data[] that we are currently processing.
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector) - Return the first event type which has occurred and a vector of per-event data of that type.
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
#define vlib_prefetch_buffer_header(b, type) - Prefetch buffer metadata.
int session_tx_fifo_dequeue_and_snd(vlib_main_t *vm, vlib_node_runtime_t *node, session_fifo_event_t *e0, stream_session_t *s0, int *n_tx_pkts)
struct _stream_session_t stream_session_t
static char * session_queue_error_strings[]
transport_proto_vft_t * transport_protocol_get_vft(transport_proto_t transport_proto) - Get transport virtual function table.
static transport_proto_t session_get_transport_proto(stream_session_t *s)
static session_manager_main_t * vnet_get_session_manager_main()
int session_tx_fifo_peek_and_snd(vlib_main_t *vm, vlib_node_runtime_t *node, session_fifo_event_t *e0, stream_session_t *s0, int *n_tx_pkts)
u16 current_length - Number of bytes between current data and the end of this buffer.
static void * vlib_buffer_make_headroom(vlib_buffer_t *b, u8 size) - Make head room, typically for packet headers.
transport_proto_vft_t * transport_vft
static void * vlib_buffer_get_current(vlib_buffer_t *b) - Get pointer to current data to process.
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
static uword session_queue_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
static void svm_fifo_unset_event(svm_fifo_t *f) - Unsets fifo event flag.
static stream_session_t * session_get_if_valid(u64 si, u32 thread_index)
u32 node_index - Node index.
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1) - Finish enqueueing two buffers forward in the graph.
struct _session_manager_main session_manager_main_t
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0) - Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left) - Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
vlib_error_t error - Error code for buffers to be enqueued to error handler.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
static u8 svm_fifo_set_event(svm_fifo_t *f) - Sets fifo event flag.
#define SESSION_EVT_DBG(_evt, _args...) - Fine-grained event logger; allows lightweight, thread-safe event logging at minimum cost.
#define VLIB_REGISTER_NODE(x,...)
static void session_tx_fifo_chain_tail(vlib_main_t *vm, session_tx_context_t *ctx, vlib_buffer_t *b, u16 *n_bufs, u8 peek_data)
#define VLIB_MAIN_LOOP_EXIT_FUNCTION(x)
#define clib_warning(format, args...)
#define clib_memcpy(a, b, c)
struct _application application_t
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left) - Release pointer to next frame vector data.
#define SESSION_Q_PROCESS_STOP
#define foreach_session_queue_error
static u8 session_node_cmp_event(session_fifo_event_t *e, svm_fifo_t *f)
u32 next_buffer - Next buffer for this linked-list of buffers.
#define vec_append(v1, v2) - Append v2 after v1.
int svm_queue_sub_raw(svm_queue_t *q, u8 *elem)
static stream_session_t * session_event_get_session(session_fifo_event_t *e, u8 thread_index)
static u8 * format_session_queue_trace(u8 *s, va_list *args)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
u32 total_length_not_including_first_buffer - Only valid for first buffer in chain.
static void session_tx_set_dequeue_params(vlib_main_t *vm, session_tx_context_t *ctx, u32 max_segs, u8 peek_data)
struct _vlib_node_registration vlib_node_registration_t
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 max_bytes)
static u8 session_tx_not_ready(stream_session_t *s, u8 peek_data)
enum _transport_proto transport_proto_t
#define vec_len(v) - Number of elements in vector (rvalue-only, NULL tolerant).
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
application_t * application_get(u32 index)
static u32 vlib_buffer_free_list_buffer_size(vlib_main_t *vm, vlib_buffer_free_list_index_t index)
vlib_node_registration_t session_queue_node - (constructor) VLIB_REGISTER_NODE (session_queue_node)
struct _svm_queue svm_queue_t
struct clib_bihash_value offset - Template key/value backing page structure.
static transport_connection_t * session_tx_get_transport(session_tx_context_t *ctx, u8 peek_data)
transport_connection_t * tc
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
#define vec_foreach(var, vec) - Vector iterator.
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
u8 session_node_lookup_fifo_event(svm_fifo_t *f, session_fifo_event_t *e)
#define CLIB_CACHE_LINE_BYTES
u32 flags - Buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK (bits used to store free list index), VLIB_BUFFER_IS_TRACED (trace this buffer).
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers) - Allocate buffers into supplied array.
vlib_node_registration_t session_queue_process_node - (constructor) VLIB_REGISTER_NODE (session_queue_process_node)
int svm_fifo_peek(svm_fifo_t *f, u32 relative_offset, u32 max_bytes, u8 *copy_here)
int svm_fifo_dequeue_nowait(svm_fifo_t *f, u32 max_bytes, u8 *copy_here)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index) - Translate buffer index into buffer pointer.
void stream_session_disconnect_transport(stream_session_t *s) - Notify transport the session can be disconnected.
static int session_tx_fifo_read_and_snd_i(vlib_main_t *vm, vlib_node_runtime_t *node, session_fifo_event_t *e, stream_session_t *s, int *n_tx_packets, u8 peek_data)