#include <sys/eventfd.h>

ASSERT (elt_index < ring->nitems);
return (ring->data + elt_index * ring->elsize);

uword rings_sz = 0, mq_sz;
mq_sz = sizeof (svm_msg_q_t) + vec_sz + rings_sz + q_sz;

for (i = 0; i < cfg->n_rings; i++)
ring->data = rings_ptr;

u32 dist1, dist2, tail, head;
return (dist1 < dist2);

mq->q->consumer_evtfd = fd;
mq->q->producer_evtfd = fd;

if ((fd = eventfd (0, EFD_NONBLOCK)) < 0)

s = format (s, " [Q:%d/%d]", mq->q->cursize, mq->q->maxsize);
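The first fragments show how a ring element address is computed: a bounds check followed by pointer arithmetic over the ring's backing memory. A minimal standalone sketch of the same arithmetic, using a stand-in struct rather than the real svm_msg_q_ring_t (which also carries cursize, head and tail):

    /* Sketch only: mirrors the address computation shown above. */
    #include <assert.h>
    #include <stdint.h>

    typedef struct
    {
      uint32_t nitems;  /* max size of the ring */
      uint32_t elsize;  /* size of an element */
      uint8_t *data;    /* chunk of memory for msg data */
    } demo_ring_t;

    static void *
    demo_ring_data (demo_ring_t * ring, uint32_t elt_index)
    {
      /* element elt_index starts elt_index * elsize bytes into the chunk */
      assert (elt_index < ring->nitems);
      return (ring->data + elt_index * ring->elsize);
    }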
void svm_queue_add_raw(svm_queue_t *q, u8 *elem)
Add element to queue with mutex held.
svm_msg_q_ring_t * rings
rings with message data
void * svm_msg_q_msg_data(svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
Get data for message in queue.
static u8 svm_msg_q_ring_is_full(svm_msg_q_t *mq, u32 ring_index)
int svm_queue_add(svm_queue_t *q, u8 *elem, int nowait)
svm_msg_q_msg_t svm_msg_q_alloc_msg(svm_msg_q_t *mq, u32 nbytes)
Allocate message buffer.
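A minimal producer-side sketch combining svm_msg_q_alloc_msg and svm_msg_q_msg_data: reserve a ring element large enough for the payload and copy the payload into it. The queue pointer mq is assumed to come from svm_msg_q_alloc (below), my_payload_t is hypothetical, and locking is omitted here; see the lock/alloc helpers listed further down.

    #include <string.h>
    #include <svm/message_queue.h>

    typedef struct { u32 event; u32 session_index; } my_payload_t; /* hypothetical */

    static svm_msg_q_msg_t
    fill_one_msg (svm_msg_q_t * mq, my_payload_t * p)
    {
      svm_msg_q_msg_t msg;

      /* reserve a buffer on a ring whose element size can hold the payload */
      msg = svm_msg_q_alloc_msg (mq, sizeof (my_payload_t));
      /* copy the payload into the ring element backing this message */
      memcpy (svm_msg_q_msg_data (mq, &msg), p, sizeof (*p));
      return msg;  /* still needs svm_msg_q_add / svm_msg_q_add_and_unlock */
    }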
u8 * format_svm_msg_q(u8 *s, va_list *args)
Format message queue, shows msg count for each ring.
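Like other VPP format functions, this is normally driven through the %U specifier of vppinfra's format library; vec_free and the %v conversion used below are vppinfra helpers assumed to be available, not part of this reference.

    #include <svm/message_queue.h>

    /* print queue occupancy plus per-ring message counts for debugging */
    static void
    dump_mq (svm_msg_q_t * mq)
    {
      u8 *s = format (0, "%U", format_svm_msg_q, mq);
      clib_warning ("%v", s);
      vec_free (s);
    }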
#define clib_atomic_fetch_sub(a, b)
volatile u32 head
current head (for dequeue)
static svm_msg_q_ring_t * svm_msg_q_ring_inline(svm_msg_q_t *mq, u32 ring_index)
int svm_msg_q_lock_and_alloc_msg_w_ring(svm_msg_q_t *mq, u32 ring_index, u8 noblock, svm_msg_q_msg_t *msg)
Lock message queue and allocate message buffer on ring.
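A hedged sketch of the non-blocking producer path this helper enables: lock the queue and reserve a buffer on a specific ring in one call, fill it, then enqueue while still holding the mutex via svm_msg_q_add_and_unlock (listed below). The ring index and payload are placeholders, and the usual 0-on-success return convention is assumed.

    #include <string.h>
    #include <svm/message_queue.h>

    /* returns 0 on success, -1 if the queue was contended or the ring full */
    static int
    produce_noblock (svm_msg_q_t * mq, u32 ring_index, void *payload, u32 len)
    {
      svm_msg_q_msg_t msg;

      /* noblock = 1: fail instead of waiting if the mq is locked or the ring is full */
      if (svm_msg_q_lock_and_alloc_msg_w_ring (mq, ring_index, 1 /* noblock */, &msg))
        return -1;

      memcpy (svm_msg_q_msg_data (mq, &msg), payload, len);
      svm_msg_q_add_and_unlock (mq, &msg);  /* enqueue and release the mutex */
      return 0;
    }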
int svm_queue_sub(svm_queue_t *q, u8 *elem, svm_q_conditional_wait_t cond, u32 time)
static uword vec_header_bytes(uword header_bytes)
svm_msg_q_t * svm_msg_q_alloc(svm_msg_q_cfg_t *cfg)
Allocate message queue.
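A minimal allocation sketch, assuming svm_msg_q_ring_cfg_t carries at least the nitems and elsize members documented for rings here (the exact cfg layout lives in svm/message_queue.h); the ring sizes are arbitrary for the example.

    #include <svm/message_queue.h>

    static svm_msg_q_t *
    make_two_ring_mq (void)
    {
      /* two rings: 32 small (64 B) elements and 16 large (256 B) elements */
      svm_msg_q_ring_cfg_t rc[2] = {
        { .nitems = 32, .elsize = 64 },
        { .nitems = 16, .elsize = 256 },
      };
      svm_msg_q_cfg_t cfg = {
        .consumer_pid = 0,   /* pid of msg consumer, unknown here */
        .n_rings = 2,        /* number of msg rings */
        .q_nitems = 64,      /* msg queue size (not rings) */
        .ring_cfgs = rc,     /* array of ring cfgs */
      };

      return svm_msg_q_alloc (&cfg);  /* pair with svm_msg_q_free when done */
    }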
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
volatile u32 tail
current tail (for enqueue)
int svm_msg_q_sub(svm_msg_q_t *mq, svm_msg_q_msg_t *msg, svm_q_conditional_wait_t cond, u32 time)
Consumer dequeue one message from queue.
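A hedged consumer sketch built on svm_msg_q_sub: block for the next message, read its payload, then return the buffer to its ring with svm_msg_q_free_msg. SVM_Q_WAIT is assumed to be the blocking member of svm_q_conditional_wait_t from svm/queue.h, and my_payload_t is hypothetical.

    #include <svm/message_queue.h>

    typedef struct { u32 event; u32 session_index; } my_payload_t; /* hypothetical */

    static void
    consume_one (svm_msg_q_t * mq)
    {
      svm_msg_q_msg_t msg;
      my_payload_t *p;

      /* block until a message is available (timeout unused when waiting) */
      if (svm_msg_q_sub (mq, &msg, SVM_Q_WAIT, 0 /* time */))
        return;

      p = svm_msg_q_msg_data (mq, &msg);
      /* ... handle *p ... */
      (void) p;

      svm_msg_q_free_msg (mq, &msg);  /* give the ring element back */
    }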
void svm_msg_q_free(svm_msg_q_t *mq)
Free message queue.
int svm_msg_q_alloc_consumer_eventfd(svm_msg_q_t *mq)
Allocate event fd for queue consumer.
void svm_msg_q_set_producer_eventfd(svm_msg_q_t *mq, int fd)
Set event fd for queue producer.
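A sketch of the eventfd wiring implied by the source fragments above: the consumer allocates its own non-blocking eventfd on the queue, while a producer in another process adopts an fd it received out of band (e.g. over a unix socket, not shown). Reading the fd back through mq->q->consumer_evtfd matches the field assigned in the fragments, but treat that access as an assumption about the layout.

    #include <svm/message_queue.h>

    static int
    setup_consumer_notifications (svm_msg_q_t * mq)
    {
      /* creates an eventfd (EFD_NONBLOCK) and stores it on the queue */
      if (svm_msg_q_alloc_consumer_eventfd (mq))
        return -1;

      /* the fd can then be polled (epoll/select) to wait for enqueues */
      return mq->q->consumer_evtfd;
    }

    static void
    attach_producer_fd (svm_msg_q_t * mq, int fd_from_peer)
    {
      /* producer side: adopt an fd handed over by the consumer's process */
      svm_msg_q_set_producer_eventfd (mq, fd_from_peer);
    }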
svm_msg_q_ring_t * svm_msg_q_ring(svm_msg_q_t *mq, u32 ring_index)
Get message queue ring.
static int svm_msq_q_msg_is_valid(svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
static void svm_msg_q_wait(svm_msg_q_t *mq)
Wait for message queue event.
volatile u32 cursize
current size of the ring
struct svm_msg_q_ring_ svm_msg_q_ring_t
u32 n_rings
number of msg rings
Unidirectional shared-memory multi-ring message queue.
static void svm_msg_q_unlock(svm_msg_q_t *mq)
Unlock message queue.
#define clib_warning(format, args...)
svm_queue_t * q
queue for exchanging messages
int svm_msg_q_add(svm_msg_q_t *mq, svm_msg_q_msg_t *msg, int nowait)
Producer enqueue one message to queue.
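Pairing the allocation helpers with svm_msg_q_add: a non-blocking sketch that frees the buffer if the enqueue fails, assuming a non-zero return signals failure.

    #include <svm/message_queue.h>

    static int
    enqueue_or_drop (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
    {
      /* nowait = 1: return an error instead of blocking when the queue is full */
      if (svm_msg_q_add (mq, msg, 1 /* nowait */))
        {
          svm_msg_q_free_msg (mq, msg);  /* don't leak the ring element */
          return -1;
        }
      return 0;
    }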
u32 elt_index
index in ring
static int svm_msg_q_try_lock(svm_msg_q_t *mq)
Try locking message queue.
void svm_queue_free(svm_queue_t *q)
u32 ring_index
ring index, could be u8
void svm_msg_q_add_and_unlock(svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
Producer enqueue one message to queue with mutex held.
static void clib_mem_free(void *p)
struct svm_msg_q_ svm_msg_q_t
u8 * data
chunk of memory for msg data
int svm_queue_sub_raw(svm_queue_t *q, u8 *elem)
void svm_msg_q_set_consumer_eventfd(svm_msg_q_t *mq, int fd)
Set event fd for queue consumer.
svm_msg_q_ring_cfg_t * ring_cfgs
array of ring cfgs
#define clib_atomic_fetch_add(a, b)
u32 q_nitems
msg queue size (not rings)
void svm_msg_q_free_msg(svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
Free message buffer.
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
svm_queue_t * svm_queue_init(void *base, int nels, int elsize)
struct _svm_queue svm_queue_t
u32 elsize
size of an element
static void * clib_mem_alloc_aligned(uword size, uword align)
#define vec_foreach(var, vec)
Vector iterator.
int consumer_pid
pid of msg consumer
int svm_msg_q_alloc_producer_eventfd(svm_msg_q_t *mq)
Allocate event fd for queue producer.
#define CLIB_CACHE_LINE_BYTES
static int svm_msg_q_lock(svm_msg_q_t *mq)
Lock, or block trying, the message queue.
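The lock, try-lock and unlock helpers bracket multi-step operations on the queue (allocate, inspect, enqueue). A sketch of the usual discipline, assuming both locking calls return 0 on success:

    #include <svm/message_queue.h>

    static int
    with_mq_locked (svm_msg_q_t * mq, int noblock)
    {
      if (noblock)
        {
          if (svm_msg_q_try_lock (mq))
            return -1;              /* contended, caller retries later */
        }
      else if (svm_msg_q_lock (mq))
        return -1;                  /* block until the mutex is ours */

      /* ... allocate / enqueue / inspect while holding the mutex ... */

      svm_msg_q_unlock (mq);
      return 0;
    }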
static u8 svm_msg_q_is_full(svm_msg_q_t *mq)
Check if message queue is full.
svm_msg_q_msg_t svm_msg_q_alloc_msg_w_ring(svm_msg_q_t *mq, u32 ring_index)
Allocate message buffer on ring.
static void * svm_msg_q_ring_data(svm_msg_q_ring_t *ring, u32 elt_index)
void svm_queue_send_signal(svm_queue_t *q, u8 is_prod)
void svm_msg_q_sub_w_lock(svm_msg_q_t *mq, svm_msg_q_msg_t *msg)
Consumer dequeue one message from queue with mutex held.
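A consumer-side sketch pairing the lock helpers with svm_msg_q_wait and svm_msg_q_sub_w_lock: take the mutex, sleep until the producer signals, dequeue with the mutex already held, then release. The emptiness check through mq->q->cursize mirrors the field printed by format_svm_msg_q and is an assumption about the underlying svm_queue_t layout.

    #include <svm/message_queue.h>

    static void
    consume_locked (svm_msg_q_t * mq)
    {
      svm_msg_q_msg_t msg;

      svm_msg_q_lock (mq);
      while (mq->q->cursize == 0)     /* nothing queued yet */
        svm_msg_q_wait (mq);          /* block until an enqueue is signalled */

      svm_msg_q_sub_w_lock (mq, &msg);  /* dequeue, mutex already held */
      svm_msg_q_unlock (mq);

      /* ... use svm_msg_q_msg_data (mq, &msg) ... */
      svm_msg_q_free_msg (mq, &msg);
    }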
u32 nitems
max size of the ring