FD.io VPP v21.01.1
Vector Packet Processing
lock.h
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_clib_lock_h
#define included_clib_lock_h

#include <vppinfra/clib.h>
#include <vppinfra/atomics.h>

#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#elif defined (__aarch64__) || defined (__arm__)
#define CLIB_PAUSE() __asm__ ("yield")
#else
#define CLIB_PAUSE()
#endif

#if CLIB_DEBUG > 1
#define CLIB_LOCK_DBG(_p)                               \
do {                                                    \
  (*_p)->frame_address = __builtin_frame_address (0);   \
  (*_p)->pid = getpid ();                               \
  (*_p)->thread_index = os_get_thread_index ();         \
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)                         \
do {                                                    \
  (*_p)->frame_address = 0;                             \
  (*_p)->pid = 0;                                       \
  (*_p)->thread_index = 0;                              \
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif

#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))

struct clib_spinlock_s
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
};

typedef struct clib_spinlock_s *clib_spinlock_t;

static inline void
clib_spinlock_init (clib_spinlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

static inline void
clib_spinlock_free (clib_spinlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
        CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  CLIB_LOCK_DBG (p);
}

static_always_inline int
clib_spinlock_trylock (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
    return 0;
  clib_spinlock_lock (p);
  return 1;
}

static_always_inline void
clib_spinlock_lock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_lock (p);
}

static_always_inline int
clib_spinlock_trylock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    return clib_spinlock_trylock (p);
  return 1;
}

static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}

static_always_inline void
clib_spinlock_unlock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_unlock (p);
}

/*
 * Readers-Writer Lock
 */

typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* -1 when W lock held, > 0 when R lock held */
  volatile i32 rw_cnt;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;

always_inline void
clib_rwlock_init (clib_rwlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

always_inline void
clib_rwlock_free (clib_rwlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  i32 cnt;
  do
    {
      /* rwlock held by a writer */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
        CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n
         (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->rw_cnt > 0);
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
}

always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  i32 cnt = 0;
  do
    {
      /* rwlock held by writer or reader(s) */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
        CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_release (&(*p)->rw_cnt);
}

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
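
Usage note (not part of lock.h): the sketch below shows one way the spinlock and reader-writer lock APIs above can be driven. The demo_stats_t type and its functions are hypothetical, invented here for illustration; only the clib_spinlock_* and clib_rwlock_* calls come from this header. The *_if_init spinlock variants additionally let callers skip locking entirely when the lock was never initialized (for example, in single-worker configurations).

/* Hypothetical example: a counter protected by a spinlock plus a
 * reader-writer lock, using only the APIs defined in lock.h above. */
#include <vppinfra/lock.h>

typedef struct
{
  clib_spinlock_t counter_lock;	/* serializes updates of 'counter' */
  clib_rwlock_t rw_lock;	/* many concurrent readers, one writer */
  u64 counter;
} demo_stats_t;

static void
demo_stats_init (demo_stats_t * s)
{
  clib_spinlock_init (&s->counter_lock);
  clib_rwlock_init (&s->rw_lock);
}

static void
demo_stats_bump (demo_stats_t * s)
{
  /* short critical section: spin until the lock is acquired */
  clib_spinlock_lock (&s->counter_lock);
  s->counter++;
  clib_spinlock_unlock (&s->counter_lock);
}

static u64
demo_stats_read (demo_stats_t * s)
{
  u64 v;
  /* readers may hold the rwlock concurrently */
  clib_rwlock_reader_lock (&s->rw_lock);
  v = s->counter;
  clib_rwlock_reader_unlock (&s->rw_lock);
  return v;
}

static void
demo_stats_reset (demo_stats_t * s)
{
  /* exclusive access: waits for all readers and writers to drain */
  clib_rwlock_writer_lock (&s->rw_lock);
  s->counter = 0;
  clib_rwlock_writer_unlock (&s->rw_lock);
}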