FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
valloc.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
*/
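
/*
 * How it works: chunks live in a pool (vam->chunks) and are threaded
 * onto a doubly-linked list kept sorted by base address, with
 * vam->first_index naming the head chunk. A hash table,
 * vam->chunk_index_by_baseva, maps each chunk's base VA to its pool
 * index so clib_valloc_free can locate a chunk without walking the
 * list. Allocation is first-fit: walk the list, take the first free
 * chunk that is large enough, splitting it if it is oversized;
 * freeing coalesces the chunk with free neighbors.
 */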

/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note Only the baseva and size members of the template chunk are
    significant. It's perfectly OK for the new chunk to be discontinuous
    with previous chunks; the chunk fusion algorithm won't merge them.
 */

void
clib_valloc_add_chunk (clib_valloc_main_t * vam,
                       clib_valloc_chunk_t * template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk..
   * It's worth checking, because someone will eventually do that.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          ASSERT (template->baseva < ch->baseva || template->baseva >=
                  (ch->baseva + ch->size));
          ASSERT (template->baseva + template->size < ch->baseva ||
                  template->baseva + template->size >=
                  (ch->baseva + ch->size));
          index = ch->next;
        }
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  if (index == ~0 || template->baseva < ch->baseva)
    {
      pool_get (vam->chunks, new_ch);
      memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          new_ch->next = index;
          new_ch->prev = ~0;
          ch->prev = new_ch - vam->chunks;
        }
      else
        {
          new_ch->next = new_ch->prev = ~0;
        }

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /* Walk to the end of the chunk chain */
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          index = ch->next;
        }
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      memset (new_ch, 0, sizeof (*new_ch));

      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}
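
/*
 * Usage sketch: extend an already-initialized arena ("vam" below) with
 * a second, discontinuous address range. The base address and size are
 * arbitrary illustrative values.
 *
 *   clib_valloc_chunk_t _template, *template = &_template;
 *
 *   template->baseva = 0x200000000ULL;  // not adjacent to the first range
 *   template->size = 2ULL << 20;        // 2 MB
 *   clib_valloc_add_chunk (&vam, template);
 */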

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the initial virtual address range
    @param need_lock - int - 1 => protect the arena with a spinlock
*/
void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
                  int need_lock)
{
  ASSERT (template && template->baseva && template->size);
  memset (vam, 0, sizeof (*vam));
  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->first_index = ~0;
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  clib_valloc_add_chunk (vam, template);
}
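
/*
 * Usage sketch: create a lock-protected arena covering a single 1 MB
 * range. The base address is an arbitrary illustrative value.
 *
 *   clib_valloc_main_t vam;
 *   clib_valloc_chunk_t _template, *template = &_template;
 *
 *   template->baseva = 0x100000000ULL;
 *   template->size = 1ULL << 20;
 *   clib_valloc_init (&vam, template, 1);  // 1 => need_lock
 */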

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - u64 number of bytes to allocate
    @param os_out_of_memory_on_failure - 1 => panic on allocation failure
    @return uword allocated space, 0 => failure
*/
uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
                   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
        {
          /* Too small? */
          if (ch->size < size)
            goto next_chunk;
          /* Exact match? */
          if (ch->size == size)
            {
              ch->flags |= CLIB_VALLOC_BUSY;
              clib_spinlock_unlock_if_init (&vam->lock);
              return ch->baseva;
            }
          /*
           * The current free chunk is larger than necessary, split the block.
           */
          pool_get (vam->chunks, new_ch);
          /* ch might have just moved */
          ch = pool_elt_at_index (vam->chunks, index);
          memset (new_ch, 0, sizeof (*new_ch));
          new_ch->next = new_ch->prev = ~0;
          new_ch->baseva = ch->baseva + size;
          new_ch->size = ch->size - size;
          ch->size = size;

          /* Insert into doubly-linked list */
          new_ch->next = ch->next;
          new_ch->prev = ch - vam->chunks;

          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = new_ch - vam->chunks;
            }
          ch->next = new_ch - vam->chunks;

          hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                    new_ch - vam->chunks);

          ch->flags |= CLIB_VALLOC_BUSY;
          clib_spinlock_unlock_if_init (&vam->lock);
          return ch->baseva;
        }

    next_chunk:
      index = ch->next;
    }
  clib_spinlock_unlock_if_init (&vam->lock);

  if (os_out_of_memory_on_failure)
    os_out_of_memory ();

  return 0;
}
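
/*
 * Usage sketch: carve two allocations out of a fresh arena. First-fit
 * splits the initial free chunk, so the second allocation begins
 * immediately after the first.
 *
 *   uword va1, va2;
 *
 *   va1 = clib_valloc_alloc (&vam, 4 << 10, 0);  // 0 => return 0 on failure
 *   va2 = clib_valloc_alloc (&vam, 8 << 10, 0);  // va2 == va1 + (4 << 10)
 */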


/** Free virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param baseva - uword base virtual address of the returned space
    @return uword - size of the freed virtual chunk
    @note the size is returned since we know it, in case the caller
    doesn't keep track of chunk sizes
*/
uword
clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
{
  clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
  u32 index;
  uword return_size = 0;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  p = hash_get (vam->chunk_index_by_baseva, baseva);

  /* Check even in production images */
  if (p == 0)
    os_panic ();

  index = p[0];

  ch = pool_elt_at_index (vam->chunks, index);

  return_size = ch->size;

  ASSERT (ch->baseva == baseva);
  ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);

  ch->flags &= ~CLIB_VALLOC_BUSY;

  /* Combine with previous entry? */
  if (ch->prev != ~0)
    {
      prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
      /*
       * Previous item must be free as well, and
       * tangent to this block.
       */
      if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, baseva);
          prev_ch->size += ch->size;
          prev_ch->next = ch->next;
          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = ch->prev;
            }
          ASSERT (ch - vam->chunks != vam->first_index);
          memset (ch, 0xfe, sizeof (*ch));
          pool_put (vam->chunks, ch);
          /* See about combining with next elt */
          ch = prev_ch;
        }
    }

  /* Combine with next entry? */
  if (ch->next != ~0)
    {
      next_ch = pool_elt_at_index (vam->chunks, ch->next);

      if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((ch->baseva + ch->size) == next_ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
          ch->size += next_ch->size;
          ch->next = next_ch->next;
          if (ch->next != ~0)
            {
              n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
              n2_ch->prev = ch - vam->chunks;
            }
          ASSERT (next_ch - vam->chunks != vam->first_index);
          memset (next_ch, 0xfe, sizeof (*ch));
          pool_put (vam->chunks, next_ch);
        }
    }
  clib_spinlock_unlock_if_init (&vam->lock);
  return return_size;
}
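
/*
 * Usage sketch: freeing returns the chunk size, and adjacent free
 * chunks coalesce. Continuing the allocation example above, freeing
 * va1 and then va2 leaves the arena as a single free chunk again.
 *
 *   uword freed;
 *
 *   freed = clib_valloc_free (&vam, va1);  // freed == 4 << 10
 *   freed = clib_valloc_free (&vam, va2);  // freed == 8 << 10
 */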

/** Format a virtual allocation arena (varargs)
    @param vam - clib_valloc_main_t pointer to the arena to format
    @param verbose - int - verbosity level
    @return u8 vector
*/
u8 *
format_valloc (u8 * s, va_list * va)
{
  clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
  int verbose = va_arg (*va, int);
  clib_valloc_chunk_t *ch;
  u32 index;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  s = format (s, "%d chunks, first index %d\n",
              pool_elts (vam->chunks), vam->first_index);

  if (verbose)
    {
      index = vam->first_index;
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
                      index, ch->baseva, ch->size, ch->size, ch->prev,
                      (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");

          p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
          if (p == 0)
            {
              s = format (s, "   BUG: baseva not in hash table!\n");
            }
          else if (p[0] != index)
            {
              s = format (s, "   BUG: baseva in hash table %d not %d!\n",
                          p[0], index);
            }
          index = ch->next;
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return s;
}
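
/*
 * Usage sketch: format_valloc follows the vppinfra format-function
 * convention, so it is invoked through format()/fformat() with "%U":
 *
 *   int verbose = 1;
 *   fformat (stdout, "%U", format_valloc, &vam, verbose);
 */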

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */