FD.io VPP  v18.01.2-1-g9b554f3
Vector Packet Processing
svm.c
1 /*
2  *------------------------------------------------------------------
3  * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
4  * library
5  *
6  * Copyright (c) 2009 Cisco and/or its affiliates.
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *------------------------------------------------------------------
19  */
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #include <sys/stat.h>
26 #include <netinet/in.h>
27 #include <signal.h>
28 #include <pthread.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <vppinfra/clib.h>
34 #include <vppinfra/vec.h>
35 #include <vppinfra/hash.h>
36 #include <vppinfra/bitmap.h>
37 #include <vppinfra/fifo.h>
38 #include <vppinfra/time.h>
39 #include <vppinfra/mheap.h>
40 #include <vppinfra/heap.h>
41 #include <vppinfra/pool.h>
42 #include <vppinfra/format.h>
43 
44 #include "svm.h"
45 
46 static svm_region_t *root_rp;
47 static int root_rp_refcount;
48 
49 #define MAXLOCK 2
50 static pthread_mutex_t *mutexes_held[MAXLOCK];
51 static int nheld;
52 
53 svm_region_t *
54 svm_get_root_rp (void)
55 {
56  return root_rp;
57 }
58 
59 #define MUTEX_DEBUG
60 
61 static void
62 region_lock (svm_region_t * rp, int tag)
63 {
64  pthread_mutex_lock (&rp->mutex);
65 #ifdef MUTEX_DEBUG
66  rp->mutex_owner_pid = getpid ();
67  rp->mutex_owner_tag = tag;
68 #endif
69  ASSERT (nheld < MAXLOCK);
70  /*
71  * Keep score of held mutexes so we can try to exit
72  * cleanly if the world comes to an end at the worst possible
73  * moment
74  */
75  mutexes_held[nheld++] = &rp->mutex;
76 }
77 
78 static void
79 region_unlock (svm_region_t * rp)
80 {
81  int i, j;
82 #ifdef MUTEX_DEBUG
83  rp->mutex_owner_pid = 0;
84  rp->mutex_owner_tag = 0;
85 #endif
86 
87  for (i = nheld - 1; i >= 0; i--)
88  {
89  if (mutexes_held[i] == &rp->mutex)
90  {
91  for (j = i; j < MAXLOCK - 1; j++)
92  mutexes_held[j] = mutexes_held[j + 1];
93  nheld--;
94  goto found;
95  }
96  }
97  ASSERT (0);
98 
99 found:
100  CLIB_MEMORY_BARRIER ();
101  pthread_mutex_unlock (&rp->mutex);
102 }
103 
104 
105 static u8 *
106 format_svm_flags (u8 * s, va_list * args)
107 {
108  uword f = va_arg (*args, uword);
109 
110  if (f & SVM_FLAGS_MHEAP)
111  s = format (s, "MHEAP ");
112  if (f & SVM_FLAGS_FILE)
113  s = format (s, "FILE ");
114  if (f & SVM_FLAGS_NODATA)
115  s = format (s, "NODATA ");
116  if (f & SVM_FLAGS_NEED_DATA_INIT)
117  s = format (s, "INIT ");
118 
119  return (s);
120 }
121 
122 static u8 *
123 format_svm_size (u8 * s, va_list * args)
124 {
125  uword size = va_arg (*args, uword);
126 
127  if (size >= (1 << 20))
128  {
129  s = format (s, "(%d mb)", size >> 20);
130  }
131  else if (size >= (1 << 10))
132  {
133  s = format (s, "(%d kb)", size >> 10);
134  }
135  else
136  {
137  s = format (s, "(%d bytes)", size);
138  }
139  return (s);
140 }
141 
142 u8 *
143 format_svm_region (u8 * s, va_list * args)
144 {
145  svm_region_t *rp = va_arg (*args, svm_region_t *);
146  int verbose = va_arg (*args, int);
147  int i;
148  uword lo, hi;
149 
150  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
151  rp->region_name, rp->virtual_base,
152  rp->virtual_size, format_svm_size, rp->virtual_size);
153  s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
154  rp->user_ctx, rp->bitmap_size);
155 
156  if (verbose)
157  {
158  s = format (s, " flags: 0x%x %U\n", rp->flags,
159  format_svm_flags, rp->flags);
160  s = format (s,
161  " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
162  rp->region_heap, rp->data_base, rp->data_heap);
163  }
164 
165  s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));
166 
167  for (i = 0; i < vec_len (rp->client_pids); i++)
168  s = format (s, "%d ", rp->client_pids[i]);
169 
170  s = format (s, "\n");
171 
172  if (verbose)
173  {
174  lo = hi = ~0;
175 
176  s = format (s, " VM in use: ");
177 
178  for (i = 0; i < rp->bitmap_size; i++)
179  {
180  if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
181  {
182  if (lo == ~0)
183  {
184  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
185  }
186  else
187  {
188  hi = rp->virtual_base + i * MMAP_PAGESIZE;
189  }
190  }
191  else
192  {
193  if (lo != ~0)
194  {
195  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
196  s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,
197  (hi - lo) >> 10);
198  lo = hi = ~0;
199  }
200  }
201  }
202  s = format (s, " rgn heap stats: %U", format_mheap,
203  rp->region_heap, 0);
204  if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
205  {
206  s = format (s, "\n data heap stats: %U", format_mheap,
207  rp->data_heap, 1);
208  }
209  s = format (s, "\n");
210  }
211 
212  return (s);
213 }
214 
215 /*
216  * rnd_pagesize
217  * Round to a pagesize multiple, presumably 4k works
218  */
219 static u64
220 rnd_pagesize (u64 size)
221 {
222  u64 rv;
223 
224  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
225  return (rv);
226 }
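/*
 * Worked example (a sketch, assuming MMAP_PAGESIZE is 4<<10):
 *   rnd_pagesize (1)    => 0x1000
 *   rnd_pagesize (4096) => 0x1000
 *   rnd_pagesize (4097) => 0x2000
 */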
227 
228 /*
229  * svm_data_region_setup
230  */
231 static int
232 svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
233 {
234  int fd;
235  u8 junk = 0;
236  uword map_size;
237 
238  map_size = rp->virtual_size - (MMAP_PAGESIZE +
239  (a->pvt_heap_size ? a->pvt_heap_size :
240  SVM_PVT_MHEAP_SIZE));
241 
242  if (a->flags & SVM_FLAGS_FILE)
243  {
244  struct stat statb;
245 
246  fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);
247 
248  if (fd < 0)
249  {
250  clib_unix_warning ("open");
251  return -1;
252  }
253 
254  if (fstat (fd, &statb) < 0)
255  {
256  clib_unix_warning ("fstat");
257  close (fd);
258  return -2;
259  }
260 
261  if (statb.st_mode & S_IFREG)
262  {
263  if (statb.st_size == 0)
264  {
265  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
266  {
267  clib_unix_warning ("seek region size");
268  close (fd);
269  return -3;
270  }
271  if (write (fd, &junk, 1) != 1)
272  {
273  clib_unix_warning ("set region size");
274  close (fd);
275  return -3;
276  }
277  }
278  else
279  {
280  map_size = rnd_pagesize (statb.st_size);
281  }
282  }
283  else
284  {
285  map_size = a->backing_mmap_size;
286  }
287 
288  ASSERT (map_size <= rp->virtual_size -
289  (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));
290 
291  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
292  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
293  {
294  clib_unix_warning ("mmap");
295  close (fd);
296  return -3;
297  }
298  close (fd);
299  rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
300  rp->flags |= SVM_FLAGS_FILE;
301  }
302 
303  if (a->flags & SVM_FLAGS_MHEAP)
304  {
305  rp->data_heap =
306  mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
307  MHEAP_FLAG_DISABLE_VM);
308  rp->flags |= SVM_FLAGS_MHEAP;
309  }
310  return 0;
311 }
312 
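/*
 * svm_data_region_map
 *
 * Counterpart of svm_data_region_create() above, used when attaching to a
 * region that already exists: it opens the backing file without O_CREAT and
 * does not set rp->backing_file, rp->flags or the data heap, since the
 * region creator already did that.
 */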
313 static int
314 svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
315 {
316  int fd;
317  u8 junk = 0;
318  uword map_size;
319  struct stat statb;
320 
321  map_size = rp->virtual_size -
322  (MMAP_PAGESIZE +
323  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));
324 
325  if (a->flags & SVM_FLAGS_FILE)
326  {
327 
328  fd = open (a->backing_file, O_RDWR, 0777);
329 
330  if (fd < 0)
331  {
332  clib_unix_warning ("open");
333  return -1;
334  }
335 
336  if (fstat (fd, &statb) < 0)
337  {
338  clib_unix_warning ("fstat");
339  close (fd);
340  return -2;
341  }
342 
343  if (statb.st_mode & S_IFREG)
344  {
345  if (statb.st_size == 0)
346  {
347  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
348  {
349  clib_unix_warning ("seek region size");
350  close (fd);
351  return -3;
352  }
353  if (write (fd, &junk, 1) != 1)
354  {
355  clib_unix_warning ("set region size");
356  close (fd);
357  return -3;
358  }
359  }
360  else
361  {
362  map_size = rnd_pagesize (statb.st_size);
363  }
364  }
365  else
366  {
367  map_size = a->backing_mmap_size;
368  }
369 
370  ASSERT (map_size <= rp->virtual_size
371  - (MMAP_PAGESIZE
372  +
373  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
374 
375  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
376  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
377  {
378  clib_unix_warning ("mmap");
379  close (fd);
380  return -3;
381  }
382  close (fd);
383  }
384  return 0;
385 }
386 
387 u8 *
388 shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
389 {
390  u8 *path;
391  u8 *shm_name;
392  u8 *split_point;
393  u8 *mkdir_arg = 0;
394  int root_path_offset = 0;
395  int name_offset = 0;
396 
397  if (a->root_path)
398  {
399  /* Tolerate present or absent slashes */
400  if (a->root_path[0] == '/')
401  root_path_offset++;
402 
403  /* create the root_path under /dev/shm
404  iterate through path creating directories */
405 
406  path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
407  split_point = path + 1;
408  vec_add1 (mkdir_arg, '-');
409 
410  while (*split_point)
411  {
412  while (*split_point && *split_point != '/')
413  {
414  vec_add1 (mkdir_arg, *split_point);
415  split_point++;
416  }
417  vec_add1 (mkdir_arg, 0);
418 
419  /* ready to descend another level */
420  mkdir_arg[vec_len (mkdir_arg) - 1] = '-';
421  split_point++;
422  }
423  vec_free (mkdir_arg);
424  vec_free (path);
425 
426  if (a->name[0] == '/')
427  name_offset = 1;
428 
429  shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
430  &a->name[name_offset], 0);
431  }
432  else
433  shm_name = format (0, "%s%c", a->name, 0);
434  return (shm_name);
435 }
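/*
 * Naming sketch (illustrative values, not from the source):
 *   root_path = "vpe", name = "/global_vm"  =>  shm name "/vpe-global_vm"
 *   root_path = 0,     name = "/global_vm"  =>  shm name "/global_vm"
 * i.e. the segment shows up as /dev/shm/vpe-global_vm or /dev/shm/global_vm.
 */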
436 
437 void
438 svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
439 {
440  pthread_mutexattr_t attr;
441  pthread_condattr_t cattr;
442  int nbits, words, bit;
443  int overhead_space;
444  void *oldheap;
445  uword data_base;
446  ASSERT (rp);
447  int rv;
448 
449  memset (rp, 0, sizeof (*rp));
450 
451  if (pthread_mutexattr_init (&attr))
452  clib_unix_warning ("mutexattr_init");
453 
454  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
455  clib_unix_warning ("mutexattr_setpshared");
456 
457  if (pthread_mutex_init (&rp->mutex, &attr))
458  clib_unix_warning ("mutex_init");
459 
460  if (pthread_mutexattr_destroy (&attr))
461  clib_unix_warning ("mutexattr_destroy");
462 
463  if (pthread_condattr_init (&cattr))
464  clib_unix_warning ("condattr_init");
465 
466  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
467  clib_unix_warning ("condattr_setpshared");
468 
469  if (pthread_cond_init (&rp->condvar, &cattr))
470  clib_unix_warning ("cond_init");
471 
472  if (pthread_condattr_destroy (&cattr))
473  clib_unix_warning ("condattr_destroy");
474 
475  region_lock (rp, 1);
476 
477  rp->virtual_base = a->baseva;
478  rp->virtual_size = a->size;
479 
480  rp->region_heap =
481  mheap_alloc_with_flags (uword_to_pointer
482  (a->baseva + MMAP_PAGESIZE, void *),
483  (a->pvt_heap_size !=
484  0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
485  MHEAP_FLAG_DISABLE_VM);
486  oldheap = svm_push_pvt_heap (rp);
487 
488  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
489  vec_add1 (rp->client_pids, getpid ());
490 
491  nbits = rp->virtual_size / MMAP_PAGESIZE;
492 
493  ASSERT (nbits > 0);
494  rp->bitmap_size = nbits;
495  words = (nbits + BITS (uword) - 1) / BITS (uword);
496  vec_validate (rp->bitmap, words - 1);
497 
498  overhead_space = MMAP_PAGESIZE /* header */ +
499  ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
500 
501  bit = 0;
502  data_base = (uword) rp->virtual_base;
503 
504  if (a->flags & SVM_FLAGS_NODATA)
505  rp->flags |= SVM_FLAGS_NEED_DATA_INIT;
506 
507  do
508  {
509  clib_bitmap_set_no_check (rp->bitmap, bit, 1);
510  bit++;
511  overhead_space -= MMAP_PAGESIZE;
512  data_base += MMAP_PAGESIZE;
513  }
514  while (overhead_space > 0);
515 
516  rp->data_base = (void *) data_base;
517 
518  /*
519  * Note: although the POSIX spec guarantees that only one
520  * process enters this block, we have to play games
521  * to hold off clients until e.g. the mutex is ready
522  */
523  rp->version = SVM_VERSION;
524 
525  /* setup the data portion of the region */
526 
527  rv = svm_data_region_create (a, rp);
528  if (rv)
529  {
530  clib_warning ("data_region_create: %d", rv);
531  }
532 
533  region_unlock (rp);
534 
535  svm_pop_heap (oldheap);
536 }
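/*
 * Resulting region layout, as initialized above (a rough sketch):
 *
 *   virtual_base -> +---------------------------------------+
 *                   | region header (1 x MMAP_PAGESIZE)     |
 *                   +---------------------------------------+
 *                   | private region heap (pvt_heap_size    |
 *                   |   or SVM_PVT_MHEAP_SIZE)              |
 *   data_base ----> +---------------------------------------+
 *                   | data pages (optional data heap / file)|
 *                   +---------------------------------------+
 *
 * The bitmap set up above marks the header and private-heap pages in use.
 */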
537 
538 /*
539  * svm_map_region
540  */
541 void *
542 svm_map_region (svm_map_region_args_t * a)
543 {
544  int svm_fd;
545  svm_region_t *rp;
546  int deadman = 0;
547  u8 junk = 0;
548  void *oldheap;
549  int rv;
550  int pid_holding_region_lock;
551  u8 *shm_name;
552  int dead_region_recovery = 0;
553  int time_left;
554  struct stat stat;
555  struct timespec ts, tsrem;
556 
557  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
558  ASSERT (a->name);
559 
560  shm_name = shm_name_from_svm_map_region_args (a);
561 
562  if (CLIB_DEBUG > 1)
563  clib_warning ("[%d] map region %s: shm_open (%s)",
564  getpid (), a->name, shm_name);
565 
566  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);
567 
568  if (svm_fd >= 0)
569  {
570  if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
571  clib_unix_warning ("segment chmod");
572  /* This turns out to fail harmlessly if the client starts first */
573  if (fchown (svm_fd, a->uid, a->gid) < 0)
574  clib_unix_warning ("segment chown [ok if client starts first]");
575 
576  vec_free (shm_name);
577 
578  if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
579  {
580  clib_warning ("seek region size");
581  close (svm_fd);
582  return (0);
583  }
584  if (write (svm_fd, &junk, 1) != 1)
585  {
586  clib_warning ("set region size");
587  close (svm_fd);
588  return (0);
589  }
590 
591  rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
592  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);
593 
594  if (rp == (svm_region_t *) MAP_FAILED)
595  {
596  clib_unix_warning ("mmap create");
597  close (svm_fd);
598  return (0);
599  }
600  close (svm_fd);
601 
602  svm_region_init_mapped_region (a, rp);
603 
604  return ((void *) rp);
605  }
606  else
607  {
608  svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);
609 
610  vec_free (shm_name);
611 
612  if (svm_fd < 0)
613  {
614  perror ("svm_region_map(mmap open)");
615  return (0);
616  }
617 
618  time_left = 20;
619  while (1)
620  {
621  if (0 != fstat (svm_fd, &stat))
622  {
623  clib_warning ("fstat failed: %d", errno);
624  close (svm_fd);
625  return (0);
626  }
627  if (stat.st_size > 0)
628  {
629  break;
630  }
631  if (0 == time_left)
632  {
633  clib_warning ("waiting for resize of shm file timed out");
634  close (svm_fd);
635  return (0);
636  }
637  ts.tv_sec = 0;
638  ts.tv_nsec = 100000000;
639  while (nanosleep (&ts, &tsrem) < 0)
640  ts = tsrem;
641  time_left--;
642  }
643 
644  rp = mmap (0, MMAP_PAGESIZE,
645  PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);
646 
647  if (rp == (svm_region_t *) MAP_FAILED)
648  {
649  close (svm_fd);
650  clib_warning ("mmap");
651  return (0);
652  }
653  /*
654  * We lost the footrace to create this region; make sure
655  * the winner has crossed the finish line.
656  */
657  while (rp->version == 0 && deadman++ < 5)
658  {
659  sleep (1);
660  }
661 
662  /*
663  * <bleep>-ed?
664  */
665  if (rp->version == 0)
666  {
667  clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
668  close (svm_fd);
669  munmap (rp, a->size);
670  return (0);
671  }
672  /* Remap now that the region has been placed */
673  a->baseva = rp->virtual_base;
674  a->size = rp->virtual_size;
675  munmap (rp, MMAP_PAGESIZE);
676 
677  rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
678  PROT_READ | PROT_WRITE,
679  MAP_SHARED | MAP_FIXED, svm_fd, 0);
680  if ((uword) rp == (uword) MAP_FAILED)
681  {
682  clib_unix_warning ("mmap");
683  close (svm_fd);
684  return (0);
685  }
686 
687  if ((uword) rp != rp->virtual_base)
688  {
689  clib_warning ("mmap botch");
690  }
691 
692  /*
693  * Try to fix the region mutex if it is held by
694  * a dead process
695  */
696  pid_holding_region_lock = rp->mutex_owner_pid;
697  if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
698  {
699  clib_warning
700  ("region %s mutex held by dead pid %d, tag %d, force unlock",
701  rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
702  /* owner pid is nonexistent */
703  rp->mutex.__data.__owner = 0;
704  rp->mutex.__data.__lock = 0;
705  dead_region_recovery = 1;
706  }
707 
708  if (dead_region_recovery)
709  clib_warning ("recovery: attempt to re-lock region");
710 
711  region_lock (rp, 2);
712  oldheap = svm_push_pvt_heap (rp);
713  vec_add1 (rp->client_pids, getpid ());
714 
715  if (dead_region_recovery)
716  clib_warning ("recovery: attempt svm_data_region_map");
717 
718  rv = svm_data_region_map (a, rp);
719  if (rv)
720  {
721  clib_warning ("data_region_map: %d", rv);
722  }
723 
724  if (dead_region_recovery)
725  clib_warning ("unlock and continue");
726 
727  region_unlock (rp);
728 
729  svm_pop_heap (oldheap);
730 
731  return ((void *) rp);
732 
733  }
734  return 0; /* NOTREACHED */
735 }
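/*
 * Typical use (a minimal sketch; callers normally go through
 * svm_region_find_or_create() below, which picks baseva out of the root
 * region's page bitmap):
 *
 *   svm_map_region_args_t _a, *a = &_a;
 *   memset (a, 0, sizeof (*a));
 *   a->name = "/my-region";        // region / shm segment name
 *   a->size = 64 << 20;            // must be a MMAP_PAGESIZE multiple
 *   a->baseva = ...;               // page-aligned virtual base address
 *   a->flags = SVM_FLAGS_MHEAP;    // carve a data heap inside the region
 *   svm_region_t *rp = svm_map_region (a);
 */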
736 
737 static void
738 svm_mutex_cleanup (void)
739 {
740  int i;
741  for (i = 0; i < nheld; i++)
742  {
743  pthread_mutex_unlock (mutexes_held[i]);
744  }
745 }
746 
747 static int
748 svm_region_init_internal (svm_map_region_args_t * a)
749 {
750  svm_region_t *rp;
751  u64 ticks = clib_cpu_time_now ();
752  uword randomize_baseva;
753 
754  /* guard against klutz calls */
755  if (root_rp)
756  return -1;
757 
758  root_rp_refcount++;
759 
760  atexit (svm_mutex_cleanup);
761 
762  /* Randomize the shared-VM base at init time */
763  if (MMAP_PAGESIZE <= (4 << 10))
764  randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
765  else
766  randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;
767 
768  a->baseva += randomize_baseva;
769 
770  rp = svm_map_region (a);
771  if (!rp)
772  return -1;
773 
774  region_lock (rp, 3);
775 
776  /* Set up the main region data structures */
777  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
778  {
779  svm_main_region_t *mp = 0;
780  void *oldheap;
781 
782  rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);
783 
784  oldheap = svm_push_pvt_heap (rp);
785  vec_validate (mp, 0);
786  mp->name_hash = hash_create_string (0, sizeof (uword));
787  mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
788  mp->uid = a->uid;
789  mp->gid = a->gid;
790  rp->data_base = mp;
791  svm_pop_heap (oldheap);
792  }
793  region_unlock (rp);
794  root_rp = rp;
795 
796  return 0;
797 }
798 
799 void
800 svm_region_init (void)
801 {
802  svm_map_region_args_t _a, *a = &_a;
803 
804  memset (a, 0, sizeof (*a));
805  a->root_path = 0;
806  a->name = SVM_GLOBAL_REGION_NAME;
807  a->baseva = SVM_GLOBAL_REGION_BASEVA;
808  a->size = SVM_GLOBAL_REGION_SIZE;
809  a->flags = SVM_FLAGS_NODATA;
810  a->uid = 0;
811  a->gid = 0;
812 
813  svm_region_init_internal (a);
814 }
815 
816 int
817 svm_region_init_chroot (const char *root_path)
818 {
819  svm_map_region_args_t _a, *a = &_a;
820 
821  memset (a, 0, sizeof (*a));
822  a->root_path = root_path;
823  a->name = SVM_GLOBAL_REGION_NAME;
824  a->baseva = SVM_GLOBAL_REGION_BASEVA;
825  a->size = SVM_GLOBAL_REGION_SIZE;
826  a->flags = SVM_FLAGS_NODATA;
827  a->uid = 0;
828  a->gid = 0;
829 
830  return svm_region_init_internal (a);
831 }
832 
833 void
834 svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
835 {
836  svm_map_region_args_t _a, *a = &_a;
837 
838  memset (a, 0, sizeof (*a));
839  a->root_path = root_path;
840  a->name = SVM_GLOBAL_REGION_NAME;
841  a->baseva = SVM_GLOBAL_REGION_BASEVA;
842  a->size = SVM_GLOBAL_REGION_SIZE;
843  a->flags = SVM_FLAGS_NODATA;
844  a->uid = uid;
845  a->gid = gid;
846 
847  svm_region_init_internal (a);
848 }
849 
850 void
851 svm_region_init_args (svm_map_region_args_t * a)
852 {
853  svm_region_init_internal (a);
854 }
855 
856 void *
857 svm_region_find_or_create (svm_map_region_args_t * a)
858 {
859  svm_main_region_t *mp;
860  svm_region_t *rp;
861  uword need_nbits;
862  int index, i;
863  void *oldheap;
864  uword *p;
865  u8 *name;
866  svm_subregion_t *subp;
867 
868  ASSERT (root_rp);
869 
870  a->size += MMAP_PAGESIZE +
871  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
872  a->size = rnd_pagesize (a->size);
873 
874  region_lock (root_rp, 4);
875  oldheap = svm_push_pvt_heap (root_rp);
876  mp = root_rp->data_base;
877 
878  ASSERT (mp);
879 
880  /* Map the named region from the correct chroot environment */
881  if (a->root_path == NULL)
882  a->root_path = (char *) mp->root_path;
883 
884  /*
885  * See if this region is already known. If it is, we're
886  * almost done...
887  */
888  p = hash_get_mem (mp->name_hash, a->name);
889 
890  if (p)
891  {
892  rp = svm_map_region (a);
893  region_unlock (root_rp);
894  svm_pop_heap (oldheap);
895  return rp;
896  }
897 
898  /* Create the region. */
899  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
900 
901  need_nbits = a->size / MMAP_PAGESIZE;
902 
903  index = 1; /* $$$ fixme, figure out how many bit to really skip */
904 
905  /*
906  * Scan the virtual space allocation bitmap, looking for a large
907  * enough chunk
908  */
909  do
910  {
911  if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
912  {
913  for (i = 0; i < (need_nbits - 1); i++)
914  {
915  if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
916  {
917  index = index + i;
918  goto next;
919  }
920  }
921  break;
922  }
923  index++;
924  next:;
925  }
926  while (index < root_rp->bitmap_size);
927 
928  /* Completely out of VM? */
929  if (index >= root_rp->bitmap_size)
930  {
931  clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
932  root_rp->region_name, a->size, a->size);
933  svm_pop_heap (oldheap);
934  region_unlock (root_rp);
935  return 0;
936  }
937 
938  /*
939  * Mark virtual space allocated
940  */
941 #if CLIB_DEBUG > 1
942  clib_warning ("set %d bits at index %d", need_nbits, index);
943 #endif
944 
945  for (i = 0; i < need_nbits; i++)
946  {
947  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
948  }
949 
950  /* Place this region where it goes... */
951  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;
952 
953  rp = svm_map_region (a);
954 
955  pool_get (mp->subregions, subp);
956  name = format (0, "%s%c", a->name, 0);
957  subp->subregion_name = name;
958 
959  hash_set_mem (mp->name_hash, name, subp - mp->subregions);
960 
961  svm_pop_heap (oldheap);
962 
963  region_unlock (root_rp);
964 
965  return (rp);
966 }
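/*
 * Note: the bitmap scan above is a simple first-fit search over the root
 * region's page-allocation bitmap; the new subregion is carved out of the
 * root region's virtual space and its pages are given back in
 * svm_region_unmap() when the last client detaches.
 */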
967 
968 void
969 svm_region_unlink (svm_region_t * rp)
970 {
971  svm_map_region_args_t _a, *a = &_a;
972  svm_main_region_t *mp;
973  u8 *shm_name;
974 
975  ASSERT (root_rp);
976  ASSERT (rp);
977  ASSERT (vec_c_string_is_terminated (rp->region_name));
978 
979  mp = root_rp->data_base;
980  ASSERT (mp);
981 
982  a->root_path = (char *) mp->root_path;
983  a->name = rp->region_name;
984  shm_name = shm_name_from_svm_map_region_args (a);
985  if (CLIB_DEBUG > 1)
986  clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
987  shm_unlink ((const char *) shm_name);
988  vec_free (shm_name);
989 }
990 
991 /*
992  * svm_region_unmap
993  *
994  * Let go of the indicated region. If the calling process
995  * is the last customer, throw it away completely.
996  * The root region mutex guarantees atomicity with respect to
997  * a new region client showing up at the wrong moment.
998  */
999 void
1000 svm_region_unmap (void *rp_arg)
1001 {
1002  int i, mypid = getpid ();
1003  int nclients_left;
1004  void *oldheap;
1005  uword virtual_base, virtual_size;
1006  svm_region_t *rp = rp_arg;
1007  char *name;
1008 
1009  /*
1010  * If we take a signal while holding one or more shared-memory
1011  * mutexes, we may end up back here from an otherwise
1012  * benign exit handler. Bail out to avoid a recursive
1013  * mutex screw-up.
1014  */
1015  if (nheld)
1016  return;
1017 
1018  ASSERT (rp);
1019  ASSERT (root_rp);
1020 
1021  if (CLIB_DEBUG > 1)
1022  clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);
1023 
1024  region_lock (root_rp, 5);
1025  region_lock (rp, 6);
1026 
1027  oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */
1028 
1029  /* Remove the caller from the list of mappers */
1030  for (i = 0; i < vec_len (rp->client_pids); i++)
1031  {
1032  if (rp->client_pids[i] == mypid)
1033  {
1034  vec_delete (rp->client_pids, 1, i);
1035  goto found;
1036  }
1037  }
1038  clib_warning ("pid %d AWOL", mypid);
1039 
1040 found:
1041 
1042  svm_pop_heap (oldheap);
1043 
1044  nclients_left = vec_len (rp->client_pids);
1045  virtual_base = rp->virtual_base;
1046  virtual_size = rp->virtual_size;
1047 
1048  if (nclients_left == 0)
1049  {
1050  int index, nbits, i;
1051  svm_main_region_t *mp;
1052  uword *p;
1053  svm_subregion_t *subp;
1054 
1055  /* Kill the region, last guy on his way out */
1056 
1057  oldheap = svm_push_pvt_heap (root_rp);
1058  name = vec_dup (rp->region_name);
1059 
1060  virtual_base = rp->virtual_base;
1061  virtual_size = rp->virtual_size;
1062 
1063  /* Figure out which bits to clear in the root region bitmap */
1064  index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;
1065 
1066  nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;
1067 
1068 #if CLIB_DEBUG > 1
1069  clib_warning ("clear %d bits at index %d", nbits, index);
1070 #endif
1071  /* Give back the allocated VM */
1072  for (i = 0; i < nbits; i++)
1073  {
1074  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
1075  }
1076 
1077  mp = root_rp->data_base;
1078 
1079  p = hash_get_mem (mp->name_hash, name);
1080 
1081  /* Better never happen ... */
1082  if (p == NULL)
1083  {
1084  region_unlock (rp);
1085  region_unlock (root_rp);
1086  svm_pop_heap (oldheap);
1087  clib_warning ("Region name '%s' not found?", name);
1088  return;
1089  }
1090 
1091  /* Remove from the root region subregion pool */
1092  subp = mp->subregions + p[0];
1093  pool_put (mp->subregions, subp);
1094 
1095  hash_unset_mem (mp->name_hash, name);
1096 
1097  vec_free (name);
1098 
1099  region_unlock (rp);
1100  svm_region_unlink (rp);
1101  munmap ((void *) virtual_base, virtual_size);
1102  region_unlock (root_rp);
1103  svm_pop_heap (oldheap);
1104  return;
1105  }
1106 
1107  region_unlock (rp);
1108  region_unlock (root_rp);
1109 
1110  munmap ((void *) virtual_base, virtual_size);
1111 }
1112 
1113 /*
1114  * svm_region_exit
1115  */
1116 void
1117 svm_region_exit ()
1118 {
1119  void *oldheap;
1120  int i, mypid = getpid ();
1121  uword virtual_base, virtual_size;
1122 
1123  /* It felt so nice we did it twice... */
1124  if (root_rp == 0)
1125  return;
1126 
1127  if (--root_rp_refcount > 0)
1128  return;
1129 
1130  /*
1131  * If we take a signal while holding one or more shared-memory
1132  * mutexes, we may end up back here from an otherwise
1133  * benign exit handler. Bail out to avoid a recursive
1134  * mutex screw-up.
1135  */
1136  if (nheld)
1137  return;
1138 
1139  region_lock (root_rp, 7);
1140  oldheap = svm_push_pvt_heap (root_rp);
1141 
1142  virtual_base = root_rp->virtual_base;
1143  virtual_size = root_rp->virtual_size;
1144 
1145  for (i = 0; i < vec_len (root_rp->client_pids); i++)
1146  {
1147  if (root_rp->client_pids[i] == mypid)
1148  {
1149  vec_delete (root_rp->client_pids, 1, i);
1150  goto found;
1151  }
1152  }
1153  clib_warning ("pid %d AWOL", mypid);
1154 
1155 found:
1156 
1157  if (vec_len (root_rp->client_pids) == 0)
1158  svm_region_unlink (root_rp);
1159 
1160  region_unlock (root_rp);
1161  svm_pop_heap (oldheap);
1162 
1163  root_rp = 0;
1164  munmap ((void *) virtual_base, virtual_size);
1165 }
1166 
1167 void
1168 svm_client_scan_this_region_nolock (svm_region_t * rp)
1169 {
1170  int j;
1171  int mypid = getpid ();
1172  void *oldheap;
1173 
1174  for (j = 0; j < vec_len (rp->client_pids); j++)
1175  {
1176  if (mypid == rp->client_pids[j])
1177  continue;
1178  if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
1179  {
1180  clib_warning ("%s: cleanup ghost pid %d",
1181  rp->region_name, rp->client_pids[j]);
1182  /* nb: client vec in rp->region_heap */
1183  oldheap = svm_push_pvt_heap (rp);
1184  vec_delete (rp->client_pids, 1, j);
1185  j--;
1186  svm_pop_heap (oldheap);
1187  }
1188  }
1189 }
1190 
1191 
1192 /*
1193  * Scan svm regions for dead clients
1194  */
1195 void
1196 svm_client_scan (const char *root_path)
1197 {
1198  int i, j;
1199  svm_main_region_t *mp;
1200  svm_map_region_args_t *a = 0;
1201  svm_region_t *root_rp;
1202  svm_region_t *rp;
1203  svm_subregion_t *subp;
1204  u8 *name = 0;
1205  u8 **svm_names = 0;
1206  void *oldheap;
1207  int mypid = getpid ();
1208 
1209  vec_validate (a, 0);
1210 
1211  svm_region_init_chroot (root_path);
1212 
1213  root_rp = svm_get_root_rp ();
1214 
1215  pthread_mutex_lock (&root_rp->mutex);
1216 
1217  mp = root_rp->data_base;
1218 
1219  for (j = 0; j < vec_len (root_rp->client_pids); j++)
1220  {
1221  if (mypid == root_rp->client_pids[j])
1222  continue;
1223  if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
1224  {
1225  clib_warning ("%s: cleanup ghost pid %d",
1226  root_rp->region_name, root_rp->client_pids[j]);
1227  /* nb: client vec in root_rp->region_heap */
1228  oldheap = svm_push_pvt_heap (root_rp);
1229  vec_delete (root_rp->client_pids, 1, j);
1230  j--;
1231  svm_pop_heap (oldheap);
1232  }
1233  }
1234 
1235  /*
1236  * Snapshoot names, can't hold root rp mutex across
1237  * find_or_create.
1238  */
1239  /* *INDENT-OFF* */
1240  pool_foreach (subp, mp->subregions, ({
1241  name = vec_dup (subp->subregion_name);
1242  vec_add1(svm_names, name);
1243  }));
1244  /* *INDENT-ON* */
1245 
1246  pthread_mutex_unlock (&root_rp->mutex);
1247 
1248  for (i = 0; i < vec_len (svm_names); i++)
1249  {
1250  vec_validate (a, 0);
1251  a->root_path = root_path;
1252  a->name = (char *) svm_names[i];
1253  rp = svm_region_find_or_create (a);
1254  if (rp)
1255  {
1256  pthread_mutex_lock (&rp->mutex);
1257 
1258  svm_client_scan_this_region_nolock (rp);
1259 
1260  pthread_mutex_unlock (&rp->mutex);
1261  svm_region_unmap (rp);
1262  vec_free (svm_names[i]);
1263  }
1264  vec_free (a);
1265  }
1266  vec_free (svm_names);
1267 
1268  svm_region_exit ();
1269 
1270  vec_free (a);
1271 }
1272 
1273 /*
1274  * fd.io coding-style-patch-verification: ON
1275  *
1276  * Local Variables:
1277  * eval: (c-set-style "gnu")
1278  * End:
1279  */
Definition: svm.c:817