FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
svm.c
1 /*
2  *------------------------------------------------------------------
3  * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
4  * library
5  *
6  * Copyright (c) 2009 Cisco and/or its affiliates.
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *------------------------------------------------------------------
19  */
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #include <sys/stat.h>
26 #include <netinet/in.h>
27 #include <signal.h>
28 #include <pthread.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <vppinfra/clib.h>
34 #include <vppinfra/vec.h>
35 #include <vppinfra/hash.h>
36 #include <vppinfra/bitmap.h>
37 #include <vppinfra/fifo.h>
38 #include <vppinfra/time.h>
39 #include <vppinfra/mheap.h>
40 #include <vppinfra/heap.h>
41 #include <vppinfra/pool.h>
42 #include <vppinfra/format.h>
43 
44 #include "svm.h"
45 
46 static svm_region_t *root_rp;
47 static int root_rp_refcount;
48 
49 #define MAXLOCK 2
50 static pthread_mutex_t *mutexes_held[MAXLOCK];
51 static int nheld;
52 
53 svm_region_t *
54 svm_get_root_rp (void)
55 {
56  return root_rp;
57 }
58 
59 #define MUTEX_DEBUG
60 
61 u64
62 svm_get_global_region_base_va ()
63 {
64 #if __aarch64__
65  /* On AArch64 the VA space can have different sizes, from 36 to 48 bits.
66  Here we detect the number of VA bits by parsing the address ranges
67  in /proc/self/maps */
68  int fd;
69  unformat_input_t input;
70  u64 start, end = 0;
71  u8 bits = 0;
72 
73  if ((fd = open ("/proc/self/maps", 0)) < 0)
74  clib_unix_error ("open '/proc/self/maps'");
75 
76  unformat_init_clib_file (&input, fd);
77  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
78  {
79  if (unformat (&input, "%llx-%llx", &start, &end))
80  end--;
81  unformat_skip_line (&input);
82  }
83  unformat_free (&input);
84  close (fd);
85 
86  bits = count_leading_zeros (end);
87  bits = 64 - bits;
88  if (bits >= 36 && bits <= 48)
89  return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
90  else
91  clib_unix_error ("unexpected va bits '%u'", bits);
92 #endif
93 
94  /* default value */
95  return 0x30000000;
96 }
97 
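/*
 * Worked example (sketch): with 48 VA bits detected, the base returned
 * above is (1ul << 48) / 4 - 2 * SVM_GLOBAL_REGION_SIZE, i.e. a quarter
 * of the way into the address space, backed off by two global-region
 * sizes.  Non-AArch64 builds use the fixed 0x30000000 default instead.
 */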
98 static void
99 region_lock (svm_region_t * rp, int tag)
100 {
101  pthread_mutex_lock (&rp->mutex);
102 #ifdef MUTEX_DEBUG
103  rp->mutex_owner_pid = getpid ();
104  rp->mutex_owner_tag = tag;
105 #endif
106  ASSERT (nheld < MAXLOCK);
107  /*
108  * Keep score of held mutexes so we can try to exit
109  * cleanly if the world comes to an end at the worst possible
110  * moment
111  */
112  mutexes_held[nheld++] = &rp->mutex;
113 }
114 
115 static void
116 region_unlock (svm_region_t * rp)
117 {
118  int i, j;
119 #ifdef MUTEX_DEBUG
120  rp->mutex_owner_pid = 0;
121  rp->mutex_owner_tag = 0;
122 #endif
123 
124  for (i = nheld - 1; i >= 0; i--)
125  {
126  if (mutexes_held[i] == &rp->mutex)
127  {
128  for (j = i; j < MAXLOCK - 1; j++)
129  mutexes_held[j] = mutexes_held[j + 1];
130  nheld--;
131  goto found;
132  }
133  }
134  ASSERT (0);
135 
136 found:
137  CLIB_MEMORY_BARRIER ();
138  pthread_mutex_unlock (&rp->mutex);
139 }
140 
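/*
 * Typical paired use of the helpers above (sketch; the tag value is
 * arbitrary and only recorded for post-mortem diagnostics):
 *
 *   region_lock (rp, 10);
 *   ... touch rp->data_base / rp->user_ctx ...
 *   region_unlock (rp);   // also drops &rp->mutex from mutexes_held[]
 */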
141 
142 static u8 *
143 format_svm_flags (u8 * s, va_list * args)
144 {
145  uword f = va_arg (*args, uword);
146 
147  if (f & SVM_FLAGS_MHEAP)
148  s = format (s, "MHEAP ");
149  if (f & SVM_FLAGS_FILE)
150  s = format (s, "FILE ");
151  if (f & SVM_FLAGS_NODATA)
152  s = format (s, "NODATA ");
153  if (f & SVM_FLAGS_NEED_DATA_INIT)
154  s = format (s, "INIT ");
155 
156  return (s);
157 }
158 
159 static u8 *
160 format_svm_size (u8 * s, va_list * args)
161 {
162  uword size = va_arg (*args, uword);
163 
164  if (size >= (1 << 20))
165  {
166  s = format (s, "(%d mb)", size >> 20);
167  }
168  else if (size >= (1 << 10))
169  {
170  s = format (s, "(%d kb)", size >> 10);
171  }
172  else
173  {
174  s = format (s, "(%d bytes)", size);
175  }
176  return (s);
177 }
178 
179 u8 *
180 format_svm_region (u8 * s, va_list * args)
181 {
182  svm_region_t *rp = va_arg (*args, svm_region_t *);
183  int verbose = va_arg (*args, int);
184  int i;
185  uword lo, hi;
186 
187  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
188  rp->region_name, rp->virtual_base,
189  rp->virtual_size, format_svm_size, rp->virtual_size);
190  s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
191  rp->user_ctx, rp->bitmap_size);
192 
193  if (verbose)
194  {
195  s = format (s, " flags: 0x%x %U\n", rp->flags,
196  format_svm_flags, rp->flags);
197  s = format (s,
198  " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
199  rp->region_heap, rp->data_base, rp->data_heap);
200  }
201 
202  s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));
203 
204  for (i = 0; i < vec_len (rp->client_pids); i++)
205  s = format (s, "%d ", rp->client_pids[i]);
206 
207  s = format (s, "\n");
208 
209  if (verbose)
210  {
211  lo = hi = ~0;
212 
213  s = format (s, " VM in use: ");
214 
215  for (i = 0; i < rp->bitmap_size; i++)
216  {
217  if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
218  {
219  if (lo == ~0)
220  {
221  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
222  }
223  else
224  {
225  hi = rp->virtual_base + i * MMAP_PAGESIZE;
226  }
227  }
228  else
229  {
230  if (lo != ~0)
231  {
232  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
233  s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,
234  (hi - lo) >> 10);
235  lo = hi = ~0;
236  }
237  }
238  }
239  s = format (s, " rgn heap stats: %U", format_mheap,
240  rp->region_heap, 0);
241  if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
242  {
243  s = format (s, "\n data heap stats: %U", format_mheap,
244  rp->data_heap, 1);
245  }
246  s = format (s, "\n");
247  }
248 
249  return (s);
250 }
251 
252 /*
253  * rnd_pagesize
254  * Round to a pagesize multiple, presumably 4k works
255  */
256 static u64
257 rnd_pagesize (u64 size)
258 {
259  u64 rv;
260 
261  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
262  return (rv);
263 }
264 
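/*
 * Example, assuming MMAP_PAGESIZE is 4 KB (0x1000):
 *   rnd_pagesize (1)      => 0x1000
 *   rnd_pagesize (0x1000) => 0x1000
 *   rnd_pagesize (0x1001) => 0x2000
 */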
265 /*
266  * svm_data_region_setup
267  */
268 static int
269 svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
270 {
271  int fd;
272  u8 junk = 0;
273  uword map_size;
274 
275  map_size = rp->virtual_size - (MMAP_PAGESIZE +
276  (a->pvt_heap_size ? a->pvt_heap_size :
277  SVM_PVT_MHEAP_SIZE));
278 
279  if (a->flags & SVM_FLAGS_FILE)
280  {
281  struct stat statb;
282 
283  fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);
284 
285  if (fd < 0)
286  {
287  clib_unix_warning ("open");
288  return -1;
289  }
290 
291  if (fstat (fd, &statb) < 0)
292  {
293  clib_unix_warning ("fstat");
294  close (fd);
295  return -2;
296  }
297 
298  if (statb.st_mode & S_IFREG)
299  {
300  if (statb.st_size == 0)
301  {
302  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
303  {
304  clib_unix_warning ("seek region size");
305  close (fd);
306  return -3;
307  }
308  if (write (fd, &junk, 1) != 1)
309  {
310  clib_unix_warning ("set region size");
311  close (fd);
312  return -3;
313  }
314  }
315  else
316  {
317  map_size = rnd_pagesize (statb.st_size);
318  }
319  }
320  else
321  {
322  map_size = a->backing_mmap_size;
323  }
324 
325  ASSERT (map_size <= rp->virtual_size -
326  (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));
327 
328  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
329  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
330  {
331  clib_unix_warning ("mmap");
332  close (fd);
333  return -3;
334  }
335  close (fd);
336  rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
337  rp->flags |= SVM_FLAGS_FILE;
338  }
339 
340  if (a->flags & SVM_FLAGS_MHEAP)
341  {
342  mheap_t *heap_header;
343  rp->data_heap =
344  mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
345  MHEAP_FLAG_DISABLE_VM);
346  heap_header = mheap_header (rp->data_heap);
347  heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
348 
349  rp->flags |= SVM_FLAGS_MHEAP;
350  }
351  return 0;
352 }
353 
354 static int
355 svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
356 {
357  int fd;
358  u8 junk = 0;
359  uword map_size;
360  struct stat statb;
361 
362  map_size = rp->virtual_size -
363  (MMAP_PAGESIZE
364  + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));
365 
366  if (a->flags & SVM_FLAGS_FILE)
367  {
368 
369  fd = open (a->backing_file, O_RDWR, 0777);
370 
371  if (fd < 0)
372  {
373  clib_unix_warning ("open");
374  return -1;
375  }
376 
377  if (fstat (fd, &statb) < 0)
378  {
379  clib_unix_warning ("fstat");
380  close (fd);
381  return -2;
382  }
383 
384  if (statb.st_mode & S_IFREG)
385  {
386  if (statb.st_size == 0)
387  {
388  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
389  {
390  clib_unix_warning ("seek region size");
391  close (fd);
392  return -3;
393  }
394  if (write (fd, &junk, 1) != 1)
395  {
396  clib_unix_warning ("set region size");
397  close (fd);
398  return -3;
399  }
400  }
401  else
402  {
403  map_size = rnd_pagesize (statb.st_size);
404  }
405  }
406  else
407  {
408  map_size = a->backing_mmap_size;
409  }
410 
411  ASSERT (map_size <= rp->virtual_size
412  - (MMAP_PAGESIZE
413  +
414  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
415 
416  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
417  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
418  {
419  clib_unix_warning ("mmap");
420  close (fd);
421  return -3;
422  }
423  close (fd);
424  }
425  return 0;
426 }
427 
428 u8 *
429 shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
430 {
431  u8 *path;
432  u8 *shm_name;
433  u8 *split_point;
434  u8 *mkdir_arg = 0;
435  int root_path_offset = 0;
436  int name_offset = 0;
437 
438  if (a->root_path)
439  {
440  /* Tolerate present or absent slashes */
441  if (a->root_path[0] == '/')
442  root_path_offset++;
443 
444  /* create the root_path under /dev/shm;
445  iterate through the path, creating directories */
446 
447  path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
448  split_point = path + 1;
449  vec_add1 (mkdir_arg, '-');
450 
451  while (*split_point)
452  {
453  while (*split_point && *split_point != '/')
454  {
455  vec_add1 (mkdir_arg, *split_point);
456  split_point++;
457  }
458  vec_add1 (mkdir_arg, 0);
459 
460  /* ready to descend another level */
461  mkdir_arg[vec_len (mkdir_arg) - 1] = '-';
462  split_point++;
463  }
464  vec_free (mkdir_arg);
465  vec_free (path);
466 
467  if (a->name[0] == '/')
468  name_offset = 1;
469 
470  shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
471  &a->name[name_offset], 0);
472  }
473  else
474  shm_name = format (0, "%s%c", a->name, 0);
475  return (shm_name);
476 }
477 
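/*
 * Example (illustrative values): with a->root_path = "vpp" and
 * a->name = "/global_vm", the function above returns "/vpp-global_vm",
 * so shm_open() ends up operating on /dev/shm/vpp-global_vm.  With no
 * root_path, the name is used unchanged.
 */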
478 void
479 svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
480 {
481  pthread_mutexattr_t attr;
482  pthread_condattr_t cattr;
483  int nbits, words, bit;
484  int overhead_space;
485  void *oldheap;
486  uword data_base;
487  ASSERT (rp);
488  int rv;
489 
490  memset (rp, 0, sizeof (*rp));
491 
492  if (pthread_mutexattr_init (&attr))
493  clib_unix_warning ("mutexattr_init");
494 
495  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
496  clib_unix_warning ("mutexattr_setpshared");
497 
498  if (pthread_mutex_init (&rp->mutex, &attr))
499  clib_unix_warning ("mutex_init");
500 
501  if (pthread_mutexattr_destroy (&attr))
502  clib_unix_warning ("mutexattr_destroy");
503 
504  if (pthread_condattr_init (&cattr))
505  clib_unix_warning ("condattr_init");
506 
507  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
508  clib_unix_warning ("condattr_setpshared");
509 
510  if (pthread_cond_init (&rp->condvar, &cattr))
511  clib_unix_warning ("cond_init");
512 
513  if (pthread_condattr_destroy (&cattr))
514  clib_unix_warning ("condattr_destroy");
515 
516  region_lock (rp, 1);
517 
518  rp->virtual_base = a->baseva;
519  rp->virtual_size = a->size;
520 
521  rp->region_heap =
522  mheap_alloc_with_flags (uword_to_pointer
523  (a->baseva + MMAP_PAGESIZE, void *),
524  (a->pvt_heap_size !=
525  0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
526  MHEAP_FLAG_DISABLE_VM);
527  oldheap = svm_push_pvt_heap (rp);
528 
529  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
530  vec_add1 (rp->client_pids, getpid ());
531 
532  nbits = rp->virtual_size / MMAP_PAGESIZE;
533 
534  ASSERT (nbits > 0);
535  rp->bitmap_size = nbits;
536  words = (nbits + BITS (uword) - 1) / BITS (uword);
537  vec_validate (rp->bitmap, words - 1);
538 
539  overhead_space = MMAP_PAGESIZE /* header */ +
540  ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
541 
542  bit = 0;
543  data_base = (uword) rp->virtual_base;
544 
545  if (a->flags & SVM_FLAGS_NODATA)
546  rp->flags |= SVM_FLAGS_NEED_DATA_INIT;
547 
548  do
549  {
550  clib_bitmap_set_no_check (rp->bitmap, bit, 1);
551  bit++;
552  overhead_space -= MMAP_PAGESIZE;
553  data_base += MMAP_PAGESIZE;
554  }
555  while (overhead_space > 0);
556 
557  rp->data_base = (void *) data_base;
558 
559  /*
560  * Note: although the POSIX spec guarantees that only one
561  * process enters this block, we have to play games
562  * to hold off clients until e.g. the mutex is ready
563  */
564  rp->version = SVM_VERSION;
565 
566  /* setup the data portion of the region */
567 
568  rv = svm_data_region_create (a, rp);
569  if (rv)
570  {
571  clib_warning ("data_region_create: %d", rv);
572  }
573 
574  region_unlock (rp);
575 
576  svm_pop_heap (oldheap);
577 }
578 
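/*
 * Resulting layout of a freshly initialized region (sketch):
 *
 *   [ page 0: svm_region_t header ][ private region heap ][ data ... ]
 *   ^ rp->virtual_base                                    ^ rp->data_base
 *
 * The do/while loop above marks the header and private-heap pages as
 * allocated in rp->bitmap before the data portion is created.
 */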
579 /*
580  * svm_map_region
581  */
582 void *
583 svm_map_region (svm_map_region_args_t * a)
584 {
585  int svm_fd;
586  svm_region_t *rp;
587  int deadman = 0;
588  u8 junk = 0;
589  void *oldheap;
590  int rv;
591  int pid_holding_region_lock;
592  u8 *shm_name;
593  int dead_region_recovery = 0;
594  int time_left;
595  struct stat stat;
596  struct timespec ts, tsrem;
597 
598  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
599  ASSERT (a->name);
600 
601  shm_name = shm_name_from_svm_map_region_args (a);
602 
603  if (CLIB_DEBUG > 1)
604  clib_warning ("[%d] map region %s: shm_open (%s)",
605  getpid (), a->name, shm_name);
606 
607  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);
608 
609  if (svm_fd >= 0)
610  {
611  if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
612  clib_unix_warning ("segment chmod");
613  /* This turns out to fail harmlessly if the client starts first */
614  if (fchown (svm_fd, a->uid, a->gid) < 0)
615  clib_unix_warning ("segment chown [ok if client starts first]");
616 
617  vec_free (shm_name);
618 
619  if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
620  {
621  clib_warning ("seek region size");
622  close (svm_fd);
623  return (0);
624  }
625  if (write (svm_fd, &junk, 1) != 1)
626  {
627  clib_warning ("set region size");
628  close (svm_fd);
629  return (0);
630  }
631 
632  rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
633  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);
634 
635  if (rp == (svm_region_t *) MAP_FAILED)
636  {
637  clib_unix_warning ("mmap create");
638  close (svm_fd);
639  return (0);
640  }
641  close (svm_fd);
642 
643  svm_region_init_mapped_region (a, rp);
644 
645  return ((void *) rp);
646  }
647  else
648  {
649  svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);
650 
651  vec_free (shm_name);
652 
653  if (svm_fd < 0)
654  {
655  perror ("svm_region_map(mmap open)");
656  return (0);
657  }
658 
659  /* Reset ownership in case the client started first */
660  if (fchown (svm_fd, a->uid, a->gid) < 0)
661  clib_unix_warning ("segment chown [ok if client starts first]");
662 
663  time_left = 20;
664  while (1)
665  {
666  if (0 != fstat (svm_fd, &stat))
667  {
668  clib_warning ("fstat failed: %d", errno);
669  close (svm_fd);
670  return (0);
671  }
672  if (stat.st_size > 0)
673  {
674  break;
675  }
676  if (0 == time_left)
677  {
678  clib_warning ("waiting for resize of shm file timed out");
679  close (svm_fd);
680  return (0);
681  }
682  ts.tv_sec = 0;
683  ts.tv_nsec = 100000000;
684  while (nanosleep (&ts, &tsrem) < 0)
685  ts = tsrem;
686  time_left--;
687  }
688 
689  rp = mmap (0, MMAP_PAGESIZE,
690  PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);
691 
692  if (rp == (svm_region_t *) MAP_FAILED)
693  {
694  close (svm_fd);
695  clib_warning ("mmap");
696  return (0);
697  }
698  /*
699  * We lost the footrace to create this region; make sure
700  * the winner has crossed the finish line.
701  */
702  while (rp->version == 0 && deadman++ < 5)
703  {
704  sleep (1);
705  }
706 
707  /*
708  * <bleep>-ed?
709  */
710  if (rp->version == 0)
711  {
712  clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
713  close (svm_fd);
714  munmap (rp, a->size);
715  return (0);
716  }
717  /* Remap now that the region has been placed */
718  a->baseva = rp->virtual_base;
719  a->size = rp->virtual_size;
720  munmap (rp, MMAP_PAGESIZE);
721 
722  rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
723  PROT_READ | PROT_WRITE,
724  MAP_SHARED | MAP_FIXED, svm_fd, 0);
725  if ((uword) rp == (uword) MAP_FAILED)
726  {
727  clib_unix_warning ("mmap");
728  close (svm_fd);
729  return (0);
730  }
731 
732  close (svm_fd);
733 
734  if ((uword) rp != rp->virtual_base)
735  {
736  clib_warning ("mmap botch");
737  }
738 
739  /*
740  * Try to fix the region mutex if it is held by
741  * a dead process
742  */
743  pid_holding_region_lock = rp->mutex_owner_pid;
744  if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
745  {
747  ("region %s mutex held by dead pid %d, tag %d, force unlock",
748  rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
749  /* owner pid is nonexistent */
750  rp->mutex.__data.__owner = 0;
751  rp->mutex.__data.__lock = 0;
752  dead_region_recovery = 1;
753  }
754 
755  if (dead_region_recovery)
756  clib_warning ("recovery: attempt to re-lock region");
757 
758  region_lock (rp, 2);
759  oldheap = svm_push_pvt_heap (rp);
760  vec_add1 (rp->client_pids, getpid ());
761 
762  if (dead_region_recovery)
763  clib_warning ("recovery: attempt svm_data_region_map");
764 
765  rv = svm_data_region_map (a, rp);
766  if (rv)
767  {
768  clib_warning ("data_region_map: %d", rv);
769  }
770 
771  if (dead_region_recovery)
772  clib_warning ("unlock and continue");
773 
774  region_unlock (rp);
775 
776  svm_pop_heap (oldheap);
777 
778  return ((void *) rp);
779 
780  }
781  return 0; /* NOTREACHED */
782 }
783 
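/*
 * Create/attach protocol used above, in brief:
 *   - shm_open (O_CREAT | O_EXCL) succeeds -> we are the creator: size
 *     the segment, mmap it MAP_FIXED at a->baseva, initialize the header;
 *   - otherwise -> attach: wait for the creator to size the file and set
 *     rp->version, then remap MAP_FIXED at rp->virtual_base.
 */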
784 static void
785 svm_mutex_cleanup (void)
786 {
787  int i;
788  for (i = 0; i < nheld; i++)
789  {
790  pthread_mutex_unlock (mutexes_held[i]);
791  }
792 }
793 
794 static int
795 svm_region_init_internal (svm_map_region_args_t * a)
796 {
797  svm_region_t *rp;
798  u64 ticks = clib_cpu_time_now ();
799  uword randomize_baseva;
800 
801  /* guard against klutz calls */
802  if (root_rp)
803  return -1;
804 
805  root_rp_refcount++;
806 
807  atexit (svm_mutex_cleanup);
808 
809  /* Randomize the shared-VM base at init time */
810  if (MMAP_PAGESIZE <= (4 << 10))
811  randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
812  else
813  randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;
814 
815  a->baseva += randomize_baseva;
816 
817  rp = svm_map_region (a);
818  if (!rp)
819  return -1;
820 
821  region_lock (rp, 3);
822 
823  /* Set up the main region data structures */
824  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
825  {
826  svm_main_region_t *mp = 0;
827  void *oldheap;
828 
829  rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);
830 
831  oldheap = svm_push_pvt_heap (rp);
832  vec_validate (mp, 0);
833  mp->name_hash = hash_create_string (0, sizeof (uword));
834  mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
835  mp->uid = a->uid;
836  mp->gid = a->gid;
837  rp->data_base = mp;
838  svm_pop_heap (oldheap);
839  }
840  region_unlock (rp);
841  root_rp = rp;
842 
843  return 0;
844 }
845 
846 void
847 svm_region_init (void)
848 {
849  svm_map_region_args_t _a, *a = &_a;
850 
851  memset (a, 0, sizeof (*a));
852  a->root_path = 0;
853  a->name = SVM_GLOBAL_REGION_NAME;
854  a->baseva = svm_get_global_region_base_va ();
855  a->size = SVM_GLOBAL_REGION_SIZE;
856  a->flags = SVM_FLAGS_NODATA;
857  a->uid = 0;
858  a->gid = 0;
859 
860  svm_region_init_internal (a);
861 }
862 
863 int
864 svm_region_init_chroot (const char *root_path)
865 {
866  svm_map_region_args_t _a, *a = &_a;
867 
868  memset (a, 0, sizeof (*a));
869  a->root_path = root_path;
870  a->name = SVM_GLOBAL_REGION_NAME;
871  a->baseva = svm_get_global_region_base_va ();
872  a->size = SVM_GLOBAL_REGION_SIZE;
873  a->flags = SVM_FLAGS_NODATA;
874  a->uid = 0;
875  a->gid = 0;
876 
877  return svm_region_init_internal (a);
878 }
879 
880 void
881 svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
882 {
883  svm_map_region_args_t _a, *a = &_a;
884 
885  memset (a, 0, sizeof (*a));
886  a->root_path = root_path;
887  a->name = SVM_GLOBAL_REGION_NAME;
888  a->baseva = svm_get_global_region_base_va ();
889  a->size = SVM_GLOBAL_REGION_SIZE;
890  a->flags = SVM_FLAGS_NODATA;
891  a->uid = uid;
892  a->gid = gid;
893 
894  svm_region_init_internal (a);
895 }
896 
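/*
 * Typical client life cycle (sketch; names and sizes are illustrative):
 *
 *   svm_region_init_chroot ("/my-root");      // map the root region
 *   svm_map_region_args_t _a, *a = &_a;
 *   memset (a, 0, sizeof (*a));
 *   a->name = "/my-subregion";
 *   a->size = 2 << 20;
 *   a->flags = SVM_FLAGS_MHEAP;
 *   svm_region_t *rp = svm_region_find_or_create (a);
 *   ...
 *   svm_region_unmap (rp);                    // drop the subregion
 *   svm_region_exit ();                       // drop the root region
 */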
897 void
898 svm_region_init_args (svm_map_region_args_t * a)
899 {
900  svm_region_init_internal (a);
901 }
902 
903 void *
904 svm_region_find_or_create (svm_map_region_args_t * a)
905 {
906  svm_main_region_t *mp;
907  svm_region_t *rp;
908  uword need_nbits;
909  int index, i;
910  void *oldheap;
911  uword *p;
912  u8 *name;
913  svm_subregion_t *subp;
914 
915  ASSERT (root_rp);
916 
917  a->size += MMAP_PAGESIZE +
918  ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
919  a->size = rnd_pagesize (a->size);
920 
921  region_lock (root_rp, 4);
922  oldheap = svm_push_pvt_heap (root_rp);
923  mp = root_rp->data_base;
924 
925  ASSERT (mp);
926 
927  /* Map the named region from the correct chroot environment */
928  if (a->root_path == NULL)
929  a->root_path = (char *) mp->root_path;
930 
931  /*
932  * See if this region is already known. If it is, we're
933  * almost done...
934  */
935  p = hash_get_mem (mp->name_hash, a->name);
936 
937  if (p)
938  {
939  rp = svm_map_region (a);
940  region_unlock (root_rp);
941  svm_pop_heap (oldheap);
942  return rp;
943  }
944 
945  /* Create the region. */
946  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
947 
948  need_nbits = a->size / MMAP_PAGESIZE;
949 
950  index = 1; /* $$$ fixme, figure out how many bits to really skip */
951 
952  /*
953  * Scan the virtual space allocation bitmap, looking for a large
954  * enough chunk
955  */
956  do
957  {
958  if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
959  {
960  for (i = 0; i < (need_nbits - 1); i++)
961  {
962  if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
963  {
964  index = index + i;
965  goto next;
966  }
967  }
968  break;
969  }
970  index++;
971  next:;
972  }
973  while (index < root_rp->bitmap_size);
974 
975  /* Completely out of VM? */
976  if (index >= root_rp->bitmap_size)
977  {
978  clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
979  root_rp->region_name, a->size, a->size);
980  svm_pop_heap (oldheap);
981  region_unlock (root_rp);
982  return 0;
983  }
984 
985  /*
986  * Mark virtual space allocated
987  */
988 #if CLIB_DEBUG > 1
989  clib_warning ("set %d bits at index %d", need_nbits, index);
990 #endif
991 
992  for (i = 0; i < need_nbits; i++)
993  {
994  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
995  }
996 
997  /* Place this region where it goes... */
998  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;
999 
1000  rp = svm_map_region (a);
1001 
1002  pool_get (mp->subregions, subp);
1003  name = format (0, "%s%c", a->name, 0);
1004  subp->subregion_name = name;
1005 
1006  hash_set_mem (mp->name_hash, name, subp - mp->subregions);
1007 
1008  svm_pop_heap (oldheap);
1009 
1010  region_unlock (root_rp);
1011 
1012  return (rp);
1013 }
1014 
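/*
 * Example of the first-fit scan above, assuming 4 KB pages: a rounded,
 * overhead-inclusive a->size of 64 KB needs need_nbits = 16 consecutive
 * clear bits; the chosen run at bitmap position 'index' maps to
 * baseva = root_rp->virtual_base + index * MMAP_PAGESIZE.
 */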
1015 void
1016 svm_region_unlink (svm_region_t * rp)
1017 {
1018  svm_map_region_args_t _a, *a = &_a;
1019  svm_main_region_t *mp;
1020  u8 *shm_name;
1021 
1022  ASSERT (root_rp);
1023  ASSERT (rp);
1024  ASSERT (vec_c_string_is_terminated (rp->region_name));
1025 
1026  mp = root_rp->data_base;
1027  ASSERT (mp);
1028 
1029  a->root_path = (char *) mp->root_path;
1030  a->name = rp->region_name;
1031  shm_name = shm_name_from_svm_map_region_args (a);
1032  if (CLIB_DEBUG > 1)
1033  clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
1034  shm_unlink ((const char *) shm_name);
1035  vec_free (shm_name);
1036 }
1037 
1038 /*
1039  * svm_region_unmap
1040  *
1041  * Let go of the indicated region. If the calling process
1042  * is the last customer, throw it away completely.
1043  * The root region mutex guarantees atomicity with respect to
1044  * a new region client showing up at the wrong moment.
1045  */
1046 void
1047 svm_region_unmap_internal (void *rp_arg, u8 is_client)
1048 {
1049  int i, mypid = getpid ();
1050  int nclients_left;
1051  void *oldheap;
1052  uword virtual_base, virtual_size;
1053  svm_region_t *rp = rp_arg;
1054  char *name;
1055 
1056  /*
1057  * If we take a signal while holding one or more shared-memory
1058  * mutexes, we may end up back here from an otherwise
1059  * benign exit handler. Bail out to avoid a recursive
1060  * mutex screw-up.
1061  */
1062  if (nheld)
1063  return;
1064 
1065  ASSERT (rp);
1066  ASSERT (root_rp);
1067 
1068  if (CLIB_DEBUG > 1)
1069  clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);
1070 
1071  region_lock (root_rp, 5);
1072  region_lock (rp, 6);
1073 
1074  oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */
1075 
1076  /* Remove the caller from the list of mappers */
1077  for (i = 0; i < vec_len (rp->client_pids); i++)
1078  {
1079  if (rp->client_pids[i] == mypid)
1080  {
1081  vec_delete (rp->client_pids, 1, i);
1082  goto found;
1083  }
1084  }
1085  clib_warning ("pid %d AWOL", mypid);
1086 
1087 found:
1088 
1089  svm_pop_heap (oldheap);
1090 
1091  nclients_left = vec_len (rp->client_pids);
1092  virtual_base = rp->virtual_base;
1093  virtual_size = rp->virtual_size;
1094 
1095  if (nclients_left == 0)
1096  {
1097  int index, nbits, i;
1098  svm_main_region_t *mp;
1099  uword *p;
1100  svm_subregion_t *subp;
1101 
1102  /* Kill the region, last guy on his way out */
1103 
1104  oldheap = svm_push_pvt_heap (root_rp);
1105  name = vec_dup (rp->region_name);
1106 
1107  virtual_base = rp->virtual_base;
1108  virtual_size = rp->virtual_size;
1109 
1110  /* Figure out which bits to clear in the root region bitmap */
1111  index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;
1112 
1113  nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;
1114 
1115 #if CLIB_DEBUG > 1
1116  clib_warning ("clear %d bits at index %d", nbits, index);
1117 #endif
1118  /* Give back the allocated VM */
1119  for (i = 0; i < nbits; i++)
1120  {
1121  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
1122  }
1123 
1124  mp = root_rp->data_base;
1125 
1126  p = hash_get_mem (mp->name_hash, name);
1127 
1128  /* Better never happen ... */
1129  if (p == NULL)
1130  {
1131  region_unlock (rp);
1132  region_unlock (root_rp);
1133  svm_pop_heap (oldheap);
1134  clib_warning ("Region name '%s' not found?", name);
1135  return;
1136  }
1137 
1138  /* Remove from the root region subregion pool */
1139  subp = mp->subregions + p[0];
1140  pool_put (mp->subregions, subp);
1141 
1142  hash_unset_mem (mp->name_hash, name);
1143 
1144  vec_free (name);
1145 
1146  region_unlock (rp);
1147 
1148  /* If a client asks for the cleanup, don't unlink the backing
1149  * file since we can't tell if it has been recreated. */
1150  if (!is_client)
1151  svm_region_unlink (rp);
1152 
1153  munmap ((void *) virtual_base, virtual_size);
1154  region_unlock (root_rp);
1155  svm_pop_heap (oldheap);
1156  return;
1157  }
1158 
1159  region_unlock (rp);
1160  region_unlock (root_rp);
1161 
1162  munmap ((void *) virtual_base, virtual_size);
1163 }
1164 
1165 void
1166 svm_region_unmap (void *rp_arg)
1167 {
1168  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
1169 }
1170 
1171 void
1172 svm_region_unmap_client (void *rp_arg)
1173 {
1174  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
1175 }
1176 
1177 /*
1178  * svm_region_exit
1179  */
1180 static void
1181 svm_region_exit_internal (u8 is_client)
1182 {
1183  void *oldheap;
1184  int i, mypid = getpid ();
1185  uword virtual_base, virtual_size;
1186 
1187  /* It felt so nice we did it twice... */
1188  if (root_rp == 0)
1189  return;
1190 
1191  if (--root_rp_refcount > 0)
1192  return;
1193 
1194  /*
1195  * If we take a signal while holding one or more shared-memory
1196  * mutexes, we may end up back here from an otherwise
1197  * benign exit handler. Bail out to avoid a recursive
1198  * mutex screw-up.
1199  */
1200  if (nheld)
1201  return;
1202 
1203  region_lock (root_rp, 7);
1204  oldheap = svm_push_pvt_heap (root_rp);
1205 
1206  virtual_base = root_rp->virtual_base;
1207  virtual_size = root_rp->virtual_size;
1208 
1209  for (i = 0; i < vec_len (root_rp->client_pids); i++)
1210  {
1211  if (root_rp->client_pids[i] == mypid)
1212  {
1213  vec_delete (root_rp->client_pids, 1, i);
1214  goto found;
1215  }
1216  }
1217  clib_warning ("pid %d AWOL", mypid);
1218 
1219 found:
1220 
1221  if (!is_client && vec_len (root_rp->client_pids) == 0)
1222  svm_region_unlink (root_rp);
1223 
1224  region_unlock (root_rp);
1225  svm_pop_heap (oldheap);
1226 
1227  root_rp = 0;
1228  munmap ((void *) virtual_base, virtual_size);
1229 }
1230 
1231 void
1232 svm_region_exit (void)
1233 {
1234  svm_region_exit_internal (0 /* is_client */ );
1235 }
1236 
1237 void
1238 svm_region_exit_client (void)
1239 {
1240  svm_region_exit_internal (1 /* is_client */ );
1241 }
1242 
1243 void
1244 svm_client_scan_this_region_nolock (svm_region_t * rp)
1245 {
1246  int j;
1247  int mypid = getpid ();
1248  void *oldheap;
1249 
1250  for (j = 0; j < vec_len (rp->client_pids); j++)
1251  {
1252  if (mypid == rp->client_pids[j])
1253  continue;
1254  if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
1255  {
1256  clib_warning ("%s: cleanup ghost pid %d",
1257  rp->region_name, rp->client_pids[j]);
1258  /* nb: client vec in rp->region_heap */
1259  oldheap = svm_push_pvt_heap (rp);
1260  vec_delete (rp->client_pids, 1, j);
1261  j--;
1262  svm_pop_heap (oldheap);
1263  }
1264  }
1265 }
1266 
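/*
 * Note: kill (pid, 0) delivers no signal; it only tests whether the pid
 * still exists, which is how dead clients are detected here and in
 * svm_client_scan () below.
 */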
1267 
1268 /*
1269  * Scan svm regions for dead clients
1270  */
1271 void
1272 svm_client_scan (const char *root_path)
1273 {
1274  int i, j;
1275  svm_main_region_t *mp;
1276  svm_map_region_args_t *a = 0;
1277  svm_region_t *root_rp;
1278  svm_region_t *rp;
1279  svm_subregion_t *subp;
1280  u8 *name = 0;
1281  u8 **svm_names = 0;
1282  void *oldheap;
1283  int mypid = getpid ();
1284 
1285  vec_validate (a, 0);
1286 
1287  svm_region_init_chroot (root_path);
1288 
1289  root_rp = svm_get_root_rp ();
1290 
1291  pthread_mutex_lock (&root_rp->mutex);
1292 
1293  mp = root_rp->data_base;
1294 
1295  for (j = 0; j < vec_len (root_rp->client_pids); j++)
1296  {
1297  if (mypid == root_rp->client_pids[j])
1298  continue;
1299  if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
1300  {
1301  clib_warning ("%s: cleanup ghost pid %d",
1302  root_rp->region_name, root_rp->client_pids[j]);
1303  /* nb: client vec in root_rp->region_heap */
1304  oldheap = svm_push_pvt_heap (root_rp);
1305  vec_delete (root_rp->client_pids, 1, j);
1306  j--;
1307  svm_pop_heap (oldheap);
1308  }
1309  }
1310 
1311  /*
1312  * Snapshot names; we can't hold the root rp mutex across
1313  * find_or_create.
1314  */
1315  /* *INDENT-OFF* */
1316  pool_foreach (subp, mp->subregions, ({
1317  name = vec_dup (subp->subregion_name);
1318  vec_add1(svm_names, name);
1319  }));
1320  /* *INDENT-ON* */
1321 
1322  pthread_mutex_unlock (&root_rp->mutex);
1323 
1324  for (i = 0; i < vec_len (svm_names); i++)
1325  {
1326  vec_validate (a, 0);
1327  a->root_path = root_path;
1328  a->name = (char *) svm_names[i];
1329  rp = svm_region_find_or_create (a);
1330  if (rp)
1331  {
1332  pthread_mutex_lock (&rp->mutex);
1333 
1334  svm_client_scan_this_region_nolock (rp);
1335 
1336  pthread_mutex_unlock (&rp->mutex);
1337  svm_region_unmap (rp);
1338  vec_free (svm_names[i]);
1339  }
1340  vec_free (a);
1341  }
1342  vec_free (svm_names);
1343 
1344  svm_region_exit ();
1345 
1346  vec_free (a);
1347 }
1348 
1349 /*
1350  * fd.io coding-style-patch-verification: ON
1351  *
1352  * Local Variables:
1353  * eval: (c-set-style "gnu")
1354  * End:
1355  */
Definition: svm.c:1181