
plan9front: sys/src/libc/port/pool.c

changeset 4807: fbd2319ae4be
parent: de32ddc1b04b
child: a6be9152730c
author: cinap_lenrek@felloff.net
date: Fri, 21 Aug 2015 03:32:05 +0200
permissions: -rw-r--r--
description: authsrv: randomize aes key in mkkey(), not used yet.
/*
 * This allocator takes blocks from a coarser allocator (p->alloc) and
 * uses them as arenas.
 *
 * An arena is split into a sequence of blocks of variable size. The
 * blocks begin with a Bhdr that denotes the length (including the Bhdr)
 * of the block. An arena begins with an Arena header block (Arena,
 * ARENA_MAGIC) and ends with a Bhdr block with magic ARENATAIL_MAGIC and
 * size 0. Intermediate blocks are either allocated or free. At the end
 * of each intermediate block is a Btail, which contains information
 * about where the block starts. This is useful for walking backwards.
 *
 * Free blocks (Free*) have a magic value of FREE_MAGIC in their Bhdr
 * headers. They are kept in a binary tree (p->freeroot) traversable by
 * walking ->left and ->right. Each node of the binary tree is a pointer
 * to a circular doubly-linked list (next, prev) of blocks of identical
 * size. Blocks are added to this ``tree of lists'' by pooladd(), and
 * removed by pooldel().
 *
 * When freed, adjacent blocks are coalesced to create larger blocks when
 * possible.
 *
 * Allocated blocks (Alloc*) have one of two magic values: ALLOC_MAGIC or
 * UNALLOC_MAGIC. When blocks are released from the pool, they have
 * magic value UNALLOC_MAGIC. Once the block has been trimmed by trim()
 * and the amount of user-requested data has been recorded in the
 * datasize field of the tail, the magic value is changed to ALLOC_MAGIC.
 * All blocks returned to callers should be of type ALLOC_MAGIC, as
 * should all blocks passed to us by callers. The amount of data the user
 * asked us for can be found by subtracting the short in tail->datasize
 * from header->size. Further, the at most four bytes between the
 * end of the user-requested data block and the actual Btail structure are
 * marked with a magic value, which is checked to detect user overflow.
 *
 * The arenas returned by p->alloc are kept in a doubly-linked list
 * (p->arenalist) running through the arena headers, sorted by descending
 * base address (prev, next). When a new arena is allocated, we attempt
 * to merge it with its two neighbors via p->merge.
 */
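
/*
 * Editor's illustration (not part of the original source; sizes assume
 * a 32-bit machine, where sizeof(Bhdr) == sizeof(Btail) == 8): suppose
 * a block has header->size = 48 and the user asked for 25 bytes.
 * blocksetdsize() stores 48-25 = 23 in tail->datasize, so getdsize()
 * later recovers 48 - 23 = 25. That leaves 48-8-25-8 = 7 bytes between
 * the user data and the Btail; the first four of them are written with
 * datamagic[] values and verified by blockcheck().
 */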

#include <u.h>
#include <libc.h>
#include <pool.h>

typedef struct Alloc	Alloc;
typedef struct Arena	Arena;
typedef struct Bhdr	Bhdr;
typedef struct Btail	Btail;
typedef struct Free	Free;

struct Bhdr {
	ulong	magic;
	ulong	size;
};
enum {
	NOT_MAGIC = 0xdeadfa11,
	DEAD_MAGIC = 0xdeaddead,
};
#define B2NB(b) ((Bhdr*)((uchar*)(b)+(b)->size))

#define SHORT(x) (((x)[0] << 8) | (x)[1])
#define PSHORT(p, x) \
	(((uchar*)(p))[0] = ((x)>>8)&0xFF, \
	((uchar*)(p))[1] = (x)&0xFF)
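
/*
 * Editor's note (illustration, not in the original): SHORT and PSHORT
 * read and write a 16-bit value big-endian, one byte at a time, because
 * datasize sits at an odd offset inside Btail and cannot be accessed as
 * an aligned short. For example:
 *
 *	uchar buf[2];
 *
 *	PSHORT(buf, 0x1234);		// buf[0] = 0x12, buf[1] = 0x34
 *	assert(SHORT(buf) == 0x1234);
 */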

enum {
	TAIL_MAGIC0 = 0xBE,
	TAIL_MAGIC1 = 0xEF
};
struct Btail {
	uchar	magic0;
	uchar	datasize[2];
	uchar	magic1;
	ulong	size;	/* same as Bhdr->size */
};
#define B2T(b) ((Btail*)((uchar*)(b)+(b)->size-sizeof(Btail)))
#define B2PT(b) ((Btail*)((uchar*)(b)-sizeof(Btail)))
#define T2HDR(t) ((Bhdr*)((uchar*)(t)+sizeof(Btail)-(t)->size))
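
/*
 * Editor's sketch (not in the original): how the macros above move
 * between neighboring blocks. For any block b,
 *
 *	Btail *t = B2T(b);	// tail of b, at (uchar*)b + b->size - sizeof(Btail)
 *	Bhdr *n = B2NB(b);	// header of the block just after b
 *	Btail *pt = B2PT(n);	// == t: tail of the block just before n
 *	Bhdr *back = T2HDR(pt);	// == b: recovered from the size stored in the tail
 *
 * The size copy kept in each Btail is what makes the backward walk
 * (B2PT followed by T2HDR) possible.
 */
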
struct Free {
	Bhdr;
	Free*	left;
	Free*	right;
	Free*	next;
	Free*	prev;
};
enum {
	FREE_MAGIC = 0xBA5EBA11,
};

/*
 * the point of the notused fields is to make 8c differentiate
 * between Bhdr and Allocblk, and between Kempt and Unkempt.
 */
struct Alloc {
	Bhdr;
};
enum {
	ALLOC_MAGIC = 0x0A110C09,
	UNALLOC_MAGIC = 0xCAB00D1E+1,
};

struct Arena {
	Bhdr;
	Arena*	aup;
	Arena*	down;
	ulong	asize;
	ulong	pad;	/* to a multiple of 8 bytes */
};
enum {
	ARENA_MAGIC = 0xC0A1E5CE+1,
	ARENATAIL_MAGIC = 0xEC5E1A0C+1,
};
#define A2TB(a)	((Bhdr*)((uchar*)(a)+(a)->asize-sizeof(Bhdr)))
#define A2B(a)	B2NB(a)

enum {
	ALIGN_MAGIC = 0xA1F1D1C1,
};

enum {
	MINBLOCKSIZE = sizeof(Free)+sizeof(Btail)
};

static uchar datamagic[] = { 0xFE, 0xF1, 0xF0, 0xFA };

#define	Poison	((void*)-0x35014542)	/* cafebabe */

#define _B2D(a)	((void*)((uchar*)a+sizeof(Bhdr)))
#define _D2B(v)	((Alloc*)((uchar*)v-sizeof(Bhdr)))

// static void*	_B2D(void*);
// static void*	_D2B(void*);
static void*	B2D(Pool*, Alloc*);
static Alloc*	D2B(Pool*, void*);
static Arena*	arenamerge(Pool*, Arena*, Arena*);
static void	blockcheck(Pool*, Bhdr*);
static Alloc*	blockmerge(Pool*, Bhdr*, Bhdr*);
static Alloc*	blocksetdsize(Pool*, Alloc*, ulong);
static Bhdr*	blocksetsize(Bhdr*, ulong);
static ulong	bsize2asize(Pool*, ulong);
static ulong	dsize2bsize(Pool*, ulong);
static ulong	getdsize(Alloc*);
static Alloc*	trim(Pool*, Alloc*, ulong);
static void	logstack(Pool*);
static void	memmark(void*, int, ulong);
static Free*	pooladd(Pool*, Alloc*);
static void*	poolallocl(Pool*, ulong);
static void	poolcheckl(Pool*);
static void	poolcheckarena(Pool*, Arena*);
static int	poolcompactl(Pool*);
static Alloc*	pooldel(Pool*, Free*);
static void	pooldumpl(Pool*);
static void	pooldumparena(Pool*, Arena*);
static void	poolfreel(Pool*, void*);
static void	poolnewarena(Pool*, ulong);
static void*	poolreallocl(Pool*, void*, ulong);
static Free*	treelookupgt(Free*, ulong);
static Free*	treesplay(Free*, ulong);

/*
 * Debugging
 *
 * Antagonism causes blocks to always be filled with garbage if their
 * contents are undefined. This tickles both programs and the library.
 * It's a linear time hit but not so noticeable during nondegenerate use.
 * It would be worth leaving in except that it negates the benefits of the
 * kernel's demand-paging. The tail magic and end-of-data magic
 * provide most of the user-visible benefit that antagonism does anyway.
 *
 * Paranoia causes the library to recheck the entire pool on each lock
 * or unlock. A failed check on unlock means we tripped over ourselves,
 * while a failed check on lock tends to implicate the user. Paranoia has
 * the potential to slow things down a fair amount for pools with large
 * numbers of allocated blocks. It completely negates all benefits won
 * by the binary tree. Turning on paranoia in the kernel makes it painfully
 * slow.
 *
 * Verbosity induces the dumping of the pool via p->print at each lock operation.
 * By default, only one line is logged for each alloc, free, and realloc.
 */

/* the if(!x);else avoids ``dangling else'' problems */
#define antagonism	if(!(p->flags & POOL_ANTAGONISM)){}else
#define paranoia	if(!(p->flags & POOL_PARANOIA)){}else
#define verbosity	if(!(p->flags & POOL_VERBOSITY)){}else

#define DPRINT	if(!(p->flags & POOL_DEBUGGING)){}else p->print
#define LOG	if(!(p->flags & POOL_LOGGING)){}else p->print
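
/*
 * Editor's note (illustration, not in the original): these macros make
 * flag tests read like control structures. For example,
 *
 *	paranoia {
 *		poolcheckl(p);
 *	}
 *
 * expands to
 *
 *	if(!(p->flags & POOL_PARANOIA)){}else {
 *		poolcheckl(p);
 *	}
 *
 * so the body runs only when the flag is set, and an else following the
 * macro cannot attach itself to the hidden if.
 */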

/*
 * Tree walking
 */

static void
checklist(Free *t)
{
	Free *q;

	for(q=t->next; q!=t; q=q->next){
		assert(q->magic==FREE_MAGIC);
		assert(q->size==t->size);
		assert(q->left==Poison);
		assert(q->right==Poison);
		assert(q->next!=nil && q->next!=Poison && q->next->prev==q);
		assert(q->prev!=nil && q->prev!=Poison && q->prev->next==q);
	}
}

static void
checktree(Free *t, int a, int b)
{
	assert(t->magic==FREE_MAGIC);
	assert(a < t->size && t->size < b);
	assert(t->left!=Poison);
	assert(t->right!=Poison);
	assert(t->next!=nil && t->next!=Poison && t->next->prev==t);
	assert(t->prev!=nil && t->prev!=Poison && t->prev->next==t);
	checklist(t);
	if(t->left)
		checktree(t->left, a, t->size);
	if(t->right)
		checktree(t->right, t->size, b);
}

/* treelookupgt: find smallest node in tree with size >= size */
static Free*
treelookupgt(Free *t, ulong size)
{
	Free *lastgood;	/* last node we saw that was big enough */

	lastgood = nil;
	for(;;) {
		if(t == nil)
			return lastgood;
		assert(t->magic == FREE_MAGIC);
		if(size == t->size)
			return t;
		if(size < t->size) {
			lastgood = t;
			t = t->left;
		} else
			t = t->right;
	}
}

/* treesplay: splay node of size size to the root and return new root */
static Free*
treesplay(Free *t, ulong size)
{
	Free N, *l, *r, *y;

	N.left = N.right = nil;
	l = r = &N;

	for(;;) {
		assert(t->magic == FREE_MAGIC);
		if(size < t->size) {
			y = t->left;
			if(y != nil) {
				assert(y->magic == FREE_MAGIC);
				if(size < y->size) {
					t->left = y->right;
					y->right = t;
					t = y;
				}
			}
			if(t->left == nil)
				break;
			r->left = t;
			r = t;
			t = t->left;
		} else if(size > t->size) {
			y = t->right;
			if(y != nil) {
				assert(y->magic == FREE_MAGIC);
				if(size > y->size) {
					t->right = y->left;
					y->left = t;
					t = y;
				}
			}
			if(t->right == nil)
				break;
			l->right = t;
			l = t;
			t = t->right;
		} else
			break;
	}

	l->right = t->left;
	r->left = t->right;
	t->left = N.right;
	t->right = N.left;

	return t;
}

/* pooladd: add anode to the free pool */
static Free*
pooladd(Pool *p, Alloc *anode)
{
	Free *node, *root;

	antagonism {
		memmark(_B2D(anode), 0xF7, anode->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node = (Free*)anode;
	node->magic = FREE_MAGIC;
	node->left = node->right = nil;
	node->next = node->prev = node;

	if(p->freeroot != nil) {
		root = treesplay(p->freeroot, node->size);
		if(root->size > node->size) {
			node->left = root->left;
			node->right = root;
			root->left = nil;
		} else if(root->size < node->size) {
			node->right = root->right;
			node->left = root;
			root->right = nil;
		} else {
			node->left = root->left;
			node->right = root->right;
			root->left = root->right = Poison;

			node->prev = root->prev;
			node->next = root;
			node->prev->next = node;
			node->next->prev = node;
		}
	}
	p->freeroot = node;
	p->curfree += node->size;

	return node;
}

/* pooldel: remove node from the free pool */
static Alloc*
pooldel(Pool *p, Free *node)
{
	Free *root;

	root = treesplay(p->freeroot, node->size);
	if(node == root && node->next == node) {
		if(node->left == nil)
			root = node->right;
		else {
			root = treesplay(node->left, node->size);
			assert(root->right == nil);
			root->right = node->right;
		}
	} else {
		if(node == root) {
			root = node->next;
			root->left = node->left;
			root->right = node->right;
		}
		assert(root->magic == FREE_MAGIC && root->size == node->size);
		node->next->prev = node->prev;
		node->prev->next = node->next;
	}
	p->freeroot = root;
	p->curfree -= node->size;

	node->left = node->right = node->next = node->prev = Poison;

	antagonism {
		memmark(_B2D(node), 0xF9, node->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node->magic = UNALLOC_MAGIC;

	return (Alloc*)node;
}

/*
 * Block maintenance
 */
/* block allocation */
static ulong
dsize2bsize(Pool *p, ulong sz)
{
	sz += sizeof(Bhdr)+sizeof(Btail);
	if(sz < p->minblock)
		sz = p->minblock;
	if(sz < MINBLOCKSIZE)
		sz = MINBLOCKSIZE;
	sz = (sz+p->quantum-1)&~(p->quantum-1);
	return sz;
}
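
/*
 * Editor's illustration (hypothetical parameters, not in the original):
 * with p->quantum = 32 and p->minblock = 32 on a 32-bit machine, a
 * request for dsize = 100 grows to 100 + sizeof(Bhdr) + sizeof(Btail) =
 * 116 bytes and then rounds up to the next quantum multiple:
 * (116+31)&~31 = 128. The 12 spare bytes stay inside the block; the
 * first four past the user data get datamagic[] values from
 * blocksetdsize().
 */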

static ulong
bsize2asize(Pool *p, ulong sz)
{
	sz += sizeof(Arena)+sizeof(Btail);
	if(sz < p->minarena)
		sz = p->minarena;
	sz = (sz+p->quantum)&~(p->quantum-1);
	return sz;
}

/* blockmerge: merge a and b, known to be adjacent */
/* both are removed from pool if necessary. */
static Alloc*
blockmerge(Pool *pool, Bhdr *a, Bhdr *b)
{
	Btail *t;

	assert(B2NB(a) == b);

	if(a->magic == FREE_MAGIC)
		pooldel(pool, (Free*)a);
	if(b->magic == FREE_MAGIC)
		pooldel(pool, (Free*)b);

	t = B2T(a);
	t->size = (ulong)Poison;
	t->magic0 = NOT_MAGIC;
	t->magic1 = NOT_MAGIC;
	PSHORT(t->datasize, NOT_MAGIC);

	a->size += b->size;
	t = B2T(a);
	t->size = a->size;
	PSHORT(t->datasize, 0xFFFF);

	b->size = NOT_MAGIC;
	b->magic = NOT_MAGIC;

	a->magic = UNALLOC_MAGIC;
	return (Alloc*)a;
}

/* blocksetsize: set the total size of a block, fixing tail pointers */
static Bhdr*
blocksetsize(Bhdr *b, ulong bsize)
{
	Btail *t;

	assert(b->magic != FREE_MAGIC /* blocksetsize */);

	b->size = bsize;
	t = B2T(b);
	t->size = b->size;
	t->magic0 = TAIL_MAGIC0;
	t->magic1 = TAIL_MAGIC1;
	return b;
}

/* getdsize: return the requested data size for an allocated block */
static ulong
getdsize(Alloc *b)
{
	Btail *t;

	t = B2T(b);
	return b->size - SHORT(t->datasize);
}

/* blocksetdsize: set the user data size of a block */
static Alloc*
blocksetdsize(Pool *p, Alloc *b, ulong dsize)
{
	Btail *t;
	uchar *q, *eq;

	assert(b->size >= dsize2bsize(p, dsize));
	assert(b->size - dsize < 0x10000);

	t = B2T(b);
	PSHORT(t->datasize, b->size - dsize);

	q = (uchar*)_B2D(b)+dsize;
	eq = (uchar*)t;
	if(eq > q+4)
		eq = q+4;
	for(; q<eq; q++)
		*q = datamagic[((ulong)(uintptr)q)%nelem(datamagic)];

	return b;
}

/* trim: trim a block down to what is needed to hold dsize bytes of user data */
static Alloc*
trim(Pool *p, Alloc *b, ulong dsize)
{
	ulong extra, bsize;
	Alloc *frag;

	bsize = dsize2bsize(p, dsize);
	extra = b->size - bsize;
	if(b->size - dsize >= 0x10000 ||
	  (extra >= bsize>>2 && extra >= MINBLOCKSIZE && extra >= p->minblock)) {
		blocksetsize(b, bsize);
		frag = (Alloc*) B2NB(b);

		antagonism {
			memmark(frag, 0xF1, extra);
		}

		frag->magic = UNALLOC_MAGIC;
		blocksetsize(frag, extra);
		pooladd(p, frag);
	}

	b->magic = ALLOC_MAGIC;
	blocksetdsize(p, b, dsize);
	return b;
}
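
/*
 * Editor's illustration (hypothetical numbers, not in the original):
 * if a 512-byte free block is chosen to satisfy dsize = 100 (bsize =
 * 128 with quantum 32, as above), the extra 384 bytes pass all three
 * tests (384 >= 128>>2, 384 >= MINBLOCKSIZE, 384 >= p->minblock), so
 * trim() shrinks the block to 128 bytes and returns the 384-byte
 * remainder to the free tree. A block only slightly too large is
 * handed out whole instead.
 */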

static Alloc*
freefromfront(Pool *p, Alloc *b, ulong skip)
{
	Alloc *bb;

	skip = skip&~(p->quantum-1);
	if(skip >= 0x1000 || (skip >= b->size>>2 && skip >= MINBLOCKSIZE && skip >= p->minblock)){
		bb = (Alloc*)((uchar*)b+skip);
		bb->magic = UNALLOC_MAGIC;
		blocksetsize(bb, b->size-skip);
		b->magic = UNALLOC_MAGIC;
		blocksetsize(b, skip);
		pooladd(p, b);
		return bb;
	}
	return b;
}

/*
 * Arena maintenance
 */

/* arenasetsize: set arena size, updating tail */
static void
arenasetsize(Arena *a, ulong asize)
{
	Bhdr *atail;

	a->asize = asize;
	atail = A2TB(a);
	atail->magic = ARENATAIL_MAGIC;
	atail->size = 0;
}

/* poolnewarena: allocate new arena */
static void
poolnewarena(Pool *p, ulong asize)
{
	Arena *a;
	Arena *ap, *lastap;
	Alloc *b;

	LOG(p, "newarena %lud\n", asize);
	if(p->cursize+asize > p->maxsize) {
		if(poolcompactl(p) == 0){
			LOG(p, "pool too big: %llud+%lud > %llud\n",
				(uvlong)p->cursize, asize, (uvlong)p->maxsize);
			werrstr("memory pool too large");
		}
		return;
	}

	if((a = p->alloc(asize)) == nil) {
		/* assume errstr set by p->alloc */
		return;
	}

	p->cursize += asize;

	/* arena hdr */
	a->magic = ARENA_MAGIC;
	blocksetsize(a, sizeof(Arena));
	arenasetsize(a, asize);
	blockcheck(p, a);

	/* create one large block in arena */
	b = (Alloc*)A2B(a);
	b->magic = UNALLOC_MAGIC;
	blocksetsize(b, (uchar*)A2TB(a)-(uchar*)b);
	blockcheck(p, b);
	pooladd(p, b);
	blockcheck(p, b);

	/* sort arena into descending sorted arena list */
	for(lastap=nil, ap=p->arenalist; ap > a; lastap=ap, ap=ap->down)
		;

	if(a->down = ap)	/* assign = */
		a->down->aup = a;

	if(a->aup = lastap)	/* assign = */
		a->aup->down = a;
	else
		p->arenalist = a;

	/* merge with surrounding arenas if possible */
	/* must do a with up before down with a (think about it) */
	if(a->aup)
		arenamerge(p, a, a->aup);
	if(a->down)
		arenamerge(p, a->down, a);
}

/* blockgrow: grow a block to encompass space past its end, possibly by */
/* trimming it into two different blocks. */
static void
blockgrow(Pool *p, Bhdr *b, ulong nsize)
{
	if(b->magic == FREE_MAGIC) {
		Alloc *a;
		Bhdr *bnxt;

		a = pooldel(p, (Free*)b);
		blockcheck(p, a);
		blocksetsize(a, nsize);
		blockcheck(p, a);
		bnxt = B2NB(a);
		if(bnxt->magic == FREE_MAGIC)
			a = blockmerge(p, a, bnxt);
		blockcheck(p, a);
		pooladd(p, a);
	} else {
		Alloc *a;
		ulong dsize;

		a = (Alloc*)b;
		p->curalloc -= a->size;
		dsize = getdsize(a);
		blocksetsize(a, nsize);
		trim(p, a, dsize);
		p->curalloc += a->size;
	}
}

/* arenamerge: attempt to coalesce two arenas that might be adjacent */
static Arena*
arenamerge(Pool *p, Arena *bot, Arena *top)
{
	Bhdr *bbot, *btop;
	Btail *t;
	ulong newsize;

	blockcheck(p, bot);
	blockcheck(p, top);
	assert(bot->aup == top && top > bot);

	newsize = top->asize + ((uchar*)top - (uchar*)bot);
	if(newsize < top->asize || p->merge == nil || p->merge(bot, top) == 0)
		return nil;

	/* remove top from list */
	if(bot->aup = top->aup)	/* assign = */
		bot->aup->down = bot;
	else
		p->arenalist = bot;

	/* save ptrs to last block in bot, first block in top */
	t = B2PT(A2TB(bot));
	bbot = T2HDR(t);
	btop = A2B(top);
	blockcheck(p, bbot);
	blockcheck(p, btop);

	/* grow bottom arena to encompass top */
	arenasetsize(bot, newsize);

	/* grow bottom block to encompass space between arenas */
	blockgrow(p, bbot, (uchar*)btop-(uchar*)bbot);
	blockcheck(p, bbot);
	return bot;
}

/* dumpblock: print block's vital stats */
static void
dumpblock(Pool *p, Bhdr *b)
{
	ulong *dp;
	ulong dsize;
	uchar *cp;

	dp = (ulong*)b;
	p->print(p, "pool %s block %p\nhdr %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux\n",
		p->name, b, dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]);

	dp = (ulong*)B2T(b);
	p->print(p, "tail %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux | %.8lux %.8lux\n",
		dp[-6], dp[-5], dp[-4], dp[-3], dp[-2], dp[-1], dp[0], dp[1]);

	if(b->magic == ALLOC_MAGIC){
		dsize = getdsize((Alloc*)b);
		if(dsize >= b->size)	/* user data size corrupt */
			return;

		cp = (uchar*)_B2D(b)+dsize;
		p->print(p, "user data ");
		p->print(p, "%.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux",
			cp[-8], cp[-7], cp[-6], cp[-5], cp[-4], cp[-3], cp[-2], cp[-1]);
		p->print(p, " | %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux\n",
			cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
	}
}

static void
printblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
}

static void
panicblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
	p->panic(p, "pool panic");
}

/* blockcheck: ensure a block consistent with our expectations */
/* should only be called when holding pool lock */
static void
blockcheck(Pool *p, Bhdr *b)
{
	Alloc *a;
	Btail *t;
	int i, n;
	uchar *q, *bq, *eq;
	ulong dsize;

	switch(b->magic) {
	default:
		panicblock(p, b, "bad magic");
	case FREE_MAGIC:
	case UNALLOC_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		break;
	case DEAD_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		n = getdsize((Alloc*)b);
		q = _B2D(b);
		q += 8;
		for(i=8; i<n; i++)
			if(*q++ != 0xDA)
				panicblock(p, b, "dangling pointer write");
		break;
	case ARENA_MAGIC:
		b = A2TB((Arena*)b);
		if(b->magic != ARENATAIL_MAGIC)
			panicblock(p, b, "bad arena size");
		/* fall through */
	case ARENATAIL_MAGIC:
		if(b->size != 0)
			panicblock(p, b, "bad arena tail size");
		break;
	case ALLOC_MAGIC:
		a = (Alloc*)b;
		t = B2T(b);
		dsize = getdsize(a);
		bq = (uchar*)_B2D(a)+dsize;
		eq = (uchar*)t;

		if(t->magic0 != TAIL_MAGIC0){
			/* if someone wrote exactly one byte over and it was a NUL, we sometimes only complain. */
			if((p->flags & POOL_TOLERANCE) && bq == eq && t->magic0 == 0)
				printblock(p, b, "mem user overflow (magic0)");
			else
				panicblock(p, b, "corrupt tail magic0");
		}

		if(t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic1");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");

		if(dsize2bsize(p, dsize) > a->size)
			panicblock(p, b, "too much block data");

		if(eq > bq+4)
			eq = bq+4;
		for(q=bq; q<eq; q++){
			if(*q != datamagic[((uintptr)q)%nelem(datamagic)]){
				if(q == bq && *q == 0 && (p->flags & POOL_TOLERANCE)){
					printblock(p, b, "mem user overflow");
					continue;
				}
				panicblock(p, b, "mem user overflow");
			}
		}
		break;
	}
}

/*
 * compact an arena by shifting all the free blocks to the end.
 * assumes pool lock is held.
 */
enum {
	FLOATING_MAGIC = 0xCBCBCBCB,	/* temporarily neither allocated nor in the free tree */
};

static int
arenacompact(Pool *p, Arena *a)
{
	Bhdr *b, *wb, *eb, *nxt;
	int compacted;

	if(p->move == nil)
		p->panic(p, "don't call me when pool->move is nil\n");

	poolcheckarena(p, a);
	eb = A2TB(a);
	compacted = 0;
	for(b=wb=A2B(a); b && b < eb; b=nxt) {
		nxt = B2NB(b);
		switch(b->magic) {
		case FREE_MAGIC:
			pooldel(p, (Free*)b);
			b->magic = FLOATING_MAGIC;
			break;
		case ALLOC_MAGIC:
			if(wb != b) {
				memmove(wb, b, b->size);
				p->move(_B2D(b), _B2D(wb));
				compacted = 1;
			}
			wb = B2NB(wb);
			break;
		}
	}

	/*
	 * the only free data is now at the end of the arena, pointed
	 * at by wb. all we need to do is set its size and get out.
	 */
	if(wb < eb) {
		wb->magic = UNALLOC_MAGIC;
		blocksetsize(wb, (uchar*)eb-(uchar*)wb);
		pooladd(p, (Alloc*)wb);
	}

	return compacted;
}
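
/*
 * Editor's note (not in the original): compaction slides every
 * ALLOC_MAGIC block toward the arena base with memmove() and reports
 * each move to the client through p->move(old, new) so the client can
 * rewrite its pointers to the moved data. A pool whose client cannot
 * do that leaves p->move nil and is never compacted; see
 * poolcompactl() below.
 */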

/*
 * compact a pool by compacting each individual arena.
 * 'twould be nice to shift blocks from one arena to the
 * next but it's a pain to code.
 */
static int
poolcompactl(Pool *pool)
{
	Arena *a;
	int compacted;

	if(pool->move == nil || pool->lastcompact == pool->nfree)
		return 0;

	pool->lastcompact = pool->nfree;
	compacted = 0;
	for(a=pool->arenalist; a; a=a->down)
		compacted |= arenacompact(pool, a);
	return compacted;
}

/*
static int
poolcompactl(Pool*)
{
	return 0;
}
*/

/*
 * Actual allocators
 */

/*
static void*
_B2D(void *a)
{
	return (uchar*)a+sizeof(Bhdr);
}
*/

static void*
B2D(Pool *p, Alloc *a)
{
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "B2D called on unworthy block");
	return _B2D(a);
}

/*
static void*
_D2B(void *v)
{
	Alloc *a;

	a = (Alloc*)((uchar*)v-sizeof(Bhdr));
	return a;
}
*/

static Alloc*
D2B(Pool *p, void *v)
{
	Alloc *a;
	ulong *u;

	if((uintptr)v&(sizeof(ulong)-1))
		v = (char*)v - ((uintptr)v&(sizeof(ulong)-1));
	u = v;
	while(u[-1] == ALIGN_MAGIC)
		u--;
	a = _D2B(u);
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "D2B called on non-block %p (double-free?)", v);
	return a;
}
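
/*
 * Editor's sketch (not in the original): D2B undoes the padding that
 * poolallocalignl() adds below. An aligned allocation may hand out a
 * pointer past the block's real data start, with every skipped ulong
 * set to ALIGN_MAGIC:
 *
 *	[Bhdr][ALIGN_MAGIC ... ALIGN_MAGIC][user data ...][Btail]
 *	      ^ _B2D(block)                ^ pointer handed out
 *
 * Walking u backwards over ALIGN_MAGIC words recovers the true data
 * start, so poolfree() and poolrealloc() also work on pointers
 * obtained from poolallocalign().
 */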

/* poolallocl: attempt to allocate block to hold dsize user bytes; assumes lock held */
static void*
poolallocl(Pool *p, ulong dsize)
{
	ulong bsize;
	Free *fb;
	Alloc *ab;

	if(dsize >= 0x80000000UL){	/* for sanity, overflow */
		werrstr("invalid allocation size");
		return nil;
	}

	bsize = dsize2bsize(p, dsize);

	fb = treelookupgt(p->freeroot, bsize);
	if(fb == nil) {
		poolnewarena(p, bsize2asize(p, bsize));
		if((fb = treelookupgt(p->freeroot, bsize)) == nil) {
			/* assume poolnewarena failed and set %r */
			return nil;
		}
	}

	ab = trim(p, pooldel(p, fb), dsize);
	p->curalloc += ab->size;
	antagonism {
		memset(B2D(p, ab), 0xDF, dsize);
	}
	return B2D(p, ab);
}

/* poolreallocl: attempt to grow v to ndsize bytes; assumes lock held */
static void*
poolreallocl(Pool *p, void *v, ulong ndsize)
{
	Alloc *a;
	Bhdr *left, *right, *newb;
	Btail *t;
	ulong nbsize;
	ulong odsize;
	ulong obsize;
	void *nv;

	if(v == nil)	/* for ANSI */
		return poolallocl(p, ndsize);
	if(ndsize == 0) {
		poolfreel(p, v);
		return nil;
	}
	a = D2B(p, v);
	blockcheck(p, a);
	odsize = getdsize(a);
	obsize = a->size;

	/* can reuse the same block? */
	nbsize = dsize2bsize(p, ndsize);
	if(nbsize <= a->size) {
	Returnblock:
		if(v != _B2D(a))
			memmove(_B2D(a), v, odsize);
		a = trim(p, a, ndsize);
		p->curalloc -= obsize;
		p->curalloc += a->size;
		v = B2D(p, a);
		return v;
	}

	/* can merge with surrounding blocks? */
	right = B2NB(a);
	if(right->magic == FREE_MAGIC && a->size+right->size >= nbsize) {
		a = blockmerge(p, a, right);
		goto Returnblock;
	}

	t = B2PT(a);
	left = T2HDR(t);
	if(left->magic == FREE_MAGIC && left->size+a->size >= nbsize) {
		a = blockmerge(p, left, a);
		goto Returnblock;
	}

	if(left->magic == FREE_MAGIC && right->magic == FREE_MAGIC
	&& left->size+a->size+right->size >= nbsize) {
		a = blockmerge(p, blockmerge(p, left, a), right);
		goto Returnblock;
	}

	if((nv = poolallocl(p, ndsize)) == nil)
		return nil;

	/* maybe the new block is next to us; if so, merge */
	left = T2HDR(B2PT(a));
	right = B2NB(a);
	newb = D2B(p, nv);
	if(left == newb || right == newb) {
		if(left == newb || left->magic == FREE_MAGIC)
			a = blockmerge(p, left, a);
		if(right == newb || right->magic == FREE_MAGIC)
			a = blockmerge(p, a, right);
		assert(a->size >= nbsize);
		goto Returnblock;
	}

	/* enough cleverness */
	memmove(nv, v, odsize);
	antagonism {
		memset((char*)nv+odsize, 0xDE, ndsize-odsize);
	}
	poolfreel(p, v);
	return nv;
}

static void*
alignptr(void *v, ulong align, long offset)
{
	char *c;
	ulong off;

	c = v;
	if(align){
		off = ((ulong)(uintptr)c) % align;
		if(off != offset){
			offset -= off;
			if(offset < 0)
				offset += align;
			c += offset;
		}
	}
	return c;
}

/* poolallocalignl: allocate as described below; assumes pool locked */
static void*
poolallocalignl(Pool *p, ulong dsize, ulong align, long offset, ulong span)
{
	ulong asize;
	void *v;
	char *c;
	ulong *u;
	int skip;
	Alloc *b;

	/*
	 * allocate block
	 *	dsize bytes
	 *	addr == offset (modulo align)
	 *	does not cross span-byte block boundary
	 *
	 * to satisfy alignment, just allocate an extra
	 * align bytes and then shift appropriately.
	 *
	 * to satisfy span, try once and see if we're
	 * lucky. the second time, allocate 2x asize
	 * so that we definitely get one not crossing
	 * the boundary.
	 */
	if(align){
		if(offset < 0)
			offset = align - ((-offset)%align);
		offset %= align;
	}
	asize = dsize+align;
	v = poolallocl(p, asize);
	if(v == nil)
		return nil;
	if(span && (uintptr)v/span != ((uintptr)v+asize)/span){
		/* try again */
		poolfreel(p, v);
		v = poolallocl(p, 2*asize);
		if(v == nil)
			return nil;
	}

	/*
	 * figure out what pointer we want to return
	 */
	c = alignptr(v, align, offset);
	if(span && (uintptr)c/span != (uintptr)(c+dsize-1)/span){
		c += span - (uintptr)c%span;
		c = alignptr(c, align, offset);
		if((uintptr)c/span != (uintptr)(c+dsize-1)/span){
			poolfreel(p, v);
			werrstr("cannot satisfy dsize %lud span %lud with align %lud+%ld", dsize, span, align, offset);
			return nil;
		}
	}
	skip = c - (char*)v;

	/*
	 * free up the skip bytes before that pointer
	 * or mark it as unavailable.
	 */
	b = _D2B(v);
	p->curalloc -= b->size;
	b = freefromfront(p, b, skip);
	v = _B2D(b);
	skip = c - (char*)v;
	if(c > (char*)v){
		u = v;
		while(c >= (char*)u+sizeof(ulong))
			*u++ = ALIGN_MAGIC;
	}
	trim(p, b, skip+dsize);
	p->curalloc += b->size;
	assert(D2B(p, c) == b);
	antagonism {
		memset(c, 0xDD, dsize);
	}
	return c;
}
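
/*
 * Editor's illustration (hypothetical numbers, not in the original):
 * poolallocalignl(p, 100, 64, 0, 0) allocates 100+64 bytes and rounds
 * the data pointer up to the next multiple of 64 with alignptr(). If
 * poolallocl() returned data at 0x1008, c becomes 0x1040 and skip =
 * 0x38. freefromfront() splits the skipped space off as its own free
 * block only when it is large enough to stand alone; otherwise the gap
 * stays in the block, paved with ALIGN_MAGIC so that D2B() can find
 * the real header again.
 */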

/* poolfree: free block obtained from poolalloc; assumes lock held */
static void
poolfreel(Pool *p, void *v)
{
	Alloc *ab;
	Bhdr *back, *fwd;

	if(v == nil)	/* for ANSI */
		return;

	ab = D2B(p, v);
	blockcheck(p, ab);

	if(p->flags&POOL_NOREUSE){
		int n;

		ab->magic = DEAD_MAGIC;
		n = getdsize(ab)-8;
		if(n > 0)
			memset((uchar*)v+8, 0xDA, n);
		return;
	}

	p->nfree++;
	p->curalloc -= ab->size;
	back = T2HDR(B2PT(ab));
	if(back->magic == FREE_MAGIC)
		ab = blockmerge(p, back, ab);

	fwd = B2NB(ab);
	if(fwd->magic == FREE_MAGIC)
		ab = blockmerge(p, ab, fwd);

	pooladd(p, ab);
}

void*
poolalloc(Pool *p, ulong n)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocl(p, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolalloc %p %lud = %p\n", p, n, v);
	p->unlock(p);
	return v;
}
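
/*
 * Editor's sketch (assumed client code, not part of this file): how a
 * client wires up a Pool, in the style of what /sys/src/libc/port/malloc.c
 * does for mainmem. The names sbrkalloc, sbrkmerge, plock, punlock,
 * pprint and ppanic are hypothetical stand-ins for the client's own
 * coarse allocator, arena-merge test, locking and diagnostics.
 *
 *	static Pool pexample = {
 *		.name=		"example",
 *		.maxsize=	16*1024*1024,
 *		.minarena=	4*1024,
 *		.quantum=	32,
 *		.alloc=		sbrkalloc,
 *		.merge=		sbrkmerge,
 *		.lock=		plock,
 *		.unlock=	punlock,
 *		.print=		pprint,
 *		.panic=		ppanic,
 *	};
 *
 *	void *v;
 *
 *	v = poolalloc(&pexample, 123);
 *	...
 *	poolfree(&pexample, v);
 *
 * p->alloc must return an asize-byte chunk (or nil with errstr set);
 * p->merge reports whether two chunks it handed out are adjacent and
 * may be fused into one arena.
 */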

void*
poolallocalign(Pool *p, ulong n, ulong align, long offset, ulong span)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocalignl(p, n, align, offset, span);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolallocalign %p %lud %lud %ld %lud = %p\n", p, n, align, offset, span, v);
	p->unlock(p);
	return v;
}

int
poolcompact(Pool *p)
{
	int rv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	rv = poolcompactl(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	LOG(p, "poolcompact %p\n", p);
	p->unlock(p);
	return rv;
}

void*
poolrealloc(Pool *p, void *v, ulong n)
{
	void *nv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	nv = poolreallocl(p, v, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolrealloc %p %p %ld = %p\n", p, v, n, nv);
	p->unlock(p);
	return nv;
}

void
poolfree(Pool *p, void *v)
{
	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	poolfreel(p, v);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolfree %p %p\n", p, v);
	p->unlock(p);
}

/*
 * Return the real size of a block, and let the user use it.
 */
ulong
poolmsize(Pool *p, void *v)
{
	Alloc *b;
	ulong dsize;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(v == nil)	/* consistency with other braindead ANSI-ness */
		dsize = 0;
	else {
		b = D2B(p, v);
		dsize = (b->size&~(p->quantum-1)) - sizeof(Bhdr) - sizeof(Btail);
		assert(dsize >= getdsize(b));
		blocksetdsize(p, b, dsize);
	}
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolmsize %p %p = %ld\n", p, v, dsize);
	p->unlock(p);
	return dsize;
}

/*
 * Debugging
 */

static void
poolcheckarena(Pool *p, Arena *a)
{
	Bhdr *b;
	Bhdr *atail;

	atail = A2TB(a);
	for(b=a; b->magic != ARENATAIL_MAGIC && b<atail; b=B2NB(b))
		blockcheck(p, b);
	blockcheck(p, b);
	if(b != atail)
		p->panic(p, "found wrong tail");
}

static void
poolcheckl(Pool *p)
{
	Arena *a;

	for(a=p->arenalist; a; a=a->down)
		poolcheckarena(p, a);
	if(p->freeroot)
		checktree(p->freeroot, 0, 1<<30);
}

void
poolcheck(Pool *p)
{
	p->lock(p);
	poolcheckl(p);
	p->unlock(p);
}

void
poolblockcheck(Pool *p, void *v)
{
	if(v == nil)
		return;

	p->lock(p);
	blockcheck(p, D2B(p, v));
	p->unlock(p);
}

static void
pooldumpl(Pool *p)
{
	Arena *a;

	p->print(p, "pool %p %s\n", p, p->name);
	for(a=p->arenalist; a; a=a->down)
		pooldumparena(p, a);
}

void
pooldump(Pool *p)
{
	p->lock(p);
	pooldumpl(p);
	p->unlock(p);
}

static void
pooldumparena(Pool *p, Arena *a)
{
	Bhdr *b;

	for(b=a; b->magic != ARENATAIL_MAGIC; b=B2NB(b))
		p->print(p, "(%p %.8lux %lud)", b, b->magic, b->size);
	p->print(p, "\n");
}

/*
 * mark the memory in such a way that we know who marked it
 * (via the signature) and we know where the marking started.
 */
static void
memmark(void *v, int sig, ulong size)
{
	uchar *p, *ep;
	ulong *lp, *elp;

	lp = v;
	elp = lp+size/4;
	while(lp < elp)
		*lp++ = (sig<<24) ^ ((uintptr)lp-(uintptr)v);
	p = (uchar*)lp;
	ep = (uchar*)v+size;
	while(p<ep)
		*p++ = sig;
}