mpool.c
/*
* Memory pool routines.
*
* Copyright 1996 by Gray Watson.
*
* This file is part of the mpool package.
*
* Permission to use, copy, modify, and distribute this software for
* any purpose and without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies, and that the name of Gray Watson not be used in advertising
* or publicity pertaining to distribution of the document or software
* without specific, written prior permission.
*
* Gray Watson makes no representations about the suitability of the
* software described herein for any purpose. It is provided "as is"
* without express or implied warranty.
*
* The author may be reached via http://256.com/gray/
*
* $Id: mpool.c,v 1.5 2006/05/31 20:28:31 gray Exp $
*/
/*
* Memory-pool allocation routines. I got sick of the GNU mmalloc
* library which was close to what we needed but did not exactly do
* what I wanted.
*
* The following uses mmap from /dev/zero. It allows a number of
* allocations to be made inside of a memory pool; with a clear or
* close, the pool can then be reset without any memory fragmentation
* or growth problems.
*/
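/*
 * A minimal usage sketch (illustrative only; mpool_open is defined
 * below, and the mpool_alloc, mpool_free, and mpool_close prototypes
 * are assumed to match their declarations in mpool.h):
 *
 *   int err;
 *   mpool_t *pool = mpool_open(0, 0, NULL, &err);
 *   if (pool != NULL) {
 *     char *buf = mpool_alloc(pool, 1024, &err);
 *     if (buf != NULL) {
 *       (void)mpool_free(pool, buf, 1024);
 *     }
 *     (void)mpool_close(pool);
 *   }
 */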
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#ifdef DMALLOC
#include "dmalloc.h"
#endif
#define MPOOL_MAIN
#include "mpool.h"
#include "mpool_loc.h"
#ifdef __GNUC__
#ident "$Id: mpool.c,v 1.5 2006/05/31 20:28:31 gray Exp $"
#else
static char *rcs_id = "$Id: mpool.c,v 1.5 2006/05/31 20:28:31 gray Exp $";
#endif
/* version */
//static char *version = "mpool library version 2.1.0";
/* local variables */
static int enabled_b = 0; /* lib initialized? */
static unsigned int min_bit_free_next = 0; /* min size of next pnt */
static unsigned int min_bit_free_size = 0; /* min size of next + size */
static unsigned long bit_array[MAX_BITS + 1]; /* size -> bit */
/****************************** local utilities ******************************/
/*
* static void startup
*
* DESCRIPTION:
*
* Perform any library level initialization.
*
* RETURNS:
*
* None.
*
* ARGUMENTS:
*
* None.
*/
static void startup(void)
{
int bit_c;
unsigned long size = 1;
if (enabled_b) {
return;
}
/* allocate our free bit array list */
for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
bit_array[bit_c] = size;
/*
* Note our minimum number of bits that can store a pointer. This
* is the smallest size that we can have a linked list for.
*/
if (min_bit_free_next == 0 && size >= sizeof(void *)) {
min_bit_free_next = bit_c;
}
/*
* Note our minimum number of bits that can store a pointer and
* the size of the block.
*/
if (min_bit_free_size == 0 && size >= sizeof(mpool_free_t)) {
min_bit_free_size = bit_c;
}
size *= 2;
}
enabled_b = 1;
}
/*
* static int size_to_bits
*
* DESCRIPTION:
*
* Calculate the number of bits in a size.
*
* RETURNS:
*
* Number of bits.
*
* ARGUMENTS:
*
* size -> Size of memory of which to calculate the number of bits.
*/
static int size_to_bits(const unsigned long size)
{
int bit_c = 0;
for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
if (size <= bit_array[bit_c]) {
break;
}
}
return bit_c;
}
/*
* static int size_to_free_bits
*
* DESCRIPTION:
*
* Calculate the number of bits in a size going on the free list.
*
* RETURNS:
*
* Number of bits.
*
* ARGUMENTS:
*
* size -> Size of memory of which to calculate the number of bits.
*/
static int size_to_free_bits(const unsigned long size)
{
int bit_c = 0;
if (size == 0) {
return 0;
}
for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
if (size < bit_array[bit_c]) {
break;
}
}
return bit_c - 1;
}
/*
* static int bits_to_size
*
* DESCRIPTION:
*
* Calculate the size represented by a number of bits.
*
* RETURNS:
*
* The size represented by the number of bits.
*
* ARGUMENTS:
*
* bit_n -> Number of bits
*/
static unsigned long bits_to_size(const int bit_n)
{
if (bit_n > MAX_BITS) {
return bit_array[MAX_BITS];
}
else {
return bit_array[bit_n];
}
}
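/*
 * Worked example for the three helpers above: with bit_array[3] == 8
 * and bit_array[4] == 16, size_to_bits(10) == 4 (the smallest class
 * that can hold 10 bytes), size_to_free_bits(10) == 3 (the largest
 * class fully covered by 10 bytes), and bits_to_size(4) == 16.
 */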
/*
* static void *alloc_pages
*
* DESCRIPTION:
*
* Allocate space for a number of memory pages in the memory pool.
*
* RETURNS:
*
* Success - New pages of memory
*
* Failure - NULL
*
* ARGUMENTS:
*
* mp_p <-> Pointer to our memory pool.
*
* page_n -> Number of pages to alloc.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a mpool error code.
*/
static void *alloc_pages(mpool_t *mp_p, const unsigned int page_n,
int *error_p)
{
void *mem, *fill_mem;
unsigned long size, fill;
int state;
/* are we over our max-pages? */
if (mp_p->mp_max_pages > 0 && mp_p->mp_page_c >= mp_p->mp_max_pages) {
SET_POINTER(error_p, MPOOL_ERROR_NO_PAGES);
return NULL;
}
size = SIZE_OF_PAGES(mp_p, page_n);
#ifdef DEBUG
(void)printf("allocating %u pages or %lu bytes\n", page_n, size);
#endif
if (BIT_IS_SET(mp_p->mp_flags, MPOOL_FLAG_USE_SBRK)) {
mem = sbrk(size);
if (mem == (void *)-1) {
SET_POINTER(error_p, MPOOL_ERROR_NO_MEM);
return NULL;
}
fill = (unsigned long)mem % mp_p->mp_page_size;
if (fill > 0) {
fill = mp_p->mp_page_size - fill;
fill_mem = sbrk(fill);
if (fill_mem == (void *)-1) {
SET_POINTER(error_p, MPOOL_ERROR_NO_MEM);
return NULL;
}
if ((char *)fill_mem != (char *)mem + size) {
SET_POINTER(error_p, MPOOL_ERROR_SBRK_CONTIG);
return NULL;
}
mem = (char *)mem + fill;
}
}
else {
state = MAP_PRIVATE;
#ifdef MAP_FILE
state |= MAP_FILE;
#endif
#ifdef MAP_VARIABLE
state |= MAP_VARIABLE;
#endif
/* mmap from /dev/zero */
mem = mmap((caddr_t)mp_p->mp_addr, size, PROT_READ | PROT_WRITE, state,
mp_p->mp_fd, mp_p->mp_top);
if (mem == (void *)MAP_FAILED) {
if (errno == ENOMEM) {
SET_POINTER(error_p, MPOOL_ERROR_NO_MEM);
}
else {
SET_POINTER(error_p, MPOOL_ERROR_MMAP);
}
return NULL;
}
mp_p->mp_top += size;
if (mp_p->mp_addr != NULL) {
mp_p->mp_addr = (char *)mp_p->mp_addr + size;
}
}
mp_p->mp_page_c += page_n;
SET_POINTER(error_p, MPOOL_ERROR_NONE);
return mem;
}
/*
* static int free_pages
*
* DESCRIPTION:
*
* Free previously allocated pages of memory.
*
* RETURNS:
*
* Success - MPOOL_ERROR_NONE
*
* Failure - Mpool error code
*
* ARGUMENTS:
*
* pages <-> Pointer to memory pages that we are freeing.
*
* size -> Size of the block that we are freeing.
*
* sbrk_b -> Set to one if the pages were allocated with sbrk else mmap.
*/
static int free_pages(void *pages, const unsigned long size,
const int sbrk_b)
{
if (! sbrk_b) {
(void)munmap((caddr_t)pages, size);
}
return MPOOL_ERROR_NONE;
}
/*
* static int check_magic
*
* DESCRIPTION:
*
* Check for the existence of the magic ID in a memory pointer.
*
* RETURNS:
*
* Success - MPOOL_ERROR_NONE
*
* Failure - Mpool error code
*
* ARGUMENTS:
*
* addr -> Address inside of the block that we are trying to locate.
*
* size -> Size of the block.
*/
static int check_magic(const void *addr, const unsigned long size)
{
const unsigned char *mem_p;
/* set our starting point */
mem_p = (unsigned char *)addr + size;
if (*mem_p == FENCE_MAGIC0 && *(mem_p + 1) == FENCE_MAGIC1) {
return MPOOL_ERROR_NONE;
}
else {
return MPOOL_ERROR_PNT_OVER;
}
}
/*
* static void write_magic
*
* DESCRIPTION:
*
* Write the magic ID to the address.
*
* RETURNS:
*
* None.
*
* ARGUMENTS:
*
* addr -> Address where to write the magic.
*/
static void write_magic(const void *addr)
{
*(unsigned char *)addr = FENCE_MAGIC0;
*((unsigned char *)addr + 1) = FENCE_MAGIC1;
}
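/*
 * Fence note: alloc_mem below reserves FENCE_SIZE extra bytes past the
 * user region and calls write_magic on that offset; free_mem later
 * calls check_magic on the same offset, so a write past the end of the
 * user region is reported as MPOOL_ERROR_PNT_OVER.
 */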
/*
* static void free_pointer
*
* DESCRIPTION:
*
* Move a pointer onto our free lists.
*
* RETURNS:
*
* Success - MPOOL_ERROR_NONE
*
* Failure - Mpool error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* addr <-> Address where to write the magic. We may write a next
* pointer to it.
*
* size -> Size of the address space.
*/
static int free_pointer(mpool_t *mp_p, void *addr,
const unsigned long size)
{
unsigned int bit_n;
unsigned long real_size;
mpool_free_t free_pnt;
#ifdef DEBUG
(void)printf("freeing a block at %lx of %lu bytes\n", (long)addr, size);
#endif
if (size == 0) {
return MPOOL_ERROR_NONE;
}
/*
* if the user size is larger than what can fit in an entire block then
* we change the size
*/
if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
real_size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, size)) -
sizeof(mpool_block_t);
}
else {
real_size = size;
}
/*
* We use a specific free bits calculation here because if we are
* freeing 10 bytes then we will be putting it into the 8-byte free
* list and not the 16 byte list. size_to_bits(10) will return 4
* instead of 3.
*/
bit_n = size_to_free_bits(real_size);
/*
* Minimal error checking. We could go all the way through the
* list however this might be prohibitive.
*/
if (mp_p->mp_free[bit_n] == addr) {
return MPOOL_ERROR_IS_FREE;
}
/* add the freed pointer to the free list */
if (bit_n < min_bit_free_next) {
/*
* Yes we know this will lose 99% of the allocations but what else
* can we do? No space for a next pointer.
*/
if (mp_p->mp_free[bit_n] == NULL) {
mp_p->mp_free[bit_n] = addr;
}
}
else if (bit_n < min_bit_free_size) {
/* copy in the old list head as our next pointer since we don't know about alignment */
memcpy(addr, mp_p->mp_free + bit_n, sizeof(void *));
mp_p->mp_free[bit_n] = addr;
}
else {
/* setup our free list structure */
free_pnt.mf_next_p = mp_p->mp_free[bit_n];
free_pnt.mf_size = real_size;
/* we copy the structure in since we don't know about alignment */
memcpy(addr, &free_pnt, sizeof(free_pnt));
mp_p->mp_free[bit_n] = addr;
}
return MPOOL_ERROR_NONE;
}
/*
* static int split_block
*
* DESCRIPTION:
*
* When freeing space in a multi-block chunk we have to create new
* blocks out of the upper areas being freed.
*
* RETURNS:
*
* Success - MPOOL_ERROR_NONE
*
* Failure - Mpool error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* free_addr -> Address that we are freeing.
*
* size -> Size of the space that we are taking from address.
*/
static int split_block(mpool_t *mp_p, void *free_addr,
const unsigned long size)
{
mpool_block_t *block_p, *new_block_p;
int ret, page_n;
void *end_p;
/*
* 1st we find the block pointer from our free addr. At this point
* the pointer must be the 1st one in the block if it spans
* multiple blocks.
*/
block_p = (mpool_block_t *)((char *)free_addr - sizeof(mpool_block_t));
if (block_p->mb_magic != BLOCK_MAGIC
|| block_p->mb_magic2 != BLOCK_MAGIC) {
return MPOOL_ERROR_POOL_OVER;
}
page_n = PAGES_IN_SIZE(mp_p, size);
/* we are creating a new block structure for the 2nd ... */
new_block_p = (mpool_block_t *)((char *)block_p +
SIZE_OF_PAGES(mp_p, page_n));
new_block_p->mb_magic = BLOCK_MAGIC;
/* The new block's bounds are the 1st block's bounds. The 1st block's are reset below. */
new_block_p->mb_bounds_p = block_p->mb_bounds_p;
/* Continue the linked list. The 1st block will point to us below. */
new_block_p->mb_next_p = block_p->mb_next_p;
new_block_p->mb_magic2 = BLOCK_MAGIC;
/* bounds for the 1st block are reset to the 1st page only */
block_p->mb_bounds_p = (char *)new_block_p;
/* the next block pointer for the 1st block is now the new one */
block_p->mb_next_p = new_block_p;
/* only free the space in the 1st block if it is only 1 block in size */
if (page_n == 1) {
/* now free the rest of the 1st block */
end_p = (char *)free_addr + size;
ret = free_pointer(mp_p, end_p,
(char *)block_p->mb_bounds_p - (char *)end_p);
if (ret != MPOOL_ERROR_NONE) {
return ret;
}
}
/* now free the rest of the block */
ret = free_pointer(mp_p, FIRST_ADDR_IN_BLOCK(new_block_p),
MEMORY_IN_BLOCK(new_block_p));
if (ret != MPOOL_ERROR_NONE) {
return ret;
}
return MPOOL_ERROR_NONE;
}
/*
* static void *get_space
*
* DESCRIPTION:
*
* Get space of a certain size from the free lists or, failing that, by allocating new pages.
*
* RETURNS:
*
* Success - New address that we can use.
*
* Failure - NULL
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* byte_size -> Size of the address space that we need.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a mpool error code.
*/
static void *get_space(mpool_t *mp_p, const unsigned long byte_size,
int *error_p)
{
mpool_block_t *block_p;
mpool_free_t free_pnt;
int ret;
unsigned long size;
unsigned int bit_c, page_n, left;
void *free_addr = NULL, *free_end;
size = byte_size;
while ((size & (sizeof(void *) - 1)) > 0) {
size++;
}
/*
* First we check the free lists looking for something with enough
* pages. Maybe we should only look X bits higher in the list.
*
* XXX: this is where we'd do the best fit. We'd look for the
* closest match. We then could put the rest of the allocation that
* we did not use in a lower free list. Have a define which states
* how deep in the free list to go to find the closest match.
*/
for (bit_c = size_to_bits(size); bit_c <= MAX_BITS; bit_c++) {
if (mp_p->mp_free[bit_c] != NULL) {
free_addr = mp_p->mp_free[bit_c];
break;
}
}
/*
* If the free lists did not have a chunk with enough memory then we
* need a new block.
*/
if (bit_c > MAX_BITS) {
/* we need to allocate more space */
page_n = PAGES_IN_SIZE(mp_p, size);
/* now we try and get the pages we need/want */
block_p = alloc_pages(mp_p, page_n, error_p);
if (block_p == NULL) {
/* error_p set in alloc_pages */
return NULL;
}
/* init the block header */
block_p->mb_magic = BLOCK_MAGIC;
block_p->mb_bounds_p = (char *)block_p + SIZE_OF_PAGES(mp_p, page_n);
block_p->mb_next_p = mp_p->mp_first_p;
block_p->mb_magic2 = BLOCK_MAGIC;
/*
* We insert it into the front of the queue. We could add it to
* the end but there is not much use.
*/
mp_p->mp_first_p = block_p;
if (mp_p->mp_last_p == NULL) {
mp_p->mp_last_p = block_p;
}
free_addr = FIRST_ADDR_IN_BLOCK(block_p);
#ifdef DEBUG
(void)printf("had to allocate space for %lx of %lu bytes\n",
(long)free_addr, size);
#endif
free_end = (char *)free_addr + size;
left = (char *)block_p->mb_bounds_p - (char *)free_end;
}
else {
if (bit_c < min_bit_free_next) {
mp_p->mp_free[bit_c] = NULL;
/* calculate the number of left over bytes */
left = bits_to_size(bit_c) - size;
}
else if (bit_c < min_bit_free_size) {
/* grab the next pointer from the freed address into our list */
memcpy(mp_p->mp_free + bit_c, free_addr, sizeof(void *));
/* calculate the number of left over bytes */
left = bits_to_size(bit_c) - size;
}
else {
/* grab the free structure from the address */
memcpy(&free_pnt, free_addr, sizeof(free_pnt));
mp_p->mp_free[bit_c] = free_pnt.mf_next_p;
/* are we splitting up a multi-block chunk into fewer blocks? */
if (PAGES_IN_SIZE(mp_p, free_pnt.mf_size) > PAGES_IN_SIZE(mp_p, size)) {
ret = split_block(mp_p, free_addr, size);
if (ret != MPOOL_ERROR_NONE) {
SET_POINTER(error_p, ret);
return NULL;
}
/* left over memory was taken care of in split_block */
left = 0;
}
else {
/* calculate the number of left over bytes */
left = free_pnt.mf_size - size;
}
}
#ifdef DEBUG
(void)printf("found a free block at %lx of %lu bytes\n",
(long)free_addr, left + size);
#endif
free_end = (char *)free_addr + size;
}
/*
* If we have memory left over then we free it so someone else can
* use it. We do not free the space if we just allocated a
* multi-block chunk because we need to have every allocation easily
* find the start of the block. Every user address % page-size
* should take us to the start of the block.
*/
if (left > 0 && size <= MAX_BLOCK_USER_MEMORY(mp_p)) {
/* free the rest of the block */
ret = free_pointer(mp_p, free_end, left);
if (ret != MPOOL_ERROR_NONE) {
SET_POINTER(error_p, ret);
return NULL;
}
}
/* update our bounds */
if (free_addr > mp_p->mp_bounds_p) {
mp_p->mp_bounds_p = free_addr;
}
else if (free_addr < mp_p->mp_min_p) {
mp_p->mp_min_p = free_addr;
}
return free_addr;
}
/*
* static void *alloc_mem
*
* DESCRIPTION:
*
* Allocate space for bytes inside of an already open memory pool.
*
* RETURNS:
*
* Success - Pointer to the address to use.
*
* Failure - NULL
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal malloc.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a mpool error code.
*/
static void *alloc_mem(mpool_t *mp_p, const unsigned long byte_size,
int *error_p)
{
unsigned long size, fence;
void *addr;
/* make sure we have enough bytes */
if (byte_size < MIN_ALLOCATION) {
size = MIN_ALLOCATION;
}
else {
size = byte_size;
}
if (BIT_IS_SET(mp_p->mp_flags, MPOOL_FLAG_NO_FREE)) {
fence = 0;
}
else {
fence = FENCE_SIZE;
}
/* get our free space + the space for the fence post */
addr = get_space(mp_p, size + fence, error_p);
if (addr == NULL) {
/* error_p set in get_space */
return NULL;
}
if (! BIT_IS_SET(mp_p->mp_flags, MPOOL_FLAG_NO_FREE)) {
write_magic((char *)addr + size);
}
/* maintain our stats */
mp_p->mp_alloc_c++;
mp_p->mp_user_alloc += size;
if (mp_p->mp_user_alloc > mp_p->mp_max_alloc) {
mp_p->mp_max_alloc = mp_p->mp_user_alloc;
}
SET_POINTER(error_p, MPOOL_ERROR_NONE);
return addr;
}
/*
* static int free_mem
*
* DESCRIPTION:
*
* Free an address from a memory pool.
*
* RETURNS:
*
* Success - MPOOL_ERROR_NONE
*
* Failure - Mpool error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal free.
*
* addr <-> Address to free.
*
* size -> Size of the address being freed.
*/
static int free_mem(mpool_t *mp_p, void *addr, const unsigned long size)
{
unsigned long old_size, fence;
int ret;
mpool_block_t *block_p;
/*
* If the size is larger than a block then the allocation must be at
* the front of the block.
*/
if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
block_p = (mpool_block_t *)((char *)addr - sizeof(mpool_block_t));
if (block_p->mb_magic != BLOCK_MAGIC
|| block_p->mb_magic2 != BLOCK_MAGIC) {
return MPOOL_ERROR_POOL_OVER;
}
}
/* make sure we have enough bytes */
if (size < MIN_ALLOCATION) {
old_size = MIN_ALLOCATION;
}
else {
old_size = size;
}
/* if we are packing the pool smaller */
if (BIT_IS_SET(mp_p->mp_flags, MPOOL_FLAG_NO_FREE)) {
fence = 0;
}
else {
/* find the user's magic numbers if they were written */
ret = check_magic(addr, old_size);
if (ret != MPOOL_ERROR_NONE) {
return ret;
}
fence = FENCE_SIZE;
}
/* now we free the pointer */
ret = free_pointer(mp_p, addr, old_size + fence);
if (ret != MPOOL_ERROR_NONE) {
return ret;
}
mp_p->mp_user_alloc -= old_size;
/* adjust our stats */
mp_p->mp_alloc_c--;
return MPOOL_ERROR_NONE;
}
/***************************** exported routines *****************************/
/*
* mpool_t *mpool_open
*
* DESCRIPTION:
*
* Open/allocate a new memory pool.
*
* RETURNS:
*
* Success - Pool pointer which must be passed to mpool_close to
* deallocate.
*
* Failure - NULL
*
* ARGUMENTS:
*
* flags -> Flags to set attributes of the memory pool. See the top
* of mpool.h.
*
* page_size -> Set the internal memory page-size. This must be a
* multiple of the getpagesize() value. Set to 0 for the default.
*
* start_addr -> Starting address to try and allocate memory pools.
* This is ignored if the MPOOL_FLAG_USE_SBRK is enabled.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a mpool error code.
*/
mpool_t *mpool_open(const unsigned int flags, const unsigned int page_size,
void *start_addr, int *error_p)
{
mpool_block_t *block_p;
int page_n, ret;
mpool_t mp, *mp_p;
void *free_addr;
if (! enabled_b) {
startup();
}
/* zero our temp struct */
memset(&mp, 0, sizeof(mp));
mp.mp_magic = MPOOL_MAGIC;
mp.mp_flags = flags;
mp.mp_alloc_c = 0;
mp.mp_user_alloc = 0;
mp.mp_max_alloc = 0;
mp.mp_page_c = 0;
/* mp.mp_page_size set below */
/* mp.mp_blocks_bit_n set below */
/* mp.mp_fd set below */
/* mp.mp_top set below */
/* mp.mp_addr set below */
mp.mp_log_func = NULL;
mp.mp_min_p = NULL;
mp.mp_bounds_p = NULL;
mp.mp_first_p = NULL;
mp.mp_last_p = NULL;
mp.mp_magic2 = MPOOL_MAGIC;
/* get and sanity check our page size */
if (page_size > 0) {
mp.mp_page_size = page_size;
if (mp.mp_page_size % getpagesize() != 0) {
SET_POINTER(error_p, MPOOL_ERROR_ARG_INVALID);
return NULL;
}
}
else {
mp.mp_page_size = getpagesize() * DEFAULT_PAGE_MULT;
if (mp.mp_page_size % 1024 != 0) {
SET_POINTER(error_p, MPOOL_ERROR_PAGE_SIZE);
return NULL;
}
}
if (BIT_IS_SET(flags, MPOOL_FLAG_USE_SBRK)) {
mp.mp_fd = -1;
mp.mp_addr = NULL;
mp.mp_top = 0;
}
else {
/* open dev-zero for our mmaping */
mp.mp_fd = open("/dev/zero", O_RDWR, 0);
if (mp.mp_fd < 0) {
SET_POINTER(error_p, MPOOL_ERROR_OPEN_ZERO);
return NULL;
}
mp.mp_addr = start_addr;
/* we start at the front of the file */
mp.mp_top = 0;
}
/*
* Find out how many pages we need for our mpool structure.
*
* NOTE: this adds possibly unneeded space for mpool_block_t which
* may not be in this block.
*/
page_n = PAGES_IN_SIZE(&mp, sizeof(mpool_t));
/* now allocate us space for the actual struct */
mp_p = alloc_pages(&mp, page_n, error_p);
if (mp_p == NULL) {
if (mp.mp_fd >= 0) {
(void)close(mp.mp_fd);
mp.mp_fd = -1;
}
return NULL;
}
/*
* NOTE: we do not normally free the rest of the block here because
* we want to lessen the chance of an allocation overwriting the
* main structure.
*/
if (BIT_IS_SET(flags, MPOOL_FLAG_HEAVY_PACKING)) {
/* we add a block header to the front of the block */
block_p = (mpool_block_t *)mp_p;
/* init the block header */
block_p->mb_magic = BLOCK_MAGIC;
block_p->mb_bounds_p = (char *)block_p + SIZE_OF_PAGES(&mp, page_n);
block_p->mb_next_p = NULL;
block_p->mb_magic2 = BLOCK_MAGIC;
/* the mpool pointer is then the 2nd thing in the block */
mp_p = FIRST_ADDR_IN_BLOCK(block_p);
free_addr = (char *)mp_p + sizeof(mpool_t);
/* free the rest of the block */
ret = free_pointer(&mp, free_addr,
(char *)block_p->mb_bounds_p - (char *)free_addr);
if (ret != MPOOL_ERROR_NONE) {
if (mp.mp_fd >= 0) {
(void)close(mp.mp_fd);