Bug Summary

File: libs/sofia-sip/libsofia-sip-ua/su/su_alloc.c
Location: line 1428, column 2
Description: Use of memory after it is freed

Annotated Source Code

1/*
2 * This file is part of the Sofia-SIP package
3 *
4 * Copyright (C) 2005 Nokia Corporation.
5 *
6 * Contact: Pekka Pessi <pekka.pessi@nokia.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public License
10 * as published by the Free Software Foundation; either version 2.1 of
11 * the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include "config.h"
26
27/**@defgroup su_alloc Memory Management Tutorial
28 *
29 * This page gives a short overview of home-based memory management used
30 * with Sofia. Such home-based memory management is useful when a lot of
31 * memory blocks are allocated for given task. The allocations are done via
32 * the @e memory @e home, which keeps a reference to each block. When the
33 * memory home is then freed, it will free all blocks to which it has
34 * reference.
35 *
36 * Typically, there is a @e home @e object which contains a su_home_t
37 * structure in the beginning of the object (sort of inheritance from
38 * su_home_t):
39 * @code
40 * struct context {
41 * su_home_t ctx_home[1];
42 * other_t *ctx_stuff;
43 * ...
44 * }
45 * @endcode
46 *
47 * A new home memory pool can be created with su_home_new():
48 * @code
49 * struct context *ctx = su_home_new(sizeof (struct context));
50 * @endcode
51 *
52 * It is also possible to create a secondary memory pool that can be
53 * released separately:
54 *
55 * @code
56 * struct context *ctx = su_home_clone(tophome, sizeof (struct context));
57 * @endcode
58 *
59 * Note that the tophome has a reference to @a ctx structure; whenever
60 * tophome is freed, the @a ctx is also freed.
61 *
62 * You can also create an independent home object by passing NULL as @a
63 * tophome argument. This is identical to the call to su_home_new().
64 *
65 * The memory allocations using @a ctx proceed then as follows:
66 * @code
67 * zeroblock = su_zalloc(ctx->ctx_home, sizeof (*zeroblock));
68 * @endcode
69 *
70 * The home memory pool - the home object and all the memory blocks
71 * allocated using it - are freed when su_home_unref() is called:
72 *
73 * @code
74 * su_home_unref(ctx->ctx_home).
75 * @endcode
76 *
77 * @note For historical reasons, su_home_unref() is also known as
78 * su_home_zap().
79 *
80 * As you might have guessed, it is also possible to use reference counting
81 * with home objects. The function su_home_ref() increases the reference
82 * count, su_home_unref() decreases it. A newly allocated or initialized
83 * home object has reference count of 1.
84 *
85 * @note Please note that while it is possible to create new references to
86 * secondary home objects which have a parent home, the secondary home
87 * objects will always be destroyed when the parent home is destroyed even
88 * if there are other references left to them.
89 *
90 * The memory blocks in a cloned home object are freed when the object with
91 * home itself is freed:
92 * @code
93 * su_free(tophome, ctx);
94 * @endcode
95 *
96 * @note
97 *
98 * The su_home_destroy() function is deprecated as it does not free the home
99 * object itself. Like su_home_deinit(), it should be called only on home
100 * objects with reference count of 1.
101 *
102 * The function su_home_init() initializes a home object structure. When the
103 * initialized home object is destroyed or deinitialized or its reference
104 * count reaches zero, the memory allocated through it is reclaimed but the home
105 * object structure itself is not freed.
106 *
107 * @section su_home_destructor_usage Destructors
108 *
109 * It is possible to give a destructor function to a home object. The
110 * destructor releases other resources associated with the home object
111 * besides memory. The destructor function will be called when the reference
112 * count of home reaches zero (upon calling su_home_unref()) or the home
113 * object is otherwise deinitialized (calling su_home_deinit() on
114 * objects allocated from stack).
115 *
116 * @section su_home_move_example Combining Allocations
117 *
118 * In some cases, an operation that makes multiple memory allocations may
119 * fail, making those allocations redundant. If the allocations are made
120 * through a temporary home, they can be conveniently freed by calling
121 * su_home_deinit(), for instance. If, however, the operation is successful,
122 * and one wants to keep the allocations, the allocations can be combined
123 * into an existing home with su_home_move(). For example,
124 * @code
125 * int example(su_home_t *home, ...)
126 * {
127 * su_home_t temphome[1] = { SU_HOME_INIT(temphome) };
128 *
129 * ... do lot of allocations with temphome ...
130 *
131 * if (success)
132 * su_home_move(home, temphome);
133 * su_home_deinit(temphome);
134 *
135 * return success;
136 * }
137 * @endcode
138 *
139 * Note that the @a temphome is deinitialized in every case, but when
140 * operation is successful, the allocations are moved from @a temphome to @a
141 * home.
142 *
143 * @section su_alloc_threadsafe Threadsafe Operation
144 *
145 * If multiple threads need to access same home object, it must be marked as
146 * @e threadsafe by calling su_home_threadsafe() with the home pointer as
147 * argument. The threadsafeness is not inherited by clones.
148 *
149 * The threadsafe home objects can be locked and unlocked with
150 * su_home_mutex_lock() and su_home_mutex_unlock(). These operations are
151 * no-op on home object that is not threadsafe.
152 *
153 * @section su_alloc_preloading Preloading a Memory Home
154 *
155 * In some situations there is quite heavy overhead if the global heap
156 * allocator is used. The overhead caused by the large number of small
157 * allocations can be reduced by using su_home_preload(): it allocates or
158 * preloads some a memory to home to be used as a kind of private heap. The
159 * preloaded memory area is then used to satisfy small enough allocations.
160 * For instance, the SIP parser typically preloads some 2K of memory when it
161 * starts to parse the message.
162 *
163 * @section su_alloc_stack Using Stack
164 *
165 * In some situation, it is sensible to use memory allocated from stack for
166 * some operations. The su_home_auto() function can be used for that
167 * purpose. The memory area from stack is used to satisfy the allocations as
168 * far as possible; if it is not enough, allocation is made from heap.
169 *
170 * The word @e auto refers to the automatic scope; however, the home object
171 * that was initialized with su_home_auto() must be explicitly deinitialized
172 * with su_home_deinit() or su_home_unref() when the program exits the scope
173 * where the stack frame used in su_home_auto() was allocated.
174 */
175
176/**@ingroup su_alloc
177 * @CFILE su_alloc.c Home-based memory management.
178 *
179 * @author Pekka Pessi <Pekka.Pessi@nokia.com>.
180 *
181 * @date Created: Thu Aug 19 01:12:25 1999 ppessi
182 */
183
184#include <sofia-sip/su_config.h>
185#include "sofia-sip/su_alloc.h"
186#include "sofia-sip/su_alloc_stat.h"
187#include "sofia-sip/su_errno.h"
188
189#include <stdio.h>
190#include <stdlib.h>
191#include <stddef.h>
192#include <memory.h>
193#include <limits.h>
194
195#include <assert.h>
196
/* Hooks for optional locking support. These stay NULL until installed
 * elsewhere (presumably by the su_home_threadsafe() machinery in a
 * sibling translation unit -- not visible in this file; confirm there). */
int (*_su_home_locker)(void *mutex);
int (*_su_home_unlocker)(void *mutex);

int (*_su_home_mutex_locker)(void *mutex);
int (*_su_home_mutex_trylocker)(void *mutex);
int (*_su_home_mutex_unlocker)(void *mutex);

void (*_su_home_destroy_mutexes)(void *mutex);
205
206#if HAVE_FREE_NULL1
207#define safefree(x)free((x)) free((x))
208#else
209su_inlinestatic inline void safefree(void *b)free((void *b)) { b ? free(b) : (void)0; }
210#endif
211
212static inline su_block_t* MEMLOCK(const su_home_t *h) {
213 if (h && h->suh_lock) _su_home_locker(h->suh_lock);
214 return h->suh_blocks;
215}
216static inline void* UNLOCK(const su_home_t *h) {
217 if (h && h->suh_lock) _su_home_unlocker(h->suh_lock);
218 return NULL((void*)0);
219}
220
/* Memory-checking configuration: MEMCHECK enables poisoning of freed
 * blocks; MEMCHECK_EXTRA reserves extra bytes after each block for a
 * size canary. Both default to off under NDEBUG. */
#ifdef NDEBUG
#define MEMCHECK 0
#define MEMCHECK_EXTRA 0
#elif !defined(MEMCHECK)
/* Default settings for valgrinding */
#define MEMCHECK 1
#define MEMCHECK_EXTRA 0
#elif !defined(MEMCHECK_EXTRA)
#define MEMCHECK_EXTRA sizeof (size_t)
#endif

enum {
  SUB_N = 31,			/**< Initial size */
  SUB_N_AUTO = 7,		/**< Initial size for autohome */
  SUB_P = 29			/**< Secondary probe.
				 * Secondary probe must be relative prime
				 * with all sub_n values */
};

#define ALIGNMENT (8)
#define __ALIGN(n) (size_t)(((n) + (ALIGNMENT - 1)) & (size_t)~(ALIGNMENT - 1))
#define SIZEBITS (sizeof (unsigned) * 8 - 1)

/* Bookkeeping record for one allocation made through a home. */
typedef struct {
  unsigned sua_size:SIZEBITS;	/**< Size of the block */
  unsigned sua_home:1;		/**< Is this another home? */
  unsigned :0;
  void    *sua_data;		/**< Data pointer */
} su_alloc_t;
250
251struct su_block_s {
252 su_home_t *sub_parent; /**< Parent home */
253 char *sub_preload; /**< Preload area */
254 su_home_stat_t *sub_stats; /**< Statistics.. */
255 void (*sub_destructor)(void *); /**< Destructor function */
256 size_t sub_ref; /**< Reference count */
257#define REF_MAX(18446744073709551615UL) SIZE_MAX(18446744073709551615UL)
258 size_t sub_used; /**< Number of blocks allocated */
259 size_t sub_n; /**< Size of hash table */
260
261 unsigned sub_prsize:16; /**< Preload size */
262 unsigned sub_prused:16; /**< Used from preload */
263 unsigned sub_hauto:1; /**< "Home" is not from malloc */
264 unsigned sub_auto:1; /**< struct su_block_s is not from malloc */
265 unsigned sub_preauto:1; /**< Preload is not from malloc */
266 unsigned sub_auto_all:1; /**< Everything is from stack! */
267 unsigned :0;
268
269 su_alloc_t sub_nodes[SUB_N]; /**< Pointers to data/lower blocks */
270};
271
272static void su_home_check_blocks(su_block_t const *b);
273
274static void su_home_stats_alloc(su_block_t *, void *p, void *preload,
275 size_t size, int zero);
276static void su_home_stats_free(su_block_t *sub, void *p, void *preload,
277 unsigned size);
278
279static void _su_home_deinit(su_home_t *home);
280
281#define SU_ALLOC_STATS1 1
282
283#if SU_ALLOC_STATS1
284size_t count_su_block_find, count_su_block_find_loop;
285size_t size_su_block_find, used_su_block_find;
286size_t max_size_su_block_find, max_used_su_block_find;
287size_t su_block_find_collision, su_block_find_collision_used,
288 su_block_find_collision_size;
289#endif
290
291su_inlinestatic inline su_alloc_t *su_block_find(su_block_t const *b, void const *p)
292{
293 size_t h, h0, probe;
294
295#if SU_ALLOC_STATS1
296 size_t collision = 0;
297
298 count_su_block_find++;
299 size_su_block_find += b->sub_n;
300 used_su_block_find += b->sub_used;
301 if (b->sub_n > max_size_su_block_find)
302 max_size_su_block_find = b->sub_n;
303 if (b->sub_used > max_used_su_block_find)
304 max_used_su_block_find = b->sub_used;
305#endif
306
307 assert(p != NULL)((p != ((void*)0)) ? (void) (0) : __assert_fail ("p != ((void*)0)"
, "su_alloc.c", 307, __PRETTY_FUNCTION__))
;
308
309 h = h0 = (size_t)((uintptr_t)p % b->sub_n);
310
311 probe = (b->sub_n > SUB_P) ? SUB_P : 1;
312
313 do {
314 if (b->sub_nodes[h].sua_data == p) {
315 su_alloc_t const *retval = &b->sub_nodes[h];
316 return (su_alloc_t *)retval; /* discard const */
317 }
318 h += probe;
319 if (h >= b->sub_n)
320 h -= b->sub_n;
321#if SU_ALLOC_STATS1
322 if (++collision > su_block_find_collision)
323 su_block_find_collision = collision,
324 su_block_find_collision_used = b->sub_used,
325 su_block_find_collision_size = b->sub_n;
326 count_su_block_find_loop++;
327#endif
328 } while (h != h0);
329
330 return NULL((void*)0);
331}
332
333su_inlinestatic inline su_alloc_t *su_block_add(su_block_t *b, void *p)
334{
335 size_t h, probe;
336
337 assert(p != NULL)((p != ((void*)0)) ? (void) (0) : __assert_fail ("p != ((void*)0)"
, "su_alloc.c", 337, __PRETTY_FUNCTION__))
;
338
339 h = (size_t)((uintptr_t)p % b->sub_n);
340
341 probe = (b->sub_n > SUB_P) ? SUB_P : 1;
342
343 while (b->sub_nodes[h].sua_data) {
344 h += probe;
345 if (h >= b->sub_n)
346 h -= b->sub_n;
347 }
348
349 b->sub_used++;
350 b->sub_nodes[h].sua_data = p;
351
352 return &b->sub_nodes[h];
353}
354
355su_inlinestatic inline int su_is_preloaded(su_block_t const *sub, char *data)
356{
357 return
358 sub->sub_preload &&
359 sub->sub_preload <= data &&
360 sub->sub_preload + sub->sub_prsize > data;
361}
362
363su_inlinestatic inline int su_alloc_check(su_block_t const *sub, su_alloc_t const *sua)
364{
365#if MEMCHECK_EXTRA0
366 size_t size, term;
367 assert(sua)((sua) ? (void) (0) : __assert_fail ("sua", "su_alloc.c", 367
, __PRETTY_FUNCTION__))
;
368 if (sua) {
369 size = (size_t)sua->sua_size;
370 memcpy(&term, (char *)sua->sua_data + size, sizeof (term));
371 assert(size - term == 0)((size - term == 0) ? (void) (0) : __assert_fail ("size - term == 0"
, "su_alloc.c", 371, __PRETTY_FUNCTION__))
;
372 return size - term == 0;
373 }
374 else
375 return 0;
376#endif
377 return sua != NULL((void*)0);
378}
379
380/** Allocate the block hash table.
381 *
382 * @internal
383 *
384 * Allocate a block hash table of @a n elements.
385 *
386 * @param home pointer to home object
387 * @param n number of buckets in hash table
388 *
389 * @return
390 * This function returns a pointer to the allocated hash table or
391 * NULL if an error occurred.
392 */
393su_inlinestatic inline su_block_t *su_hash_alloc(size_t n)
394{
395 su_block_t *b = calloc(1, offsetof(su_block_t, sub_nodes[n])__builtin_offsetof(su_block_t, sub_nodes[n]));
396
397 if (b) {
398 /* Implicit su_home_init(); */
399 b->sub_ref = 1;
400 b->sub_hauto = 1;
401 b->sub_n = n;
402 }
403
404 return b;
405}
406
407enum sub_zero { do_malloc, do_calloc, do_clone };
408
409/** Allocate a memory block.
410 *
411 * @internal
412 *
413 * Precondition: locked home
414 *
415 * @param home home to allocate
416 * @param sub block structure used to allocate
417 * @param size
418 * @param zero if true, zero allocated block;
419 * if > 1, allocate a subhome
420 *
421 */
422static
423void *sub_alloc(su_home_t *home,
424 su_block_t *sub,
425 size_t size,
426 enum sub_zero zero)
427{
428 void *data, *preload = NULL((void*)0);
429
430 assert (size < (((size_t)1) << SIZEBITS))((size < (((size_t)1) << (sizeof (unsigned) * 8 - 1)
)) ? (void) (0) : __assert_fail ("size < (((size_t)1) << (sizeof (unsigned) * 8 - 1))"
, "su_alloc.c", 430, __PRETTY_FUNCTION__))
;
431
432 if (size >= ((size_t)1) << SIZEBITS(sizeof (unsigned) * 8 - 1))
433 return (void)(errno(*__errno_location ()) = ENOMEM12), NULL((void*)0);
434
435 if (!size) return NULL((void*)0);
436
437 if (sub == NULL((void*)0) || 3 * sub->sub_used > 2 * sub->sub_n) {
438 /* Resize the hash table */
439 size_t i, n, n2;
440 su_block_t *b2;
441
442 if (sub)
443 n = home->suh_blocks->sub_n, n2 = 4 * n + 3; //, used = sub->sub_used;
444 else
445 n = 0, n2 = SUB_N; //, used = 0;
446
447#if 0
448 printf("su_alloc(home = %p): realloc block hash of size %d\n", home, n2);
449#endif
450
451 if (!(b2 = su_hash_alloc(n2)))
452 return NULL((void*)0);
453
454 for (i = 0; i < n; i++) {
455 if (sub->sub_nodes[i].sua_data)
456 su_block_add(b2, sub->sub_nodes[i].sua_data)[0] = sub->sub_nodes[i];
457 }
458
459 if (sub) {
460 b2->sub_parent = sub->sub_parent;
461 b2->sub_ref = sub->sub_ref;
462 b2->sub_preload = sub->sub_preload;
463 b2->sub_prsize = sub->sub_prsize;
464 b2->sub_prused = sub->sub_prused;
465 b2->sub_hauto = sub->sub_hauto;
466 b2->sub_preauto = sub->sub_preauto;
467 b2->sub_destructor = sub->sub_destructor;
468 /* auto_all is not copied! */
469 b2->sub_stats = sub->sub_stats;
470 }
471
472 home->suh_blocks = b2;
473
474 if (sub && !sub->sub_auto)
475 free(sub);
476 sub = b2;
477 }
478
479 if (sub && zero < do_clone &&
480 sub->sub_preload && size <= sub->sub_prsize) {
481 /* Use preloaded memory */
482 size_t prused = sub->sub_prused + size + MEMCHECK_EXTRA0;
483 prused = __ALIGN(prused)(size_t)(((prused) + ((8) - 1)) & (size_t)~((8) - 1));
484 if (prused <= sub->sub_prsize) {
485 preload = (char *)sub->sub_preload + sub->sub_prused;
486 sub->sub_prused = (unsigned)prused;
487 }
488 }
489
490 if (preload && zero)
491 data = memset(preload, 0, size);
492 else if (preload)
493 data = preload;
494 else if (zero)
495 data = calloc(1, size + MEMCHECK_EXTRA0);
496 else
497 data = malloc(size + MEMCHECK_EXTRA0);
498
499 if (data) {
500 su_alloc_t *sua;
501
502#if MEMCHECK_EXTRA0
503 size_t term = 0 - size;
504 memcpy((char *)data + size, &term, sizeof (term));
505#endif
506
507 if (!preload)
508 sub->sub_auto_all = 0;
509
510 if (zero >= do_clone) {
511 /* Prepare cloned home */
512 su_home_t *subhome = data;
513
514 assert(preload == 0)((preload == 0) ? (void) (0) : __assert_fail ("preload == 0",
"su_alloc.c", 514, __PRETTY_FUNCTION__))
;
515
516 subhome->suh_blocks = su_hash_alloc(SUB_N);
517 if (!subhome->suh_blocks)
518 return (void)safefree(data)free((data)), NULL((void*)0);
519
520 subhome->suh_size = (unsigned)size;
521 subhome->suh_blocks->sub_parent = home;
522 subhome->suh_blocks->sub_hauto = 0;
523 }
524
525 /* OK, add the block to the hash table. */
526
527 sua = su_block_add(sub, data); assert(sua)((sua) ? (void) (0) : __assert_fail ("sua", "su_alloc.c", 527
, __PRETTY_FUNCTION__))
;
528 sua->sua_size = (unsigned)size;
529 sua->sua_home = zero > 1;
530
531 if (sub->sub_stats)
532 su_home_stats_alloc(sub, data, preload, size, zero);
533 }
534
535 return data;
536}
537
538/**Create a new su_home_t object.
539 *
540 * Create a home object used to collect multiple memory allocations under
541 * one handle. The memory allocations made using this home object is freed
542 * either when this home is destroyed.
543 *
544 * The maximum @a size of a home object is INT_MAX (2 gigabytes).
545 *
546 * @param size size of home object
547 *
548 * The memory home object allocated with su_home_new() can be reclaimed with
549 * su_home_unref().
550 *
551 * @return
552 * This function returns a pointer to an su_home_t object, or NULL upon
553 * an error.
554 */
555void *su_home_new(isize_t size)
556{
557 su_home_t *home;
558
559 assert(size >= sizeof (*home))((size >= sizeof (*home)) ? (void) (0) : __assert_fail ("size >= sizeof (*home)"
, "su_alloc.c", 559, __PRETTY_FUNCTION__))
;
560
561 if (size < sizeof (*home))
562 return (void)(errno(*__errno_location ()) = EINVAL22), NULL((void*)0);
563 else if (size > INT_MAX2147483647)
564 return (void)(errno(*__errno_location ()) = ENOMEM12), NULL((void*)0);
565
566 home = calloc(1, size);
567 if (home) {
568 home->suh_size = (int)size;
569 home->suh_blocks = su_hash_alloc(SUB_N);
570 if (home->suh_blocks)
571 home->suh_blocks->sub_hauto = 0;
572 else
573 safefree(home)free((home)), home = NULL((void*)0);
574 }
575
576 return home;
577}
578
579/** Set destructor function.
580 *
581 * The destructor function is called after the reference count of a
582 * #su_home_t object reaches zero or a home object is deinitialized, but
583 * before any of the memory areas within the home object are freed.
584 *
585 * @since New in @VERSION_1_12_4.
586 * Earlier versions had su_home_desctructor() (spelling).
587 */
588int su_home_destructor(su_home_t *home, void (*destructor)(void *))
589{
590 int retval = -1;
591
592 if (home) {
593 su_block_t *sub = MEMLOCK(home);
594 if (sub && sub->sub_destructor == NULL((void*)0)) {
595 sub->sub_destructor = destructor;
596 retval = 0;
597 }
598 UNLOCK(home);
599 }
600 else
601 su_seterrno(EFAULT14);
602
603 return retval;
604}
605
606#undef su_home_desctructor
607
608/** Set destructor function.
609 *
610 * @deprecated The su_home_destructor() was added in @VERSION_1_12_4. The
611 * su_home_desctructor() is now defined as a macro expanding as
612 * su_home_destructor(). If you want to compile an application as binary
613 * compatible with earlier versions, you have to define su_home_desctructor
614 * as itself, e.g.,
615 * @code
616 * #define su_home_desctructor su_home_desctructor
617 * #include <sofia-sip/su_alloc.h>
618 * @endcode
619 */
620int su_home_desctructor(su_home_t *home, void (*destructor)(void *))
621{
622 return su_home_destructor(home, destructor);
623}
624
625
626#if (defined(HAVE_MEMLEAK_LOG) && (HAVE_MEMLEAK_LOG != 1))
627#include "sofia-sip/su_debug.h"
628
629
630static void *real_su_home_ref(su_home_t const *home)
631{
632 if (home) {
633 su_block_t *sub = MEMLOCK(home);
634
635 if (sub == NULL((void*)0) || sub->sub_ref == 0) {
636 assert(sub && sub->sub_ref != 0)((sub && sub->sub_ref != 0) ? (void) (0) : __assert_fail
("sub && sub->sub_ref != 0", "su_alloc.c", 636, __PRETTY_FUNCTION__
))
;
637 UNLOCK(home);
638 return NULL((void*)0);
639 }
640
641 if (sub->sub_ref != REF_MAX(18446744073709551615UL))
642 sub->sub_ref++;
643 UNLOCK(home);
644 }
645 else
646 su_seterrno(EFAULT14);
647
648 return (void *)home;
649}
650
651
652static int real_su_home_unref(su_home_t *home)
653{
654 su_block_t *sub;
655
656 if (home == NULL((void*)0))
657 return 0;
658
659 sub = MEMLOCK(home);
660
661 if (sub == NULL((void*)0)) {
662 /* Xyzzy */
663 return 0;
664 }
665 else if (sub->sub_ref == REF_MAX(18446744073709551615UL)) {
666 UNLOCK(home);
667 return 0;
668 }
669 else if (--sub->sub_ref > 0) {
670 UNLOCK(home);
671 return 0;
672 }
673 else if (sub->sub_parent) {
674 su_home_t *parent = sub->sub_parent;
675 UNLOCK(home);
676 su_free(parent, home);
677 return 1;
678 }
679 else {
680 int hauto = sub->sub_hauto;
681 _su_home_deinit(home);
682 if (!hauto)
683 safefree(home)free((home));
684 /* UNLOCK(home); */
685 return 1;
686 }
687}
688
689su_home_t *
690_su_home_ref_by(su_home_t *home,
691 char const *file, unsigned line,
692 char const *function)
693{
694 if (home)
695 SU_DEBUG_0(("%ld %p - su_home_ref() => "MOD_ZU"%zu"" by %s:%u: %s()\n", pthread_self(),
696 home, su_home_refcount(home) + 1, file, line, function));
697 return (su_home_t *)real_su_home_ref(home);
698}
699
700int
701_su_home_unref_by(su_home_t *home,
702 char const *file, unsigned line,
703 char const *function)
704{
705 if (home) {
706 size_t refcount = su_home_refcount(home) - 1;
707 int freed = real_su_home_unref(home);
708
709 if (freed) refcount = 0;
710 SU_DEBUG_0(("%ld %p - su_home_unref() => "MOD_ZU"%zu"" by %s:%u: %s()\n", pthread_self(),
711 home, refcount, file, line, function));
712 return freed;
713 }
714
715 return 0;
716}
717#else
718
719/** Create a new reference to a home object. */
720void *su_home_ref(su_home_t const *home)
721{
722 if (home) {
723 su_block_t *sub = MEMLOCK(home);
724
725 if (sub == NULL((void*)0) || sub->sub_ref == 0) {
726 assert(sub && sub->sub_ref != 0)((sub && sub->sub_ref != 0) ? (void) (0) : __assert_fail
("sub && sub->sub_ref != 0", "su_alloc.c", 726, __PRETTY_FUNCTION__
))
;
727 UNLOCK(home);
728 return NULL((void*)0);
729 }
730
731 if (sub->sub_ref != REF_MAX(18446744073709551615UL))
732 sub->sub_ref++;
733 UNLOCK(home);
734 }
735 else
736 su_seterrno(EFAULT14);
737
738 return (void *)home;
739}
740
741
742/**Unreference a su_home_t object.
743 *
744 * Decrements the reference count on home object and destroys and frees it
745 * and the memory allocations using it if the reference count reaches 0.
746 *
747 * @param home memory pool object to be unreferenced
748 *
749 * @retval 1 if object was freed
750 * @retval 0 if object is still alive
751 */
752int su_home_unref(su_home_t *home)
753{
754 su_block_t *sub;
755
756 if (home == NULL((void*)0))
757 return 0;
758
759 sub = MEMLOCK(home);
760
761 if (sub == NULL((void*)0)) {
762 /* Xyzzy */
763 return 0;
764 }
765 else if (sub->sub_ref == REF_MAX(18446744073709551615UL)) {
766 UNLOCK(home);
767 return 0;
768 }
769 else if (--sub->sub_ref > 0) {
770 UNLOCK(home);
771 return 0;
772 }
773 else if (sub->sub_parent) {
774 su_home_t *parent = sub->sub_parent;
775 UNLOCK(home);
776 su_free(parent, home);
777 return 1;
778 }
779 else {
780 int hauto = sub->sub_hauto;
781 _su_home_deinit(home);
782 if (!hauto)
783 safefree(home)free((home));
784 /* UNLOCK(home); */
785 return 1;
786 }
787}
788#endif
789
790/** Return reference count of home. */
791size_t su_home_refcount(su_home_t *home)
792{
793 size_t count = 0;
794
795 if (home) {
796 su_block_t *sub = MEMLOCK(home);
797
798 if (sub)
799 count = sub->sub_ref;
800
801 UNLOCK(home);
802 }
803
804 return count;
805}
806
807/**Clone a su_home_t object.
808 *
809 * Clone a secondary home object used to collect multiple memory
810 * allocations under one handle. The memory is freed either when the cloned
811 * home is destroyed or when the parent home is destroyed.
812 *
813 * An independent
814 * home object is created if NULL is passed as @a parent argument.
815 *
816 * @param parent a parent object (may be NULL)
817 * @param size size of home object
818 *
819 * The memory home object allocated with su_home_clone() can be freed with
820 * su_home_unref().
821 *
822 * @return
823 * This function returns a pointer to an su_home_t object, or NULL upon
824 * an error.
825 */
826void *su_home_clone(su_home_t *parent, isize_t size)
827{
828 su_home_t *home;
829
830 assert(size >= sizeof (*home))((size >= sizeof (*home)) ? (void) (0) : __assert_fail ("size >= sizeof (*home)"
, "su_alloc.c", 830, __PRETTY_FUNCTION__))
;
831
832 if (size < sizeof (*home))
833 return (void)(errno(*__errno_location ()) = EINVAL22), NULL((void*)0);
834 else if (size > INT_MAX2147483647)
835 return (void)(errno(*__errno_location ()) = ENOMEM12), NULL((void*)0);
836
837 if (parent) {
838 su_block_t *sub = MEMLOCK(parent);
839 home = sub_alloc(parent, sub, size, (enum sub_zero)2);
840 UNLOCK(parent);
841 }
842 else {
843 home = su_home_new(size);
844 }
845
846 return home;
847}
848
849/** Return true if home is a clone. */
850int su_home_has_parent(su_home_t const *home)
851{
852 return su_home_parent(home) != NULL((void*)0);
853}
854
855/** Return home's parent home. */
856su_home_t *su_home_parent(su_home_t const *home)
857{
858 su_home_t *parent = NULL((void*)0);
859
860 if (home && home->suh_blocks) {
861 su_block_t *sub = MEMLOCK(home);
862 parent = sub->sub_parent;
863 UNLOCK(home);
864 }
865
866 return parent;
867}
868
869/** Allocate a memory block.
870 *
871 * Allocates a memory block of a given @a size.
872 *
873 * If @a home is NULL, this function behaves exactly like malloc().
874 *
875 * @param home pointer to home object
876 * @param size size of the memory block to be allocated
877 *
878 * @return
879 * This function returns a pointer to the allocated memory block or
880 * NULL if an error occurred.
881 */
882void *su_alloc(su_home_t *home, isize_t size)
883{
884 void *data;
885
886 if (home) {
887 data = sub_alloc(home, MEMLOCK(home), size, (enum sub_zero)0);
888 UNLOCK(home);
889 }
890 else
891 data = malloc(size);
892
893 return data;
894}
895
896/**Free a memory block.
897 *
898 * Frees a single memory block. The @a home must be the owner of the memory
899 * block (usually the memory home used to allocate the memory block, or NULL
900 * if no home was used).
901 *
902 * @param home pointer to home object
903 * @param data pointer to the memory block to be freed
904 */
905void su_free(su_home_t *home, void *data)
906{
907 if (!data)
908 return;
909
910 if (home) {
911 su_alloc_t *allocation;
912 su_block_t *sub = MEMLOCK(home);
913
914 assert(sub)((sub) ? (void) (0) : __assert_fail ("sub", "su_alloc.c", 914
, __PRETTY_FUNCTION__))
;
915 allocation = su_block_find(sub, data);
916 assert(allocation)((allocation) ? (void) (0) : __assert_fail ("allocation", "su_alloc.c"
, 916, __PRETTY_FUNCTION__))
;
917
918 if (su_alloc_check(sub, allocation)) {
919 void *preloaded = NULL((void*)0);
920
921 /* Is this preloaded data? */
922 if (su_is_preloaded(sub, data))
923 preloaded = data;
924
925 if (sub->sub_stats)
926 su_home_stats_free(sub, data, preloaded, allocation->sua_size);
927
928 if (allocation->sua_home) {
929 su_home_t *subhome = data;
930 su_block_t *sub = MEMLOCK(subhome);
931
932 assert(sub->sub_ref != REF_MAX)((sub->sub_ref != (18446744073709551615UL)) ? (void) (0) :
__assert_fail ("sub->sub_ref != (18446744073709551615UL)"
, "su_alloc.c", 932, __PRETTY_FUNCTION__))
;
933 /* assert(sub->sub_ref > 0); */
934
935 sub->sub_ref = 0; /* Zap all references */
936
937 _su_home_deinit(subhome);
938 }
939
940#if MEMCHECK1 != 0
941 memset(data, 0xaa, (size_t)allocation->sua_size);
942#endif
943
944 memset(allocation, 0, sizeof (*allocation));
945 sub->sub_used--;
946
947 if (preloaded)
948 data = NULL((void*)0);
949 }
950
951 UNLOCK(home);
952 }
953
954 safefree(data)free((data));
955}
956
957/** Check if pointer has been allocated through home.
958 *
959 * @param home pointer to a memory home
960 * @param data pointer to a memory area possibly allocated though home
961 *
962 * @NEW_1_12_9
963 */
964int su_home_check_alloc(su_home_t const *home, void const *data)
965{
966 int retval = 0;
967
968 if (home && data) {
969 su_block_t const *sub = MEMLOCK(home);
970 su_alloc_t *allocation = su_block_find(sub, data);
971
972 retval = allocation != NULL((void*)0);
973
974 UNLOCK(home);
975 }
976
977 return retval;
978}
979
980/** Check home consistency.
981 *
982 * Ensures that the home structure and all memory blocks allocated through
983 * it are consistent. It can be used to catch memory allocation and usage
984 * errors.
985 *
986 * @param home Pointer to a memory home.
987 */
988void su_home_check(su_home_t const *home)
989{
990#if MEMCHECK1 != 0
991 su_block_t const *b = MEMLOCK(home);
992
993 su_home_check_blocks(b);
994
995 UNLOCK(home);
996#endif
997}
998
999/** Check home blocks. */
1000static
1001void su_home_check_blocks(su_block_t const *b)
1002{
1003#if MEMCHECK1 != 0
1004 if (b) {
1005 size_t i, used;
1006 assert(b->sub_used <= b->sub_n)((b->sub_used <= b->sub_n) ? (void) (0) : __assert_fail
("b->sub_used <= b->sub_n", "su_alloc.c", 1006, __PRETTY_FUNCTION__
))
;
1007
1008 for (i = 0, used = 0; i < b->sub_n; i++)
1009 if (b->sub_nodes[i].sua_data) {
1010 su_alloc_check(b, &b->sub_nodes[i]), used++;
1011 if (b->sub_nodes[i].sua_home)
1012 su_home_check((su_home_t *)b->sub_nodes[i].sua_data);
1013 }
1014
1015 assert(used == b->sub_used)((used == b->sub_used) ? (void) (0) : __assert_fail ("used == b->sub_used"
, "su_alloc.c", 1015, __PRETTY_FUNCTION__))
;
1016 }
1017#endif
1018}
1019
1020/**
1021 * Create an su_home_t object.
1022 *
1023 * Creates a home object. A home object is used to collect multiple memory
1024 * allocations, so that they all can be freed by calling su_home_unref().
1025 *
1026 * @return This function returns a pointer to an #su_home_t object, or
1027 * NULL upon an error.
1028 */
1029su_home_t *su_home_create(void)
1030{
1031 return su_home_new(sizeof(su_home_t));
1032}
1033
1034/** Destroy a home object
1035 *
1036 * Frees all memory blocks associated with a home object. Note that the home
1037 * object structure is not freed.
1038 *
1039 * @param home pointer to a home object
1040 *
1041 * @deprecated
1042 * su_home_destroy() is included for backwards compatibility only. Use
1043 * su_home_unref() instead of su_home_destroy().
1044 */
1045void su_home_destroy(su_home_t *home)
1046{
1047 if (MEMLOCK(home)) {
1048 assert(home->suh_blocks)((home->suh_blocks) ? (void) (0) : __assert_fail ("home->suh_blocks"
, "su_alloc.c", 1048, __PRETTY_FUNCTION__))
;
1049 assert(home->suh_blocks->sub_ref == 1)((home->suh_blocks->sub_ref == 1) ? (void) (0) : __assert_fail
("home->suh_blocks->sub_ref == 1", "su_alloc.c", 1049,
__PRETTY_FUNCTION__))
;
1050 if (!home->suh_blocks->sub_hauto)
1051 /* should warn user */;
1052 home->suh_blocks->sub_hauto = 1;
1053 _su_home_deinit(home);
1054 /* UNLOCK(home); */
1055 }
1056}
1057
1058/** Initialize an su_home_t struct.
1059 *
1060 * Initializes an su_home_t structure. It can be used when the home
1061 * structure is allocated from stack or when the home structure is part of
1062 * an another object.
1063 *
1064 * @param home pointer to home object
1065 *
1066 * @retval 0 when successful
1067 * @retval -1 upon an error.
1068 *
1069 * @sa SU_HOME_INIT(), su_home_deinit(), su_home_new(), su_home_clone()
1070 *
1071 * @bug
1072 * Prior to @VERSION_1_12_8 the su_home_t structure should have been
1073 * initialized with SU_HOME_INIT() or otherwise zeroed before calling
1074 * su_home_init().
1075 */
1076int su_home_init(su_home_t *home)
1077{
1078 su_block_t *sub;
1079
1080 if (home == NULL((void*)0))
1081 return -1;
1082
1083 home->suh_blocks = sub = su_hash_alloc(SUB_N);
1084 home->suh_lock = NULL((void*)0);
1085
1086 if (!sub)
1087 return -1;
1088
1089 return 0;
1090}
1091
1092/** Internal deinitialization */
1093static
1094void _su_home_deinit(su_home_t *home)
1095{
1096 if (home->suh_blocks) {
1097 size_t i;
1098 su_block_t *b;
1099 void *suh_lock = home->suh_lock;
1100
1101 home->suh_lock = NULL((void*)0);
1102
1103 if (home->suh_blocks->sub_destructor) {
1104 void (*destructor)(void *) = home->suh_blocks->sub_destructor;
1105 home->suh_blocks->sub_destructor = NULL((void*)0);
1106 destructor(home);
1107 }
1108
1109 b = home->suh_blocks;
1110
1111 su_home_check_blocks(b);
1112
1113 for (i = 0; i < b->sub_n; i++) {
1114 if (b->sub_nodes[i].sua_data) {
1115 if (b->sub_nodes[i].sua_home) {
1116 su_home_t *subhome = b->sub_nodes[i].sua_data;
1117 su_block_t *subb = MEMLOCK(subhome);
1118
1119 assert(subb)((subb) ? (void) (0) : __assert_fail ("subb", "su_alloc.c", 1119
, __PRETTY_FUNCTION__))
; assert(subb->sub_ref >= 1)((subb->sub_ref >= 1) ? (void) (0) : __assert_fail ("subb->sub_ref >= 1"
, "su_alloc.c", 1119, __PRETTY_FUNCTION__))
;
1120#if 0
1121 if (subb->sub_ref > 0)
1122 SU_DEBUG_7(("su_home_unref: subhome %p with destructor %p has still %u refs\n",
1123 subhome, subb->sub_destructor, subb->sub_ref));
1124#endif
1125 subb->sub_ref = 0; /* zap them all */
1126 _su_home_deinit(subhome);
1127 }
1128 else if (su_is_preloaded(b, b->sub_nodes[i].sua_data))
1129 continue;
1130 safefree(b->sub_nodes[i].sua_data)free((b->sub_nodes[i].sua_data));
1131 }
1132 }
1133
1134 if (b->sub_preload && !b->sub_preauto)
1135 free(b->sub_preload);
1136 if (b->sub_stats)
1137 free(b->sub_stats);
1138 if (!b->sub_auto)
1139 free(b);
1140
1141 home->suh_blocks = NULL((void*)0);
1142
1143 if (suh_lock) {
1144 /* Unlock, or risk assert() or leak handles on Windows */
1145 _su_home_unlocker(suh_lock);
1146 _su_home_destroy_mutexes(suh_lock);
1147 }
1148 }
1149}
1150
1151/** Free memory blocks allocated through home.
1152 *
1153 * Frees the memory blocks associated with the home object allocated. It
1154 * does not free the home object itself. Use su_home_unref() to free the
1155 * home object.
1156 *
1157 * @param home pointer to home object
1158 *
1159 * @sa su_home_init()
1160 */
1161void su_home_deinit(su_home_t *home)
1162{
1163 if (MEMLOCK(home)) {
1164 assert(home->suh_blocks)((home->suh_blocks) ? (void) (0) : __assert_fail ("home->suh_blocks"
, "su_alloc.c", 1164, __PRETTY_FUNCTION__))
;
1165 assert(home->suh_blocks->sub_ref == 1)((home->suh_blocks->sub_ref == 1) ? (void) (0) : __assert_fail
("home->suh_blocks->sub_ref == 1", "su_alloc.c", 1165,
__PRETTY_FUNCTION__))
;
1166 assert(home->suh_blocks->sub_hauto)((home->suh_blocks->sub_hauto) ? (void) (0) : __assert_fail
("home->suh_blocks->sub_hauto", "su_alloc.c", 1166, __PRETTY_FUNCTION__
))
;
1167 _su_home_deinit(home);
1168 /* UNLOCK(home); */
1169 }
1170}
1171
1172/**Move allocations from a su_home_t object to another.
1173 *
1174 * Moves allocations made through the @a src home object under the @a dst
1175 * home object. It is handy, for example, if an operation allocates some
1176 * number of blocks that should be freed upon an error. It uses a temporary
1177 * home and moves the blocks from temporary to a proper home when
1178 * successful, but frees the temporary home upon an error.
1179 *
1180 * If @a src has destructor, it is called before starting to move.
1181 *
1182 * @param dst destination home
1183 * @param src source home
1184 *
1185 * @retval 0 if succesful
1186 * @retval -1 upon an error
1187 */
1188int su_home_move(su_home_t *dst, su_home_t *src)
1189{
1190 size_t i, n, n2, used;
1191 su_block_t *s, *d, *d2;
1192
1193 if (src == NULL((void*)0) || dst == src)
1194 return 0;
1195
1196 if (dst) {
1197 s = MEMLOCK(src); d = MEMLOCK(dst);
1198
1199 if (s && s->sub_n) {
1200
1201 if (s->sub_destructor) {
1202 void (*destructor)(void *) = s->sub_destructor;
1203 s->sub_destructor = NULL((void*)0);
1204 destructor(src);
1205 }
1206
1207 if (d)
1208 used = s->sub_used + d->sub_used;
1209 else
1210 used = s->sub_used;
1211
1212 if (used && (d == NULL((void*)0) || 3 * used > 2 * d->sub_n)) {
1213 if (d)
1214 for (n = n2 = d->sub_n; 3 * used > 2 * n2; n2 = 4 * n2 + 3)
1215 ;
1216 else
1217 n = 0, n2 = s->sub_n;
1218
1219 if (!(d2 = su_hash_alloc(n2))) {
1220 UNLOCK(dst); UNLOCK(src);
1221 return -1;
1222 }
1223
1224 dst->suh_blocks = d2;
1225
1226 for (i = 0; i < n; i++)
1227 if (d->sub_nodes[i].sua_data)
1228 su_block_add(d2, d->sub_nodes[i].sua_data)[0] = d->sub_nodes[i];
1229
1230 if (d) {
1231 d2->sub_parent = d->sub_parent;
1232 d2->sub_ref = d->sub_ref;
1233 d2->sub_preload = d->sub_preload;
1234 d2->sub_prsize = d->sub_prsize;
1235 d2->sub_prused = d->sub_prused;
1236 d2->sub_preauto = d->sub_preauto;
1237 d2->sub_stats = d->sub_stats;
1238 }
1239
1240 if (d && !d->sub_auto)
1241 free(d);
1242
1243 d = d2;
1244 }
1245
1246 if (s->sub_used) {
1247 n = s->sub_n;
1248
1249 for (i = 0; i < n; i++)
1250 if (s->sub_nodes[i].sua_data) {
1251 su_block_add(d, s->sub_nodes[i].sua_data)[0] = s->sub_nodes[i];
1252 if (s->sub_nodes[i].sua_home) {
1253 su_home_t *subhome = s->sub_nodes[i].sua_data;
1254 su_block_t *subsub = MEMLOCK(subhome);
1255 subsub->sub_parent = dst;
1256 UNLOCK(subhome);
1257 }
1258 }
1259
1260 s->sub_used = 0;
1261
1262 memset(s->sub_nodes, 0, n * sizeof (s->sub_nodes[0]));
1263 }
1264
1265 if (s->sub_stats) {
1266 /* XXX */
1267 }
1268 }
1269
1270 UNLOCK(dst); UNLOCK(src);
1271 }
1272 else {
1273 s = MEMLOCK(src);
1274
1275 if (s && s->sub_used) {
1276 n = s->sub_n;
1277
1278 for (i = 0; i < n; i++) {
1279 if (s->sub_nodes[i].sua_data && s->sub_nodes[i].sua_home) {
1280 su_home_t *subhome = s->sub_nodes[i].sua_data;
1281 su_block_t *subsub = MEMLOCK(subhome);
1282 subsub->sub_parent = dst;
1283 UNLOCK(subhome);
1284 }
1285 }
1286
1287 s->sub_used = 0;
1288 memset(s->sub_nodes, 0, s->sub_n * sizeof (s->sub_nodes[0]));
1289
1290 s->sub_used = 0;
1291 }
1292
1293 UNLOCK(src);
1294 }
1295
1296 return 0;
1297}
1298
1299/** Preload a memory home.
1300 *
1301 * The function su_home_preload() preloads a memory home.
1302 */
1303void su_home_preload(su_home_t *home, isize_t n, isize_t isize)
1304{
1305 su_block_t *sub;
1306
1307 if (home == NULL((void*)0))
1308 return;
1309
1310 if (home->suh_blocks == NULL((void*)0))
1311 su_home_init(home);
1312
1313 sub = MEMLOCK(home);
1314 if (!sub->sub_preload) {
1315 size_t size;
1316 void *preload;
1317
1318 size = n * __ALIGN(isize)(size_t)(((isize) + ((8) - 1)) & (size_t)~((8) - 1));
1319 if (size > 65535) /* We have 16 bits... */
1320 size = 65535 & (ALIGNMENT(8) - 1);
1321
1322 preload = malloc(size);
1323
1324 home->suh_blocks->sub_preload = preload;
1325 home->suh_blocks->sub_prsize = (unsigned)size;
1326 }
1327 UNLOCK(home);
1328}
1329
1330/** Preload a memory home from stack.
1331 *
1332 * Initializes a memory home using an area allocated from stack. Poor man's
1333 * alloca().
1334 */
1335su_home_t *su_home_auto(void *area, isize_t size)
1336{
1337 su_home_t *home;
1338 su_block_t *sub;
1339 size_t homesize = __ALIGN(sizeof *home)(size_t)(((sizeof *home) + ((8) - 1)) & (size_t)~((8) - 1
))
;
1340 size_t subsize = __ALIGN(offsetof(su_block_t, sub_nodes[SUB_N_AUTO]))(size_t)(((__builtin_offsetof(su_block_t, sub_nodes[SUB_N_AUTO
])) + ((8) - 1)) & (size_t)~((8) - 1))
;
1341 size_t prepsize;
1342
1343 char *p = area;
1344
1345 prepsize = homesize + subsize + (__ALIGN((intptr_t)p)(size_t)((((intptr_t)p) + ((8) - 1)) & (size_t)~((8) - 1)
)
- (intptr_t)p);
1346
1347 if (area == NULL((void*)0) || size < prepsize)
1348 return NULL((void*)0);
1349
1350 if (size > INT_MAX2147483647)
1351 size = INT_MAX2147483647;
1352
1353 home = memset(p, 0, homesize);
1354 home->suh_size = (int)size;
1355
1356 sub = memset(p + homesize, 0, subsize);
1357 home->suh_blocks = sub;
1358
1359 if (size > prepsize + 65535)
1360 size = prepsize + 65535;
1361
1362 sub->sub_n = SUB_N_AUTO;
1363 sub->sub_ref = 1;
1364 sub->sub_preload = p + prepsize;
1365 sub->sub_prsize = (unsigned)(size - prepsize);
1366 sub->sub_hauto = 1;
1367 sub->sub_auto = 1;
1368 sub->sub_preauto = 1;
1369 sub->sub_auto_all = 1;
1370
1371 return home;
1372}
1373
1374
1375/** Reallocate a memory block.
1376 *
1377 * Allocates a memory block of @a size bytes.
1378 * It copies the old block contents to the new block and frees the old
1379 * block.
1380 *
1381 * If @a home is NULL, this function behaves exactly like realloc().
1382 *
1383 * @param home pointer to memory pool object
1384 * @param data pointer to old memory block
1385 * @param size size of the memory block to be allocated
1386 *
1387 * @return
1388 * A pointer to the allocated memory block or
1389 * NULL if an error occurred.
1390 */
1391void *su_realloc(su_home_t *home, void *data, isize_t size)
1392{
1393 void *ndata;
1394 su_alloc_t *sua;
1395 su_block_t *sub;
1396 size_t p;
1397 size_t term = 0 - size;
1398
1399 if (!home)
1
Assuming 'home' is non-null
2
Taking false branch
1400 return realloc(data, size);
1401
1402 if (size == 0) {
3
Assuming 'size' is not equal to 0
4
Taking false branch
1403 if (data)
1404 su_free(home, data);
1405 return NULL((void*)0);
1406 }
1407
1408 sub = MEMLOCK(home);
1409 if (!data) {
5
Assuming 'data' is non-null
6
Taking false branch
1410 data = sub_alloc(home, sub, size, (enum sub_zero)0);
1411 UNLOCK(home);
1412 return data;
1413 }
1414
1415 sua = su_block_find(sub, data);
1416
1417 if (!su_alloc_check(sub, sua))
7
Taking false branch
1418 return UNLOCK(home);
1419
1420 assert(!sua->sua_home)((!sua->sua_home) ? (void) (0) : __assert_fail ("!sua->sua_home"
, "su_alloc.c", 1420, __PRETTY_FUNCTION__))
;
1421 if (sua->sua_home)
8
Taking false branch
1422 return UNLOCK(home);
1423
1424 if (!su_is_preloaded(sub, data)) {
9
Taking true branch
1425 ndata = realloc(data, size + MEMCHECK_EXTRA0);
10
Memory is released
1426 if (ndata) {
11
Assuming 'ndata' is non-null
12
Taking true branch
1427 if (sub->sub_stats) {
13
Taking true branch
1428 su_home_stats_free(sub, data, 0, sua->sua_size);
14
Use of memory after it is freed
1429 su_home_stats_alloc(sub, data, 0, size, 1);
1430 }
1431
1432#if MEMCHECK_EXTRA0
1433 memcpy((char *)ndata + size, &term, sizeof (term));
1434#else
1435 (void)term;
1436#endif
1437 memset(sua, 0, sizeof *sua);
1438 sub->sub_used--;
1439 su_block_add(sub, ndata)->sua_size = (unsigned)size;
1440 }
1441 UNLOCK(home);
1442
1443 return ndata;
1444 }
1445
1446 p = (char *)data - home->suh_blocks->sub_preload;
1447 p += sua->sua_size + MEMCHECK_EXTRA0;
1448 p = __ALIGN(p)(size_t)(((p) + ((8) - 1)) & (size_t)~((8) - 1));
1449
1450 if (p == sub->sub_prused) {
1451 size_t p2 = (char *)data - sub->sub_preload + size + MEMCHECK_EXTRA0;
1452 p2 = __ALIGN(p2)(size_t)(((p2) + ((8) - 1)) & (size_t)~((8) - 1));
1453 if (p2 <= sub->sub_prsize) {
1454 /* Extend/reduce existing preload */
1455 if (sub->sub_stats) {
1456 su_home_stats_free(sub, data, data, sua->sua_size);
1457 su_home_stats_alloc(sub, data, data, size, 0);
1458 }
1459
1460 sub->sub_prused = (unsigned)p2;
1461 sua->sua_size = (unsigned)size;
1462
1463#if MEMCHECK_EXTRA0
1464 memcpy((char *)data + size, &term, sizeof (term));
1465#endif
1466 UNLOCK(home);
1467 return data;
1468 }
1469 }
1470 else if (size < (size_t)sua->sua_size) {
1471 /* Reduce existing preload */
1472 if (sub->sub_stats) {
1473 su_home_stats_free(sub, data, data, sua->sua_size);
1474 su_home_stats_alloc(sub, data, data, size, 0);
1475 }
1476#if MEMCHECK_EXTRA0
1477 memcpy((char *)data + size, &term, sizeof (term));
1478#endif
1479 sua->sua_size = (unsigned)size;
1480 UNLOCK(home);
1481 return data;
1482 }
1483
1484 ndata = malloc(size + MEMCHECK_EXTRA0);
1485
1486 if (ndata) {
1487 if (p == sub->sub_prused) {
1488 /* Free preload */
1489 sub->sub_prused = (char *)data - home->suh_blocks->sub_preload;
1490 if (sub->sub_stats)
1491 su_home_stats_free(sub, data, data, sua->sua_size);
1492 }
1493
1494 memcpy(ndata, data,
1495 (size_t)sua->sua_size < size
1496 ? (size_t)sua->sua_size
1497 : size);
1498#if MEMCHECK_EXTRA0
1499 memcpy((char *)ndata + size, &term, sizeof (term));
1500#endif
1501
1502 if (sub->sub_stats)
1503 su_home_stats_alloc(sub, data, 0, size, 1);
1504
1505 memset(sua, 0, sizeof *sua); sub->sub_used--;
1506
1507 su_block_add(sub, ndata)->sua_size = (unsigned)size;
1508 }
1509
1510 UNLOCK(home);
1511
1512 return ndata;
1513}
1514
1515
1516/**Check if a memory block has been allocated from the @a home.
1517 *
1518 * Check if the given memory block has been allocated from the home.
1519 *
1520 * @param home pointer to memory pool object
1521 * @param memory ponter to memory block
1522 *
1523 * @retval 1 if @a memory has been allocated from @a home.
1524 * @retval 0 otherwise
1525 *
1526 * @since New in @VERSION_1_12_4.
1527 */
1528int su_in_home(su_home_t *home, void const *memory)
1529{
1530 su_alloc_t *sua;
1531 su_block_t *sub;
1532 int retval = 0;
1533
1534 if (!home || !memory)
1535 return 0;
1536
1537 sub = MEMLOCK(home);
1538
1539 if (sub) {
1540 sua = su_block_find(sub, memory);
1541
1542 retval = su_alloc_check(sub, sua);
1543
1544 UNLOCK(home);
1545 }
1546
1547 return retval;
1548}
1549
1550
1551/**Allocate and zero a memory block.
1552 *
1553 * Allocates a memory block with a given size from
1554 * given memory home @a home and zeroes the allocated block.
1555 *
1556 * @param home pointer to memory pool object
1557 * @param size size of the memory block
1558 *
1559 * @note The memory home pointer @a home may be @c NULL. In that case, the
1560 * allocated memory block is not associated with any memory home, and it
1561 * must be freed by calling su_free() or free().
1562 *
1563 * @return
1564 * The function su_zalloc() returns a pointer to the allocated memory block,
1565 * or NULL upon an error.
1566 */
1567void *su_zalloc(su_home_t *home, isize_t size)
1568{
1569 void *data;
1570
1571 assert (size >= 0)((size >= 0) ? (void) (0) : __assert_fail ("size >= 0",
"su_alloc.c", 1571, __PRETTY_FUNCTION__))
;
1572
1573 if (home) {
1574 data = sub_alloc(home, MEMLOCK(home), size, (enum sub_zero)1);
1575 UNLOCK(home);
1576 }
1577 else
1578 data = calloc(1, size);
1579
1580 return data;
1581}
1582
1583/** Allocate a structure
1584 *
1585 * Allocates a structure with a given size, zeros
1586 * it, and initializes the size field to the given size. The size field
1587 * is an int at the beginning of the structure. Note that it has type of int.
1588 *
1589 * @param home pointer to memory pool object
1590 * @param size size of the structure
1591 *
1592 * @par Example
1593 * The structure is defined and allocated as follows:
1594 * @code
1595 * struct test {
1596 * int tst_size;
1597 * char *tst_name;
1598 * void *tst_ptr[3];
1599 * };
1600 *
1601 * struct test *t;
1602 * ...
1603 * t = su_salloc(home, sizeof (*t));
1604 * assert(t && t->t_size == sizeof (*t));
1605 *
1606 * @endcode
1607 * After calling su_salloc() we get a pointer t to a struct test,
1608 * initialized to zero except the tst_size field, which is initialized to
1609 * sizeof (*t).
1610 *
1611 * @return A pointer to the allocated structure, or NULL upon an error.
1612 */
1613void *su_salloc(su_home_t *home, isize_t size)
1614{
1615 struct { int size; } *retval;
1616
1617 if (size < sizeof (*retval))
1618 size = sizeof (*retval);
1619
1620 if (size > INT_MAX2147483647)
1621 return (void)(errno(*__errno_location ()) = ENOMEM12), NULL((void*)0);
1622
1623 if (home) {
1624 retval = sub_alloc(home, MEMLOCK(home), size, (enum sub_zero)1);
1625 UNLOCK(home);
1626 }
1627 else
1628 retval = calloc(1, size);
1629
1630 if (retval)
1631 retval->size = (int)size;
1632
1633 return retval;
1634}
1635
1636/** Check if a memory home is threadsafe */
1637int su_home_is_threadsafe(su_home_t const *home)
1638{
1639 return home && home->suh_lock;
1640}
1641
1642/** Increase refcount and obtain exclusive lock on home.
1643 *
1644 * @note The #su_home_t structure must be created with su_home_new() or
1645 * su_home_clone(), or initialized with su_home_init() before using this
1646 * function.
1647 *
1648 * In order to enable actual locking, use su_home_threadsafe(), too.
1649 * Otherwise the su_home_mutex_lock() will just increase the reference
1650 * count.
1651 */
1652
1653#if (defined(HAVE_MEMLEAK_LOG) && (HAVE_MEMLEAK_LOG != 1))
1654int _su_home_mutex_lock(su_home_t *home, const char *file, unsigned int line, const char *function)
1655#else
1656int su_home_mutex_lock(su_home_t *home)
1657#endif
1658
1659{
1660 int error;
1661
1662 if (home == NULL((void*)0))
1663 return su_seterrno(EFAULT14);
1664
1665#if (defined(HAVE_MEMLEAK_LOG) && (HAVE_MEMLEAK_LOG != 1))
1666 if (home->suh_blocks == NULL((void*)0) || !_su_home_ref_by(home, file, line, function))
1667#else
1668 if (home->suh_blocks == NULL((void*)0) || !su_home_ref(home))
1669#endif
1670 return su_seterrno(EINVAL22); /* Uninitialized home */
1671
1672 if (!home->suh_lock)
1673 return 0; /* No-op */
1674
1675 error = _su_home_mutex_locker(home->suh_lock);
1676 if (error)
1677 return su_seterrno(error);
1678
1679 return 0;
1680}
1681
1682/** Release exclusive lock on home and decrease refcount (if home is threadsafe).
1683 *
1684 * @sa su_home_unlock().
1685 */
1686
1687#if (defined(HAVE_MEMLEAK_LOG) && (HAVE_MEMLEAK_LOG != 1))
1688int _su_home_mutex_unlock(su_home_t *home, const char *file, unsigned int line, const char *function)
1689#else
1690int su_home_mutex_unlock(su_home_t *home)
1691#endif
1692{
1693 if (home == NULL((void*)0))
1694 return su_seterrno(EFAULT14);
1695
1696 if (home->suh_lock) {
1697 int error = _su_home_mutex_unlocker(home->suh_lock);
1698 if (error)
1699 return su_seterrno(error);
1700 }
1701
1702 if (home->suh_blocks == NULL((void*)0))
1703 return su_seterrno(EINVAL22), -1; /* Uninitialized home */
1704
1705#if (defined(HAVE_MEMLEAK_LOG) && (HAVE_MEMLEAK_LOG != 1))
1706 _su_home_unref_by(home, file, line, function);
1707#else
1708 su_home_unref(home);
1709#endif
1710
1711 return 0;
1712}
1713
1714
1715/** Obtain exclusive lock on home without increasing refcount.
1716 *
1717 * Unless su_home_threadsafe() has been used to intialize locking on home
1718 * object the function just returns -1.
1719 *
1720 * @return 0 if successful, -1 if not threadsafe, error code otherwise.
1721 *
1722 * @sa su_home_mutex_lock(), su_home_unlock(), su_home_trylock().
1723 *
1724 * @NEW_1_12_8
1725 */
1726int su_home_lock(su_home_t *home)
1727{
1728 if (home == NULL((void*)0))
1729 return EFAULT14;
1730
1731 if (home->suh_lock == NULL((void*)0))
1732 return -1; /* No-op */
1733
1734 return _su_home_mutex_locker(home->suh_lock);
1735}
1736
1737
1738/** Try to obtain exclusive lock on home without increasing refcount.
1739 *
1740 * @return 0 if successful, -1 if not threadsafe,
1741 * EBUSY if already locked, error code otherwise.
1742 *
1743 * @sa su_home_lock(), su_home_unlock().
1744 *
1745 * @NEW_1_12_8
1746 */
1747int su_home_trylock(su_home_t *home)
1748{
1749 if (home == NULL((void*)0))
1750 return EFAULT14;
1751
1752 if (home->suh_lock == NULL((void*)0))
1753 return -1; /* No-op */
1754
1755 return _su_home_mutex_trylocker(home->suh_lock);
1756}
1757
1758
1759/** Release exclusive lock on home.
1760 *
1761 * Release lock without decreasing refcount.
1762 *
1763 * @return 0 if successful, -1 if not threadsafe, error code otherwise.
1764 *
1765 * @sa su_home_lock(), su_home_trylock(), su_home_mutex_unlock().
1766 *
1767 * @NEW_1_12_8
1768 */
1769int su_home_unlock(su_home_t *home)
1770{
1771 if (home == NULL((void*)0))
1772 return EFAULT14;
1773
1774 if (home->suh_lock == NULL((void*)0))
1775 return -1; /* No-op */
1776
1777 return _su_home_mutex_unlocker(home->suh_lock);
1778}
1779
1780
1781/** Initialize statistics structure */
1782void su_home_init_stats(su_home_t *home)
1783{
1784 su_block_t *sub;
1785 size_t size;
1786
1787 if (home == NULL((void*)0))
1788 return;
1789
1790 sub = home->suh_blocks;
1791
1792 if (!sub)
1793 sub = home->suh_blocks = su_hash_alloc(SUB_N);
1794 if (!sub)
1795 return;
1796
1797 if (!sub->sub_stats) {
1798 size = sizeof (*sub->sub_stats);
1799 sub->sub_stats = malloc(size);
1800 if (!sub->sub_stats)
1801 return;
1802 }
1803 else
1804 size = sub->sub_stats->hs_size;
1805
1806 memset(sub->sub_stats, 0, size);
1807 sub->sub_stats->hs_size = (int)size;
1808 sub->sub_stats->hs_blocksize = sub->sub_n;
1809}
1810
1811/** Retrieve statistics from memory home.
1812 */
1813void su_home_get_stats(su_home_t *home, int include_clones,
1814 su_home_stat_t *hs,
1815 isize_t size)
1816{
1817 su_block_t *sub;
1818
1819 if (hs == NULL((void*)0) || size < (sizeof hs->hs_size))
1820 return;
1821
1822 memset((void *)hs, 0, size);
1823
1824 sub = MEMLOCK(home);
1825
1826 if (sub && sub->sub_stats) {
1827 int sub_size = sub->sub_stats->hs_size;
1828 if (sub_size > (int)size)
1829 sub_size = (int)size;
1830 sub->sub_stats->hs_preload.hsp_size = sub->sub_prsize;
1831 sub->sub_stats->hs_preload.hsp_used = sub->sub_prused;
1832 memcpy(hs, sub->sub_stats, sub_size);
1833 hs->hs_size = sub_size;
1834 }
1835
1836 UNLOCK(home);
1837}
1838
1839static
1840void su_home_stats_alloc(su_block_t *sub, void *p, void *preload,
1841 size_t size, int zero)
1842{
1843 su_home_stat_t *hs = sub->sub_stats;
1844
1845 size_t rsize = __ALIGN(size)(size_t)(((size) + ((8) - 1)) & (size_t)~((8) - 1));
1846
1847 hs->hs_rehash += (sub->sub_n != hs->hs_blocksize);
1848 hs->hs_blocksize = sub->sub_n;
1849
1850 hs->hs_clones += zero > 1;
1851
1852 if (preload) {
1853 hs->hs_allocs.hsa_preload++;
1854 return;
1855 }
1856
1857 hs->hs_allocs.hsa_number++;
1858 hs->hs_allocs.hsa_bytes += size;
1859 hs->hs_allocs.hsa_rbytes += rsize;
1860 if (hs->hs_allocs.hsa_rbytes > hs->hs_allocs.hsa_maxrbytes)
1861 hs->hs_allocs.hsa_maxrbytes = hs->hs_allocs.hsa_rbytes;
1862
1863 hs->hs_blocks.hsb_number++;
1864 hs->hs_blocks.hsb_bytes += size;
1865 hs->hs_blocks.hsb_rbytes += rsize;
1866}
1867
1868static
1869void su_home_stats_free(su_block_t *sub, void *p, void *preload,
1870 unsigned size)
1871{
1872 su_home_stat_t *hs = sub->sub_stats;
1873
1874 size_t rsize = __ALIGN(size)(size_t)(((size) + ((8) - 1)) & (size_t)~((8) - 1));
1875
1876 if (preload) {
1877 hs->hs_frees.hsf_preload++;
1878 return;
1879 }
1880
1881 hs->hs_frees.hsf_number++;
1882 hs->hs_frees.hsf_bytes += size;
1883 hs->hs_frees.hsf_rbytes += rsize;
1884
1885 hs->hs_blocks.hsb_number--;
1886 hs->hs_blocks.hsb_bytes -= size;
1887 hs->hs_blocks.hsb_rbytes -= rsize;
1888}
1889
1890void su_home_stat_add(su_home_stat_t total[1], su_home_stat_t const hs[1])
1891{
1892 total->hs_clones += hs->hs_clones;
1893 total->hs_rehash += hs->hs_rehash;
1894
1895 if (total->hs_blocksize < hs->hs_blocksize)
1896 total->hs_blocksize = hs->hs_blocksize;
1897
1898 total->hs_allocs.hsa_number += hs->hs_allocs.hsa_number;
1899 total->hs_allocs.hsa_bytes += hs->hs_allocs.hsa_bytes;
1900 total->hs_allocs.hsa_rbytes += hs->hs_allocs.hsa_rbytes;
1901 total->hs_allocs.hsa_maxrbytes += hs->hs_allocs.hsa_maxrbytes;
1902
1903 total->hs_frees.hsf_number += hs->hs_frees.hsf_number;
1904 total->hs_frees.hsf_bytes += hs->hs_frees.hsf_bytes;
1905 total->hs_frees.hsf_rbytes += hs->hs_frees.hsf_rbytes;
1906
1907 total->hs_blocks.hsb_number += hs->hs_blocks.hsb_number;
1908 total->hs_blocks.hsb_bytes += hs->hs_blocks.hsb_bytes;
1909 total->hs_blocks.hsb_rbytes += hs->hs_blocks.hsb_rbytes;
1910}