Bug Summary

File: libs/apr/memory/unix/apr_pools.c
Location: line 823, column 21
Description: Access to field 'allocator' results in a dereference of a null pointer (loaded from variable 'parent')

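The path below (steps 2-10) shows how the dereference is reached: with a
NULL parent the code falls back to global_pool, which the analyzer
assumes is itself still NULL (step 6), i.e. the pools subsystem has not
been initialized. With allocator also NULL, line 823 then dereferences
the NULL parent. A hypothetical repro sketch (not part of the report; it
assumes apr_initialize()/apr_pool_initialize() has not run):

    #include "apr_pools.h"

    int main(void)
    {
        apr_pool_t *p;

        /* global_pool is still NULL, so parent stays NULL after the
         * fallback at step 4, and parent->allocator on line 823 is a
         * null-pointer dereference.
         */
        return apr_pool_create_ex(&p, NULL, NULL, NULL);
    }
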
Annotated Source Code

1/* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "apr.h"
18#include "apr_private.h"
19
20#include "apr_atomic.h"
21#include "apr_portable.h" /* for get_os_proc */
22#include "apr_strings.h"
23#include "apr_general.h"
24#include "apr_pools.h"
25#include "apr_allocator.h"
26#include "apr_lib.h"
27#include "apr_thread_mutex.h"
28#include "apr_hash.h"
29#include "apr_time.h"
30#define APR_WANT_MEMFUNC
31#include "apr_want.h"
32#include "apr_env.h"
33
34#if APR_HAVE_STDLIB_H
35#include <stdlib.h> /* for malloc, free and abort */
36#endif
37
38#if APR_HAVE_UNISTD_H
39#include <unistd.h> /* for getpid */
40#endif
41
42
43/*
44 * Magic numbers
45 */
46
47#define MIN_ALLOC 8192
48#define MAX_INDEX 20
49
50#define BOUNDARY_INDEX 12
51#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
52
53/*
54 * Timing constants for killing subprocesses
55 * There is a total 3-second delay between sending a SIGINT
56 * and sending of the final SIGKILL.
57 * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64
58 * for the exponential timeout algorithm.
59 */
60#define TIMEOUT_USECS 3000000
61#define TIMEOUT_INTERVAL 46875
62
63/*
64 * Allocator
65 */
66
67struct apr_allocator_t {
68 apr_uint32_t max_index;
69 apr_uint32_t max_free_index;
70 apr_uint32_t current_free_index;
71#if APR_HAS_THREADS1
72 apr_thread_mutex_t *mutex;
73#endif /* APR_HAS_THREADS */
74 apr_pool_t *owner;
75 apr_memnode_t *free[MAX_INDEX];
76};
77
78#define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))
79
80
81/*
82 * Allocator
83 */
84
85APR_DECLARE(apr_status_t)apr_status_t apr_allocator_create(apr_allocator_t **allocator)
86{
87 apr_allocator_t *new_allocator;
88
89 *allocator = NULL((void*)0);
90
91 if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL)
92 return APR_ENOMEM;
93
94 memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
95 new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
96
97 *allocator = new_allocator;
98
99 return APR_SUCCESS0;
100}
101
102APR_DECLARE(void)void apr_allocator_destroy(apr_allocator_t *allocator)
103{
104 apr_uint32_t index;
105 apr_memnode_t *node, **ref;
106
107 for (index = 0; index < MAX_INDEX; index++) {
108 ref = &allocator->free[index];
109 while ((node = *ref) != NULL((void*)0)) {
110 *ref = node->next;
111 free(node);
112 }
113 }
114
115 free(allocator);
116}
117
118#if APR_HAS_THREADS1
119APR_DECLARE(void)void apr_allocator_mutex_set(apr_allocator_t *allocator,
120 apr_thread_mutex_t *mutex)
121{
122 allocator->mutex = mutex;
123}
124
125APR_DECLARE(apr_thread_mutex_t *)apr_thread_mutex_t * apr_allocator_mutex_get(
126 apr_allocator_t *allocator)
127{
128 return allocator->mutex;
129}
130#endif /* APR_HAS_THREADS */
131
132APR_DECLARE(void)void apr_allocator_owner_set(apr_allocator_t *allocator,
133 apr_pool_t *pool)
134{
135 allocator->owner = pool;
136}
137
138APR_DECLARE(apr_pool_t *)apr_pool_t * apr_allocator_owner_get(apr_allocator_t *allocator)
139{
140 return allocator->owner;
141}
142
143APR_DECLARE(void)void apr_allocator_max_free_set(apr_allocator_t *allocator,
144 apr_size_t in_size)
145{
146 apr_uint32_t max_free_index;
147 apr_uint32_t size = (APR_UINT32_TRUNC_CASTapr_uint32_t)in_size;
148
149#if APR_HAS_THREADS1
150 apr_thread_mutex_t *mutex;
151
152 mutex = apr_allocator_mutex_get(allocator);
153 if (mutex != NULL((void*)0))
154 apr_thread_mutex_lock(mutex);
155#endif /* APR_HAS_THREADS */
156
157 max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX;
158 allocator->current_free_index += max_free_index;
159 allocator->current_free_index -= allocator->max_free_index;
160 allocator->max_free_index = max_free_index;
161 if (allocator->current_free_index > max_free_index)
162 allocator->current_free_index = max_free_index;
163
164#if APR_HAS_THREADS1
165 if (mutex != NULL((void*)0))
166 apr_thread_mutex_unlock(mutex);
167#endif
168}
169
170static APR_INLINE
171apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t size)
172{
173 apr_memnode_t *node, **ref;
174 apr_uint32_t max_index;
175 apr_size_t i, index;
176
177 /* Round up the block size to the next boundary, but always
178 * allocate at least a certain size (MIN_ALLOC).
179 */
180 size = APR_ALIGN(size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
181 if (size < MIN_ALLOC)
182 size = MIN_ALLOC;
183
184 /* Find the index for this node size by
185 * dividing its size by the boundary size
186 */
187 index = (size >> BOUNDARY_INDEX) - 1;
188
189 if (index > APR_UINT32_MAX) {
190 return NULL((void*)0);
191 }
192
193 /* First see if there are any nodes in the area we know
194 * our node will fit into.
195 */
196 if (index <= allocator->max_index) {
197#if APR_HAS_THREADS1
198 if (allocator->mutex)
199 apr_thread_mutex_lock(allocator->mutex);
200#endif /* APR_HAS_THREADS */
201
202 /* Walk the free list to see if there are
203 * any nodes on it of the requested size
204 *
205 * NOTE: an optimization would be to check
206 * allocator->free[index] first and if no
207 * node is present, directly use
208 * allocator->free[max_index]. This seems
209 * like overkill though and could cause
210 * memory waste.
211 */
212 max_index = allocator->max_index;
213 ref = &allocator->free[index];
214 i = index;
215 while (*ref == NULL((void*)0) && i < max_index) {
216 ref++;
217 i++;
218 }
219
220 if ((node = *ref) != NULL((void*)0)) {
221 /* If we have found a node and it doesn't have any
222 * nodes waiting in line behind it _and_ we are on
223 * the highest available index, find the new highest
224 * available index
225 */
226 if ((*ref = node->next) == NULL((void*)0) && i >= max_index) {
227 do {
228 ref--;
229 max_index--;
230 }
231 while (*ref == NULL((void*)0) && max_index > 0);
232
233 allocator->max_index = max_index;
234 }
235
236 allocator->current_free_index += node->index;
237 if (allocator->current_free_index > allocator->max_free_index)
238 allocator->current_free_index = allocator->max_free_index;
239
240#if APR_HAS_THREADS1
241 if (allocator->mutex)
242 apr_thread_mutex_unlock(allocator->mutex);
243#endif /* APR_HAS_THREADS */
244
245 node->next = NULL((void*)0);
246 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
247
248 return node;
249 }
250
251#if APR_HAS_THREADS1
252 if (allocator->mutex)
253 apr_thread_mutex_unlock(allocator->mutex);
254#endif /* APR_HAS_THREADS */
255 }
256
257 /* If we found nothing, seek the sink (at index 0), if
258 * it is not empty.
259 */
260 else if (allocator->free[0]) {
261#if APR_HAS_THREADS1
262 if (allocator->mutex)
263 apr_thread_mutex_lock(allocator->mutex);
264#endif /* APR_HAS_THREADS */
265
266 /* Walk the free list to see if there are
267 * any nodes on it of the requested size
268 */
269 ref = &allocator->free[0];
270 while ((node = *ref) != NULL((void*)0) && index > node->index)
271 ref = &node->next;
272
273 if (node) {
274 *ref = node->next;
275
276 allocator->current_free_index += node->index;
277 if (allocator->current_free_index > allocator->max_free_index)
278 allocator->current_free_index = allocator->max_free_index;
279
280#if APR_HAS_THREADS1
281 if (allocator->mutex)
282 apr_thread_mutex_unlock(allocator->mutex);
283#endif /* APR_HAS_THREADS */
284
285 node->next = NULL((void*)0);
286 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
287
288 return node;
289 }
290
291#if APR_HAS_THREADS1
292 if (allocator->mutex)
293 apr_thread_mutex_unlock(allocator->mutex);
294#endif /* APR_HAS_THREADS */
295 }
296
297 /* If we haven't got a suitable node, malloc a new one
298 * and initialize it.
299 */
300 if ((node = malloc(size)) == NULL((void*)0))
301 return NULL((void*)0);
302
303 node->next = NULL((void*)0);
304 node->index = (APR_UINT32_TRUNC_CASTapr_uint32_t)index;
305 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
306 node->endp = (char *)node + size;
307
308 return node;
309}
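
A worked example of the sizing math above (a sketch using the constants
defined in this file: BOUNDARY_INDEX = 12, BOUNDARY_SIZE = 4096,
MIN_ALLOC = 8192; APR_MEMNODE_T_SIZE is assumed to be 32 bytes for
illustration):

    size  = APR_ALIGN(5000 + 32, 4096);   /* -> 8192, >= MIN_ALLOC   */
    index = (8192 >> 12) - 1;             /* -> 1: served by free[1] */

    size  = APR_ALIGN(100000 + 32, 4096); /* -> 102400               */
    index = (102400 >> 12) - 1;           /* -> 24 >= MAX_INDEX, so  */
                                          /* such a node is kept in  */
                                          /* the sink, free[0]       */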
310
311static APR_INLINE
312void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
313{
314 apr_memnode_t *next, *freelist = NULL((void*)0);
315 apr_uint32_t index, max_index;
316 apr_uint32_t max_free_index, current_free_index;
317
318#if APR_HAS_THREADS1
319 if (allocator->mutex)
320 apr_thread_mutex_lock(allocator->mutex);
321#endif /* APR_HAS_THREADS */
322
323 max_index = allocator->max_index;
324 max_free_index = allocator->max_free_index;
325 current_free_index = allocator->current_free_index;
326
327 /* Walk the list of submitted nodes and free them one by one,
328 * shoving them in the right 'size' buckets as we go.
329 */
330 do {
331 next = node->next;
332 index = node->index;
333
334 if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED
335 && index > current_free_index) {
336 node->next = freelist;
337 freelist = node;
338 }
339 else if (index < MAX_INDEX) {
340 /* Add the node to the appropriate 'size' bucket. Adjust
341 * the max_index when appropriate.
342 */
343 if ((node->next = allocator->free[index]) == NULL((void*)0)
344 && index > max_index) {
345 max_index = index;
346 }
347 allocator->free[index] = node;
348 current_free_index -= index;
349 }
350 else {
351 /* This node is too large to keep in a specific size bucket,
352 * just add it to the sink (at index 0).
353 */
354 node->next = allocator->free[0];
355 allocator->free[0] = node;
356 current_free_index -= index;
357 }
358 } while ((node = next) != NULL((void*)0));
359
360 allocator->max_index = max_index;
361 allocator->current_free_index = current_free_index;
362
363#if APR_HAS_THREADS1
364 if (allocator->mutex)
365 apr_thread_mutex_unlock(allocator->mutex);
366#endif /* APR_HAS_THREADS */
367
368 while (freelist != NULL((void*)0)) {
369 node = freelist;
370 freelist = node->next;
371 free(node);
372 }
373}
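
For example (an assumed call, not taken from the report):

    apr_allocator_max_free_set(allocator, 65536);
    /* max_free_index = APR_ALIGN(65536, 4096) >> 12 = 16 units of
     * 4 KiB, so roughly 64 KiB of nodes stay cached. A returned node
     * whose index exceeds current_free_index goes onto the local
     * freelist instead and is free()d once the mutex is released.
     */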
374
375APR_DECLARE(apr_memnode_t *)apr_memnode_t * apr_allocator_alloc(apr_allocator_t *allocator,
376 apr_size_t size)
377{
378 return allocator_alloc(allocator, size);
379}
380
381APR_DECLARE(void)void apr_allocator_free(apr_allocator_t *allocator,
382 apr_memnode_t *node)
383{
384 allocator_free(allocator, node);
385}
386
387
388
389/*
390 * Debug level
391 */
392
393#define APR_POOL_DEBUG_GENERAL0x01 0x01
394#define APR_POOL_DEBUG_VERBOSE0x02 0x02
395#define APR_POOL_DEBUG_LIFETIME0x04 0x04
396#define APR_POOL_DEBUG_OWNER0x08 0x08
397#define APR_POOL_DEBUG_VERBOSE_ALLOC0x10 0x10
398
399#define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \
400 | APR_POOL_DEBUG_VERBOSE_ALLOC)
401
402
403/*
404 * Structures
405 */
406
407typedef struct cleanup_t cleanup_t;
408
409/** A list of processes */
410struct process_chain {
411 /** The process ID */
412 apr_proc_t *proc;
413 apr_kill_conditions_e kill_how;
414 /** The next process in the list */
415 struct process_chain *next;
416};
417
418
419#if APR_POOL_DEBUG0
420
421typedef struct debug_node_t debug_node_t;
422
423struct debug_node_t {
424 debug_node_t *next;
425 apr_uint32_t index;
426 void *beginp[64];
427 void *endp[64];
428};
429
430#define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))
431
432#endif /* APR_POOL_DEBUG */
433
434/* The ref field in the apr_pool_t struct holds a
435 * pointer to the pointer referencing this pool.
436 * It is used for parent, child, sibling management.
437 * Look at apr_pool_create_ex() and apr_pool_destroy()
438 * to see how it is used.
439 */
440struct apr_pool_t {
441 apr_pool_t *parent;
442 apr_pool_t *child;
443 apr_pool_t *sibling;
444 apr_pool_t **ref;
445 cleanup_t *cleanups;
446 cleanup_t *free_cleanups;
447 apr_allocator_t *allocator;
448 struct process_chain *subprocesses;
449 apr_abortfunc_t abort_fn;
450 apr_hash_t *user_data;
451 const char *tag;
452#if APR_HAS_THREADS1
453 apr_thread_mutex_t *user_mutex;
454#endif
455#if !APR_POOL_DEBUG0
456 apr_memnode_t *active;
457 apr_memnode_t *self; /* The node containing the pool itself */
458 char *self_first_avail;
459
460#else /* APR_POOL_DEBUG */
461 apr_pool_t *joined; /* the caller has guaranteed that this pool
462 * will survive as long as ->joined */
463 debug_node_t *nodes;
464 const char *file_line;
465 apr_uint32_t creation_flags;
466 unsigned int stat_alloc;
467 unsigned int stat_total_alloc;
468 unsigned int stat_clear;
469#if APR_HAS_THREADS1
470 apr_os_thread_t owner;
471 apr_thread_mutex_t *mutex;
472#endif /* APR_HAS_THREADS */
473#endif /* APR_POOL_DEBUG */
474#ifdef NETWARE
475 apr_os_proc_t owner_proc;
476
477#endif /* defined(NETWARE) */
478};
479
480#define SIZEOF_POOL_T APR_ALIGN_DEFAULT(sizeof(apr_pool_t))
481
482
483/*
484 * Variables
485 */
486
487static apr_byte_t apr_pools_initialized = 0;
488static apr_pool_t *global_pool = NULL((void*)0);
489
490#if !APR_POOL_DEBUG0
491static apr_allocator_t *global_allocator = NULL((void*)0);
492#endif /* !APR_POOL_DEBUG */
493
494#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
495static apr_file_t *file_stderr = NULL((void*)0);
496#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
497
498/*
499 * Local functions
500 */
501
502static void run_cleanups(cleanup_t **c);
503static void run_child_cleanups(cleanup_t **c);
504static void free_proc_chain(struct process_chain *procs);
505
506#if APR_POOL_DEBUG0
507static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
508#endif
509
510#if !APR_POOL_DEBUG0
511/*
512 * Initialization
513 */
514
515APR_DECLARE(apr_status_t)apr_status_t apr_pool_initialize(void)
516{
517 apr_status_t rv;
518
519 if (apr_pools_initialized++)
520 return APR_SUCCESS0;
521
522 if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS0) {
523 apr_pools_initialized = 0;
524 return rv;
525 }
526
527 if ((rv = apr_pool_create_ex(&global_pool, NULL((void*)0), NULL((void*)0),
528 global_allocator)) != APR_SUCCESS0) {
529 apr_allocator_destroy(global_allocator);
530 global_allocator = NULL((void*)0);
531 apr_pools_initialized = 0;
532 return rv;
533 }
534
535 apr_pool_tag(global_pool, "apr_global_pool");
536
537 /* This has to happen here because mutexes might be backed by
538 * atomics. It used to be snug and safe in apr_initialize().
539 */
540 if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS0) {
541 return rv;
542 }
543
544#if APR_HAS_THREADS1
545 {
546 apr_thread_mutex_t *mutex;
547
548 if ((rv = apr_thread_mutex_create(&mutex,
549 APR_THREAD_MUTEX_DEFAULT0x0,
550 global_pool)) != APR_SUCCESS0) {
551 return rv;
552 }
553
554 apr_allocator_mutex_set(global_allocator, mutex);
555 }
556#endif /* APR_HAS_THREADS */
557
558 apr_allocator_owner_set(global_allocator, global_pool);
559
560 return APR_SUCCESS0;
561}
562
563APR_DECLARE(void)void apr_pool_terminate(void)
564{
565 if (!apr_pools_initialized)
566 return;
567
568 if (--apr_pools_initialized)
569 return;
570
571 apr_pool_destroy(global_pool); /* This will also destroy the mutex */
572 global_pool = NULL((void*)0);
573
574 global_allocator = NULL((void*)0);
575}
576
577
578/* Node list management helper macros; list_insert() inserts 'node'
579 * before 'point'. */
580#define list_insert(node, point) do { \
581 node->ref = point->ref; \
582 *node->ref = node; \
583 node->next = point; \
584 point->ref = &node->next; \
585} while (0)
586
587/* list_remove() removes 'node' from its list. */
588#define list_remove(node) do { \
589 *node->ref = node->next; \
590 node->next->ref = node->ref; \
591} while (0)
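
Both macros rely on the ring invariant that *node->ref points back at
node itself. A small trace (an illustration, not code from this file):
starting from a freshly created pool whose ring is only the self node
'a' (a->next == a, a->ref == &a->next), list_insert(n, a) leaves:

    n->next == a;         /* n now sits just before a in the ring */
    a->next == n;
    n->ref  == &a->next;  /* *n->ref == n, invariant preserved    */
    a->ref  == &n->next;  /* *a->ref == a, invariant preserved    */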
592
593/*
594 * Memory allocation
595 */
596
597APR_DECLARE(void *)void * apr_palloc(apr_pool_t *pool, apr_size_t size)
598{
599 apr_memnode_t *active, *node;
600 void *mem = NULL((void*)0);
601 apr_size_t free_index;
602#if APR_HAS_THREADS1
603 if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
604#endif
605 size = APR_ALIGN_DEFAULT(size);
606 active = pool->active;
607
608 /* If the active node has enough bytes left, use it. */
609 if (size < (apr_size_t)(active->endp - active->first_avail)) {
610 mem = active->first_avail;
611 active->first_avail += size;
612
613 goto end;
614 }
615
616 node = active->next;
617 if (size < (apr_size_t)(node->endp - node->first_avail)) {
618 list_remove(node);
619 }
620 else {
621 if ((node = allocator_alloc(pool->allocator, size)) == NULL((void*)0)) {
622 if (pool->abort_fn)
623 pool->abort_fn(APR_ENOMEM12);
624
625 mem = NULL((void*)0);
626 goto end;
627 }
628 }
629
630 node->free_index = 0;
631
632 mem = node->first_avail;
633 node->first_avail += size;
634
635 list_insert(node, active);
636
637 pool->active = node;
638
639 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
640 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
641
642 active->free_index = (APR_UINT32_TRUNC_CASTapr_uint32_t)free_index;
643 node = active->next;
644 if (free_index >= node->free_index)
645 goto end;
646
647 do {
648 node = node->next;
649 }
650 while (free_index < node->free_index);
651
652 list_remove(active);
653 list_insert(active, node);
654
655 end:
656#if APR_HAS_THREADS1
657 if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
658#endif
659 return mem;
660}
661
662/* Provide an implementation of apr_pcalloc for backward compatibility
663 * with code built before apr_pcalloc was a macro
664 */
665
666#ifdef apr_pcalloc
667#undef apr_pcalloc
668#endif
669
670APR_DECLARE(void *)void * apr_pcalloc(apr_pool_t *pool, apr_size_t size);
671APR_DECLARE(void *)void * apr_pcalloc(apr_pool_t *pool, apr_size_t size)
672{
673 void *mem;
674
675 size = APR_ALIGN_DEFAULT(size);
676 if ((mem = apr_palloc(pool, size)) != NULL((void*)0)) {
677 memset(mem, 0, size);
678 }
679
680 return mem;
681}
682
683
684/*
685 * Pool creation/destruction
686 */
687
688APR_DECLARE(void)void apr_pool_clear(apr_pool_t *pool)
689{
690 apr_memnode_t *active;
691#if APR_HAS_THREADS1
692 if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
693#endif
694 /* Destroy the subpools. The subpools will detach themselves from
695 * this pool thus this loop is safe and easy.
696 */
697 while (pool->child)
698 apr_pool_destroy(pool->child);
699
700 /* Run cleanups */
701 run_cleanups(&pool->cleanups);
702 pool->cleanups = NULL((void*)0);
703 pool->free_cleanups = NULL((void*)0);
704
705 /* Free subprocesses */
706 free_proc_chain(pool->subprocesses);
707 pool->subprocesses = NULL((void*)0);
708
709 /* Clear the user data. */
710 pool->user_data = NULL((void*)0);
711
712 /* Find the node attached to the pool structure, reset it, make
713 * it the active node and free the rest of the nodes.
714 */
715 active = pool->active = pool->self;
716 active->first_avail = pool->self_first_avail;
717
718 if (active->next == active)
719 goto end;
720
721 *active->ref = NULL((void*)0);
722 allocator_free(pool->allocator, active->next);
723 active->next = active;
724 active->ref = &active->next;
725
726 end:
727#if APR_HAS_THREADS1
728 if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
729#endif
730}
731
732#if APR_HAS_THREADS1
733APR_DECLARE(void)void apr_pool_mutex_set(apr_pool_t *pool,
734 apr_thread_mutex_t *mutex)
735{
736 pool->user_mutex = mutex;
737}
738#endif
739
740APR_DECLARE(void)void apr_pool_destroy(apr_pool_t *pool)
741{
742 apr_memnode_t *active;
743 apr_allocator_t *allocator;
744
745 /* Destroy the subpools. The subpools will detach themselves from
746 * this pool thus this loop is safe and easy.
747 */
748 while (pool->child)
749 apr_pool_destroy(pool->child);
750
751 /* Run cleanups */
752 run_cleanups(&pool->cleanups);
753
754 /* Free subprocesses */
755 free_proc_chain(pool->subprocesses);
756
757 /* Remove the pool from the parent's child list */
758 if (pool->parent) {
759#if APR_HAS_THREADS1
760 apr_thread_mutex_t *mutex;
761
762 if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL((void*)0))
763 apr_thread_mutex_lock(mutex);
764#endif /* APR_HAS_THREADS */
765
766 if ((*pool->ref = pool->sibling) != NULL((void*)0))
767 pool->sibling->ref = pool->ref;
768
769#if APR_HAS_THREADS1
770 if (mutex)
771 apr_thread_mutex_unlock(mutex);
772#endif /* APR_HAS_THREADS */
773 }
774
775 /* Find the block attached to the pool structure. Save a copy of the
776 * allocator pointer, because the pool struct soon will be no more.
777 */
778 allocator = pool->allocator;
779 active = pool->self;
780 *active->ref = NULL((void*)0);
781
782#if APR_HAS_THREADS1
783 if (apr_allocator_owner_get(allocator) == pool) {
784 /* Make sure to remove the lock, since it is highly likely to
785 * be invalid now.
786 */
787 apr_allocator_mutex_set(allocator, NULL((void*)0));
788 }
789#endif /* APR_HAS_THREADS */
790
791 /* Free all the nodes in the pool (including the node holding the
792 * pool struct), by giving them back to the allocator.
793 */
794 allocator_free(allocator, active);
795
796 /* If this pool happens to be the owner of the allocator, free
797 * everything in the allocator (that includes the pool struct
798 * and the allocator). Don't worry about destroying the optional mutex
799 * in the allocator, it will have been destroyed by the cleanup function.
800 */
801 if (apr_allocator_owner_get(allocator) == pool) {
802 apr_allocator_destroy(allocator);
803 }
804}
805
806APR_DECLARE(apr_status_t)apr_status_t apr_pool_create_ex(apr_pool_t **newpool,
807 apr_pool_t *parent,
808 apr_abortfunc_t abort_fn,
809 apr_allocator_t *allocator)
810{
811 apr_pool_t *pool;
812 apr_memnode_t *node;
813
814 *newpool = NULL((void*)0);
815
816 if (!parent)
      [Step 2] Assuming 'parent' is null
      [Step 3] Taking true branch
817 parent = global_pool;
      [Step 4] Value assigned to 'parent'
818
819 if (!abort_fn && parent)
      [Step 5] Assuming 'abort_fn' is null
      [Step 6] Assuming pointer value is null
      [Step 7] Taking false branch
820 abort_fn = parent->abort_fn;
821
822 if (allocator == NULL)
      [Step 8] Assuming 'allocator' is equal to null
      [Step 9] Taking true branch
823 allocator = parent->allocator;
      [Step 10] Access to field 'allocator' results in a dereference of a null pointer (loaded from variable 'parent')
824
825 if ((node = allocator_alloc(allocator,
826 MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
827 if (abort_fn)
828 abort_fn(APR_ENOMEM12);
829
830 return APR_ENOMEM12;
831 }
832
833 node->next = node;
834 node->ref = &node->next;
835
836 pool = (apr_pool_t *)node->first_avail;
837 node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
838
839 pool->allocator = allocator;
840 pool->active = pool->self = node;
841 pool->abort_fn = abort_fn;
842 pool->child = NULL((void*)0);
843 pool->cleanups = NULL((void*)0);
844 pool->free_cleanups = NULL((void*)0);
845 pool->subprocesses = NULL((void*)0);
846 pool->user_data = NULL((void*)0);
847 pool->tag = NULL((void*)0);
848#if APR_HAS_THREADS1
849 pool->user_mutex = NULL((void*)0);
850#endif
851#ifdef NETWARE
852 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
853#endif /* defined(NETWARE) */
854
855 if ((pool->parent = parent) != NULL((void*)0)) {
856#if APR_HAS_THREADS1
857 apr_thread_mutex_t *mutex;
858
859 if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL((void*)0))
860 apr_thread_mutex_lock(mutex);
861#endif /* APR_HAS_THREADS */
862
863 if ((pool->sibling = parent->child) != NULL((void*)0))
864 pool->sibling->ref = &pool->sibling;
865
866 parent->child = pool;
867 pool->ref = &parent->child;
868
869#if APR_HAS_THREADS1
870 if (mutex)
871 apr_thread_mutex_unlock(mutex);
872#endif /* APR_HAS_THREADS */
873 }
874 else {
875 pool->sibling = NULL((void*)0);
876 pool->ref = NULL((void*)0);
877 }
878
879 *newpool = pool;
880
881 return APR_SUCCESS0;
882}
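
One possible hardening for the flagged lines 816-823 (a sketch only,
not the upstream fix): fail fast instead of dereferencing a NULL parent
when no allocator is supplied and the pools subsystem is uninitialized.

    if (!parent)
        parent = global_pool;

    if (!abort_fn && parent)
        abort_fn = parent->abort_fn;

    if (allocator == NULL) {
        if (parent == NULL)
            return APR_EINVAL; /* not initialized, no allocator given */
        allocator = parent->allocator;
    }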
883
884
885/*
886 * "Print" functions
887 */
888
889/*
890 * apr_psprintf is implemented by writing directly into the current
891 * block of the pool, starting right at first_avail. If there's
892 * insufficient room, then a new block is allocated and the earlier
893 * output is copied over. The new block isn't linked into the pool
894 * until all the output is done.
895 *
896 * Note that this is completely safe because nothing else can
897 * allocate in this apr_pool_t while apr_psprintf is running. alarms are
898 * blocked, and the only thing outside of apr_pools.c that's invoked
899 * is apr_vformatter -- which was purposefully written to be
900 * self-contained with no callouts.
901 */
902
903struct psprintf_data {
904 apr_vformatter_buff_t vbuff;
905 apr_memnode_t *node;
906 apr_pool_t *pool;
907 apr_byte_t got_a_new_node;
908 apr_memnode_t *free;
909};
910
911#define APR_PSPRINTF_MIN_STRINGSIZE 32
912
913static int psprintf_flush(apr_vformatter_buff_t *vbuff)
914{
915 struct psprintf_data *ps = (struct psprintf_data *)vbuff;
916 apr_memnode_t *node, *active;
917 apr_size_t cur_len, size;
918 char *strp;
919 apr_pool_t *pool;
920 apr_size_t free_index;
921
922 pool = ps->pool;
923 active = ps->node;
924 strp = ps->vbuff.curpos;
925 cur_len = strp - active->first_avail;
926 size = cur_len << 1;
927
928 /* Make sure that we don't try to use a block that has less
929 * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it. This
930 * also catches the case where size == 0, which would result
931 * in reusing a block that can't even hold the NUL byte.
932 */
933 if (size < APR_PSPRINTF_MIN_STRINGSIZE)
934 size = APR_PSPRINTF_MIN_STRINGSIZE;
935
936 node = active->next;
937 if (!ps->got_a_new_node
938 && size < (apr_size_t)(node->endp - node->first_avail)) {
939
940 list_remove(node);
941 list_insert(node, active);
942
943 node->free_index = 0;
944
945 pool->active = node;
946
947 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
948 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
949
950 active->free_index = (APR_UINT32_TRUNC_CASTapr_uint32_t)free_index;
951 node = active->next;
952 if (free_index < node->free_index) {
953 do {
954 node = node->next;
955 }
956 while (free_index < node->free_index);
957
958 list_remove(active);
959 list_insert(active, node);
960 }
961
962 node = pool->active;
963 }
964 else {
965 if ((node = allocator_alloc(pool->allocator, size)) == NULL((void*)0))
966 return -1;
967
968 if (ps->got_a_new_node) {
969 active->next = ps->free;
970 ps->free = active;
971 }
972
973 ps->got_a_new_node = 1;
974 }
975
976 memcpy(node->first_avail, active->first_avail, cur_len);
977
978 ps->node = node;
979 ps->vbuff.curpos = node->first_avail + cur_len;
980 ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
981
982 return 0;
983}
984
985APR_DECLARE(char *)char * apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
986{
987 struct psprintf_data ps;
988 char *strp;
989 apr_size_t size;
990 apr_memnode_t *active, *node;
991 apr_size_t free_index;
992
993#if APR_HAS_THREADS1
994 if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
995#endif
996
997 ps.node = active = pool->active;
998 ps.pool = pool;
999 ps.vbuff.curpos = ps.node->first_avail;
1000
1001 /* Save a byte for the NUL terminator */
1002 ps.vbuff.endpos = ps.node->endp - 1;
1003 ps.got_a_new_node = 0;
1004 ps.free = NULL((void*)0);
1005
1006 /* Make sure that the first node passed to apr_vformatter has at least
1007 * room to hold the NUL terminator.
1008 */
1009 if (ps.node->first_avail == ps.node->endp) {
1010 if (psprintf_flush(&ps.vbuff) == -1) {
1011 if (pool->abort_fn) {
1012 pool->abort_fn(APR_ENOMEM12);
1013 }
1014
1015 strp = NULL((void*)0);
1016 goto end;
1017 }
1018 }
1019
1020 if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
1021 if (pool->abort_fn)
1022 pool->abort_fn(APR_ENOMEM12);
1023
1024 strp = NULL((void*)0);
1025 goto end;
1026 }
1027
1028 strp = ps.vbuff.curpos;
1029 *strp++ = '\0';
1030
1031 size = strp - ps.node->first_avail;
1032 size = APR_ALIGN_DEFAULT(size);
1033 strp = ps.node->first_avail;
1034 ps.node->first_avail += size;
1035
1036 if (ps.free)
1037 allocator_free(pool->allocator, ps.free);
1038
1039 /*
1040 * Link the node in if it's a new one
1041 */
1042 if (!ps.got_a_new_node)
1043 goto end;
1044
1045 active = pool->active;
1046 node = ps.node;
1047
1048 node->free_index = 0;
1049
1050 list_insert(node, active);
1051
1052 pool->active = node;
1053
1054 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
1055 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
1056
1057 active->free_index = (APR_UINT32_TRUNC_CASTapr_uint32_t)free_index;
1058 node = active->next;
1059
1060 if (free_index >= node->free_index)
1061 goto end;
1062
1063 do {
1064 node = node->next;
1065 }
1066 while (free_index < node->free_index);
1067
1068 list_remove(active);
1069 list_insert(active, node);
1070
1071 end:
1072
1073#if APR_HAS_THREADS1
1074 if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
1075#endif
1076
1077 return strp;
1078}
1079
1080
1081#else /* APR_POOL_DEBUG */
1082/*
1083 * Debug helper functions
1084 */
1085
1086
1087/*
1088 * Walk the pool tree rooted at pool, depth first. When fn returns
1089 * anything other than 0, abort the traversal and return the value
1090 * returned by fn.
1091 */
1092static int apr_pool_walk_tree(apr_pool_t *pool,
1093 int (*fn)(apr_pool_t *pool, void *data),
1094 void *data)
1095{
1096 int rv;
1097 apr_pool_t *child;
1098
1099 rv = fn(pool, data);
1100 if (rv)
1101 return rv;
1102
1103#if APR_HAS_THREADS1
1104 if (pool->mutex) {
1105 apr_thread_mutex_lock(pool->mutex);
1106 }
1107#endif /* APR_HAS_THREADS */
1108
1109 child = pool->child;
1110 while (child) {
1111 rv = apr_pool_walk_tree(child, fn, data);
1112 if (rv)
1113 break;
1114
1115 child = child->sibling;
1116 }
1117
1118#if APR_HAS_THREADS1
1119 if (pool->mutex) {
1120 apr_thread_mutex_unlock(pool->mutex);
1121 }
1122#endif /* APR_HAS_THREADS */
1123
1124 return rv;
1125}
1126
1127#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1128static void apr_pool_log_event(apr_pool_t *pool, const char *event,
1129 const char *file_line, int deref)
1130{
1131 if (file_stderr) {
1132 if (deref) {
1133 apr_file_printf(file_stderr,
1134 "POOL DEBUG: "
1135 "[%lu"
1136#if APR_HAS_THREADS1
1137 "/%lu"
1138#endif /* APR_HAS_THREADS */
1139 "] "
1140 "%7s "
1141 "(%10lu/%10lu/%10lu) "
1142 "0x%08X \"%s\" "
1143 "<%s> "
1144 "(%u/%u/%u) "
1145 "\n",
1146 (unsigned long)getpid(),
1147#if APR_HAS_THREADS1
1148 (unsigned long)apr_os_thread_current(),
1149#endif /* APR_HAS_THREADS */
1150 event,
1151 (unsigned long)apr_pool_num_bytes(pool, 0),
1152 (unsigned long)apr_pool_num_bytes(pool, 1),
1153 (unsigned long)apr_pool_num_bytes(global_pool, 1),
1154 (unsigned int)pool, pool->tag,
1155 file_line,
1156 pool->stat_alloc, pool->stat_total_alloc, pool->stat_clear);
1157 }
1158 else {
1159 apr_file_printf(file_stderr,
1160 "POOL DEBUG: "
1161 "[%lu"
1162#if APR_HAS_THREADS1
1163 "/%lu"
1164#endif /* APR_HAS_THREADS */
1165 "] "
1166 "%7s "
1167 " "
1168 "0x%08X "
1169 "<%s> "
1170 "\n",
1171 (unsigned long)getpid(),
1172#if APR_HAS_THREADS1
1173 (unsigned long)apr_os_thread_current(),
1174#endif /* APR_HAS_THREADS */
1175 event,
1176 (unsigned int)pool,
1177 file_line);
1178 }
1179 }
1180}
1181#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1182
1183#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_LIFETIME0x04)
1184static int pool_is_child_of(apr_pool_t *parent, void *data)
1185{
1186 apr_pool_t *pool = (apr_pool_t *)data;
1187
1188 return (pool == parent);
1189}
1190
1191static int apr_pool_is_child_of(apr_pool_t *pool, apr_pool_t *parent)
1192{
1193 if (parent == NULL((void*)0))
1194 return 0;
1195
1196 return apr_pool_walk_tree(parent, pool_is_child_of, pool);
1197}
1198#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */
1199
1200static void apr_pool_check_integrity(apr_pool_t *pool)
1201{
1202 /* Rule of thumb: use of the global pool is always
1203 * ok, since the only user is apr_pools.c. Unless
1204 * people have searched for the top level parent and
1205 * started to use that...
1206 */
1207 if (pool == global_pool || global_pool == NULL((void*)0))
1208 return;
1209
1210 /* Lifetime
1211 * This basically checks to see if the pool being used is still
1212 * a relative to the global pool. If not it was previously
1213 * destroyed, in which case we abort().
1214 */
1215#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_LIFETIME0x04)
1216 if (!apr_pool_is_child_of(pool, global_pool)) {
1217#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1218 apr_pool_log_event(pool, "LIFE",
1219 __FILE__ ":apr_pool_integrity check", 0);
1220#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1221 abort();
1222 }
1223#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */
1224
1225#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_OWNER0x08)
1226#if APR_HAS_THREADS1
1227 if (!apr_os_thread_equal(pool->owner, apr_os_thread_current())) {
1228#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1229 apr_pool_log_event(pool, "THREAD",
1230 __FILE__ ":apr_pool_integrity check", 0);
1231#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1232 abort();
1233 }
1234#endif /* APR_HAS_THREADS */
1235#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) */
1236}
1237
1238
1239/*
1240 * Initialization (debug)
1241 */
1242
1243APR_DECLARE(apr_status_t)apr_status_t apr_pool_initialize(void)
1244{
1245 apr_status_t rv;
1246#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1247 char *logpath;
1248#endif
1249
1250 if (apr_pools_initialized++)
1251 return APR_SUCCESS0;
1252
1253 /* Since the debug code works a bit differently than the
1254 * regular pools code, we ask for a lock here. The regular
1255 * pools code has got this lock embedded in the global
1256 * allocator, a concept unknown to debug mode.
1257 */
1258 if ((rv = apr_pool_create_ex(&global_pool, NULL((void*)0), NULL((void*)0),
1259 NULL((void*)0))) != APR_SUCCESS0) {
1260 return rv;
1261 }
1262
1263 apr_pool_tag(global_pool, "APR global pool");
1264
1265 apr_pools_initialized = 1;
1266
1267 /* This has to happen here because mutexes might be backed by
1268 * atomics. It used to be snug and safe in apr_initialize().
1269 */
1270 if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS0) {
1271 return rv;
1272 }
1273
1274#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1275 rv = apr_env_get(&logpath, "APR_POOL_DEBUG_LOG", global_pool);
1276
1277 if (rv == APR_SUCCESS0) {
1278 apr_file_open(&file_stderr, logpath, APR_APPEND|APR_WRITE|APR_CREATE,
1279 APR_OS_DEFAULT, global_pool);
1280 }
1281 else {
1282 apr_file_open_stderr(&file_stderr, global_pool);
1283 }
1284
1285 if (file_stderr) {
1286 apr_file_printf(file_stderr,
1287 "POOL DEBUG: [PID"
1288#if APR_HAS_THREADS1
1289 "/TID"
1290#endif /* APR_HAS_THREADS */
1291 "] ACTION (SIZE /POOL SIZE /TOTAL SIZE) "
1292 "POOL \"TAG\" <__FILE__:__LINE__> (ALLOCS/TOTAL ALLOCS/CLEARS)\n");
1293
1294 apr_pool_log_event(global_pool, "GLOBAL", __FILE__ ":apr_pool_initialize", 0);
1295 }
1296#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1297
1298 return APR_SUCCESS0;
1299}
1300
1301APR_DECLARE(void)void apr_pool_terminate(void)
1302{
1303 if (!apr_pools_initialized)
1304 return;
1305
1306 apr_pools_initialized = 0;
1307
1308 apr_pool_destroy(global_pool); /* This will also destroy the mutex */
1309 global_pool = NULL((void*)0);
1310
1311#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1312 file_stderr = NULL((void*)0);
1313#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1314}
1315
1316
1317/*
1318 * Memory allocation (debug)
1319 */
1320
1321static void *pool_alloc(apr_pool_t *pool, apr_size_t size)
1322{
1323 debug_node_t *node;
1324 void *mem;
1325
1326 if ((mem = malloc(size)) == NULL((void*)0)) {
1327 if (pool->abort_fn)
1328 pool->abort_fn(APR_ENOMEM12);
1329
1330 return NULL((void*)0);
1331 }
1332
1333 node = pool->nodes;
1334 if (node == NULL((void*)0) || node->index == 64) {
1335 if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL((void*)0)) {
1336 if (pool->abort_fn)
1337 pool->abort_fn(APR_ENOMEM12);
1338
1339 return NULL((void*)0);
1340 }
1341
1342 memset(node, 0, SIZEOF_DEBUG_NODE_T);
1343
1344 node->next = pool->nodes;
1345 pool->nodes = node;
1346 node->index = 0;
1347 }
1348
1349 node->beginp[node->index] = mem;
1350 node->endp[node->index] = (char *)mem + size;
1351 node->index++;
1352
1353 pool->stat_alloc++;
1354 pool->stat_total_alloc++;
1355
1356 return mem;
1357}
1358
1359APR_DECLARE(void *)void * apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
1360 const char *file_line)
1361{
1362 void *mem;
1363
1364 apr_pool_check_integrity(pool);
1365
1366 mem = pool_alloc(pool, size);
1367
1368#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALLOC0x10)
1369 apr_pool_log_event(pool, "PALLOC", file_line, 1);
1370#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */
1371
1372 return mem;
1373}
1374
1375APR_DECLARE(void *)void * apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
1376 const char *file_line)
1377{
1378 void *mem;
1379
1380 apr_pool_check_integrity(pool);
1381
1382 mem = pool_alloc(pool, size);
1383 memset(mem, 0, size);
1384
1385#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALLOC0x10)
1386 apr_pool_log_event(pool, "PCALLOC", file_line, 1);
1387#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */
1388
1389 return mem;
1390}
1391
1392
1393/*
1394 * Pool creation/destruction (debug)
1395 */
1396
1397#define POOL_POISON_BYTE 'A'
1398
1399static void pool_clear_debug(apr_pool_t *pool, const char *file_line)
1400{
1401 debug_node_t *node;
1402 apr_uint32_t index;
1403
1404 /* Destroy the subpools. The subpools will detach themselves from
1405 * this pool thus this loop is safe and easy.
1406 */
1407 while (pool->child)
1408 pool_destroy_debug(pool->child, file_line);
1409
1410 /* Run cleanups */
1411 run_cleanups(&pool->cleanups);
1412 pool->free_cleanups = NULL((void*)0);
1413 pool->cleanups = NULL((void*)0);
1414
1415 /* If new child pools showed up, this is a reason to raise a flag */
1416 if (pool->child)
1417 abort();
1418
1419 /* Free subprocesses */
1420 free_proc_chain(pool->subprocesses);
1421 pool->subprocesses = NULL((void*)0);
1422
1423 /* Clear the user data. */
1424 pool->user_data = NULL((void*)0);
1425
1426 /* Free the blocks, scribbling over them first to help highlight
1427 * use-after-free issues. */
1428 while ((node = pool->nodes) != NULL((void*)0)) {
1429 pool->nodes = node->next;
1430
1431 for (index = 0; index < node->index; index++) {
1432 memset(node->beginp[index], POOL_POISON_BYTE,
1433 node->endp[index] - node->beginp[index]);
1434 free(node->beginp[index]);
1435 }
1436
1437 memset(node, POOL_POISON_BYTE, SIZEOF_DEBUG_NODE_T);
1438 free(node);
1439 }
1440
1441 pool->stat_alloc = 0;
1442 pool->stat_clear++;
1443}
1444
1445APR_DECLARE(void)void apr_pool_clear_debug(apr_pool_t *pool,
1446 const char *file_line)
1447{
1448#if APR_HAS_THREADS1
1449 apr_thread_mutex_t *mutex = NULL((void*)0);
1450#endif
1451
1452 apr_pool_check_integrity(pool);
1453
1454#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE0x02)
1455 apr_pool_log_event(pool, "CLEAR", file_line, 1);
1456#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1457
1458#if APR_HAS_THREADS1
1459 if (pool->parent != NULL((void*)0))
1460 mutex = pool->parent->mutex;
1461
1462 /* Lock the parent mutex before clearing so that if we have our
1463 * own mutex it won't be accessed by apr_pool_walk_tree after
1464 * it has been destroyed.
1465 */
1466 if (mutex != NULL((void*)0) && mutex != pool->mutex) {
1467 apr_thread_mutex_lock(mutex);
1468 }
1469#endif
1470
1471 pool_clear_debug(pool, file_line);
1472
1473#if APR_HAS_THREADS1
1474 /* If we had our own mutex, it will have been destroyed by
1475 * the registered cleanups. Recreate the mutex. Unlock
1476 * the mutex we obtained above.
1477 */
1478 if (mutex != pool->mutex) {
1479 (void)apr_thread_mutex_create(&pool->mutex,
1480 APR_THREAD_MUTEX_NESTED0x1, pool);
1481
1482 if (mutex != NULL((void*)0))
1483 (void)apr_thread_mutex_unlock(mutex);
1484 }
1485#endif /* APR_HAS_THREADS */
1486}
1487
1488static void pool_destroy_debug(apr_pool_t *pool, const char *file_line)
1489{
1490 apr_pool_check_integrity(pool);
1491
1492#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE0x02)
1493 apr_pool_log_event(pool, "DESTROY", file_line, 1);
1494#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1495
1496 pool_clear_debug(pool, file_line);
1497
1498 /* Remove the pool from the parent's child list */
1499 if (pool->parent) {
1500#if APR_HAS_THREADS1
1501 apr_thread_mutex_t *mutex;
1502
1503 if ((mutex = pool->parent->mutex) != NULL((void*)0))
1504 apr_thread_mutex_lock(mutex);
1505#endif /* APR_HAS_THREADS */
1506
1507 if ((*pool->ref = pool->sibling) != NULL((void*)0))
1508 pool->sibling->ref = pool->ref;
1509
1510#if APR_HAS_THREADS1
1511 if (mutex)
1512 apr_thread_mutex_unlock(mutex);
1513#endif /* APR_HAS_THREADS */
1514 }
1515
1516 if (pool->allocator != NULL((void*)0)
1517 && apr_allocator_owner_get(pool->allocator) == pool) {
1518 apr_allocator_destroy(pool->allocator);
1519 }
1520
1521 /* Free the pool itself */
1522 free(pool);
1523}
1524
1525APR_DECLARE(void)void apr_pool_destroy_debug(apr_pool_t *pool,
1526 const char *file_line)
1527{
1528 if (pool->joined) {
1529 /* Joined pools must not be explicitly destroyed; the caller
1530 * has broken the guarantee. */
1531#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE_ALL(0x02 | 0x10))
1532 apr_pool_log_event(pool, "LIFE",
1533 __FILE__ ":apr_pool_destroy abort on joined", 0);
1534#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1535
1536 abort();
1537 }
1538 pool_destroy_debug(pool, file_line);
1539}
1540
1541APR_DECLARE(apr_status_t)apr_status_t apr_pool_create_ex_debug(apr_pool_t **newpool,
1542 apr_pool_t *parent,
1543 apr_abortfunc_t abort_fn,
1544 apr_allocator_t *allocator,
1545 const char *file_line)
1546{
1547 apr_pool_t *pool;
1548
1549 *newpool = NULL((void*)0);
1550
1551 if (!parent) {
1552 parent = global_pool;
1553 }
1554 else {
1555 apr_pool_check_integrity(parent);
1556
1557 if (!allocator)
1558 allocator = parent->allocator;
1559 }
1560
1561 if (!abort_fn && parent)
1562 abort_fn = parent->abort_fn;
1563
1564 if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
1565 if (abort_fn)
1566 abort_fn(APR_ENOMEM12);
1567
1568 return APR_ENOMEM12;
1569 }
1570
1571 memset(pool, 0, SIZEOF_POOL_T);
1572
1573 pool->allocator = allocator;
1574 pool->abort_fn = abort_fn;
1575 pool->tag = file_line;
1576 pool->file_line = file_line;
1577
1578 if ((pool->parent = parent) != NULL((void*)0)) {
1579#if APR_HAS_THREADS1
1580 if (parent->mutex)
1581 apr_thread_mutex_lock(parent->mutex);
1582#endif /* APR_HAS_THREADS */
1583 if ((pool->sibling = parent->child) != NULL((void*)0))
1584 pool->sibling->ref = &pool->sibling;
1585
1586 parent->child = pool;
1587 pool->ref = &parent->child;
1588
1589#if APR_HAS_THREADS1
1590 if (parent->mutex)
1591 apr_thread_mutex_unlock(parent->mutex);
1592#endif /* APR_HAS_THREADS */
1593 }
1594 else {
1595 pool->sibling = NULL((void*)0);
1596 pool->ref = NULL((void*)0);
1597 }
1598
1599#if APR_HAS_THREADS1
1600 pool->owner = apr_os_thread_current();
1601#endif /* APR_HAS_THREADS */
1602#ifdef NETWARE
1603 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
1604#endif /* defined(NETWARE) */
1605
1606
1607 if (parent == NULL((void*)0) || parent->allocator != allocator) {
1608#if APR_HAS_THREADS1
1609 apr_status_t rv;
1610
1611 /* No matter what the creation flags say, always create
1612 * a lock. Without it integrity_check and apr_pool_num_bytes
1613 * blow up (because they traverse pools child lists that
1614 * possibly belong to another thread, in combination with
1615 * the pool having no lock). However, this might actually
1616 * hide problems like creating a child pool of a pool
1617 * belonging to another thread.
1618 */
1619 if ((rv = apr_thread_mutex_create(&pool->mutex,
1620 APR_THREAD_MUTEX_NESTED0x1, pool)) != APR_SUCCESS0) {
1621 free(pool);
1622 return rv;
1623 }
1624#endif /* APR_HAS_THREADS */
1625 }
1626 else {
1627#if APR_HAS_THREADS1
1628 if (parent)
1629 pool->mutex = parent->mutex;
1630#endif /* APR_HAS_THREADS */
1631 }
1632
1633 *newpool = pool;
1634
1635#if (APR_POOL_DEBUG0 & APR_POOL_DEBUG_VERBOSE0x02)
1636 apr_pool_log_event(pool, "CREATE", file_line, 1);
1637#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1638
1639 return APR_SUCCESS0;
1640}
1641
1642
1643/*
1644 * "Print" functions (debug)
1645 */
1646
1647struct psprintf_data {
1648 apr_vformatter_buff_t vbuff;
1649 char *mem;
1650 apr_size_t size;
1651};
1652
1653static int psprintf_flush(apr_vformatter_buff_t *vbuff)
1654{
1655 struct psprintf_data *ps = (struct psprintf_data *)vbuff;
1656 apr_size_t size;
1657
1658 size = ps->vbuff.curpos - ps->mem;
1659
1660 ps->size <<= 1;
1661 if ((ps->mem = realloc(ps->mem, ps->size)) == NULL((void*)0))
1662 return -1;
1663
1664 ps->vbuff.curpos = ps->mem + size;
1665 ps->vbuff.endpos = ps->mem + ps->size - 1;
1666
1667 return 0;
1668}
1669
1670APR_DECLARE(char *)char * apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
1671{
1672 struct psprintf_data ps;
1673 debug_node_t *node;
1674
1675 apr_pool_check_integrity(pool);
1676
1677 ps.size = 64;
1678 ps.mem = malloc(ps.size);
1679 ps.vbuff.curpos = ps.mem;
1680
1681 /* Save a byte for the NUL terminator */
1682 ps.vbuff.endpos = ps.mem + ps.size - 1;
1683
1684 if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
1685 if (pool->abort_fn)
1686 pool->abort_fn(APR_ENOMEM12);
1687
1688 return NULL((void*)0);
1689 }
1690
1691 *ps.vbuff.curpos++ = '\0';
1692
1693 /*
1694 * Link the node in
1695 */
1696 node = pool->nodes;
1697 if (node == NULL((void*)0) || node->index == 64) {
1698 if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL((void*)0)) {
1699 if (pool->abort_fn)
1700 pool->abort_fn(APR_ENOMEM12);
1701
1702 return NULL((void*)0);
1703 }
1704
1705 node->next = pool->nodes;
1706 pool->nodes = node;
1707 node->index = 0;
1708 }
1709
1710 node->beginp[node->index] = ps.mem;
1711 node->endp[node->index] = ps.mem + ps.size;
1712 node->index++;
1713
1714 return ps.mem;
1715}
1716
1717
1718/*
1719 * Debug functions
1720 */
1721
1722APR_DECLARE(void)void apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
1723{
1724#if APR_POOL_DEBUG0
1725 if (sub->parent != p) {
1726 abort();
1727 }
1728 sub->joined = p;
1729#endif
1730}
1731
1732static int pool_find(apr_pool_t *pool, void *data)
1733{
1734 void **pmem = (void **)data;
1735 debug_node_t *node;
1736 apr_uint32_t index;
1737
1738 node = pool->nodes;
1739
1740 while (node) {
1741 for (index = 0; index < node->index; index++) {
1742 if (node->beginp[index] <= *pmem
1743 && node->endp[index] > *pmem) {
1744 *pmem = pool;
1745 return 1;
1746 }
1747 }
1748
1749 node = node->next;
1750 }
1751
1752 return 0;
1753}
1754
1755APR_DECLARE(apr_pool_t *)apr_pool_t * apr_pool_find(const void *mem)
1756{
1757 void *pool = (void *)mem;
1758
1759 if (apr_pool_walk_tree(global_pool, pool_find, &pool))
1760 return pool;
1761
1762 return NULL((void*)0);
1763}
1764
1765static int pool_num_bytes(apr_pool_t *pool, void *data)
1766{
1767 apr_size_t *psize = (apr_size_t *)data;
1768 debug_node_t *node;
1769 apr_uint32_t index;
1770
1771 node = pool->nodes;
1772
1773 while (node) {
1774 for (index = 0; index < node->index; index++) {
1775 *psize += (char *)node->endp[index] - (char *)node->beginp[index];
1776 }
1777
1778 node = node->next;
1779 }
1780
1781 return 0;
1782}
1783
1784APR_DECLARE(apr_size_t)apr_size_t apr_pool_num_bytes(apr_pool_t *pool, int recurse)
1785{
1786 apr_size_t size = 0;
1787
1788 if (!recurse) {
1789 pool_num_bytes(pool, &size);
1790
1791 return size;
1792 }
1793
1794 apr_pool_walk_tree(pool, pool_num_bytes, &size);
1795
1796 return size;
1797}
1798
1799APR_DECLARE(void)void apr_pool_lock(apr_pool_t *pool, int flag)
1800{
1801}
1802
1803#endif /* !APR_POOL_DEBUG */
1804
1805#ifdef NETWARE
1806void netware_pool_proc_cleanup ()
1807{
1808 apr_pool_t *pool = global_pool->child;
1809 apr_os_proc_t owner_proc = (apr_os_proc_t)getnlmhandle();
1810
1811 while (pool) {
1812 if (pool->owner_proc == owner_proc) {
1813 apr_pool_destroy (pool);
1814 pool = global_pool->child;
1815 }
1816 else {
1817 pool = pool->sibling;
1818 }
1819 }
1820 return;
1821}
1822#endif /* defined(NETWARE) */
1823
1824
1825/*
1826 * "Print" functions (common)
1827 */
1828
1829APR_DECLARE_NONSTD(char *)char * apr_psprintf(apr_pool_t *p, const char *fmt, ...)
1830{
1831 va_list ap;
1832 char *res;
1833
1834 va_start(ap, fmt);
1835 res = apr_pvsprintf(p, fmt, ap);
1836 va_end(ap);
1837 return res;
1838}
1839
1840/*
1841 * Pool Properties
1842 */
1843
1844APR_DECLARE(void)void apr_pool_abort_set(apr_abortfunc_t abort_fn,
1845 apr_pool_t *pool)
1846{
1847 pool->abort_fn = abort_fn;
1848}
1849
1850APR_DECLARE(apr_abortfunc_t)apr_abortfunc_t apr_pool_abort_get(apr_pool_t *pool)
1851{
1852 return pool->abort_fn;
1853}
1854
1855APR_DECLARE(apr_pool_t *)apr_pool_t * apr_pool_parent_get(apr_pool_t *pool)
1856{
1857#ifdef NETWARE
1858 /* On NetWare, don't return the global_pool, return the application pool
1859 as the top most pool */
1860 if (pool->parent == global_pool)
1861 return NULL((void*)0);
1862 else
1863#endif
1864 return pool->parent;
1865}
1866
1867APR_DECLARE(apr_allocator_t *)apr_allocator_t * apr_pool_allocator_get(apr_pool_t *pool)
1868{
1869 return pool->allocator;
1870}
1871
1872/* return TRUE if a is an ancestor of b
1873 * NULL is considered an ancestor of all pools
1874 */
1875APR_DECLARE(int)int apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
1876{
1877 if (a == NULL((void*)0))
1878 return 1;
1879
1880#if APR_POOL_DEBUG0
1881 /* Find the pool with the longest lifetime guaranteed by the
1882 * caller: */
1883 while (a->joined) {
1884 a = a->joined;
1885 }
1886#endif
1887
1888 while (b) {
1889 if (a == b)
1890 return 1;
1891
1892 b = b->parent;
1893 }
1894
1895 return 0;
1896}
1897
1898APR_DECLARE(const char *)const char * apr_pool_tag(apr_pool_t *pool, const char *tag)
1899{
1900 if (tag) {
1901 pool->tag = tag;
1902 }
1903
1904 return pool->tag;
1905}
1906
1907
1908/*
1909 * User data management
1910 */
1911
1912APR_DECLARE(apr_status_t)apr_status_t apr_pool_userdata_set(const void *data, const char *key,
1913 apr_status_t (*cleanup) (void *),
1914 apr_pool_t *pool)
1915{
1916#if APR_POOL_DEBUG0
1917 apr_pool_check_integrity(pool);
1918#endif /* APR_POOL_DEBUG */
1919
1920 if (pool->user_data == NULL((void*)0))
1921 pool->user_data = apr_hash_make(pool);
1922
1923 if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING(-1)) == NULL((void*)0)) {
1924 char *new_key = apr_pstrdup(pool, key);
1925 apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING(-1), data);
1926 }
1927 else {
1928 apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING(-1), data);
1929 }
1930
1931 if (cleanup)
1932 apr_pool_cleanup_register(pool, data, cleanup, cleanup);
1933
1934 return APR_SUCCESS0;
1935}
1936
1937APR_DECLARE(apr_status_t)apr_status_t apr_pool_userdata_setn(const void *data,
1938 const char *key,
1939 apr_status_t (*cleanup)(void *),
1940 apr_pool_t *pool)
1941{
1942#if APR_POOL_DEBUG0
1943 apr_pool_check_integrity(pool);
1944#endif /* APR_POOL_DEBUG */
1945
1946 if (pool->user_data == NULL((void*)0))
1947 pool->user_data = apr_hash_make(pool);
1948
1949 apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING(-1), data);
1950
1951 if (cleanup)
1952 apr_pool_cleanup_register(pool, data, cleanup, cleanup);
1953
1954 return APR_SUCCESS0;
1955}
1956
1957APR_DECLARE(apr_status_t)apr_status_t apr_pool_userdata_get(void **data, const char *key,
1958 apr_pool_t *pool)
1959{
1960#if APR_POOL_DEBUG0
1961 apr_pool_check_integrity(pool);
1962#endif /* APR_POOL_DEBUG */
1963
1964 if (pool->user_data == NULL((void*)0)) {
1965 *data = NULL((void*)0);
1966 }
1967 else {
1968 *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING(-1));
1969 }
1970
1971 return APR_SUCCESS0;
1972}
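
An illustrative round trip through the two calls above (the key and
value are hypothetical):

    void *val;

    apr_pool_userdata_set("config", "my-app-key", NULL, pool);
    apr_pool_userdata_get(&val, "my-app-key", pool);
    /* val now points at "config"; an unknown key yields NULL */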
1973
1974
1975/*
1976 * Cleanup
1977 */
1978
1979struct cleanup_t {
1980 struct cleanup_t *next;
1981 const void *data;
1982 apr_status_t (*plain_cleanup_fn)(void *data);
1983 apr_status_t (*child_cleanup_fn)(void *data);
1984};
1985
1986APR_DECLARE(void)void apr_pool_cleanup_register(apr_pool_t *p, const void *data,
1987 apr_status_t (*plain_cleanup_fn)(void *data),
1988 apr_status_t (*child_cleanup_fn)(void *data))
1989{
1990 cleanup_t *c;
1991
1992#if APR_POOL_DEBUG0
1993 apr_pool_check_integrity(p);
1994#endif /* APR_POOL_DEBUG */
1995
1996 if (p != NULL((void*)0)) {
1997 if (p->free_cleanups) {
1998 /* reuse a cleanup structure */
1999 c = p->free_cleanups;
2000 p->free_cleanups = c->next;
2001 } else {
2002 c = apr_palloc(p, sizeof(cleanup_t));
2003 }
2004 c->data = data;
2005 c->plain_cleanup_fn = plain_cleanup_fn;
2006 c->child_cleanup_fn = child_cleanup_fn;
2007 c->next = p->cleanups;
2008 p->cleanups = c;
2009 }
2010}
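
A typical registration looks like this (illustrative; close_widget and
w are hypothetical names):

    static apr_status_t close_widget(void *data)
    {
        /* release whatever resource 'data' wraps */
        return APR_SUCCESS;
    }

    apr_pool_cleanup_register(pool, w, close_widget,
                              apr_pool_cleanup_null);

Passing apr_pool_cleanup_null (defined later in this file) as the child
cleanup means the resource is deliberately left alone in a forked child
about to exec.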
2011
2012APR_DECLARE(void)void apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
2013 apr_status_t (*cleanup_fn)(void *))
2014{
2015 cleanup_t *c, **lastp;
2016
2017#if APR_POOL_DEBUG0
2018 apr_pool_check_integrity(p);
2019#endif /* APR_POOL_DEBUG */
2020
2021 if (p == NULL((void*)0))
2022 return;
2023
2024 c = p->cleanups;
2025 lastp = &p->cleanups;
2026 while (c) {
2027 if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
2028 *lastp = c->next;
2029 /* move to freelist */
2030 c->next = p->free_cleanups;
2031 p->free_cleanups = c;
2032 break;
2033 }
2034
2035 lastp = &c->next;
2036
2037 if (c == c->next) {
2038 c = NULL;
2039 } else {
2040 c = c->next;
2041 }
2042 }
2043}
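
Killing a cleanup matters whenever the resource is released by hand before the pool goes away; without it, the registered function would run a second time on a stale pointer at destroy time. Continuing the close_log sketch above:

    fclose(fp);                               /* released manually */
    apr_pool_cleanup_kill(p, fp, close_log);  /* so destroy won't close again */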
2044
2045APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
2046 apr_status_t (*plain_cleanup_fn)(void *),
2047 apr_status_t (*child_cleanup_fn)(void *))
2048{
2049 cleanup_t *c;
2050
2051#if APR_POOL_DEBUG
2052 apr_pool_check_integrity(p);
2053#endif /* APR_POOL_DEBUG */
2054
2055 if (p == NULL)
2056 return;
2057
2058 c = p->cleanups;
2059 while (c) {
2060 if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
2061 c->child_cleanup_fn = child_cleanup_fn;
2062 break;
2063 }
2064
2065 c = c->next;
2066 }
2067}
2068
2069APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
2070 apr_status_t (*cleanup_fn)(void *))
2071{
2072 apr_pool_cleanup_kill(p, data, cleanup_fn);
2073 return (*cleanup_fn)(data);
2074}
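
This is the one-call form of the pattern sketched after apr_pool_cleanup_kill above: unregister first, then invoke, so the cleanup runs exactly once even if the pool is destroyed later:

    apr_pool_cleanup_run(p, fp, close_log);   /* kill + close in one step */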
2075
2076static void run_cleanups(cleanup_t **cref)
2077{
2078 cleanup_t *c = *cref;
2079
2080 while (c) {
2081 *cref = c->next;
2082 (*c->plain_cleanup_fn)((void *)c->data);
2083 c = *cref;
2084 }
2085}
2086
2087static void run_child_cleanups(cleanup_t **cref)
2088{
2089 cleanup_t *c = *cref;
2090
2091 while (c) {
2092 *cref = c->next;
2093 (*c->child_cleanup_fn)((void *)c->data);
2094 c = *cref;
2095 }
2096}
2097
2098static void cleanup_pool_for_exec(apr_pool_t *p)
2099{
2100 run_child_cleanups(&p->cleanups);
2101
2102 for (p = p->child; p; p = p->sibling)
2103 cleanup_pool_for_exec(p);
2104}
2105
2106APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
2107{
2108#if !defined(WIN32) && !defined(OS2)
2109 /*
2110 * Don't need to do anything on NT or OS/2, because I
2111 * am actually going to spawn the new process - not
2112 * exec it. All handles that are not inheritable will
2113 * be automagically closed. The only problem is with
2114 * file handles that are open, but there isn't much
2115 * I can do about that (except if the child decides
2116 * to go out and close them).
2117 */
2118 cleanup_pool_for_exec(global_pool);
2119#endif /* !defined(WIN32) && !defined(OS2) */
2120}
2121
2122APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
2123{
2124 /* do nothing cleanup routine */
2125 return APR_SUCCESS;
2126}
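
The do-nothing routine exists so callers can fill the child slot while opting out of exec-time cleanup, e.g. for a descriptor that must stay open across exec (reusing the close_log sketch):

    /* close on pool destruction, but leave open in forked children */
    apr_pool_cleanup_register(p, fp, close_log, apr_pool_cleanup_null);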
2127
2128/* Subprocesses don't use the generic cleanup interface because
2129 * we don't want multiple subprocesses to result in multiple
2130 * three-second pauses; the subprocesses have to be "freed" all
2131 * at once. If other resources are introduced with the same property,
2132 * we might want to fold support for that into the generic interface.
2133 * For now, it's a special case.
2134 */
2135APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *proc,
2136 apr_kill_conditions_e how)
2137{
2138 struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain));
2139
2140 pc->proc = proc;
2141 pc->kill_how = how;
2142 pc->next = pool->subprocesses;
2143 pool->subprocesses = pc;
2144}
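
A sketch of the intended call pattern; the spawn itself (apr_procattr_t setup plus apr_proc_create) is assumed to happen elsewhere and is only hinted at here:

    apr_proc_t *proc = apr_palloc(p, sizeof(*proc));
    /* ... spawn with apr_proc_create(proc, progname, args, NULL, attr, p) ... */
    apr_pool_note_subprocess(p, proc, APR_KILL_AFTER_TIMEOUT);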
2145
2146static void free_proc_chain(struct process_chain *procs)
2147{
2148 /* Dispose of the subprocesses we've spawned off in the course of
2149 * whatever it was we're cleaning up now. This may involve killing
2150 * some of them off...
2151 */
2152 struct process_chain *pc;
2153 int need_timeout = 0;
2154 apr_time_t timeout_interval;
2155
2156 if (!procs)
2157 return; /* No work. Whew! */
2158
2159 /* First, check to see if we need to do the SIGTERM, sleep, SIGKILL
2160 * dance with any of the processes we're cleaning up. If we've got
2161 * any kill-on-sight subprocesses, ditch them now as well, so they
2162 * don't waste any more cycles doing whatever it is that they shouldn't
2163 * be doing anymore.
2164 */
2165
2166#ifndef NEED_WAITPID
2167 /* Pick up all defunct processes */
2168 for (pc = procs; pc; pc = pc->next) {
2169 if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE)
2170 pc->kill_how = APR_KILL_NEVER;
2171 }
2172#endif /* !defined(NEED_WAITPID) */
2173
2174 for (pc = procs; pc; pc = pc->next) {
2175#ifndef WIN32
2176 if ((pc->kill_how == APR_KILL_AFTER_TIMEOUT)
2177 || (pc->kill_how == APR_KILL_ONLY_ONCE)) {
2178 /*
2179 * Subprocess may be dead already. Only need the timeout if not.
2180 * Note: apr_proc_kill on Windows is TerminateProcess(), which is
2181 * similar to a SIGKILL, so always give the process a timeout
2182 * under Windows before killing it.
2183 */
2184 if (apr_proc_kill(pc->proc, SIGTERM) == APR_SUCCESS)
2185 need_timeout = 1;
2186 }
2187 else if (pc->kill_how == APR_KILL_ALWAYS) {
2188#else /* WIN32 knows only one fast, clean method of killing processes today */
2189 if (pc->kill_how != APR_KILL_NEVER) {
2190 need_timeout = 1;
2191 pc->kill_how = APR_KILL_ALWAYS;
2192#endif
2193 apr_proc_kill(pc->proc, SIGKILL);
2194 }
2195 }
2196
2197 /* Sleep only if we have to. The sleep algorithm grows
2198 * by a factor of two on each iteration. TIMEOUT_INTERVAL
2199 * is equal to TIMEOUT_USECS / 64.
2200 */
2201 if (need_timeout) {
2202 timeout_interval = TIMEOUT_INTERVAL;
2203 apr_sleep(timeout_interval);
2204
2205 do {
2206 /* check the status of the subprocesses */
2207 need_timeout = 0;
2208 for (pc = procs; pc; pc = pc->next) {
2209 if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) {
2210 if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT)
2211 == APR_CHILD_NOTDONE)
2212 need_timeout = 1; /* subprocess is still active */
2213 else
2214 pc->kill_how = APR_KILL_NEVER; /* subprocess has exited */
2215 }
2216 }
2217 if (need_timeout) {
2218 if (timeout_interval >= TIMEOUT_USECS) {
2219 break;
2220 }
2221 apr_sleep(timeout_interval);
2222 timeout_interval *= 2;
2223 }
2224 } while (need_timeout);
2225 }
2226
2227 /* OK, the scripts we just timed out for have had a chance to clean up
2228 * --- now, just get rid of them, and also clean up the system accounting
2229 * goop...
2230 */
2231 for (pc = procs; pc; pc = pc->next) {
2232 if (pc->kill_how == APR_KILL_AFTER_TIMEOUT)
2233 apr_proc_kill(pc->proc, SIGKILL);
2234 }
2235
2236 /* Now wait for all the signaled processes to die */
2237 for (pc = procs; pc; pc = pc->next) {
2238 if (pc->kill_how != APR_KILL_NEVER)
2239 (void)apr_proc_wait(pc->proc, NULL, NULL, APR_WAIT);
2240 }
2241}
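
The backoff arithmetic in the timeout loop above works out exactly: the sleeps are 46875 + 46875 + 93750 + 187500 + 375000 + 750000 + 1500000 = 3,000,000 µs, at which point timeout_interval has reached TIMEOUT_USECS and the >= check breaks out of the loop. A still-running subprocess therefore gets precisely the advertised 3 seconds between the initial SIGTERM and the final SIGKILL pass.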
2242
2243
2244/*
2245 * Pool creation/destruction stubs, for people who are running
2246 * mixed release/debug environments.
2247 */
2248
2249#if !APR_POOL_DEBUG
2250APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
2251 const char *file_line)
2252{
2253 return apr_palloc(pool, size);
2254}
2255
2256APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
2257 const char *file_line)
2258{
2259 return apr_pcalloc(pool, size);
2260}
2261
2262APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
2263 const char *file_line)
2264{
2265 apr_pool_clear(pool);
2266}
2267
2268APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
2269 const char *file_line)
2270{
2271 apr_pool_destroy(pool);
2272}
2273
2274APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
2275 apr_pool_t *parent,
2276 apr_abortfunc_t abort_fn,
2277 apr_allocator_t *allocator,
2278 const char *file_line)
2279{
2280 return apr_pool_create_ex(newpool, parent, abort_fn, allocator);
    [analyzer path, step 1] Calling 'apr_pool_create_ex'
2281}
2282
2283#else /* APR_POOL_DEBUG */
2284
2285#undef apr_palloc
2286APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size);
2287
2288APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
2289{
2290 return apr_palloc_debug(pool, size, "undefined");
2291}
2292
2293#undef apr_pcalloc
2294APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
2295
2296APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
2297{
2298 return apr_pcalloc_debug(pool, size, "undefined");
2299}
2300
2301#undef apr_pool_clear
2302APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool);
2303
2304APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
2305{
2306 apr_pool_clear_debug(pool, "undefined");
2307}
2308
2309#undef apr_pool_destroy
2310APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool);
2311
2312APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
2313{
2314 apr_pool_destroy_debug(pool, "undefined");
2315}
2316
2317#undef apr_pool_create_ex
2318APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
2319 apr_pool_t *parent,
2320 apr_abortfunc_t abort_fn,
2321 apr_allocator_t *allocator);
2322
2323APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
2324 apr_pool_t *parent,
2325 apr_abortfunc_t abort_fn,
2326 apr_allocator_t *allocator)
2327{
2328 return apr_pool_create_ex_debug(newpool, parent,
2329 abort_fn, allocator,
2330 "undefined");
2331}
2332
2333#endif /* APR_POOL_DEBUG */
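
These stubs keep the link interface complete in both build flavours: objects compiled with pool debugging call the _debug entry points, objects compiled without it call the plain ones, and either kind can link against either build of the library. A hedged illustration of the debug-side mapping; the exact macro text lives in apr_pools.h and is paraphrased from memory here:

    /* With APR_POOL_DEBUG enabled, apr_pools.h does roughly:
     *
     *   #define apr_palloc(p, size) \
     *       apr_palloc_debug(p, size, APR_POOL__FILE_LINE__)
     *
     * so a debug-compiled caller reaches apr_palloc_debug even in a
     * release libapr, where the stub above forwards back to apr_palloc.
     */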