path: root/linden/indra/llcommon/aiaprpool.cpp
Diffstat (limited to 'linden/indra/llcommon/aiaprpool.cpp')
-rw-r--r--   linden/indra/llcommon/aiaprpool.cpp   198
1 file changed, 198 insertions, 0 deletions
diff --git a/linden/indra/llcommon/aiaprpool.cpp b/linden/indra/llcommon/aiaprpool.cpp
new file mode 100644
index 0000000..d3748e9
--- /dev/null
+++ b/linden/indra/llcommon/aiaprpool.cpp
@@ -0,0 +1,198 @@
/**
 * @file aiaprpool.cpp
 *
 * Copyright (c) 2010, Aleric Inglewood.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution.
 *
 * CHANGELOG
 *   and additional copyright holders.
 *
 *   04/04/2010
 *   - Initial version, written by Aleric Inglewood @ SL
 *
 *   10/11/2010
 *   - Changed filename, class names and license to a more
 *     company-neutral format.
 *   - Added APR_HAS_THREADS #if's to allow creation and destruction
 *     of subpools by threads other than the parent pool owner.
 */

#include "linden_common.h"

#include "llerror.h"
#include "aiaprpool.h"
#include "llthread.h"

// Create a subpool from parent.
void AIAPRPool::create(AIAPRPool& parent)
{
  llassert(!mPool);             // Must be non-initialized.
  mParent = &parent;
  if (!mParent)                 // Using the default parameter?
  {
    // By default use the root pool of the current thread.
    mParent = &AIThreadLocalData::tldata().mRootPool;
  }
  llassert(mParent->mPool);     // Parent must be initialized.
#if APR_HAS_THREADS
  // As per the documentation of APR (i.e. http://apr.apache.org/docs/apr/1.4/apr__pools_8h.html):
  //
  // Note that most operations on pools are not thread-safe: a single pool should only be
  // accessed by a single thread at any given time. The one exception to this rule is creating
  // a subpool of a given pool: one or more threads can safely create subpools at the same
  // time that another thread accesses the parent pool.
  //
  // In other words, it's safe for any thread to create a (sub)pool, independent of who
  // owns the parent pool.
  mOwner = apr_os_thread_current();
#else
  mOwner = mParent->mOwner;
  llassert(apr_os_thread_equal(mOwner, apr_os_thread_current()));
#endif
  apr_status_t const apr_pool_create_status = apr_pool_create(&mPool, mParent->mPool);
  llassert_always(apr_pool_create_status == APR_SUCCESS);
  llassert(mPool);              // Initialized.
  apr_pool_cleanup_register(mPool, this, &s_plain_cleanup, &apr_pool_cleanup_null);
}
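// A minimal usage sketch, assuming AIAPRPool can be default-constructed as
// aiaprpool.h presumably allows; the accessor for the underlying apr_pool_t*
// is not part of this file and is therefore omitted:
//
//   AIAPRPool subpool;
//   subpool.create();      // Default parameter: subpool of this thread's root pool.
//   // ... perform APR allocations from the subpool ...
//   subpool.destroy();     // Optional; destroying the parent pool also destroys it.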

// Destroy the (sub)pool, if any.
void AIAPRPool::destroy(void)
{
  // Only do anything if we are not already (being) destroyed.
  if (mPool)
  {
#if !APR_HAS_THREADS
    // If we are a root pool, then every thread may destruct us: in that case
    // we have to assume that no other thread will use this pool concurrently,
    // of course. Otherwise, if we are a subpool, only the thread that owns
    // the parent may destruct us, since that is the pool that is still alive,
    // possibly being used by others and being altered here.
    llassert(!mParent || apr_os_thread_equal(mParent->mOwner, apr_os_thread_current()));
#endif
    apr_pool_t* pool = mPool;
    mPool = NULL;               // Mark that we are BEING destructed.
    apr_pool_cleanup_kill(pool, this, &s_plain_cleanup);
    apr_pool_destroy(pool);
  }
}

bool AIAPRPool::parent_is_being_destructed(void)
{
  return mParent && (!mParent->mPool || mParent->parent_is_being_destructed());
}

AIAPRInitialization::AIAPRInitialization(void)
{
  static bool apr_initialized = false;

  if (!apr_initialized)
  {
    apr_initialize();
  }

  apr_initialized = true;
}

bool AIAPRRootPool::sCountInitialized = false;
apr_uint32_t volatile AIAPRRootPool::sCount;

extern apr_thread_mutex_t* gLogMutexp;
extern apr_thread_mutex_t* gCallStacksLogMutexp;

AIAPRRootPool::AIAPRRootPool(void) : AIAPRInitialization(), AIAPRPool(0)
{
  // sCountInitialized doesn't need locking because when we get here there is still only a single thread.
  if (!sCountInitialized)
  {
    // Initialize the logging mutexes.
    apr_thread_mutex_create(&gLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool);
    apr_thread_mutex_create(&gCallStacksLogMutexp, APR_THREAD_MUTEX_UNNESTED, mPool);

    apr_status_t status = apr_atomic_init(mPool);
    llassert_always(status == APR_SUCCESS);
    apr_atomic_set32(&sCount, 1);   // Set to 1 to account for the global root pool.
    sCountInitialized = true;

    // Initialize thread-local APR pool support.
    // Because this recursively calls AIAPRRootPool::AIAPRRootPool(void),
    // it must be done last, so that sCount is already initialized.
    AIThreadLocalData::init();
  }
  apr_atomic_inc32(&sCount);
}

AIAPRRootPool::~AIAPRRootPool()
{
  if (!apr_atomic_dec32(&sCount))
  {
    // The last pool was destructed. Clean up the remainder of APR.
    LL_INFOS("APR") << "Cleaning up APR" << LL_ENDL;

    if (gLogMutexp)
    {
      // Clean up the logging mutex.

      // All other threads NEED to be done before we clean up APR, so this is okay.
      apr_thread_mutex_destroy(gLogMutexp);
      gLogMutexp = NULL;
    }
    if (gCallStacksLogMutexp)
    {
      // Clean up the call stacks logging mutex.

      // All other threads NEED to be done before we clean up APR, so this is okay.
      apr_thread_mutex_destroy(gCallStacksLogMutexp);
      gCallStacksLogMutexp = NULL;
    }

    // Must destroy ALL, and therefore this last AIAPRRootPool, before terminating APR.
    static_cast<AIAPRRootPool*>(this)->destroy();

    apr_terminate();
  }
}

//static
AIAPRRootPool& AIAPRRootPool::get(void)
{
  static AIAPRRootPool global_APRpool(0);   // This is what used to be gAPRPoolp.
  return global_APRpool;
}
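// A short usage sketch for the singleton above; per the comment it replaces the
// old global gAPRPoolp, so process-global code would obtain it and, if needed,
// hang a subpool off it explicitly (names as used elsewhere in this file):
//
//   AIAPRRootPool& root_pool = AIAPRRootPool::get();
//   AIAPRPool subpool;
//   subpool.create(root_pool);   // Explicit parent instead of the thread-local root pool.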

void AIVolatileAPRPool::clearVolatileAPRPool()
{
  llassert_always(mNumActiveRef > 0);
  if (--mNumActiveRef == 0)
  {
    if (isOld())
    {
      destroy();
      mNumTotalRef = 0;
    }
    else
    {
      // This does not actually free the memory,
      // it just allows the pool to re-use this memory for the next allocation.
      clear();
    }
  }

  // Paranoia check if the pool is jammed.
  llassert(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2));
}
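
clearVolatileAPRPool() is the release half of a reference-counted pool: each acquisition of the volatile pool is expected to increment mNumActiveRef and mNumTotalRef, and each matching release calls clearVolatileAPRPool(), which either destroys an old pool or merely clears it once the last active reference is gone. The acquire half is not part of this diff; the following sketch only illustrates the pairing, and the name getVolatileAPRPool() as well as the lazy create() call are assumptions, not code from this file.

    // Hypothetical acquire-side counterpart (sketch only, not from this diff).
    apr_pool_t* AIVolatileAPRPool::getVolatileAPRPool()
    {
      if (!mPool)
        create();          // Lazily (re)create the pool after a destroy().
      ++mNumActiveRef;     // Balanced by clearVolatileAPRPool().
      ++mNumTotalRef;      // Running count checked by the paranoia assert above.
      return mPool;
    }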