author:    David Seikel  2011-02-20 20:31:57 +1000
committer: David Seikel  2011-02-20 20:31:57 +1000
commit:    d6666d0776985bada40f19c623b86d82a4113f92
tree:      eb4d234a313c51b40f6d53d4a59bdc831a7cd0fc /linden/indra/llcommon/llthread.h
parent:    Remove catering to LL's probably illegal attempt to block copying of open sou...
parent:    Reduce the logging spam to warnings. Debug was too much.
Merge branch 'weekly' into branding
Conflicts (solved):
linden/indra/media_plugins/gstreamer010/media_plugin_gstreamer010.cpp
linden/indra/newview/llappviewer.cpp
linden/indra/newview/llappviewerwin32.cpp
Diffstat (limited to 'linden/indra/llcommon/llthread.h')
 linden/indra/llcommon/llthread.h | 184
 1 file changed, 169 insertions(+), 15 deletions(-)
diff --git a/linden/indra/llcommon/llthread.h b/linden/indra/llcommon/llthread.h
index 98d64ef..1e982cc 100644
--- a/linden/indra/llcommon/llthread.h
+++ b/linden/indra/llcommon/llthread.h
@@ -38,11 +38,28 @@
 #include "llmemory.h"
 
 #include "apr_thread_cond.h"
+#include "aiaprpool.h"
 
 class LLThread;
 class LLMutex;
 class LLCondition;
 
+class LL_COMMON_API AIThreadLocalData
+{
+private:
+	static apr_threadkey_t* sThreadLocalDataKey;
+
+public:
+	// Thread-local memory pool.
+	AIAPRRootPool mRootPool;
+	AIVolatileAPRPool mVolatileAPRPool;
+
+	static void init(void);
+	static void destroy(void* thread_local_data);
+	static void create(LLThread* pthread);
+	static AIThreadLocalData& tldata(void);
+};
+
 class LL_COMMON_API LLThread
 {
 public:
@@ -53,7 +70,7 @@ public:
 		QUITTING= 2 // Someone wants this thread to quit
 	} EThreadStatus;
 
-	LLThread(const std::string& name, apr_pool_t *poolp = NULL);
+	LLThread(std::string const& name);
 	virtual ~LLThread(); // Warning! You almost NEVER want to destroy a thread unless it's in the STOPPED state.
 	virtual void shutdown(); // stops the thread
 
@@ -82,7 +99,8 @@ public:
 	// this kicks off the apr thread
 	void start(void);
 
-	apr_pool_t *getAPRPool() { return mAPRPoolp; }
+	// Return thread-local data for the current thread.
+	static AIThreadLocalData& tldata(void) { return AIThreadLocalData::tldata(); }
 
 private:
 	bool mPaused;
@@ -95,10 +113,11 @@ protected:
 	LLCondition* mRunCondition;
 
 	apr_thread_t *mAPRThreadp;
-	apr_pool_t *mAPRPoolp;
-	bool mIsLocalPool;
 	EThreadStatus mStatus;
 
+	friend void AIThreadLocalData::create(LLThread* threadp);
+	AIThreadLocalData* mThreadLocalData;
+
 	void setQuitting();
 
 	// virtual function overridden by subclass -- this will be called when the thread runs
@@ -125,12 +144,9 @@ protected:
 
 //============================================================================
 
-class LL_COMMON_API LLMutex
+class LL_COMMON_API LLMutexBase
 {
 public:
-	LLMutex(apr_pool_t *apr_poolp); // NULL pool constructs a new pool for the mutex
-	~LLMutex();
-
 	void lock() { apr_thread_mutex_lock(mAPRMutexp); }
 	void unlock() { apr_thread_mutex_unlock(mAPRMutexp); }
 	// Returns true if lock was obtained successfully.
@@ -139,16 +155,60 @@ public:
 	bool isLocked(); // non-blocking, but does do a lock/unlock so not free
 
 protected:
-	apr_thread_mutex_t *mAPRMutexp;
-	apr_pool_t *mAPRPoolp;
-	bool mIsLocalPool;
+	// mAPRMutexp is initialized and uninitialized in the derived class.
+	apr_thread_mutex_t* mAPRMutexp;
+};
+
+class LL_COMMON_API LLMutex : public LLMutexBase
+{
+public:
+	LLMutex(AIAPRPool& parent = LLThread::tldata().mRootPool) : mPool(parent)
+	{
+		apr_thread_mutex_create(&mAPRMutexp, APR_THREAD_MUTEX_UNNESTED, mPool());
+	}
+	~LLMutex()
+	{
+		llassert(!isLocked()); // better not be locked!
+		apr_thread_mutex_destroy(mAPRMutexp);
+		mAPRMutexp = NULL;
+	}
+
+protected:
+	AIAPRPool mPool;
 };
 
+#if APR_HAS_THREADS
+// No need to use a root pool in this case.
+typedef LLMutex LLMutexRootPool;
+#else // APR_HAS_THREADS
+class LL_COMMON_API LLMutexRootPool : public LLMutexBase
+{
+public:
+	LLMutexRootPool(void)
+	{
+		apr_thread_mutex_create(&mAPRMutexp, APR_THREAD_MUTEX_UNNESTED, mRootPool());
+	}
+	~LLMutexRootPool()
+	{
+#if APR_POOL_DEBUG
+		// It is allowed to destruct root pools from a different thread.
+		mRootPool.grab_ownership();
+#endif
+		llassert(!isLocked()); // better not be locked!
+		apr_thread_mutex_destroy(mAPRMutexp);
+		mAPRMutexp = NULL;
+	}
+
+protected:
+	AIAPRRootPool mRootPool;
+};
+#endif // APR_HAS_THREADS
+
 // Actually a condition/mutex pair (since each condition needs to be associated with a mutex).
 class LL_COMMON_API LLCondition : public LLMutex
 {
 public:
-	LLCondition(apr_pool_t *apr_poolp); // Defaults to global pool, could use the thread pool as well.
+	LLCondition(AIAPRPool& parent = LLThread::tldata().mRootPool);
 	~LLCondition();
 
 	void wait(); // blocks
@@ -162,7 +222,7 @@ protected:
 class LL_COMMON_API LLMutexLock
 {
 public:
-	LLMutexLock(LLMutex* mutex)
+	LLMutexLock(LLMutexBase* mutex)
 	{
 		mMutex = mutex;
 		mMutex->lock();
@@ -172,7 +232,102 @@ public:
 		mMutex->unlock();
 	}
 private:
-	LLMutex* mMutex;
+	LLMutexBase* mMutex;
+};
+
+class AIRWLock
+{
+public:
+	AIRWLock(AIAPRPool& parent = LLThread::tldata().mRootPool) :
+		mWriterWaitingMutex(parent), mNoHoldersCondition(parent), mHoldersCount(0), mWriterIsWaiting(false) { }
+
+private:
+	LLMutex mWriterWaitingMutex;		//!< This mutex is locked while some writer is waiting for access.
+	LLCondition mNoHoldersCondition;	//!< Access control for mHoldersCount. Condition true when there are no more holders.
+	int mHoldersCount;			//!< Number of readers or -1 if a writer locked this object.
+	// This is volatile because we read it outside the critical area of mWriterWaitingMutex, at [1].
+	// That means that other threads can change it while we are already in the (inlined) function rdlock.
+	// Without volatile, the following assembly would fail:
+	//   register x = mWriterIsWaiting;
+	//   /* some thread changes mWriterIsWaiting */
+	//   if (x ...
+	// However, because the function is fuzzy to begin with (we don't mind that this race
+	// condition exists) it would work fine without volatile. So, basically it's just here
+	// out of principle ;). -- Aleric
+	bool volatile mWriterIsWaiting;		//!< True when there is a writer waiting for write access.
+
+public:
+	void rdlock(bool high_priority = false)
+	{
+		// Give a writer a higher priority (kinda fuzzy).
+		if (mWriterIsWaiting && !high_priority)	// [1] If there is a writer interested,
+		{
+			mWriterWaitingMutex.lock();	// [2] then give it precedence and wait here.
+			// If we get here then the writer got its access; mHoldersCount == -1.
+			mWriterWaitingMutex.unlock();
+		}
+		mNoHoldersCondition.lock();		// [3] Get exclusive access to mHoldersCount.
+		while (mHoldersCount == -1)		// [4]
+		{
+			mNoHoldersCondition.wait();	// [5] Wait till mHoldersCount is (or just was) 0.
+		}
+		++mHoldersCount;			// One more reader.
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount.
+	}
+	void rdunlock(void)
+	{
+		mNoHoldersCondition.lock();		// Get exclusive access to mHoldersCount.
+		if (--mHoldersCount == 0)		// Was this the last reader?
+		{
+			mNoHoldersCondition.signal();	// Tell waiting threads, see [5], [6] and [7].
+		}
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount.
+	}
+	void wrlock(void)
+	{
+		mWriterWaitingMutex.lock();		// Block new readers, see [2],
+		mWriterIsWaiting = true;		// from this moment on, see [1].
+		mNoHoldersCondition.lock();		// Get exclusive access to mHoldersCount.
+		while (mHoldersCount != 0)		// Other readers or writers have this lock?
+		{
+			mNoHoldersCondition.wait();	// [6] Wait till mHoldersCount is (or just was) 0.
+		}
+		mWriterIsWaiting = false;		// Stop checking the lock for new readers, see [1].
+		mWriterWaitingMutex.unlock();		// Release blocked readers, they will still hang at [3].
+		mHoldersCount = -1;			// We are a writer now (will cause a hang at [5], see [4]).
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount (readers go from [3] to [5]).
+	}
+	void wrunlock(void)
+	{
+		mNoHoldersCondition.lock();		// Get exclusive access to mHoldersCount.
+		mHoldersCount = 0;			// We have no writer anymore.
+		mNoHoldersCondition.signal();		// Tell waiting threads, see [5], [6] and [7].
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount.
+	}
+	void rd2wrlock(void)
+	{
+		mNoHoldersCondition.lock();		// Get exclusive access to mHoldersCount. Blocks new readers at [3].
+		if (--mHoldersCount > 0)		// Any other readers left?
+		{
+			mWriterWaitingMutex.lock();	// Block new readers, see [2],
+			mWriterIsWaiting = true;	// from this moment on, see [1].
+			while (mHoldersCount != 0)	// Other readers (still) have this lock?
+			{
+				mNoHoldersCondition.wait(); // [7] Wait till mHoldersCount is (or just was) 0.
+			}
+			mWriterIsWaiting = false;	// Stop checking the lock for new readers, see [1].
+			mWriterWaitingMutex.unlock();	// Release blocked readers, they will still hang at [3].
+		}
+		mHoldersCount = -1;			// We are a writer now (will cause a hang at [5], see [4]).
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount (readers go from [3] to [5]).
+	}
+	void wr2rdlock(void)
+	{
+		mNoHoldersCondition.lock();		// Get exclusive access to mHoldersCount.
+		mHoldersCount = 1;			// Turn writer into a reader.
+		mNoHoldersCondition.signal();		// Tell waiting readers, see [5].
+		mNoHoldersCondition.unlock();		// Release lock on mHoldersCount.
+	}
 };
 
 //============================================================================
@@ -187,7 +342,6 @@ void LLThread::unlockData()
 	mRunCondition->unlock();
 }
 
-
 //============================================================================
 
 // see llmemory.h for LLPointer<> definition
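
Usage note (not part of the commit): a minimal sketch of how viewer code would use the reworked API after this merge. Mutexes and conditions no longer take an apr_pool_t*; they default to the calling thread's root pool via LLThread::tldata(), and the new AIRWLock allows many concurrent readers while giving a waiting writer precedence over new readers. The ItemCache class, its members, and append_line below are hypothetical illustrations only; the sketch assumes APR and AIThreadLocalData::init() have already been set up by the viewer at startup.

#include <map>
#include <string>

#include "llthread.h" // LLMutex, LLMutexBase, LLMutexLock, AIRWLock (as above)

// Hypothetical read-mostly cache protected by the new AIRWLock.
class ItemCache
{
public:
	// Many threads may hold the read lock at the same time.
	bool lookup(std::string const& key, int& value_out)
	{
		mRWLock.rdlock();
		std::map<std::string, int>::const_iterator it = mMap.find(key);
		bool found = (it != mMap.end());
		if (found) value_out = it->second;
		mRWLock.rdunlock();
		return found;
	}

	// Writers are serialized and take precedence over new readers, see [1]/[2].
	void store(std::string const& key, int value)
	{
		mRWLock.wrlock();
		mMap[key] = value;
		mRWLock.wrunlock();
	}

	// Read first, then upgrade to write access only if an insert is needed.
	// Note: two threads upgrading concurrently may both end up writing, which
	// is harmless here because the insert is idempotent.
	void storeIfAbsent(std::string const& key, int value)
	{
		mRWLock.rdlock();
		if (mMap.find(key) == mMap.end())
		{
			mRWLock.rd2wrlock();	// waits until the other readers drain
			mMap[key] = value;
			mRWLock.wrunlock();
			return;
		}
		mRWLock.rdunlock();
	}

private:
	AIRWLock mRWLock;	// default-constructed from LLThread::tldata().mRootPool
	std::map<std::string, int> mMap;
};

// Plain mutual exclusion: LLMutexLock now takes an LLMutexBase*, so the same
// scoped guard works for LLMutex and (on non-threaded APR builds) LLMutexRootPool.
void append_line(LLMutex& mutex, std::string& shared_buffer, std::string const& line)
{
	LLMutexLock lock(&mutex);	// locked here, unlocked when 'lock' goes out of scope
	shared_buffer += line;
	shared_buffer += '\n';
}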