 libraries/ecore/src/lib/ecore/ecore_thread.c | 1666 ++++++++++++++++++++++++++
 1 file changed, 1666 insertions(+), 0 deletions(-)
diff --git a/libraries/ecore/src/lib/ecore/ecore_thread.c b/libraries/ecore/src/lib/ecore/ecore_thread.c
new file mode 100644
index 0000000..2a8ea76
--- /dev/null
+++ b/libraries/ecore/src/lib/ecore/ecore_thread.c
@@ -0,0 +1,1666 @@
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <sys/time.h>
#include <assert.h>
#include <sys/types.h>
#include <unistd.h>

#ifdef HAVE_EVIL
# include <Evil.h>
#endif

#include "Ecore.h"
#include "ecore_private.h"

#ifdef EFL_HAVE_THREADS

# ifdef EFL_HAVE_POSIX_THREADS
#  include <pthread.h>
#  ifdef __linux__
#   include <sched.h>
#   include <sys/resource.h>
#   include <unistd.h>
#   include <sys/syscall.h>
#   include <errno.h>
#  endif

#  define PH(x)        pthread_t x
#  define PHE(x, y)    pthread_equal(x, y)
#  define PHS()        pthread_self()
#  define PHC(x, f, d) pthread_create(&(x), NULL, (void *)f, d)
#  define PHJ(x, p)    pthread_join(x, (void **)(&(p)))
#  define PHA(x)       pthread_cancel(x)

#  define CD(x)  pthread_cond_t x
#  define CDI(x) pthread_cond_init(&(x), NULL);
#  define CDD(x) pthread_cond_destroy(&(x));
#  define CDB(x) pthread_cond_broadcast(&(x));
#  define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);

#  define LK(x)  pthread_mutex_t x
#  define LKI(x) pthread_mutex_init(&(x), NULL);
#  define LKD(x) pthread_mutex_destroy(&(x));
#  define LKL(x) pthread_mutex_lock(&(x));
#  define LKU(x) pthread_mutex_unlock(&(x));

#  define LRWK(x)   pthread_rwlock_t x
#  define LRWKI(x)  pthread_rwlock_init(&(x), NULL);
#  define LRWKD(x)  pthread_rwlock_destroy(&(x));
#  define LRWKWL(x) pthread_rwlock_wrlock(&(x));
#  define LRWKRL(x) pthread_rwlock_rdlock(&(x));
#  define LRWKU(x)  pthread_rwlock_unlock(&(x));

# else /* EFL_HAVE_WIN32_THREADS */

#  define WIN32_LEAN_AND_MEAN
#  include <windows.h>
#  undef WIN32_LEAN_AND_MEAN

typedef struct
{
   HANDLE thread;
   void  *val;
} win32_thread;

#  define PH(x)     win32_thread * x
#  define PHE(x, y) ((x) == (y))
#  define PHS()     (HANDLE)GetCurrentThreadId()

int
_ecore_thread_win32_create(win32_thread **x,
                           LPTHREAD_START_ROUTINE f,
                           void *d)
{
   win32_thread *t;
   t = (win32_thread *)calloc(1, sizeof(win32_thread));
   if (!t)
     return -1;

   t->thread = CreateThread(NULL, 0, f, d, 0, NULL);
   if (!t->thread)
     {
        free(t);
        return -1;
     }
   t->val = d;
   *x = t;

   return 0;
}

#  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)

int
_ecore_thread_win32_join(win32_thread *x,
                         void **res)
{
   if (!PHE(x, PHS()))
     {
        WaitForSingleObject(x->thread, INFINITE);
        CloseHandle(x->thread);
     }
   if (res) *res = x->val;
   free(x);

   return 0;
}

#  define PHJ(x, p) _ecore_thread_win32_join(x, (void **)(&(p)))
#  define PHA(x)    TerminateThread(x->thread, 0)

#  define LK(x)  HANDLE x
#  define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
#  define LKD(x) CloseHandle(x)
#  define LKL(x) WaitForSingleObject(x, INFINITE)
#  define LKU(x) ReleaseMutex(x)

typedef struct
{
   HANDLE semaphore;
   LONG   threads_count;
   CRITICAL_SECTION threads_count_lock;
} win32_cond;

#  define CD(x) win32_cond * x

#  define CDI(x)                                                         \
   do {                                                                  \
        x = (win32_cond *)calloc(1, sizeof(win32_cond));                 \
        if (x)                                                           \
          {                                                              \
             x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);  \
             if (x->semaphore)                                           \
               InitializeCriticalSection(&x->threads_count_lock);        \
             else                                                        \
               {                                                         \
                  free(x);                                               \
                  x = NULL;                                              \
               }                                                         \
          }                                                              \
   } while (0)

#  define CDD(x)                     \
   do {                              \
        CloseHandle(x->semaphore);   \
        free(x);                     \
        x = NULL;                    \
   } while (0)

#  define CDB(x)                                                    \
   do {                                                             \
        EnterCriticalSection(&x->threads_count_lock);               \
        if (x->threads_count > 0)                                   \
          ReleaseSemaphore(x->semaphore, x->threads_count, NULL);   \
        LeaveCriticalSection(&x->threads_count_lock);               \
   } while (0)

int
_ecore_thread_win32_cond_timedwait(win32_cond *c,
                                   HANDLE external_mutex,
                                   struct timeval *t)
{
   DWORD res;
   /* Note: unlike pthread_cond_timedwait(), this helper treats `t` as a
    * relative interval, and the recursive acquire/release below means the
    * external mutex stays held across the wait. */
   DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);
   LKL(external_mutex);
   EnterCriticalSection(&c->threads_count_lock);
   c->threads_count++;
   LeaveCriticalSection(&c->threads_count_lock);
   LKU(external_mutex);
   res = WaitForSingleObject(c->semaphore, val);
   if (res == WAIT_OBJECT_0)
     return 0;
   else
     return -1;
}

#  define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)
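
/* The emulation above follows the classic semaphore-based condition
 * variable scheme: a waiter registers itself in threads_count and then
 * blocks on the semaphore; CDB() releases one permit per registered
 * waiter.  Note that threads_count is never decremented on wakeup, so a
 * stale count can cause extra permits on a later broadcast. */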

typedef struct
{
   LONG readers_count;
   LONG writers_count;
   int  readers;
   int  writers;
   LK(mutex);
   CD(cond_read);
   CD(cond_write);
} win32_rwl;

#  define LRWK(x) win32_rwl * x
#  define LRWKI(x)                                      \
   do {                                                 \
        x = (win32_rwl *)calloc(1, sizeof(win32_rwl));  \
        if (x)                                          \
          {                                             \
             LKI(x->mutex);                             \
             if (x->mutex)                              \
               {                                        \
                  CDI(x->cond_read);                    \
                  if (x->cond_read)                     \
                    {                                   \
                       CDI(x->cond_write);              \
                       if (!x->cond_write)              \
                         {                              \
                            CDD(x->cond_read);          \
                            LKD(x->mutex);              \
                            free(x);                    \
                            x = NULL;                   \
                         }                              \
                    }                                   \
                  else                                  \
                    {                                   \
                       LKD(x->mutex);                   \
                       free(x);                         \
                       x = NULL;                        \
                    }                                   \
               }                                        \
             else                                       \
               {                                        \
                  free(x);                              \
                  x = NULL;                             \
               }                                        \
          }                                             \
   } while (0)

#  define LRWKD(x)              \
   do {                         \
        LKU(x->mutex);          \
        LKD(x->mutex);          \
        CDD(x->cond_write);     \
        CDD(x->cond_read);      \
        free(x);                \
   } while (0)
#  define LRWKWL(x)                                                              \
   do {                                                                          \
        DWORD res = WAIT_OBJECT_0;                                               \
        LKL(x->mutex);                                                           \
        if (x->writers || x->readers > 0)                                        \
          {                                                                      \
             x->writers_count++;                                                 \
             while (x->writers || x->readers > 0)                                \
               {                                                                 \
                  EnterCriticalSection(&x->cond_write->threads_count_lock);      \
                  x->cond_write->threads_count++;                                \
                  LeaveCriticalSection(&x->cond_write->threads_count_lock);      \
                  LKU(x->mutex); /* don't hold the mutex while blocked */        \
                  res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
                  LKL(x->mutex);                                                 \
                  if (res != WAIT_OBJECT_0) break;                               \
               }                                                                 \
             x->writers_count--;                                                 \
          }                                                                      \
        if (res == WAIT_OBJECT_0) x->writers = 1;                                \
        LKU(x->mutex);                                                           \
   } while (0)
#  define LRWKRL(x)                                                              \
   do {                                                                          \
        DWORD res = WAIT_OBJECT_0;                                               \
        LKL(x->mutex);                                                           \
        if (x->writers)                                                          \
          {                                                                      \
             x->readers_count++;                                                 \
             while (x->writers)                                                  \
               {                                                                 \
                  EnterCriticalSection(&x->cond_read->threads_count_lock);       \
                  x->cond_read->threads_count++;                                 \
                  LeaveCriticalSection(&x->cond_read->threads_count_lock);       \
                  LKU(x->mutex); /* don't hold the mutex while blocked */        \
                  res = WaitForSingleObject(x->cond_read->semaphore, INFINITE);  \
                  LKL(x->mutex);                                                 \
                  if (res != WAIT_OBJECT_0) break;                               \
               }                                                                 \
             x->readers_count--;                                                 \
          }                                                                      \
        if (res == WAIT_OBJECT_0)                                                \
          x->readers++;                                                          \
        LKU(x->mutex);                                                           \
   } while (0)
#  define LRWKU(x)                                                               \
   do {                                                                          \
        LKL(x->mutex);                                                           \
        if (x->writers)                                                          \
          {                                                                      \
             x->writers = 0;                                                     \
             if (x->readers_count == 1)                                          \
               {                                                                 \
                  EnterCriticalSection(&x->cond_read->threads_count_lock);       \
                  if (x->cond_read->threads_count > 0)                           \
                    ReleaseSemaphore(x->cond_read->semaphore, 1, 0);             \
                  LeaveCriticalSection(&x->cond_read->threads_count_lock);       \
               }                                                                 \
             else if (x->readers_count > 0)                                      \
               CDB(x->cond_read);                                                \
             else if (x->writers_count > 0)                                      \
               {                                                                 \
                  EnterCriticalSection(&x->cond_write->threads_count_lock);      \
                  if (x->cond_write->threads_count > 0)                          \
                    ReleaseSemaphore(x->cond_write->semaphore, 1, 0);            \
                  LeaveCriticalSection(&x->cond_write->threads_count_lock);      \
               }                                                                 \
          }                                                                      \
        else if (x->readers > 0)                                                 \
          {                                                                      \
             x->readers--;                                                       \
             if (x->readers == 0 && x->writers_count > 0)                        \
               {                                                                 \
                  EnterCriticalSection(&x->cond_write->threads_count_lock);      \
                  if (x->cond_write->threads_count > 0)                          \
                    ReleaseSemaphore(x->cond_write->semaphore, 1, 0);            \
                  LeaveCriticalSection(&x->cond_write->threads_count_lock);      \
               }                                                                 \
          }                                                                      \
        LKU(x->mutex);                                                           \
   } while (0)
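
/* Reader/writer protocol implemented by the three macros above: a write
 * lock waits on cond_write until no reader holds the lock and no writer
 * is active, then sets x->writers; a read lock waits on cond_read while a
 * writer is active, then bumps x->readers.  Unlocking wakes pending
 * readers first (cond_read), and a pending writer (cond_write) only once
 * the last reader is gone. */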

# endif

#endif

typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data Ecore_Thread_Data;

struct _Ecore_Thread_Data
{
   void        *data;
   Eina_Free_Cb cb;
};

struct _Ecore_Pthread_Worker
{
   union {
      struct
      {
         Ecore_Thread_Cb func_blocking;
      } short_run;
      struct
      {
         Ecore_Thread_Cb        func_heavy;
         Ecore_Thread_Notify_Cb func_notify;
         Ecore_Pipe            *notify;

         Ecore_Pipe            *direct_pipe;
         Ecore_Pthread_Worker  *direct_worker;

         int send;
         int received;
      } feedback_run;
   } u;

   Ecore_Thread_Cb func_cancel;
   Ecore_Thread_Cb func_end;
#ifdef EFL_HAVE_THREADS
   PH(self);
   Eina_Hash *hash;
   CD(cond);
   LK(mutex);
#endif

   const void *data;

   Eina_Bool cancel : 1;
   Eina_Bool feedback_run : 1;
   Eina_Bool kill : 1;
   Eina_Bool reschedule : 1;
   Eina_Bool no_queue : 1;
};
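
/* A worker is either a plain blocking job (u.short_run) or a feedback job
 * (u.feedback_run) that streams intermediate results back to the main
 * loop through the notify pipe.  The send/received counters let the main
 * loop drain every pending notification before the worker is killed. */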

#ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

struct _Ecore_Pthread_Data
{
   Ecore_Pthread_Worker *death_job;
   Ecore_Pipe           *p;
   void                 *data;
   PH(thread);
};
#endif

static int _ecore_thread_count_max = 0;
static int ECORE_THREAD_PIPE_DEL = 0;
static Eina_Array *_ecore_thread_pipe = NULL;

#ifdef EFL_HAVE_THREADS

static void _ecore_thread_handler(void *data __UNUSED__,
                                  void *buffer,
                                  unsigned int nbyte);

static Ecore_Pipe *
_ecore_thread_pipe_get(void)
{
   if (eina_array_count_get(_ecore_thread_pipe) > 0)
     return eina_array_pop(_ecore_thread_pipe);

   return ecore_pipe_add(_ecore_thread_handler, NULL);
}

static int _ecore_thread_count = 0;

static Ecore_Event_Handler *del_handler = NULL;
static Eina_List *_ecore_active_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads_feedback = NULL;
static LK(_ecore_pending_job_threads_mutex);

static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static Eina_Bool have_main_loop_thread = 0;

static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;

static void *_ecore_thread_worker(Ecore_Pthread_Data *pth);
static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);

static PH(get_main_loop_thread) (void)
{
   static PH(main_loop_thread);
   static pid_t main_loop_pid;
   pid_t pid = getpid();

   if (pid != main_loop_pid)
     {
        main_loop_pid = pid;
        main_loop_thread = PHS();
        have_main_loop_thread = 1;
     }

   return main_loop_thread;
}
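
/* The pid check above makes the cached thread id self-correcting across
 * fork(): the first caller in a new process re-captures its own thread as
 * the main loop thread. */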

static void
_ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
{
   if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)
     {
        free(worker);
        return;
     }

   /* Track how many workers sit in the trash; _ecore_thread_worker_new()
    * decrements this when it pops one back out. */
   _ecore_thread_worker_count++;
   eina_trash_push(&_ecore_thread_worker_trash, worker);
}

static void
_ecore_thread_data_free(void *data)
{
   Ecore_Thread_Data *d = data;

   if (d->cb) d->cb(d->data);
   free(d);
}

static void
_ecore_thread_pipe_free(void *data __UNUSED__,
                        void *event)
{
   Ecore_Pipe *p = event;

   if (eina_array_count_get(_ecore_thread_pipe) < 50)
     eina_array_push(_ecore_thread_pipe, p);
   else
     ecore_pipe_del(p);
   eina_threads_shutdown();
}

static Eina_Bool
_ecore_thread_pipe_del(void *data __UNUSED__,
                       int type __UNUSED__,
                       void *event __UNUSED__)
{
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;
}

static void
_ecore_thread_end(Ecore_Pthread_Data *pth,
                  Ecore_Thread *work)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)work;
   Ecore_Pipe *p;

   if (!worker->feedback_run || !worker->no_queue)
     _ecore_thread_count--;

   if (PHJ(pth->thread, p) != 0)
     return;

   if (eina_list_count(_ecore_pending_job_threads) > 0
       && (unsigned int)_ecore_thread_count < eina_list_count(_ecore_pending_job_threads)
       && _ecore_thread_count < _ecore_thread_count_max)
     {
        /* One more thread should be created. */
        INF("spawning threads because of still pending jobs.");

        pth->death_job = _ecore_thread_worker_new();
        if (!pth->p || !pth->death_job) goto end;

        eina_threads_init();

        if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
          {
             _ecore_thread_count++;
             return;
          }

        eina_threads_shutdown();

end:
        if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
     }

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
   free(pth);
}

static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *)work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *)work->data, (Ecore_Thread *)work);
     }

   if (work->feedback_run)
     {
        ecore_pipe_del(work->u.feedback_run.notify);

        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   CDD(work->cond);
   LKD(work->mutex);
   if (work->hash)
     eina_hash_free(work->hash);
   free(work);
}

static void
_ecore_thread_handler(void *data __UNUSED__,
                      void *buffer,
                      unsigned int nbyte)
{
   Ecore_Pthread_Worker *work;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return;

   work = *(Ecore_Pthread_Worker **)buffer;

   if (work->feedback_run)
     {
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          {
             work->kill = EINA_TRUE;
             return;
          }
     }

   _ecore_thread_kill(work);
}
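
/* If notifications are still in flight (send != received above), the kill
 * is only flagged; _ecore_notify_handler() performs it once the last
 * notification has been delivered. */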

static void
_ecore_notify_handler(void *data,
                      void *buffer,
                      unsigned int nbyte)
{
   Ecore_Pthread_Worker *work = data;
   void *user_data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);

   /* Force reading all notify events before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }
}

static void
_ecore_short_job(Ecore_Pipe *end_pipe)
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
     {
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        if (!work->cancel)
          work->u.short_run.func_blocking((void *)work->data, (Ecore_Thread *)work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}

static void
_ecore_feedback_job(Ecore_Pipe *end_pipe,
                    PH(thread))
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
     {
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads_feedback)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->self = thread;
        if (!work->cancel)
          work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}

static void *
_ecore_direct_worker(Ecore_Pthread_Worker *work)
{
   Ecore_Pthread_Data *pth;

#ifdef EFL_HAVE_POSIX_THREADS
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

   eina_sched_prio_drop();

   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) return NULL;

   pth->p = work->u.feedback_run.direct_pipe;
   if (!pth->p)
     {
        free(pth);
        return NULL;
     }
   pth->thread = PHS();

   work->self = pth->thread;
   work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   work = work->u.feedback_run.direct_worker;
   if (!work)
     {
        free(pth);
        return NULL;
     }

   work->data = pth;
   work->u.short_run.func_blocking = NULL;
   work->func_end = (void *)_ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   return pth->p;
}

static void *
_ecore_thread_worker(Ecore_Pthread_Data *pth)
{
   Ecore_Pthread_Worker *work;

#ifdef EFL_HAVE_POSIX_THREADS
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

   eina_sched_prio_drop();

restart:
   if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
   if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);

   /* FIXME: Check if there is a feedback job to do, and switch to the feedback run handler. */

   LKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        LKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   LKU(_ecore_pending_job_threads_mutex);

   /* Sleep a little to prevent premature death */
#ifdef _WIN32
   Sleep(1); /* at least 1ms, typically a scheduler tick */
#else
   usleep(200);
#endif

   LKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        LKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   LKU(_ecore_pending_job_threads_mutex);

   work = pth->death_job;
   if (!work) return NULL;

   work->data = pth;
   work->u.short_run.func_blocking = NULL;
   work->func_end = (void *)_ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   return pth->p;
}
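
/* Idle-then-die protocol: when a worker finds both queues empty twice in
 * a row, it turns its pre-allocated death_job into a _ecore_thread_end
 * job and writes it to its pipe, so the main loop joins the thread and
 * recycles its resources. */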

#endif

static Ecore_Pthread_Worker *
_ecore_thread_worker_new(void)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *result;

   result = eina_trash_pop(&_ecore_thread_worker_trash);

   if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
   else _ecore_thread_worker_count--;

   return result;
#else
   return malloc(sizeof (Ecore_Pthread_Worker));
#endif
}

void
_ecore_thread_init(void)
{
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
#endif
}

void
_ecore_thread_shutdown(void)
{
   /* FIXME: If functions are still running in the background, should we kill them? */
   Ecore_Pipe *p;
   Eina_Array_Iterator it;
   unsigned int i;

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   LKL(_ecore_pending_job_threads_mutex);

   EINA_LIST_FREE(_ecore_pending_job_threads, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *)work);
        free(work);
     }

   EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *)work);
        free(work);
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Improve emergency shutdown */
   EINA_LIST_FREE(_ecore_active_job_threads, pth)
     {
        Ecore_Pipe *ep;

        PHA(pth->thread);
        PHJ(pth->thread, ep);

        ecore_pipe_del(pth->p);
     }
   if (_ecore_thread_global_hash)
     eina_hash_free(_ecore_thread_global_hash);
   _ecore_event_handler_del(del_handler);
   have_main_loop_thread = 0;
   del_handler = NULL;

   LKD(_ecore_pending_job_threads_mutex);
   LRWKD(_ecore_thread_global_hash_lock);
   LKD(_ecore_thread_global_hash_mutex);
   CDD(_ecore_thread_global_hash_cond);
#endif

   EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)
     ecore_pipe_del(p);

   eina_array_free(_ecore_thread_pipe);
   _ecore_thread_pipe = NULL;
}

void
_ecore_thread_assert_main_loop_thread(const char *function)
{
   Eina_Bool good;
#ifdef EFL_HAVE_THREADS
   good = PHE(get_main_loop_thread(), PHS());
#else
   good = EINA_TRUE;
#endif
   if (!good)
     {
        EINA_LOG_CRIT("Call to %s from wrong thread!", function);
        abort();
     }
}

EAPI Ecore_Thread *
ecore_thread_run(Ecore_Thread_Cb func_blocking,
                 Ecore_Thread_Cb func_end,
                 Ecore_Thread_Cb func_cancel,
                 const void *data)
{
   Ecore_Pthread_Worker *work;
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Data *pth = NULL;
#endif

   if (!func_blocking) return NULL;

   work = _ecore_thread_worker_new();
   if (!work)
     {
        if (func_cancel)
          func_cancel((void *)data, NULL);
        return NULL;
     }

   work->u.short_run.func_blocking = func_blocking;
   work->func_end = func_end;
   work->func_cancel = func_cancel;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->reschedule = EINA_FALSE;
   work->data = data;

#ifdef EFL_HAVE_THREADS
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   LKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);

   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)work;
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = _ecore_thread_pipe_get();
   pth->death_job = _ecore_thread_worker_new();
   if (!pth->p || !pth->death_job) goto on_error;

   eina_threads_init();

   if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
     {
        _ecore_thread_count++;
        return (Ecore_Thread *)work;
     }

   eina_threads_shutdown();

on_error:
   if (pth)
     {
        if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
        if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
        free(pth);
     }

   if (_ecore_thread_count == 0)
     {
        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
        LKU(_ecore_pending_job_threads_mutex);

        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *)work);
        free(work);
        work = NULL;
     }
   return (Ecore_Thread *)work;
#else
   /*
      Without thread support, and as we don't want to break apps that rely
      on this facility, we block the caller until the work is done.
    */
   do {
        /* Handle reschedule by forcing it here. That blocks the app; an
         * idler would be nicer, but that is overkill for a case where
         * threads should really exist.
         */
        work->reschedule = EINA_FALSE;

        func_blocking((void *)data, (Ecore_Thread *)work);
        if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *)work);
        else func_cancel((void *)data, (Ecore_Thread *)work);
     } while (work->reschedule == EINA_TRUE);

   free(work);

   return NULL;
#endif
}
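
/* Typical usage (illustrative sketch; _job_do(), _job_end() and my_data
 * are hypothetical, not part of this file):
 *
 *   static void
 *   _job_do(void *data, Ecore_Thread *thread)
 *   {
 *      heavy_computation(data);   <- runs in a worker thread
 *   }
 *
 *   static void
 *   _job_end(void *data, Ecore_Thread *thread)
 *   {
 *      update_ui(data);           <- runs back in the main loop
 *   }
 *
 *   ecore_thread_run(_job_do, _job_end, NULL, my_data);
 */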

EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;

   if (!work)
     return EINA_TRUE;
   if (work->cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE;
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   if ((have_main_loop_thread) &&
       (PHE(get_main_loop_thread(), PHS())))
     {
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
on_exit:
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
   return EINA_FALSE;
#else
   (void) thread;
   return EINA_TRUE;
#endif
}

EAPI Eina_Bool
ecore_thread_check(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;

   if (!worker) return EINA_TRUE;
   return worker->cancel;
}
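
/* Cancellation is cooperative for running jobs: the blocking function is
 * expected to poll ecore_thread_check() and bail out early.  Illustrative
 * sketch (the loop body is hypothetical):
 *
 *   static void
 *   _job_do(void *data, Ecore_Thread *thread)
 *   {
 *      int i;
 *      for (i = 0; i < 100000; i++)
 *        {
 *           if (ecore_thread_check(thread)) return;
 *           process_chunk(data, i);
 *        }
 *   }
 */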

EAPI Ecore_Thread *
ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
                          Ecore_Thread_Notify_Cb func_notify,
                          Ecore_Thread_Cb func_end,
                          Ecore_Thread_Cb func_cancel,
                          const void *data,
                          Eina_Bool try_no_queue)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker;
   Ecore_Pthread_Data *pth = NULL;

   if (!func_heavy) return NULL;

   worker = _ecore_thread_worker_new();
   if (!worker) goto on_error;

   worker->u.feedback_run.func_heavy = func_heavy;
   worker->u.feedback_run.func_notify = func_notify;
   worker->hash = NULL;
   CDI(worker->cond);
   LKI(worker->mutex);
   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->data = data;
   worker->cancel = EINA_FALSE;
   worker->feedback_run = EINA_TRUE;
   worker->kill = EINA_FALSE;
   worker->reschedule = EINA_FALSE;

   worker->u.feedback_run.send = 0;
   worker->u.feedback_run.received = 0;

   worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
   worker->u.feedback_run.direct_pipe = NULL;
   worker->u.feedback_run.direct_worker = NULL;

   if (try_no_queue)
     {
        PH(t);

        worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
        worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
        worker->no_queue = EINA_TRUE;

        eina_threads_init();

        if (PHC(t, _ecore_direct_worker, worker) == 0)
          return (Ecore_Thread *)worker;

        eina_threads_shutdown();
     }

   worker->no_queue = EINA_FALSE;

   LKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);

   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)worker;
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = _ecore_thread_pipe_get();
   pth->death_job = _ecore_thread_worker_new();
   if (!pth->p || !pth->death_job) goto on_error;

   eina_threads_init();

   if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
     {
        _ecore_thread_count++;
        return (Ecore_Thread *)worker;
     }

   eina_threads_shutdown();

on_error:
   if (pth)
     {
        if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
        if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
        free(pth);
     }

   if (_ecore_thread_count == 0)
     {
        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
                                                               worker);
        LKU(_ecore_pending_job_threads_mutex);

        if (func_cancel) func_cancel((void *)data, NULL);

        if (worker)
          {
             ecore_pipe_del(worker->u.feedback_run.notify);
             free(worker);
             worker = NULL;
          }
     }

   return (Ecore_Thread *)worker;
#else
   Ecore_Pthread_Worker worker;

   (void)try_no_queue;

   /*
      Without thread support, and as we don't want to break apps that rely
      on this facility, we block the caller until the work is done.
    */
   worker.u.feedback_run.func_heavy = func_heavy;
   worker.u.feedback_run.func_notify = func_notify;
   worker.u.feedback_run.notify = NULL;
   worker.u.feedback_run.send = 0;
   worker.u.feedback_run.received = 0;
   worker.func_cancel = func_cancel;
   worker.func_end = func_end;
   worker.data = data;
   worker.cancel = EINA_FALSE;
   worker.feedback_run = EINA_TRUE;
   worker.kill = EINA_FALSE;

   do {
        worker.reschedule = EINA_FALSE;

        func_heavy((void *)data, (Ecore_Thread *)&worker);

        if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *)&worker);
        else func_end((void *)data, (Ecore_Thread *)&worker);
     } while (worker.reschedule == EINA_TRUE);

   return NULL;
#endif
}
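
/* Typical feedback usage (illustrative sketch; _heavy(), _notify() and
 * the progress pointer are hypothetical):
 *
 *   static void
 *   _heavy(void *data, Ecore_Thread *thread)
 *   {
 *      int i;
 *      for (i = 0; i <= 100; i++)
 *        {
 *           int *progress = malloc(sizeof(int));
 *           *progress = i;
 *           ecore_thread_feedback(thread, progress);
 *           do_step(data, i);
 *        }
 *   }
 *
 *   static void
 *   _notify(void *data, Ecore_Thread *thread, void *msg)
 *   {
 *      int *progress = msg;
 *      update_progress_bar(*progress);   <- back in the main loop
 *      free(progress);
 *   }
 *
 *   ecore_thread_feedback_run(_heavy, _notify, NULL, NULL, my_data,
 *                             EINA_FALSE);
 */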

EAPI Eina_Bool
ecore_thread_feedback(Ecore_Thread *thread,
                      const void *data)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;

   if (!worker) return EINA_FALSE;
   if (!worker->feedback_run) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   worker->u.feedback_run.send++;
   ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));

   return EINA_TRUE;
#else
   worker->u.feedback_run.func_notify((void *)worker->data, thread, (void *)data);

   return EINA_TRUE;
#endif
}

EAPI Eina_Bool
ecore_thread_reschedule(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;

   if (!worker) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;
#endif

   worker->reschedule = EINA_TRUE;
   return EINA_TRUE;
}
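
/* Rescheduling lets a job requeue itself instead of looping forever
 * inside one callback, so other pending jobs get a turn.  Illustrative
 * sketch:
 *
 *   static void
 *   _job_do(void *data, Ecore_Thread *thread)
 *   {
 *      if (!work_done(data))
 *        ecore_thread_reschedule(thread);   <- run again after other jobs
 *   }
 */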

EAPI int
ecore_thread_active_get(void)
{
#ifdef EFL_HAVE_THREADS
   return _ecore_thread_count;
#else
   return 0;
#endif
}

EAPI int
ecore_thread_pending_get(void)
{
#ifdef EFL_HAVE_THREADS
   int ret;

   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

EAPI int
ecore_thread_pending_feedback_get(void)
{
#ifdef EFL_HAVE_THREADS
   int ret;

   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

EAPI int
ecore_thread_pending_total_get(void)
{
#ifdef EFL_HAVE_THREADS
   int ret;

   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

EAPI int
ecore_thread_max_get(void)
{
   return _ecore_thread_count_max;
}

EAPI void
ecore_thread_max_set(int num)
{
   if (num < 1) return;
   /* avoid doing something hilarious by blocking dumb users */
   if (num >= (2 * eina_cpu_count())) return;

   _ecore_thread_count_max = num;
}

EAPI void
ecore_thread_max_reset(void)
{
   _ecore_thread_count_max = eina_cpu_count();
}

EAPI int
ecore_thread_available_get(void)
{
#ifdef EFL_HAVE_THREADS
   int ret;

   LKL(_ecore_pending_job_threads_mutex);
   ret = _ecore_thread_count_max - _ecore_thread_count;
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

EAPI Eina_Bool
ecore_thread_local_data_add(Ecore_Thread *thread,
                            const char *key,
                            void *value,
                            Eina_Free_Cb cb,
                            Eina_Bool direct)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Data *d;
   Eina_Bool ret;
#endif

   if ((!thread) || (!key) || (!value))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   if (!worker->hash)
     worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);

   if (!worker->hash)
     return EINA_FALSE;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return EINA_FALSE;

   d->data = value;
   d->cb = cb;

   if (direct)
     ret = eina_hash_direct_add(worker->hash, key, d);
   else
     ret = eina_hash_add(worker->hash, key, d);
   CDB(worker->cond);
   return ret;
#else
   (void) cb;
   (void) direct;
   return EINA_FALSE;
#endif
}

EAPI void *
ecore_thread_local_data_set(Ecore_Thread *thread,
                            const char *key,
                            void *value,
                            Eina_Free_Cb cb)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Data *d, *r;
   void *ret;
#endif

   if ((!thread) || (!key) || (!value))
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return NULL;

   if (!worker->hash)
     worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);

   if (!worker->hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   r = eina_hash_set(worker->hash, key, d);
   CDB(worker->cond);
   /* eina_hash_set() returns the previous entry, or NULL if the key was new */
   if (!r) return NULL;
   ret = r->data;
   free(r);
   return ret;
#else
   (void) cb;
   return NULL;
#endif
}

EAPI void *
ecore_thread_local_data_find(Ecore_Thread *thread,
                             const char *key)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Data *d;
#endif

   if ((!thread) || (!key))
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return NULL;

   if (!worker->hash)
     return NULL;

   d = eina_hash_find(worker->hash, key);
   if (d)
     return d->data;
   return NULL;
#else
   return NULL;
#endif
}

EAPI Eina_Bool
ecore_thread_local_data_del(Ecore_Thread *thread,
                            const char *key)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
#endif

   if ((!thread) || (!key))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   if (!worker->hash)
     return EINA_FALSE;
   return eina_hash_del_by_key(worker->hash, key);
#else
   return EINA_TRUE;
#endif
}
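
/* Local data is private to the thread that owns the Ecore_Thread and may
 * only be touched from inside its callbacks.  Illustrative sketch (the
 * key and buffer are hypothetical):
 *
 *   static void
 *   _job_do(void *data, Ecore_Thread *thread)
 *   {
 *      char *buf = malloc(512);
 *      ecore_thread_local_data_add(thread, "scratch", buf, free,
 *                                  EINA_FALSE);
 *      ...
 *      buf = ecore_thread_local_data_find(thread, "scratch");
 *   }
 */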

EAPI Eina_Bool
ecore_thread_global_data_add(const char *key,
                             void *value,
                             Eina_Free_Cb cb,
                             Eina_Bool direct)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Thread_Data *d;
   Eina_Bool ret;
#endif

   if ((!key) || (!value))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   LRWKWL(_ecore_thread_global_hash_lock);
   if (!_ecore_thread_global_hash)
     _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LRWKU(_ecore_thread_global_hash_lock);

   if (!_ecore_thread_global_hash)
     return EINA_FALSE;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return EINA_FALSE;

   d->data = value;
   d->cb = cb;

   LRWKWL(_ecore_thread_global_hash_lock);
   if (direct)
     ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
   else
     ret = eina_hash_add(_ecore_thread_global_hash, key, d);
   LRWKU(_ecore_thread_global_hash_lock);
   CDB(_ecore_thread_global_hash_cond);
   return ret;
#else
   (void) cb;
   (void) direct;
   return EINA_TRUE;
#endif
}

EAPI void *
ecore_thread_global_data_set(const char *key,
                             void *value,
                             Eina_Free_Cb cb)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Thread_Data *d, *r;
   void *ret;
#endif

   if ((!key) || (!value))
     return NULL;
#ifdef EFL_HAVE_THREADS
   LRWKWL(_ecore_thread_global_hash_lock);
   if (!_ecore_thread_global_hash)
     _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LRWKU(_ecore_thread_global_hash_lock);

   if (!_ecore_thread_global_hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   LRWKWL(_ecore_thread_global_hash_lock);
   r = eina_hash_set(_ecore_thread_global_hash, key, d);
   LRWKU(_ecore_thread_global_hash_lock);
   CDB(_ecore_thread_global_hash_cond);

   /* eina_hash_set() returns the previous entry, or NULL if the key was new */
   if (!r) return NULL;
   ret = r->data;
   free(r);
   return ret;
#else
   (void) cb;
   return NULL;
#endif
}

EAPI void *
ecore_thread_global_data_find(const char *key)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Thread_Data *ret;
#endif

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash) return NULL;

   LRWKRL(_ecore_thread_global_hash_lock);
   ret = eina_hash_find(_ecore_thread_global_hash, key);
   LRWKU(_ecore_thread_global_hash_lock);
   if (ret)
     return ret->data;
   return NULL;
#else
   return NULL;
#endif
}

EAPI Eina_Bool
ecore_thread_global_data_del(const char *key)
{
#ifdef EFL_HAVE_THREADS
   Eina_Bool ret;
#endif

   if (!key)
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return EINA_FALSE;

   LRWKWL(_ecore_thread_global_hash_lock);
   ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
   LRWKU(_ecore_thread_global_hash_lock);
   return ret;
#else
   return EINA_TRUE;
#endif
}

EAPI void *
ecore_thread_global_data_wait(const char *key,
                              double seconds)
{
#ifdef EFL_HAVE_THREADS
   double tm = 0;
   Ecore_Thread_Data *ret = NULL;
#endif

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return NULL;
   if (seconds > 0)
     tm = ecore_time_get() + seconds;

   while (1)
     {
#ifndef _WIN32
        struct timespec t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_nsec = (long int)((tm - (double)t.tv_sec) * 1000000000);
#else
        struct timeval t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_usec = (long int)((tm - (double)t.tv_sec) * 1000000);
#endif
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        if ((ret) || (seconds == 0) || ((seconds > 0) && (tm <= ecore_time_get())))
          break;
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   (void) seconds;
   return NULL;
#endif
}

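/* ecore_thread_global_data_wait() blocks until another thread publishes
 * the key (or the timeout hits).  Illustrative sketch of a
 * producer/consumer pair (the key and loader function are hypothetical):
 *
 *   worker thread:
 *     ecore_thread_global_data_add("config", load_config(), free,
 *                                  EINA_FALSE);
 *
 *   waiting thread:
 *     Config *cfg = ecore_thread_global_data_wait("config", 5.0);
 */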