aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/libraries/evas/src/lib/engines/common/evas_pipe.c
diff options
context:
space:
mode:
authorDavid Walter Seikel2012-01-04 18:41:13 +1000
committerDavid Walter Seikel2012-01-04 18:41:13 +1000
commitdd7595a3475407a7fa96a97393bae8c5220e8762 (patch)
treee341e911d7eb911a51684a7412ef7f7c7605d28e /libraries/evas/src/lib/engines/common/evas_pipe.c
parentAdd the skeleton. (diff)
downloadSledjHamr-dd7595a3475407a7fa96a97393bae8c5220e8762.zip
SledjHamr-dd7595a3475407a7fa96a97393bae8c5220e8762.tar.gz
SledjHamr-dd7595a3475407a7fa96a97393bae8c5220e8762.tar.bz2
SledjHamr-dd7595a3475407a7fa96a97393bae8c5220e8762.tar.xz
Add the base Enlightenment Foundation Libraries - eina, eet, evas, ecore, embryo, and edje.
Note that embryo won't be used, but I'm not sure yet if you can build edje without it.
Diffstat (limited to 'libraries/evas/src/lib/engines/common/evas_pipe.c')
-rw-r--r--libraries/evas/src/lib/engines/common/evas_pipe.c1733
1 files changed, 1733 insertions, 0 deletions
diff --git a/libraries/evas/src/lib/engines/common/evas_pipe.c b/libraries/evas/src/lib/engines/common/evas_pipe.c
new file mode 100644
index 0000000..66085c8
--- /dev/null
+++ b/libraries/evas/src/lib/engines/common/evas_pipe.c
@@ -0,0 +1,1733 @@
// THIS IS DEPRECATED. WILL GO EVENTUALLY. NO NEED TO SUPPORT ANYMORE
2
3#include "evas_common.h"
4#include <unistd.h>
5
6#ifdef BUILD_PIPE_RENDER
7
8#ifdef EVAS_FRAME_QUEUING
9#define SCALECACHE
10static Evas_FrameQ gframeq; // global frameQ
11
12static Evas_Surface *
13evas_common_surface_alloc(void *surface, int x, int y, int w, int h)
14{
15 Evas_Surface *e_surface;
16
17 e_surface = calloc(1, sizeof(Evas_Surface));
18 e_surface->im = surface;
19 LKL(e_surface->im->cache_entry.ref_fq_add);
20 e_surface->im->cache_entry.ref_fq[0]++;
21 LKU(e_surface->im->cache_entry.ref_fq_add);
22 e_surface->x = x;
23 e_surface->y = y;
24 e_surface->w = w;
25 e_surface->h = h;
26
27 return e_surface;
28}
29
static void
evas_common_surface_dealloc(Evas_Surface *surface)
{
   Evas_Surface *d_surface;

   /* Walk the surface inlist, counting a release of the frame-queue
    * reference taken in evas_common_surface_alloc() (ref_fq[1]) and
    * freeing each node. */
   while (surface)
     {
        d_surface = surface;
        /* remove d_surface from the list; 'surface' becomes the new head */
        surface = (Evas_Surface *)eina_inlist_remove(EINA_INLIST_GET(surface), EINA_INLIST_GET(d_surface));
        LKL(d_surface->im->cache_entry.ref_fq_del);
        d_surface->im->cache_entry.ref_fq[1]++;
        LKU(d_surface->im->cache_entry.ref_fq_del);
        free(d_surface);
     }
}
45
static void
evas_common_surface_add(Evas_Frame *frame, Evas_Surface *surface)
{
   /* append the surface to the frame's inlist of surfaces to render */
   frame->surfaces = (Evas_Surface *)eina_inlist_append(EINA_INLIST_GET(frame->surfaces), EINA_INLIST_GET(surface));
}
51
52static Evas_Frame *
53evas_common_frame_alloc(void)
54{
55 Evas_Frame *frame;
56
57 frame = calloc(1, sizeof(Evas_Frame));
58 frame->surfaces = NULL;
59 return frame;
60}
61
static void
evas_common_frame_dealloc(Evas_Frame *frame)
{
   /* release every queued surface (and its image refs), then the frame */
   evas_common_surface_dealloc(frame->surfaces);
   free(frame);
}
68
static void
evas_common_frame_add(Evas_FrameQ *frameq, Evas_Frame *frame)
{
   Evas_Frame *temp_frame;

   /* Enqueue a finished frame, blocking while the queue is at capacity
    * (cond_done is broadcast by the post-worker each time a frame has
    * been flushed and removed), then wake one render worker. */
   LKL(frameq->mutex);
   while ((int)eina_inlist_count(EINA_INLIST_GET(frameq->frames)) >= frameq->frameq_sz)
     {
        /* wait a worker thread finish previous frame */
        eina_condition_wait(&(frameq->cond_done));
     }
   frameq->frames = (Evas_Frame *) eina_inlist_append(EINA_INLIST_GET(frameq->frames), EINA_INLIST_GET(frame));

   // this frame need not to be scheduled for flushing time
   /* if the first not-yet-ready frame in the queue is the one we just
    * appended, the post-worker skips flush-time pacing for it */
   EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
     {
        if (!temp_frame->ready)
          {
             break;
          }
     }
   if (temp_frame && temp_frame == frame)
      frame->dont_schedule = 1;

   LKU(frameq->mutex);

   /* wake a render worker waiting in evas_common_frameq_thread() */
   eina_condition_signal(&(frameq->cond_new));
}
97
EAPI Evas_Surface *
evas_common_frameq_new_surface(void *surface, int x, int y, int w, int h)
{
   /* public wrapper: build an Evas_Surface covering (x,y,w,h) of the
    * given engine image and take a frame-queue ref on it */
   return evas_common_surface_alloc(surface, x, y, w, h);
}
103
EAPI void
evas_common_frameq_add_surface(Evas_Surface *surface)
{
   /* queue the surface on the frame currently being built.
    * NOTE(review): assumes evas_common_frameq_prepare_frame() already ran;
    * gframeq.cur_frame == NULL here would crash — confirm callers
    * guarantee the ordering. */
   evas_common_surface_add(gframeq.cur_frame, surface);
}
109
110EAPI void
111evas_common_frameq_set_frame_data(void *data,
112 void (*fn_output_redraws_next_update_push) (void *data, void *surface, int x, int y, int w, int h),
113 void (*fn_output_flush) (void *data),
114 void (*fn_output_set_priv)(void *data, void *cur, void *prev))
115{
116 if (gframeq.cur_frame)
117 {
118 gframeq.cur_frame->data = data;
119 gframeq.cur_frame->output_redraws_next_update_push = fn_output_redraws_next_update_push;
120 gframeq.cur_frame->output_flush = fn_output_flush;
121 gframeq.cur_frame->output_set_priv = fn_output_set_priv;
122 }
123}
124
125EAPI void
126evas_common_frameq_prepare_frame(void)
127{
128 if (!gframeq.cur_frame )
129 {
130 gframeq.cur_frame = evas_common_frame_alloc();
131 }
132}
133
134EAPI void
135evas_common_frameq_ready_frame(void)
136{
137 if (gframeq.cur_frame)
138 {
139 evas_common_frame_add(&gframeq, gframeq.cur_frame);
140 gframeq.cur_frame = NULL; // create a new frame for the next frame later
141 }
142}
143
144
EAPI void
evas_common_frameq_init(void)
{
   /* Set up the global frame queue: empty list, its mutex, and the three
    * condition variables (new frame queued / frame rendered / frame
    * flushed). Worker threads are created later, in
    * evas_common_frameq_begin(). */
   gframeq.frames = NULL;
   LKI(gframeq.mutex);
   eina_condition_new(&(gframeq.cond_new), &(gframeq.mutex));
   eina_condition_new(&(gframeq.cond_ready), &(gframeq.mutex));
   eina_condition_new(&(gframeq.cond_done), &(gframeq.mutex));
   gframeq.initialised = 0; // worker thread are not created yet
   gframeq.frameq_sz = 1; // this value ensures the first frame can be enqueued.
}
156
EAPI void
evas_common_frameq_destroy(void)
{
#if 0 // let them destroyed indirectly with program exit
   LKL(gframeq.mutex);
   eina_condition_free(&(gframeq.cond_new));
   eina_condition_free(&(gframeq.cond_ready));
   eina_condition_free(&(gframeq.cond_done));
   LKU(gframeq.mutex);
#endif
   /* NOTE(review): the condition variables above are deliberately leaked
    * while the mutex is destroyed; only safe at process teardown — confirm
    * no worker can still be blocked on gframeq at this point. */
   LKD(gframeq.mutex);

   gframeq.frames = NULL;
   gframeq.initialised = 0;
}
172
EAPI void
evas_common_frameq_flush(void)
{
   /* Block the caller until every queued frame has been rendered and
    * flushed (cond_done broadcast by the post-worker per frame removed);
    * no-op when frame queuing is not active. */
   if (! evas_common_frameq_enabled())
     return;

   LKL(gframeq.mutex);
   while(eina_inlist_count(EINA_INLIST_GET(gframeq.frames)) > 0)
     {
        /* wait a worker thread finish previous frame */
        eina_condition_wait(&(gframeq.cond_done));
     }
   LKU(gframeq.mutex);
}
187
188
189EAPI void
190evas_common_frameq_flush_ready(void)
191{
192 return;
193}
194
EAPI int
evas_common_frameq_get_frameq_sz(void)
{
   /* maximum number of frames the queue may hold at once */
   return gframeq.frameq_sz;
}
200
EAPI int
evas_common_frameq_enabled(void)
{
   /* non-zero once evas_common_frameq_begin() has spawned the workers */
   return gframeq.initialised;
}
206#endif
207
208static RGBA_Pipe *evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op);
209static void evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op);
210static void evas_common_pipe_op_free(RGBA_Pipe_Op *op);
211
212/* utils */
static RGBA_Pipe *
evas_common_pipe_add(RGBA_Pipe *rpipe, RGBA_Pipe_Op **op)
{
   RGBA_Pipe *p;
   int first_pipe = 0;

   /* Reserve one op slot at the tail of the pipe list, growing the list
    * by one PIPE_LEN-capacity node when the list is empty or the tail
    * node is full. *op points at the reserved slot on success. Returns
    * the (possibly new) list head, or NULL on allocation failure.
    * NOTE(review): on OOM with a non-empty list, callers overwrite their
    * pipe pointer with the NULL return and leak the existing nodes —
    * confirm whether that is acceptable for this deprecated path. */
   if (!rpipe)
     {
        first_pipe = 1;
        p = calloc(1, sizeof(RGBA_Pipe));
        if (!p) return NULL;
        rpipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(rpipe), EINA_INLIST_GET(p));
     }
   p = (RGBA_Pipe *)(EINA_INLIST_GET(rpipe))->last;
   if (p->op_num == PIPE_LEN)
     {
        p = calloc(1, sizeof(RGBA_Pipe));
        if (!p) return NULL;
        rpipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(rpipe), EINA_INLIST_GET(p));
     }
   p->op_num++;
   *op = &(p->op[p->op_num - 1]);
   if (first_pipe)
     {
        /* FIXME: PTHREAD init any thread locks etc */
     }
   return rpipe;
}
241
242static void
243evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op)
244{
245 memcpy(&(op->context), dc, sizeof(RGBA_Draw_Context));
246 if (op->context.cutout.active > 0)
247 {
248 op->context.cutout.rects = malloc(sizeof(Cutout_Rect) * op->context.cutout.active);
249 memcpy(op->context.cutout.rects, dc->cutout.rects, sizeof(Cutout_Rect) * op->context.cutout.active);
250 }
251 else
252 {
253 op->context.cutout.rects = NULL;
254 }
255}
256
static void
evas_common_pipe_op_free(RGBA_Pipe_Op *op)
{
   /* default free_func: release the cutout rects deep-copied into the op
    * by evas_common_pipe_draw_context_copy() */
   evas_common_draw_context_apply_clean_cutouts(&op->context.cutout);
}
262
263#ifdef BUILD_PTHREAD
264/* main api calls */
static void *
evas_common_pipe_thread(void *data)
{
   Thinfo *thinfo;

   /* Render worker loop: blocks on barrier[0] until
    * evas_common_pipe_begin() publishes this thread's slice in
    * thinfo->info, replays every op in the image's pipe against that
    * slice, frees the slice info, then rendezvous with the main thread on
    * barrier[1] (matched in evas_common_pipe_flush()). Never returns. */
// INF("TH [...........");
   thinfo = data;
   for (;;)
     {
        RGBA_Pipe_Thread_Info *info;
        RGBA_Pipe *p;

        /* wait for start signal */
// INF(" TH %i START...", thinfo->thread_num);
        pthread_barrier_wait(&(thinfo->barrier[0]));
        info = thinfo->info;
// if (info)
// {
// thinfo->info = NULL;
// INF(" TH %i GO", thinfo->thread_num);
        EINA_INLIST_FOREACH(EINA_INLIST_GET(info->im->cache_entry.pipe), p)
          {
             int i;

             for (i = 0; i < p->op_num; i++)
               {
                  if (p->op[i].op_func)
                     p->op[i].op_func(info->im, &(p->op[i]), info);
               }
          }
        free(info);
// }
// INF(" TH %i DONE", thinfo->thread_num);
        /* send finished signal */
        pthread_barrier_wait(&(thinfo->barrier[1]));
     }
   return NULL;
}
303
304#ifdef EVAS_FRAME_QUEUING
static void
evas_common_frameq_release(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frameq_Thread_Info *fq_info;
   Thinfo *thinfo;

   /* Thread-cancellation cleanup handler for the frame-queue threads:
    * ensures a thread cancelled inside a cond-wait does not die holding
    * the queue mutex. */
   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   /* This thread may or may not own the mutex.
    * But there's no way to determine the ownership of the mutex, so release it anyway
    */
   /* NOTE(review): unlocking a mutex the thread does not own is undefined
    * behaviour for a default pthread mutex — confirm the LK* macros map to
    * a mutex type that tolerates this. */
   LKU(frameq->mutex);
}
321
static void *
evas_common_frameq_thread(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frame *frame;
   Evas_Surface *surface;
   RGBA_Pipe *p;
   Thinfo *thinfo;
   Evas_Frameq_Thread_Info *fq_info;
   RGBA_Pipe_Thread_Info p_info;

   /* Frame-queue render worker: repeatedly claims the oldest unclaimed
    * frame, replays every surface's pipe ops over the full surface area,
    * pushes the rendered surfaces out, then marks the frame ready and
    * wakes the post-worker (evas_common_frameq_thread_post()). */
   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   /* asynchronous cancellation so evas_common_frameq_finish() can stop
    * this thread even while it is rendering */
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   /* install thread cancelation cleanup handler */
   pthread_cleanup_push(evas_common_frameq_release, data);

   for (;;)
     {
        frame = NULL;

        /* 1. pick a frame to draw */
        LKL(frameq->mutex);
        while(!frame)
          {
             /* scan for the first frame not yet claimed by another worker */
             EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), frame)
               {
                  if (!frame->in_process)
                    {
                       frame->in_process = 1;
                       break;
                    }
               }
             if (frame)
               {
                  break;
               }
             pthread_testcancel();
             eina_condition_wait(&(frameq->cond_new));
          }
        LKU(frameq->mutex);

        /* 2. draw selected frame */
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
          {
             /* whole-surface slice: frameq workers do not split the image */
             p_info.im = surface->im;
             p_info.x = 0;
             p_info.y = 0;
             p_info.w = surface->im->cache_entry.w;
             p_info.h = surface->im->cache_entry.h;

             EINA_INLIST_FOREACH(EINA_INLIST_GET(p_info.im->cache_entry.pipe), p)
               {
                  int i;

                  for (i = 0; i < p->op_num; i++)
                    {
                       if (p->op[i].op_func)
                         {
                            p->op[i].op_func(p_info.im, &(p->op[i]), &p_info);
                         }
                    }
               }

             /* push surface out */
             if (! surface->dontpush)
               {
                  frame->output_redraws_next_update_push(frame->data,
                     surface->im, surface->x, surface->y, surface->w, surface->h);
               }
          }

        // record frame ready time, will be used in post worker thread, evas_common_frameq_thread_post()
        gettimeofday(&frame->ready_time, NULL);

        LKL(frameq->mutex);
        frame->ready = 1;
        eina_condition_signal(&(frameq->cond_ready));
        LKU(frameq->mutex);
     }

   // Remove cleanup handler
   pthread_cleanup_pop(0);
   return NULL;
}
410
411
#define INTERVAL_QSIZE 17 // Actual size is 'INTERVAL_QSIZE - 1' because of not using index
#define SATISFACTION_THRESHOLD 4 // 4 ms --> 250 FPS
#define RESET_RATIO 4 // RESET_RATIO * [Average Ready Gap | get_max_interval()] --> Reset Threshold
#define DOUBLE_RESET_TIME_INTERVAL_THRESHOLD 16000 // make it double in case of less 16ms
#define RESET_ABSOLUTE_INTERVAL 600000 // 600 msec

/* one sample in the frame ready-time history ring */
struct iq_node
{
   long long rt; // ready time in usec (see _IQ_insert())
   long long ri; // ready interval: gap to the previous sample's ready time
};

/* ring buffer of recent frame ready times/intervals, used by
 * evas_common_frameq_schedule_flush_time() to pace flushes */
static struct iq_node _IQ[INTERVAL_QSIZE];
static int _IQ_head = 0, _IQ_tail = 0;
static int _IQ_length = 0;
static long long min_ready, max_ready; // oldest/newest ready time tracked
static long long average_interval;     // (max_ready - min_ready) / _IQ_length
429
430static int
431_IQ_next_index(int i)
432{
433 return (i + 1) % INTERVAL_QSIZE;
434}
435
436static int
437_IQ_previous_index(int i)
438{
439 if (--i < 0) i += INTERVAL_QSIZE;
440 return i;
441}
442
443static void
444_IQ_init(void)
445{
446 _IQ_length = _IQ_head = _IQ_tail = 0;
447 min_ready = LLONG_MAX, max_ready = LLONG_MIN;
448 average_interval = 0;
449}
450
451static int
452_IQ_empty(void)
453{
454 return (_IQ_head == _IQ_tail) ? 1 : 0;
455}
456
457static int
458_IQ_full(void)
459{
460 return (_IQ_head == ((_IQ_tail + 1) % INTERVAL_QSIZE)) ? 1 : 0;
461}
462
static void
_IQ_insert(long long ready_time, long long last_interval)
{
   /* Insert a frame ready-time sample into the ring, keeping samples
    * ordered by ready time and per-sample intervals consistent; silently
    * drops the sample when the ring is full. Recomputes average_interval
    * on every successful insert. */
   if (_IQ_full()) return;

   if (_IQ_empty())
     {
        /* first sample: its interval seeds both ends of the range */
        if (last_interval < 0)
          {
             last_interval = -last_interval;
          }
        _IQ[_IQ_tail].rt = ready_time;
        _IQ[_IQ_tail].ri = last_interval;
        min_ready = ready_time - last_interval;
        max_ready = ready_time;
        _IQ_tail = _IQ_next_index(_IQ_tail);
        _IQ_length++;
     }
   else
     {
        if (max_ready < ready_time)
          {
             /* common case: newest sample, append at the tail */
             _IQ[_IQ_tail].rt = ready_time;
             _IQ[_IQ_tail].ri = ready_time - max_ready;
             _IQ_tail = _IQ_next_index(_IQ_tail);
             _IQ_length++;
             max_ready = ready_time;
          }
        else if (ready_time < min_ready)
          {
             /* older than everything stored: prepend before the head and
              * fix up the old head's interval */
             last_interval = _IQ[_IQ_head].ri;
             _IQ[_IQ_head].ri = _IQ[_IQ_head].rt - ready_time;
             _IQ_head = _IQ_previous_index(_IQ_head);
             _IQ[_IQ_head].rt = ready_time;
             _IQ[_IQ_head].ri = last_interval;
             min_ready = ready_time;
             _IQ_length++;
          }
        else
          {
             /* out-of-order sample inside the stored range: find the
              * insertion point, shift the tail side right by one slot,
              * splice the sample in and repair both neighbouring
              * intervals.
              * NOTE(review): 'j' is only assigned inside the first loop;
              * correctness relies on the ring being non-empty here (it
              * is, per the _IQ_empty() branch above) — confirm. */
             int i, j, k, l = 0;
             for (i = _IQ_head; i != _IQ_tail; i = j)
               {
                  j = _IQ_next_index(i);
                  if (_IQ[j].rt < ready_time)
                    {
                       continue;
                    }
                  break;
               }
             for (k = _IQ_tail; k != j; k = l)
               {
                  l = _IQ_previous_index(k);
                  _IQ[k] = _IQ[l];
               }
             i = _IQ_next_index(j);
             _IQ[j].ri -= (_IQ[j].rt - ready_time);
             _IQ[j].rt = ready_time;
             _IQ[i].ri = _IQ[i].rt - ready_time;
             _IQ_tail = _IQ_next_index(_IQ_tail);
             _IQ_length++;
          }
     }
   average_interval = (max_ready - min_ready) / _IQ_length;
}
528
static long long
_IQ_delete(void)
{
   struct iq_node oldest;

   /* Pop the oldest sample off the ring and return its ready interval;
    * returns 0 when the ring is empty. Resets all statistics when the
    * last sample is removed. */
   if (_IQ_empty()) return 0;
   oldest = _IQ[_IQ_head];
   _IQ_head = (_IQ_head + 1) % INTERVAL_QSIZE;
   if ((--_IQ_length) == 0)
     {
        _IQ_init();
     }
   else
     {
        min_ready = _IQ[_IQ_head].rt;
        average_interval = (max_ready - min_ready) / _IQ_length;
     }

   return oldest.ri;
}
549
550static long long
551get_max_interval(void)
552{
553 int i;
554 long long max = LLONG_MIN;
555
556 for ( i= _IQ_head ; i != _IQ_tail ; i = _IQ_next_index(i))
557 {
558 if (_IQ[i].ri > max)
559 {
560 max = _IQ[i].ri;
561 }
562 }
563
564 return max;
565}
566
static long long
tv_to_long_long(struct timeval *tv)
{
   /* Convert a timeval to microseconds; a NULL pointer maps to 0. */
   if (!tv) return 0;

   return (tv->tv_sec * 1000000LL) + tv->tv_usec;
}
577
static long long
evas_common_frameq_schedule_flush_time(int frameq_sz, int thread_no,
                  long long last_ready_time, long long current_ready_time,
                  long long last_flush_time, int ready_frames_num,
                  int dont_schedule)
{
   /* Pace the post-worker's flushes: track the interval between frame
    * ready times in the _IQ ring and, when frames complete faster than
    * the running average, usleep() so output goes out evenly instead of
    * in bursts. Returns the ready time the caller should pass back in as
    * last_ready_time on the next call. */
   // to get each time and to do others
   long long current_time = 0LL;
   long long current_ready_interval = 0LL;
   long long theshold_time = SATISFACTION_THRESHOLD * 1000LL; // ms -> usec
   long long reset_time_interval = 0LL;
   long long sleep_time = 0LL;
   long long saved_ready_time, saved_ready_interval;
   long long time_slept = 0LL;
   static long long time_lag = 0; // carry-over of over-sleeping between calls
   struct timeval now;
   int frameq_full_threshold =0;
   int need_reset = 0;
   int need_schedule = 0;

   frameq_full_threshold = frameq_sz -thread_no; // Qsize - threads#

   /* 1.5 defer flush time of current frame if need */
   // in case of the first time, just keep ready time only
   if (last_ready_time == 0LL)
     {
        last_ready_time = current_ready_time;
     }
   else
     {
        /* 1.5.1 get current ready time & interval */
        saved_ready_time = current_ready_time;
        saved_ready_interval = current_ready_interval = current_ready_time - last_ready_time;
        // compensate a case which current ready time is older than previous one,
        // doesn't work on the interval queue
        if (current_ready_interval < 0)
          {
             current_ready_time = last_ready_time;
             current_ready_interval = 0;
          }

        /* 1.5.2 get the reset time interval before keeping a new one */
        if (!_IQ_empty())
          {
             reset_time_interval = RESET_RATIO * average_interval;
             if (average_interval < DOUBLE_RESET_TIME_INTERVAL_THRESHOLD)
               {
                  reset_time_interval *= 2;
               }
          }

        /* 1.5.3 reset - if too late, discard all saved interval and start from here */
        if (current_ready_interval > RESET_ABSOLUTE_INTERVAL)
          {
             need_reset = 1;
          }
        else if (_IQ_length >= thread_no * 2 && current_ready_interval > reset_time_interval)
          {
             need_reset = 1;
          }
        else if (_IQ_length >= thread_no && _IQ_length < thread_no * 2
              && current_ready_interval > get_max_interval() * RESET_RATIO)
          {
             need_reset = 1;
          }

        if (need_reset)
          {
             _IQ_init();
          }
        else
          {
             /* 1.5.4 enqueue - keep a new interval for next average interval */
             if (_IQ_full())
               {
                  _IQ_delete();
               }
             _IQ_insert(saved_ready_time, saved_ready_interval);

             /* 1.5.5 schedule - if faster than average interval, figure out sleep time to meet it */
             if (!dont_schedule)
               {
                  need_schedule = 0;
                  sleep_time = 0;
                  if (_IQ_length >= thread_no * 2 && average_interval > theshold_time)
                    {
                       need_schedule = 1;
                    }
                  // compensate the case that postworker blocks the workers from getting a new fresh frame
                  // It's actually occurred when during the wait time of postworker, the frame queue is full
                  // Consequently check the number of currently ready frames and apply some time drop to average time according to the number
                  if (ready_frames_num >= frameq_full_threshold)
                    {
                       need_schedule = 0;
                    }
                  if (need_schedule)
                    {
                       gettimeofday(&now, NULL);
                       current_time = tv_to_long_long(&now);
                       time_lag += (current_time - last_flush_time);
                       sleep_time = (average_interval < time_lag) ? 0 : (average_interval - time_lag);
                    }
               }

             /* 1.5.6 sleep - actually sleep and get over-slept time (time_lag) for next frame */
             if (sleep_time > 0)
               {
                  /* sleep 90% of the target; the measured overshoot feeds
                   * back into time_lag for the next frame */
                  sleep_time = sleep_time * 9 / 10;
                  usleep((unsigned int)sleep_time);
                  gettimeofday(&now, NULL);
                  time_slept = tv_to_long_long(&now) - current_time;
                  time_lag = time_slept - sleep_time;
               }
             else
               {
                  time_lag = 0;
               }
          }
        last_ready_time = current_ready_time;
     }

   return last_ready_time;
}
701
static void *
evas_common_frameq_thread_post(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frame *frame;
   Evas_Surface *surface;
   Thinfo *thinfo;
   Evas_Frameq_Thread_Info *fq_info;
   Eina_List *pending_writes = NULL;
   Eina_List *prev_pending_writes = NULL;

   long long last_ready_time = 0LL;
   long long current_ready_time;
   Evas_Frame *temp_frame = NULL;
   int ready_frames_num;
   long long last_flush_time = 0LL;
   struct timeval now;
   int dont_schedule = 0;

   /* Post-worker (flusher): consumes frames strictly in queue order once
    * the head frame is ready, frees each frame's pipes, paces the flush
    * via evas_common_frameq_schedule_flush_time(), pushes the output, and
    * removes the frame — waking producers blocked in
    * evas_common_frame_add() and waiters in evas_common_frameq_flush(). */
   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   /* deferred cancellation: evas_common_frameq_finish() cancels us and
    * wakes cond_ready so the cond-wait below acts as the cancel point */
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
   /* install thread cancelation cleanup handler */
   pthread_cleanup_push(evas_common_frameq_release, data);

   _IQ_init();

   for (;;)
     {
        /* 1. wait the first frame being done */
        LKL(frameq->mutex);
        while(!frameq->frames || !frameq->frames->ready)
          {
             eina_condition_wait(&(frameq->cond_ready));
          }
        frame = frameq->frames;

        /* 1.5. prepare to schedule flush time */
        current_ready_time = tv_to_long_long(&frame->ready_time);
        ready_frames_num = 0;
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
          {
             if (temp_frame->ready == 1)
               {
                  ready_frames_num++;
               }
          }
        dont_schedule = (frame->dont_schedule)?1:0;
        LKU(frameq->mutex);

        /* 2. generate pending_writes */
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
          {
             /* frees the surface's pipe ops (already executed by workers) */
             evas_common_pipe_flush(surface->im);
             if (! surface->dontpush)
               {
                  pending_writes = eina_list_append(pending_writes, surface->im);
               }
          }

        /* 2.5. schedule flush time */
        last_ready_time = evas_common_frameq_schedule_flush_time(
                              frameq->frameq_sz, frameq->thread_num,
                              last_ready_time, current_ready_time,
                              last_flush_time, ready_frames_num, dont_schedule);

        /* 3. flush redraws */
        frame->output_set_priv(frame->data, pending_writes, prev_pending_writes);
        frame->output_flush(frame->data);
        gettimeofday(&now, NULL);
        // keep as the last flush time
        last_flush_time = now.tv_sec * 1000000LL + now.tv_usec;

        prev_pending_writes = pending_writes;
        pending_writes = NULL;

        /* 4. remove this frame from the frame queue */
        LKL(frameq->mutex);
        frameq->frames =
           (Evas_Frame *)eina_inlist_remove(EINA_INLIST_GET(frameq->frames),
              EINA_INLIST_GET(frame));

        LKU(frameq->mutex);
        /* broadcast: both evas_common_frame_add() and
         * evas_common_frameq_flush() may be waiting on cond_done */
        eina_condition_broadcast(&frameq->cond_done);
        evas_common_frame_dealloc(frame);
     }

   // Remove cleanup handler
   pthread_cleanup_pop(0);
   return NULL;
}
796
797#endif /* EVAS_FRAME_QUEUING */
798#endif
799
#ifdef BUILD_PTHREAD
/* pipeline worker pool: number of render threads, their per-thread state,
 * and the two barriers used to start ([0]) and join ([1]) a render pass */
static int thread_num = 0;
static Thinfo thinfo[TH_MAX];
static pthread_barrier_t thbarrier[2];
#endif
805
static void
evas_common_pipe_begin(RGBA_Image *im)
{
#ifdef BUILD_PTHREAD
   int i, y, h;

   /* Kick the worker pool on im's pipe: split the image into one
    * horizontal band per thread (or interleaved scanlines under EVAS_SLI),
    * hand each worker its slice via thinfo[i].info, and release them
    * through barrier[0]. The matching join is in evas_common_pipe_flush().
    * Under frame queuing the frameq workers drain the pipes instead. */
#ifdef EVAS_FRAME_QUEUING
   return;
#endif

   if (!im->cache_entry.pipe) return;
   if (thread_num == 1) return;
   y = 0;
   h = im->cache_entry.h / thread_num;
   if (h < 1) h = 1;
   for (i = 0; i < thread_num; i++)
     {
        RGBA_Pipe_Thread_Info *info;

// if (y >= im->cache_entry.h) break;
        /* NOTE(review): calloc result is unchecked; an OOM here would
         * crash on info->im below — confirm acceptable for this
         * deprecated path. */
        info = calloc(1, sizeof(RGBA_Pipe_Thread_Info));
        info->im = im;
#ifdef EVAS_SLI
        /* scanline interleave: thread i renders rows i, i+N, i+2N, ... */
        info->x = 0;
        info->w = im->cache_entry.w;
        info->y = i;
        info->h = thread_num;
#else
        /* band split: last thread absorbs the rounding remainder */
        info->x = 0;
        info->y = y;
        info->w = im->cache_entry.w;
        if (i == (thread_num - 1))
          {
             info->h = im->cache_entry.h - y;
          }
        else
          {
             info->h = h;
          }
        y += info->h;
#endif
        thinfo[i].info = info;
     }
   /* tell worker threads to start */
   pthread_barrier_wait(&(thbarrier[0]));
#endif
}
853
854#ifdef EVAS_FRAME_QUEUING
855EAPI void
856evas_common_frameq_begin(void)
857{
858#ifdef BUILD_PTHREAD
859 int i;
860 Evas_Frameq_Thread_Info *fp_info;
861 pthread_attr_t attr;
862 cpu_set_t cpu;
863
864 if (!gframeq.initialised)
865 {
866 int cpunum, set_cpu_affinity = 0;
867
868 cpunum = eina_cpu_count();
869 gframeq.thread_num = cpunum;
870 gframeq.frameq_sz = cpunum * FRAMEQ_SZ_PER_THREAD;
871
872 eina_threads_init();
873
874 for (i = 0; i < gframeq.thread_num; i++)
875 {
876
877 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
878 fp_info->frameq = &gframeq;
879
880 gframeq.thinfo[i].thread_num = i;
881 gframeq.thinfo[i].fq_info = fp_info;
882
883 pthread_attr_init(&attr);
884 if (set_cpu_affinity)
885 {
886 CPU_ZERO(&cpu);
887 CPU_SET((i+1) % cpunum, &cpu);
888 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
889 }
890
891 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
892 evas_common_frameq_thread, &(gframeq.thinfo[i]));
893
894 pthread_attr_destroy(&attr);
895 pthread_detach(gframeq.thinfo[i].thread_id);
896 }
897
898 {
899 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
900 fp_info->frameq = &gframeq;
901
902 gframeq.thinfo[i].thread_num = i;
903 gframeq.thinfo[i].fq_info = fp_info;
904
905 pthread_attr_init(&attr);
906 if (set_cpu_affinity)
907 {
908 CPU_ZERO(&cpu);
909 CPU_SET((i+1) % cpunum, &cpu);
910 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
911 }
912
913 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
914 evas_common_frameq_thread_post, &(gframeq.thinfo[i]));
915 pthread_attr_destroy(&attr);
916 pthread_detach(gframeq.thinfo[i].thread_id);
917 }
918 gframeq.initialised = 1; // now worker threads are created.
919
920 INF("initialised");
921 DBG("%d cpus, set_cpu_affinity=%d, frameq_sz=%d",
922 cpunum, set_cpu_affinity, gframeq.frameq_sz);
923 }
924#endif /* BUILD_PTHREAD */
925}
926
EAPI void
evas_common_frameq_finish(void)
{
   int i;

   /* Stop all frame-queue threads. Render workers use asynchronous
    * cancellation; the post-worker (deferred) is woken via cond_ready so
    * it reaches its cancellation point. The cleanup handler
    * evas_common_frameq_release() releases the queue mutex if a thread
    * dies while holding it. */
   /* 1. cancel all worker threads */
   for (i = 0; i < gframeq.thread_num; i++)
     {
        pthread_cancel(gframeq.thinfo[i].thread_id);
     }
   // cancel post-worker thread
   /* after the loop i == gframeq.thread_num — the slot the post-worker
    * was created in by evas_common_frameq_begin() */
   pthread_cancel(gframeq.thinfo[i].thread_id);

   /* 2. send signal to worker threads so that they enter to the thread cancelation cleanup handler */
   for (i = 0; i < gframeq.thread_num; i++)
     {
        eina_condition_signal(&(gframeq.cond_new));
     }
   // send signal to post-worker thread
   eina_condition_signal(&(gframeq.cond_ready));

   /* all the workers were created and detached before
    * so don't need to join them here.
    */

}
953
954#endif /* EVAS_FRAME_QUEUING */
955
EAPI void
evas_common_pipe_flush(RGBA_Image *im)
{
   /* Execute (or wait for) every queued op on im, then free its pipe:
    * - multiple threads: workers were released in evas_common_pipe_begin();
    *   just rendezvous with them on barrier[1].
    * - single thread: run all ops inline here.
    * - frame queuing: ops were already run by the frameq workers, so only
    *   the free at the bottom happens. */
   if (!im->cache_entry.pipe) return;
#ifndef EVAS_FRAME_QUEUING
#ifdef BUILD_PTHREAD
   if (thread_num > 1)
     {
        /* sync worker threads */
        pthread_barrier_wait(&(thbarrier[1]));
     }
   else
#endif
     {
        RGBA_Pipe *p;
        int i;

        /* process pipe - 1 thead */
        for (p = im->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
          {
             for (i = 0; i < p->op_num; i++)
               {
                  if (p->op[i].op_func)
                    {
                       p->op[i].op_func(im, &(p->op[i]), NULL);
                    }
               }
          }
     }
#endif /* !EVAS_FRAME_QUEUING */
   evas_common_cpu_end_opt();
   evas_common_pipe_free(im);
}
989
EAPI void
evas_common_pipe_free(RGBA_Image *im)
{

   RGBA_Pipe *p;
   int i;

   /* Run each op's free_func (releases copied contexts, font refs,
    * polygon points, ...) and dispose of every pipe node on im. */
   if (!im->cache_entry.pipe) return;
   /* FIXME: PTHREAD join all threads here (if not finished) */

   /* free pipe */
   while (im->cache_entry.pipe)
     {
        p = im->cache_entry.pipe;
        for (i = 0; i < p->op_num; i++)
          {
             if (p->op[i].free_func)
               {
                  p->op[i].free_func(&(p->op[i]));
               }
          }
        im->cache_entry.pipe = (RGBA_Pipe *)eina_inlist_remove(EINA_INLIST_GET(im->cache_entry.pipe), EINA_INLIST_GET(p));
        free(p);
     }
}
1015
1016
1017
1018/* draw ops */
1019/**************** RECT ******************/
static void
evas_common_pipe_rectangle_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   /* Replay a queued rectangle fill. When called from a worker, info
    * carries that thread's slice of the image: clip a private copy of the
    * op's context to it so parallel threads don't share clip state. */
   if (info)
     {
        RGBA_Draw_Context context;

        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_rectangle_draw(dst, &(context),
              op->op.rect.x, op->op.rect.y,
              op->op.rect.w, op->op.rect.h);
     }
   else
     {
        evas_common_rectangle_draw(dst, &(op->context),
              op->op.rect.x, op->op.rect.y,
              op->op.rect.w, op->op.rect.h);
     }
}
1044
1045EAPI void
1046evas_common_pipe_rectangle_draw(RGBA_Image *dst, RGBA_Draw_Context *dc, int x, int y, int w, int h)
1047{
1048 RGBA_Pipe_Op *op;
1049
1050 if ((w < 1) || (h < 1)) return;
1051 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1052 if (!dst->cache_entry.pipe) return;
1053 op->op.rect.x = x;
1054 op->op.rect.y = y;
1055 op->op.rect.w = w;
1056 op->op.rect.h = h;
1057 op->op_func = evas_common_pipe_rectangle_draw_do;
1058 op->free_func = evas_common_pipe_op_free;
1059 evas_common_pipe_draw_context_copy(dc, op);
1060}
1061
1062/**************** LINE ******************/
static void
evas_common_pipe_line_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   /* Replay a queued line draw; with a worker slice (info), clip a
    * private copy of the op's context to the slice first. */
   if (info)
     {
        RGBA_Draw_Context context;

        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_line_draw(dst, &(context),
              op->op.line.x0, op->op.line.y0,
              op->op.line.x1, op->op.line.y1);
     }
   else
     {
        evas_common_line_draw(dst, &(op->context),
              op->op.line.x0, op->op.line.y0,
              op->op.line.x1, op->op.line.y1);
     }
}
1087
1088EAPI void
1089evas_common_pipe_line_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1090 int x0, int y0, int x1, int y1)
1091{
1092 RGBA_Pipe_Op *op;
1093
1094 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1095 if (!dst->cache_entry.pipe) return;
1096 op->op.line.x0 = x0;
1097 op->op.line.y0 = y0;
1098 op->op.line.x1 = x1;
1099 op->op.line.y1 = y1;
1100 op->op_func = evas_common_pipe_line_draw_do;
1101 op->free_func = evas_common_pipe_op_free;
1102 evas_common_pipe_draw_context_copy(dc, op);
1103}
1104
1105/**************** POLY ******************/
static void
evas_common_pipe_op_poly_free(RGBA_Pipe_Op *op)
{
   RGBA_Polygon_Point *p;

   /* free the deep-copied point list created in
    * evas_common_pipe_poly_draw(), then the common op cleanup */
   while (op->op.poly.points)
     {
        p = op->op.poly.points;
        op->op.poly.points = (RGBA_Polygon_Point *)eina_inlist_remove(EINA_INLIST_GET(op->op.poly.points),
                        EINA_INLIST_GET(p));
        free(p);
     }
   evas_common_pipe_op_free(op);
}
1120
static void
evas_common_pipe_poly_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   /* Replay a queued polygon draw; with a worker slice (info), clip a
    * private copy of the op's context to the slice first. The points were
    * already translated at queue time, hence the (0,0) offset. */
   if (info)
     {
        RGBA_Draw_Context context;

        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_polygon_draw(dst, &(context),
              op->op.poly.points, 0, 0);
     }
   else
     {
        evas_common_polygon_draw(dst, &(op->context),
              op->op.poly.points, 0, 0);
     }
}
1143
EAPI void
evas_common_pipe_poly_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
              RGBA_Polygon_Point *points, int x, int y)
{
   RGBA_Pipe_Op *op;
   RGBA_Polygon_Point *pts = NULL, *p, *pp;

   /* Queue a polygon draw: deep-copy the caller's point list translated
    * by (x,y), since the op may run after the caller's list is gone.
    * The copy is freed by evas_common_pipe_op_poly_free(). */
   if (!points) return;
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   /* FIXME: copy points - maybe we should refcount? */
   for (p = points; p; p = (RGBA_Polygon_Point *)(EINA_INLIST_GET(p))->next)
     {
        pp = calloc(1, sizeof(RGBA_Polygon_Point));
        if (pp)
          {
             pp->x = p->x + x;
             pp->y = p->y + y;
             pts = (RGBA_Polygon_Point *)eina_inlist_append(EINA_INLIST_GET(pts), EINA_INLIST_GET(pp));
          }
     }
   op->op.poly.points = pts;
   op->op_func = evas_common_pipe_poly_draw_do;
   op->free_func = evas_common_pipe_op_poly_free;
   evas_common_pipe_draw_context_copy(dc, op);
}
1170
1171/**************** TEXT ******************/
static void
evas_common_pipe_op_text_free(RGBA_Pipe_Op *op)
{
   /* Release the font reference taken in evas_common_pipe_text_draw().
    * Under frame queuing the release is only counted (ref_fq[1]) and
    * waiters in evas_common_pipe_op_text_flush() are woken; otherwise the
    * font is freed directly. */
#ifdef EVAS_FRAME_QUEUING
   LKL(op->op.text.font->ref_fq_del);
   op->op.text.font->ref_fq[1]++;
   LKU(op->op.text.font->ref_fq_del);
   eina_condition_signal(&(op->op.text.font->cond_fq_del));
#else
   evas_common_font_free(op->op.text.font);
#endif
   evas_common_text_props_content_unref(&(op->op.text.intl_props));
   evas_common_pipe_op_free(op);
}
1186
#ifdef EVAS_FRAME_QUEUING
/* flush all op using @fn */
EAPI void
evas_common_pipe_op_text_flush(RGBA_Font *fn)
{
   /* Block until every queued text op holding a reference on fn has been
    * freed, i.e. the add count (ref_fq[0]) equals the delete count
    * (ref_fq[1]); cond_fq_del is signalled per release in
    * evas_common_pipe_op_text_free(). No-op when frame queuing is off. */
   if (! evas_common_frameq_enabled())
     return;

   LKL(fn->ref_fq_add);
   LKL(fn->ref_fq_del);

   while (fn->ref_fq[0] != fn->ref_fq[1])
     eina_condition_wait(&(fn->cond_fq_del));

   LKU(fn->ref_fq_del);
   LKU(fn->ref_fq_add);
}
#endif
1205
/* Worker-side execution of a queued text draw.  When @info is set the
 * op runs on a render thread: work on a private copy of the op's
 * context restricted to that thread's slice/clip region; otherwise
 * draw directly with the op's own context. */
static void
evas_common_pipe_text_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
	RGBA_Draw_Context context;

	memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
	/* Scanline-interleaved mode: restrict to this thread's lines. */
	evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
	/* Band mode: clip to this thread's rectangle. */
	evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
	evas_common_font_draw(dst, &(context),
			      op->op.text.font, op->op.text.x, op->op.text.y,
			      &op->op.text.intl_props);
     }
   else
     {
	evas_common_font_draw(dst, &(op->context),
			      op->op.text.font, op->op.text.x, op->op.text.y,
			      &op->op.text.intl_props);
     }
}
1230
/* Queue a text draw on @dst's pipe.  Takes a reference on @fn and a
 * copy+ref of @intl_props; both are released by
 * evas_common_pipe_op_text_free() when the op is disposed of. */
EAPI void
evas_common_pipe_text_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
			   RGBA_Font *fn, int x, int y, const Evas_Text_Props *intl_props)
{
   RGBA_Pipe_Op *op;

   if (!fn) return;
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   op->op.text.x = x;
   op->op.text.y = y;
   evas_common_text_props_content_copy_and_ref(&(op->op.text.intl_props),
					       intl_props);
#ifdef EVAS_FRAME_QUEUING
   /* Frame queuing counts references in ref_fq[0]; the matching
    * release bumps ref_fq[1] in the op free function. */
   LKL(fn->ref_fq_add);
   fn->ref_fq[0]++;
   LKU(fn->ref_fq_add);
#else
   fn->references++;
#endif
   op->op.text.font = fn;
   op->op_func = evas_common_pipe_text_draw_do;
   op->free_func = evas_common_pipe_op_text_free;
   evas_common_pipe_draw_context_copy(dc, op);
}
1256
/**************** IMAGE *****************/
/* Free function for a queued image op: release the source-image
 * reference taken in evas_common_pipe_image_draw(). */
static void
evas_common_pipe_op_image_free(RGBA_Pipe_Op *op)
{
#ifdef EVAS_FRAME_QUEUING
   /* Count the release and wake evas_common_pipe_op_image_flush()
    * waiters comparing the add/delete counters. */
   LKL(op->op.image.src->cache_entry.ref_fq_del);
   op->op.image.src->cache_entry.ref_fq[1]++;
   LKU(op->op.image.src->cache_entry.ref_fq_del);
   eina_condition_signal(&(op->op.image.src->cache_entry.cond_fq_del));
#else
   op->op.image.src->ref--;
   if (op->op.image.src->ref == 0)
     {
	evas_cache_image_drop(&op->op.image.src->cache_entry);
     }
#endif
   evas_common_pipe_op_free(op);
}
1275
#ifdef EVAS_FRAME_QUEUING
/* Block until every queued op referencing @im has been freed, i.e. the
 * add counter ref_fq[0] equals the delete counter ref_fq[1].  Both
 * counter locks are held for the duration of the wait. */
EAPI void
evas_common_pipe_op_image_flush(RGBA_Image *im)
{
   if (! evas_common_frameq_enabled())
     return;

   LKL(im->cache_entry.ref_fq_add);
   LKL(im->cache_entry.ref_fq_del);

   while (im->cache_entry.ref_fq[0] != im->cache_entry.ref_fq[1])
     eina_condition_wait(&(im->cache_entry.cond_fq_del));

   LKU(im->cache_entry.ref_fq_del);
   LKU(im->cache_entry.ref_fq_add);
}
#endif
1293
/* Worker-side execution of a queued image scale/blit.  When @info is
 * set the op runs on a render thread with a private, clipped copy of
 * the op's context; otherwise it draws with the op's own context.
 * The scale itself goes through the scale cache when SCALECACHE is
 * defined, otherwise straight to the smooth/sampled scalers. */
static void
evas_common_pipe_image_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
	RGBA_Draw_Context context;

	memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
	/* Scanline-interleaved mode: restrict to this thread's lines. */
	evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
	/* Band mode: clip to this thread's rectangle. */
	evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif

#ifdef SCALECACHE
	evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
					     dst, &(context),
					     op->op.image.smooth,
					     op->op.image.sx,
					     op->op.image.sy,
					     op->op.image.sw,
					     op->op.image.sh,
					     op->op.image.dx,
					     op->op.image.dy,
					     op->op.image.dw,
					     op->op.image.dh);
#else
	if (op->op.image.smooth)
	  {
	     evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
							  dst, &(context),
							  op->op.image.sx,
							  op->op.image.sy,
							  op->op.image.sw,
							  op->op.image.sh,
							  op->op.image.dx,
							  op->op.image.dy,
							  op->op.image.dw,
							  op->op.image.dh);
	  }
	else
	  {
	     evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
							  dst, &(context),
							  op->op.image.sx,
							  op->op.image.sy,
							  op->op.image.sw,
							  op->op.image.sh,
							  op->op.image.dx,
							  op->op.image.dy,
							  op->op.image.dw,
							  op->op.image.dh);
	  }
#endif
     }
   else
     {
#ifdef SCALECACHE
	evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
					     dst, &(op->context),
					     op->op.image.smooth,
					     op->op.image.sx,
					     op->op.image.sy,
					     op->op.image.sw,
					     op->op.image.sh,
					     op->op.image.dx,
					     op->op.image.dy,
					     op->op.image.dw,
					     op->op.image.dh);
#else
	if (op->op.image.smooth)
	  {
	     evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
							  dst, &(op->context),
							  op->op.image.sx,
							  op->op.image.sy,
							  op->op.image.sw,
							  op->op.image.sh,
							  op->op.image.dx,
							  op->op.image.dy,
							  op->op.image.dw,
							  op->op.image.dh);
	  }
	else
	  {
	     evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
							  dst, &(op->context),
							  op->op.image.sx,
							  op->op.image.sy,
							  op->op.image.sw,
							  op->op.image.sh,
							  op->op.image.dx,
							  op->op.image.dy,
							  op->op.image.dw,
							  op->op.image.dh);
	  }
#endif
     }
}
1393
/* Queue an image scale/blit of the src_region of @src into the
 * dst_region of @dst.  Takes a reference on @src, released by
 * evas_common_pipe_op_image_free() when the op is disposed of. */
EAPI void
evas_common_pipe_image_draw(RGBA_Image *src, RGBA_Image *dst,
			    RGBA_Draw_Context *dc, int smooth,
			    int src_region_x, int src_region_y,
			    int src_region_w, int src_region_h,
			    int dst_region_x, int dst_region_y,
			    int dst_region_w, int dst_region_h)
{
   RGBA_Pipe_Op *op;

   if (!src) return;
// evas_common_pipe_flush(src);
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   op->op.image.smooth = smooth;
   op->op.image.sx = src_region_x;
   op->op.image.sy = src_region_y;
   op->op.image.sw = src_region_w;
   op->op.image.sh = src_region_h;
   op->op.image.dx = dst_region_x;
   op->op.image.dy = dst_region_y;
   op->op.image.dw = dst_region_w;
   op->op.image.dh = dst_region_h;
#ifdef EVAS_FRAME_QUEUING
   LKL(src->cache_entry.ref_fq_add);
   src->cache_entry.ref_fq[0]++;
   LKU(src->cache_entry.ref_fq_add);
#else
   src->ref++;
#endif
   op->op.image.src = src;
   op->op_func = evas_common_pipe_image_draw_do;
   op->free_func = evas_common_pipe_op_image_free;
   evas_common_pipe_draw_context_copy(dc, op);

#ifdef EVAS_FRAME_QUEUING
   /* load every src image here.
    * frameq utilizes all cpu cores already via worker threads,
    * so extra loader threads and barrier waiting can't be of any
    * benefit; therefore, do not instantiate loader threads.
    */
   if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
     evas_cache_image_load_data(&src->cache_entry);
   evas_common_image_colorspace_normalize(src);
#else
   evas_common_pipe_image_load(src);
#endif
}
1442
1443static void
1444evas_common_pipe_op_map_free(RGBA_Pipe_Op *op)
1445{
1446#ifdef EVAS_FRAME_QUEUING
1447 LKL(op->op.image.src->cache_entry.ref_fq_del);
1448 op->op.image.src->cache_entry.ref_fq[1]++;
1449 LKU(op->op.image.src->cache_entry.ref_fq_del);
1450#else
1451 op->op.map.src->ref--;
1452 if (op->op.map.src->ref == 0)
1453 evas_cache_image_drop(&op->op.map.src->cache_entry);
1454#endif
1455 free(op->op.map.p);
1456 evas_common_pipe_op_free(op);
1457}
1458
/* Worker-side execution of a queued map (warped/projective) draw.
 * When @info is set the op runs on a render thread with a private,
 * clipped copy of the op's context; otherwise it draws with the op's
 * own context. */
static void
evas_common_pipe_map_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
	RGBA_Draw_Context context;

	memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
	/* Scanline-interleaved mode: restrict to this thread's lines. */
	evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
	/* Band mode: clip to this thread's rectangle. */
	evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif

	evas_common_map_rgba(op->op.map.src, dst,
			     &context, op->op.map.npoints, op->op.map.p,
			     op->op.map.smooth, op->op.map.level);
     }
   else
     {
	evas_common_map_rgba(op->op.map.src, dst,
			     &(op->context), op->op.map.npoints, op->op.map.p,
			     op->op.map.smooth, op->op.map.level);
     }
}
1484
1485EAPI void
1486evas_common_pipe_map_draw(RGBA_Image *src, RGBA_Image *dst,
1487 RGBA_Draw_Context *dc, int npoints, RGBA_Map_Point *p,
1488 int smooth, int level)
1489{
1490 RGBA_Pipe_Op *op;
1491 RGBA_Map_Point *pts_copy;
1492 int i;
1493
1494 if (!src) return;
1495 pts_copy = malloc(sizeof (RGBA_Map_Point) * 4);
1496 if (!pts_copy) return;
1497 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1498 if (!dst->cache_entry.pipe)
1499 {
1500 free(pts_copy);
1501 return;
1502 }
1503
1504 for (i = 0; i < 4; ++i)
1505 pts_copy[i] = p[i];
1506
1507 op->op.map.npoints = npoints;
1508 op->op.map.smooth = smooth;
1509 op->op.map.level = level;
1510#ifdef EVAS_FRAME_QUEUING
1511 LKL(src->cache_entry.ref_fq_add);
1512 src->cache_entry.ref_fq[0]++;
1513 LKU(src->cache_entry.ref_fq_add);
1514#else
1515 src->ref++;
1516#endif
1517 op->op.map.src = src;
1518 op->op.map.p = pts_copy;
1519 op->op_func = evas_common_pipe_map_draw_do;
1520 op->free_func = evas_common_pipe_op_map_free;
1521 evas_common_pipe_draw_context_copy(dc, op);
1522
1523#ifdef EVAS_FRAME_QUEUING
1524 /* laod every src image here.
1525 * frameq utilize all cpu cores already by worker threads
1526 * so another threads and barrier waiting can't be of any benefit.
1527 * therefore, not instantiate loader threads.
1528 */
1529 if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1530 evas_cache_image_load_data(&src->cache_entry);
1531 evas_common_image_colorspace_normalize(src);
1532#else
1533 evas_common_pipe_image_load(src);
1534#endif
1535}
1536
1537static void
1538evas_common_pipe_map_render(RGBA_Image *root)
1539{
1540 RGBA_Pipe *p;
1541 int i;
1542
1543 /* Map imply that we need to process them recursively first. */
1544 for (p = root->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
1545 {
1546 for (i = 0; i < p->op_num; i++)
1547 {
1548 if (p->op[i].op_func == evas_common_pipe_map_draw_do)
1549 {
1550 if (p->op[i].op.map.src->cache_entry.pipe)
1551 evas_common_pipe_map_render(p->op[i].op.map.src);
1552 }
1553 else if (p->op[i].op_func == evas_common_pipe_image_draw_do)
1554 {
1555 if (p->op[i].op.image.src->cache_entry.pipe)
1556 evas_common_pipe_map_render(p->op[i].op.image.src);
1557 }
1558 }
1559 }
1560
1561 evas_common_pipe_begin(root);
1562 evas_common_pipe_flush(root);
1563}
1564
#ifdef BUILD_PTHREAD
/* Images queued for asynchronous decoding; shared with the loader
 * threads and protected by task_mutext while being popped. */
static Eina_List *task = NULL;
/* Per-thread state for the image-loader pool. */
static Thinfo task_thinfo[TH_MAX];
/* Barrier pair: [0] releases the loader pool, [1] collects it again. */
static pthread_barrier_t task_thbarrier[2];
static LK(task_mutext);
#endif
1571
#ifdef BUILD_PTHREAD
/* Image-loader worker thread main loop.  Each worker parks on
 * barrier[0] until evas_common_pipe_image_load_do() releases the
 * pool, then the workers race to pop images off the shared 'task'
 * list, decode and colorspace-normalize each one, and finally meet on
 * barrier[1] so the caller knows the queue is drained.
 * NOTE(review): 'task' is read outside the lock in the while
 * condition; a stale read only costs one extra loop iteration, which
 * the 'if (im)' guard below absorbs - confirm this is intended. */
static void*
evas_common_pipe_load(void *data)
{
   Thinfo *tinfo;

   tinfo = data;
   for (;;)
     {
	/* wait for start signal */
	pthread_barrier_wait(&(tinfo->barrier[0]));

	while (task)
	  {
	     RGBA_Image *im = NULL;

	     /* Pop the head of the queue under the task lock. */
	     LKL(task_mutext);
	     im = eina_list_data_get(task);
	     task = eina_list_remove_list(task, task);
	     LKU(task_mutext);

	     if (im)
	       {
		  if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
		    evas_cache_image_load_data(&im->cache_entry);
		  evas_common_image_colorspace_normalize(im);

		  im->flags &= ~RGBA_IMAGE_TODO_LOAD;
	       }
	  }

	/* send finished signal */
	pthread_barrier_wait(&(tinfo->barrier[1]));
     }

   return NULL;
}
#endif
1610
/* NOTE(review): appears unused in this file - candidate for removal. */
static volatile int bval = 0;
1612
/* Run the loader pool over the pending 'task' queue: barrier [0]
 * releases the workers, barrier [1] blocks until they have all
 * finished.  A no-op without BUILD_PTHREAD. */
static void
evas_common_pipe_image_load_do(void)
{
#ifdef BUILD_PTHREAD
   /* Notify worker thread. */
   pthread_barrier_wait(&(task_thbarrier[0]));

   /* sync worker threads */
   pthread_barrier_wait(&(task_thbarrier[1]));
#endif
}
1624
/* Lazily spin up the render and loader thread pools (one thread per
 * CPU each) on first call.  Returns EINA_TRUE when threaded pipe
 * rendering is usable, EINA_FALSE otherwise (no pthread support, or a
 * single-CPU machine where threading brings no benefit).
 * NOTE(review): pthread_create/pthread_barrier_init return values are
 * not checked - a failed create would leave a barrier short one
 * participant and deadlock; confirm this is acceptable here. */
static Eina_Bool
evas_common_pipe_init(void)
{
#ifdef BUILD_PTHREAD
   if (thread_num == 0)
     {
	int cpunum;
	int i;

	cpunum = eina_cpu_count();
	thread_num = cpunum;
// on single cpu we still want this initted.. otherwise we block forever
// waiting on pthread barriers for async rendering on a single core!
//	if (thread_num == 1) return EINA_FALSE;

	eina_threads_init();

	LKI(task_mutext);

	/* Render pool: barriers sized for the workers + the caller. */
	pthread_barrier_init(&(thbarrier[0]), NULL, thread_num + 1);
	pthread_barrier_init(&(thbarrier[1]), NULL, thread_num + 1);
	for (i = 0; i < thread_num; i++)
	  {
	     pthread_attr_t attr;
	     cpu_set_t cpu;

	     pthread_attr_init(&attr);
	     CPU_ZERO(&cpu);
	     /* Pin each worker to one core (GNU-specific affinity API). */
	     CPU_SET(i % cpunum, &cpu);
	     pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
	     thinfo[i].thread_num = i;
	     thinfo[i].info = NULL;
	     thinfo[i].barrier = thbarrier;
	     /* setup initial locks */
	     pthread_create(&(thinfo[i].thread_id), &attr,
			    evas_common_pipe_thread, &(thinfo[i]));
	     pthread_attr_destroy(&attr);
	  }

	/* Loader pool: same layout, runs evas_common_pipe_load(). */
	pthread_barrier_init(&(task_thbarrier[0]), NULL, thread_num + 1);
	pthread_barrier_init(&(task_thbarrier[1]), NULL, thread_num + 1);
	for (i = 0; i < thread_num; i++)
	  {
	     pthread_attr_t attr;
	     cpu_set_t cpu;

	     pthread_attr_init(&attr);
	     CPU_ZERO(&cpu);
	     CPU_SET(i % cpunum, &cpu);
	     pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
	     task_thinfo[i].thread_num = i;
	     task_thinfo[i].info = NULL;
	     task_thinfo[i].barrier = task_thbarrier;
	     /* setup initial locks */
	     pthread_create(&(task_thinfo[i].thread_id), &attr,
			    evas_common_pipe_load, &(task_thinfo[i]));
	     pthread_attr_destroy(&attr);
	  }
     }

   if (thread_num == 1) return EINA_FALSE;
   return EINA_TRUE;
#endif
   return EINA_FALSE;
}
1690
/* Queue @im for asynchronous decoding by the loader pool.  The image
 * is appended to the shared 'task' list and flagged TODO_LOAD so it is
 * queued at most once; the actual decode happens when
 * evas_common_pipe_image_load_do() runs the pool.
 * NOTE(review): the second condition also queues images with no
 * cs.data, or with clean colorspace data and a clean image -
 * presumably to force colorspace normalization; confirm against
 * evas_common_image_colorspace_normalize(). */
EAPI void
evas_common_pipe_image_load(RGBA_Image *im)
{
   if (im->flags & RGBA_IMAGE_TODO_LOAD)
     return ;

   /* Undecoded ARGB8888 pixel data needs a load pass. */
   if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888
       && !evas_cache_image_is_loaded(&(im->cache_entry)))
     goto add_task;

   if ((!im->cs.data) || ((!im->cs.dirty) && (!(im->flags & RGBA_IMAGE_IS_DIRTY))))
     goto add_task;

   return ;

 add_task:
   task = eina_list_append(task, im);
   im->flags |= RGBA_IMAGE_TODO_LOAD;
}
1710
1711EAPI void
1712evas_common_pipe_map_begin(RGBA_Image *root)
1713{
1714 if (!evas_common_pipe_init())
1715 {
1716 RGBA_Image *im;
1717
1718 EINA_LIST_FREE(task, im)
1719 {
1720 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1721 evas_cache_image_load_data(&im->cache_entry);
1722 evas_common_image_colorspace_normalize(im);
1723
1724 im->flags &= ~RGBA_IMAGE_TODO_LOAD;
1725 }
1726 }
1727
1728 evas_common_pipe_image_load_do();
1729
1730 evas_common_pipe_map_render(root);
1731}
1732
1733#endif