diff options
Diffstat (limited to '')
-rw-r--r-- | libraries/evas/src/lib/engines/common/evas_scale_span.c | 653 |
1 files changed, 0 insertions, 653 deletions
diff --git a/libraries/evas/src/lib/engines/common/evas_scale_span.c b/libraries/evas/src/lib/engines/common/evas_scale_span.c deleted file mode 100644 index e0fefb2..0000000 --- a/libraries/evas/src/lib/engines/common/evas_scale_span.c +++ /dev/null | |||
@@ -1,653 +0,0 @@ | |||
1 | #include "evas_common.h" | ||
2 | #include "evas_convert_color.h" | ||
3 | #include "evas_scale_span.h" | ||
4 | |||
/* Scale a span of src_len ARGB32 pixels into dst_len output pixels using
 * 16.16 fixed-point linear interpolation along the span.
 * mask is unused for plain RGBA spans. mul_col (ARGB, 0xffffffff == identity)
 * optionally multiplies every output pixel. dir < 0 fills dst back-to-front.
 * NOTE(review): dst is assumed to have room for dst_len pixels — the caller
 * enforces this; nothing here checks it. */
static void
evas_common_scale_rgba_span_(DATA32 *src, DATA8 *mask __UNUSED__, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int mul = 0, step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!src || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (mul_col != 0xffffffff)
     mul = 1; /* only multiply when mul_col is not the identity color */
   if (dir < 0)
     {
        /* Negative direction: start at the last output pixel and walk back. */
        pdst += dst_len - 1;
        step = -1;
     }

   /* Degenerate case: single source or destination pixel — replicate the
    * first source pixel across the whole destination. Direction does not
    * matter because every output pixel receives the same value. */
   if ((src_len == 1) || (dst_len == 1))
     {
        DATA32 c = *src;

        if (mul) c = MUL4_SYM(mul_col, c);
        while (dst_len--)
          *dst++ = c;
        return;
     }

   /* 1:1 scale: straight copy, optionally color-multiplied. */
   if (src_len == dst_len)
     {
        if (mul)
          {
#ifdef BUILD_MMX
             pxor_r2r(mm0, mm0);
             MOV_A2R(ALPHA_255, mm5)
             MOV_P2R(mul_col, mm7, mm0)
#endif
             while (dst_len--)
               {
#ifdef BUILD_MMX
                  MOV_P2R(*src, mm1, mm0)
                  MUL4_SYM_R2R(mm7, mm1, mm5)
                  MOV_R2P(mm1, *pdst, mm0)
#else
                  *pdst = MUL4_SYM(mul_col, *src);
#endif
                  src++; pdst += step;
               }
             return;
          }
        while (dst_len--)
          {
             *pdst = *src;
             src++; pdst += step;
          }
        return;
     }

   /* General case: walk the source with a 16.16 fixed-point position and
    * linearly blend each pair of neighboring source pixels. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step per output pixel */
      DATA32 sxx = 0;                                        /* fixed-point source position */
      int sx = sxx >> 16;                                    /* integer source index */

#ifdef BUILD_MMX
      pxor_r2r(mm0, mm0);
      MOV_A2R(ALPHA_255, mm5)
      if (mul)
        {
           MOV_P2R(mul_col, mm7, mm0)
        }
#endif
      while (dst_len--)
        {
           DATA32 p2, p1 = 0;
           int a;

           sx = (sxx >> 16);
           if (sx < src_len)
             p1 = *(src + sx);
           p2 = p1; /* clamp: reuse p1 at the right edge */
           if ((sx + 1) < src_len)
             p2 = *(src + sx + 1);
           /* a in [1..256]: 8-bit fractional weight expected by INTERP_256 */
           a = 1 + ((sxx - (sx << 16)) >> 8);
#ifdef BUILD_MMX
           MOV_A2R(a, mm3)
           MOV_P2R(p1, mm1, mm0)
           MOV_P2R(p2, mm2, mm0)
           INTERP_256_R2R(mm3, mm2, mm1, mm5)
           if (mul)
             {
                MUL4_SYM_R2R(mm7, mm1, mm5)
             }
           MOV_R2P(mm1, *pdst, mm0)
#else
           p1 = INTERP_256(a, p2, p1);
           if (mul)
             p1 = MUL4_SYM(mul_col, p1);
           *pdst = p1;
#endif
           pdst += step; sxx += dsxx;
        }
      return;
   }
}
108 | |||
/* Scale a span of ARGB32 pixels together with a parallel 8-bit alpha mask.
 * Both the pixel pair and the mask pair are linearly interpolated in 16.16
 * fixed point, the interpolated mask is applied to the interpolated pixel,
 * and mul_col (0xffffffff == identity) optionally multiplies the result.
 * dir < 0 fills dst back-to-front. */
static void
evas_common_scale_rgba_a8_span_(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int mul = 0, step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!src || !mask || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (mul_col != 0xffffffff)
     mul = 1;
   if (dir < 0)
     {
        pdst += dst_len - 1;
        step = -1;
     }

   /* Single source or destination pixel: replicate one masked value
    * (direction irrelevant — all outputs identical). */
   if ((src_len == 1) || (dst_len == 1))
     {
        DATA32 c = MUL_SYM(*mask, *src);

        if (mul) c = MUL4_SYM(mul_col, c);
        while (dst_len--)
          *dst++ = c;
        return;
     }

   /* 1:1 scale: per-pixel mask (and optional color) multiply, no
    * interpolation needed. */
   if (src_len == dst_len)
     {
#ifdef BUILD_MMX
        pxor_r2r(mm0, mm0);
        MOV_A2R(ALPHA_255, mm5)
#endif
        if (mul)
          {
#ifdef BUILD_MMX
             MOV_P2R(mul_col, mm7, mm0)
#endif
             while (dst_len--)
               {
#ifdef BUILD_MMX
                  MOV_P2R(*src, mm1, mm0)
                  MOV_A2R(*mask, mm3)
                  MUL4_SYM_R2R(mm3, mm1, mm5)
                  MUL4_SYM_R2R(mm7, mm1, mm5)
                  MOV_R2P(mm1, *pdst, mm0)
#else
                  DATA32 c = MUL_SYM(*mask, *src);
                  *pdst = MUL4_SYM(mul_col, c);
#endif
                  src++; mask++; pdst += step;
               }
             return;
          }
        while (dst_len--)
          {
#ifdef BUILD_MMX
             MOV_P2R(*src, mm1, mm0)
             MOV_A2R(*mask, mm3)
             MUL4_SYM_R2R(mm3, mm1, mm5)
             MOV_R2P(mm1, *pdst, mm0)
#else
             *pdst = MUL_SYM(*mask, *src);
#endif
             src++; mask++; pdst += step;
          }
        return;
     }

   /* General case: 16.16 fixed-point walk; interpolate the pixel pair and
    * the mask pair, then combine. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step */
      DATA32 sxx = 0;                                        /* fixed-point position */
      int sx = sxx >> 16;

#ifdef BUILD_MMX
      pxor_r2r(mm0, mm0);
      MOV_A2R(ALPHA_255, mm5)
      if (mul)
        {
           MOV_P2R(mul_col, mm7, mm0)
        }
#endif
      while (dst_len--)
        {
           DATA32 p2, p1 = 0;
           int a, a2, a1 = 0;

           sx = (sxx >> 16);
           if (sx < src_len)
             {
                p1 = *(src + sx);
                a1 = *(mask + sx);
             }
           p2 = p1; a2 = a1; /* clamp at the right edge */
           if ((sx + 1) < src_len)
             {
                p2 = *(src + sx + 1);
                a2 = *(mask + sx + 1);
             }
           /* a in [1..256]: fractional weight for INTERP_256 */
           a = 1 + ((sxx - (sx << 16)) >> 8);
#ifdef BUILD_MMX
           MOV_A2R(a, mm3)
           MOV_P2R(p1, mm1, mm0)
           MOV_P2R(p2, mm2, mm0)
           INTERP_256_R2R(mm3, mm2, mm1, mm5)
           a1 += 1 + ((a * (a2 - a1)) >> 8); /* interpolated mask, biased into [1..256] for MUL4_256 */
           MOV_A2R(a1, mm3)
           MUL4_256_R2R(mm3, mm1)
           if (mul)
             {
                MUL4_SYM_R2R(mm7, mm1, mm5)
             }
           MOV_R2P(mm1, *pdst, mm0)
#else
           p1 = INTERP_256(a, p2, p1);
           a1 += 1 + ((a * (a2 - a1)) >> 8); /* interpolated mask, biased into [1..256] for MUL_256 */
           p1 = MUL_256(a1, p1);
           if (mul)
             p1 = MUL4_SYM(mul_col, p1);
           *pdst = p1;
#endif
           pdst += step; sxx += dsxx;
        }
      return;
   }
}
235 | |||
/* Scale an 8-bit alpha mask span into ARGB32 output: each output pixel is
 * mul_col scaled by the (interpolated) mask coverage. src is unused — only
 * src_len participates (as the mask length). dir < 0 fills back-to-front. */
static void
evas_common_scale_a8_span_(DATA32 *src __UNUSED__, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!mask || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (dir < 0)
     {
        pdst += dst_len - 1;
        step = -1;
     }

   /* Single source or destination pixel: replicate one value
    * (direction irrelevant — all outputs identical). */
   if ((src_len == 1) || (dst_len == 1))
     {
        DATA32 c = MUL_SYM(*mask, mul_col);

        while (dst_len--)
          *dst++ = c;
        return;
     }

#ifdef BUILD_MMX
   pxor_r2r(mm0, mm0);
   MOV_A2R(ALPHA_255, mm5)
   MOV_P2R(mul_col, mm7, mm0)
#endif
   /* 1:1 scale: per-pixel mask multiply of mul_col. */
   if (src_len == dst_len)
     {
        while (dst_len--)
          {
#ifdef BUILD_MMX
             MOV_A2R(*mask, mm3)
             MUL4_SYM_R2R(mm7, mm3, mm5)
             MOV_R2P(mm3, *pdst, mm0)
#else
             *pdst = MUL_SYM(*mask, mul_col);
#endif
             mask++; pdst += step;
          }
        return;
     }

   /* General case: interpolate neighboring mask bytes in 16.16 fixed point
    * and scale mul_col by the result. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step */
      DATA32 sxx = 0;                                        /* fixed-point position */
      int sx = sxx >> 16;

      while (dst_len--)
        {
           int a, a2, a1 = 0;

           sx = (sxx >> 16);
           if (sx < src_len)
             a1 = *(mask + sx);
           a2 = a1; /* clamp at the right edge */
           if ((sx + 1) < src_len)
             a2 = *(mask + sx + 1);
           /* a in [1..256]: fractional weight */
           a = 1 + ((sxx - (sx << 16)) >> 8);
           a1 += 1 + ((a * (a2 - a1)) >> 8); /* interpolated coverage, biased into [1..256] */
#ifdef BUILD_MMX
           MOV_A2R(a1, mm3)
           MUL4_256_R2R(mm7, mm3)
           MOV_R2P(mm3, *pdst, mm0)
#else
           *pdst = MUL_256(a1, mul_col);
#endif
           pdst += step; sxx += dsxx;
        }
      return;
   }
}
310 | |||
/* Clip an existing ARGB32 destination span against an 8-bit mask: each dst
 * pixel is multiplied IN PLACE by the (interpolated) mask coverage and,
 * optionally, by mul_col (0xffffffff == identity). src is unused — src_len
 * is the mask length. dir < 0 walks dst back-to-front. */
static void
evas_common_scale_clip_a8_span_(DATA32 *src __UNUSED__, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int mul = 0, step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!mask || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (mul_col != 0xffffffff)
     mul = 1;
   if (dir < 0)
     {
        pdst += dst_len - 1;
        step = -1;
     }

#ifdef BUILD_MMX
   pxor_r2r(mm0, mm0);
   MOV_A2R(ALPHA_255, mm5)
   if (mul)
     {
        MOV_P2R(mul_col, mm7, mm0)
     }
#endif
   /* Single source or destination pixel: one mask value applied to every
    * dst pixel. Walks dst forward regardless of dir — harmless, since the
    * same factor is applied to each pixel independently. */
   if ((src_len == 1) || (dst_len == 1))
     {
#ifdef BUILD_MMX
        MOV_A2R(*mask, mm3)
#else
        DATA32 c = *mask;
#endif
        if (mul)
          {
#ifdef BUILD_MMX
             MUL4_SYM_R2R(mm7, mm3, mm5)
#else
             c = MUL_SYM(c, mul_col);
#endif
             while (dst_len--)
               {
#ifdef BUILD_MMX
                  MOV_P2R(*dst, mm1, mm0)
                  MUL4_SYM_R2R(mm3, mm1, mm5)
                  MOV_R2P(mm1, *dst, mm0)
#else
                  *dst = MUL4_SYM(c, *dst);
#endif
                  dst++;
               }
             return;
          }
        while (dst_len--)
          {
#ifdef BUILD_MMX
             MOV_P2R(*dst, mm1, mm0)
             MUL4_SYM_R2R(mm3, mm1, mm5)
             MOV_R2P(mm1, *dst, mm0)
#else
             *dst = MUL_SYM(c, *dst);
#endif
             dst++;
          }
        return;
     }

   /* 1:1 scale: multiply each dst pixel by its mask byte (and mul_col). */
   if (src_len == dst_len)
     {
        if (mul)
          {
             while (dst_len--)
               {
#ifdef BUILD_MMX
                  MOV_A2R(*mask, mm3)
                  MUL4_SYM_R2R(mm7, mm3, mm5)
                  MOV_P2R(*pdst, mm1, mm0)
                  MUL4_SYM_R2R(mm3, mm1, mm5)
                  MOV_R2P(mm1, *pdst, mm0)
#else
                  DATA32 c = MUL_SYM(*mask, mul_col);

                  *pdst = MUL4_SYM(c, *pdst);
#endif
                  mask++; pdst += step;
               }
             return;
          }
        while (dst_len--)
          {
#ifdef BUILD_MMX
             MOV_A2R(*mask, mm3)
             MOV_P2R(*pdst, mm1, mm0)
             MUL4_SYM_R2R(mm3, mm1, mm5)
             MOV_R2P(mm1, *pdst, mm0)
#else
             *pdst = MUL_SYM(*mask, *pdst);
#endif
             mask++; pdst += step;
          }
        return;
     }

   /* General case: interpolate mask coverage in 16.16 fixed point and apply
    * it to the existing dst pixels. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step */
      DATA32 sxx = 0;                                        /* fixed-point position */
      int sx = sxx >> 16;

      while (dst_len--)
        {
           int a, a2, a1 = 0;

           sx = (sxx >> 16);
           if (sx < src_len)
             a1 = *(mask + sx);
           a2 = a1; /* clamp at the right edge */
           if ((sx + 1) < src_len)
             a2 = *(mask + sx + 1);
           /* a in [1..256]: fractional weight */
           a = 1 + ((sxx - (sx << 16)) >> 8);
           a1 += 1 + ((a * (a2 - a1)) >> 8); /* interpolated coverage, biased into [1..256] */
#ifdef BUILD_MMX
           MOV_A2R(a1, mm3)
           MOV_P2R(*pdst, mm1, mm0)
           MUL4_256_R2R(mm3, mm1)
           if (mul)
             {
                MUL4_SYM_R2R(mm7, mm1, mm5)
             }
           MOV_R2P(mm1, *pdst, mm0)
#else
           *pdst = MUL_256(a1, *pdst);
           if (mul)
             *pdst = MUL4_SYM(mul_col, *pdst);
#endif
           pdst += step; sxx += dsxx;
        }
      return;
   }
}
449 | |||
/* Public entry point for RGBA span scaling. Delegates to the internal
 * implementation, then calls evas_common_cpu_end_opt() — presumably to reset
 * CPU SIMD state (emms) after any MMX path; its definition is outside this
 * file, so confirm against evas_common. */
EAPI void
evas_common_scale_rgba_span(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   evas_common_scale_rgba_span_(src, mask, src_len, mul_col, dst, dst_len, dir);
   evas_common_cpu_end_opt();
}
456 | |||
/* Public entry point for RGBA+mask span scaling. Delegates to the internal
 * implementation, then resets CPU SIMD state via evas_common_cpu_end_opt()
 * (defined elsewhere). */
EAPI void
evas_common_scale_rgba_a8_span(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   evas_common_scale_rgba_a8_span_(src, mask, src_len, mul_col, dst, dst_len, dir);
   evas_common_cpu_end_opt();
}
463 | |||
/* Public entry point for alpha-mask span scaling. Delegates to the internal
 * implementation, then resets CPU SIMD state via evas_common_cpu_end_opt()
 * (defined elsewhere). */
EAPI void
evas_common_scale_a8_span(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   evas_common_scale_a8_span_(src, mask, src_len, mul_col, dst, dst_len, dir);
   evas_common_cpu_end_opt();
}
470 | |||
/* Public entry point for in-place mask clipping of a destination span.
 * Delegates to the internal implementation, then resets CPU SIMD state via
 * evas_common_cpu_end_opt() (defined elsewhere). */
EAPI void
evas_common_scale_clip_a8_span(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   evas_common_scale_clip_a8_span_(src, mask, src_len, mul_col, dst, dst_len, dir);
   evas_common_cpu_end_opt();
}
477 | |||
/* Scale a span of ARGB32 pixels, interpolating color in HSV space instead of
 * RGB: each pair of neighboring source pixels is converted to HSV, H/S/V are
 * blended separately, and the result is converted back to RGB. Alpha is
 * blended directly in the packed word. mul_col (0xffffffff == identity)
 * optionally multiplies the output; dir < 0 fills dst back-to-front.
 * No MMX path exists for this variant — scalar only. */
EAPI void
evas_common_scale_hsva_span(DATA32 *src, DATA8 *mask __UNUSED__, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int mul = 0, step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!src || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (mul_col != 0xffffffff)
     mul = 1;
   if (dir < 0)
     {
        pdst += dst_len - 1;
        step = -1;
     }

   /* Single source or destination pixel: replicate (no HSV work needed,
    * since there is nothing to interpolate between). */
   if ((src_len == 1) || (dst_len == 1))
     {
        DATA32 c = *src;

        if (mul) c = MUL4_SYM(mul_col, c);
        while (dst_len--)
          *dst++ = c;
        return;
     }

   /* 1:1 scale: straight copy, optionally color-multiplied. */
   if (src_len == dst_len)
     {
        if (mul)
          {
             while (dst_len--)
               {
                  *pdst = MUL4_SYM(mul_col, *src);
                  src++; pdst += step;
               }
             return;
          }
        while (dst_len--)
          {
             *pdst = *src;
             src++; pdst += step;
          }
        return;
     }

   /* General case: 16.16 fixed-point walk with per-pixel HSV round-trip. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step */
      DATA32 sxx = 0;                                        /* fixed-point position */
      int sx = sxx >> 16;

      while (dst_len--)
        {
           DATA32 p2, p1 = 0;
           int a, h1, s1, v1, h2, s2, v2;

           sx = (sxx >> 16);
           if (sx < src_len)
             p1 = *(src + sx);
           evas_common_convert_color_rgb_to_hsv_int((p1 >> 16) & 0xff, (p1 >> 8) & 0xff, p1 & 0xff,
                                                    &h1, &s1, &v1);
           p2 = p1; /* clamp at the right edge */
           if ((sx + 1) < src_len)
             p2 = *(src + sx + 1);
           evas_common_convert_color_rgb_to_hsv_int((p2 >> 16) & 0xff, (p2 >> 8) & 0xff, p2 & 0xff,
                                                    &h2, &s2, &v2);
           /* a in [1..256]: fractional weight */
           a = 1 + ((sxx - (sx << 16)) >> 8);
           h1 += (a * (h2 - h1)) >> 8;
           s1 += (a * (s2 - s1)) >> 8;
           v1 += (a * (v2 - v1)) >> 8;
           /* Blend alpha directly in the packed word: (p >> 8) & 0xff0000
            * puts the alpha byte at bits 16-23, so multiplying the delta by
            * a (<= 256) lands the weighted result in bits 24-31; the final
            * mask keeps only the alpha byte. Relies on unsigned wraparound
            * when p2's alpha is less than p1's. */
           a = (((((p2 >> 8) & 0xff0000) - ((p1 >> 8) & 0xff0000)) * a) +
                (p1 & 0xff000000)) & 0xff000000;
           evas_common_convert_color_hsv_to_rgb_int(h1, s1, v1, &h2, &s2, &v2);
           p1 = a + RGB_JOIN(h2,s2,v2); /* recombine blended alpha with HSV-blended RGB */
           if (mul)
             p1 = MUL4_SYM(mul_col, p1);
           *pdst = p1;
           pdst += step; sxx += dsxx;
        }
      return;
   }
}
560 | |||
/* Scale a span of ARGB32 pixels plus an 8-bit mask, interpolating color in
 * HSV space (see evas_common_scale_hsva_span) and the mask linearly, then
 * applying the interpolated mask and optionally mul_col (0xffffffff ==
 * identity). dir < 0 fills dst back-to-front. Scalar only — no MMX path. */
EAPI void
evas_common_scale_hsva_a8_span(DATA32 *src, DATA8 *mask, int src_len, DATA32 mul_col, DATA32 *dst, int dst_len, int dir)
{
   int mul = 0, step = 1;
   DATA32 *pdst = dst;

   /* Reject NULL buffers and degenerate or oversized spans. */
   if (!src || !mask || !dst) return;
   if ((src_len < 1) || (dst_len < 1)) return;
   if ((src_len > SCALE_SIZE_MAX) || (dst_len > SCALE_SIZE_MAX)) return;
   if (mul_col != 0xffffffff)
     mul = 1;
   if (dir < 0)
     {
        pdst += dst_len - 1;
        step = -1;
     }

   /* Single source or destination pixel: replicate one masked value. */
   if ((src_len == 1) || (dst_len == 1))
     {
        DATA32 c = MUL_SYM(*mask, *src);

        if (mul) c = MUL4_SYM(mul_col, c);
        while (dst_len--)
          *dst++ = c;
        return;
     }

   /* 1:1 scale: per-pixel mask (and optional color) multiply. */
   if (src_len == dst_len)
     {
        if (mul)
          {
             while (dst_len--)
               {
                  DATA32 c = MUL_SYM(*mask, *src);
                  *pdst = MUL4_SYM(mul_col, c);
                  src++; mask++; pdst += step;
               }
             return;
          }
        while (dst_len--)
          {
             *pdst = MUL_SYM(*mask, *src);
             src++; mask++; pdst += step;
          }
        return;
     }

   /* General case: 16.16 fixed-point walk with per-pixel HSV round-trip and
    * linear mask interpolation. */
   {
      DATA32 dsxx = (((src_len - 1) << 16) / (dst_len - 1)); /* fixed-point step */
      DATA32 sxx = 0;                                        /* fixed-point position */
      int sx = sxx >> 16;

      while (dst_len--)
        {
           DATA32 p2, p1 = 0;
           int a, a2, a1 = 0;
           int h1, s1, v1, h2, s2, v2;

           sx = (sxx >> 16);
           if (sx < src_len)
             {
                p1 = *(src + sx);
                a1 = *(mask + sx);
             }
           p2 = p1; a2 = a1; /* clamp at the right edge */
           if ((sx + 1) < src_len)
             {
                p2 = *(src + sx + 1);
                a2 = *(mask + sx + 1);
             }
           evas_common_convert_color_rgb_to_hsv_int((p1 >> 16) & 0xff, (p1 >> 8) & 0xff, p1 & 0xff,
                                                    &h1, &s1, &v1);
           evas_common_convert_color_rgb_to_hsv_int((p2 >> 16) & 0xff, (p2 >> 8) & 0xff, p2 & 0xff,
                                                    &h2, &s2, &v2);
           /* a in [1..256]: fractional weight */
           a = 1 + ((sxx - (sx << 16)) >> 8);
           /* NOTE(review): unlike the rgba_a8/a8 variants, this mask lerp
            * omits the "+ 1" bias before MUL_SYM — looks intentional since
            * a1 here stays a 0..255 coverage for MUL_SYM, not a 1..256
            * weight for MUL_256, but confirm. */
           a1 += (a * (a2 - a1)) >> 8;
           h1 += (a * (h2 - h1)) >> 8;
           s1 += (a * (s2 - s1)) >> 8;
           v1 += (a * (v2 - v1)) >> 8;
           /* Blend alpha directly in the packed word (alpha byte shifted to
            * bits 16-23, weighted delta lands in bits 24-31; see
            * evas_common_scale_hsva_span). */
           a = (((((p2 >> 8) & 0xff0000) - ((p1 >> 8) & 0xff0000)) * a) +
                (p1 & 0xff000000)) & 0xff000000;

           evas_common_convert_color_hsv_to_rgb_int(h1, s1, v1, &h2, &s2, &v2);
           p1 = a + RGB_JOIN(h2,s2,v2); /* recombine blended alpha with HSV-blended RGB */
           p1 = MUL_SYM(a1, p1);        /* apply interpolated mask coverage */
           if (mul)
             p1 = MUL4_SYM(mul_col, p1);
           *pdst = p1;
           pdst += step; sxx += dsxx;
        }
      return;
   }
}