pixman-pict.c
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_b5g6r5,   fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_r8g8b8,   fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_b8g8r8,   fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx1xn, 0 },
    { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a1, PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx1xn, 0 },
#endif
    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fbCompositeSrcAdd_8888x8888, 0 },
    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fbCompositeSrcAdd_8888x8888, 0 },
    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null, PIXMAN_a8,       fbCompositeSrcAdd_8000x8000, 0 },
#if 0 /* FIXME */
    { PIXMAN_OP_ADD,  PIXMAN_a1,       PIXMAN_null, PIXMAN_a1,       fbCompositeSrcAdd_1000x1000, 0 },
#endif
    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,   PIXMAN_a8,       fbCompositeSrcAdd_8888x8x8,  0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_a8r8g8b8, fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_x8r8g8b8, fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_a8b8g8r8, fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_x8b8g8r8, fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_a8,       fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_solid,    PIXMAN_null, PIXMAN_r5g6b5,   fbCompositeSolidFill, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fbCompositeSrc_8888xx888, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fbCompositeSrc_8888xx888, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fbCompositeSrc_8888xx888, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fbCompositeSrc_8888xx888, 0 },
#if 0 /* FIXME */
    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fbCompositeSrcSrc_nxn, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fbCompositeSrcSrc_nxn, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fbCompositeSrcSrc_nxn, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fbCompositeSrcSrc_nxn, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null, PIXMAN_r5g6b5,   fbCompositeSrcSrc_nxn, 0 },
    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null, PIXMAN_b5g6r5,   fbCompositeSrcSrc_nxn, 0 },
#endif
    { PIXMAN_OP_IN,   PIXMAN_a8,       PIXMAN_null, PIXMAN_a8,       fbCompositeSrcIn_8x8, 0 },
    { PIXMAN_OP_IN,   PIXMAN_solid,    PIXMAN_a8,   PIXMAN_a8,       fbCompositeSolidMaskIn_nx8x8, 0 },
    { PIXMAN_OP_NONE },
};

static pixman_bool_t
mask_is_solid (pixman_image_t *mask)
{
    if (mask->type == SOLID)
        return TRUE;

    /* A repeating 1x1 bits image is effectively a solid color too. */
    if (mask->type == BITS &&
        mask->common.repeat == PIXMAN_REPEAT_NORMAL &&
        mask->bits.width == 1 &&
        mask->bits.height == 1)
    {
        return TRUE;
    }

    return FALSE;
}

static const FastPathInfo *
get_fast_path (const FastPathInfo *fast_paths,
               pixman_op_t         op,
               pixman_image_t     *pSrc,
               pixman_image_t     *pMask,
               pixman_image_t     *pDst,
               pixman_bool_t       is_pixbuf)
{
    const FastPathInfo *info;

    for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
    {
        pixman_bool_t valid_src = FALSE;
        pixman_bool_t valid_mask = FALSE;

        if (info->op != op)
            continue;

        /* The source matches either as a solid color or as a bits image
         * of exactly the format the entry asks for. */
        if ((info->src_format == PIXMAN_solid && can_get_solid (pSrc)) ||
            (pSrc->type == BITS && info->src_format == pSrc->bits.format))
        {
            valid_src = TRUE;
        }

        if (!valid_src)
            continue;

        if ((info->mask_format == PIXMAN_null && !pMask) ||
            (pMask && pMask->type == BITS && info->mask_format == pMask->bits.format))
        {
            valid_mask = TRUE;

            if (info->flags & NEED_SOLID_MASK)
            {
                if (!pMask || !mask_is_solid (pMask))
                    valid_mask = FALSE;
            }

            if (info->flags & NEED_COMPONENT_ALPHA)
            {
                if (!pMask || !pMask->common.component_alpha)
                    valid_mask = FALSE;
            }
        }

        if (!valid_mask)
            continue;

        if (info->dest_format != pDst->bits.format)
            continue;

        if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
            continue;

        return info;
    }

    return NULL;
}
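/* Annotation (not part of the original source): each FastPathInfo row
 * binds one (operator, source format, mask format, destination format)
 * tuple to a specialized blitter, and get_fast_path() returns the first
 * row that matches.  A minimal sketch of a private table follows;
 * `my_copy_0565` and `lookup_0565_copy` are hypothetical names, everything
 * else uses identifiers from this file.
 */
#if 0
static const FastPathInfo my_fast_paths[] =
{
    /* op,           src,           mask,        dest,           func,         flags */
    { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5,  my_copy_0565, 0 },
    { PIXMAN_OP_NONE },  /* sentinel: terminates get_fast_path()'s scan */
};

static const FastPathInfo *
lookup_0565_copy (pixman_image_t *src, pixman_image_t *dest)
{
    /* Returns the row above for an unmasked 16-bpp copy, or NULL. */
    return get_fast_path (my_fast_paths, PIXMAN_OP_SRC, src, NULL, dest, FALSE);
}
#endif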
void
pixman_image_composite (pixman_op_t      op,
                        pixman_image_t  *pSrc,
                        pixman_image_t  *pMask,
                        pixman_image_t  *pDst,
                        int16_t          xSrc,
                        int16_t          ySrc,
                        int16_t          xMask,
                        int16_t          yMask,
                        int16_t          xDst,
                        int16_t          yDst,
                        uint16_t         width,
                        uint16_t         height)
{
    pixman_bool_t srcRepeat = pSrc->type == BITS && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL;
    pixman_bool_t maskRepeat = FALSE;
    pixman_bool_t srcTransform = pSrc->common.transform != NULL;
    pixman_bool_t maskTransform = FALSE;
    pixman_bool_t srcAlphaMap = pSrc->common.alpha_map != NULL;
    pixman_bool_t maskAlphaMap = FALSE;
    pixman_bool_t dstAlphaMap = pDst->common.alpha_map != NULL;
    CompositeFunc func = NULL;

#ifdef USE_SSE2
    fbComposeSetupSSE();
#endif

#ifdef USE_MMX
    fbComposeSetupMMX();
#endif

    /* A transformed, repeating 1x1 source is still effectively a solid
     * color, so the transform can be ignored. */
    if (srcRepeat && srcTransform &&
        pSrc->bits.width == 1 &&
        pSrc->bits.height == 1)
    {
        srcTransform = FALSE;
    }

    if (pMask && pMask->type == BITS)
    {
        maskRepeat = pMask->common.repeat == PIXMAN_REPEAT_NORMAL;

        maskTransform = pMask->common.transform != NULL;
        if (pMask->common.filter == PIXMAN_FILTER_CONVOLUTION)
            maskTransform = TRUE;

        maskAlphaMap = pMask->common.alpha_map != NULL;

        if (maskRepeat && maskTransform &&
            pMask->bits.width == 1 &&
            pMask->bits.height == 1)
        {
            maskTransform = FALSE;
        }
    }

    /* Fast paths only apply to untransformed, unfiltered images without
     * alpha maps or custom accessors. */
    if ((pSrc->type == BITS || can_get_solid (pSrc)) &&
        (!pMask || pMask->type == BITS) &&
        !srcTransform && !maskTransform &&
        !maskAlphaMap && !srcAlphaMap && !dstAlphaMap &&
        (pSrc->common.filter != PIXMAN_FILTER_CONVOLUTION) &&
        (pSrc->common.repeat != PIXMAN_REPEAT_PAD) &&
        (!pMask || (pMask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
                    pMask->common.repeat != PIXMAN_REPEAT_PAD)) &&
        !pSrc->common.read_func && !pSrc->common.write_func &&
        !(pMask && pMask->common.read_func) &&
        !(pMask && pMask->common.write_func) &&
        !pDst->common.read_func && !pDst->common.write_func)
    {
        const FastPathInfo *info;
        pixman_bool_t pixbuf;

        /* "pixbuf" means source and mask share the same bits and origin,
         * so a specialized path can read both in one pass. */
        pixbuf =
            pSrc && pSrc->type == BITS          &&
            pMask && pMask->type == BITS        &&
            pSrc->bits.bits == pMask->bits.bits &&
            xSrc == xMask                       &&
            ySrc == yMask                       &&
            !pMask->common.component_alpha      &&
            !maskRepeat;

        info = NULL;

#ifdef USE_SSE2
        if (pixman_have_sse ())
            info = get_fast_path (sse_fast_paths, op, pSrc, pMask, pDst, pixbuf);
        if (!info)
#endif
#ifdef USE_MMX
        if (pixman_have_mmx ())
            info = get_fast_path (mmx_fast_paths, op, pSrc, pMask, pDst, pixbuf);
        if (!info)
#endif
            info = get_fast_path (c_fast_paths, op, pSrc, pMask, pDst, pixbuf);

        if (info)
        {
            func = info->func;

            if (info->src_format == PIXMAN_solid)
                srcRepeat = FALSE;

            if (info->mask_format == PIXMAN_solid ||
                (info->flags & NEED_SOLID_MASK))
            {
                maskRepeat = FALSE;
            }
        }
    }

    if ((srcRepeat &&
         pSrc->bits.width == 1 &&
         pSrc->bits.height == 1) ||
        (maskRepeat &&
         pMask->bits.width == 1 &&
         pMask->bits.height == 1))
    {
        /* If src or mask are repeating 1x1 images and srcRepeat or
         * maskRepeat are still TRUE, it means the fast path we
         * selected does not actually handle repeating images.
         *
         * So rather than call the "fast path" with a zillion
         * 1x1 requests, we just use the general code (which does
         * do something sensible with 1x1 repeating images).
         */
        func = NULL;
    }

    if (!func)
    {
        func = pixman_image_composite_rect;

        /* CompositeGeneral optimizes 1x1 repeating images itself */
        if (pSrc->type == BITS &&
            pSrc->bits.width == 1 && pSrc->bits.height == 1)
        {
            srcRepeat = FALSE;
        }

        if (pMask && pMask->type == BITS &&
            pMask->bits.width == 1 && pMask->bits.height == 1)
        {
            maskRepeat = FALSE;
        }

        /* if we are transforming, repeats are handled in fbFetchTransformed */
        if (srcTransform)
            srcRepeat = FALSE;

        if (maskTransform)
            maskRepeat = FALSE;
    }

    pixman_walk_composite_region (op, pSrc, pMask, pDst,
                                  xSrc, ySrc, xMask, yMask, xDst, yDst,
                                  width, height,
                                  srcRepeat, maskRepeat, func);
}
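/* Usage sketch (annotation, not part of the original source): compositing
 * an opaque red solid over a 64x64 x8r8g8b8 destination.  With no
 * transform, filter, or alpha map in play, pixman_image_composite() will
 * use a fast path if the tables above contain a matching entry, and
 * otherwise fall back to pixman_image_composite_rect.  The constructors
 * below are the public pixman API; error checking is omitted for brevity.
 */
#if 0
pixman_color_t red = { 0xffff, 0x0000, 0x0000, 0xffff };  /* r, g, b, a */
pixman_image_t *src  = pixman_image_create_solid_fill (&red);
pixman_image_t *dest = pixman_image_create_bits (PIXMAN_x8r8g8b8,
                                                 64, 64, NULL, 0);

pixman_image_composite (PIXMAN_OP_OVER, src, NULL, dest,
                        0, 0,     /* xSrc,  ySrc  */
                        0, 0,     /* xMask, yMask */
                        0, 0,     /* xDst,  yDst  */
                        64, 64);  /* width, height */

pixman_image_unref (src);
pixman_image_unref (dest);
#endif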
#ifdef USE_MMX

/* The CPU detection code needs to be in a file not compiled with
 * "-mmmx -msse", as gcc would generate CMOV instructions otherwise
 * that would lead to SIGILL instructions on old CPUs that don't have
 * it.
 */

#if !defined(__amd64__) && !defined(__x86_64__)

#ifdef HAVE_GETISAX
#include <sys/auxv.h>
#endif

enum CPUFeatures {
    NoFeatures = 0,
    MMX = 0x1,
    MMX_Extensions = 0x2,
    SSE = 0x6,
    SSE2 = 0x8,
    CMOV = 0x10
};
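/* Annotation (not part of the original source): the flag values are not
 * all single bits -- SSE is 0x6, which includes the MMX_Extensions bit
 * (0x2), presumably so that an SSE claim also implies the extended MMX
 * instructions.  A hypothetical self-test of the combined masks used by
 * the pixman_have_* functions further down:
 */
#if 0
#include <assert.h>

static void
check_feature_masks (void)
{
    assert ((MMX | MMX_Extensions) == 0x3);              /* pixman_have_mmx() */
    assert ((MMX | MMX_Extensions | SSE | SSE2) == 0xf); /* pixman_have_sse() */
}
#endif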
*/ __asm__ ("pushf\n" "pop %%eax\n" "mov %%eax, %%ecx\n" "xor $0x00200000, %%eax\n" "push %%eax\n" "popf\n" "pushf\n" "pop %%eax\n" "mov $0x0, %%edx\n" "xor %%ecx, %%eax\n" "jz 1f\n" "mov $0x00000000, %%eax\n" "push %%ebx\n" "cpuid\n" "mov %%ebx, %%eax\n" "pop %%ebx\n" "mov %%eax, %1\n" "mov %%edx, %2\n" "mov %%ecx, %3\n" "mov $0x00000001, %%eax\n" "push %%ebx\n" "cpuid\n" "pop %%ebx\n" "1:\n" "mov %%edx, %0\n" : "=r" (result), "=m" (vendor[0]), "=m" (vendor[4]), "=m" (vendor[8]) : : "%eax", "%ecx", "%edx" );#elif defined (_MSC_VER) _asm { pushfd pop eax mov ecx, eax xor eax, 00200000h push eax popfd pushfd pop eax mov edx, 0 xor eax, ecx jz nocpuid mov eax, 0 push ebx cpuid mov eax, ebx pop ebx mov vendor0, eax mov vendor1, edx mov vendor2, ecx mov eax, 1 push ebx cpuid pop ebx nocpuid: mov result, edx } memmove (vendor+0, &vendor0, 4); memmove (vendor+4, &vendor1, 4); memmove (vendor+8, &vendor2, 4);#else# error unsupported compiler#endif features = 0; if (result) { /* result now contains the standard feature bits */ if (result & (1 << 15)) features |= CMOV; if (result & (1 << 23)) features |= MMX; if (result & (1 << 25)) features |= SSE; if (result & (1 << 26)) features |= SSE2; if ((features & MMX) && !(features & SSE) && (strcmp(vendor, "AuthenticAMD") == 0 || strcmp(vendor, "Geode by NSC") == 0)) { /* check for AMD MMX extensions */#ifdef __GNUC__ __asm__("push %%ebx\n" "mov $0x80000000, %%eax\n" "cpuid\n" "xor %%edx, %%edx\n" "cmp $0x1, %%eax\n" "jge 2f\n" "mov $0x80000001, %%eax\n" "cpuid\n" "2:\n" "pop %%ebx\n" "mov %%edx, %0\n" : "=r" (result) : : "%eax", "%ecx", "%edx" );#elif defined _MSC_VER _asm { push ebx mov eax, 80000000h cpuid xor edx, edx cmp eax, 1 jge notamd mov eax, 80000001h cpuid notamd: pop ebx mov result, edx }#endif if (result & (1<<22)) features |= MMX_Extensions; } }#endif /* HAVE_GETISAX */ return features;}pixman_bool_tpixman_have_mmx (void){ static pixman_bool_t initialized = FALSE; static pixman_bool_t mmx_present; if (!initialized) { unsigned int features = detectCPUFeatures(); mmx_present = (features & (MMX|MMX_Extensions)) == (MMX|MMX_Extensions); initialized = TRUE; } return mmx_present;}#ifdef USE_SSE2pixman_bool_tpixman_have_sse (void){ static pixman_bool_t initialized = FALSE; static pixman_bool_t sse_present; if (!initialized) { unsigned int features = detectCPUFeatures(); sse_present = (features & (MMX|MMX_Extensions|SSE|SSE2)) == (MMX|MMX_Extensions|SSE|SSE2); initialized = TRUE; } return sse_present;}#endif#endif /* __amd64__ */#endif