
pixman-mmx.c

Embedded graphics library | C | Page 1 of 5
/*
 * Copyright © 2004, 2005 Red Hat, Inc.
 * Copyright © 2004 Nicholas Miell
 * Copyright © 2005 Trolltech AS
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Red Hat not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  Red Hat makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 *
 * Author:  Søren Sandmann (sandmann@redhat.com)
 * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
 * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
 *
 * Based on work by Owen Taylor
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef USE_MMX

#include <mmintrin.h>

#ifdef USE_SSE
#include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
#endif

#include "pixman-mmx.h"

#undef READ
#undef WRITE
#define READ(img,x) *(x)
#define WRITE(img,ptr,v) (*(ptr) = (v));

#define noVERBOSE

#ifdef VERBOSE
#define CHECKPOINT() ErrorF ("at %s %d\n", __FUNCTION__, __LINE__)
#else
#define CHECKPOINT()
#endif

/* Notes about writing mmx code
 *
 * give memory operands as the second operand. If you give it as the
 * first, gcc will first load it into a register, then use that
 * register
 *
 *   ie. use
 *
 *         _mm_mullo_pi16 (x, mmx_constant);
 *
 *   not
 *
 *         _mm_mullo_pi16 (mmx_constant, x);
 *
 * Also try to minimize dependencies. i.e. when you need a value, try
 * to calculate it from a value that was calculated as early as
 * possible.
 */
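
/* Illustration (editor's sketch, not part of the original source):
 * "minimize dependencies" means starting independent computations
 * before their results are needed, so the CPU can overlap them, e.g.
 *
 *     __m64 s = load8888 (*src);      both loads are independent
 *     __m64 d = load8888 (*dest);     and can be issued early
 *     __m64 a = expand_alpha (s);     depends only on s
 *     *dest = store8888 (over (s, a, d));
 *
 * rather than loading *dest only immediately before the over().
 * (load8888, expand_alpha, over and store8888 are defined below.)
 */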

/* --------------- MMX primitives ------------------------------------- */

#ifdef __GNUC__
typedef unsigned long long ullong;
typedef ullong mmxdatafield;
#endif
#ifdef _MSC_VER
typedef unsigned __int64 ullong;
typedef __m64 mmxdatafield;
#endif

typedef struct
{
    mmxdatafield mmx_4x00ff;
    mmxdatafield mmx_4x0080;
    mmxdatafield mmx_565_rgb;
    mmxdatafield mmx_565_unpack_multiplier;
    mmxdatafield mmx_565_r;
    mmxdatafield mmx_565_g;
    mmxdatafield mmx_565_b;
    mmxdatafield mmx_mask_0;
    mmxdatafield mmx_mask_1;
    mmxdatafield mmx_mask_2;
    mmxdatafield mmx_mask_3;
    mmxdatafield mmx_full_alpha;
    mmxdatafield mmx_ffff0000ffff0000;
    mmxdatafield mmx_0000ffff00000000;
    mmxdatafield mmx_000000000000ffff;
} MMXData;

static const MMXData c =
{
#ifdef __GNUC__
    .mmx_4x00ff =                0x00ff00ff00ff00ffULL,
    .mmx_4x0080 =                0x0080008000800080ULL,
    .mmx_565_rgb =               0x000001f0003f001fULL,
    .mmx_565_unpack_multiplier = 0x0000008404100840ULL,
    .mmx_565_r =                 0x000000f800000000ULL,
    .mmx_565_g =                 0x0000000000fc0000ULL,
    .mmx_565_b =                 0x00000000000000f8ULL,
    .mmx_mask_0 =                0xffffffffffff0000ULL,
    .mmx_mask_1 =                0xffffffff0000ffffULL,
    .mmx_mask_2 =                0xffff0000ffffffffULL,
    .mmx_mask_3 =                0x0000ffffffffffffULL,
    .mmx_full_alpha =            0x00ff000000000000ULL,
    .mmx_ffff0000ffff0000 =      0xffff0000ffff0000ULL,
    .mmx_0000ffff00000000 =      0x0000ffff00000000ULL,
    .mmx_000000000000ffff =      0x000000000000ffffULL,
#endif
#ifdef _MSC_VER
    { 0x00ff00ff00ff00ffUI64 },
    { 0x0080008000800080UI64 },
    { 0x000001f0003f001fUI64 },
    { 0x0000008404100840UI64 },
    { 0x000000f800000000UI64 },
    { 0x0000000000fc0000UI64 },
    { 0x00000000000000f8UI64 },
    { 0xffffffffffff0000UI64 },
    { 0xffffffff0000ffffUI64 },
    { 0xffff0000ffffffffUI64 },
    { 0x0000ffffffffffffUI64 },
    { 0x00ff000000000000UI64 },
    { 0xffff0000ffff0000UI64 },
    { 0x0000ffff00000000UI64 },
    { 0x000000000000ffffUI64 },
#endif
};

#ifdef _MSC_VER
#undef inline
#define inline __forceinline
#endif

#ifdef __GNUC__
#define MC(x) ((__m64) c.mmx_##x)
#endif
#ifdef _MSC_VER
#define MC(x) c.mmx_##x
#endif

static inline __m64
M64 (ullong x)
{
#ifdef __GNUC__
    return (__m64)x;
#endif
#ifdef _MSC_VER
    __m64 res;

    res.m64_u64 = x;
    return res;
#endif
}

static inline ullong
ULLONG (__m64 x)
{
#ifdef __GNUC__
    return (ullong)x;
#endif
#ifdef _MSC_VER
    ullong res;

    res = x.m64_u64;
    return res;
#endif
}

/* shift left for s > 0, right for s < 0 */
static inline __m64
shift (__m64 v, int s)
{
    if (s > 0)
        return _mm_slli_si64 (v, s);
    else if (s < 0)
        return _mm_srli_si64 (v, -s);
    else
        return v;
}

/* per-lane 0xff - x, i.e. (1 - x) in 8-bit fixed point */
static inline __m64
negate (__m64 mask)
{
    return _mm_xor_si64 (mask, MC(4x00ff));
}

static inline __m64
pix_multiply (__m64 a, __m64 b)
{
    __m64 res;

    res = _mm_mullo_pi16 (a, b);
    res = _mm_adds_pu16 (res, MC(4x0080));
    res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
    res = _mm_srli_pi16 (res, 8);

    return res;
}

static inline __m64
pix_add (__m64 a, __m64 b)
{
    return _mm_adds_pu8 (a, b);
}
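
/* Illustration (editor's sketch, not part of the original source):
 * pix_multiply() computes a correctly rounded a*b/255 in each 16-bit
 * lane, using a shift-and-add identity instead of a division.  The
 * same arithmetic on one scalar 8-bit channel:
 *
 *     uint32_t t = (uint32_t)a * b + 0x80;
 *     uint8_t  r = (uint8_t)((t + (t >> 8)) >> 8);    round (a*b / 255)
 *
 * e.g. a = b = 128 gives t = 16512 and r = 64, matching
 * round (16384 / 255) = 64.
 */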
#ifdef USE_SSE

static inline __m64
expand_alpha (__m64 pixel)
{
    return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE(3, 3, 3, 3));
}

static inline __m64
expand_alpha_rev (__m64 pixel)
{
    return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE(0, 0, 0, 0));
}

static inline __m64
invert_colors (__m64 pixel)
{
    return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE(3, 0, 1, 2));
}

#else

static inline __m64
expand_alpha (__m64 pixel)
{
    __m64 t1, t2;

    t1 = shift (pixel, -48);
    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);

    return t1;
}

static inline __m64
expand_alpha_rev (__m64 pixel)
{
    __m64 t1, t2;

    /* move alpha to low 16 bits and zero the rest */
    t1 = shift (pixel,  48);
    t1 = shift (t1, -48);

    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);

    return t1;
}

static inline __m64
invert_colors (__m64 pixel)
{
    __m64 x, y, z;

    x = y = z = pixel;
    x = _mm_and_si64 (x, MC(ffff0000ffff0000));
    y = _mm_and_si64 (y, MC(000000000000ffff));
    z = _mm_and_si64 (z, MC(0000ffff00000000));

    y = shift (y, 32);
    z = shift (z, -32);

    x = _mm_or_si64 (x, y);
    x = _mm_or_si64 (x, z);

    return x;
}

#endif

static inline __m64
over (__m64 src, __m64 srca, __m64 dest)
{
    return _mm_adds_pu8 (src, pix_multiply(dest, negate(srca)));
}

static inline __m64
over_rev_non_pre (__m64 src, __m64 dest)
{
    __m64 srca = expand_alpha (src);
    __m64 srcfaaa = _mm_or_si64 (srca, MC(full_alpha));

    return over(pix_multiply(invert_colors(src), srcfaaa), srca, dest);
}

static inline __m64
in (__m64 src,
    __m64 mask)
{
    return pix_multiply (src, mask);
}

static inline __m64
in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
{
    src = _mm_or_si64 (src, MC(full_alpha));

    return over(in (src, mask), mask, dest);
}

#ifndef _MSC_VER
static inline __m64
in_over (__m64 src,
         __m64 srca,
         __m64 mask,
         __m64 dest)
{
    return over(in(src, mask), pix_multiply(srca, mask), dest);
}
#else
#define in_over(src, srca, mask, dest) over(in(src, mask), pix_multiply(srca, mask), dest)
#endif

static inline __m64
load8888 (uint32_t v)
{
    return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), _mm_setzero_si64());
}

static inline __m64
pack8888 (__m64 lo, __m64 hi)
{
    return _mm_packs_pu16 (lo, hi);
}

static inline uint32_t
store8888 (__m64 v)
{
    return _mm_cvtsi64_si32(pack8888(v, _mm_setzero_si64()));
}

/* Expand 16 bits positioned at @pos (0-3) of a mmx register into
 *
 *    00RR00GG00BB
 *
 * --- Expanding 565 in the low word ---
 *
 * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
 * m = m & (01f0003f001f);
 * m = m * (008404100840);
 * m = m >> 8;
 *
 * Note the trick here - the top word is shifted by another nibble to
 * avoid it bumping into the middle word
 */
static inline __m64
expand565 (__m64 pixel, int pos)
{
    __m64 p = pixel;
    __m64 t1, t2;

    /* move pixel to low 16 bit and zero the rest */
    p = shift (shift (p, (3 - pos) * 16), -48);

    t1 = shift (p, 36 - 11);
    t2 = shift (p, 16 - 5);
    p = _mm_or_si64 (t1, p);
    p = _mm_or_si64 (t2, p);
    p = _mm_and_si64 (p, MC(565_rgb));

    pixel = _mm_mullo_pi16 (p, MC(565_unpack_multiplier));

    return _mm_srli_pi16 (pixel, 8);
}

static inline __m64
expand8888 (__m64 in, int pos)
{
    if (pos == 0)
        return _mm_unpacklo_pi8 (in, _mm_setzero_si64());
    else
        return _mm_unpackhi_pi8 (in, _mm_setzero_si64());
}

static inline __m64
pack565 (__m64 pixel, __m64 target, int pos)
{
    __m64 p = pixel;
    __m64 t = target;
    __m64 r, g, b;

    r = _mm_and_si64 (p, MC(565_r));
    g = _mm_and_si64 (p, MC(565_g));
    b = _mm_and_si64 (p, MC(565_b));

    r = shift (r, - (32 - 8) + pos * 16);
    g = shift (g, - (16 - 3) + pos * 16);
    b = shift (b, - (0  + 3) + pos * 16);

    if (pos == 0)
        t = _mm_and_si64 (t, MC(mask_0));
    else if (pos == 1)
        t = _mm_and_si64 (t, MC(mask_1));
    else if (pos == 2)
        t = _mm_and_si64 (t, MC(mask_2));
    else if (pos == 3)
        t = _mm_and_si64 (t, MC(mask_3));

    p = _mm_or_si64 (r, t);
    p = _mm_or_si64 (g, p);

    return _mm_or_si64 (b, p);
}
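
/* Illustration (editor's sketch, not part of the original source):
 * the multiply-and-shift in expand565() is equivalent to the usual
 * bit-replication that widens 5- and 6-bit channels to 8 bits:
 *
 *     r8 = (r5 << 3) | (r5 >> 2);    5 bits -> 8 bits
 *     g8 = (g6 << 2) | (g6 >> 4);    6 bits -> 8 bits
 *     b8 = (b5 << 3) | (b5 >> 2);    5 bits -> 8 bits
 *
 * so e.g. r5 = 0x1f expands to exactly 0xff, and pack565() above
 * simply keeps the top 5 (or 6) bits of each 8-bit channel.
 */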
#ifndef _MSC_VER
static inline __m64
pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
{
    x = _mm_mullo_pi16 (x, a);
    y = _mm_mullo_pi16 (y, b);
    x = _mm_adds_pu16 (x, MC(4x0080));
    x = _mm_adds_pu16 (x, y);
    x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8));
    x = _mm_srli_pi16 (x, 8);

    return x;
}
#else
#define pix_add_mul(x, a, y, b) \
( x = _mm_mullo_pi16 (x, a), \
  y = _mm_mullo_pi16 (y, b), \
  x = _mm_adds_pu16 (x, MC(4x0080)), \
  x = _mm_adds_pu16 (x, y), \
  x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8)), \
  _mm_srli_pi16 (x, 8) )
#endif

/* --------------- MMX code paths for fbcompose.c --------------------- */

static FASTCALL void
mmxCombineMaskU (uint32_t *src, const uint32_t *mask, int width)
{
    const uint32_t *end = mask + width;
    while (mask < end) {
        uint32_t mmask = *mask;
        uint32_t maska = mmask >> 24;
        if (maska == 0) {
            *src = 0;
        } else if (maska != 0xff) {
            __m64 a = load8888(mmask);
            __m64 s = load8888(*src);
            a = expand_alpha(a);
            s = pix_multiply(s, a);
            *src = store8888(s);
        }
        ++src;
        ++mask;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineOverU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        uint32_t ssrc = *src;
        uint32_t a = ssrc >> 24;
        /* fast paths: opaque source is a plain copy,
         * fully transparent source leaves dest untouched */
        if (a == 0xff) {
            *dest = ssrc;
        } else if (a) {
            __m64 s, sa;
            s = load8888(ssrc);
            sa = expand_alpha(s);
            *dest = store8888(over(s, sa, load8888(*dest)));
        }
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineOverReverseU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 d, da;
        d = load8888(*dest);
        da = expand_alpha(d);
        *dest = store8888(over (d, da, load8888(*src)));
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineInU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 x, a;
        x = load8888(*src);
        a = load8888(*dest);
        a = expand_alpha(a);
        x = pix_multiply(x, a);
        *dest = store8888(x);
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineInReverseU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 x, a;
        x = load8888(*dest);
        a = load8888(*src);
        a = expand_alpha(a);
        x = pix_multiply(x, a);
        *dest = store8888(x);
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineOutU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 x, a;
        x = load8888(*src);
        a = load8888(*dest);
        a = expand_alpha(a);
        a = negate(a);
        x = pix_multiply(x, a);
        *dest = store8888(x);
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineOutReverseU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 x, a;
        x = load8888(*dest);
        a = load8888(*src);
        a = expand_alpha(a);
        a = negate(a);
        x = pix_multiply(x, a);
        *dest = store8888(x);
        ++dest;
        ++src;
    }
    _mm_empty();
}

static FASTCALL void
mmxCombineAtopU (uint32_t *dest, const uint32_t *src, int width)
{
    const uint32_t *end = dest + width;
    while (dest < end) {
        __m64 s, da, d, sia;
        s = load8888(*src);
        d = load8888(*dest);
        sia = expand_alpha(s);
        sia = negate(sia);
        da = expand_alpha(d);
