Diffstat (limited to 'extra/xf86-video-siliconmotion/xf86-video-siliconmotion-1.7.5-loongson-video-accl.patch')
-rw-r--r-- | extra/xf86-video-siliconmotion/xf86-video-siliconmotion-1.7.5-loongson-video-accl.patch | 139 |
1 files changed, 139 insertions, 0 deletions
diff --git a/extra/xf86-video-siliconmotion/xf86-video-siliconmotion-1.7.5-loongson-video-accl.patch b/extra/xf86-video-siliconmotion/xf86-video-siliconmotion-1.7.5-loongson-video-accl.patch
new file mode 100644
index 000000000..e31044473
--- /dev/null
+++ b/extra/xf86-video-siliconmotion/xf86-video-siliconmotion-1.7.5-loongson-video-accl.patch
@@ -0,0 +1,139 @@
+diff --git a/src/smi_video.c b/src/smi_video.c
+index c2e8868..3e128fa 100644
+--- a/src/smi_video.c
++++ b/src/smi_video.c
+@@ -276,6 +276,7 @@ static XF86ImageRec SMI_VideoImages[] =
+     XVIMAGE_YUY2,
+     XVIMAGE_YV12,
+     XVIMAGE_I420,
++    XVIMAGE_UYVY,
+     {
+        FOURCC_RV15,                    /* id */
+        XvRGB,                          /* type */
+@@ -1464,6 +1465,117 @@ SMI_QueryBestSize(
+     LEAVE();
+ }
+ 
++static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
++                                  unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
++{
++    int i, j;
++    unsigned char const *y, *u, *v;
++    int dstinc, yinc, uinc, vinc;
++
++    y = srcy;
++    u = srcu;
++    v = srcv;
++
++    dstinc = dstPitch - 2*w;
++    yinc = srcPitchy - w;
++    uinc = srcPitchuv - w/2;
++    vinc = srcPitchuv - w/2;
++
++    for (i = 0; i < h; i++) {
++        asm (
++//          ".set arch=loongson2f\n\t"
++            ".set noreorder\n\t"
++            "move $8, %8                \n\t"
++            "1:                         \n\t"
++            "beqz $8, 2f                \n\t"
++            "xor $f0, $f0, $f0          \n\t"
++            "ldc1 $f4, (%0)             \n\t"
++            "punpcklbh $f2, $f4, $f0    \n\t"
++            "punpckhbh $f4, $f4, $f0    \n\t"
++            "ldc1 $f16, 8(%0)           \n\t"
++            "punpcklbh $f14, $f16, $f0  \n\t"
++            "punpckhbh $f16, $f16, $f0  \n\t"
++
++            "lwc1 $f8, (%1)             \n\t"
++            "lwc1 $f12, (%2)            \n\t"
++            "punpcklbh $f8, $f8, $f12   \n\t"
++            "punpcklbh $f6, $f0, $f8    \n\t"
++            "punpckhbh $f8, $f0, $f8    \n\t"
++            "lwc1 $f18, 4(%1)           \n\t"
++            "lwc1 $f12, 4(%2)           \n\t"
++            "punpcklbh $f18, $f18, $f12 \n\t"
++            "punpcklbh $f10, $f0, $f18  \n\t"
++            "punpckhbh $f12, $f0, $f18  \n\t"
++
++            "or $f2, $f2, $f6           \n\t"
++            "or $f4, $f4, $f8           \n\t"
++            "or $f14, $f14, $f10        \n\t"
++            "or $f16, $f16, $f12        \n\t"
++
++            "sdc1 $f2, (%3)             \n\t"
++            "sdc1 $f4, 8(%3)            \n\t"
++            "add %0, 16                 \n\t"
++            "add %1, 8                  \n\t"
++            "add %2, 8                  \n\t"
++            "sdc1 $f14, 0x10(%3)        \n\t"
++            "sdc1 $f16, 0x18(%3)        \n\t"
++            "add $8, -1                 \n\t"
++            "b 1b                       \n\t"
++            "add %3, 32                 \n\t"
++            "2:                         \n\t"
++            ".set reorder\n\t"
++            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
++            : "memory","$8"
++        );
++
++        asm (
++//          ".set arch=loongson2f\n\t"
++            ".set noreorder\n\t"
++            "move $8, %8                \n\t"
++            "1:                         \n\t"
++            "beqz $8, 2f                \n\t"
++            "xor $f0, $f0, $f0          \n\t"
++            "ldc1 $f4, (%0)             \n\t"
++            "punpcklbh $f2, $f4, $f0    \n\t"
++            "punpckhbh $f4, $f4, $f0    \n\t"
++
++            "lwc1 $f8, (%1)             \n\t"
++            "lwc1 $f12, (%2)            \n\t"
++            "punpcklbh $f8, $f8, $f12   \n\t"
++            "punpcklbh $f6, $f0, $f8    \n\t"
++            "punpckhbh $f8, $f0, $f8    \n\t"
++
++            "or $f2, $f2, $f6           \n\t"
++            "or $f4, $f4, $f8           \n\t"
++
++            "sdc1 $f2, (%3)             \n\t"
++            "sdc1 $f4, 8(%3)            \n\t"
++            "add %0, 8                  \n\t"
++            "add %1, 4                  \n\t"
++            "add %2, 4                  \n\t"
++            "add $8, -1                 \n\t"
++            "b 1b                       \n\t"
++            "add %3, 16                 \n\t"
++            "2:\n\t"
++            ".set reorder\n\t"
++            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
++            : "memory","$8"
++        );
++
++        for (j = (w&7)/2; j; j--) {
++            *dst++ = *y++;
++            *dst++ = *u++;
++            *dst++ = *y++;
++            *dst++ = *v++;
++        }
++        y += yinc;
++        u = (i%2) ? (u + uinc): (u - w/2);
++        v = (i%2) ? (v + vinc): (v - w/2);
++        dst += dstinc;
++    }
++}
+ 
+ static int
+ SMI_PutImage(
+@@ -1592,7 +1704,7 @@ SMI_PutImage(
+         offset3 = tmp;
+     }
+     nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
+-    xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
++    myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+                            buf + offset2, buf + offset3, dstStart,
+                            srcPitch, srcPitch2, dstPitch, nLines,
+                            nPixels);
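
Note (not part of the patch): the added myXVCopyYUV12ToPacked() routine, which SMI_PutImage() now calls in place of xf86XVCopyYUV12ToPacked(), converts planar 4:2:0 input (YV12/I420) into the packed Y/U/Y/V byte order used by the overlay. It does the bulk of each row with Loongson multimedia instructions (punpcklbh/punpckhbh plus 64-bit ldc1/sdc1 on the FPU registers, per the commented-out ".set arch=loongson2f"), handling 16 pixels per iteration in the first asm loop and 8 in the second, then finishes the remainder with a scalar tail loop. The plain-C sketch below approximates what the assembly computes, inferred from that tail loop and the end-of-row pointer arithmetic; the name yv12_to_yuy2_ref is made up for this note, and the code is a reference sketch, not the patched driver's implementation.

/*
 * Reference-only sketch: scalar equivalent of the conversion that
 * myXVCopyYUV12ToPacked() performs with Loongson SIMD (an assumption
 * based on the patch's tail loop, not code taken from the patch).
 * Parameter order mirrors the patch: the V plane comes before the U plane.
 */
static void
yv12_to_yuy2_ref(const unsigned char *srcy, const unsigned char *srcv,
                 const unsigned char *srcu, unsigned char *dst,
                 int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
{
    int i, j;

    for (i = 0; i < h; i++) {
        /* Each chroma row is shared by two consecutive luma rows (4:2:0). */
        const unsigned char *y = srcy + i * srcPitchy;
        const unsigned char *u = srcu + (i / 2) * srcPitchuv;
        const unsigned char *v = srcv + (i / 2) * srcPitchuv;
        unsigned char *d = dst + i * dstPitch;

        /* Pack two luma samples with one U and one V sample per group. */
        for (j = 0; j < w / 2; j++) {
            *d++ = *y++;   /* Y0 */
            *d++ = *u++;   /* U  */
            *d++ = *y++;   /* Y1 */
            *d++ = *v++;   /* V  */
        }
    }
}

The assembly version produces the same per-row output; it just advances the source pointers inside the asm blocks and, at the end of each row, rewinds the chroma pointers on even rows so the same chroma row is reused for the next luma row.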