Diffstat (limited to 'extra/xf86-video-siliconmotion/99_xf86-video-siliconmotion-1.7.3-fix-loongson.diff')
-rw-r--r--  extra/xf86-video-siliconmotion/99_xf86-video-siliconmotion-1.7.3-fix-loongson.diff  160
1 file changed, 160 insertions, 0 deletions
diff --git a/extra/xf86-video-siliconmotion/99_xf86-video-siliconmotion-1.7.3-fix-loongson.diff b/extra/xf86-video-siliconmotion/99_xf86-video-siliconmotion-1.7.3-fix-loongson.diff
new file mode 100644
index 000000000..4be6d6469
--- /dev/null
+++ b/extra/xf86-video-siliconmotion/99_xf86-video-siliconmotion-1.7.3-fix-loongson.diff
@@ -0,0 +1,160 @@
+rixed@happyleptic.org
+ to loongson-dev
+
+A patch for siliconmotion 1.7.3 is available in the bug tracker
+at freedesktop.org, but a better one will be available Monday.
+Still, it lacks the most interesting part: the MMX pack function,
+which should not be implemented there but probably either directly
+in Xv or in pixman (and make Xv use pixman).
+
+So for now the simplest thing is to fix this manually.
+For the impatient, I attach a patch against SM 1.7.3.
+With it, 16bpp and AccelMethod = "XAA" work quite well.
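+For example, with a device section along these lines (an illustrative
+xorg.conf fragment; identifiers are placeholders, adjust to your setup):
+
+    Section "Device"
+        Identifier "SMI"
+        Driver     "siliconmotion"
+        Option     "AccelMethod" "XAA"
+    EndSection
+
+    Section "Screen"
+        Identifier   "Screen0"
+        Device       "SMI"
+        DefaultDepth 16
+    EndSection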
+
+Just an ugly hack really.
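+
+For reference, the operation being accelerated is plain YV12-to-YUY2
+packing: each pair of output pixels is Y0 U Y1 V, with one U/V sample
+shared by both pixels. A minimal scalar sketch of one row (illustrative
+names, not driver API):
+
+    static void pack_row_yuy2(const unsigned char *y,
+                              const unsigned char *u,
+                              const unsigned char *v,
+                              unsigned char *dst, int w)
+    {
+        int j;
+        for (j = 0; j < w / 2; j++) {
+            *dst++ = *y++;  /* Y0 */
+            *dst++ = *u++;  /* U, shared by the pixel pair */
+            *dst++ = *y++;  /* Y1 */
+            *dst++ = *v++;  /* V, shared by the pixel pair */
+        }
+    }
+
+The Loongson MMX loops in the patch below do the same interleave 16
+(then 8) pixels at a time.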
+diff -ur orig/src/smi_video.c mod/src/smi_video.c
+--- orig/src/smi_video.c 2010-02-05 13:11:18.000000000 +0100
++++ mod/src/smi_video.c 2010-02-05 13:20:36.000000000 +0100
+@@ -276,6 +276,7 @@
+ XVIMAGE_YUY2,
+ XVIMAGE_YV12,
+ XVIMAGE_I420,
++ XVIMAGE_UYVY,
+ {
+ FOURCC_RV15, /* id */
+ XvRGB, /* type */
+@@ -1462,6 +1463,117 @@
+ LEAVE();
+ }
+
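++/*
++ * Copy a planar YV12 image (full-resolution Y plane plus two
++ * half-resolution chroma planes) into packed YUY2, row by row:
++ * a SIMD loop handles 16 pixels per iteration, a second one a
++ * remaining 8-pixel chunk, and a scalar loop the last few pixels.
++ */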
++static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
++ unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
++{
++ int i, j;
++ unsigned char const *y, *u, *v;
++ int dstinc, yinc, uinc, vinc;
++
++ y = srcy;
++ u = srcu;
++ v = srcv;
++
++ dstinc = dstPitch - 2*w;
++ yinc = srcPitchy - w;
++ uinc = srcPitchuv - w/2;
++ vinc = srcPitchuv - w/2;
++
++ for (i = 0; i < h; i++) {
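++	/* 16 pixels per iteration: load 16 Y, 8 U and 8 V bytes,
++	 * interleave them with punpck{l,h}bh into Y0 U Y1 V order,
++	 * and store 32 output bytes; $8 counts w>>4 iterations. */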
++ asm (
++// ".set arch=loongson2f\n\t"
++ ".set noreorder\n\t"
++ "move $8, %8 \n\t"
++ "1: \n\t"
++ "beqz $8, 2f \n\t"
++ "xor $f0, $f0, $f0 \n\t"
++ "ldc1 $f4, (%0) \n\t"
++ "punpcklbh $f2, $f4, $f0 \n\t"
++ "punpckhbh $f4, $f4, $f0 \n\t"
++ "ldc1 $f16, 8(%0) \n\t"
++ "punpcklbh $f14, $f16, $f0 \n\t"
++ "punpckhbh $f16, $f16, $f0 \n\t"
++
++ "lwc1 $f8, (%1) \n\t"
++ "lwc1 $f12, (%2) \n\t"
++ "punpcklbh $f8, $f8, $f12 \n\t"
++ "punpcklbh $f6, $f0, $f8 \n\t"
++ "punpckhbh $f8, $f0, $f8 \n\t"
++ "lwc1 $f18, 4(%1) \n\t"
++ "lwc1 $f12, 4(%2) \n\t"
++ "punpcklbh $f18, $f18, $f12 \n\t"
++ "punpcklbh $f10, $f0, $f18 \n\t"
++ "punpckhbh $f12, $f0, $f18 \n\t"
++
++ "or $f2, $f2, $f6 \n\t"
++ "or $f4, $f4, $f8 \n\t"
++ "or $f14, $f14, $f10 \n\t"
++ "or $f16, $f16, $f12 \n\t"
++
++ "sdc1 $f2, (%3) \n\t"
++ "sdc1 $f4, 8(%3) \n\t"
++ "add %0, 16 \n\t"
++ "add %1, 8 \n\t"
++ "add %2, 8 \n\t"
++ "sdc1 $f14, 0x10(%3) \n\t"
++ "sdc1 $f16, 0x18(%3) \n\t"
++ "add $8, -1 \n\t"
++ "b 1b \n\t"
++ "add %3, 32 \n\t"
++ "2: \n\t"
++ ".set reorder\n\t"
++ : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++ : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
++ : "memory","$8"
++ );
++
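++	/* Same interleave for one remaining 8-pixel chunk, if any
++	 * ($8 is (w & 0xf) / 8, i.e. 0 or 1). */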
++ asm (
++// ".set arch=loongson2f\n\t"
++ ".set noreorder\n\t"
++ "move $8, %8 \n\t"
++ "1: \n\t"
++ "beqz $8, 2f \n\t"
++ "xor $f0, $f0, $f0 \n\t"
++ "ldc1 $f4, (%0) \n\t"
++ "punpcklbh $f2, $f4, $f0 \n\t"
++ "punpckhbh $f4, $f4, $f0 \n\t"
++
++ "lwc1 $f8, (%1) \n\t"
++ "lwc1 $f12, (%2) \n\t"
++ "punpcklbh $f8, $f8, $f12 \n\t"
++ "punpcklbh $f6, $f0, $f8 \n\t"
++ "punpckhbh $f8, $f0, $f8 \n\t"
++
++ "or $f2, $f2, $f6 \n\t"
++ "or $f4, $f4, $f8 \n\t"
++
++ "sdc1 $f2, (%3) \n\t"
++ "sdc1 $f4, 8(%3) \n\t"
++ "add %0, 8 \n\t"
++ "add %1, 4 \n\t"
++ "add %2, 4 \n\t"
++ "add $8, -1 \n\t"
++ "b 1b \n\t"
++ "add %3, 16 \n\t"
++ "2:\n\t"
++ ".set reorder\n\t"
++ : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++ : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
++ : "memory","$8"
++ );
++
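++	/* Scalar tail for the last (w & 7) pixels of the row. */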
++ for (j = (w&7)/2; j; j--) {
++ *dst++ = *y++;
++ *dst++ = *u++;
++ *dst++ = *y++;
++ *dst++ = *v++;
++ }
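++	/* 4:2:0 chroma: advance U/V to the next chroma row only after
++	 * odd lines; after even lines rewind them by the w/2 bytes just
++	 * consumed so the same chroma row is reused. */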
++ y += yinc;
++ u = (i%2) ? (u + uinc): (u - w/2);
++ v = (i%2) ? (v + vinc): (v - w/2);
++ dst += dstinc;
++ }
++}
+
+ static int
+ SMI_PutImage(
+@@ -1593,7 +1705,7 @@
+ offset3 = tmp;
+ }
+ nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
+- xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
++ myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+ buf + offset2, buf + offset3, dstStart,
+ srcPitch, srcPitch2, dstPitch, nLines,
+ nPixels);
+@@ -1793,6 +1905,7 @@
+ WRITE_VPR(pSmi, 0x1C, offset >> 3);
+ WRITE_VPR(pSmi, 0x20, (pitch >> 3) | ((pitch >> 3) << 16));
+ WRITE_VPR(pSmi, 0x24, (hstretch & 0xff00) | ((vstretch & 0xff00) >> 8));
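++	/* Low-order stretch bits, written unconditionally here; on the
++	 * Lynx EM+ this duplicates the chip-specific write below. */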
++ WRITE_VPR(pSmi, 0x68, ((hstretch & 0xff) << 8) | (vstretch & 0xff));
+ if (pSmi->Chipset == SMI_LYNXEMplus) { /* This one can store additional precision */
+ WRITE_VPR(pSmi, 0x68, ((hstretch & 0xff) << 8) | (vstretch & 0xff));
+ }