diff --git a/src/smi_video.c b/src/smi_video.c
index c2e8868..3e128fa 100644
--- a/src/smi_video.c
+++ b/src/smi_video.c
@@ -276,6 +276,7 @@ static XF86ImageRec SMI_VideoImages[] =
XVIMAGE_YUY2,
XVIMAGE_YV12,
XVIMAGE_I420,
+ XVIMAGE_UYVY,
{
FOURCC_RV15, /* id */
XvRGB, /* type */
@@ -1464,6 +1465,123 @@ SMI_QueryBestSize(
LEAVE();
}
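+/* Planar YV12/I420 -> packed YUY2 copy using Loongson-2F MMI instructions;
+   used below in place of the generic xf86XVCopyYUV12ToPacked(). */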
+static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
+ unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
+{
+ int i, j;
+ unsigned char const *y, *u, *v;
+ int dstinc, yinc, uinc, vinc;
+
+ y = srcy;
+ u = srcu;
+ v = srcv;
+
+ dstinc = dstPitch - 2*w;
+ yinc = srcPitchy - w;
+ uinc = srcPitchuv - w/2;
+ vinc = srcPitchuv - w/2;
+
+ for (i = 0; i < h; i++) {
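+ /* MMI fast path: 16 pixels per iteration (16 Y, 8 U, 8 V bytes in, 32 packed bytes out). */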
+ asm (
+// ".set arch=loongson2f\n\t"
+ ".set noreorder\n\t"
+ "move $8, %8 \n\t"
+ "1: \n\t"
+ "beqz $8, 2f \n\t"
+ "xor $f0, $f0, $f0 \n\t"
+ "ldc1 $f4, (%0) \n\t"
+ "punpcklbh $f2, $f4, $f0 \n\t"
+ "punpckhbh $f4, $f4, $f0 \n\t"
+ "ldc1 $f16, 8(%0) \n\t"
+ "punpcklbh $f14, $f16, $f0 \n\t"
+ "punpckhbh $f16, $f16, $f0 \n\t"
+
+ "lwc1 $f8, (%1) \n\t"
+ "lwc1 $f12, (%2) \n\t"
+ "punpcklbh $f8, $f8, $f12 \n\t"
+ "punpcklbh $f6, $f0, $f8 \n\t"
+ "punpckhbh $f8, $f0, $f8 \n\t"
+ "lwc1 $f18, 4(%1) \n\t"
+ "lwc1 $f12, 4(%2) \n\t"
+ "punpcklbh $f18, $f18, $f12 \n\t"
+ "punpcklbh $f10, $f0, $f18 \n\t"
+ "punpckhbh $f12, $f0, $f18 \n\t"
+
+ "or $f2, $f2, $f6 \n\t"
+ "or $f4, $f4, $f8 \n\t"
+ "or $f14, $f14, $f10 \n\t"
+ "or $f16, $f16, $f12 \n\t"
+
+ "sdc1 $f2, (%3) \n\t"
+ "sdc1 $f4, 8(%3) \n\t"
+ "add %0, 16 \n\t"
+ "add %1, 8 \n\t"
+ "add %2, 8 \n\t"
+ "sdc1 $f14, 0x10(%3) \n\t"
+ "sdc1 $f16, 0x18(%3) \n\t"
+ "add $8, -1 \n\t"
+ "b 1b \n\t"
+ "add %3, 32 \n\t"
+ "2: \n\t"
+ ".set reorder\n\t"
+ : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+ : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
+ : "memory","$8"
+ );
+
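+ /* Handle one remaining block of 8 pixels when the width is not a multiple of 16. */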
+ asm (
+// ".set arch=loongson2f\n\t"
+ ".set noreorder\n\t"
+ "move $8, %8 \n\t"
+ "1: \n\t"
+ "beqz $8, 2f \n\t"
+ "xor $f0, $f0, $f0 \n\t"
+ "ldc1 $f4, (%0) \n\t"
+ "punpcklbh $f2, $f4, $f0 \n\t"
+ "punpckhbh $f4, $f4, $f0 \n\t"
+
+ "lwc1 $f8, (%1) \n\t"
+ "lwc1 $f12, (%2) \n\t"
+ "punpcklbh $f8, $f8, $f12 \n\t"
+ "punpcklbh $f6, $f0, $f8 \n\t"
+ "punpckhbh $f8, $f0, $f8 \n\t"
+
+ "or $f2, $f2, $f6 \n\t"
+ "or $f4, $f4, $f8 \n\t"
+
+ "sdc1 $f2, (%3) \n\t"
+ "sdc1 $f4, 8(%3) \n\t"
+ "add %0, 8 \n\t"
+ "add %1, 4 \n\t"
+ "add %2, 4 \n\t"
+ "add $8, -1 \n\t"
+ "b 1b \n\t"
+ "add %3, 16 \n\t"
+ "2:\n\t"
+ ".set reorder\n\t"
+ : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
+ : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
+ : "memory","$8"
+ );
+
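+ /* Scalar tail: pack the last few pixels (width modulo 8) as Y U Y V pairs. */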
+ for (j = (w&7)/2; j; j--) {
+ *dst++ = *y++;
+ *dst++ = *u++;
+ *dst++ = *y++;
+ *dst++ = *v++;
+ }
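+ /* 4:2:0: each U/V row is shared by two Y rows, so only advance U/V after odd rows. */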
+ y += yinc;
+ u = (i%2) ? (u + uinc): (u - w/2);
+ v = (i%2) ? (v + vinc): (v - w/2);
+ dst += dstinc;
+ }
+}
static int
SMI_PutImage(
@@ -1592,7 +1704,7 @@ SMI_PutImage(
offset3 = tmp;
}
nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
- xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+ myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
buf + offset2, buf + offset3, dstStart,
srcPitch, srcPitch2, dstPitch, nLines,
nPixels);
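
For readers not fluent in Loongson MMI, the routine below is a reference-only sketch of the same conversion the patched fast path performs. It is not part of the patch; the name RefCopyYUV12ToPacked is illustrative, the argument order matches myXVCopyYUV12ToPacked above, and it assumes an even width with no remainder handling.

static void
RefCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv,
                     const unsigned char *srcu, unsigned char *dst,
                     int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
{
    int i, j;

    for (i = 0; i < h; i++) {
        const unsigned char *y = srcy + i * srcPitchy;
        const unsigned char *u = srcu + (i / 2) * srcPitchuv;  /* chroma rows are   */
        const unsigned char *v = srcv + (i / 2) * srcPitchuv;  /* shared by 2 Y rows */
        unsigned char *d = dst + i * dstPitch;

        /* 4:2:0 -> packed 4:2:2: one U and one V sample per two horizontal pixels */
        for (j = 0; j < w / 2; j++) {
            *d++ = *y++;   /* Y0 */
            *d++ = *u++;   /* U  */
            *d++ = *y++;   /* Y1 */
            *d++ = *v++;   /* V  */
        }
    }
}

The MMI blocks in the patch perform the same packing with punpcklbh/punpckhbh and 64-bit stores, 16 pixels per iteration, falling back to an 8-pixel block and then a scalar loop for the remainder.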