/*
 * Note: For libavcodec, this code can also be used under the LGPL license
 *
 * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "../common.h"

#include "mmx.h"    /* movq_m2r(), pmaddwd_r2r() and the other MMX register macros */
#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))

#define ROW_SHIFT 11
#define COL_SHIFT 6

#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
#define rounder(bias) {round (bias), round (bias)}
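/*
 * Worked example of the rounder above (assuming ROW_SHIFT == 11 as defined):
 * round (0.5) == (int)(1.0 * 2048) == 2048, so rounder (0.5) expands to the
 * two-element initializer {2048, 2048}, i.e. one 32-bit bias per pmaddwd
 * result, added in before the final >> ROW_SHIFT.
 */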
/* C row IDCT - it's just here to document the MMXEXT and MMX versions */
static inline void idct_row (int16_t * row, int offset,
                             int16_t * table, int32_t * rounder)
{
    int C1, C2, C3, C4, C5, C6, C7;     /* per-row cosine constants, loaded from table */
    int a0, a1, a2, a3, b0, b1, b2, b3;

    row += offset;

    a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
    a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
    a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
    a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;

    b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
    b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
    b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
    b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];

    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}
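/*
 * Note on the structure above: the row pass splits the 8-point IDCT into an
 * even part (a0..a3, built from x0,x2,x4,x6) and an odd part (b0..b3, built
 * from x1,x3,x5,x7), then forms the outputs as (a +/- b) >> ROW_SHIFT.  The
 * MMXEXT/MMX code below computes exactly these sums with pmaddwd, which
 * produces two 32-bit dot products per instruction.
 */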
#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2, \
static inline void mmxext_row_head (int16_t * row, int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_m2r (*table, mm3);             // mm3 = -C2 -C4 C2 C4
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    movq_m2r (*(table+4), mm4);         // mm4 = C6 C4 C6 C4
    pmaddwd_r2r (mm0, mm3);             // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2

    pshufw_r2r (mm2, mm2, 0x4e);        // mm2 = x2 x0 x6 x4
}
static inline void mmxext_row (int16_t * table, int32_t * rounder)
{
    movq_m2r (*(table+8), mm1);         // mm1 = -C5 -C1 C3 C1
    pmaddwd_r2r (mm2, mm4);             // mm4 = C4*x0+C6*x2 C4*x4+C6*x6

    pmaddwd_m2r (*(table+16), mm0);     // mm0 = C4*x4-C6*x6 C4*x0-C6*x2
    pshufw_r2r (mm6, mm6, 0x4e);        // mm6 = x3 x1 x7 x5

    movq_m2r (*(table+12), mm7);        // mm7 = -C7 C3 C7 C5
    pmaddwd_r2r (mm5, mm1);             // mm1 = -C1*x5-C5*x7 C1*x1+C3*x3

    paddd_m2r (*rounder, mm3);          // mm3 += rounder
    pmaddwd_r2r (mm6, mm7);             // mm7 = C3*x1-C7*x3 C5*x5+C7*x7

    pmaddwd_m2r (*(table+20), mm2);     // mm2 = C4*x0-C2*x2 -C4*x4+C2*x6
    paddd_r2r (mm4, mm3);               // mm3 = a1 a0 + rounder

    pmaddwd_m2r (*(table+24), mm5);     // mm5 = C3*x5-C1*x7 C5*x1-C1*x3
    movq_r2r (mm3, mm4);                // mm4 = a1 a0 + rounder

    pmaddwd_m2r (*(table+28), mm6);     // mm6 = C7*x1-C5*x3 C7*x5+C3*x7
    paddd_r2r (mm7, mm1);               // mm1 = b1 b0

    paddd_m2r (*rounder, mm0);          // mm0 += rounder
    psubd_r2r (mm1, mm3);               // mm3 = a1-b1 a0-b0 + rounder

    psrad_i2r (ROW_SHIFT, mm3);         // mm3 = y6 y7
    paddd_r2r (mm4, mm1);               // mm1 = a1+b1 a0+b0 + rounder

    paddd_r2r (mm2, mm0);               // mm0 = a3 a2 + rounder
    psrad_i2r (ROW_SHIFT, mm1);         // mm1 = y1 y0

    paddd_r2r (mm6, mm5);               // mm5 = b3 b2
    movq_r2r (mm0, mm4);                // mm4 = a3 a2 + rounder

    paddd_r2r (mm5, mm0);               // mm0 = a3+b3 a2+b2 + rounder
    psubd_r2r (mm5, mm4);               // mm4 = a3-b3 a2-b2 + rounder
}
static inline void mmxext_row_tail (int16_t * row, int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    psrad_i2r (ROW_SHIFT, mm4);         // mm4 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0

    packssdw_r2r (mm3, mm4);            // mm4 = y6 y7 y4 y5

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    pshufw_r2r (mm4, mm4, 0xb1);        // mm4 = y7 y6 y5 y4

    movq_r2m (mm4, *(row+store+4));     // save y7 y6 y5 y4
}
static inline void mmxext_row_mid (int16_t * row, int store,
                                   int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    psrad_i2r (ROW_SHIFT, mm4);         // mm4 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    packssdw_r2r (mm3, mm4);            // mm4 = y6 y7 y4 y5
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    pshufw_r2r (mm4, mm4, 0xb1);        // mm4 = y7 y6 y5 y4

    movq_m2r (*table, mm3);             // mm3 = -C2 -C4 C2 C4
    movq_r2m (mm4, *(row+store+4));     // save y7 y6 y5 y4

    pmaddwd_r2r (mm0, mm3);             // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2

    movq_m2r (*(table+4), mm4);         // mm4 = C6 C4 C6 C4
    pshufw_r2r (mm2, mm2, 0x4e);        // mm2 = x2 x0 x6 x4
}
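/*
 * The plain MMX row pass below implements the same math as the MMXEXT one
 * above; it only replaces pshufw with punpckldq/punpckhdq for rearranging
 * the input halves and with a pslld/psrld/por sequence for swapping the
 * high output words, which is also why it needs its own constant layout
 * (mmx_table instead of mmxext_table).
 */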
#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, c4, c6, \
static inline void mmx_row_head (int16_t * row, int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_m2r (*table, mm3);             // mm3 = C6 C4 C2 C4
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    punpckldq_r2r (mm0, mm0);           // mm0 = x2 x0 x2 x0

    movq_m2r (*(table+4), mm4);         // mm4 = -C2 -C4 C6 C4
    pmaddwd_r2r (mm0, mm3);             // mm3 = C4*x0+C6*x2 C4*x0+C2*x2

    movq_m2r (*(table+8), mm1);         // mm1 = -C7 C3 C3 C1
    punpckhdq_r2r (mm2, mm2);           // mm2 = x6 x4 x6 x4
}
static inline void mmx_row (int16_t * table, int32_t * rounder)
{
    pmaddwd_r2r (mm2, mm4);             // mm4 = -C4*x4-C2*x6 C4*x4+C6*x6
    punpckldq_r2r (mm5, mm5);           // mm5 = x3 x1 x3 x1

    pmaddwd_m2r (*(table+16), mm0);     // mm0 = C4*x0-C2*x2 C4*x0-C6*x2
    punpckhdq_r2r (mm6, mm6);           // mm6 = x7 x5 x7 x5

    movq_m2r (*(table+12), mm7);        // mm7 = -C5 -C1 C7 C5
    pmaddwd_r2r (mm5, mm1);             // mm1 = C3*x1-C7*x3 C1*x1+C3*x3

    paddd_m2r (*rounder, mm3);          // mm3 += rounder
    pmaddwd_r2r (mm6, mm7);             // mm7 = -C1*x5-C5*x7 C5*x5+C7*x7

    pmaddwd_m2r (*(table+20), mm2);     // mm2 = C4*x4-C6*x6 -C4*x4+C2*x6
    paddd_r2r (mm4, mm3);               // mm3 = a1 a0 + rounder

    pmaddwd_m2r (*(table+24), mm5);     // mm5 = C7*x1-C5*x3 C5*x1-C1*x3
    movq_r2r (mm3, mm4);                // mm4 = a1 a0 + rounder

    pmaddwd_m2r (*(table+28), mm6);     // mm6 = C3*x5-C1*x7 C7*x5+C3*x7
    paddd_r2r (mm7, mm1);               // mm1 = b1 b0

    paddd_m2r (*rounder, mm0);          // mm0 += rounder
    psubd_r2r (mm1, mm3);               // mm3 = a1-b1 a0-b0 + rounder

    psrad_i2r (ROW_SHIFT, mm3);         // mm3 = y6 y7
    paddd_r2r (mm4, mm1);               // mm1 = a1+b1 a0+b0 + rounder

    paddd_r2r (mm2, mm0);               // mm0 = a3 a2 + rounder
    psrad_i2r (ROW_SHIFT, mm1);         // mm1 = y1 y0

    paddd_r2r (mm6, mm5);               // mm5 = b3 b2
    movq_r2r (mm0, mm7);                // mm7 = a3 a2 + rounder

    paddd_r2r (mm5, mm0);               // mm0 = a3+b3 a2+b2 + rounder
    psubd_r2r (mm5, mm7);               // mm7 = a3-b3 a2-b2 + rounder
}
static inline void mmx_row_tail (int16_t * row, int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    psrad_i2r (ROW_SHIFT, mm7);         // mm7 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0

    packssdw_r2r (mm3, mm7);            // mm7 = y6 y7 y4 y5

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    movq_r2r (mm7, mm4);                // mm4 = y6 y7 y4 y5

    pslld_i2r (16, mm7);                // mm7 = y7 0 y5 0

    psrld_i2r (16, mm4);                // mm4 = 0 y6 0 y4

    por_r2r (mm4, mm7);                 // mm7 = y7 y6 y5 y4

    movq_r2m (mm7, *(row+store+4));     // save y7 y6 y5 y4
}
static inline void mmx_row_mid (int16_t * row, int store,
                                int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    psrad_i2r (ROW_SHIFT, mm7);         // mm7 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    packssdw_r2r (mm3, mm7);            // mm7 = y6 y7 y4 y5
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    movq_r2r (mm7, mm1);                // mm1 = y6 y7 y4 y5

    punpckldq_r2r (mm0, mm0);           // mm0 = x2 x0 x2 x0
    psrld_i2r (16, mm7);                // mm7 = 0 y6 0 y4

    movq_m2r (*table, mm3);             // mm3 = C6 C4 C2 C4
    pslld_i2r (16, mm1);                // mm1 = y7 0 y5 0

    movq_m2r (*(table+4), mm4);         // mm4 = -C2 -C4 C6 C4
    por_r2r (mm1, mm7);                 // mm7 = y7 y6 y5 y4

    movq_m2r (*(table+8), mm1);         // mm1 = -C7 C3 C3 C1
    punpckhdq_r2r (mm2, mm2);           // mm2 = x6 x4 x6 x4

    movq_r2m (mm7, *(row+store+4));     // save y7 y6 y5 y4
    pmaddwd_r2r (mm0, mm3);             // mm3 = C4*x0+C6*x2 C4*x0+C2*x2
}
#if 0   /* reference only; the MMX version below reuses the name idct_col */
// C column IDCT - it's just here to document the MMXEXT and MMX versions
static inline void idct_col (int16_t * col, int offset)
{
/* multiplication - as implemented on mmx */
#define F(c,x) (((c) * (x)) >> 16)

/* saturation - it helps us handle torture test cases */
#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))

    int16_t x0, x1, x2, x3, x4, x5, x6, x7;
    int16_t y0, y1, y2, y3, y4, y5, y6, y7;
    int16_t a0, a1, a2, a3, b0, b1, b2, b3;
    int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;

    u26 = S (F (T2, x6) + x2);
    v26 = S (F (T2, x2) - x6);

    u17 = S (F (T1, x7) + x1);
    v17 = S (F (T1, x1) - x7);
    u35 = S (F (T3, x5) + x3);
    v35 = S (F (T3, x3) - x5);

    u12 = S (2 * F (C4, u12));
    v12 = S (2 * F (C4, v12));

    y0 = S (a0 + b0) >> COL_SHIFT;
    y1 = S (a1 + b1) >> COL_SHIFT;
    y2 = S (a2 + b2) >> COL_SHIFT;
    y3 = S (a3 + b3) >> COL_SHIFT;

    y4 = S (a3 - b3) >> COL_SHIFT;
    y5 = S (a2 - b2) >> COL_SHIFT;
    y6 = S (a1 - b1) >> COL_SHIFT;
    y7 = S (a0 - b0) >> COL_SHIFT;
}
#endif
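/*
 * Two notes for reading the MMX version below.  F (c, x) mimics pmulhw, so
 * the constants T1, T2, T3 and C4 (as defined below) are 16-bit fixed-point
 * values, roughly tan(pi/16), tan(pi/8) and tan(3*pi/16) scaled by 2^16 and
 * cos(pi/4) scaled by 2^15.  The MMX idct_col also processes four columns
 * at once, one column per 16-bit lane of each 64-bit MMX register, which is
 * why it is invoked twice per block (offsets 0 and 4) at the end of this
 * file.
 */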
/* MMX column IDCT */
static inline void idct_col (int16_t * col, int offset)
{
#define T1 13036
#define T2 27146
#define T3 43790
#define C4 23170

    static short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
    static short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
    static short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
    static short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};

    /* column code adapted from Peter Gubanov */
    /* http://www.elecard.com/peter/idct.shtml */

    movq_m2r (*_T1, mm0);               // mm0 = T1

    movq_m2r (*(col+offset+1*8), mm1);  // mm1 = x1
    movq_r2r (mm0, mm2);                // mm2 = T1

    movq_m2r (*(col+offset+7*8), mm4);  // mm4 = x7
    pmulhw_r2r (mm1, mm0);              // mm0 = T1*x1

    movq_m2r (*_T3, mm5);               // mm5 = T3
    pmulhw_r2r (mm4, mm2);              // mm2 = T1*x7

    movq_m2r (*(col+offset+5*8), mm6);  // mm6 = x5
    movq_r2r (mm5, mm7);                // mm7 = T3-1

    movq_m2r (*(col+offset+3*8), mm3);  // mm3 = x3
    psubsw_r2r (mm4, mm0);              // mm0 = v17

    movq_m2r (*_T2, mm4);               // mm4 = T2
    pmulhw_r2r (mm3, mm5);              // mm5 = (T3-1)*x3

    paddsw_r2r (mm2, mm1);              // mm1 = u17
    pmulhw_r2r (mm6, mm7);              // mm7 = (T3-1)*x5

    movq_r2r (mm4, mm2);                // mm2 = T2
    paddsw_r2r (mm3, mm5);              // mm5 = T3*x3

    pmulhw_m2r (*(col+offset+2*8), mm4);// mm4 = T2*x2
    paddsw_r2r (mm6, mm7);              // mm7 = T3*x5

    psubsw_r2r (mm6, mm5);              // mm5 = v35
    paddsw_r2r (mm3, mm7);              // mm7 = u35

    movq_m2r (*(col+offset+6*8), mm3);  // mm3 = x6
    movq_r2r (mm0, mm6);                // mm6 = v17

    pmulhw_r2r (mm3, mm2);              // mm2 = T2*x6
    psubsw_r2r (mm5, mm0);              // mm0 = b3

    psubsw_r2r (mm3, mm4);              // mm4 = v26
    paddsw_r2r (mm6, mm5);              // mm5 = v12

    movq_r2m (mm0, *(col+offset+3*8));  // save b3 in scratch0
    movq_r2r (mm1, mm6);                // mm6 = u17

    paddsw_m2r (*(col+offset+2*8), mm2);// mm2 = u26
    paddsw_r2r (mm7, mm6);              // mm6 = b0

    psubsw_r2r (mm7, mm1);              // mm1 = u12
    movq_r2r (mm1, mm7);                // mm7 = u12

    movq_m2r (*(col+offset+0*8), mm3);  // mm3 = x0
    paddsw_r2r (mm5, mm1);              // mm1 = u12+v12

    movq_m2r (*_C4, mm0);               // mm0 = C4/2
    psubsw_r2r (mm5, mm7);              // mm7 = u12-v12

    movq_r2m (mm6, *(col+offset+5*8));  // save b0 in scratch1
    pmulhw_r2r (mm0, mm1);              // mm1 = b1/2

    movq_r2r (mm4, mm6);                // mm6 = v26
    pmulhw_r2r (mm0, mm7);              // mm7 = b2/2

    movq_m2r (*(col+offset+4*8), mm5);  // mm5 = x4
    movq_r2r (mm3, mm0);                // mm0 = x0

    psubsw_r2r (mm5, mm3);              // mm3 = v04
    paddsw_r2r (mm5, mm0);              // mm0 = u04

    paddsw_r2r (mm3, mm4);              // mm4 = a1
    movq_r2r (mm0, mm5);                // mm5 = u04

    psubsw_r2r (mm6, mm3);              // mm3 = a2
    paddsw_r2r (mm2, mm5);              // mm5 = a0

    paddsw_r2r (mm1, mm1);              // mm1 = b1
    psubsw_r2r (mm2, mm0);              // mm0 = a3

    paddsw_r2r (mm7, mm7);              // mm7 = b2
    movq_r2r (mm3, mm2);                // mm2 = a2

    movq_r2r (mm4, mm6);                // mm6 = a1
    paddsw_r2r (mm7, mm3);              // mm3 = a2+b2

    psraw_i2r (COL_SHIFT, mm3);         // mm3 = y2
    paddsw_r2r (mm1, mm4);              // mm4 = a1+b1

    psraw_i2r (COL_SHIFT, mm4);         // mm4 = y1
    psubsw_r2r (mm1, mm6);              // mm6 = a1-b1

    movq_m2r (*(col+offset+5*8), mm1);  // mm1 = b0
    psubsw_r2r (mm7, mm2);              // mm2 = a2-b2

    psraw_i2r (COL_SHIFT, mm6);         // mm6 = y6
    movq_r2r (mm5, mm7);                // mm7 = a0

    movq_r2m (mm4, *(col+offset+1*8));  // save y1
    psraw_i2r (COL_SHIFT, mm2);         // mm2 = y5

    movq_r2m (mm3, *(col+offset+2*8));  // save y2
    paddsw_r2r (mm1, mm5);              // mm5 = a0+b0

    movq_m2r (*(col+offset+3*8), mm4);  // mm4 = b3
    psubsw_r2r (mm1, mm7);              // mm7 = a0-b0

    psraw_i2r (COL_SHIFT, mm5);         // mm5 = y0
    movq_r2r (mm0, mm3);                // mm3 = a3

    movq_r2m (mm2, *(col+offset+5*8));  // save y5
    psubsw_r2r (mm4, mm3);              // mm3 = a3-b3

    psraw_i2r (COL_SHIFT, mm7);         // mm7 = y7
    paddsw_r2r (mm0, mm4);              // mm4 = a3+b3

    movq_r2m (mm5, *(col+offset+0*8));  // save y0
    psraw_i2r (COL_SHIFT, mm3);         // mm3 = y4

    movq_r2m (mm6, *(col+offset+6*8));  // save y6
    psraw_i2r (COL_SHIFT, mm4);         // mm4 = y3

    movq_r2m (mm7, *(col+offset+7*8));  // save y7

    movq_r2m (mm3, *(col+offset+4*8));  // save y4

    movq_r2m (mm4, *(col+offset+3*8));  // save y3
}
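/*
 * Note that the column pass works in place and reuses two rows of the block
 * as scratch space: b3 is parked in the row at offset 3*8 and b0 in the row
 * at offset 5*8, and both are reloaded before those rows are finally
 * overwritten with y3 and y5.
 */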
static int32_t rounder0[] ATTR_ALIGN(8) =
    rounder ((1 << (COL_SHIFT - 1)) - 0.5);
static int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
static int32_t rounder1[] ATTR_ALIGN(8) =
    rounder (1.25683487303);        /* C1*(C1/C4+C1+C7)/2 */
static int32_t rounder7[] ATTR_ALIGN(8) =
    rounder (-0.25);                /* C1*(C7/C4+C7-C1)/2 */
static int32_t rounder2[] ATTR_ALIGN(8) =
    rounder (0.60355339059);        /* C2 * (C6+C2)/2 */
static int32_t rounder6[] ATTR_ALIGN(8) =
    rounder (-0.25);                /* C2 * (C6-C2)/2 */
static int32_t rounder3[] ATTR_ALIGN(8) =
    rounder (0.087788325588);       /* C3*(-C3/C4+C3+C5)/2 */
static int32_t rounder5[] ATTR_ALIGN(8) =
    rounder (-0.441341716183);      /* C3*(-C5/C4+C5-C3)/2 */
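/*
 * The MMX column pass above uses plain psraw without adding a rounding
 * constant; the bias for the final >> COL_SHIFT is instead folded into the
 * row-pass rounders here (note the (1 << (COL_SHIFT - 1)) term in rounder0,
 * which is applied to the DC row), with the remaining values biasing each
 * frequency row per the formulas in the comments.
 */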
#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * block) \
{ \
    static int16_t table04[] ATTR_ALIGN(16) = \
        table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \
    static int16_t table17[] ATTR_ALIGN(16) = \
        table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \
    static int16_t table26[] ATTR_ALIGN(16) = \
        table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \
    static int16_t table35[] ATTR_ALIGN(16) = \
        table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \
    idct_row_head (block, 0*8, table04); \
    idct_row (table04, rounder0); \
    idct_row_mid (block, 0*8, 4*8, table04); \
    idct_row (table04, rounder4); \
    idct_row_mid (block, 4*8, 1*8, table17); \
    idct_row (table17, rounder1); \
    idct_row_mid (block, 1*8, 7*8, table17); \
    idct_row (table17, rounder7); \
    idct_row_mid (block, 7*8, 2*8, table26); \
    idct_row (table26, rounder2); \
    idct_row_mid (block, 2*8, 6*8, table26); \
    idct_row (table26, rounder6); \
    idct_row_mid (block, 6*8, 3*8, table35); \
    idct_row (table35, rounder3); \
    idct_row_mid (block, 3*8, 5*8, table35); \
    idct_row (table35, rounder5); \
    idct_row_tail (block, 5*8); \
    idct_col (block, 0); \
    idct_col (block, 4); \
}
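/*
 * The rows are processed in the order 0,4,1,7,2,6,3,5 so that the two rows
 * sharing a coefficient table (and rounder pair) are handled back to back,
 * and each _mid step overlaps storing the previous row's result with
 * loading the next row, matching the pipelined head/row/mid/tail split of
 * the functions above.
 */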
declare_idct (ff_mmxext_idct, mmxext_table,
              mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)

declare_idct (ff_mmx_idct, mmx_table,
              mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
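#if 0
/*
 * Illustrative usage sketch (not part of the original file): both generated
 * entry points transform a 64-element int16_t coefficient block (8x8,
 * row-major) in place, rows first and then columns.  The emms() call is
 * assumed to be the macro provided by mmx.h; callers are also assumed to
 * select the MMXEXT variant only on CPUs that support pshufw.
 */
static void example_idct_block (int16_t block[64])
{
    ff_mmx_idct (block);        /* or ff_mmxext_idct (block) */
    emms ();                    /* leave MMX state before any FPU code runs */
}
#endif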