h264pred.c
/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/attributes.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "h264pred.h"

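/* h264pred_template.c is included once per supported bit depth; each pass
 * generates the generic prediction functions with an _8_c, _9_c or _10_c
 * suffix, which ff_h264_pred_init() later selects through the FUNCC() macro. */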
#define BIT_DEPTH 8
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 9
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 10
#include "h264pred_template.c"
#undef BIT_DEPTH

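/* The functions below are hand-written C variants for codecs that reuse the
 * H.264 prediction framework but deviate from the spec modes: VP7/VP8, SVQ3
 * and RV40. ff_h264_pred_init() wires them in based on codec_id. */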
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
                                   ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
                          (t0 + 2*t1 + t2 + 2) >> 2,
                          (t1 + 2*t2 + t3 + 2) >> 2,
                          (t2 + 2*t3 + t4 + 2) >> 2);

    AV_WN32A(src+0*stride, v);
    AV_WN32A(src+1*stride, v);
    AV_WN32A(src+2*stride, v);
    AV_WN32A(src+3*stride, v);
}

static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_LEFT_EDGE

    AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
    AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
    AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
    AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
}

static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(l1 + t1)>>1;
    src[1+0*stride]=
    src[0+1*stride]=(l2 + t2)>>1;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=
    src[3+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=(l3 + t3)>>1;
}

static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
}

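/* The RV40 "_nodown" variants are used when the samples below-left of the
 * block are unavailable; the last left neighbour (l3) is reused in place of
 * l4..l7. */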
static void pred4x4_down_left_rv40_nodown_c(uint8_t *src,
                                            const uint8_t *topright,
                                            ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
}

static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright,
                                       ptrdiff_t stride,
                                       const int l0, const int l1, const int l2,
                                       const int l3, const int l4)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[3+2*stride]=(t4 + t5 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}

static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
}

static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
}

static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright,
                                        ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
    src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
}

static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
    src[2+3*stride]=(l4 + l5 + 1)>>1;
    src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
}

static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=l3;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
    src[2+3*stride]=
    src[3+3*stride]=l3;
}

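/* VP8 TrueMotion ("TM") prediction: every pixel is predicted as
 * clip(left + top - topleft). The clip is done through ff_crop_tab, with cm
 * pre-offset by -topleft and cm_in offset by the left neighbour of each row. */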
static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright,
                             ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 4; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src += stride;
    }
}

static void pred16x16_plane_svq3_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 1, 0);
}

static void pred16x16_plane_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 0, 1);
}

static void pred16x16_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 16; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src[4] = cm_in[top[4]];
        src[5] = cm_in[top[5]];
        src[6] = cm_in[top[6]];
        src[7] = cm_in[top[7]];
        src[8] = cm_in[top[8]];
        src[9] = cm_in[top[9]];
        src[10] = cm_in[top[10]];
        src[11] = cm_in[top[11]];
        src[12] = cm_in[top[12]];
        src[13] = cm_in[top[13]];
        src[14] = cm_in[top[14]];
        src[15] = cm_in[top[15]];
        src += stride;
    }
}

static void pred8x8_left_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[-1+i*stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_top_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[i-stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0 = 0;

    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc0+= src[4+i-stride];
        dc0+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101*((dc0 + 8)>>4);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 8; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src[4] = cm_in[top[4]];
        src[5] = cm_in[top[5]];
        src[6] = cm_in[top[6]];
        src[7] = cm_in[top[7]];
        src += stride;
    }
}

/**
 * Set the intra prediction function pointers.
 */
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
                               const int bit_depth,
                               const int chroma_format_idc)
{
#undef FUNC
#undef FUNCC
#define FUNC(a, depth) a ## _ ## depth
#define FUNCC(a, depth) a ## _ ## depth ## _c
#define FUNCD(a) a ## _c

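/* H264_PRED(depth) fills the function pointer tables for one bit depth:
 * FUNCC(name, depth) resolves to the name_<depth>_c template function,
 * FUNCD(name) to the name_c codec-specific helpers defined above, and the
 * codec_id / chroma_format_idc tests switch in the SVQ3, RV40 and VP7/VP8
 * variants. */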
#define H264_PRED(depth) \
    if(codec_id != AV_CODEC_ID_RV40){\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_PRED]= FUNCD(pred4x4_vertical_vp8);\
            h->pred4x4[HOR_PRED]= FUNCD(pred4x4_horizontal_vp8);\
        } else {\
            h->pred4x4[VERT_PRED]= FUNCC(pred4x4_vertical, depth);\
            h->pred4x4[HOR_PRED]= FUNCC(pred4x4_horizontal, depth);\
        }\
        h->pred4x4[DC_PRED]= FUNCC(pred4x4_dc, depth);\
        if(codec_id == AV_CODEC_ID_SVQ3)\
            h->pred4x4[DIAG_DOWN_LEFT_PRED]= FUNCD(pred4x4_down_left_svq3);\
        else\
            h->pred4x4[DIAG_DOWN_LEFT_PRED]= FUNCC(pred4x4_down_left, depth);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right, depth);\
        h->pred4x4[VERT_RIGHT_PRED]= FUNCC(pred4x4_vertical_right, depth);\
        h->pred4x4[HOR_DOWN_PRED]= FUNCC(pred4x4_horizontal_down, depth);\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_LEFT_PRED]= FUNCD(pred4x4_vertical_left_vp8);\
        } else\
            h->pred4x4[VERT_LEFT_PRED]= FUNCC(pred4x4_vertical_left, depth);\
        h->pred4x4[HOR_UP_PRED]= FUNCC(pred4x4_horizontal_up, depth);\
        if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
            h->pred4x4[LEFT_DC_PRED]= FUNCC(pred4x4_left_dc, depth);\
            h->pred4x4[TOP_DC_PRED]= FUNCC(pred4x4_top_dc, depth);\
        } else {\
            h->pred4x4[TM_VP8_PRED]= FUNCD(pred4x4_tm_vp8);\
            h->pred4x4[DC_127_PRED]= FUNCC(pred4x4_127_dc, depth);\
            h->pred4x4[DC_129_PRED]= FUNCC(pred4x4_129_dc, depth);\
            h->pred4x4[VERT_VP8_PRED]= FUNCC(pred4x4_vertical, depth);\
            h->pred4x4[HOR_VP8_PRED]= FUNCC(pred4x4_horizontal, depth);\
        }\
        if (codec_id != AV_CODEC_ID_VP8)\
            h->pred4x4[DC_128_PRED]= FUNCC(pred4x4_128_dc, depth);\
    }else{\
        h->pred4x4[VERT_PRED]= FUNCC(pred4x4_vertical, depth);\
        h->pred4x4[HOR_PRED]= FUNCC(pred4x4_horizontal, depth);\
        h->pred4x4[DC_PRED]= FUNCC(pred4x4_dc, depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED]= FUNCD(pred4x4_down_left_rv40);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right, depth);\
        h->pred4x4[VERT_RIGHT_PRED]= FUNCC(pred4x4_vertical_right, depth);\
        h->pred4x4[HOR_DOWN_PRED]= FUNCC(pred4x4_horizontal_down, depth);\
        h->pred4x4[VERT_LEFT_PRED]= FUNCD(pred4x4_vertical_left_rv40);\
        h->pred4x4[HOR_UP_PRED]= FUNCD(pred4x4_horizontal_up_rv40);\
        h->pred4x4[LEFT_DC_PRED]= FUNCC(pred4x4_left_dc, depth);\
        h->pred4x4[TOP_DC_PRED]= FUNCC(pred4x4_top_dc, depth);\
        h->pred4x4[DC_128_PRED]= FUNCC(pred4x4_128_dc, depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
        h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
        h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
    }\
\
    h->pred8x8l[VERT_PRED]= FUNCC(pred8x8l_vertical, depth);\
    h->pred8x8l[HOR_PRED]= FUNCC(pred8x8l_horizontal, depth);\
    h->pred8x8l[DC_PRED]= FUNCC(pred8x8l_dc, depth);\
    h->pred8x8l[DIAG_DOWN_LEFT_PRED]= FUNCC(pred8x8l_down_left, depth);\
    h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred8x8l_down_right, depth);\
    h->pred8x8l[VERT_RIGHT_PRED]= FUNCC(pred8x8l_vertical_right, depth);\
    h->pred8x8l[HOR_DOWN_PRED]= FUNCC(pred8x8l_horizontal_down, depth);\
    h->pred8x8l[VERT_LEFT_PRED]= FUNCC(pred8x8l_vertical_left, depth);\
    h->pred8x8l[HOR_UP_PRED]= FUNCC(pred8x8l_horizontal_up, depth);\
    h->pred8x8l[LEFT_DC_PRED]= FUNCC(pred8x8l_left_dc, depth);\
    h->pred8x8l[TOP_DC_PRED]= FUNCC(pred8x8l_top_dc, depth);\
    h->pred8x8l[DC_128_PRED]= FUNCC(pred8x8l_128_dc, depth);\
\
    if (chroma_format_idc <= 1) {\
        h->pred8x8[VERT_PRED8x8]= FUNCC(pred8x8_vertical, depth);\
        h->pred8x8[HOR_PRED8x8]= FUNCC(pred8x8_horizontal, depth);\
    } else {\
        h->pred8x8[VERT_PRED8x8]= FUNCC(pred8x16_vertical, depth);\
        h->pred8x8[HOR_PRED8x8]= FUNCC(pred8x16_horizontal, depth);\
    }\
    if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
        if (chroma_format_idc <= 1) {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane, depth);\
        } else {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane, depth);\
        }\
    } else\
        h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
    if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && \
        codec_id != AV_CODEC_ID_VP8) {\
        if (chroma_format_idc <= 1) {\
            h->pred8x8[DC_PRED8x8]= FUNCC(pred8x8_dc, depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc, depth);\
            h->pred8x8[TOP_DC_PRED8x8]= FUNCC(pred8x8_top_dc, depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8]= FUNC(pred8x8_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8]= FUNC(pred8x8_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8]= FUNC(pred8x8_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8]= FUNC(pred8x8_mad_cow_dc_0l0, depth);\
        } else {\
            h->pred8x8[DC_PRED8x8]= FUNCC(pred8x16_dc, depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x16_left_dc, depth);\
            h->pred8x8[TOP_DC_PRED8x8]= FUNCC(pred8x16_top_dc, depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8]= FUNC(pred8x16_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8]= FUNC(pred8x16_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8]= FUNC(pred8x16_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8]= FUNC(pred8x16_mad_cow_dc_0l0, depth);\
        }\
    }else{\
        h->pred8x8[DC_PRED8x8]= FUNCD(pred8x8_dc_rv40);\
        h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
        h->pred8x8[TOP_DC_PRED8x8]= FUNCD(pred8x8_top_dc_rv40);\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc, depth);\
            h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc, depth);\
        }\
    }\
    if (chroma_format_idc <= 1) {\
        h->pred8x8[DC_128_PRED8x8]= FUNCC(pred8x8_128_dc, depth);\
    } else {\
        h->pred8x8[DC_128_PRED8x8]= FUNCC(pred8x16_128_dc, depth);\
    }\
\
    h->pred16x16[DC_PRED8x8]= FUNCC(pred16x16_dc, depth);\
    h->pred16x16[VERT_PRED8x8]= FUNCC(pred16x16_vertical, depth);\
    h->pred16x16[HOR_PRED8x8]= FUNCC(pred16x16_horizontal, depth);\
    switch(codec_id){\
    case AV_CODEC_ID_SVQ3:\
        h->pred16x16[PLANE_PRED8x8]= FUNCD(pred16x16_plane_svq3);\
        break;\
    case AV_CODEC_ID_RV40:\
        h->pred16x16[PLANE_PRED8x8]= FUNCD(pred16x16_plane_rv40);\
        break;\
    case AV_CODEC_ID_VP7:\
    case AV_CODEC_ID_VP8:\
        h->pred16x16[PLANE_PRED8x8]= FUNCD(pred16x16_tm_vp8);\
        h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc, depth);\
        h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc, depth);\
        break;\
    default:\
        h->pred16x16[PLANE_PRED8x8]= FUNCC(pred16x16_plane, depth);\
        break;\
    }\
    h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc, depth);\
    h->pred16x16[TOP_DC_PRED8x8]= FUNCC(pred16x16_top_dc, depth);\
    h->pred16x16[DC_128_PRED8x8]= FUNCC(pred16x16_128_dc, depth);\
\
    /* special lossless h/v prediction for h264 */ \
    h->pred4x4_add[VERT_PRED]= FUNCC(pred4x4_vertical_add, depth);\
    h->pred4x4_add[HOR_PRED]= FUNCC(pred4x4_horizontal_add, depth);\
    h->pred8x8l_add[VERT_PRED]= FUNCC(pred8x8l_vertical_add, depth);\
    h->pred8x8l_add[HOR_PRED]= FUNCC(pred8x8l_horizontal_add, depth);\
    if (chroma_format_idc <= 1) {\
        h->pred8x8_add[VERT_PRED8x8]= FUNCC(pred8x8_vertical_add, depth);\
        h->pred8x8_add[HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add, depth);\
    } else {\
        h->pred8x8_add[VERT_PRED8x8]= FUNCC(pred8x16_vertical_add, depth);\
        h->pred8x8_add[HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add, depth);\
    }\
    h->pred16x16_add[VERT_PRED8x8]= FUNCC(pred16x16_vertical_add, depth);\
    h->pred16x16_add[HOR_PRED8x8]= FUNCC(pred16x16_horizontal_add, depth);\

    switch (bit_depth) {
    case 9:
        H264_PRED(9)
        break;
    case 10:
        H264_PRED(10)
        break;
    default:
        H264_PRED(8)
        break;
    }

    if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
    if (ARCH_X86) ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
}
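
/* Minimal usage sketch (illustrative only, not part of the original file).
 * A decoder owning an H264PredContext initializes it once, then calls the
 * selected prediction function per block; the names src, topright and
 * linesize below are assumptions made for the example. */
#if 0
static void example_predict_block(uint8_t *src, const uint8_t *topright,
                                  ptrdiff_t linesize)
{
    H264PredContext hpc;

    /* 8-bit H.264 with 4:2:0 chroma */
    ff_h264_pred_init(&hpc, AV_CODEC_ID_H264, 8, 1);

    /* Predict one 4x4 luma block in place; src points at the block's top-left
     * sample inside the frame, with its neighbours already reconstructed. */
    hpc.pred4x4[VERT_PRED](src, topright, linesize);
}
#endif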