/* NOTE(review): stray Doxygen page-header residue ("Libav") removed; decoder source begins below. */
00001 /* 00002 * VC-1 and WMV3 decoder 00003 * Copyright (c) 2006-2007 Konstantin Shishkov 00004 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer 00005 * 00006 * This file is part of FFmpeg. 00007 * 00008 * FFmpeg is free software; you can redistribute it and/or 00009 * modify it under the terms of the GNU Lesser General Public 00010 * License as published by the Free Software Foundation; either 00011 * version 2.1 of the License, or (at your option) any later version. 00012 * 00013 * FFmpeg is distributed in the hope that it will be useful, 00014 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00015 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00016 * Lesser General Public License for more details. 00017 * 00018 * You should have received a copy of the GNU Lesser General Public 00019 * License along with FFmpeg; if not, write to the Free Software 00020 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00021 */ 00022 00028 #include "internal.h" 00029 #include "dsputil.h" 00030 #include "avcodec.h" 00031 #include "mpegvideo.h" 00032 #include "h263.h" 00033 #include "vc1.h" 00034 #include "vc1data.h" 00035 #include "vc1acdata.h" 00036 #include "msmpeg4data.h" 00037 #include "unary.h" 00038 #include "simple_idct.h" 00039 #include "mathops.h" 00040 #include "vdpau_internal.h" 00041 00042 #undef NDEBUG 00043 #include <assert.h> 00044 00045 #define MB_INTRA_VLC_BITS 9 00046 #define DC_VLC_BITS 9 00047 #define AC_VLC_BITS 9 00048 static const uint16_t table_mb_intra[64][2]; 00049 00050 00051 static const uint16_t vlc_offs[] = { 00052 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436, 00053 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8620, 00054 9262, 10202, 10756, 11310, 12228, 15078 00055 }; 00056 00062 static int vc1_init_common(VC1Context *v) 00063 { 00064 static int done = 0; 00065 int i = 0; 00066 static VLC_TYPE 
vlc_table[15078][2]; 00067 00068 v->hrd_rate = v->hrd_buffer = NULL; 00069 00070 /* VLC tables */ 00071 if(!done) 00072 { 00073 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23, 00074 ff_vc1_bfraction_bits, 1, 1, 00075 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS); 00076 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4, 00077 ff_vc1_norm2_bits, 1, 1, 00078 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS); 00079 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64, 00080 ff_vc1_norm6_bits, 1, 1, 00081 ff_vc1_norm6_codes, 2, 2, 556); 00082 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7, 00083 ff_vc1_imode_bits, 1, 1, 00084 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS); 00085 for (i=0; i<3; i++) 00086 { 00087 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i*3+0]]; 00088 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i*3+1] - vlc_offs[i*3+0]; 00089 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16, 00090 ff_vc1_ttmb_bits[i], 1, 1, 00091 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC); 00092 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i*3+1]]; 00093 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i*3+2] - vlc_offs[i*3+1]; 00094 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8, 00095 ff_vc1_ttblk_bits[i], 1, 1, 00096 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); 00097 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i*3+2]]; 00098 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i*3+3] - vlc_offs[i*3+2]; 00099 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15, 00100 ff_vc1_subblkpat_bits[i], 1, 1, 00101 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); 00102 } 00103 for(i=0; i<4; i++) 00104 { 00105 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i*3+9]]; 00106 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i*3+10] - vlc_offs[i*3+9]; 00107 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16, 00108 
ff_vc1_4mv_block_pattern_bits[i], 1, 1, 00109 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); 00110 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i*3+10]]; 00111 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i*3+11] - vlc_offs[i*3+10]; 00112 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64, 00113 ff_vc1_cbpcy_p_bits[i], 1, 1, 00114 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC); 00115 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i*3+11]]; 00116 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i*3+12] - vlc_offs[i*3+11]; 00117 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73, 00118 ff_vc1_mv_diff_bits[i], 1, 1, 00119 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC); 00120 } 00121 for(i=0; i<8; i++){ 00122 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i+21]]; 00123 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i+22] - vlc_offs[i+21]; 00124 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i], 00125 &vc1_ac_tables[i][0][1], 8, 4, 00126 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC); 00127 } 00128 done = 1; 00129 } 00130 00131 /* Other defaults */ 00132 v->pq = -1; 00133 v->mvrange = 0; /* 7.1.1.18, p80 */ 00134 00135 return 0; 00136 } 00137 00138 /***********************************************************************/ 00149 enum Imode { 00150 IMODE_RAW, 00151 IMODE_NORM2, 00152 IMODE_DIFF2, 00153 IMODE_NORM6, 00154 IMODE_DIFF6, 00155 IMODE_ROWSKIP, 00156 IMODE_COLSKIP 00157 }; //imode defines 00159 00160 //Bitplane group 00162 00163 static void vc1_loop_filter_iblk(MpegEncContext *s, int pq) 00164 { 00165 int i, j; 00166 if(!s->first_slice_line) 00167 s->dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq); 00168 s->dsp.vc1_v_loop_filter16(s->dest[0] + 8*s->linesize, s->linesize, pq); 00169 for(i = !s->mb_x*8; i < 16; i += 8) 00170 s->dsp.vc1_h_loop_filter16(s->dest[0] + i, s->linesize, pq); 00171 for(j = 0; j < 2; j++){ 00172 if(!s->first_slice_line) 00173 
s->dsp.vc1_v_loop_filter8(s->dest[j+1], s->uvlinesize, pq); 00174 if(s->mb_x) 00175 s->dsp.vc1_h_loop_filter8(s->dest[j+1], s->uvlinesize, pq); 00176 } 00177 } 00178 00181 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64]) 00182 { 00183 uint8_t *Y; 00184 int ys, us, vs; 00185 DSPContext *dsp = &v->s.dsp; 00186 00187 if(v->rangeredfrm) { 00188 int i, j, k; 00189 for(k = 0; k < 6; k++) 00190 for(j = 0; j < 8; j++) 00191 for(i = 0; i < 8; i++) 00192 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128; 00193 00194 } 00195 ys = v->s.current_picture.linesize[0]; 00196 us = v->s.current_picture.linesize[1]; 00197 vs = v->s.current_picture.linesize[2]; 00198 Y = v->s.dest[0]; 00199 00200 dsp->put_pixels_clamped(block[0], Y, ys); 00201 dsp->put_pixels_clamped(block[1], Y + 8, ys); 00202 Y += ys * 8; 00203 dsp->put_pixels_clamped(block[2], Y, ys); 00204 dsp->put_pixels_clamped(block[3], Y + 8, ys); 00205 00206 if(!(v->s.flags & CODEC_FLAG_GRAY)) { 00207 dsp->put_pixels_clamped(block[4], v->s.dest[1], us); 00208 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs); 00209 } 00210 } 00211 00215 static void vc1_mc_1mv(VC1Context *v, int dir) 00216 { 00217 MpegEncContext *s = &v->s; 00218 DSPContext *dsp = &v->s.dsp; 00219 uint8_t *srcY, *srcU, *srcV; 00220 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; 00221 00222 if(!v->s.last_picture.data[0])return; 00223 00224 mx = s->mv[dir][0][0]; 00225 my = s->mv[dir][0][1]; 00226 00227 // store motion vectors for further use in B frames 00228 if(s->pict_type == FF_P_TYPE) { 00229 s->current_picture.motion_val[1][s->block_index[0]][0] = mx; 00230 s->current_picture.motion_val[1][s->block_index[0]][1] = my; 00231 } 00232 uvmx = (mx + ((mx & 3) == 3)) >> 1; 00233 uvmy = (my + ((my & 3) == 3)) >> 1; 00234 if(v->fastuvmc) { 00235 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1)); 00236 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1)); 00237 } 00238 if(!dir) { 00239 srcY = s->last_picture.data[0]; 00240 srcU = 
s->last_picture.data[1]; 00241 srcV = s->last_picture.data[2]; 00242 } else { 00243 srcY = s->next_picture.data[0]; 00244 srcU = s->next_picture.data[1]; 00245 srcV = s->next_picture.data[2]; 00246 } 00247 00248 src_x = s->mb_x * 16 + (mx >> 2); 00249 src_y = s->mb_y * 16 + (my >> 2); 00250 uvsrc_x = s->mb_x * 8 + (uvmx >> 2); 00251 uvsrc_y = s->mb_y * 8 + (uvmy >> 2); 00252 00253 if(v->profile != PROFILE_ADVANCED){ 00254 src_x = av_clip( src_x, -16, s->mb_width * 16); 00255 src_y = av_clip( src_y, -16, s->mb_height * 16); 00256 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); 00257 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); 00258 }else{ 00259 src_x = av_clip( src_x, -17, s->avctx->coded_width); 00260 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1); 00261 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); 00262 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); 00263 } 00264 00265 srcY += src_y * s->linesize + src_x; 00266 srcU += uvsrc_y * s->uvlinesize + uvsrc_x; 00267 srcV += uvsrc_y * s->uvlinesize + uvsrc_x; 00268 00269 /* for grayscale we should not try to read from unknown area */ 00270 if(s->flags & CODEC_FLAG_GRAY) { 00271 srcU = s->edge_emu_buffer + 18 * s->linesize; 00272 srcV = s->edge_emu_buffer + 18 * s->linesize; 00273 } 00274 00275 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP) 00276 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3 00277 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){ 00278 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize; 00279 00280 srcY -= s->mspel * (1 + s->linesize); 00281 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2, 00282 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos); 00283 srcY = s->edge_emu_buffer; 00284 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1, 00285 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00286 
ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1, 00287 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00288 srcU = uvbuf; 00289 srcV = uvbuf + 16; 00290 /* if we deal with range reduction we need to scale source blocks */ 00291 if(v->rangeredfrm) { 00292 int i, j; 00293 uint8_t *src, *src2; 00294 00295 src = srcY; 00296 for(j = 0; j < 17 + s->mspel*2; j++) { 00297 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128; 00298 src += s->linesize; 00299 } 00300 src = srcU; src2 = srcV; 00301 for(j = 0; j < 9; j++) { 00302 for(i = 0; i < 9; i++) { 00303 src[i] = ((src[i] - 128) >> 1) + 128; 00304 src2[i] = ((src2[i] - 128) >> 1) + 128; 00305 } 00306 src += s->uvlinesize; 00307 src2 += s->uvlinesize; 00308 } 00309 } 00310 /* if we deal with intensity compensation we need to scale source blocks */ 00311 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) { 00312 int i, j; 00313 uint8_t *src, *src2; 00314 00315 src = srcY; 00316 for(j = 0; j < 17 + s->mspel*2; j++) { 00317 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]]; 00318 src += s->linesize; 00319 } 00320 src = srcU; src2 = srcV; 00321 for(j = 0; j < 9; j++) { 00322 for(i = 0; i < 9; i++) { 00323 src[i] = v->lutuv[src[i]]; 00324 src2[i] = v->lutuv[src2[i]]; 00325 } 00326 src += s->uvlinesize; 00327 src2 += s->uvlinesize; 00328 } 00329 } 00330 srcY += s->mspel * (1 + s->linesize); 00331 } 00332 00333 if(s->mspel) { 00334 dxy = ((my & 3) << 2) | (mx & 3); 00335 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd); 00336 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd); 00337 srcY += s->linesize * 8; 00338 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd); 00339 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd); 00340 } else { // hpel mc - always used for luma 00341 dxy = (my & 2) | ((mx & 2) >> 1); 00342 00343 if(!v->rnd) 
00344 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16); 00345 else 00346 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16); 00347 } 00348 00349 if(s->flags & CODEC_FLAG_GRAY) return; 00350 /* Chroma MC always uses qpel bilinear */ 00351 uvmx = (uvmx&3)<<1; 00352 uvmy = (uvmy&3)<<1; 00353 if(!v->rnd){ 00354 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00355 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00356 }else{ 00357 dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00358 dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00359 } 00360 } 00361 00364 static void vc1_mc_4mv_luma(VC1Context *v, int n) 00365 { 00366 MpegEncContext *s = &v->s; 00367 DSPContext *dsp = &v->s.dsp; 00368 uint8_t *srcY; 00369 int dxy, mx, my, src_x, src_y; 00370 int off; 00371 00372 if(!v->s.last_picture.data[0])return; 00373 mx = s->mv[0][n][0]; 00374 my = s->mv[0][n][1]; 00375 srcY = s->last_picture.data[0]; 00376 00377 off = s->linesize * 4 * (n&2) + (n&1) * 8; 00378 00379 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2); 00380 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2); 00381 00382 if(v->profile != PROFILE_ADVANCED){ 00383 src_x = av_clip( src_x, -16, s->mb_width * 16); 00384 src_y = av_clip( src_y, -16, s->mb_height * 16); 00385 }else{ 00386 src_x = av_clip( src_x, -17, s->avctx->coded_width); 00387 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1); 00388 } 00389 00390 srcY += src_y * s->linesize + src_x; 00391 00392 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP) 00393 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2 00394 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){ 00395 srcY -= s->mspel * (1 + s->linesize); 00396 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2, 00397 src_x - 
s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos); 00398 srcY = s->edge_emu_buffer; 00399 /* if we deal with range reduction we need to scale source blocks */ 00400 if(v->rangeredfrm) { 00401 int i, j; 00402 uint8_t *src; 00403 00404 src = srcY; 00405 for(j = 0; j < 9 + s->mspel*2; j++) { 00406 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128; 00407 src += s->linesize; 00408 } 00409 } 00410 /* if we deal with intensity compensation we need to scale source blocks */ 00411 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) { 00412 int i, j; 00413 uint8_t *src; 00414 00415 src = srcY; 00416 for(j = 0; j < 9 + s->mspel*2; j++) { 00417 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]]; 00418 src += s->linesize; 00419 } 00420 } 00421 srcY += s->mspel * (1 + s->linesize); 00422 } 00423 00424 if(s->mspel) { 00425 dxy = ((my & 3) << 2) | (mx & 3); 00426 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd); 00427 } else { // hpel mc - always used for luma 00428 dxy = (my & 2) | ((mx & 2) >> 1); 00429 if(!v->rnd) 00430 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8); 00431 else 00432 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8); 00433 } 00434 } 00435 00436 static inline int median4(int a, int b, int c, int d) 00437 { 00438 if(a < b) { 00439 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2; 00440 else return (FFMIN(b, c) + FFMAX(a, d)) / 2; 00441 } else { 00442 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2; 00443 else return (FFMIN(a, c) + FFMAX(b, d)) / 2; 00444 } 00445 } 00446 00447 00450 static void vc1_mc_4mv_chroma(VC1Context *v) 00451 { 00452 MpegEncContext *s = &v->s; 00453 DSPContext *dsp = &v->s.dsp; 00454 uint8_t *srcU, *srcV; 00455 int uvmx, uvmy, uvsrc_x, uvsrc_y; 00456 int i, idx, tx = 0, ty = 0; 00457 int mvx[4], mvy[4], intra[4]; 00458 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; 00459 00460 
if(!v->s.last_picture.data[0])return; 00461 if(s->flags & CODEC_FLAG_GRAY) return; 00462 00463 for(i = 0; i < 4; i++) { 00464 mvx[i] = s->mv[0][i][0]; 00465 mvy[i] = s->mv[0][i][1]; 00466 intra[i] = v->mb_type[0][s->block_index[i]]; 00467 } 00468 00469 /* calculate chroma MV vector from four luma MVs */ 00470 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0]; 00471 if(!idx) { // all blocks are inter 00472 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]); 00473 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]); 00474 } else if(count[idx] == 1) { // 3 inter blocks 00475 switch(idx) { 00476 case 0x1: 00477 tx = mid_pred(mvx[1], mvx[2], mvx[3]); 00478 ty = mid_pred(mvy[1], mvy[2], mvy[3]); 00479 break; 00480 case 0x2: 00481 tx = mid_pred(mvx[0], mvx[2], mvx[3]); 00482 ty = mid_pred(mvy[0], mvy[2], mvy[3]); 00483 break; 00484 case 0x4: 00485 tx = mid_pred(mvx[0], mvx[1], mvx[3]); 00486 ty = mid_pred(mvy[0], mvy[1], mvy[3]); 00487 break; 00488 case 0x8: 00489 tx = mid_pred(mvx[0], mvx[1], mvx[2]); 00490 ty = mid_pred(mvy[0], mvy[1], mvy[2]); 00491 break; 00492 } 00493 } else if(count[idx] == 2) { 00494 int t1 = 0, t2 = 0; 00495 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;} 00496 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;} 00497 tx = (mvx[t1] + mvx[t2]) / 2; 00498 ty = (mvy[t1] + mvy[t2]) / 2; 00499 } else { 00500 s->current_picture.motion_val[1][s->block_index[0]][0] = 0; 00501 s->current_picture.motion_val[1][s->block_index[0]][1] = 0; 00502 return; //no need to do MC for inter blocks 00503 } 00504 00505 s->current_picture.motion_val[1][s->block_index[0]][0] = tx; 00506 s->current_picture.motion_val[1][s->block_index[0]][1] = ty; 00507 uvmx = (tx + ((tx&3) == 3)) >> 1; 00508 uvmy = (ty + ((ty&3) == 3)) >> 1; 00509 if(v->fastuvmc) { 00510 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1)); 00511 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1)); 00512 } 00513 00514 uvsrc_x = s->mb_x * 8 + (uvmx >> 2); 00515 uvsrc_y = s->mb_y * 8 + (uvmy >> 2); 00516 00517 
if(v->profile != PROFILE_ADVANCED){ 00518 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); 00519 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); 00520 }else{ 00521 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); 00522 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); 00523 } 00524 00525 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x; 00526 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x; 00527 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP) 00528 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9 00529 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){ 00530 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1, 00531 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00532 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1, 00533 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00534 srcU = s->edge_emu_buffer; 00535 srcV = s->edge_emu_buffer + 16; 00536 00537 /* if we deal with range reduction we need to scale source blocks */ 00538 if(v->rangeredfrm) { 00539 int i, j; 00540 uint8_t *src, *src2; 00541 00542 src = srcU; src2 = srcV; 00543 for(j = 0; j < 9; j++) { 00544 for(i = 0; i < 9; i++) { 00545 src[i] = ((src[i] - 128) >> 1) + 128; 00546 src2[i] = ((src2[i] - 128) >> 1) + 128; 00547 } 00548 src += s->uvlinesize; 00549 src2 += s->uvlinesize; 00550 } 00551 } 00552 /* if we deal with intensity compensation we need to scale source blocks */ 00553 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) { 00554 int i, j; 00555 uint8_t *src, *src2; 00556 00557 src = srcU; src2 = srcV; 00558 for(j = 0; j < 9; j++) { 00559 for(i = 0; i < 9; i++) { 00560 src[i] = v->lutuv[src[i]]; 00561 src2[i] = v->lutuv[src2[i]]; 00562 } 00563 src += s->uvlinesize; 00564 src2 += s->uvlinesize; 00565 } 00566 } 00567 } 00568 00569 /* Chroma MC always uses qpel bilinear */ 00570 uvmx = (uvmx&3)<<1; 00571 uvmy = (uvmy&3)<<1; 00572 if(!v->rnd){ 00573 
dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00574 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00575 }else{ 00576 dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00577 dsp->put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00578 } 00579 } 00580 00581 /***********************************************************************/ 00592 #define GET_MQUANT() \ 00593 if (v->dquantfrm) \ 00594 { \ 00595 int edges = 0; \ 00596 if (v->dqprofile == DQPROFILE_ALL_MBS) \ 00597 { \ 00598 if (v->dqbilevel) \ 00599 { \ 00600 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \ 00601 } \ 00602 else \ 00603 { \ 00604 mqdiff = get_bits(gb, 3); \ 00605 if (mqdiff != 7) mquant = v->pq + mqdiff; \ 00606 else mquant = get_bits(gb, 5); \ 00607 } \ 00608 } \ 00609 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \ 00610 edges = 1 << v->dqsbedge; \ 00611 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \ 00612 edges = (3 << v->dqsbedge) % 15; \ 00613 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \ 00614 edges = 15; \ 00615 if((edges&1) && !s->mb_x) \ 00616 mquant = v->altpq; \ 00617 if((edges&2) && s->first_slice_line) \ 00618 mquant = v->altpq; \ 00619 if((edges&4) && s->mb_x == (s->mb_width - 1)) \ 00620 mquant = v->altpq; \ 00621 if((edges&8) && s->mb_y == (s->mb_height - 1)) \ 00622 mquant = v->altpq; \ 00623 } 00624 00632 #define GET_MVDATA(_dmv_x, _dmv_y) \ 00633 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table,\ 00634 VC1_MV_DIFF_VLC_BITS, 2); \ 00635 if (index > 36) \ 00636 { \ 00637 mb_has_coeffs = 1; \ 00638 index -= 37; \ 00639 } \ 00640 else mb_has_coeffs = 0; \ 00641 s->mb_intra = 0; \ 00642 if (!index) { _dmv_x = _dmv_y = 0; } \ 00643 else if (index == 35) \ 00644 { \ 00645 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \ 00646 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \ 00647 } \ 00648 else if 
(index == 36) \ 00649 { \ 00650 _dmv_x = 0; \ 00651 _dmv_y = 0; \ 00652 s->mb_intra = 1; \ 00653 } \ 00654 else \ 00655 { \ 00656 index1 = index%6; \ 00657 if (!s->quarter_sample && index1 == 5) val = 1; \ 00658 else val = 0; \ 00659 if(size_table[index1] - val > 0) \ 00660 val = get_bits(gb, size_table[index1] - val); \ 00661 else val = 0; \ 00662 sign = 0 - (val&1); \ 00663 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \ 00664 \ 00665 index1 = index/6; \ 00666 if (!s->quarter_sample && index1 == 5) val = 1; \ 00667 else val = 0; \ 00668 if(size_table[index1] - val > 0) \ 00669 val = get_bits(gb, size_table[index1] - val); \ 00670 else val = 0; \ 00671 sign = 0 - (val&1); \ 00672 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \ 00673 } 00674 00677 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra) 00678 { 00679 int xy, wrap, off = 0; 00680 int16_t *A, *B, *C; 00681 int px, py; 00682 int sum; 00683 00684 /* scale MV difference to be quad-pel */ 00685 dmv_x <<= 1 - s->quarter_sample; 00686 dmv_y <<= 1 - s->quarter_sample; 00687 00688 wrap = s->b8_stride; 00689 xy = s->block_index[n]; 00690 00691 if(s->mb_intra){ 00692 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0; 00693 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0; 00694 s->current_picture.motion_val[1][xy][0] = 0; 00695 s->current_picture.motion_val[1][xy][1] = 0; 00696 if(mv1) { /* duplicate motion data for 1-MV block */ 00697 s->current_picture.motion_val[0][xy + 1][0] = 0; 00698 s->current_picture.motion_val[0][xy + 1][1] = 0; 00699 s->current_picture.motion_val[0][xy + wrap][0] = 0; 00700 s->current_picture.motion_val[0][xy + wrap][1] = 0; 00701 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0; 00702 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0; 00703 s->current_picture.motion_val[1][xy + 1][0] = 0; 00704 s->current_picture.motion_val[1][xy + 1][1] = 0; 00705 
s->current_picture.motion_val[1][xy + wrap][0] = 0; 00706 s->current_picture.motion_val[1][xy + wrap][1] = 0; 00707 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0; 00708 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0; 00709 } 00710 return; 00711 } 00712 00713 C = s->current_picture.motion_val[0][xy - 1]; 00714 A = s->current_picture.motion_val[0][xy - wrap]; 00715 if(mv1) 00716 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2; 00717 else { 00718 //in 4-MV mode different blocks have different B predictor position 00719 switch(n){ 00720 case 0: 00721 off = (s->mb_x > 0) ? -1 : 1; 00722 break; 00723 case 1: 00724 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1; 00725 break; 00726 case 2: 00727 off = 1; 00728 break; 00729 case 3: 00730 off = -1; 00731 } 00732 } 00733 B = s->current_picture.motion_val[0][xy - wrap + off]; 00734 00735 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds 00736 if(s->mb_width == 1) { 00737 px = A[0]; 00738 py = A[1]; 00739 } else { 00740 px = mid_pred(A[0], B[0], C[0]); 00741 py = mid_pred(A[1], B[1], C[1]); 00742 } 00743 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds 00744 px = C[0]; 00745 py = C[1]; 00746 } else { 00747 px = py = 0; 00748 } 00749 /* Pullback MV as specified in 8.3.5.3.4 */ 00750 { 00751 int qx, qy, X, Y; 00752 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0); 00753 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 
32 : 0); 00754 X = (s->mb_width << 6) - 4; 00755 Y = (s->mb_height << 6) - 4; 00756 if(mv1) { 00757 if(qx + px < -60) px = -60 - qx; 00758 if(qy + py < -60) py = -60 - qy; 00759 } else { 00760 if(qx + px < -28) px = -28 - qx; 00761 if(qy + py < -28) py = -28 - qy; 00762 } 00763 if(qx + px > X) px = X - qx; 00764 if(qy + py > Y) py = Y - qy; 00765 } 00766 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */ 00767 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) { 00768 if(is_intra[xy - wrap]) 00769 sum = FFABS(px) + FFABS(py); 00770 else 00771 sum = FFABS(px - A[0]) + FFABS(py - A[1]); 00772 if(sum > 32) { 00773 if(get_bits1(&s->gb)) { 00774 px = A[0]; 00775 py = A[1]; 00776 } else { 00777 px = C[0]; 00778 py = C[1]; 00779 } 00780 } else { 00781 if(is_intra[xy - 1]) 00782 sum = FFABS(px) + FFABS(py); 00783 else 00784 sum = FFABS(px - C[0]) + FFABS(py - C[1]); 00785 if(sum > 32) { 00786 if(get_bits1(&s->gb)) { 00787 px = A[0]; 00788 py = A[1]; 00789 } else { 00790 px = C[0]; 00791 py = C[1]; 00792 } 00793 } 00794 } 00795 } 00796 /* store MV using signed modulus of MV range defined in 4.11 */ 00797 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; 00798 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y; 00799 if(mv1) { /* duplicate motion data for 1-MV block */ 00800 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0]; 00801 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1]; 00802 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0]; 00803 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1]; 00804 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0]; 00805 s->current_picture.motion_val[0][xy + wrap + 1][1] = 
s->current_picture.motion_val[0][xy][1]; 00806 } 00807 } 00808 00811 static void vc1_interp_mc(VC1Context *v) 00812 { 00813 MpegEncContext *s = &v->s; 00814 DSPContext *dsp = &v->s.dsp; 00815 uint8_t *srcY, *srcU, *srcV; 00816 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; 00817 00818 if(!v->s.next_picture.data[0])return; 00819 00820 mx = s->mv[1][0][0]; 00821 my = s->mv[1][0][1]; 00822 uvmx = (mx + ((mx & 3) == 3)) >> 1; 00823 uvmy = (my + ((my & 3) == 3)) >> 1; 00824 if(v->fastuvmc) { 00825 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1)); 00826 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1)); 00827 } 00828 srcY = s->next_picture.data[0]; 00829 srcU = s->next_picture.data[1]; 00830 srcV = s->next_picture.data[2]; 00831 00832 src_x = s->mb_x * 16 + (mx >> 2); 00833 src_y = s->mb_y * 16 + (my >> 2); 00834 uvsrc_x = s->mb_x * 8 + (uvmx >> 2); 00835 uvsrc_y = s->mb_y * 8 + (uvmy >> 2); 00836 00837 if(v->profile != PROFILE_ADVANCED){ 00838 src_x = av_clip( src_x, -16, s->mb_width * 16); 00839 src_y = av_clip( src_y, -16, s->mb_height * 16); 00840 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); 00841 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); 00842 }else{ 00843 src_x = av_clip( src_x, -17, s->avctx->coded_width); 00844 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1); 00845 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); 00846 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); 00847 } 00848 00849 srcY += src_y * s->linesize + src_x; 00850 srcU += uvsrc_y * s->uvlinesize + uvsrc_x; 00851 srcV += uvsrc_y * s->uvlinesize + uvsrc_x; 00852 00853 /* for grayscale we should not try to read from unknown area */ 00854 if(s->flags & CODEC_FLAG_GRAY) { 00855 srcU = s->edge_emu_buffer + 18 * s->linesize; 00856 srcV = s->edge_emu_buffer + 18 * s->linesize; 00857 } 00858 00859 if(v->rangeredfrm 00860 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3 00861 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 
- s->mspel*3){ 00862 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize; 00863 00864 srcY -= s->mspel * (1 + s->linesize); 00865 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2, 00866 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos); 00867 srcY = s->edge_emu_buffer; 00868 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1, 00869 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00870 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1, 00871 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); 00872 srcU = uvbuf; 00873 srcV = uvbuf + 16; 00874 /* if we deal with range reduction we need to scale source blocks */ 00875 if(v->rangeredfrm) { 00876 int i, j; 00877 uint8_t *src, *src2; 00878 00879 src = srcY; 00880 for(j = 0; j < 17 + s->mspel*2; j++) { 00881 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128; 00882 src += s->linesize; 00883 } 00884 src = srcU; src2 = srcV; 00885 for(j = 0; j < 9; j++) { 00886 for(i = 0; i < 9; i++) { 00887 src[i] = ((src[i] - 128) >> 1) + 128; 00888 src2[i] = ((src2[i] - 128) >> 1) + 128; 00889 } 00890 src += s->uvlinesize; 00891 src2 += s->uvlinesize; 00892 } 00893 } 00894 srcY += s->mspel * (1 + s->linesize); 00895 } 00896 00897 if(s->mspel) { 00898 dxy = ((my & 3) << 2) | (mx & 3); 00899 dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd); 00900 dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd); 00901 srcY += s->linesize * 8; 00902 dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd); 00903 dsp->avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd); 00904 } else { // hpel mc 00905 dxy = (my & 2) | ((mx & 2) >> 1); 00906 00907 if(!v->rnd) 00908 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16); 00909 else 00910 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, 
s->linesize, 16); 00911 } 00912 00913 if(s->flags & CODEC_FLAG_GRAY) return; 00914 /* Chroma MC always uses qpel blilinear */ 00915 uvmx = (uvmx&3)<<1; 00916 uvmy = (uvmy&3)<<1; 00917 if(!v->rnd){ 00918 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00919 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00920 }else{ 00921 dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); 00922 dsp->avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); 00923 } 00924 } 00925 00926 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs) 00927 { 00928 int n = bfrac; 00929 00930 #if B_FRACTION_DEN==256 00931 if(inv) 00932 n -= 256; 00933 if(!qs) 00934 return 2 * ((value * n + 255) >> 9); 00935 return (value * n + 128) >> 8; 00936 #else 00937 if(inv) 00938 n -= B_FRACTION_DEN; 00939 if(!qs) 00940 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN)); 00941 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN; 00942 #endif 00943 } 00944 00947 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode) 00948 { 00949 if(v->use_ic) { 00950 v->mv_mode2 = v->mv_mode; 00951 v->mv_mode = MV_PMODE_INTENSITY_COMP; 00952 } 00953 if(direct) { 00954 vc1_mc_1mv(v, 0); 00955 vc1_interp_mc(v); 00956 if(v->use_ic) v->mv_mode = v->mv_mode2; 00957 return; 00958 } 00959 if(mode == BMV_TYPE_INTERPOLATED) { 00960 vc1_mc_1mv(v, 0); 00961 vc1_interp_mc(v); 00962 if(v->use_ic) v->mv_mode = v->mv_mode2; 00963 return; 00964 } 00965 00966 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2; 00967 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD)); 00968 if(v->use_ic) v->mv_mode = v->mv_mode2; 00969 } 00970 00971 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype) 00972 { 00973 MpegEncContext *s = &v->s; 00974 int xy, wrap, off = 0; 00975 int16_t *A, 
*B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if(s->mb_intra) {
        /* intra MB: both direction MVs are zero */
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    /* direct-mode candidates: scale the co-located MV of the next anchor
     * picture by the B fraction, forward ([0]) and backward ([1]) */
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if(direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* neighbour MVs: C = left, A = above, B = above-right
         * (above-left for the last MB in a row) */
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap*2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap*2 + off];

        if(!s->mb_x) C[0] = C[1] = 0;
        if(!s->first_slice_line) { // predictor A is not out of bounds
            if(s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if(s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if(v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if(qx + px < -28) px = -28 - qx;
                if(qy + py < -28) py = -28 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if(qx + px < -60) px = -60 - qx;
                if(qy + py < -60) py = -60 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5
         * NOTE(review): intentionally compiled out with if(0 && ...) in the
         * original source — hybrid prediction is not applied to B frames.
         * Kept as-is. */
        if(0 && !s->first_slice_line && s->mb_x) {
            if(is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if(is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if(sum > 32) {
                    if(get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* same prediction as above, for the backward (list 1) vector */
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap*2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap*2 + off];

        if(!s->mb_x) C[0] = C[1] = 0;
        if(!s->first_slice_line) { // predictor A is not out of bounds
            if(s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if(s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if(v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if(qx + px < -28) px = -28 - qx;
                if(qy + py < -28) py = -28 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if(qx + px < -60) px = -60 - qx;
                if(qy + py < -60) py = -60 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5
         * NOTE(review): intentionally compiled out, see forward branch above. */
        if(0 && !s->first_slice_line && s->mb_x) {
            if(is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if(is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if(sum > 32) {
                    if(get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}

/** Get predicted DC value for I-frames only.
 * Prediction direction is written to *dir_ptr: 0 = top, 1 = left.
 * @param s        MpegEncContext
 * @param overlap  overlap filter flag (with pq decides the default edge value)
 * @param pq       picture quantizer
 * @param[in]  n   block index (0-3 luma, 4-5 chroma)
 * @param[out] dc_val_ptr  position in the DC predictor plane for this block
 * @param[out] dir_ptr     chosen prediction direction
 */
static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                                int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred, scale;
    int16_t *dc_val;
    /* default DC values for out-of-picture neighbours, indexed by DC scale */
    static const uint16_t dcpred[32] = {
        -1, 1024,  512,  341,  256,  205,  171,  146,  128,
            114,  102,   93,   85,   79,   73,   68,   64,
             60,   57,   54,   51,   49,   47,   45,   43,
             41,   39,   38,   37,   35,   34,   33
    };

    /* find prediction - wmv3_dc_scale always used here in fact */
    if (n < 4) scale = s->y_dc_scale;
    else       scale = s->c_dc_scale;

    wrap = s->block_wrap[n];
    dc_val= s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];

    if (pq < 9 || !overlap)
    {
        /* Set outer values: out-of-picture neighbours predict from dcpred */
        if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
        if (s->mb_x == 0 && (n!=1 && n!=3))
b=c=0;
    }

    /* gradient rule: pick left (C) or top (A) depending on which edge is smoother */
    if (abs(a - b) <= abs(b - c)) {
        pred = c;
        *dir_ptr = 1;//left
    } else {
        pred = a;
        *dir_ptr = 0;//top
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}

/** Get predicted DC value (generic version used in inter frames).
 * Prediction direction is written to *dir_ptr: 0 = top, 1 = left.
 * Unavailable neighbours predict 0.
 * NOTE(review): the 'overlap' and 'pq' parameters are not referenced in this
 * function body — kept for signature compatibility with vc1_i_pred_dc.
 * @param a_avail flag: top neighbour block is available
 * @param c_avail flag: left neighbour block is available
 * @param[out] dc_val_ptr  position in the DC predictor plane for this block
 * @param[out] dir_ptr     chosen prediction direction
 */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;

    wrap = s->block_wrap[n];
    dc_val= s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed: neighbours may have been coded with a
     * different quantizer, so rescale their stored DC through ff_vc1_dqscale */
    q1 = s->current_picture.qscale_table[mb_pos];
    if(c_avail && (n!= 1 && n!=3)) {
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if(q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }
    if(a_avail && (n!= 2 && n!=3)) {
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if(q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }
    if(a_avail && c_avail && (n!=3)) {
        /* B (top-left) may live in the MB to the left, above, or both */
        int off = mb_pos;
        if(n != 1) off--;
        if(n != 2) off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if(q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }

    if(a_avail && c_avail) {
        if(abs(a - b) <= abs(b - c)) {
            pred = c;
            *dir_ptr = 1;//left
        } else {
            pred = a;
            *dir_ptr = 0;//top
        }
    } else if(a_avail) {
        pred = a;
        *dir_ptr = 0;//top
    } else if(c_avail) {
        pred = c;
        *dir_ptr = 1;//left
    } else {
        pred = 0;
        *dir_ptr = 1;//left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
// Block group

/** Predict the "coded" status of a block from its A (left), B (top-left)
 * and C (top) neighbours, MPEG-4 style.
 * @param[out] coded_block_ptr slot in the coded_block plane to update with
 *                             this block's actual status
 * @return predicted coded status
 */
static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
{
    int xy, wrap, pred, a, b, c;

    xy = s->block_index[n];
    wrap = s->b8_stride;

    /* B C
     * A X
     */
    a = s->coded_block[xy - 1       ];
    b = s->coded_block[xy - 1 - wrap];
    c = s->coded_block[xy     - wrap];

    if (b == c) {
        pred = a;
    } else {
        pred = c;
    }

    /* store value */
    *coded_block_ptr = &s->coded_block[xy];

    return pred;
}

/** Decode one AC coefficient (run/level pair plus "last" flag).
 * @param v          VC1Context
 * @param[out] last  1 if this is the last coefficient of the block
 * @param[out] skip  run of zero coefficients preceding the value
 * @param[out] value decoded (signed) level
 * @param codingset  which AC VLC/LUT set to use
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != vc1_ac_sizes[codingset] - 1) {
        /* regular (non-escape) symbol: run/level come straight from the LUT */
        run = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        /* bitstream overread also terminates the block (get_bits_left < 0) */
        lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if(get_bits1(gb))
            level = -level;
    } else {
        /* escape modes 0/1 extend level or run; mode 2 codes them explicitly */
        escape = decode210(gb);
        if (escape != 2) {
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst = index >= vc1_last_decode_table[codingset];
            if(escape == 0) {
                if(lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if(lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if(get_bits1(gb))
                level = -level;
        }
        else {
            /* escape mode 3: run and level are fixed-length coded; the field
             * widths are decoded once per frame and cached in the context */
            int sign;
            lst = get_bits1(gb);
            if(v->s.esc3_level_length == 0) {
                if(v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if(!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { //table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run = get_bits(gb, v->s.esc3_run_length);
            sign = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if(sign)
                level = -level;
        }
    }

    *last = lst;
    *skip = run;
    *value = level;
}

/** Decode an intra block in I-frames — simpler/faster than the generic
 * vc1_decode_intra_block since the quantizer is constant (v->pq).
 * @param v         VC1Context
 * @param block     destination coefficient block
 * @param[in] n     block index (0-3 luma, 4-5 chroma)
 * @param coded     are AC coefficients present?
 * @param codingset AC VLC set to use
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff)
    {
        if (dcdiff == 119 /* ESC index value */)
        {
            /* TODO: Optimize */
            if (v->pq == 1) dcdiff = get_bits(gb, 10);
            else if (v->pq == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        }
        else
        {
            /* low quantizers code extra precision bits for the differential */
            if (v->pq == 1)
                dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
            else if (v->pq == 2)
                dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }
    /* Skip ? */
    if (!coded) {
        goto not_coded;
    }

    //AC Decoding
    i = 1;

    {
        int last = 0, skip, value;
        const int8_t *zz_table;
        int scale;
        int k;

        scale = v->pq * 2 + v->halfpq;

        /* scan pattern depends on AC prediction and its direction */
        if(v->s.ac_pred) {
            if(!dc_pred_dir)
                zz_table = wmv1_scantable[2];
            else
                zz_table = wmv1_scantable[3];
        } else
            zz_table = wmv1_scantable[1];

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;
        /* ac_val -> the neighbour's saved coefficients used as predictors;
         * ac_val2 -> this block's slot, updated below */
        if(dc_pred_dir) //left
            ac_val -= 16;
        else //top
            ac_val -= 16 * s->block_wrap[n];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if(i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if(s->ac_pred) {
            if(dc_pred_dir) { //left: predict first column
                for(k = 1; k < 8; k++)
                    block[k << 3] += ac_val[k];
            } else { //top: predict first row
                for(k = 1; k < 8; k++)
                    block[k] += ac_val[k + 8];
            }
        }
        /* save AC coeffs for further prediction */
        for(k = 1; k < 8; k++) {
            ac_val2[k] = block[k << 3];
            ac_val2[k + 8] = block[k];
        }

        /* scale AC coeffs */
        for(k = 1; k < 64; k++)
            if(block[k]) {
                block[k] *= scale;
                if(!v->pquantizer)
                    block[k] += (block[k] < 0) ?
-v->pq : v->pq;
            }

        if(s->ac_pred) i = 63;
    }

not_coded:
    if(!coded) {
        /* no coefficients coded: still maintain the AC predictor plane and
         * synthesize the predicted row/column if AC prediction is on */
        int k, scale;
        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;

        i = 0;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        if(dc_pred_dir) {//left
            ac_val -= 16;
            if(s->ac_pred)
                memcpy(ac_val2, ac_val, 8 * 2);
        } else {//top
            ac_val -= 16 * s->block_wrap[n];
            if(s->ac_pred)
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        }

        /* apply AC prediction if needed */
        if(s->ac_pred) {
            if(dc_pred_dir) { //left
                for(k = 1; k < 8; k++) {
                    block[k << 3] = ac_val[k] * scale;
                    if(!v->pquantizer && block[k << 3])
                        block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
                }
            } else { //top
                for(k = 1; k < 8; k++) {
                    block[k] = ac_val[k + 8] * scale;
                    if(!v->pquantizer && block[k])
                        block[k] += (block[k] < 0) ? -v->pq : v->pq;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}

/** Decode an intra block in I-frames, Advanced Profile variant:
 * per-MB quantizer (mquant) and neighbour availability are honoured.
 * @param v         VC1Context
 * @param block     destination coefficient block
 * @param[in] n     block index (0-3 luma, 4-5 chroma)
 * @param coded     are AC coefficients present?
 * @param codingset AC VLC set to use
 * @param mquant    quantizer for this macroblock
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Illegal
DC VLC\n");
        return -1;
    }
    if (dcdiff)
    {
        if (dcdiff == 119 /* ESC index value */)
        {
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        }
        else
        {
            /* low quantizers code extra precision bits for the differential */
            if (mquant == 1)
                dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all */
    if(!a_avail && !c_avail) use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    /* halfpq only applies when this MB uses the picture quantizer */
    scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);

    if(dc_pred_dir) //left
        ac_val -= 16;
    else //top
        ac_val -= 16 * s->block_wrap[n];

    /* q2 = quantizer of the predicting neighbour (q1 for intra-MB neighbours) */
    q1 = s->current_picture.qscale_table[mb_pos];
    if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
    if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    if(dc_pred_dir && n==1) q2 = q1;
    if(!dc_pred_dir && n==2) q2 = q1;
    if(n==3) q2 = q1;

    if(coded) {
        int last = 0, skip, value;
        const int8_t *zz_table;
        int k;

        /* scan pattern depends on AC prediction and its direction */
        if(v->s.ac_pred) {
            if(!dc_pred_dir)
                zz_table = wmv1_scantable[2];
            else
                zz_table = wmv1_scantable[3];
        } else
            zz_table = wmv1_scantable[1];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if(i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if(use_pred) {
            /* scale predictors if needed*/
            if(q2 && q1!=q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if(dc_pred_dir) { //left
                    for(k = 1; k < 8; k++)
                        block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { //top
                    for(k = 1; k < 8; k++)
                        block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if(dc_pred_dir) { //left
                    for(k = 1; k < 8; k++)
                        block[k << 3] += ac_val[k];
                } else { //top
                    for(k = 1; k < 8; k++)
                        block[k] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for(k = 1; k < 8; k++) {
            ac_val2[k] = block[k << 3];
            ac_val2[k + 8] = block[k];
        }

        /* scale AC coeffs */
        for(k = 1; k < 64; k++)
            if(block[k]) {
                block[k] *= scale;
                if(!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if(use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if(dc_pred_dir) {//left
            if(use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if(q2 && q1!=q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    for(k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else {//top
            if(use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if(q2 && q1!=q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    for(k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if(use_pred) {
            if(dc_pred_dir) { //left
                for(k = 1; k < 8; k++) {
                    block[k << 3] = ac_val2[k] * scale;
                    if(!v->pquantizer && block[k << 3])
                        block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
                }
            } else { //top
                for(k = 1; k < 8; k++) {
                    block[k] = ac_val2[k + 8] * scale;
                    if(!v->pquantizer && block[k])
                        block[k] += (block[k] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}

/** Decode an intra block in inter frames — more generic version of
 * vc1_decode_i_block (per-MB quantizer, availability-adjusted prediction).
 * @param v         VC1Context
 * @param block     destination coefficient block
 * @param[in] n     block index (0-3 luma, 4-5 chroma)
 * @param coded     are AC coefficients present?
 * @param mquant    quantizer for this macroblock
 * @param codingset AC VLC set to use
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;

    s->dsp.clear_block(block);

    /* XXX: Guard against dumb values of mquant
     * NOTE(review): values below 1 clamp to 0, not 1 — looks suspicious but
     * kept as-is; confirm against the quantizer derivation in the caller. */
    mquant = (mquant < 1) ? 0 : ( (mquant>31) ?
31 : mquant );

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[mquant];
    s->c_dc_scale = s->c_dc_scale_table[mquant];

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff)
    {
        if (dcdiff == 119 /* ESC index value */)
        {
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        }
        else
        {
            /* low quantizers code extra precision bits for the differential */
            if (mquant == 1)
                dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */

    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all and adjust direction if needed */
    if(!a_avail) dc_pred_dir = 1;
    if(!c_avail) dc_pred_dir = 0;
    if(!a_avail && !c_avail) use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    /* NOTE(review): unlike vc1_decode_i_block_adv, halfpq is added here
     * without the (mquant == v->pq) guard — verify against SMPTE 421M. */
    scale = mquant * 2 + v->halfpq;

    if(dc_pred_dir) //left
        ac_val -= 16;
    else //top
        ac_val -= 16 * s->block_wrap[n];

    /* q2 = quantizer of the predicting neighbour (q1 for intra-MB neighbours) */
    q1 = s->current_picture.qscale_table[mb_pos];
    if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
    if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    if(dc_pred_dir && n==1) q2 = q1;
    if(!dc_pred_dir && n==2) q2 = q1;
    if(n==3) q2 = q1;

    if(coded) {
        int last = 0, skip, value;
        const int8_t *zz_table;
        int k;

        zz_table = wmv1_scantable[0];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if(i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if(use_pred) {
            /* scale predictors if needed*/
            if(q2 && q1!=q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if(dc_pred_dir) { //left
                    for(k = 1; k < 8; k++)
                        block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { //top
                    for(k = 1; k < 8; k++)
                        block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if(dc_pred_dir) { //left
                    for(k = 1; k < 8; k++)
                        block[k << 3] += ac_val[k];
                } else { //top
                    for(k = 1; k < 8; k++)
                        block[k] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for(k = 1; k < 8; k++) {
            ac_val2[k] = block[k << 3];
            ac_val2[k + 8] = block[k];
        }

        /* scale AC coeffs */
        for(k = 1; k < 64; k++)
            if(block[k]) {
                block[k] *= scale;
                if(!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if(use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if(dc_pred_dir) {//left
            if(use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if(q2 && q1!=q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    for(k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else {//top
            if(use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if(q2 && q1!=q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    for(k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if(use_pred) {
            if(dc_pred_dir) { //left
                for(k = 1; k < 8; k++) {
                    block[k << 3] = ac_val2[k] * scale;
                    if(!v->pquantizer && block[k << 3])
                        block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
                }
            } else { //top
                for(k = 1; k < 8; k++) {
                    block[k] = ac_val2[k + 8] * scale;
                    if(!v->pquantizer && block[k])
                        block[k] += (block[k] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}

/** Decode a P-frame residual block: pick the transform type, decode the
 * coefficients, inverse-transform into dst and optionally loop-filter.
 * @return bitmask of coded 4x4 subblocks (used for loop-filter decisions)
 */
static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block,
                              uint8_t *dst, int linesize, int skip_block, int apply_filter, int cbp_top, int cbp_left)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int subblkpat = 0;
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;
    int pat = 0;

    s->dsp.clear_block(block);

    if(ttmb == -1) {
        /* per-block transform type signalled in the bitstream */
        ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
    }
    if(ttblk == TT_4X4) {
        subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    }
    if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
        subblkpat = decode012(gb);
        if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
        if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
        if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
    }
    scale = 2 * mquant + ((v->pq == mquant) ?
v->halfpq : 0); 02006 02007 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT 02008 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) { 02009 subblkpat = 2 - (ttblk == TT_8X4_TOP); 02010 ttblk = TT_8X4; 02011 } 02012 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) { 02013 subblkpat = 2 - (ttblk == TT_4X8_LEFT); 02014 ttblk = TT_4X8; 02015 } 02016 switch(ttblk) { 02017 case TT_8X8: 02018 pat = 0xF; 02019 i = 0; 02020 last = 0; 02021 while (!last) { 02022 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); 02023 i += skip; 02024 if(i > 63) 02025 break; 02026 idx = wmv1_scantable[0][i++]; 02027 block[idx] = value * scale; 02028 if(!v->pquantizer) 02029 block[idx] += (block[idx] < 0) ? -mquant : mquant; 02030 } 02031 if(!skip_block){ 02032 if(i==1) 02033 s->dsp.vc1_inv_trans_8x8_dc(dst, linesize, block); 02034 else{ 02035 s->dsp.vc1_inv_trans_8x8(block); 02036 s->dsp.add_pixels_clamped(block, dst, linesize); 02037 } 02038 if(apply_filter && cbp_top & 0xC) 02039 s->dsp.vc1_v_loop_filter8(dst, linesize, v->pq); 02040 if(apply_filter && cbp_left & 0xA) 02041 s->dsp.vc1_h_loop_filter8(dst, linesize, v->pq); 02042 } 02043 break; 02044 case TT_4X4: 02045 pat = ~subblkpat & 0xF; 02046 for(j = 0; j < 4; j++) { 02047 last = subblkpat & (1 << (3 - j)); 02048 i = 0; 02049 off = (j & 1) * 4 + (j & 2) * 16; 02050 while (!last) { 02051 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); 02052 i += skip; 02053 if(i > 15) 02054 break; 02055 idx = ff_vc1_simple_progressive_4x4_zz[i++]; 02056 block[idx + off] = value * scale; 02057 if(!v->pquantizer) 02058 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant; 02059 } 02060 if(!(subblkpat & (1 << (3 - j))) && !skip_block){ 02061 if(i==1) 02062 s->dsp.vc1_inv_trans_4x4_dc(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off); 02063 else 02064 s->dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off); 02065 if(apply_filter && (j&2 ? 
pat & (1<<(j-2)) : (cbp_top & (1 << (j + 2))))) 02066 s->dsp.vc1_v_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq); 02067 if(apply_filter && (j&1 ? pat & (1<<(j-1)) : (cbp_left & (1 << (j + 1))))) 02068 s->dsp.vc1_h_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq); 02069 } 02070 } 02071 break; 02072 case TT_8X4: 02073 pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF; 02074 for(j = 0; j < 2; j++) { 02075 last = subblkpat & (1 << (1 - j)); 02076 i = 0; 02077 off = j * 32; 02078 while (!last) { 02079 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); 02080 i += skip; 02081 if(i > 31) 02082 break; 02083 idx = v->zz_8x4[i++]+off; 02084 block[idx] = value * scale; 02085 if(!v->pquantizer) 02086 block[idx] += (block[idx] < 0) ? -mquant : mquant; 02087 } 02088 if(!(subblkpat & (1 << (1 - j))) && !skip_block){ 02089 if(i==1) 02090 s->dsp.vc1_inv_trans_8x4_dc(dst + j*4*linesize, linesize, block + off); 02091 else 02092 s->dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off); 02093 if(apply_filter && j ? pat & 0x3 : (cbp_top & 0xC)) 02094 s->dsp.vc1_v_loop_filter8(dst + j*4*linesize, linesize, v->pq); 02095 if(apply_filter && cbp_left & (2 << j)) 02096 s->dsp.vc1_h_loop_filter4(dst + j*4*linesize, linesize, v->pq); 02097 } 02098 } 02099 break; 02100 case TT_4X8: 02101 pat = ~(subblkpat*5) & 0xF; 02102 for(j = 0; j < 2; j++) { 02103 last = subblkpat & (1 << (1 - j)); 02104 i = 0; 02105 off = j * 4; 02106 while (!last) { 02107 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); 02108 i += skip; 02109 if(i > 31) 02110 break; 02111 idx = v->zz_4x8[i++]+off; 02112 block[idx] = value * scale; 02113 if(!v->pquantizer) 02114 block[idx] += (block[idx] < 0) ? 
-mquant : mquant;
            }
            if(!(subblkpat & (1 << (1 - j))) && !skip_block){
                if(i==1)
                    s->dsp.vc1_inv_trans_4x8_dc(dst + j*4, linesize, block + off);
                else
                    s->dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
                if(apply_filter && cbp_top & (2 << j))
                    s->dsp.vc1_v_loop_filter4(dst + j*4, linesize, v->pq);
                /* NOTE(review): parses as (apply_filter && j) ? pat & 0x5 : (cbp_left & 0xA)
                 * (&& binds tighter than ?:) — callers pass cbp_left == 0 when
                 * filtering is off, so this appears harmless; verify intent. */
                if(apply_filter && j ? pat & 0x5 : (cbp_left & 0xA))
                    s->dsp.vc1_h_loop_filter8(dst + j*4, linesize, v->pq);
            }
        }
        break;
    }
    return pat;
}

// Macroblock group

/* lookup tables used by the GET_MQUANT/GET_MVDATA bitstream macros
 * (escape-size and offset tables; see vc1.h for the macro definitions —
 * presumably indexed by the unary prefix length; verify against the macros) */
static const int size_table  [6] = { 0, 2, 3, 4,  5,  8 };
static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };

/**
 * Decode one macroblock of a P frame.
 * Handles both 1MV mode (single motion vector for the whole MB) and
 * 4MV mode (one MV per luma block), each with a skipped and a coded path.
 * @return 0 on success (the trailing -1 path is never reached; see note below)
 */
static int vc1_decode_p_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */

    int mb_has_coeffs = 1; /* last_flag */
    int dmv_x, dmv_y; /* Differential MV components */
    int index, index1; /* LUT indexes */
    int val, sign; /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, fourmv;
    int block_cbp = 0, pat;
    int apply_loop_filter;

    mquant = v->pq; /* Loosy initialization */

    /* MV-type and skip flags: either raw in the bitstream or from a
     * previously decoded bitplane */
    if (v->mv_type_is_raw)
        fourmv = get_bits1(gb);
    else
        fourmv = v->mv_type_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
    if (!fourmv) /* 1MV mode */
    {
        if (!skipped)
        {
            GET_MVDATA(dmv_x, dmv_y);

            if (s->mb_intra) {
                s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
                s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
            }
            s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
            vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);

            /* FIXME Set DC val for inter block ? */
            if (s->mb_intra && !mb_has_coeffs)
            {
                GET_MQUANT();
                s->ac_pred = get_bits1(gb);
                cbp = 0;
            }
            else if (mb_has_coeffs)
            {
                if (s->mb_intra) s->ac_pred = get_bits1(gb);
                cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
                GET_MQUANT();
            }
            else
            {
                mquant = v->pq;
                cbp = 0;
            }
            s->current_picture.qscale_table[mb_pos] = mquant;

            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
                                VC1_TTMB_VLC_BITS, 2);
            if(!s->mb_intra) vc1_mc_1mv(v, 0);
            dst_idx = 0;
            /* blocks 0-3 are luma, 4-5 chroma (dst_idx advances at i==4,5) */
            for (i=0; i<6; i++)
            {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                v->mb_type[0][s->block_index[i]] = s->mb_intra;
                if(s->mb_intra) {
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if(i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if(i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];

                    vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
                    if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
                    s->dsp.vc1_inv_trans_8x8(s->block[i]);
                    if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                    if(v->pq >= 9 && v->overlap) {
                        if(v->c_avail)
                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                        if(v->a_avail)
                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                    }
                    /* deblock against neighbours using their stored cbp */
                    if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
                        int left_cbp, top_cbp;
                        if(i & 4){
                            left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
                            top_cbp  = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
                        }else{
                            left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
                            top_cbp  = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
                        }
                        if(left_cbp & 0xC)
                            s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                        if(top_cbp & 0xA)
                            s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                    }
                    block_cbp |= 0xF << (i << 2);
                } else if(val) {
                    int left_cbp = 0, top_cbp = 0, filter = 0;
                    if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
                        filter = 1;
                        if(i & 4){
                            left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
                            top_cbp  = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
                        }else{
                            left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
                            top_cbp  = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
                        }
                        if(left_cbp & 0xC)
                            s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                        if(top_cbp & 0xA)
                            s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                    }
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
                    block_cbp |= pat << (i << 2);
                    if(!v->ttmbf && ttmb < 8) ttmb = -1;
                    first_block = 0;
                }
            }
        }
        else //Skipped
        {
            s->mb_intra = 0;
            for(i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            }
            s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
            s->current_picture.qscale_table[mb_pos] = 0;
            vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
            vc1_mc_1mv(v, 0);
            return 0;
        }
    } //1MV mode
    else //4MV mode
    {
        if (!skipped /* unskipped MB */)
        {
            int intra_count = 0, coded_inter = 0;
            int is_intra[6], is_coded[6];
            /* Get CBPCY */
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            for (i=0; i<6; i++)
            {
                val = ((cbp >> (5 - i)) & 1);
                s->dc_val[0][s->block_index[i]] = 0;
                s->mb_intra = 0;
                if(i < 4) {
                    dmv_x = dmv_y = 0;
                    s->mb_intra = 0;
                    mb_has_coeffs = 0;
                    if(val) {
                        GET_MVDATA(dmv_x, dmv_y);
                    }
                    vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
                    if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
                    intra_count += s->mb_intra;
                    is_intra[i] = s->mb_intra;
                    is_coded[i] = mb_has_coeffs;
                }
                if(i&4){
                    /* chroma is intra iff a majority (>=3) of luma blocks are */
                    is_intra[i] = (intra_count >= 3);
                    is_coded[i] = val;
                }
                if(i == 4) vc1_mc_4mv_chroma(v);
                v->mb_type[0][s->block_index[i]] = is_intra[i];
                if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
            }
            // if there are no coded blocks then don't do anything more
            if(!intra_count && !coded_inter) return 0;
            dst_idx = 0;
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            /* test if block is intra and has pred */
            {
                int intrapred = 0;
                for(i=0; i<6; i++)
                    if(is_intra[i]) {
                        if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
                           || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
                            intrapred = 1;
                            break;
                        }
                    }
                if(intrapred)s->ac_pred = get_bits1(gb);
                else s->ac_pred = 0;
            }
            if (!v->ttmbf && coded_inter)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i=0; i<6; i++)
            {
                dst_idx += i >> 2;
                off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                s->mb_intra = is_intra[i];
                if (is_intra[i]) {
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if(i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if(i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];

                    vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
                    if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
                    s->dsp.vc1_inv_trans_8x8(s->block[i]);
                    if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
                    if(v->pq >= 9 && v->overlap) {
                        if(v->c_avail)
                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                        if(v->a_avail)
                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                    }
                    /* NOTE(review): this path tests v->s.loop_filter directly,
                     * unlike the 1MV path which uses apply_loop_filter (and so
                     * honours avctx->skip_loop_filter) — verify intent. */
                    if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
                        int left_cbp, top_cbp;
                        if(i & 4){
                            left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
                            top_cbp  = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
                        }else{
                            left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
                            top_cbp  = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
                        }
                        if(left_cbp & 0xC)
                            s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                        if(top_cbp & 0xA)
                            s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                    }
                    block_cbp |= 0xF << (i << 2);
                } else if(is_coded[i]) {
                    int left_cbp = 0, top_cbp = 0, filter = 0;
                    if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
                        filter = 1;
                        if(i & 4){
                            left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
                            top_cbp  = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
                        }else{
                            left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
                            top_cbp  = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
                        }
                        if(left_cbp & 0xC)
                            s->dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                        if(top_cbp & 0xA)
                            s->dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
                    }
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
                    block_cbp |= pat << (i << 2);
                    if(!v->ttmbf && ttmb < 8) ttmb = -1;
                    first_block = 0;
                }
            }
            /* NOTE(review): returning here skips the v->cbp[s->mb_x] store
             * below, so 4MV macroblocks never record their cbp for the next
             * row's loop filter — verify against upstream history. */
            return 0;
        }
        else //Skipped MB
        {
            s->mb_intra = 0;
            s->current_picture.qscale_table[mb_pos] = 0;
            for (i=0; i<6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            }
            for (i=0; i<4; i++)
            {
                vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
                vc1_mc_4mv_luma(v, i);
            }
            vc1_mc_4mv_chroma(v);
            s->current_picture.qscale_table[mb_pos] = 0; /* (redundant; set above) */
            return 0;
        }
    }
    /* only reached from the 1MV coded path: record this MB's cbp so the
     * loop filter of the following row can consult it */
    v->cbp[s->mb_x] = block_cbp;

    /* Should never happen */
    return -1;
}

/**
 * Decode one macroblock of a B frame.
 */
static void vc1_decode_b_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y *
s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mb_has_coeffs = 0; /* last_flag */
    int index, index1; /* LUT indexes */
    int val, sign; /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct;
    int dmv_x[2], dmv_y[2]; /* [0] = forward MV, [1] = backward MV */
    int bmvtype = BMV_TYPE_BACKWARD;

    mquant = v->pq; /* Loosy initialization */
    s->mb_intra = 0;

    /* direct-mode and skip flags: raw bits or pre-decoded bitplanes */
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
    for(i = 0; i < 6; i++) {
        v->mb_type[0][s->block_index[i]] = 0;
        s->dc_val[0][s->block_index[i]] = 0;
    }
    s->current_picture.qscale_table[mb_pos] = 0;

    if (!direct) {
        if (!skipped) {
            GET_MVDATA(dmv_x[0], dmv_y[0]);
            dmv_x[1] = dmv_x[0];
            dmv_y[1] = dmv_y[0];
        }
        if(skipped || !s->mb_intra) {
            /* BMV type: default direction depends on which reference is
             * temporally closer (bfraction vs. 1/2) */
            bmvtype = decode012(gb);
            switch(bmvtype) {
            case 0:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                break;
            case 1:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                break;
            case 2:
                bmvtype = BMV_TYPE_INTERPOLATED;
                dmv_x[0] = dmv_y[0] = 0;
            }
        }
    }
    for(i = 0; i < 6; i++)
        v->mb_type[0][s->block_index[i]] = s->mb_intra;

    if (skipped) {
        if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
        return;
    }
    if (direct) {
        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->mb_intra = 0;
        s->current_picture.qscale_table[mb_pos] = mquant;
        if(!v->ttmbf)
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
    } else {
        if(!mb_has_coeffs && !s->mb_intra) {
            /* no coded blocks - effectively skipped */
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            return;
        }
        if(s->mb_intra && !mb_has_coeffs) {
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            s->ac_pred = get_bits1(gb);
            cbp = 0;
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        } else {
            if(bmvtype == BMV_TYPE_INTERPOLATED) {
                /* second (backward) MV for interpolated MBs */
                GET_MVDATA(dmv_x[0], dmv_y[0]);
                if(!mb_has_coeffs) {
                    /* interpolated skipped block */
                    vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                    vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                    return;
                }
            }
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            if(!s->mb_intra) {
                vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            }
            if(s->mb_intra)
                s->ac_pred = get_bits1(gb);
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
    }
    dst_idx = 0;
    /* decode residual for all six blocks (4 luma + 2 chroma); B frames do
     * not feed the in-loop filter here (filter args are 0) */
    for (i=0; i<6; i++)
    {
        s->dc_val[0][s->block_index[i]] = 0;
        dst_idx += i >> 2;
        val = ((cbp >> (5 - i)) & 1);
        off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
        v->mb_type[0][s->block_index[i]] = s->mb_intra;
        if(s->mb_intra) {
            /* check if prediction blocks A and C are available */
            v->a_avail = v->c_avail = 0;
            if(i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if(i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];

            vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
            if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
            s->dsp.vc1_inv_trans_8x8(s->block[i]);
            if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
        } else if(val) {
            vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), 0, 0, 0);
            if(!v->ttmbf && ttmb < 8) ttmb = -1;
            first_block = 0;
        }
    }
}

/**
 * Decode all blocks of an I frame (simple/main profile).
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch(v->y_ac_table_index){
    case 0:
        v->codingset = (v->pqindex <= 8) ?
CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch(v->c_ac_table_index){
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for(; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            s->dsp.clear_blocks(s->block[0]);
            /* NOTE(review): mb_pos here is computed with mb_width while the
             * sibling decoders use mb_stride — verify this indexing is
             * intended for these picture tables. */
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for(k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma coded flags are differentially coded against the
                     * predicted coded-block value */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);

                s->dsp.vc1_inv_trans_8x8(s->block[k]);
                if(v->pq >= 9 && v->overlap) {
                    /* bias for overlap smoothing (undone by signed put) */
                    for(j = 0; j < 64; j++) s->block[k][j] += 128;
                }
            }

            vc1_put_block(v, s->block);
            /* overlap smoothing across block edges (left/top neighbours) */
            if(v->pq >= 9 && v->overlap) {
                if(s->mb_x) {
                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if(!s->first_slice_line) {
                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);

            if(get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
}

/**
 * Decode all blocks of an I frame (advanced profile).
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;
    int mqdiff;
    int overlap;
    GetBitContext *gb = &s->gb;

    /* select codingmode used
for VLC tables selection */
    switch(v->y_ac_table_index){
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch(v->c_ac_table_index){
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for(;s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            /* AC prediction flag: raw bit or pre-decoded bitplane */
            if(v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            /* per-MB overlap smoothing decision (advanced profile) */
            if(v->condover == CONDOVER_SELECT) {
                if(v->overflg_is_raw)
                    overlap = get_bits1(&v->s.gb);
                else
                    overlap = v->over_flags_plane[mb_pos];
            } else
                overlap = (v->condover == CONDOVER_ALL);

            GET_MQUANT();

            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for(k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma coded flags are differentially coded against the
                     * predicted coded-block value */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                v->a_avail = !s->first_slice_line || (k==2 || k==3);
                v->c_avail = !!s->mb_x || (k==1 || k==3);

                vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);

                s->dsp.vc1_inv_trans_8x8(s->block[k]);
                /* bias for overlap smoothing (undone by signed put) */
                for(j = 0; j < 64; j++) s->block[k][j] += 128;
            }

            vc1_put_block(v, s->block);
            /* overlap smoothing across block edges (left/top neighbours) */
            if(overlap) {
                if(s->mb_x) {
                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if(!s->first_slice_line) {
                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);

            if(get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
}

static void
vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    /* NOTE(review): both switches key on c_ac_table_index here (the I-frame
     * decoders use y_ac_table_index for codingset) — verify against spec. */
    switch(v->c_ac_table_index){
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch(v->c_ac_table_index){
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    s->first_slice_line = 1;
    /* cbp_base holds two rows (previous + current) of per-MB cbp for the
     * loop filter */
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for(; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            vc1_decode_p_mb(v);
            if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
                return;
            }
        }
        /* current row's cbp becomes previous row for the next iteration */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0])*s->mb_stride);
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
}

/**
 * Decode all macroblocks of a B frame.
 */
static void vc1_decode_b_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    switch(v->c_ac_table_index){
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch(v->c_ac_table_index){
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    s->first_slice_line = 1;
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for(; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            vc1_decode_b_mb(v);
            if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
                return;
            }
            if(v->s.loop_filter) vc1_loop_filter_iblk(s, v->pq);
        }
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
}

/**
 * Handle a skipped P frame: copy the previous picture row by row.
 */
static void vc1_decode_skip_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
    s->first_slice_line = 1;
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        ff_update_block_index(s);
        /* copy one 16-pixel-high MB row of luma and 8 of each chroma plane */
        memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
        memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        ff_draw_horiz_band(s,
s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    s->pict_type = FF_P_TYPE;
}

/**
 * Dispatch frame decoding to the per-picture-type block decoder
 * (or to the IntraX8 decoder for X8-coded frames).
 */
static void vc1_decode_blocks(VC1Context *v)
{

    v->s.esc3_level_length = 0;
    if(v->x8_type){
        ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
    }else{

        switch(v->s.pict_type) {
        case FF_I_TYPE:
            if(v->profile == PROFILE_ADVANCED)
                vc1_decode_i_blocks_adv(v);
            else
                vc1_decode_i_blocks(v);
            break;
        case FF_P_TYPE:
            if(v->p_frame_skipped)
                vc1_decode_skip_blocks(v);
            else
                vc1_decode_p_blocks(v);
            break;
        case FF_B_TYPE:
            /* BI frames are coded as intra */
            if(v->bi_type){
                if(v->profile == PROFILE_ADVANCED)
                    vc1_decode_i_blocks_adv(v);
                else
                    vc1_decode_i_blocks(v);
            }else
                vc1_decode_b_blocks(v);
            break;
        }
    }
}

/**
 * Initialize the VC-1/WMV3 decoder: parse extradata (sequence header,
 * and for WVC1 also the entry point), then allocate per-MB bitplanes
 * and block-type/cbp tables.
 * @return 0 on success, -1 on failure
 */
static av_cold int vc1_decode_init(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    GetBitContext gb;

    if (!avctx->extradata_size || !avctx->extradata) return -1;
    if (!(avctx->flags & CODEC_FLAG_GRAY))
        avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    else
        avctx->pix_fmt = PIX_FMT_GRAY8;
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
    v->s.avctx = avctx;
    avctx->flags |= CODEC_FLAG_EMU_EDGE;
    v->s.flags |= CODEC_FLAG_EMU_EDGE;

    if(avctx->idct_algo==FF_IDCT_AUTO){
        avctx->idct_algo=FF_IDCT_WMV2;
    }

    if(ff_msmpeg4_decode_init(avctx) < 0)
        return -1;
    if (vc1_init_common(v) < 0) return -1;

    avctx->coded_width = avctx->width;
    avctx->coded_height = avctx->height;
    if (avctx->codec_id == CODEC_ID_WMV3)
    {
        int count = 0;

        // looks like WMV3 has a sequence header stored in the extradata
        // advanced sequence header may be before the first frame
        // the last byte of the extradata is a version number, 1 for the
        // samples we can decode

        init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);

        if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
          return -1;

        count = avctx->extradata_size*8 - get_bits_count(&gb);
        if (count>0)
        {
            av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
                   count, get_bits(&gb, count));
        }
        else if (count < 0)
        {
            av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
        }
    } else { // VC1/WVC1
        const uint8_t *start = avctx->extradata;
        uint8_t *end = avctx->extradata + avctx->extradata_size;
        const uint8_t *next;
        int size, buf2_size;
        uint8_t *buf2 = NULL;
        int seq_initialized = 0, ep_initialized = 0;

        if(avctx->extradata_size < 16) {
            av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
            return -1;
        }

        /* NOTE(review): av_mallocz result is not checked before use — as with
         * the bitplane allocations below; verify against project policy. */
        buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
        next = start;
        /* walk start-code-delimited chunks; unescape each into buf2 */
        for(; next < end; start = next){
            next = find_next_marker(start + 4, end);
            size = next - start - 4;
            if(size <= 0) continue;
            buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
            init_get_bits(&gb, buf2, buf2_size * 8);
            switch(AV_RB32(start)){
            case VC1_CODE_SEQHDR:
                if(vc1_decode_sequence_header(avctx, v, &gb) < 0){
                    av_free(buf2);
                    return -1;
                }
                seq_initialized = 1;
                break;
            case VC1_CODE_ENTRYPOINT:
                if(vc1_decode_entry_point(avctx, v, &gb) < 0){
                    av_free(buf2);
                    return -1;
                }
                ep_initialized = 1;
                break;
            }
        }
        av_free(buf2);
        if(!seq_initialized || !ep_initialized){
            av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
            return -1;
        }
    }
    avctx->has_b_frames= !!(avctx->max_b_frames);
    s->low_delay = !avctx->has_b_frames;

    s->mb_width = (avctx->coded_width+15)>>4;
    s->mb_height = (avctx->coded_height+15)>>4;

    /* Allocate mb bitplanes */
    /* NOTE(review): these av_malloc results are unchecked — verify against
     * project policy for init-time OOM handling. */
    v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
    v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
    v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
    v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);

    /* two rows of per-MB cbp (previous + current) for the loop filter */
    v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
    v->cbp = v->cbp_base + s->mb_stride;

    /* allocate block type info in that way so it could be used with s->block_index[] */
    v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
    v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
    v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);

    /* Init coded blocks info */
    if (v->profile == PROFILE_ADVANCED)
    {
//        if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
//        if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
    }

    ff_intrax8_common_init(&v->x8,s);
    return 0;
}


/**
 * Decode one frame from the packet data.
 */
static int vc1_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *pict = data;
    uint8_t *buf2 = NULL;
    const uint8_t *buf_start = buf;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture */
        if (s->low_delay==0
&& s->next_picture_ptr) { 03126 *pict= *(AVFrame*)s->next_picture_ptr; 03127 s->next_picture_ptr= NULL; 03128 03129 *data_size = sizeof(AVFrame); 03130 } 03131 03132 return 0; 03133 } 03134 03135 /* We need to set current_picture_ptr before reading the header, 03136 * otherwise we cannot store anything in there. */ 03137 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ 03138 int i= ff_find_unused_picture(s, 0); 03139 s->current_picture_ptr= &s->picture[i]; 03140 } 03141 03142 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){ 03143 if (v->profile < PROFILE_ADVANCED) 03144 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3; 03145 else 03146 avctx->pix_fmt = PIX_FMT_VDPAU_VC1; 03147 } 03148 03149 //for advanced profile we may need to parse and unescape data 03150 if (avctx->codec_id == CODEC_ID_VC1) { 03151 int buf_size2 = 0; 03152 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); 03153 03154 if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */ 03155 const uint8_t *start, *end, *next; 03156 int size; 03157 03158 next = buf; 03159 for(start = buf, end = buf + buf_size; next < end; start = next){ 03160 next = find_next_marker(start + 4, end); 03161 size = next - start - 4; 03162 if(size <= 0) continue; 03163 switch(AV_RB32(start)){ 03164 case VC1_CODE_FRAME: 03165 if (avctx->hwaccel || 03166 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) 03167 buf_start = start; 03168 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); 03169 break; 03170 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ 03171 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); 03172 init_get_bits(&s->gb, buf2, buf_size2*8); 03173 vc1_decode_entry_point(avctx, v, &s->gb); 03174 break; 03175 case VC1_CODE_SLICE: 03176 av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n"); 03177 av_free(buf2); 03178 return -1; 03179 } 03180 } 03181 }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced 
stores both fields divided by marker */ 03182 const uint8_t *divider; 03183 03184 divider = find_next_marker(buf, buf + buf_size); 03185 if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){ 03186 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n"); 03187 av_free(buf2); 03188 return -1; 03189 } 03190 03191 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); 03192 // TODO 03193 if(!v->warn_interlaced++) 03194 av_log(v->s.avctx, AV_LOG_ERROR, "Interlaced WVC1 support is not implemented\n"); 03195 av_free(buf2);return -1; 03196 }else{ 03197 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); 03198 } 03199 init_get_bits(&s->gb, buf2, buf_size2*8); 03200 } else 03201 init_get_bits(&s->gb, buf, buf_size*8); 03202 // do parse frame header 03203 if(v->profile < PROFILE_ADVANCED) { 03204 if(vc1_parse_frame_header(v, &s->gb) == -1) { 03205 av_free(buf2); 03206 return -1; 03207 } 03208 } else { 03209 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) { 03210 av_free(buf2); 03211 return -1; 03212 } 03213 } 03214 03215 if(s->pict_type != FF_I_TYPE && !v->res_rtm_flag){ 03216 av_free(buf2); 03217 return -1; 03218 } 03219 03220 // for hurry_up==5 03221 s->current_picture.pict_type= s->pict_type; 03222 s->current_picture.key_frame= s->pict_type == FF_I_TYPE; 03223 03224 /* skip B-frames if we don't have reference frames */ 03225 if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)){ 03226 av_free(buf2); 03227 return -1;//buf_size; 03228 } 03229 /* skip b frames if we are in a hurry */ 03230 if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size; 03231 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE) 03232 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE) 03233 || avctx->skip_frame >= AVDISCARD_ALL) { 03234 av_free(buf2); 03235 return buf_size; 03236 } 03237 /* skip everything if we are in a hurry>=5 */ 03238 if(avctx->hurry_up>=5) { 03239 av_free(buf2); 03240 return 
-1;//buf_size; 03241 } 03242 03243 if(s->next_p_frame_damaged){ 03244 if(s->pict_type==FF_B_TYPE) 03245 return buf_size; 03246 else 03247 s->next_p_frame_damaged=0; 03248 } 03249 03250 if(MPV_frame_start(s, avctx) < 0) { 03251 av_free(buf2); 03252 return -1; 03253 } 03254 03255 s->me.qpel_put= s->dsp.put_qpel_pixels_tab; 03256 s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; 03257 03258 if ((CONFIG_VC1_VDPAU_DECODER) 03259 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) 03260 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); 03261 else if (avctx->hwaccel) { 03262 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) 03263 return -1; 03264 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) 03265 return -1; 03266 if (avctx->hwaccel->end_frame(avctx) < 0) 03267 return -1; 03268 } else { 03269 ff_er_frame_start(s); 03270 03271 v->bits = buf_size * 8; 03272 vc1_decode_blocks(v); 03273 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8); 03274 // if(get_bits_count(&s->gb) > buf_size * 8) 03275 // return -1; 03276 ff_er_frame_end(s); 03277 } 03278 03279 MPV_frame_end(s); 03280 03281 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); 03282 assert(s->current_picture.pict_type == s->pict_type); 03283 if (s->pict_type == FF_B_TYPE || s->low_delay) { 03284 *pict= *(AVFrame*)s->current_picture_ptr; 03285 } else if (s->last_picture_ptr != NULL) { 03286 *pict= *(AVFrame*)s->last_picture_ptr; 03287 } 03288 03289 if(s->last_picture_ptr || s->low_delay){ 03290 *data_size = sizeof(AVFrame); 03291 ff_print_debug_info(s, pict); 03292 } 03293 03294 av_free(buf2); 03295 return buf_size; 03296 } 03297 03298 03302 static av_cold int vc1_decode_end(AVCodecContext *avctx) 03303 { 03304 VC1Context *v = avctx->priv_data; 03305 03306 av_freep(&v->hrd_rate); 03307 av_freep(&v->hrd_buffer); 03308 MPV_common_end(&v->s); 03309 av_freep(&v->mv_type_mb_plane); 03310 
av_freep(&v->direct_mb_plane); 03311 av_freep(&v->acpred_plane); 03312 av_freep(&v->over_flags_plane); 03313 av_freep(&v->mb_type_base); 03314 av_freep(&v->cbp_base); 03315 ff_intrax8_common_end(&v->x8); 03316 return 0; 03317 } 03318 03319 03320 AVCodec vc1_decoder = { 03321 "vc1", 03322 AVMEDIA_TYPE_VIDEO, 03323 CODEC_ID_VC1, 03324 sizeof(VC1Context), 03325 vc1_decode_init, 03326 NULL, 03327 vc1_decode_end, 03328 vc1_decode_frame, 03329 CODEC_CAP_DR1 | CODEC_CAP_DELAY, 03330 NULL, 03331 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"), 03332 .pix_fmts = ff_hwaccel_pixfmt_list_420 03333 }; 03334 03335 #if CONFIG_WMV3_DECODER 03336 AVCodec wmv3_decoder = { 03337 "wmv3", 03338 AVMEDIA_TYPE_VIDEO, 03339 CODEC_ID_WMV3, 03340 sizeof(VC1Context), 03341 vc1_decode_init, 03342 NULL, 03343 vc1_decode_end, 03344 vc1_decode_frame, 03345 CODEC_CAP_DR1 | CODEC_CAP_DELAY, 03346 NULL, 03347 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"), 03348 .pix_fmts = ff_hwaccel_pixfmt_list_420 03349 }; 03350 #endif 03351 03352 #if CONFIG_WMV3_VDPAU_DECODER 03353 AVCodec wmv3_vdpau_decoder = { 03354 "wmv3_vdpau", 03355 AVMEDIA_TYPE_VIDEO, 03356 CODEC_ID_WMV3, 03357 sizeof(VC1Context), 03358 vc1_decode_init, 03359 NULL, 03360 vc1_decode_end, 03361 vc1_decode_frame, 03362 CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, 03363 NULL, 03364 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"), 03365 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE} 03366 }; 03367 #endif 03368 03369 #if CONFIG_VC1_VDPAU_DECODER 03370 AVCodec vc1_vdpau_decoder = { 03371 "vc1_vdpau", 03372 AVMEDIA_TYPE_VIDEO, 03373 CODEC_ID_VC1, 03374 sizeof(VC1Context), 03375 vc1_decode_init, 03376 NULL, 03377 vc1_decode_end, 03378 vc1_decode_frame, 03379 CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, 03380 NULL, 03381 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"), 03382 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE} 03383 }; 03384 
#endif