libavcodec/mpegvideo.c

00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of FFmpeg.
00009  *
00010  * FFmpeg is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * FFmpeg is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with FFmpeg; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "avcodec.h"
00032 #include "dsputil.h"
00033 #include "mpegvideo.h"
00034 #include "mpegvideo_common.h"
00035 #include "mjpegenc.h"
00036 #include "msmpeg4.h"
00037 #include "faandct.h"
00038 #include "xvmc_internal.h"
00039 #include <limits.h>
00040 
00041 //#undef NDEBUG
00042 //#include <assert.h>
00043 
00044 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00045                                    DCTELEM *block, int n, int qscale);
00046 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00047                                    DCTELEM *block, int n, int qscale);
00048 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00049                                    DCTELEM *block, int n, int qscale);
00050 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00051                                    DCTELEM *block, int n, int qscale);
00052 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00053                                    DCTELEM *block, int n, int qscale);
00054 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00055                                   DCTELEM *block, int n, int qscale);
00056 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00057                                   DCTELEM *block, int n, int qscale);
00058 
00059 
00060 /* enable all paranoid tests for rounding, overflows, etc... */
00061 //#define PARANOID
00062 
00063 //#define DEBUG
00064 
00065 
00066 static const uint8_t ff_default_chroma_qscale_table[32]={
00067 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00068     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
00069 };
00070 
00071 const uint8_t ff_mpeg1_dc_scale_table[128]={
00072 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00073     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00074     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00075     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00076     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00077 };
00078 
00079 static const uint8_t mpeg2_dc_scale_table1[128]={
00080 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00081     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00082     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00083     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00084     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table2[128]={
00088 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00089     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00090     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00091     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00092     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00093 };
00094 
00095 static const uint8_t mpeg2_dc_scale_table3[128]={
00096 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00097     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00098     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00099     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00101 };
00102 
00103 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
00104     ff_mpeg1_dc_scale_table,
00105     mpeg2_dc_scale_table1,
00106     mpeg2_dc_scale_table2,
00107     mpeg2_dc_scale_table3,
00108 };
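/* Note (editorial): ff_mpeg2_dc_scale_table[] is indexed by the MPEG-2
 * intra_dc_precision field (0..3) and selects a DC quantizer step of
 * 8, 4, 2 or 1, i.e. 8 >> intra_dc_precision.  Each row holds 128 constant
 * entries so that the generic qscale-indexed lookup used elsewhere in this
 * file, e.g.
 *
 *     s->y_dc_scale = s->y_dc_scale_table[qscale];
 *
 * works unchanged for codecs whose DC scale really does depend on qscale
 * (H.263/MPEG-4 style tables). */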
00109 
00110 const enum PixelFormat ff_pixfmt_list_420[] = {
00111     PIX_FMT_YUV420P,
00112     PIX_FMT_NONE
00113 };
00114 
00115 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00116     PIX_FMT_DXVA2_VLD,
00117     PIX_FMT_VAAPI_VLD,
00118     PIX_FMT_YUV420P,
00119     PIX_FMT_NONE
00120 };
00121 
00122 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
00123     int i;
00124 
00125     assert(p<=end);
00126     if(p>=end)
00127         return end;
00128 
00129     for(i=0; i<3; i++){
00130         uint32_t tmp= *state << 8;
00131         *state= tmp + *(p++);
00132         if(tmp == 0x100 || p==end)
00133             return p;
00134     }
00135 
00136     while(p<end){
00137         if     (p[-1] > 1      ) p+= 3;
00138         else if(p[-2]          ) p+= 2;
00139         else if(p[-3]|(p[-1]-1)) p++;
00140         else{
00141             p++;
00142             break;
00143         }
00144     }
00145 
00146     p= FFMIN(p, end)-4;
00147     *state= AV_RB32(p);
00148 
00149     return p+4;
00150 }
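/* Illustrative sketch (not part of this file): callers such as the MPEG-1/2
 * decoder seed *state with -1 and call this in a loop; after a call that hit
 * a start code, *state holds the 32-bit value 0x000001XX and the returned
 * pointer is just past the XX byte.  handle_start_code() below is a
 * hypothetical callback used only for illustration.
 *
 *     uint32_t state = -1;
 *     while (p < end) {
 *         p = ff_find_start_code(p, end, &state);
 *         if (state >= 0x100 && state <= 0x1ff)    // found 00 00 01 XX
 *             handle_start_code(state & 0xff, p);  // hypothetical
 *     }
 */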
00151 
00152 /* init common dct for both encoder and decoder */
00153 av_cold int ff_dct_common_init(MpegEncContext *s)
00154 {
00155     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00156     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00157     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00158     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00159     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00160     if(s->flags & CODEC_FLAG_BITEXACT)
00161         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00162     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00163 
00164 #if   HAVE_MMX
00165     MPV_common_init_mmx(s);
00166 #elif ARCH_ALPHA
00167     MPV_common_init_axp(s);
00168 #elif CONFIG_MLIB
00169     MPV_common_init_mlib(s);
00170 #elif HAVE_MMI
00171     MPV_common_init_mmi(s);
00172 #elif ARCH_ARM
00173     MPV_common_init_arm(s);
00174 #elif HAVE_ALTIVEC
00175     MPV_common_init_altivec(s);
00176 #elif ARCH_BFIN
00177     MPV_common_init_bfin(s);
00178 #endif
00179 
00180     /* load & permute scantables
00181        note: only wmv uses different ones
00182     */
00183     if(s->alternate_scan){
00184         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00185         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00186     }else{
00187         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00188         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00189     }
00190     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00191     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00192 
00193     return 0;
00194 }
00195 
00196 void ff_copy_picture(Picture *dst, Picture *src){
00197     *dst = *src;
00198     dst->type= FF_BUFFER_TYPE_COPY;
00199 }
00200 
00204 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00205 {
00206     s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
00207     av_freep(&pic->hwaccel_picture_private);
00208 }
00209 
00213 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00214 {
00215     int r;
00216 
00217     if (s->avctx->hwaccel) {
00218         assert(!pic->hwaccel_picture_private);
00219         if (s->avctx->hwaccel->priv_data_size) {
00220             pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00221             if (!pic->hwaccel_picture_private) {
00222                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00223                 return -1;
00224             }
00225         }
00226     }
00227 
00228     r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
00229 
00230     if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
00231         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
00232         av_freep(&pic->hwaccel_picture_private);
00233         return -1;
00234     }
00235 
00236     if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
00237         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
00238         free_frame_buffer(s, pic);
00239         return -1;
00240     }
00241 
00242     if (pic->linesize[1] != pic->linesize[2]) {
00243         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
00244         free_frame_buffer(s, pic);
00245         return -1;
00246     }
00247 
00248     return 0;
00249 }
00250 
00255 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00256     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
00257     const int mb_array_size= s->mb_stride*s->mb_height;
00258     const int b8_array_size= s->b8_stride*s->mb_height*2;
00259     const int b4_array_size= s->b4_stride*s->mb_height*4;
00260     int i;
00261     int r= -1;
00262 
00263     if(shared){
00264         assert(pic->data[0]);
00265         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00266         pic->type= FF_BUFFER_TYPE_SHARED;
00267     }else{
00268         assert(!pic->data[0]);
00269 
00270         if (alloc_frame_buffer(s, pic) < 0)
00271             return -1;
00272 
00273         s->linesize  = pic->linesize[0];
00274         s->uvlinesize= pic->linesize[1];
00275     }
00276 
00277     if(pic->qscale_table==NULL){
00278         if (s->encoding) {
00279             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
00280             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
00281             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
00282         }
00283 
00284         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
00285         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
00286         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00287         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00288         pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
00289         if(s->out_format == FMT_H264){
00290             for(i=0; i<2; i++){
00291                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
00292                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00293                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00294             }
00295             pic->motion_subsample_log2= 2;
00296         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00297             for(i=0; i<2; i++){
00298                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00299                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00300                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00301             }
00302             pic->motion_subsample_log2= 3;
00303         }
00304         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00305             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00306         }
00307         pic->qstride= s->mb_stride;
00308         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00309     }
00310 
00311     /* It might be nicer if the application would keep track of these
00312      * but it would require an API change. */
00313     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00314     s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
00315     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
00316         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
00317 
00318     return 0;
00319 fail: //for the FF_ALLOCZ_OR_GOTO macro
00320     if(r>=0)
00321         free_frame_buffer(s, pic);
00322     return -1;
00323 }
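/* Note (editorial): pic->age, as set by get_buffer(), is the number of
 * pictures since this buffer was last handed out.  The macroblock decode path
 * compares it against the per-MB skip counter to avoid re-copying macroblocks
 * that have been skipped in every picture since the buffer last held valid
 * data.  The prev_pict_types history above detects the case where the
 * buffer's previous occupant was a B-frame; as the comment notes, skipped MBs
 * in B-frames are not propagated this way, so age is forced to INT_MAX and
 * the shortcut is never taken for this picture. */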
00324 
00328 static void free_picture(MpegEncContext *s, Picture *pic){
00329     int i;
00330 
00331     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00332         free_frame_buffer(s, pic);
00333     }
00334 
00335     av_freep(&pic->mb_var);
00336     av_freep(&pic->mc_mb_var);
00337     av_freep(&pic->mb_mean);
00338     av_freep(&pic->mbskip_table);
00339     av_freep(&pic->qscale_table_base);
00340     av_freep(&pic->mb_type_base);
00341     av_freep(&pic->dct_coeff);
00342     av_freep(&pic->pan_scan);
00343     pic->mb_type= NULL;
00344     for(i=0; i<2; i++){
00345         av_freep(&pic->motion_val_base[i]);
00346         av_freep(&pic->ref_index[i]);
00347     }
00348 
00349     if(pic->type == FF_BUFFER_TYPE_SHARED){
00350         for(i=0; i<4; i++){
00351             pic->base[i]=
00352             pic->data[i]= NULL;
00353         }
00354         pic->type= 0;
00355     }
00356 }
00357 
00358 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
00359     int i;
00360 
00361     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
00362     FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
00363     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
00364 
00365      //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
00366     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
00367     s->me.temp=         s->me.scratchpad;
00368     s->rd_scratchpad=   s->me.scratchpad;
00369     s->b_scratchpad=    s->me.scratchpad;
00370     s->obmc_scratchpad= s->me.scratchpad + 16;
00371     if (s->encoding) {
00372         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
00373         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
00374         if(s->avctx->noise_reduction){
00375             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
00376         }
00377     }
00378     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
00379     s->block= s->blocks[0];
00380 
00381     for(i=0;i<12;i++){
00382         s->pblocks[i] = &s->block[i];
00383     }
00384     return 0;
00385 fail:
00386     return -1; //free() through MPV_common_end()
00387 }
00388 
00389 static void free_duplicate_context(MpegEncContext *s){
00390     if(s==NULL) return;
00391 
00392     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00393     av_freep(&s->me.scratchpad);
00394     s->me.temp=
00395     s->rd_scratchpad=
00396     s->b_scratchpad=
00397     s->obmc_scratchpad= NULL;
00398 
00399     av_freep(&s->dct_error_sum);
00400     av_freep(&s->me.map);
00401     av_freep(&s->me.score_map);
00402     av_freep(&s->blocks);
00403     s->block= NULL;
00404 }
00405 
00406 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
00407 #define COPY(a) bak->a= src->a
00408     COPY(allocated_edge_emu_buffer);
00409     COPY(edge_emu_buffer);
00410     COPY(me.scratchpad);
00411     COPY(me.temp);
00412     COPY(rd_scratchpad);
00413     COPY(b_scratchpad);
00414     COPY(obmc_scratchpad);
00415     COPY(me.map);
00416     COPY(me.score_map);
00417     COPY(blocks);
00418     COPY(block);
00419     COPY(start_mb_y);
00420     COPY(end_mb_y);
00421     COPY(me.map_generation);
00422     COPY(pb);
00423     COPY(dct_error_sum);
00424     COPY(dct_count[0]);
00425     COPY(dct_count[1]);
00426 #undef COPY
00427 }
00428 
00429 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
00430     MpegEncContext bak;
00431     int i;
00432     //FIXME copy only needed parts
00433 //START_TIMER
00434     backup_duplicate_context(&bak, dst);
00435     memcpy(dst, src, sizeof(MpegEncContext));
00436     backup_duplicate_context(dst, &bak);
00437     for(i=0;i<12;i++){
00438         dst->pblocks[i] = &dst->block[i];
00439     }
00440 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
00441 }
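/* Note (editorial): the backup/copy/restore dance above gives each slice
 * thread an up-to-date copy of the main context while preserving the members
 * that are genuinely per-thread (scratch buffers, block storage, the
 * PutBitContext pb, the slice row range, ME maps, noise-reduction sums).
 * backup_duplicate_context() saves those, the memcpy() clones everything
 * else from src, and copying the backup back on top restores the
 * thread-local parts. */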
00442 
00447 void MPV_common_defaults(MpegEncContext *s){
00448     s->y_dc_scale_table=
00449     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00450     s->chroma_qscale_table= ff_default_chroma_qscale_table;
00451     s->progressive_frame= 1;
00452     s->progressive_sequence= 1;
00453     s->picture_structure= PICT_FRAME;
00454 
00455     s->coded_picture_number = 0;
00456     s->picture_number = 0;
00457     s->input_picture_number = 0;
00458 
00459     s->picture_in_gop_number = 0;
00460 
00461     s->f_code = 1;
00462     s->b_code = 1;
00463 }
00464 
00469 void MPV_decode_defaults(MpegEncContext *s){
00470     MPV_common_defaults(s);
00471 }
00472 
00477 av_cold int MPV_common_init(MpegEncContext *s)
00478 {
00479     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
00480 
00481     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00482         s->mb_height = (s->height + 31) / 32 * 2;
00483     else
00484         s->mb_height = (s->height + 15) / 16;
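    /* Note (editorial): for interlaced MPEG-2 the height is rounded up to a
     * whole number of 32-line macroblock pairs so that each field picture gets
     * an integral number of 16-line macroblock rows.  E.g. height = 144:
     * progressive gives (144+15)/16 = 9 rows, while the interlaced case
     * allocates (144+31)/32*2 = 10. */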
00485 
00486     if(s->avctx->pix_fmt == PIX_FMT_NONE){
00487         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
00488         return -1;
00489     }
00490 
00491     if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
00492         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
00493         return -1;
00494     }
00495 
00496     if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
00497         return -1;
00498 
00499     dsputil_init(&s->dsp, s->avctx);
00500     ff_dct_common_init(s);
00501 
00502     s->flags= s->avctx->flags;
00503     s->flags2= s->avctx->flags2;
00504 
00505     s->mb_width  = (s->width  + 15) / 16;
00506     s->mb_stride = s->mb_width + 1;
00507     s->b8_stride = s->mb_width*2 + 1;
00508     s->b4_stride = s->mb_width*4 + 1;
00509     mb_array_size= s->mb_height * s->mb_stride;
00510     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
00511 
00512     /* set chroma shifts */
00513     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00514                                                     &(s->chroma_y_shift) );
00515 
00516     /* set default edge pos, will be overridden in decode_header if needed */
00517     s->h_edge_pos= s->mb_width*16;
00518     s->v_edge_pos= s->mb_height*16;
00519 
00520     s->mb_num = s->mb_width * s->mb_height;
00521 
00522     s->block_wrap[0]=
00523     s->block_wrap[1]=
00524     s->block_wrap[2]=
00525     s->block_wrap[3]= s->b8_stride;
00526     s->block_wrap[4]=
00527     s->block_wrap[5]= s->mb_stride;
00528 
00529     y_size = s->b8_stride * (2 * s->mb_height + 1);
00530     c_size = s->mb_stride * (s->mb_height + 1);
00531     yc_size = y_size + 2 * c_size;
00532 
00533     /* convert fourcc to upper case */
00534     s->codec_tag=          toupper( s->avctx->codec_tag     &0xFF)
00535                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
00536                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
00537                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
00538 
00539     s->stream_codec_tag=          toupper( s->avctx->stream_codec_tag     &0xFF)
00540                                + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
00541                                + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
00542                                + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
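    /* Note (editorial): e.g. a codec_tag of MKTAG('d','i','v','x') becomes
     * MKTAG('D','I','V','X') here, so fourcc checks later in the codec can
     * compare against a single upper-case spelling. */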
00543 
00544     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
00545 
00546     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
00547     for(y=0; y<s->mb_height; y++){
00548         for(x=0; x<s->mb_width; x++){
00549             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
00550         }
00551     }
00552     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
00553 
00554     if (s->encoding) {
00555         /* Allocate MV tables */
00556         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00557         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00558         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00559         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00560         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00561         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00562         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00563         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00564         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00565         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00566         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00567         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00568 
00569         if(s->msmpeg4_version){
00570             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00571         }
00572         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00573 
00574         /* Allocate MB type table */
00575         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
00576 
00577         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00578 
00579         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
00580         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
00581         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00582         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00583         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00584         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00585 
00586         if(s->avctx->noise_reduction){
00587             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00588         }
00589     }
00590     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
00591     for(i = 0; i < MAX_PICTURE_COUNT; i++) {
00592         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
00593     }
00594 
00595     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00596 
00597     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00598         /* interlaced direct mode decoding tables */
00599             for(i=0; i<2; i++){
00600                 int j, k;
00601                 for(j=0; j<2; j++){
00602                     for(k=0; k<2; k++){
00603                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
00604                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00605                     }
00606                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00607                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00608                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
00609                 }
00610                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00611             }
00612     }
00613     if (s->out_format == FMT_H263) {
00614         /* ac values */
00615         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
00616         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00617         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00618         s->ac_val[2] = s->ac_val[1] + c_size;
00619 
00620         /* cbp values */
00621         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00622         s->coded_block= s->coded_block_base + s->b8_stride + 1;
00623 
00624         /* cbp, ac_pred, pred_dir */
00625         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
00626         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
00627     }
00628 
00629     if (s->h263_pred || s->h263_plus || !s->encoding) {
00630         /* dc values */
00631         //MN: we need these for error resilience of intra-frames
00632         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00633         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00634         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00635         s->dc_val[2] = s->dc_val[1] + c_size;
00636         for(i=0;i<yc_size;i++)
00637             s->dc_val_base[i] = 1024;
00638     }
00639 
00640     /* which mb is an intra block */
00641     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00642     memset(s->mbintra_table, 1, mb_array_size);
00643 
00644     /* init macroblock skip table */
00645     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
00646     //Note the +2 is for a quicker mpeg4 slice_end detection
00647     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00648 
00649     s->parse_context.state= -1;
00650     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
00651        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00652        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00653        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00654     }
00655 
00656     s->context_initialized = 1;
00657 
00658     s->thread_context[0]= s;
00659     threads = s->avctx->thread_count;
00660 
00661     for(i=1; i<threads; i++){
00662         s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
00663         memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00664     }
00665 
00666     for(i=0; i<threads; i++){
00667         if(init_duplicate_context(s->thread_context[i], s) < 0)
00668            goto fail;
00669         s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
00670         s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
00671     }
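    /* Note (editorial): the rounding term thread_count/2 distributes the
     * macroblock rows as evenly as possible.  E.g. with mb_height = 68 and
     * 4 threads, thread i covers rows [17*i, 17*(i+1)):
     *     (68*0 + 2)/4 = 0,  (68*1 + 2)/4 = 17,
     *     (68*2 + 2)/4 = 34, (68*3 + 2)/4 = 51, (68*4 + 2)/4 = 68. */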
00672 
00673     return 0;
00674  fail:
00675     MPV_common_end(s);
00676     return -1;
00677 }
00678 
00679 /* init common structure for both encoder and decoder */
00680 void MPV_common_end(MpegEncContext *s)
00681 {
00682     int i, j, k;
00683 
00684     for(i=0; i<s->avctx->thread_count; i++){
00685         free_duplicate_context(s->thread_context[i]);
00686     }
00687     for(i=1; i<s->avctx->thread_count; i++){
00688         av_freep(&s->thread_context[i]);
00689     }
00690 
00691     av_freep(&s->parse_context.buffer);
00692     s->parse_context.buffer_size=0;
00693 
00694     av_freep(&s->mb_type);
00695     av_freep(&s->p_mv_table_base);
00696     av_freep(&s->b_forw_mv_table_base);
00697     av_freep(&s->b_back_mv_table_base);
00698     av_freep(&s->b_bidir_forw_mv_table_base);
00699     av_freep(&s->b_bidir_back_mv_table_base);
00700     av_freep(&s->b_direct_mv_table_base);
00701     s->p_mv_table= NULL;
00702     s->b_forw_mv_table= NULL;
00703     s->b_back_mv_table= NULL;
00704     s->b_bidir_forw_mv_table= NULL;
00705     s->b_bidir_back_mv_table= NULL;
00706     s->b_direct_mv_table= NULL;
00707     for(i=0; i<2; i++){
00708         for(j=0; j<2; j++){
00709             for(k=0; k<2; k++){
00710                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00711                 s->b_field_mv_table[i][j][k]=NULL;
00712             }
00713             av_freep(&s->b_field_select_table[i][j]);
00714             av_freep(&s->p_field_mv_table_base[i][j]);
00715             s->p_field_mv_table[i][j]=NULL;
00716         }
00717         av_freep(&s->p_field_select_table[i]);
00718     }
00719 
00720     av_freep(&s->dc_val_base);
00721     av_freep(&s->ac_val_base);
00722     av_freep(&s->coded_block_base);
00723     av_freep(&s->mbintra_table);
00724     av_freep(&s->cbp_table);
00725     av_freep(&s->pred_dir_table);
00726 
00727     av_freep(&s->mbskip_table);
00728     av_freep(&s->prev_pict_types);
00729     av_freep(&s->bitstream_buffer);
00730     s->allocated_bitstream_buffer_size=0;
00731 
00732     av_freep(&s->avctx->stats_out);
00733     av_freep(&s->ac_stats);
00734     av_freep(&s->error_status_table);
00735     av_freep(&s->mb_index2xy);
00736     av_freep(&s->lambda_table);
00737     av_freep(&s->q_intra_matrix);
00738     av_freep(&s->q_inter_matrix);
00739     av_freep(&s->q_intra_matrix16);
00740     av_freep(&s->q_inter_matrix16);
00741     av_freep(&s->input_picture);
00742     av_freep(&s->reordered_input_picture);
00743     av_freep(&s->dct_offset);
00744 
00745     if(s->picture){
00746         for(i=0; i<MAX_PICTURE_COUNT; i++){
00747             free_picture(s, &s->picture[i]);
00748         }
00749     }
00750     av_freep(&s->picture);
00751     s->context_initialized = 0;
00752     s->last_picture_ptr=
00753     s->next_picture_ptr=
00754     s->current_picture_ptr= NULL;
00755     s->linesize= s->uvlinesize= 0;
00756 
00757     for(i=0; i<3; i++)
00758         av_freep(&s->visualization_buffer[i]);
00759 
00760     avcodec_default_free_buffers(s->avctx);
00761 }
00762 
00763 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
00764 {
00765     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
00766     uint8_t index_run[MAX_RUN+1];
00767     int last, run, level, start, end, i;
00768 
00769     /* If table is static, we can quit if rl->max_level[0] is not NULL */
00770     if(static_store && rl->max_level[0])
00771         return;
00772 
00773     /* compute max_level[], max_run[] and index_run[] */
00774     for(last=0;last<2;last++) {
00775         if (last == 0) {
00776             start = 0;
00777             end = rl->last;
00778         } else {
00779             start = rl->last;
00780             end = rl->n;
00781         }
00782 
00783         memset(max_level, 0, MAX_RUN + 1);
00784         memset(max_run, 0, MAX_LEVEL + 1);
00785         memset(index_run, rl->n, MAX_RUN + 1);
00786         for(i=start;i<end;i++) {
00787             run = rl->table_run[i];
00788             level = rl->table_level[i];
00789             if (index_run[run] == rl->n)
00790                 index_run[run] = i;
00791             if (level > max_level[run])
00792                 max_level[run] = level;
00793             if (run > max_run[level])
00794                 max_run[level] = run;
00795         }
00796         if(static_store)
00797             rl->max_level[last] = static_store[last];
00798         else
00799             rl->max_level[last] = av_malloc(MAX_RUN + 1);
00800         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00801         if(static_store)
00802             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
00803         else
00804             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
00805         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
00806         if(static_store)
00807             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
00808         else
00809             rl->index_run[last] = av_malloc(MAX_RUN + 1);
00810         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
00811     }
00812 }
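/* Note (editorial): when static_store is used, each of its two rows packs the
 * three per-"last" tables back to back, which explains the offsets above:
 *
 *     bytes [0 .. MAX_RUN]                                 max_level[]
 *     bytes [MAX_RUN+1 .. MAX_RUN+MAX_LEVEL+1]             max_run[]
 *     bytes [MAX_RUN+MAX_LEVEL+2 .. 2*MAX_RUN+MAX_LEVEL+2] index_run[]
 *
 * for a total of 2*MAX_RUN + MAX_LEVEL + 3 bytes, matching the declared row
 * size of the static_store parameter. */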
00813 
00814 void init_vlc_rl(RLTable *rl)
00815 {
00816     int i, q;
00817 
00818     for(q=0; q<32; q++){
00819         int qmul= q*2;
00820         int qadd= (q-1)|1;
00821 
00822         if(q==0){
00823             qmul=1;
00824             qadd=0;
00825         }
00826         for(i=0; i<rl->vlc.table_size; i++){
00827             int code= rl->vlc.table[i][0];
00828             int len = rl->vlc.table[i][1];
00829             int level, run;
00830 
00831             if(len==0){ // illegal code
00832                 run= 66;
00833                 level= MAX_LEVEL;
00834             }else if(len<0){ //more bits needed
00835                 run= 0;
00836                 level= code;
00837             }else{
00838                 if(code==rl->n){ //esc
00839                     run= 66;
00840                     level= 0;
00841                 }else{
00842                     run=   rl->table_run  [code] + 1;
00843                     level= rl->table_level[code] * qmul + qadd;
00844                     if(code >= rl->last) run+=192;
00845                 }
00846             }
00847             rl->rl_vlc[q][i].len= len;
00848             rl->rl_vlc[q][i].level= level;
00849             rl->rl_vlc[q][i].run= run;
00850         }
00851     }
00852 }
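/* Note (editorial): qmul/qadd fold the H.263-style dequantization
 *     |coeff| = qscale*(2*|level| + 1) - (qscale even ? 1 : 0)
 *             = |level|*2*qscale + ((qscale-1)|1)
 * directly into the per-qscale VLC tables, so the decoder obtains dequantized
 * levels without a multiply per coefficient.  E.g. qscale 5: qmul = 10,
 * qadd = 5, a stored level of 3 yields 3*10 + 5 = 35.  run = 66 marks the
 * escape/illegal cases, and run += 192 tags codes from the "last" part of the
 * table. */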
00853 
00854 int ff_find_unused_picture(MpegEncContext *s, int shared){
00855     int i;
00856 
00857     if(shared){
00858         for(i=0; i<MAX_PICTURE_COUNT; i++){
00859             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00860         }
00861     }else{
00862         for(i=0; i<MAX_PICTURE_COUNT; i++){
00863             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
00864         }
00865         for(i=0; i<MAX_PICTURE_COUNT; i++){
00866             if(s->picture[i].data[0]==NULL) return i;
00867         }
00868     }
00869 
00870     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00871     /* We could return -1, but the codec would crash trying to draw into a
00872      * non-existing frame anyway. This is safer than waiting for a random crash.
00873      * Also the return of this is never useful, an encoder must only allocate
00874      * as much as allowed in the specification. This has no relationship to how
00875      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
00876      * enough for such valid streams).
00877      * Plus, a decoder has to check stream validity and remove frames if too
00878      * many reference frames are around. Waiting for "OOM" is not correct at
00879      * all. Similarly, missing reference frames have to be replaced by
00880      * interpolated/MC frames, anything else is a bug in the codec ...
00881      */
00882     abort();
00883     return -1;
00884 }
00885 
00886 static void update_noise_reduction(MpegEncContext *s){
00887     int intra, i;
00888 
00889     for(intra=0; intra<2; intra++){
00890         if(s->dct_count[intra] > (1<<16)){
00891             for(i=0; i<64; i++){
00892                 s->dct_error_sum[intra][i] >>=1;
00893             }
00894             s->dct_count[intra] >>= 1;
00895         }
00896 
00897         for(i=0; i<64; i++){
00898             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
00899         }
00900     }
00901 }
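/* Note (editorial): the offset is roughly noise_reduction * dct_count /
 * dct_error_sum, i.e. inversely proportional to the average error observed
 * for that coefficient; the +sum/2 and +1 terms round and avoid division by
 * zero.  E.g. with noise_reduction = 256, dct_count = 1024 and an error sum
 * of 65536: (256*1024 + 32768) / 65537 = 4.  Halving both sums once
 * dct_count exceeds 1<<16 keeps the statistics a decaying average and the
 * accumulators within integer range. */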
00902 
00906 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
00907 {
00908     int i;
00909     Picture *pic;
00910     s->mb_skipped = 0;
00911 
00912     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
00913 
00914     /* mark&release old frames */
00915     if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
00916       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
00917           free_frame_buffer(s, s->last_picture_ptr);
00918 
00919         /* release forgotten pictures */
00920         /* if(mpeg124/h263) */
00921         if(!s->encoding){
00922             for(i=0; i<MAX_PICTURE_COUNT; i++){
00923                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
00924                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
00925                     free_frame_buffer(s, &s->picture[i]);
00926                 }
00927             }
00928         }
00929       }
00930     }
00931 
00932     if(!s->encoding){
00933         /* release non-reference frames */
00934         for(i=0; i<MAX_PICTURE_COUNT; i++){
00935             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
00936                 free_frame_buffer(s, &s->picture[i]);
00937             }
00938         }
00939 
00940         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
00941             pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
00942         else{
00943             i= ff_find_unused_picture(s, 0);
00944             pic= &s->picture[i];
00945         }
00946 
00947         pic->reference= 0;
00948         if (!s->dropable){
00949             if (s->codec_id == CODEC_ID_H264)
00950                 pic->reference = s->picture_structure;
00951             else if (s->pict_type != FF_B_TYPE)
00952                 pic->reference = 3;
00953         }
00954 
00955         pic->coded_picture_number= s->coded_picture_number++;
00956 
00957         if(ff_alloc_picture(s, pic, 0) < 0)
00958             return -1;
00959 
00960         s->current_picture_ptr= pic;
00961         s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
00962         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
00963     }
00964 
00965     s->current_picture_ptr->pict_type= s->pict_type;
00966 //    if(s->flags && CODEC_FLAG_QSCALE)
00967   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
00968     s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
00969 
00970     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
00971 
00972     if (s->pict_type != FF_B_TYPE) {
00973         s->last_picture_ptr= s->next_picture_ptr;
00974         if(!s->dropable)
00975             s->next_picture_ptr= s->current_picture_ptr;
00976     }
00977 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
00978         s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
00979         s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
00980         s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
00981         s->pict_type, s->dropable);*/
00982 
00983     if(s->codec_id != CODEC_ID_H264){
00984         if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
00985             av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
00986             /* Allocate a dummy frame */
00987             i= ff_find_unused_picture(s, 0);
00988             s->last_picture_ptr= &s->picture[i];
00989             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
00990                 return -1;
00991         }
00992         if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
00993             /* Allocate a dummy frame */
00994             i= ff_find_unused_picture(s, 0);
00995             s->next_picture_ptr= &s->picture[i];
00996             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
00997                 return -1;
00998         }
00999     }
01000 
01001     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01002     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01003 
01004     assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
01005 
01006     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
01007         int i;
01008         for(i=0; i<4; i++){
01009             if(s->picture_structure == PICT_BOTTOM_FIELD){
01010                  s->current_picture.data[i] += s->current_picture.linesize[i];
01011             }
01012             s->current_picture.linesize[i] *= 2;
01013             s->last_picture.linesize[i] *=2;
01014             s->next_picture.linesize[i] *=2;
01015         }
01016     }
01017 
01018     s->hurry_up= s->avctx->hurry_up;
01019     s->error_recognition= avctx->error_recognition;
01020 
01021     /* set dequantizer, we can't do it during init as it might change for mpeg4
01022        and we can't do it in the header decode as init is not called for mpeg4 there yet */
01023     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
01024         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01025         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01026     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
01027         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01028         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01029     }else{
01030         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01031         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01032     }
01033 
01034     if(s->dct_error_sum){
01035         assert(s->avctx->noise_reduction && s->encoding);
01036 
01037         update_noise_reduction(s);
01038     }
01039 
01040     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01041         return ff_xvmc_field_start(s, avctx);
01042 
01043     return 0;
01044 }
01045 
01046 /* generic function for encode/decode called after a frame has been coded/decoded */
01047 void MPV_frame_end(MpegEncContext *s)
01048 {
01049     int i;
01050     /* draw edge for correct motion prediction if outside */
01051     //just to make sure that all data is rendered.
01052     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01053         ff_xvmc_field_end(s);
01054     }else if(!s->avctx->hwaccel
01055        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
01056        && s->unrestricted_mv
01057        && s->current_picture.reference
01058        && !s->intra_only
01059        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
01060             s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
01061             s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
01062             s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
01063     }
01064     emms_c();
01065 
01066     s->last_pict_type    = s->pict_type;
01067     s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
01068     if(s->pict_type!=FF_B_TYPE){
01069         s->last_non_b_pict_type= s->pict_type;
01070     }
01071 #if 0
01072         /* copy back current_picture variables */
01073     for(i=0; i<MAX_PICTURE_COUNT; i++){
01074         if(s->picture[i].data[0] == s->current_picture.data[0]){
01075             s->picture[i]= s->current_picture;
01076             break;
01077         }
01078     }
01079     assert(i<MAX_PICTURE_COUNT);
01080 #endif
01081 
01082     if(s->encoding){
01083         /* release non-reference frames */
01084         for(i=0; i<MAX_PICTURE_COUNT; i++){
01085             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
01086                 free_frame_buffer(s, &s->picture[i]);
01087             }
01088         }
01089     }
01090     // clear copies, to avoid confusion
01091 #if 0
01092     memset(&s->last_picture, 0, sizeof(Picture));
01093     memset(&s->next_picture, 0, sizeof(Picture));
01094     memset(&s->current_picture, 0, sizeof(Picture));
01095 #endif
01096     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
01097 }
01098 
01106 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01107     int x, y, fr, f;
01108 
01109     sx= av_clip(sx, 0, w-1);
01110     sy= av_clip(sy, 0, h-1);
01111     ex= av_clip(ex, 0, w-1);
01112     ey= av_clip(ey, 0, h-1);
01113 
01114     buf[sy*stride + sx]+= color;
01115 
01116     if(FFABS(ex - sx) > FFABS(ey - sy)){
01117         if(sx > ex){
01118             FFSWAP(int, sx, ex);
01119             FFSWAP(int, sy, ey);
01120         }
01121         buf+= sx + sy*stride;
01122         ex-= sx;
01123         f= ((ey-sy)<<16)/ex;
01124         for(x= 0; x <= ex; x++){
01125             y = (x*f)>>16;
01126             fr= (x*f)&0xFFFF;
01127             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
01128             buf[(y+1)*stride + x]+= (color*         fr )>>16;
01129         }
01130     }else{
01131         if(sy > ey){
01132             FFSWAP(int, sx, ex);
01133             FFSWAP(int, sy, ey);
01134         }
01135         buf+= sx + sy*stride;
01136         ey-= sy;
01137         if(ey) f= ((ex-sx)<<16)/ey;
01138         else   f= 0;
01139         for(y= 0; y <= ey; y++){
01140             x = (y*f)>>16;
01141             fr= (y*f)&0xFFFF;
01142             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
01143             buf[y*stride + x+1]+= (color*         fr )>>16;
01144         }
01145     }
01146 }
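/* Note (editorial): this is a 16.16 fixed-point line drawer.  Along the major
 * axis it steps one pixel at a time and splits `color` between the two
 * neighbouring pixels on the minor axis according to the fractional position
 * fr, a simple Wu-style anti-aliasing.  Values are added rather than
 * assigned, so overlapping vectors accumulate brightness. */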
01147 
01155 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01156     int dx,dy;
01157 
01158     sx= av_clip(sx, -100, w+100);
01159     sy= av_clip(sy, -100, h+100);
01160     ex= av_clip(ex, -100, w+100);
01161     ey= av_clip(ey, -100, h+100);
01162 
01163     dx= ex - sx;
01164     dy= ey - sy;
01165 
01166     if(dx*dx + dy*dy > 3*3){
01167         int rx=  dx + dy;
01168         int ry= -dx + dy;
01169         int length= ff_sqrt((rx*rx + ry*ry)<<8);
01170 
01171         //FIXME subpixel accuracy
01172         rx= ROUNDED_DIV(rx*3<<4, length);
01173         ry= ROUNDED_DIV(ry*3<<4, length);
01174 
01175         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01176         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01177     }
01178     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01179 }
01180 
01184 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01185 
01186     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01187 
01188     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01189         int x,y;
01190 
01191         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01192         switch (pict->pict_type) {
01193             case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
01194             case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
01195             case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
01196             case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
01197             case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
01198             case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
01199         }
01200         for(y=0; y<s->mb_height; y++){
01201             for(x=0; x<s->mb_width; x++){
01202                 if(s->avctx->debug&FF_DEBUG_SKIP){
01203                     int count= s->mbskip_table[x + y*s->mb_stride];
01204                     if(count>9) count=9;
01205                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01206                 }
01207                 if(s->avctx->debug&FF_DEBUG_QP){
01208                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01209                 }
01210                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01211                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01212                     //Type & MV direction
01213                     if(IS_PCM(mb_type))
01214                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01215                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01216                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01217                     else if(IS_INTRA4x4(mb_type))
01218                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01219                     else if(IS_INTRA16x16(mb_type))
01220                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01221                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01222                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01223                     else if(IS_DIRECT(mb_type))
01224                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01225                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01226                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01227                     else if(IS_GMC(mb_type))
01228                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01229                     else if(IS_SKIP(mb_type))
01230                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01231                     else if(!USES_LIST(mb_type, 1))
01232                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01233                     else if(!USES_LIST(mb_type, 0))
01234                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01235                     else{
01236                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01237                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01238                     }
01239 
01240                     //segmentation
01241                     if(IS_8X8(mb_type))
01242                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01243                     else if(IS_16X8(mb_type))
01244                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01245                     else if(IS_8X16(mb_type))
01246                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01247                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01248                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01249                     else
01250                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01251 
01252 
01253                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
01254                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01255                     else
01256                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01257                 }
01258 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
01259             }
01260             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01261         }
01262     }
01263 
01264     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
01265         const int shift= 1 + s->quarter_sample;
01266         int mb_y;
01267         uint8_t *ptr;
01268         int i;
01269         int h_chroma_shift, v_chroma_shift, block_height;
01270         const int width = s->avctx->width;
01271         const int height= s->avctx->height;
01272         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01273         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01274         s->low_delay=0; //needed to see the vectors without trashing the buffers
01275 
01276         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01277         for(i=0; i<3; i++){
01278             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01279             pict->data[i]= s->visualization_buffer[i];
01280         }
01281         pict->type= FF_BUFFER_TYPE_COPY;
01282         ptr= pict->data[0];
01283         block_height = 16>>v_chroma_shift;
01284 
01285         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01286             int mb_x;
01287             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01288                 const int mb_index= mb_x + mb_y*s->mb_stride;
01289                 if((s->avctx->debug_mv) && pict->motion_val){
01290                   int type;
01291                   for(type=0; type<3; type++){
01292                     int direction = 0;
01293                     switch (type) {
01294                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
01295                                 continue;
01296                               direction = 0;
01297                               break;
01298                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
01299                                 continue;
01300                               direction = 0;
01301                               break;
01302                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
01303                                 continue;
01304                               direction = 1;
01305                               break;
01306                     }
01307                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01308                         continue;
01309 
01310                     if(IS_8X8(pict->mb_type[mb_index])){
01311                       int i;
01312                       for(i=0; i<4; i++){
01313                         int sx= mb_x*16 + 4 + 8*(i&1);
01314                         int sy= mb_y*16 + 4 + 8*(i>>1);
01315                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01316                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01317                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01318                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01319                       }
01320                     }else if(IS_16X8(pict->mb_type[mb_index])){
01321                       int i;
01322                       for(i=0; i<2; i++){
01323                         int sx=mb_x*16 + 8;
01324                         int sy=mb_y*16 + 4 + 8*i;
01325                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01326                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01327                         int my=(pict->motion_val[direction][xy][1]>>shift);
01328 
01329                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01330                             my*=2;
01331 
01332                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01333                       }
01334                     }else if(IS_8X16(pict->mb_type[mb_index])){
01335                       int i;
01336                       for(i=0; i<2; i++){
01337                         int sx=mb_x*16 + 4 + 8*i;
01338                         int sy=mb_y*16 + 8;
01339                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01340                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01341                         int my=(pict->motion_val[direction][xy][1]>>shift);
01342 
01343                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01344                             my*=2;
01345 
01346                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01347                       }
01348                     }else{
01349                       int sx= mb_x*16 + 8;
01350                       int sy= mb_y*16 + 8;
01351                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01352                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01353                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01354                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01355                     }
01356                   }
01357                 }
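                      /* FF_DEBUG_VIS_QP: paint this MB's chroma area with a gray
                         level proportional to its qscale (qscale*128/31, replicated
                         into all eight bytes of a uint64_t). */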
01358                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01359                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
01360                     int y;
01361                     for(y=0; y<block_height; y++){
01362                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01363                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01364                     }
01365                 }
01366                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01367                     int mb_type= pict->mb_type[mb_index];
01368                     uint64_t u,v;
01369                     int y;
01370 #define COLOR(theta, r)\
01371 u= (int)(128 + r*cos(theta*3.141592/180));\
01372 v= (int)(128 + r*sin(theta*3.141592/180));
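      /* The macro above turns a hue angle (in degrees) and a saturation radius
         into U/V values centered on 128, so every macroblock type below gets its
         own tint on the chroma planes. */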
01373 
01374 
01375                     u=v=128;
01376                     if(IS_PCM(mb_type)){
01377                         COLOR(120,48)
01378                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01379                         COLOR(30,48)
01380                     }else if(IS_INTRA4x4(mb_type)){
01381                         COLOR(90,48)
01382                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01383 //                        COLOR(120,48)
01384                     }else if(IS_DIRECT(mb_type)){
01385                         COLOR(150,48)
01386                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01387                         COLOR(170,48)
01388                     }else if(IS_GMC(mb_type)){
01389                         COLOR(190,48)
01390                     }else if(IS_SKIP(mb_type)){
01391 //                        COLOR(180,48)
01392                     }else if(!USES_LIST(mb_type, 1)){
01393                         COLOR(240,48)
01394                     }else if(!USES_LIST(mb_type, 0)){
01395                         COLOR(0,48)
01396                     }else{
01397                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01398                         COLOR(300,48)
01399                     }
01400 
01401                     u*= 0x0101010101010101ULL;
01402                     v*= 0x0101010101010101ULL;
01403                     for(y=0; y<block_height; y++){
01404                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01405                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01406                     }
01407 
01408                     //segmentation
01409                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01410                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01411                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01412                     }
01413                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01414                         for(y=0; y<16; y++)
01415                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01416                     }
01417                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01418                         int dm= 1 << (mv_sample_log2-2);
01419                         for(i=0; i<4; i++){
01420                             int sx= mb_x*16 + 8*(i&1);
01421                             int sy= mb_y*16 + 8*(i>>1);
01422                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01423                             //FIXME bidir
01424                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01425                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01426                                 for(y=0; y<8; y++)
01427                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01428                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01429                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01430                         }
01431                     }
01432 
01433                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01434                         // hmm
01435                     }
01436                 }
01437                 s->mbskip_table[mb_index]=0;
01438             }
01439         }
01440     }
01441 }
01442 
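      /* Half-pel motion compensation of one w x h block at reduced ("lowres")
         resolution.  The sub-pel phase is handled by the bilinear h264_chroma
         MC routines; returns nonzero if edge emulation was needed. */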
01443 static inline int hpel_motion_lowres(MpegEncContext *s,
01444                                   uint8_t *dest, uint8_t *src,
01445                                   int field_based, int field_select,
01446                                   int src_x, int src_y,
01447                                   int width, int height, int stride,
01448                                   int h_edge_pos, int v_edge_pos,
01449                                   int w, int h, h264_chroma_mc_func *pix_op,
01450                                   int motion_x, int motion_y)
01451 {
01452     const int lowres= s->avctx->lowres;
01453     const int op_index= FFMIN(lowres, 2);
01454     const int s_mask= (2<<lowres)-1;
01455     int emu=0;
01456     int sx, sy;
01457 
01458     if(s->quarter_sample){
01459         motion_x/=2;
01460         motion_y/=2;
01461     }
01462 
01463     sx= motion_x & s_mask;
01464     sy= motion_y & s_mask;
01465     src_x += motion_x >> (lowres+1);
01466     src_y += motion_y >> (lowres+1);
01467 
01468     src += src_y * stride + src_x;
01469 
01470     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
01471        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01472         ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01473                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01474         src= s->edge_emu_buffer;
01475         emu=1;
01476     }
01477 
01478     sx= (sx << 2) >> lowres;
01479     sy= (sy << 2) >> lowres;
01480     if(field_select)
01481         src += s->linesize;
01482     pix_op[op_index](dest, src, stride, h, sx, sy);
01483     return emu;
01484 }
01485 
01486 /* apply one mpeg motion vector to the three components */
01487 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01488                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01489                                int field_based, int bottom_field, int field_select,
01490                                uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
01491                                int motion_x, int motion_y, int h, int mb_y)
01492 {
01493     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01494     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
01495     const int lowres= s->avctx->lowres;
01496     const int op_index= FFMIN(lowres, 2);
01497     const int block_s= 8>>lowres;
01498     const int s_mask= (2<<lowres)-1;
01499     const int h_edge_pos = s->h_edge_pos >> lowres;
01500     const int v_edge_pos = s->v_edge_pos >> lowres;
01501     linesize   = s->current_picture.linesize[0] << field_based;
01502     uvlinesize = s->current_picture.linesize[1] << field_based;
01503 
01504     if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
01505         motion_x/=2;
01506         motion_y/=2;
01507     }
01508 
01509     if(field_based){
01510         motion_y += (bottom_field - field_select)*((1<<lowres)-1);
01511     }
01512 
01513     sx= motion_x & s_mask;
01514     sy= motion_y & s_mask;
01515     src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
01516     src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
01517 
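          /* Derive the chroma source position and sub-pel phase: H.263 uses its
             special chroma rounding, H.261 chroma vectors are full-pel, and
             MPEG-1/2 simply halve the luma vector. */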
01518     if (s->out_format == FMT_H263) {
01519         uvsx = ((motion_x>>1) & s_mask) | (sx&1);
01520         uvsy = ((motion_y>>1) & s_mask) | (sy&1);
01521         uvsrc_x = src_x>>1;
01522         uvsrc_y = src_y>>1;
01523     }else if(s->out_format == FMT_H261){ // chroma MVs are full-pel in H.261
01524         mx = motion_x / 4;
01525         my = motion_y / 4;
01526         uvsx = (2*mx) & s_mask;
01527         uvsy = (2*my) & s_mask;
01528         uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
01529         uvsrc_y =    mb_y*block_s               + (my >> lowres);
01530     } else {
01531         mx = motion_x / 2;
01532         my = motion_y / 2;
01533         uvsx = mx & s_mask;
01534         uvsy = my & s_mask;
01535         uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01536         uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
01537     }
01538 
01539     ptr_y  = ref_picture[0] + src_y * linesize + src_x;
01540     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01541     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01542 
01543     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
01544        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01545             ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
01546                              src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01547             ptr_y = s->edge_emu_buffer;
01548             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01549                 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
01550                 ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
01551                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01552                 ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
01553                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01554                 ptr_cb= uvbuf;
01555                 ptr_cr= uvbuf+16;
01556             }
01557     }
01558 
01559     if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
01560         dest_y += s->linesize;
01561         dest_cb+= s->uvlinesize;
01562         dest_cr+= s->uvlinesize;
01563     }
01564 
01565     if(field_select){
01566         ptr_y += s->linesize;
01567         ptr_cb+= s->uvlinesize;
01568         ptr_cr+= s->uvlinesize;
01569     }
01570 
01571     sx= (sx << 2) >> lowres;
01572     sy= (sy << 2) >> lowres;
01573     pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
01574 
01575     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01576         uvsx= (uvsx << 2) >> lowres;
01577         uvsy= (uvsy << 2) >> lowres;
01578         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01579         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01580     }
01581     //FIXME h261 lowres loop filter
01582 }
01583 
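      /* Chroma motion compensation for 4MV macroblocks in lowres mode: the four
         luma vectors have already been summed by the caller and are collapsed
         into a single chroma vector via the H.263 rounding table. */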
01584 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01585                                      uint8_t *dest_cb, uint8_t *dest_cr,
01586                                      uint8_t **ref_picture,
01587                                      h264_chroma_mc_func *pix_op,
01588                                      int mx, int my){
01589     const int lowres= s->avctx->lowres;
01590     const int op_index= FFMIN(lowres, 2);
01591     const int block_s= 8>>lowres;
01592     const int s_mask= (2<<lowres)-1;
01593     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01594     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01595     int emu=0, src_x, src_y, offset, sx, sy;
01596     uint8_t *ptr;
01597 
01598     if(s->quarter_sample){
01599         mx/=2;
01600         my/=2;
01601     }
01602 
01603     /* In case of 8X8, we construct a single chroma motion vector
01604        with a special rounding */
01605     mx= ff_h263_round_chroma(mx);
01606     my= ff_h263_round_chroma(my);
01607 
01608     sx= mx & s_mask;
01609     sy= my & s_mask;
01610     src_x = s->mb_x*block_s + (mx >> (lowres+1));
01611     src_y = s->mb_y*block_s + (my >> (lowres+1));
01612 
01613     offset = src_y * s->uvlinesize + src_x;
01614     ptr = ref_picture[1] + offset;
01615     if(s->flags&CODEC_FLAG_EMU_EDGE){
01616         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01617            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01618             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01619             ptr= s->edge_emu_buffer;
01620             emu=1;
01621         }
01622     }
01623     sx= (sx << 2) >> lowres;
01624     sy= (sy << 2) >> lowres;
01625     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01626 
01627     ptr = ref_picture[2] + offset;
01628     if(emu){
01629         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01630         ptr= s->edge_emu_buffer;
01631     }
01632     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01633 }
01634 
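      /* Motion compensation of a whole macroblock in lowres mode, dispatching on
         s->mv_type (16x16, 8x8/4MV, field, 16x8 and dual-prime). */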
01646 static inline void MPV_motion_lowres(MpegEncContext *s,
01647                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01648                               int dir, uint8_t **ref_picture,
01649                               h264_chroma_mc_func *pix_op)
01650 {
01651     int mx, my;
01652     int mb_x, mb_y, i;
01653     const int lowres= s->avctx->lowres;
01654     const int block_s= 8>>lowres;
01655 
01656     mb_x = s->mb_x;
01657     mb_y = s->mb_y;
01658 
01659     switch(s->mv_type) {
01660     case MV_TYPE_16X16:
01661         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01662                     0, 0, 0,
01663                     ref_picture, pix_op,
01664                     s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
01665         break;
01666     case MV_TYPE_8X8:
01667         mx = 0;
01668         my = 0;
01669             for(i=0;i<4;i++) {
01670                 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
01671                             ref_picture[0], 0, 0,
01672                             (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
01673                             s->width, s->height, s->linesize,
01674                             s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
01675                             block_s, block_s, pix_op,
01676                             s->mv[dir][i][0], s->mv[dir][i][1]);
01677 
01678                 mx += s->mv[dir][i][0];
01679                 my += s->mv[dir][i][1];
01680             }
01681 
01682         if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
01683             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
01684         break;
01685     case MV_TYPE_FIELD:
01686         if (s->picture_structure == PICT_FRAME) {
01687             /* top field */
01688             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01689                         1, 0, s->field_select[dir][0],
01690                         ref_picture, pix_op,
01691                         s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
01692             /* bottom field */
01693             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01694                         1, 1, s->field_select[dir][1],
01695                         ref_picture, pix_op,
01696                         s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
01697         } else {
01698             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
01699                 ref_picture= s->current_picture_ptr->data;
01700             }
01701 
01702             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01703                         0, 0, s->field_select[dir][0],
01704                         ref_picture, pix_op,
01705                         s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
01706         }
01707         break;
01708     case MV_TYPE_16X8:
01709         for(i=0; i<2; i++){
01710             uint8_t ** ref2picture;
01711 
01712             if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
01713                 ref2picture= ref_picture;
01714             }else{
01715                 ref2picture= s->current_picture_ptr->data;
01716             }
01717 
01718             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01719                         0, 0, s->field_select[dir][i],
01720                         ref2picture, pix_op,
01721                         s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
01722 
01723             dest_y += 2*block_s*s->linesize;
01724             dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01725             dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01726         }
01727         break;
01728     case MV_TYPE_DMV:
01729         if(s->picture_structure == PICT_FRAME){
01730             for(i=0; i<2; i++){
01731                 int j;
01732                 for(j=0; j<2; j++){
01733                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01734                                 1, j, j^i,
01735                                 ref_picture, pix_op,
01736                                 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
01737                 }
01738                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01739             }
01740         }else{
01741             for(i=0; i<2; i++){
01742                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01743                             0, 0, s->picture_structure != i+1,
01744                             ref_picture, pix_op,
01745                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
01746 
01747                 // after the put, average into the same block
01748                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01749 
01750                 // the opposite-parity field is always in the current frame if this is the second field
01751                 if(!s->first_field){
01752                     ref_picture = s->current_picture_ptr->data;
01753                 }
01754             }
01755         }
01756     break;
01757     default: assert(0);
01758     }
01759 }
01760 
01761 /* put block[] into dest[] */
01762 static inline void put_dct(MpegEncContext *s,
01763                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01764 {
01765     s->dct_unquantize_intra(s, block, i, qscale);
01766     s->dsp.idct_put (dest, line_size, block);
01767 }
01768 
01769 /* add block[] to dest[] */
01770 static inline void add_dct(MpegEncContext *s,
01771                            DCTELEM *block, int i, uint8_t *dest, int line_size)
01772 {
01773     if (s->block_last_index[i] >= 0) {
01774         s->dsp.idct_add (dest, line_size, block);
01775     }
01776 }
01777 
01778 static inline void add_dequant_dct(MpegEncContext *s,
01779                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01780 {
01781     if (s->block_last_index[i] >= 0) {
01782         s->dct_unquantize_inter(s, block, i, qscale);
01783 
01784         s->dsp.idct_add (dest, line_size, block);
01785     }
01786 }
01787 
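      /* Reset the intra prediction state of the current macroblock: luma and
         chroma DC predictors back to 1024, AC prediction buffers to zero and,
         for MSMPEG4 v3+, the coded_block flags; also clears its mbintra_table
         entry. */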
01791 void ff_clean_intra_table_entries(MpegEncContext *s)
01792 {
01793     int wrap = s->b8_stride;
01794     int xy = s->block_index[0];
01795 
01796     s->dc_val[0][xy           ] =
01797     s->dc_val[0][xy + 1       ] =
01798     s->dc_val[0][xy     + wrap] =
01799     s->dc_val[0][xy + 1 + wrap] = 1024;
01800     /* ac pred */
01801     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
01802     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01803     if (s->msmpeg4_version>=3) {
01804         s->coded_block[xy           ] =
01805         s->coded_block[xy + 1       ] =
01806         s->coded_block[xy     + wrap] =
01807         s->coded_block[xy + 1 + wrap] = 0;
01808     }
01809     /* chroma */
01810     wrap = s->mb_stride;
01811     xy = s->mb_x + s->mb_y * wrap;
01812     s->dc_val[1][xy] =
01813     s->dc_val[2][xy] = 1024;
01814     /* ac pred */
01815     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01816     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01817 
01818     s->mbintra_table[xy]= 0;
01819 }
01820 
01821 /* generic function called after a macroblock has been parsed by the
01822    decoder or after it has been encoded by the encoder.
01823 
01824    Important variables used:
01825    s->mb_intra : true if intra macroblock
01826    s->mv_dir   : motion vector direction
01827    s->mv_type  : motion vector type
01828    s->mv       : motion vector
01829    s->interlaced_dct : true if interlaced dct used (mpeg2)
01830  */
01831 static av_always_inline
01832 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
01833                             int lowres_flag, int is_mpeg12)
01834 {
01835     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
01836     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01837         ff_xvmc_decode_mb(s);//xvmc uses pblocks
01838         return;
01839     }
01840 
01841     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
01842        /* save DCT coefficients */
01843        int i,j;
01844        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
01845        for(i=0; i<6; i++)
01846            for(j=0; j<64; j++)
01847                *dct++ = block[i][s->dsp.idct_permutation[j]];
01848     }
01849 
01850     s->current_picture.qscale_table[mb_xy]= s->qscale;
01851 
01852     /* update DC predictors for P macroblocks */
01853     if (!s->mb_intra) {
01854         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
01855             if(s->mbintra_table[mb_xy])
01856                 ff_clean_intra_table_entries(s);
01857         } else {
01858             s->last_dc[0] =
01859             s->last_dc[1] =
01860             s->last_dc[2] = 128 << s->intra_dc_precision;
01861         }
01862     }
01863     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
01864         s->mbintra_table[mb_xy]=1;
01865 
01866     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
01867         uint8_t *dest_y, *dest_cb, *dest_cr;
01868         int dct_linesize, dct_offset;
01869         op_pixels_func (*op_pix)[4];
01870         qpel_mc_func (*op_qpix)[16];
01871         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
01872         const int uvlinesize= s->current_picture.linesize[1];
01873         const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
01874         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
01875 
01876         /* avoid the copy if the macroblock was skipped in the last frame too */
01877         /* only skip during decoding, since encoding may slightly trash the buffers */
01878         if(!s->encoding){
01879             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
01880             const int age= s->current_picture.age;
01881 
01882             assert(age);
01883 
01884             if (s->mb_skipped) {
01885                 s->mb_skipped= 0;
01886                 assert(s->pict_type!=FF_I_TYPE);
01887 
01888                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
01889                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01890 
01891                 /* if the previous one was skipped too, there is nothing to do! */
01892                 if (*mbskip_ptr >= age && s->current_picture.reference){
01893                     return;
01894                 }
01895             } else if(!s->current_picture.reference){
01896                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
01897                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01898             } else{
01899                 *mbskip_ptr = 0; /* not skipped */
01900             }
01901         }
01902 
01903         dct_linesize = linesize << s->interlaced_dct;
01904         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
01905 
01906         if(readable){
01907             dest_y=  s->dest[0];
01908             dest_cb= s->dest[1];
01909             dest_cr= s->dest[2];
01910         }else{
01911             dest_y = s->b_scratchpad;
01912             dest_cb= s->b_scratchpad+16*linesize;
01913             dest_cr= s->b_scratchpad+32*linesize;
01914         }
01915 
01916         if (!s->mb_intra) {
01917             /* motion handling */
01918             /* decoding or more than one mb_type (MC was already done otherwise) */
01919             if(!s->encoding){
01920                 if(lowres_flag){
01921                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
01922 
01923                     if (s->mv_dir & MV_DIR_FORWARD) {
01924                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
01925                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
01926                     }
01927                     if (s->mv_dir & MV_DIR_BACKWARD) {
01928                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
01929                     }
01930                 }else{
01931                     op_qpix= s->me.qpel_put;
01932                     if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
01933                         op_pix = s->dsp.put_pixels_tab;
01934                     }else{
01935                         op_pix = s->dsp.put_no_rnd_pixels_tab;
01936                     }
01937                     if (s->mv_dir & MV_DIR_FORWARD) {
01938                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
01939                         op_pix = s->dsp.avg_pixels_tab;
01940                         op_qpix= s->me.qpel_avg;
01941                     }
01942                     if (s->mv_dir & MV_DIR_BACKWARD) {
01943                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
01944                     }
01945                 }
01946             }
01947 
01948             /* skip dequant / idct if we are really late ;) */
01949             if(s->hurry_up>1) goto skip_idct;
01950             if(s->avctx->skip_idct){
01951                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
01952                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
01953                    || s->avctx->skip_idct >= AVDISCARD_ALL)
01954                     goto skip_idct;
01955             }
01956 
01957             /* add dct residue */
01958             if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
01959                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
01960                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
01961                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
01962                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
01963                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
01964 
01965                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01966                     if (s->chroma_y_shift){
01967                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
01968                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
01969                     }else{
01970                         dct_linesize >>= 1;
01971                         dct_offset >>=1;
01972                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
01973                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
01974                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
01975                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
01976                     }
01977                 }
01978             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
01979                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
01980                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
01981                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
01982                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
01983 
01984                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01985                     if(s->chroma_y_shift){//Chroma420
01986                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
01987                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
01988                     }else{
01989                         //Chroma422
01990                         dct_linesize = uvlinesize << s->interlaced_dct;
01991                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
01992 
01993                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
01994                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
01995                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
01996                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
01997                         if(!s->chroma_x_shift){//Chroma444
01998                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
01999                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02000                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02001                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02002                         }
02003                     }
02004                 }//fi gray
02005             }
02006             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02007                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02008             }
02009         } else {
02010             /* dct only in intra block */
02011             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02012                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02013                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02014                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02015                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02016 
02017                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02018                     if(s->chroma_y_shift){
02019                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02020                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02021                     }else{
02022                         dct_offset >>=1;
02023                         dct_linesize >>=1;
02024                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02025                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02026                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02027                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02028                     }
02029                 }
02030             }else{
02031                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02032                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02033                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02034                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02035 
02036                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02037                     if(s->chroma_y_shift){
02038                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02039                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02040                     }else{
02041 
02042                         dct_linesize = uvlinesize << s->interlaced_dct;
02043                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02044 
02045                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02046                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02047                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02048                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02049                         if(!s->chroma_x_shift){//Chroma444
02050                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02051                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02052                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02053                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02054                         }
02055                     }
02056                 }//gray
02057             }
02058         }
02059 skip_idct:
02060         if(!readable){
02061             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02062             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02063             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02064         }
02065     }
02066 }
02067 
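      /* Thin dispatcher: selects a (lowres, is_mpeg12) specialization of the
         always-inlined MPV_decode_mb_internal(); the MPEG-1/2 specialization is
         only built when CONFIG_SMALL is not set. */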
02068 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02069 #if !CONFIG_SMALL
02070     if(s->out_format == FMT_MPEG1) {
02071         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02072         else                 MPV_decode_mb_internal(s, block, 0, 1);
02073     } else
02074 #endif
02075     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02076     else                  MPV_decode_mb_internal(s, block, 0, 0);
02077 }
02078 
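      /* Hand a freshly decoded band of h lines starting at y to the user's
         draw_horiz_band() callback, clipping h at the picture edge and handling
         field pictures and B-frame/low-delay display order. */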
02083 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02084     if (s->avctx->draw_horiz_band) {
02085         AVFrame *src;
02086         const int field_pic= s->picture_structure != PICT_FRAME;
02087         int offset[4];
02088 
02089         h= FFMIN(h, (s->avctx->height>>field_pic) - y);
02090 
02091         if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
02092             h <<= 1;
02093             y <<= 1;
02094             if(s->first_field) return;
02095         }
02096 
02097         if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02098             src= (AVFrame*)s->current_picture_ptr;
02099         else if(s->last_picture_ptr)
02100             src= (AVFrame*)s->last_picture_ptr;
02101         else
02102             return;
02103 
02104         if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02105             offset[0]=
02106             offset[1]=
02107             offset[2]=
02108             offset[3]= 0;
02109         }else{
02110             offset[0]= y * s->linesize;
02111             offset[1]=
02112             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02113             offset[3]= 0;
02114         }
02115 
02116         emms_c();
02117 
02118         s->avctx->draw_horiz_band(s->avctx, src, offset,
02119                                   y, s->picture_structure, h);
02120     }
02121 }
02122 
02123 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02124     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02125     const int uvlinesize= s->current_picture.linesize[1];
02126     const int mb_size= 4 - s->avctx->lowres;
02127 
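          /* block_index[0..3] locate the four luma 8x8 blocks on the b8_stride
             grid; [4] and [5] locate the corresponding Cb and Cr blocks. */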
02128     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02129     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02130     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02131     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02132     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02133     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02134     //block_index is not used by mpeg2, so it is not affected by chroma_format
02135 
02136     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02137     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02138     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02139 
02140     if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02141     {
02142         if(s->picture_structure==PICT_FRAME){
02143             s->dest[0] += s->mb_y *   linesize << mb_size;
02144             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02145             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02146         }else{
02147             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02148             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02149             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02150             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02151         }
02152     }
02153 }
02154 
02155 void ff_mpeg_flush(AVCodecContext *avctx){
02156     int i;
02157     MpegEncContext *s = avctx->priv_data;
02158 
02159     if(s==NULL || s->picture==NULL)
02160         return;
02161 
02162     for(i=0; i<MAX_PICTURE_COUNT; i++){
02163        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02164                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
02165         free_frame_buffer(s, &s->picture[i]);
02166     }
02167     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02168 
02169     s->mb_x= s->mb_y= 0;
02170     s->closed_gop= 0;
02171 
02172     s->parse_context.state= -1;
02173     s->parse_context.frame_start_found= 0;
02174     s->parse_context.overread= 0;
02175     s->parse_context.overread_index= 0;
02176     s->parse_context.index= 0;
02177     s->parse_context.last_index= 0;
02178     s->bitstream_buffer_size=0;
02179     s->pp_time=0;
02180 }
02181 
02182 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02183                                    DCTELEM *block, int n, int qscale)
02184 {
02185     int i, level, nCoeffs;
02186     const uint16_t *quant_matrix;
02187 
02188     nCoeffs= s->block_last_index[n];
02189 
02190     if (n < 4)
02191         block[0] = block[0] * s->y_dc_scale;
02192     else
02193         block[0] = block[0] * s->c_dc_scale;
02194     /* XXX: only mpeg1 */
02195     quant_matrix = s->intra_matrix;
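          /* MPEG-1 reconstruction with "oddification": forcing the result to be
             odd via (level - 1) | 1 is the standard's mismatch control. */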
02196     for(i=1;i<=nCoeffs;i++) {
02197         int j= s->intra_scantable.permutated[i];
02198         level = block[j];
02199         if (level) {
02200             if (level < 0) {
02201                 level = -level;
02202                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02203                 level = (level - 1) | 1;
02204                 level = -level;
02205             } else {
02206                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02207                 level = (level - 1) | 1;
02208             }
02209             block[j] = level;
02210         }
02211     }
02212 }
02213 
02214 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02215                                    DCTELEM *block, int n, int qscale)
02216 {
02217     int i, level, nCoeffs;
02218     const uint16_t *quant_matrix;
02219 
02220     nCoeffs= s->block_last_index[n];
02221 
02222     quant_matrix = s->inter_matrix;
02223     for(i=0; i<=nCoeffs; i++) {
02224         int j= s->intra_scantable.permutated[i];
02225         level = block[j];
02226         if (level) {
02227             if (level < 0) {
02228                 level = -level;
02229                 level = (((level << 1) + 1) * qscale *
02230                          ((int) (quant_matrix[j]))) >> 4;
02231                 level = (level - 1) | 1;
02232                 level = -level;
02233             } else {
02234                 level = (((level << 1) + 1) * qscale *
02235                          ((int) (quant_matrix[j]))) >> 4;
02236                 level = (level - 1) | 1;
02237             }
02238             block[j] = level;
02239         }
02240     }
02241 }
02242 
02243 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02244                                    DCTELEM *block, int n, int qscale)
02245 {
02246     int i, level, nCoeffs;
02247     const uint16_t *quant_matrix;
02248 
02249     if(s->alternate_scan) nCoeffs= 63;
02250     else nCoeffs= s->block_last_index[n];
02251 
02252     if (n < 4)
02253         block[0] = block[0] * s->y_dc_scale;
02254     else
02255         block[0] = block[0] * s->c_dc_scale;
02256     quant_matrix = s->intra_matrix;
02257     for(i=1;i<=nCoeffs;i++) {
02258         int j= s->intra_scantable.permutated[i];
02259         level = block[j];
02260         if (level) {
02261             if (level < 0) {
02262                 level = -level;
02263                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02264                 level = -level;
02265             } else {
02266                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02267             }
02268             block[j] = level;
02269         }
02270     }
02271 }
02272 
02273 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02274                                    DCTELEM *block, int n, int qscale)
02275 {
02276     int i, level, nCoeffs;
02277     const uint16_t *quant_matrix;
02278     int sum=-1;
02279 
02280     if(s->alternate_scan) nCoeffs= 63;
02281     else nCoeffs= s->block_last_index[n];
02282 
02283     if (n < 4)
02284         block[0] = block[0] * s->y_dc_scale;
02285     else
02286         block[0] = block[0] * s->c_dc_scale;
02287     quant_matrix = s->intra_matrix;
02288     for(i=1;i<=nCoeffs;i++) {
02289         int j= s->intra_scantable.permutated[i];
02290         level = block[j];
02291         if (level) {
02292             if (level < 0) {
02293                 level = -level;
02294                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02295                 level = -level;
02296             } else {
02297                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02298             }
02299             block[j] = level;
02300             sum+=level;
02301         }
02302     }
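          /* MPEG-2 mismatch control: if the coefficient sum is even, toggle the
             LSB of the last coefficient. */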
02303     block[63]^=sum&1;
02304 }
02305 
02306 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02307                                    DCTELEM *block, int n, int qscale)
02308 {
02309     int i, level, nCoeffs;
02310     const uint16_t *quant_matrix;
02311     int sum=-1;
02312 
02313     if(s->alternate_scan) nCoeffs= 63;
02314     else nCoeffs= s->block_last_index[n];
02315 
02316     quant_matrix = s->inter_matrix;
02317     for(i=0; i<=nCoeffs; i++) {
02318         int j= s->intra_scantable.permutated[i];
02319         level = block[j];
02320         if (level) {
02321             if (level < 0) {
02322                 level = -level;
02323                 level = (((level << 1) + 1) * qscale *
02324                          ((int) (quant_matrix[j]))) >> 4;
02325                 level = -level;
02326             } else {
02327                 level = (((level << 1) + 1) * qscale *
02328                          ((int) (quant_matrix[j]))) >> 4;
02329             }
02330             block[j] = level;
02331             sum+=level;
02332         }
02333     }
02334     block[63]^=sum&1;
02335 }
02336 
02337 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02338                                   DCTELEM *block, int n, int qscale)
02339 {
02340     int i, level, qmul, qadd;
02341     int nCoeffs;
02342 
02343     assert(s->block_last_index[n]>=0);
02344 
02345     qmul = qscale << 1;
02346 
02347     if (!s->h263_aic) {
02348         if (n < 4)
02349             block[0] = block[0] * s->y_dc_scale;
02350         else
02351             block[0] = block[0] * s->c_dc_scale;
02352         qadd = (qscale - 1) | 1;
02353     }else{
02354         qadd = 0;
02355     }
02356     if(s->ac_pred)
02357         nCoeffs=63;
02358     else
02359         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02360 
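          /* H.263 reconstruction: |level'| = 2*qscale*|level| + qadd, where qadd
             is (qscale - 1) | 1, or 0 in advanced intra coding (AIC) mode. */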
02361     for(i=1; i<=nCoeffs; i++) {
02362         level = block[i];
02363         if (level) {
02364             if (level < 0) {
02365                 level = level * qmul - qadd;
02366             } else {
02367                 level = level * qmul + qadd;
02368             }
02369             block[i] = level;
02370         }
02371     }
02372 }
02373 
02374 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02375                                   DCTELEM *block, int n, int qscale)
02376 {
02377     int i, level, qmul, qadd;
02378     int nCoeffs;
02379 
02380     assert(s->block_last_index[n]>=0);
02381 
02382     qadd = (qscale - 1) | 1;
02383     qmul = qscale << 1;
02384 
02385     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02386 
02387     for(i=0; i<=nCoeffs; i++) {
02388         level = block[i];
02389         if (level) {
02390             if (level < 0) {
02391                 level = level * qmul - qadd;
02392             } else {
02393                 level = level * qmul + qadd;
02394             }
02395             block[i] = level;
02396         }
02397     }
02398 }
02399 
02403 void ff_set_qscale(MpegEncContext * s, int qscale)
02404 {
02405     if (qscale < 1)
02406         qscale = 1;
02407     else if (qscale > 31)
02408         qscale = 31;
02409 
02410     s->qscale = qscale;
02411     s->chroma_qscale= s->chroma_qscale_table[qscale];
02412 
02413     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02414     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02415 }