00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "avcodec.h"
00031 #include "dsputil.h"
00032 #include "mpegvideo.h"
00033 #include "mpegvideo_common.h"
00034 #include "mjpegenc.h"
00035 #include "msmpeg4.h"
00036 #include "faandct.h"
00037 #include <limits.h>
00038
00039
00040
00041
00042 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00043 DCTELEM *block, int n, int qscale);
00044 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00045 DCTELEM *block, int n, int qscale);
00046 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00047 DCTELEM *block, int n, int qscale);
00048 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00049 DCTELEM *block, int n, int qscale);
00050 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00051 DCTELEM *block, int n, int qscale);
00052 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00053 DCTELEM *block, int n, int qscale);
00054 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00055 DCTELEM *block, int n, int qscale);
00056 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
00057
00058 #ifdef HAVE_XVMC
00059 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
00060 extern void XVMC_field_end(MpegEncContext *s);
00061 extern void XVMC_decode_mb(MpegEncContext *s);
00062 #endif
00063
00064 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
00065
00066
00067
00068
00069
00070
00071
00072
/* Identity mapping: by default the chroma qscale equals the luma qscale (0..31).
   Codecs with a nonlinear chroma scale install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9  10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00077
00078 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
00079 int i;
00080 int end;
00081
00082 st->scantable= src_scantable;
00083
00084 for(i=0; i<64; i++){
00085 int j;
00086 j = src_scantable[i];
00087 st->permutated[i] = permutation[j];
00088 #ifdef ARCH_POWERPC
00089 st->inverse[j] = i;
00090 #endif
00091 }
00092
00093 end=-1;
00094 for(i=0; i<64; i++){
00095 int j;
00096 j = st->permutated[i];
00097 if(j>end) end=j;
00098 st->raster_end[i]= end;
00099 }
00100 }
00101
/**
 * Scan [p, end) for an MPEG start code prefix (00 00 01 xx).
 * *state carries the last bytes seen, so a start code that straddles a
 * buffer boundary between calls is still found.
 * @return pointer just past the start code, or end if none was found.
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* Feed up to 3 bytes through the carried state first, to catch a start
       code split across the previous call's buffer. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Fast scan in steps of up to 3: a 00 00 01 sequence must place a
       0x00 or 0x01 under one of the probed positions. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;  /* p[-3..-1] == 00 00 01: start code found */
            break;
        }
    }

    /* Reload the carried state from the last 4 bytes scanned. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00131
00132
/**
 * Initialize the DCT-related parts of the context: unquantizer function
 * pointers, platform-optimized replacements, and the scan tables
 * (which depend on the IDCT permutation installed by the platform init).
 * @return 0 (always succeeds)
 */
int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* bitexact mode uses a slower but specification-exact mpeg2 intra path */
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* let platform-specific code override the C routines where available */
#if defined(HAVE_MMX)
    MPV_common_init_mmx(s);
#elif defined(ARCH_ALPHA)
    MPV_common_init_axp(s);
#elif defined(HAVE_MLIB)
    MPV_common_init_mlib(s);
#elif defined(HAVE_MMI)
    MPV_common_init_mmi(s);
#elif defined(ARCH_ARMV4L)
    MPV_common_init_armv4l(s);
#elif defined(HAVE_ALTIVEC)
    MPV_common_init_altivec(s);
#elif defined(ARCH_BFIN)
    MPV_common_init_bfin(s);
#endif

    /* build scan tables AFTER the platform init, since they must be remapped
       through the idct_permutation it may have changed */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00175
00176 void copy_picture(Picture *dst, Picture *src){
00177 *dst = *src;
00178 dst->type= FF_BUFFER_TYPE_COPY;
00179 }
00180
/**
 * Allocate a Picture's frame buffer (unless shared) and all per-MB side
 * data tables (qscale, mb_type, motion vectors, ...).
 * The side tables are only allocated the first time (qscale_table==NULL).
 * @param shared nonzero if pic->data[] is externally owned and must not be
 *               (re)allocated or released here.
 * @return 0 on success, -1 on failure.
 * NOTE: CHECKED_ALLOCZ jumps to the fail: label below on allocation failure.
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; // +1 row/slot for edge MBs
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;
    int r= -1;

    if(shared){
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* all pictures of a sequence must share one stride */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        s->linesize = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    /* first call for this Picture: allocate all side-data tables */
    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1; // offset past the edge rows/column
        if(s->out_format == FMT_H264){
            /* H.264 stores motion vectors at 4x4 granularity */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* other codecs: 8x8 granularity */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    /* shift the picture-type history and age the picture past any B frames
       it "skipped over" (age drives skipped-MB copying optimizations) */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    if(r>=0)
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    return -1;
}
00269
00273 static void free_picture(MpegEncContext *s, Picture *pic){
00274 int i;
00275
00276 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00277 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
00278 }
00279
00280 av_freep(&pic->mb_var);
00281 av_freep(&pic->mc_mb_var);
00282 av_freep(&pic->mb_mean);
00283 av_freep(&pic->mbskip_table);
00284 av_freep(&pic->qscale_table);
00285 av_freep(&pic->mb_type_base);
00286 av_freep(&pic->dct_coeff);
00287 av_freep(&pic->pan_scan);
00288 pic->mb_type= NULL;
00289 for(i=0; i<2; i++){
00290 av_freep(&pic->motion_val_base[i]);
00291 av_freep(&pic->ref_index[i]);
00292 }
00293
00294 if(pic->type == FF_BUFFER_TYPE_SHARED){
00295 for(i=0; i<4; i++){
00296 pic->base[i]=
00297 pic->data[i]= NULL;
00298 }
00299 pic->type= 0;
00300 }
00301 }
00302
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads and DCT blocks.
 * NOTE: CHECKED_ALLOCZ jumps to fail: on allocation failure.
 * @return 0 on success, -1 on failure.
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    /* the three scratchpads alias one allocation; obmc starts 16 bytes in */
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t))
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
00332
00333 static void free_duplicate_context(MpegEncContext *s){
00334 if(s==NULL) return;
00335
00336 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00337 av_freep(&s->me.scratchpad);
00338 s->rd_scratchpad=
00339 s->b_scratchpad=
00340 s->obmc_scratchpad= NULL;
00341
00342 av_freep(&s->dct_error_sum);
00343 av_freep(&s->me.map);
00344 av_freep(&s->me.score_map);
00345 av_freep(&s->blocks);
00346 s->block= NULL;
00347 }
00348
00349 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
00350 #define COPY(a) bak->a= src->a
00351 COPY(allocated_edge_emu_buffer);
00352 COPY(edge_emu_buffer);
00353 COPY(me.scratchpad);
00354 COPY(rd_scratchpad);
00355 COPY(b_scratchpad);
00356 COPY(obmc_scratchpad);
00357 COPY(me.map);
00358 COPY(me.score_map);
00359 COPY(blocks);
00360 COPY(block);
00361 COPY(start_mb_y);
00362 COPY(end_mb_y);
00363 COPY(me.map_generation);
00364 COPY(pb);
00365 COPY(dct_error_sum);
00366 COPY(dct_count[0]);
00367 COPY(dct_count[1]);
00368 #undef COPY
00369 }
00370
00371 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
00372 MpegEncContext bak;
00373 int i;
00374
00375
00376 backup_duplicate_context(&bak, dst);
00377 memcpy(dst, src, sizeof(MpegEncContext));
00378 backup_duplicate_context(dst, &bak);
00379 for(i=0;i<12;i++){
00380 dst->pblocks[i] = (short *)(&dst->block[i]);
00381 }
00382
00383 }
00384
00389 void MPV_common_defaults(MpegEncContext *s){
00390 s->y_dc_scale_table=
00391 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00392 s->chroma_qscale_table= ff_default_chroma_qscale_table;
00393 s->progressive_frame= 1;
00394 s->progressive_sequence= 1;
00395 s->picture_structure= PICT_FRAME;
00396
00397 s->coded_picture_number = 0;
00398 s->picture_number = 0;
00399 s->input_picture_number = 0;
00400
00401 s->picture_in_gop_number = 0;
00402
00403 s->f_code = 1;
00404 s->b_code = 1;
00405 }
00406
/**
 * Set decoder-side defaults; currently identical to the common defaults.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00414
/**
 * Initialize the common (encoder + decoder) parts of the context:
 * derived macroblock geometry, all per-frame/per-MB tables, and the
 * per-thread slice contexts. Requires width/height and codec fields set.
 * NOTE: CHECKED_ALLOCZ jumps to fail: on allocation failure, where
 * everything is released via MPV_common_end().
 * @return 0 on success, -1 on error.
 */
int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    s->mb_height = (s->height + 15) / 16;

    /* slice threading: at most one thread per macroblock row */
    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* strides carry one extra column so edge accesses stay in bounds */
    s->mb_width = (s->width + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* chroma subsampling shifts derived from the pixel format */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                  &(s->chroma_y_shift) );

    /* edge positions in luma pixels (multiple of 16) */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    /* block_wrap: row stride of the per-block prediction tables;
       indices 0..3 are luma 8x8 blocks, 4..5 are chroma */
    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* uppercase the fourcc tags so later comparisons are case-insensitive */
    s->codec_tag= toupper( s->avctx->codec_tag     &0xFF)
               + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
               + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
               + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->stream_codec_tag= toupper( s->avctx->stream_codec_tag     &0xFF)
                      + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                      + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                      + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* raster MB index -> xy position in the (strided) MB arrays */
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* motion-vector tables; the +mb_stride+1 offset skips the edge row/col */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        /* quant matrices: 32 qscale values x 64 coefficients each */
        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode: per-field MV tables [ref][field_src][field_dst] */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
        }
    }
    if (s->out_format == FMT_H263) {
        /* ac values: 16 coefficients per 8x8 block for AC prediction */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 = 128 * 8: the DC predictor reset value, pre-scaled */
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block (all 1 initially, so first frame predicts nothing) */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);

    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* thread_context[0] is the main context; the others are full copies.
       NOTE(review): the av_malloc() result is copied into without a NULL
       check here — confirm allocation failure handling upstream. */
    s->thread_context[0]= s;
    threads = s->avctx->thread_count;

    for(i=1; i<threads; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<threads; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        /* split the MB rows evenly (with rounding) across the threads */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
00609
00610
/**
 * Free everything allocated by MPV_common_init() (and by alloc_picture()).
 * Safe to call on a partially-initialized context: av_freep() is NULL-safe
 * and all pointers are cleared after freeing.
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* per-thread scratch buffers; index 0 is the main context itself */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers were offsets into the freed base allocations */
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release each picture's buffers before freeing the array itself */
    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
00693
/**
 * Initialize an RLTable's derived lookup arrays: per-run maximum level,
 * per-level maximum run, and the index of the first table entry for each
 * run, separately for "not last" (index 0) and "last" (index 1) halves.
 * @param static_store if non-NULL, a caller-provided buffer the three
 *        arrays are sliced out of instead of being heap-allocated; in
 *        that case a second call is a no-op.
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n is used as the "unset" sentinel value in index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* slice the destination arrays out of static_store, or heap-allocate */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
00744
/**
 * Build the combined run/level VLC tables (rl_vlc[q]) for an RLTable,
 * one table per qscale 0..31 with the level pre-multiplied by the
 * dequantization factors qmul/qadd.
 * @param use_static nonzero to allocate from static storage and skip
 *        re-initialization if rl_vlc[0] already exists.
 */
void init_vlc_rl(RLTable *rl, int use_static)
{
    int i, q;

    /* Return if static table is already initialized */
    if(use_static && rl->rl_vlc[0])
        return;

    init_vlc(&rl->vlc, 9, rl->n + 1,
             &rl->table_vlc[0][1], 4, 2,
             &rl->table_vlc[0][0], 4, 2, use_static);

    for(q=0; q<32; q++){
        /* h263/msmpeg4-style dequant: level*2*q + (q-1)|1 */
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            /* q==0 means "no dequant": store raw levels */
            qmul=1;
            qadd=0;
        }
        /* NOTE(review): the allocation result is used unchecked below —
           confirm OOM handling is acceptable here. */
        if(use_static)
            rl->rl_vlc[q]= av_mallocz_static(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
        else
            rl->rl_vlc[q]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ /* illegal code */
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ /* more bits needed (sub-table) */
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ /* esc */
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192; /* flag "last" codes via run offset */
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
00797
00798
00799
/* Replicate the outermost image pixels into a w-pixel border on all four
   sides of the width x height plane at buf (line size = wrap). The caller
   must guarantee the border memory exists around the plane. */
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *line, *bottom;
    int i;

    bottom = buf + (height - 1) * wrap;

    /* top and bottom borders: duplicate the first/last row w times */
    for (i = 1; i <= w; i++) {
        memcpy(buf    - i * wrap, buf,    width);
        memcpy(bottom + i * wrap, bottom, width);
    }

    /* left and right borders: extend each row's edge pixel */
    for (line = buf, i = 0; i < height; i++, line += wrap) {
        memset(line - w,     line[0],         w);
        memset(line + width, line[width - 1], w);
    }

    /* four corners: fill with the nearest corner pixel */
    for (i = 1; i <= w; i++) {
        memset(buf    - i * wrap - w,     buf[0],            w);
        memset(buf    - i * wrap + width, buf[width - 1],    w);
        memset(bottom + i * wrap - w,     bottom[0],         w);
        memset(bottom + i * wrap + width, bottom[width - 1], w);
    }
}
00826
00827 int ff_find_unused_picture(MpegEncContext *s, int shared){
00828 int i;
00829
00830 if(shared){
00831 for(i=0; i<MAX_PICTURE_COUNT; i++){
00832 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00833 }
00834 }else{
00835 for(i=0; i<MAX_PICTURE_COUNT; i++){
00836 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i;
00837 }
00838 for(i=0; i<MAX_PICTURE_COUNT; i++){
00839 if(s->picture[i].data[0]==NULL) return i;
00840 }
00841 }
00842
00843 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00844
00845
00846
00847
00848
00849
00850
00851
00852
00853
00854
00855 abort();
00856 return -1;
00857 }
00858
00859 static void update_noise_reduction(MpegEncContext *s){
00860 int intra, i;
00861
00862 for(intra=0; intra<2; intra++){
00863 if(s->dct_count[intra] > (1<<16)){
00864 for(i=0; i<64; i++){
00865 s->dct_error_sum[intra][i] >>=1;
00866 }
00867 s->dct_count[intra] >>= 1;
00868 }
00869
00870 for(i=0; i<64; i++){
00871 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
00872 }
00873 }
00874 }
00875
/**
 * Per-frame setup called before decoding/encoding a frame: rotates the
 * last/next/current picture pointers, (re)allocates the current picture,
 * adjusts linesizes for field pictures and selects the unquantizers.
 * @return 0 on success, -1 on allocation failure (or the XVMC result).
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

            /* release forgotten pictures */
            /* if(mpeg124/h263) */
            if(!s->encoding){
                for(i=0; i<MAX_PICTURE_COUNT; i++){
                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                        av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                    }
                }
            }
        }
    }
alloc:
    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        /* reuse the current slot if its buffer was dropped, else grab a free one */
        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        /* reference==3 marks a full-frame reference; H.264 uses the
           picture structure as a per-field reference mask */
        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;

    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference pictures: current becomes next (unless dropable),
       old next becomes last */
    if (s->pict_type != B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }

    if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

    /* stream started on a non-keyframe: allocate a dummy reference and retry */
    if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
        av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
        goto alloc;
    }

    assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* field pictures: step through the frame with doubled linesize,
       starting one line down for the bottom field */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer: we can't do it during init as it might change for
       mpeg4 and we can't do it in the header decode as init is not called
       for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif
    return 0;
}
01007
01008
/**
 * Per-frame teardown: draws the replicated edge border around reference
 * frames (needed for unrestricted motion vectors), updates last-frame
 * bookkeeping and, when encoding, releases non-reference buffers.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c();  /* leave MMX state clean for float code */

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
01058
01066 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01067 int x, y, fr, f;
01068
01069 sx= av_clip(sx, 0, w-1);
01070 sy= av_clip(sy, 0, h-1);
01071 ex= av_clip(ex, 0, w-1);
01072 ey= av_clip(ey, 0, h-1);
01073
01074 buf[sy*stride + sx]+= color;
01075
01076 if(FFABS(ex - sx) > FFABS(ey - sy)){
01077 if(sx > ex){
01078 FFSWAP(int, sx, ex);
01079 FFSWAP(int, sy, ey);
01080 }
01081 buf+= sx + sy*stride;
01082 ex-= sx;
01083 f= ((ey-sy)<<16)/ex;
01084 for(x= 0; x <= ex; x++){
01085 y = (x*f)>>16;
01086 fr= (x*f)&0xFFFF;
01087 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
01088 buf[(y+1)*stride + x]+= (color* fr )>>16;
01089 }
01090 }else{
01091 if(sy > ey){
01092 FFSWAP(int, sx, ex);
01093 FFSWAP(int, sy, ey);
01094 }
01095 buf+= sx + sy*stride;
01096 ey-= sy;
01097 if(ey) f= ((ex-sx)<<16)/ey;
01098 else f= 0;
01099 for(y= 0; y <= ey; y++){
01100 x = (y*f)>>16;
01101 fr= (y*f)&0xFFFF;
01102 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;;
01103 buf[y*stride + x+1]+= (color* fr )>>16;;
01104 }
01105 }
01106 }
01107
/**
 * Draws an arrow from (sx, sy) to (ex, ey) into the given plane.
 * The two head strokes are only drawn when the arrow is longer than
 * 3 pixels, scaled to a fixed length via ff_sqrt.
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int vx, vy;

    /* loose pre-clamp; draw_line() clips exactly to the buffer */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    if (vx * vx + vy * vy > 3 * 3) {
        int hx  = vx + vy;          /* direction rotated by 45 degrees */
        int hy  = -vx + vy;
        int len = ff_sqrt((hx * hx + hy * hy) << 8);

        /* normalize the head strokes to a constant on-screen length */
        hx = ROUNDED_DIV(hx * 3 << 4, len);
        hy = ROUNDED_DIV(hy * 3 << 4, len);

        draw_line(buf, sx, sy, sx + hx, sy + hy, w, h, stride, color);
        draw_line(buf, sx, sy, sx - hy, sy + hx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01140
/**
 * Prints/visualizes debugging info for the given picture, depending on
 * the avctx->debug and avctx->debug_mv flags:
 *  - textual per-macroblock dumps (skip count, qscale, MB type) via av_log
 *  - graphical overlays (motion vectors, QP shading, MB partitioning)
 *    drawn into a private copy of the picture planes.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(!pict || !pict->mb_type) return;

    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    /* skip count, capped to one digit for alignment */
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    /* first char: prediction type of the macroblock */
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partitioning of the macroblock */
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlacing (only meaningful for H.264 here) */
                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }

            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; /* needed to see the vectors without trashing the buffers */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* draw into a private copy so the decoded picture itself stays clean;
           note the >> binds to the whole product, matching the chroma plane size */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    /* type 0: P forward, 1: B forward, 2: B backward */
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                            case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                        continue;
                                    direction = 0;
                                    break;
                            case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                        continue;
                                    direction = 0;
                                    break;
                            case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                        continue;
                                    direction = 1;
                                    break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, from block center along the MV */
                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                              int sx= mb_x*16 + 8;
                              int sy= mb_y*16 + 8;
                              int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                              int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                              int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                              draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    /* shade chroma of the MB proportionally to its qscale */
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a chroma color from an angle on the U/V plane */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));

                    u=v=128; /* gray = no special type */
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
                        /* direct+skip left gray */
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
                        /* plain skip left gray */
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
                    }

                    /* segmentation lines: invert luma along partition borders */
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            /* mark 8x8 blocks whose sub-vectors differ */
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        /* no visualization for interlaced H.264 MBs */
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
01401
/**
 * Copies a block_w x block_h block from src at (src_x, src_y) into buf,
 * replicating picture edge pixels for the parts of the block that fall
 * outside the w x h picture. Used when motion vectors point (partly)
 * outside the decoded picture and no padded edges are available.
 *
 * @param linesize byte stride of both buf and src
 * @param w,h      picture width/height in pixels
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h){
    int x, y;
    int x0, y0, x1, y1;

    /* clamp the source position so at least one row/column of real
       picture data overlaps the block, adjusting src to match */
    if (src_y >= h) {
        src  += (h - 1 - src_y) * linesize;
        src_y = h - 1;
    } else if (src_y <= -block_h) {
        src  += (1 - block_h - src_y) * linesize;
        src_y = 1 - block_h;
    }
    if (src_x >= w) {
        src  += w - 1 - src_x;
        src_x = w - 1;
    } else if (src_x <= -block_w) {
        src  += 1 - block_w - src_x;
        src_x = 1 - block_w;
    }

    /* valid region of the block: [x0, x1) x [y0, y1) */
    x0 = FFMAX(0, -src_x);
    y0 = FFMAX(0, -src_y);
    x1 = FFMIN(block_w, w - src_x);
    y1 = FFMIN(block_h, h - src_y);

    /* copy the existing part */
    for (y = y0; y < y1; y++)
        for (x = x0; x < x1; x++)
            buf[x + y * linesize] = src[x + y * linesize];

    /* replicate the top row of the valid region upwards */
    for (y = 0; y < y0; y++)
        for (x = x0; x < x1; x++)
            buf[x + y * linesize] = buf[x + y0 * linesize];

    /* replicate the bottom row of the valid region downwards */
    for (y = y1; y < block_h; y++)
        for (x = x0; x < x1; x++)
            buf[x + y * linesize] = buf[x + (y1 - 1) * linesize];

    /* replicate left and right columns across the full block height */
    for (y = 0; y < block_h; y++) {
        for (x = 0; x < x0; x++)
            buf[x + y * linesize] = buf[x0 + y * linesize];
        for (x = x1; x < block_w; x++)
            buf[x + y * linesize] = buf[x1 - 1 + y * linesize];
    }
}
01472
01473 static inline int hpel_motion_lowres(MpegEncContext *s,
01474 uint8_t *dest, uint8_t *src,
01475 int field_based, int field_select,
01476 int src_x, int src_y,
01477 int width, int height, int stride,
01478 int h_edge_pos, int v_edge_pos,
01479 int w, int h, h264_chroma_mc_func *pix_op,
01480 int motion_x, int motion_y)
01481 {
01482 const int lowres= s->avctx->lowres;
01483 const int s_mask= (2<<lowres)-1;
01484 int emu=0;
01485 int sx, sy;
01486
01487 if(s->quarter_sample){
01488 motion_x/=2;
01489 motion_y/=2;
01490 }
01491
01492 sx= motion_x & s_mask;
01493 sy= motion_y & s_mask;
01494 src_x += motion_x >> (lowres+1);
01495 src_y += motion_y >> (lowres+1);
01496
01497 src += src_y * stride + src_x;
01498
01499 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
01500 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01501 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01502 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01503 src= s->edge_emu_buffer;
01504 emu=1;
01505 }
01506
01507 sx <<= 2 - lowres;
01508 sy <<= 2 - lowres;
01509 if(field_select)
01510 src += s->linesize;
01511 pix_op[lowres](dest, src, stride, h, sx, sy);
01512 return emu;
01513 }
01514
01515
/**
 * MPEG-style motion compensation of one macroblock (luma + chroma) at
 * reduced (lowres) resolution. Handles frame and field based prediction
 * and chroma sub-pel derivation for the H.263, H.261 and MPEG cases.
 *
 * @param field_based   1 for field prediction (doubled line strides)
 * @param bottom_field  write to the bottom field of dest
 * @param field_select  read from the bottom field of the reference
 * @param h             height of the predicted area in luma lines
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    /* quarter-pel vectors are handled at half-pel precision here */
    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        /* compensate for the vertical offset between the two fields */
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* split vectors into sub-pel phase (sx/sy) and integer position */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){
        /* H.261: chroma vector is the luma vector divided by 4 */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s + (my >> lowres);
    } else {
        /* MPEG-1/2: chroma vector is the luma vector divided by 2 */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            /* block reads outside the picture: emulate edges for all planes */
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){
        /* start writing at the second line (bottom field of dest) */
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        /* read from the bottom field of the reference */
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* scale sub-pel phase to the 1/8-pel units pix_op expects */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    /* FIXME h261 lowres loop filter */
}
01611
01612 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01613 uint8_t *dest_cb, uint8_t *dest_cr,
01614 uint8_t **ref_picture,
01615 h264_chroma_mc_func *pix_op,
01616 int mx, int my){
01617 const int lowres= s->avctx->lowres;
01618 const int block_s= 8>>lowres;
01619 const int s_mask= (2<<lowres)-1;
01620 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01621 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01622 int emu=0, src_x, src_y, offset, sx, sy;
01623 uint8_t *ptr;
01624
01625 if(s->quarter_sample){
01626 mx/=2;
01627 my/=2;
01628 }
01629
01630
01631
01632 mx= ff_h263_round_chroma(mx);
01633 my= ff_h263_round_chroma(my);
01634
01635 sx= mx & s_mask;
01636 sy= my & s_mask;
01637 src_x = s->mb_x*block_s + (mx >> (lowres+1));
01638 src_y = s->mb_y*block_s + (my >> (lowres+1));
01639
01640 offset = src_y * s->uvlinesize + src_x;
01641 ptr = ref_picture[1] + offset;
01642 if(s->flags&CODEC_FLAG_EMU_EDGE){
01643 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01644 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01645 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01646 ptr= s->edge_emu_buffer;
01647 emu=1;
01648 }
01649 }
01650 sx <<= 2 - lowres;
01651 sy <<= 2 - lowres;
01652 pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01653
01654 ptr = ref_picture[2] + offset;
01655 if(emu){
01656 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01657 ptr= s->edge_emu_buffer;
01658 }
01659 pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01660 }
01661
/**
 * Motion compensation of one macroblock at reduced (lowres) resolution,
 * dispatching on s->mv_type (16x16, 8x8, field, 16x8, dual-prime).
 *
 * @param dir         motion direction: 0 = forward, 1 = backward
 * @param ref_picture pointers to the reference picture planes
 * @param pix_op      halfpel motion compensation function table
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        break;
    case MV_TYPE_8X8:
        /* four separate luma vectors; chroma uses their sum */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                               ref_picture[0], 0, 0,
                               (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: predict top and bottom fields separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s);
        } else {
            /* field picture: same-parity prediction may need the
               current picture instead of the reference */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* pick the correct reference for each 16x8 half */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: average two field predictions */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);

                /* after the first one we average the remaining ones */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame when
                   this is second field */
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
01787
01788
01789 static inline void put_dct(MpegEncContext *s,
01790 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01791 {
01792 s->dct_unquantize_intra(s, block, i, qscale);
01793 s->dsp.idct_put (dest, line_size, block);
01794 }
01795
01796
01797 static inline void add_dct(MpegEncContext *s,
01798 DCTELEM *block, int i, uint8_t *dest, int line_size)
01799 {
01800 if (s->block_last_index[i] >= 0) {
01801 s->dsp.idct_add (dest, line_size, block);
01802 }
01803 }
01804
01805 static inline void add_dequant_dct(MpegEncContext *s,
01806 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01807 {
01808 if (s->block_last_index[i] >= 0) {
01809 s->dct_unquantize_inter(s, block, i, qscale);
01810
01811 s->dsp.idct_add (dest, line_size, block);
01812 }
01813 }
01814
01818 void ff_clean_intra_table_entries(MpegEncContext *s)
01819 {
01820 int wrap = s->b8_stride;
01821 int xy = s->block_index[0];
01822
01823 s->dc_val[0][xy ] =
01824 s->dc_val[0][xy + 1 ] =
01825 s->dc_val[0][xy + wrap] =
01826 s->dc_val[0][xy + 1 + wrap] = 1024;
01827
01828 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
01829 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01830 if (s->msmpeg4_version>=3) {
01831 s->coded_block[xy ] =
01832 s->coded_block[xy + 1 ] =
01833 s->coded_block[xy + wrap] =
01834 s->coded_block[xy + 1 + wrap] = 0;
01835 }
01836
01837 wrap = s->mb_stride;
01838 xy = s->mb_x + s->mb_y * wrap;
01839 s->dc_val[1][xy] =
01840 s->dc_val[2][xy] = 1024;
01841
01842 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01843 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01844
01845 s->mbintra_table[xy]= 0;
01846 }
01847
01848
01849
01850
01851
01852
01853
01854
01855
01856
01857
/**
 * Reconstructs one decoded macroblock: updates DC/intra prediction
 * state, performs motion compensation (lowres or full resolution),
 * inverse-quantizes/inverse-transforms the coefficient blocks and
 * writes the result into the current picture. lowres_flag is passed as
 * a compile-time constant by the two MPV_decode_mb() call sites so the
 * always-inline body specializes.
 */
static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], int lowres_flag)
{
    int mb_x, mb_y;
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
#ifdef HAVE_XVMC
    /* hardware acceleration: XvMC does the whole reconstruction */
    if(s->avctx->xvmc_acceleration){
        XVMC_decode_mb(s);
        return;
    }
#endif

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save the permuted DCT coefficients for debugging */
       int i,j;
       DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
       for(i=0; i<6; i++)
           for(j=0; j<64; j++)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
    }

    s->current_picture.qscale_table[mb_xy]= s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (s->h263_pred || s->h263_aic) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (s->h263_pred || s->h263_aic)
        s->mbintra_table[mb_xy]=1;

    /* skip reconstruction only when nothing will read the result:
       encoding non-reference/B pictures without PSNR and without RD */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0];
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock was skipped in the last frame too */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=I_TYPE);

                (*mbskip_ptr) ++; /* count consecutive skips, capped at 99 */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if the block was skipped at least 'age' times the
                   pixels in the reference are already correct */
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                (*mbskip_ptr) ++; /* keep the counter comparable with age */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B pictures with draw_horiz_band off: reconstruct into a
               scratchpad and copy visible data at the end */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* inter macroblock: motion compensate first */
            if(!s->encoding){
                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        /* bidirectional: second prediction is averaged in */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==B_TYPE){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->hurry_up>1) goto skip_idct;
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == B_TYPE)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != I_TYPE)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue: codecs that need dequantization here vs.
               those whose bitstream parsing already dequantized */
            if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 - two chroma blocks per plane */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(s->codec_id != CODEC_ID_WMV2){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* 4:2:2 / 4:4:4 chroma handling */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){ /* 4:4:4 */
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (ENABLE_WMV2) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra macroblock: no motion compensation, write the IDCT
               result directly (MPEG-1/2 decode path uses idct_put) */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        /* 4:2:2 / 4:4:4 chroma handling */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){ /* 4:4:4 */
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratchpad reconstruction to the visible picture */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02098
02099 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02100 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1);
02101 else MPV_decode_mb_internal(s, block, 0);
02102 }
02103
02108 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02109 if (s->avctx->draw_horiz_band) {
02110 AVFrame *src;
02111 int offset[4];
02112
02113 if(s->picture_structure != PICT_FRAME){
02114 h <<= 1;
02115 y <<= 1;
02116 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02117 }
02118
02119 h= FFMIN(h, s->avctx->height - y);
02120
02121 if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02122 src= (AVFrame*)s->current_picture_ptr;
02123 else if(s->last_picture_ptr)
02124 src= (AVFrame*)s->last_picture_ptr;
02125 else
02126 return;
02127
02128 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02129 offset[0]=
02130 offset[1]=
02131 offset[2]=
02132 offset[3]= 0;
02133 }else{
02134 offset[0]= y * s->linesize;;
02135 offset[1]=
02136 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02137 offset[3]= 0;
02138 }
02139
02140 emms_c();
02141
02142 s->avctx->draw_horiz_band(s->avctx, src, offset,
02143 y, s->picture_structure, h);
02144 }
02145 }
02146
02147 void ff_init_block_index(MpegEncContext *s){
02148 const int linesize= s->current_picture.linesize[0];
02149 const int uvlinesize= s->current_picture.linesize[1];
02150 const int mb_size= 4 - s->avctx->lowres;
02151
02152 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
02153 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
02154 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02155 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02156 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02157 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02158
02159
02160 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02161 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02162 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02163
02164 if(!(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02165 {
02166 s->dest[0] += s->mb_y * linesize << mb_size;
02167 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02168 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02169 }
02170 }
02171
02172 void ff_mpeg_flush(AVCodecContext *avctx){
02173 int i;
02174 MpegEncContext *s = avctx->priv_data;
02175
02176 if(s==NULL || s->picture==NULL)
02177 return;
02178
02179 for(i=0; i<MAX_PICTURE_COUNT; i++){
02180 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02181 || s->picture[i].type == FF_BUFFER_TYPE_USER))
02182 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
02183 }
02184 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02185
02186 s->mb_x= s->mb_y= 0;
02187
02188 s->parse_context.state= -1;
02189 s->parse_context.frame_start_found= 0;
02190 s->parse_context.overread= 0;
02191 s->parse_context.overread_index= 0;
02192 s->parse_context.index= 0;
02193 s->parse_context.last_index= 0;
02194 s->bitstream_buffer_size=0;
02195 s->pp_time=0;
02196 }
02197
02198 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02199 DCTELEM *block, int n, int qscale)
02200 {
02201 int i, level, nCoeffs;
02202 const uint16_t *quant_matrix;
02203
02204 nCoeffs= s->block_last_index[n];
02205
02206 if (n < 4)
02207 block[0] = block[0] * s->y_dc_scale;
02208 else
02209 block[0] = block[0] * s->c_dc_scale;
02210
02211 quant_matrix = s->intra_matrix;
02212 for(i=1;i<=nCoeffs;i++) {
02213 int j= s->intra_scantable.permutated[i];
02214 level = block[j];
02215 if (level) {
02216 if (level < 0) {
02217 level = -level;
02218 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02219 level = (level - 1) | 1;
02220 level = -level;
02221 } else {
02222 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02223 level = (level - 1) | 1;
02224 }
02225 block[j] = level;
02226 }
02227 }
02228 }
02229
02230 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02231 DCTELEM *block, int n, int qscale)
02232 {
02233 int i, level, nCoeffs;
02234 const uint16_t *quant_matrix;
02235
02236 nCoeffs= s->block_last_index[n];
02237
02238 quant_matrix = s->inter_matrix;
02239 for(i=0; i<=nCoeffs; i++) {
02240 int j= s->intra_scantable.permutated[i];
02241 level = block[j];
02242 if (level) {
02243 if (level < 0) {
02244 level = -level;
02245 level = (((level << 1) + 1) * qscale *
02246 ((int) (quant_matrix[j]))) >> 4;
02247 level = (level - 1) | 1;
02248 level = -level;
02249 } else {
02250 level = (((level << 1) + 1) * qscale *
02251 ((int) (quant_matrix[j]))) >> 4;
02252 level = (level - 1) | 1;
02253 }
02254 block[j] = level;
02255 }
02256 }
02257 }
02258
02259 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02260 DCTELEM *block, int n, int qscale)
02261 {
02262 int i, level, nCoeffs;
02263 const uint16_t *quant_matrix;
02264
02265 if(s->alternate_scan) nCoeffs= 63;
02266 else nCoeffs= s->block_last_index[n];
02267
02268 if (n < 4)
02269 block[0] = block[0] * s->y_dc_scale;
02270 else
02271 block[0] = block[0] * s->c_dc_scale;
02272 quant_matrix = s->intra_matrix;
02273 for(i=1;i<=nCoeffs;i++) {
02274 int j= s->intra_scantable.permutated[i];
02275 level = block[j];
02276 if (level) {
02277 if (level < 0) {
02278 level = -level;
02279 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02280 level = -level;
02281 } else {
02282 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02283 }
02284 block[j] = level;
02285 }
02286 }
02287 }
02288
02289 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02290 DCTELEM *block, int n, int qscale)
02291 {
02292 int i, level, nCoeffs;
02293 const uint16_t *quant_matrix;
02294 int sum=-1;
02295
02296 if(s->alternate_scan) nCoeffs= 63;
02297 else nCoeffs= s->block_last_index[n];
02298
02299 if (n < 4)
02300 block[0] = block[0] * s->y_dc_scale;
02301 else
02302 block[0] = block[0] * s->c_dc_scale;
02303 quant_matrix = s->intra_matrix;
02304 for(i=1;i<=nCoeffs;i++) {
02305 int j= s->intra_scantable.permutated[i];
02306 level = block[j];
02307 if (level) {
02308 if (level < 0) {
02309 level = -level;
02310 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02311 level = -level;
02312 } else {
02313 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02314 }
02315 block[j] = level;
02316 sum+=level;
02317 }
02318 }
02319 block[63]^=sum&1;
02320 }
02321
02322 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02323 DCTELEM *block, int n, int qscale)
02324 {
02325 int i, level, nCoeffs;
02326 const uint16_t *quant_matrix;
02327 int sum=-1;
02328
02329 if(s->alternate_scan) nCoeffs= 63;
02330 else nCoeffs= s->block_last_index[n];
02331
02332 quant_matrix = s->inter_matrix;
02333 for(i=0; i<=nCoeffs; i++) {
02334 int j= s->intra_scantable.permutated[i];
02335 level = block[j];
02336 if (level) {
02337 if (level < 0) {
02338 level = -level;
02339 level = (((level << 1) + 1) * qscale *
02340 ((int) (quant_matrix[j]))) >> 4;
02341 level = -level;
02342 } else {
02343 level = (((level << 1) + 1) * qscale *
02344 ((int) (quant_matrix[j]))) >> 4;
02345 }
02346 block[j] = level;
02347 sum+=level;
02348 }
02349 }
02350 block[63]^=sum&1;
02351 }
02352
02353 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02354 DCTELEM *block, int n, int qscale)
02355 {
02356 int i, level, qmul, qadd;
02357 int nCoeffs;
02358
02359 assert(s->block_last_index[n]>=0);
02360
02361 qmul = qscale << 1;
02362
02363 if (!s->h263_aic) {
02364 if (n < 4)
02365 block[0] = block[0] * s->y_dc_scale;
02366 else
02367 block[0] = block[0] * s->c_dc_scale;
02368 qadd = (qscale - 1) | 1;
02369 }else{
02370 qadd = 0;
02371 }
02372 if(s->ac_pred)
02373 nCoeffs=63;
02374 else
02375 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02376
02377 for(i=1; i<=nCoeffs; i++) {
02378 level = block[i];
02379 if (level) {
02380 if (level < 0) {
02381 level = level * qmul - qadd;
02382 } else {
02383 level = level * qmul + qadd;
02384 }
02385 block[i] = level;
02386 }
02387 }
02388 }
02389
02390 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02391 DCTELEM *block, int n, int qscale)
02392 {
02393 int i, level, qmul, qadd;
02394 int nCoeffs;
02395
02396 assert(s->block_last_index[n]>=0);
02397
02398 qadd = (qscale - 1) | 1;
02399 qmul = qscale << 1;
02400
02401 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02402
02403 for(i=0; i<=nCoeffs; i++) {
02404 level = block[i];
02405 if (level) {
02406 if (level < 0) {
02407 level = level * qmul - qadd;
02408 } else {
02409 level = level * qmul + qadd;
02410 }
02411 block[i] = level;
02412 }
02413 }
02414 }
02415
02419 void ff_set_qscale(MpegEncContext * s, int qscale)
02420 {
02421 if (qscale < 1)
02422 qscale = 1;
02423 else if (qscale > 31)
02424 qscale = 31;
02425
02426 s->qscale = qscale;
02427 s->chroma_qscale= s->chroma_qscale_table[qscale];
02428
02429 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02430 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02431 }