/*
 * ADPCM codecs
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"

#define BLKSIZE 1024

/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

static const uint8_t AdaptCoeff1[] = {
    64, 128, 0, 48, 60, 115, 98
};

static const int8_t AdaptCoeff2[] = {
    0, -64, 0, 16, 0, -52, -58
};

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};

static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

// padded to zero where table size is less then 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

/* end of tables */

typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;
    /* for encoding */
    int prev_sample;

    /* MS version */
    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;

typedef struct ADPCMContext {
    ADPCMChannelStatus status[6];
} ADPCMContext;

/* XXX: implement encoding */

#if CONFIG_ENCODERS
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    uint8_t *extradata;
    int i;
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */
                                                                                             /* and we have 4 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */
                                                                                       /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        avctx->extradata_size = 32;
        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
        if (!extradata)
            return AVERROR(ENOMEM);
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
            return -1;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        return -1;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}


static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int delta = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;

    nibble= sample - predictor;
    if(nibble>=0) bias= c->idelta/2;
    else          bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return nibble;
}

static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int nibble, delta;

    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;

    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
#define FREEZE_INTERVAL 128
    //FIXME 6% faster if frontier is a compile-time constant
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    const int max_paths = frontier*FREEZE_INTERVAL;
    TrellisPath paths[max_paths], *p;
    TrellisNode node_buf[2][frontier];
    TrellisNode *nodep_buf[2][frontier];
    TrellisNode **nodes = nodep_buf[0]; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf[1];
    int pathn = 0, froze = -1, i, j, k;

    assert(!(max_paths&(max_paths-1)));

    memset(nodep_buf, 0, sizeof(nodep_buf));
    nodes[0] = &node_buf[1][0];
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf[i&1];
        TrellisNode **u;
        int sample = samples[i*stride];
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're unlikely to use a suboptimal next sample too
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
            int d;\
            uint32_t ssd;\
            dec_sample = av_clip_int16(dec_sample);\
            d = sample - dec_sample;\
            ssd = nodes[j]->ssd + d*d;\
            if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                continue;\
            /* Collapse any two states with the same previous sample value. \
             * One could also distinguish states by step and by 2nd to last
             * sample, but the effects of that are negligible. */\
            for(k=0; k<frontier && nodes_next[k]; k++) {\
                if(dec_sample == nodes_next[k]->sample1) {\
                    assert(ssd >= nodes_next[k]->ssd);\
                    goto next_##NAME;\
                }\
            }\
            for(k=0; k<frontier; k++) {\
                if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                    TrellisNode *u = nodes_next[frontier-1];\
                    if(!u) {\
                        assert(pathn < max_paths);\
                        u = t++;\
                        u->path = pathn++;\
                    }\
                    u->ssd = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev = nodes[j]->path;\
                    memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                    nodes_next[k] = u;\
                    break;\
                }\
            }\
            next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        // prevent overflow
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}

static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;
    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
        /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            /* c->status[1].step_index = 0; */
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*8];
            adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                if (avctx->channels == 2) {
                    *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                    *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                    *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                    *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                }
            }
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
            dst++;
            /* right channel */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        for(ch=0; ch<avctx->channels; ch++){
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7, c->status[ch].step_index);
            if(avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                for(i=0; i<64; i++)
                    put_bits(&pb, 4, buf[i^1]);
                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
            } else {
                for (i=0; i<64; i+=2){
                    int t1, t2;
                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
                c->status[ch].prev_sample &= ~0x7F;
            }
        }

        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        //Store AdpcmCodeSize
        put_bits(&pb, 2, 2);    //Set 4bits flash adpcm format

        //Init the encoder state
        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            uint8_t buf[2][n];
            adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[0][i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[1][i]);
            }
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;

            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;

            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;

            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            uint8_t buf[2][n];
            if(avctx->channels == 1) {
                n *= 2;
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[0][i] << 4) | buf[0][i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[0][i] << 4) | buf[1][i];
            }
        } else
            for(i=7*avctx->channels; i<avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++ = nibble;
            }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*2];
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[0][i] | (buf[0][i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[0][i] | (buf[1][i] << 4);
            }
        } else
            for (n *= avctx->channels; n>0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++ = nibble;
            }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_EA:
        min_channels = 2;
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3:
        max_channels = 6;
        break;
    }

    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size == 2 * 4) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    default:
        break;
    }
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}

static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {

        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d  = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d  = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}


/* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;
    int coeff[2][2], shift[2];//used in EA MAXIS ADPCM

    if (!buf_size)
        return 0;

    //should protect all 4bit ADPCM variants
    //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
    //
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = buf_size - 2*avctx->channels;
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */

            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            cs->predictor = (*src++) << 8;
            cs->predictor |= (*src & 0x80);
            cs->predictor &= 0xFF80;

            /* sign extension */
            if(cs->predictor & 0x8000)
                cs->predictor -= 0x10000;

            cs->predictor = av_clip_int16(cs->predictor);

            cs->step_index = (*src++) & 0x7F;

            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }

            cs->step = step_table[cs->step_index];

            samples = (short*)data + channel;

            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */
                *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                samples += avctx->channels;
                *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4  , 3);
                samples += avctx->channels;
                src ++;
            }
        }
        if (st)
            samples--;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

//        samples_per_block= (block_align-4*chanels)*8 / (bits_per_sample * chanels) + 1;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4  , 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }

        src += m<<st;

        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 6);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 6);
        c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
        if (st){
            c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = bytestream_get_le16(&src);
        if (st) c->status[1].sample1 = bytestream_get_le16(&src);
        c->status[0].sample2 = bytestream_get_le16(&src);
        if (st) c->status[1].sample2 = bytestream_get_le16(&src);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4  );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor  = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = *src++;
        src++;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor  = (int16_t)bytestream_get_le16(&src);
            c->status[1].step_index = *src++;
            src++;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {

            /* take care of the top nibble (always left or mono channel) */
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                src[0] >> 4, 3);

            /* take care of the bottom nibble, which is right sample for
             * stereo, or another mono sample */
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor  = (int16_t)AV_RL16(src + 10);
        c->status[1].predictor  = (int16_t)AV_RL16(src + 12);
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];
        /* sign extend the predictors */
        src += 16;
        diff_channel = c->status[1].predictor;

        /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
         * the buffer is consumed */
        while (1) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_ISS:
        c->status[0].predictor  = (int16_t)AV_RL16(src + 0);
        c->status[0].step_index = src[2];
        src += 4;
        if(st) {
            c->status[1].predictor  = (int16_t)AV_RL16(src + 0);
            c->status[1].step_index = src[2];
            src += 4;
        }

        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        /* no per-block initialization; just start decoding the data */
        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                      avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_EACS:
        samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

        if (samples_in_chunk > buf_size-4-(8<<st)) {
            src += buf_size - 4;
            break;
        }

        for (i=0; i<=st; i++)
            c->status[i].step_index = bytestream_get_le32(&src);
        for (i=0; i<=st; i++)
            c->status[i].predictor  = bytestream_get_le32(&src);

        for (; samples_in_chunk; samples_in_chunk--, src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  *src>>4,   3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (; src < buf+buf_size; src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
        }
        break;
    case CODEC_ID_ADPCM_EA:
        if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        samples_in_chunk = AV_RL32(src);
        src += 4;
        current_left_sample   = (int16_t)bytestream_get_le16(&src);
        previous_left_sample  = (int16_t)bytestream_get_le16(&src);
        current_right_sample  = (int16_t)bytestream_get_le16(&src);
        previous_right_sample = (int16_t)bytestream_get_le16(&src);

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[ *src >> 4       ];
            coeff2l = ea_adpcm_table[(*src >> 4  ) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left  = (*src >> 4  ) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample  = (int32_t)((*src & 0xF0) << 24) >> shift_left;
                next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }

        if (src - buf == buf_size - 2)
            src += 2; // Skip terminating 0x0000

        break;
    case CODEC_ID_ADPCM_EA_MAXIS_XA:
        for(channel = 0; channel < avctx->channels; channel++) {
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
            shift[channel] = (*src & 0x0F) + 8;
            src++;
        }
        for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
            src+=avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3: {
        /* channel numbering
           2chan: 0=fl, 1=fr
           4chan: 0=fl, 1=rl, 2=fr, 3=rr
           6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
        const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
        int32_t previous_sample, current_sample, next_sample;
        int32_t coeff1, coeff2;
        uint8_t shift;
        unsigned int channel;
        uint16_t *samplesC;
        const uint8_t *srcC;
        const uint8_t *src_end = buf + buf_size;

        samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
                                       : bytestream_get_le32(&src)) / 28;
        if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
            28*samples_in_chunk*avctx->channels > samples_end-samples) {
            src += buf_size - 4;
            break;
        }

        for (channel=0; channel<avctx->channels; channel++) {
            int32_t offset = (big_endian ? bytestream_get_be32(&src)
                                         : bytestream_get_le32(&src))
                             + (avctx->channels-channel-1) * 4;

            if ((offset < 0) || (offset >= src_end - src - 4)) break;
            srcC  = src + offset;
            samplesC = samples + channel;

            if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
                current_sample  = (int16_t)bytestream_get_le16(&srcC);
                previous_sample = (int16_t)bytestream_get_le16(&srcC);
            } else {
                current_sample  = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1=0; count1<samples_in_chunk; count1++) {
                if (*srcC == 0xEE) {  /* only seen in R2 and R3 */
                    srcC++;
                    if (srcC > src_end - 30*2) break;
                    current_sample  = (int16_t)bytestream_get_be16(&srcC);
                    previous_sample = (int16_t)bytestream_get_be16(&srcC);

                    for (count2=0; count2<28; count2++) {
                        *samplesC = (int16_t)bytestream_get_be16(&srcC);
                        samplesC += avctx->channels;
                    }
                } else {
                    coeff1 = ea_adpcm_table[ *srcC>>4     ];
                    coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
                    shift = (*srcC++ & 0x0F) + 8;

                    if (srcC > src_end - 14) break;
                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
                        else
                            next_sample = (int32_t)((*srcC   & 0xF0) << 24) >> shift;

                        next_sample += (current_sample  * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample  = next_sample;
                        *samplesC = current_sample;
                        samplesC += avctx->channels;
                    }
                }
            }

            if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor   = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        src = src + buf_size - (4 + 4*avctx->channels);
        samples += 28 * samples_in_chunk * avctx->channels;
        break;
    }
    case CODEC_ID_ADPCM_EA_XAS:
        if (samples_end-samples < 32*4*avctx->channels
            || buf_size < (4+15)*4*avctx->channels) {
            src += buf_size;
            break;
        }
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            short *s2, *s = &samples[channel];
            for (n=0; n<4; n++, s+=32*avctx->channels) {
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
                shift[n] = (src[2]&0x0F) + 8;
                for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
                    s2[0] = (src[0]&0xF0) + (src[1]<<8);
            }

            for (m=2; m<32; m+=2) {
                s = &samples[m*avctx->channels + channel];
                for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
                    for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
                        int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
                        int pred  = s2[-1*avctx->channels] * coeff[0][n]
                                  + s2[-2*avctx->channels] * coeff[1][n];
                        s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
                    }
                }
            }
        }
        samples += 32*4*avctx->channels;
        break;
    case CODEC_ID_ADPCM_IMA_AMV:
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = bytestream_get_le16(&src);

        if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
            src+=4;

        while (src < buf + buf_size) {
            char hi, lo;
            lo = *src & 0x0F;
            hi = *src >> 4;

            if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
                FFSWAP(char, hi, lo);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                lo, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                hi, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 4, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 5        , 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] & 0x03, 2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 6        , 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x03, 2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask, nb_bits, count;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        //read bits & initial values
        nb_bits = get_bits(&gb, 2)+2;
        //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
        table = swf_index_tables[nb_bits-2];
        k0 = 1 << (nb_bits-2);
        signmask = 1 << (nb_bits-1);

        while (get_bits_count(&gb) <= size - 22*avctx->channels) {
            for (i = 0; i < avctx->channels; i++) {
                *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                c->status[i].step_index = get_bits(&gb, 6);
            }

            for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
                int i;

                for (i = 0; i < avctx->channels; i++) {
                    // similar to IMA adpcm
                    int delta = get_bits(&gb, nb_bits);
                    int step = step_table[c->status[i].step_index];
                    long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                    int k = k0;

                    do {
                        if (delta & k)
                            vpdiff += step;
                        step >>= 1;
                        k >>= 1;
                    } while(k);
                    vpdiff += step;

                    if (delta & signmask)
                        c->status[i].predictor -= vpdiff;
                    else
                        c->status[i].predictor += vpdiff;

                    c->status[i].step_index += table[delta & (~signmask)];

                    c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                    c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                    *samples++ = c->status[i].predictor;
                    if (samples >= samples_end) {
                        av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
                        return -1;
                    }
                }
            }
        }
        src += buf_size;
        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                    src[0] >> 4  );
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] >> 4  );
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_THP:
    {
        int table[2][16];
        unsigned int samplecnt;
        int prev[2][2];
        int ch;

        if (buf_size < 80) {
            av_log(avctx, AV_LOG_ERROR, "frame too small\n");
            return -1;
        }

        src+=4;
        samplecnt = bytestream_get_be32(&src);

        for (i = 0; i < 32; i++)
            table[0][i] = (int16_t)bytestream_get_be16(&src);

        /* Initialize the previous sample.  */
        for (i = 0; i < 4; i++)
            prev[0][i] = (int16_t)bytestream_get_be16(&src);

        if (samplecnt >= (samples_end - samples) /  (st + 1)) {
            av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
            return -1;
        }

        for (ch = 0; ch <= st; ch++) {
            samples = (unsigned short *) data + ch;

            /* Read in every sample for this channel.  */
            for (i = 0; i < samplecnt / 14; i++) {
                int index = (*src >> 4) & 7;
                unsigned int exp = 28 - (*src++ & 15);
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples.  */
                for (n = 0; n < 14; n++) {
                    int32_t sampledat;
                    if(n&1) sampledat=  *src++    <<28;
                    else    sampledat= (*src&0xF0)<<24;

                    sampledat = ((prev[ch][0]*factor1
                                + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
                    *samples = av_clip_int16(sampledat);
                    prev[ch][1] = prev[ch][0];
                    prev[ch][0] = *samples++;

                    /* In case of stereo, skip one sample, this sample
                       is for the other channel.  */
                    samples += st;
                }
            }
        }

        /* In the previous loop, in case stereo is used, samples is
           increased exactly one time too often.  */
        samples -= st;
        break;
    }

    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}



#if CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name,long_name_)       \
AVCodec name ## _encoder = {                    \
    #name,                                      \
    AVMEDIA_TYPE_AUDIO,                         \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_encode_init,                          \
    adpcm_encode_frame,                         \
    adpcm_encode_close,                         \
    NULL,                                       \
    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_ENCODER(id,name,long_name_)
#endif

#if CONFIG_DECODERS
#define ADPCM_DECODER(id,name,long_name_)       \
AVCodec name ## _decoder = {                    \
    #name,                                      \
    AVMEDIA_TYPE_AUDIO,                         \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_decode_init,                          \
    NULL,                                       \
    NULL,                                       \
    adpcm_decode_frame,                         \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_DECODER(id,name,long_name_)
#endif

#define ADPCM_CODEC(id,name,long_name_)         \
    ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_)

/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
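
/*
 * A minimal, self-contained sketch (not part of the original adpcm.c) of the
 * IMA ADPCM expansion rule that adpcm_ima_expand_nibble() above implements,
 * for experimenting with the step/index tables outside libavcodec. The table
 * contents and the shift of 3 match the decoder above; the helper names
 * (ima_expand, clip), the demo driver and its input nibbles are illustrative
 * only. The block is wrapped in #if 0 so it does not affect the build.
 */
#if 0
#include <stdio.h>

static const int demo_index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

static const int demo_step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* Same arithmetic as adpcm_ima_expand_nibble() with shift == 3:
 * diff = ((2 * magnitude + 1) * step) >> 3, bit 3 of the nibble is the sign,
 * the predictor is clamped to int16 range and the step index adapts via
 * the index table within 0..88. */
static short ima_expand(int *predictor, int *step_index, int nibble)
{
    int step = demo_step_table[*step_index];
    int diff = ((2 * (nibble & 7) + 1) * step) >> 3;

    *predictor += (nibble & 8) ? -diff : diff;
    *predictor  = clip(*predictor, -32768, 32767);
    *step_index = clip(*step_index + demo_index_table[nibble], 0, 88);
    return (short)*predictor;
}

int main(void)
{
    int predictor = 0, step_index = 0;
    static const int nibbles[] = { 7, 3, 12, 1, 8 }; /* arbitrary demo input */
    unsigned i;

    for (i = 0; i < sizeof(nibbles) / sizeof(nibbles[0]); i++)
        printf("%d\n", ima_expand(&predictor, &step_index, nibbles[i]));
    return 0;
}
#endif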