ffmpeg/libavcodec/atrac3plusdec.c
/*
 * ATRAC3+ compatible decoder
 *
 * Copyright (c) 2010-2013 Maxim Poliakovski
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sony ATRAC3+ compatible decoder.
 *
 * Container formats used to store its data:
 * RIFF WAV (.at3) and Sony OpenMG (.oma, .aa3).
 *
 * Technical description of this codec can be found here:
 * http://wiki.multimedia.cx/index.php?title=ATRAC3plus
 *
 * Kudos to Benjamin Larsson and Michael Karcher
 * for their precious technical help!
 */

#include <stdint.h>
#include <string.h>

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "atrac.h"
#include "atrac3plus.h"

typedef struct ATRAC3PContext {
    GetBitContext gb;
    AVFloatDSPContext *fdsp;

    DECLARE_ALIGNED(32, float, samples)[2][ATRAC3P_FRAME_SAMPLES];  ///< quantized MDCT spectrum
    DECLARE_ALIGNED(32, float, mdct_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the IMDCT
    DECLARE_ALIGNED(32, float, time_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the gain compensation
    DECLARE_ALIGNED(32, float, outp_buf)[2][ATRAC3P_FRAME_SAMPLES];

    AtracGCContext gainc_ctx;   ///< gain compensation context
    FFTContext mdct_ctx;
    FFTContext ipqf_dct_ctx;    ///< IDCT context used by IPQF

    Atrac3pChanUnitCtx *ch_units;   ///< global channel units

    int num_channel_blocks;     ///< number of channel blocks
    uint8_t channel_blocks[5];  ///< channel configuration descriptor
    uint64_t my_channel_layout; ///< current channel layout
} ATRAC3PContext;

static av_cold int atrac3p_decode_close(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;

    av_freep(&ctx->ch_units);
    av_freep(&ctx->fdsp);

    return 0;
}

static av_cold int set_channel_params(ATRAC3PContext *ctx,
                                      AVCodecContext *avctx)
{
    memset(ctx->channel_blocks, 0, sizeof(ctx->channel_blocks));

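    /* ATRAC3+ carries its channels as a sequence of mono and stereo
     * channel units; channel_blocks[] records the unit layout expected
     * for the given channel count and is later checked against the
     * unit IDs found in each frame. */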
    switch (avctx->channels) {
    case 1:
        if (avctx->channel_layout != AV_CH_FRONT_LEFT)
            avctx->channel_layout = AV_CH_LAYOUT_MONO;

        ctx->num_channel_blocks = 1;
        ctx->channel_blocks[0]  = CH_UNIT_MONO;
        break;
    case 2:
        avctx->channel_layout   = AV_CH_LAYOUT_STEREO;
        ctx->num_channel_blocks = 1;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        break;
    case 3:
        avctx->channel_layout   = AV_CH_LAYOUT_SURROUND;
        ctx->num_channel_blocks = 2;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        break;
    case 4:
        avctx->channel_layout   = AV_CH_LAYOUT_4POINT0;
        ctx->num_channel_blocks = 3;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_MONO;
        break;
    case 6:
        avctx->channel_layout   = AV_CH_LAYOUT_5POINT1_BACK;
        ctx->num_channel_blocks = 4;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_MONO;
        break;
    case 7:
        avctx->channel_layout   = AV_CH_LAYOUT_6POINT1_BACK;
        ctx->num_channel_blocks = 5;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_MONO;
        ctx->channel_blocks[4]  = CH_UNIT_MONO;
        break;
    case 8:
        avctx->channel_layout   = AV_CH_LAYOUT_7POINT1;
        ctx->num_channel_blocks = 5;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_STEREO;
        ctx->channel_blocks[4]  = CH_UNIT_MONO;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported channel count: %d!\n", avctx->channels);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    int i, ch, ret;

    if (!avctx->block_align) {
        av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
        return AVERROR(EINVAL);
    }

    ff_atrac3p_init_vlcs();

    /* initialize IPQF */
    ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);

    ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);

    ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);

    ff_atrac3p_init_wave_synth();

    if ((ret = set_channel_params(ctx, avctx)) < 0)
        return ret;

    ctx->my_channel_layout = avctx->channel_layout;

    ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units));
    ctx->fdsp     = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);

    if (!ctx->ch_units || !ctx->fdsp) {
        atrac3p_decode_close(avctx);
        return AVERROR(ENOMEM);
    }

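    /* hook up the current/previous parameter pointers: each channel keeps
     * two sets of window shapes, gain data and tone data in its *_hist
     * arrays, and the pointers are swapped after every decoded frame so
     * that the previous frame's data stays available for overlapping */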
    for (i = 0; i < ctx->num_channel_blocks; i++) {
        for (ch = 0; ch < 2; ch++) {
            ctx->ch_units[i].channels[ch].ch_num          = ch;
            ctx->ch_units[i].channels[ch].wnd_shape       = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
            ctx->ch_units[i].channels[ch].wnd_shape_prev  = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
            ctx->ch_units[i].channels[ch].gain_data       = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
            ctx->ch_units[i].channels[ch].gain_data_prev  = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
            ctx->ch_units[i].channels[ch].tones_info      = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
            ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];
        }

        ctx->ch_units[i].waves_info      = &ctx->ch_units[i].wave_synth_hist[0];
        ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    return 0;
}

static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
                                     float out[2][ATRAC3P_FRAME_SAMPLES],
                                     int num_channels,
                                     AVCodecContext *avctx)
{
    int i, sb, ch, qu, nspeclines, RNG_index;
    float *dst, q;
    int16_t *src;
    /* calculate RNG table index for each subband */
    int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };

    if (ctx->mute_flag) {
        for (ch = 0; ch < num_channels; ch++)
            memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
        return;
    }

    for (qu = 0, RNG_index = 0; qu < ctx->used_quant_units; qu++)
        RNG_index += ctx->channels[0].qu_sf_idx[qu] +
                     ctx->channels[1].qu_sf_idx[qu];

    for (sb = 0; sb < ctx->num_coded_subbands; sb++, RNG_index += 128)
        sb_RNG_index[sb] = RNG_index & 0x3FC;

    /* inverse quant and power compensation */
    for (ch = 0; ch < num_channels; ch++) {
        /* clear channel's residual spectrum */
        memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));

        for (qu = 0; qu < ctx->used_quant_units; qu++) {
            src        = &ctx->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
            dst        = &out[ch][ff_atrac3p_qu_to_spec_pos[qu]];
            nspeclines = ff_atrac3p_qu_to_spec_pos[qu + 1] -
                         ff_atrac3p_qu_to_spec_pos[qu];

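            /* inverse quantization: each decoded mantissa is multiplied by
             * the quantization unit's scale factor and by a step size
             * derived from its word length */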
            if (ctx->channels[ch].qu_wordlen[qu] > 0) {
                q = ff_atrac3p_sf_tab[ctx->channels[ch].qu_sf_idx[qu]] *
                    ff_atrac3p_mant_tab[ctx->channels[ch].qu_wordlen[qu]];
                for (i = 0; i < nspeclines; i++)
                    dst[i] = src[i] * q;
            }
        }

        for (sb = 0; sb < ctx->num_coded_subbands; sb++)
            ff_atrac3p_power_compensation(ctx, ch, &out[ch][0],
                                          sb_RNG_index[sb], sb);
    }

    if (ctx->unit_type == CH_UNIT_STEREO) {
        for (sb = 0; sb < ctx->num_coded_subbands; sb++) {
            if (ctx->swap_channels[sb]) {
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    FFSWAP(float, out[0][sb * ATRAC3P_SUBBAND_SAMPLES + i],
                                  out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
            }

            /* flip coefficients' sign if requested */
            if (ctx->negate_coeffs[sb])
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i] = -(out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
        }
    }
}

static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
                              int num_channels, AVCodecContext *avctx)
{
    int ch, sb;

    for (ch = 0; ch < num_channels; ch++) {
        for (sb = 0; sb < ch_unit->num_subbands; sb++) {
            /* inverse transform and windowing */
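            /* the window shape flags of the previous and the current frame
             * are packed into a 2-bit selector that tells the IMDCT how to
             * window each half of the transform output for this subband */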
            ff_atrac3p_imdct(ctx->fdsp, &ctx->mdct_ctx,
                             &ctx->samples[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             (ch_unit->channels[ch].wnd_shape_prev[sb] << 1) +
                             ch_unit->channels[ch].wnd_shape[sb], sb);

            /* gain compensation and overlapping */
            ff_atrac_gain_compensation(&ctx->gainc_ctx,
                                       &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->prev_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->channels[ch].gain_data_prev[sb],
                                       &ch_unit->channels[ch].gain_data[sb],
                                       ATRAC3P_SUBBAND_SAMPLES,
                                       &ctx->time_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES]);
        }

        /* zero unused subbands in both output and overlapping buffers */
        memset(&ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));
        memset(&ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));

        /* resynthesize and add tonal signal */
        if (ch_unit->waves_info->tones_present ||
            ch_unit->waves_info_prev->tones_present) {
            for (sb = 0; sb < ch_unit->num_subbands; sb++)
                if (ch_unit->channels[ch].tones_info[sb].num_wavs ||
                    ch_unit->channels[ch].tones_info_prev[sb].num_wavs) {
                    ff_atrac3p_generate_tones(ch_unit, ctx->fdsp, ch, sb,
                                              &ctx->time_buf[ch][sb * 128]);
                }
        }

        /* subband synthesis and acoustic signal output */
        ff_atrac3p_ipqf(&ctx->ipqf_dct_ctx, &ch_unit->ipqf_ctx[ch],
                        &ctx->time_buf[ch][0], &ctx->outp_buf[ch][0]);
    }

    /* swap window shape and gain control buffers. */
    for (ch = 0; ch < num_channels; ch++) {
        FFSWAP(uint8_t *, ch_unit->channels[ch].wnd_shape,
               ch_unit->channels[ch].wnd_shape_prev);
        FFSWAP(AtracGainInfo *, ch_unit->channels[ch].gain_data,
               ch_unit->channels[ch].gain_data_prev);
        FFSWAP(Atrac3pWavesData *, ch_unit->channels[ch].tones_info,
               ch_unit->channels[ch].tones_info_prev);
    }

    FFSWAP(Atrac3pWaveSynthParams *, ch_unit->waves_info, ch_unit->waves_info_prev);
}

static int atrac3p_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    AVFrame *frame      = data;
    int i, ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;
    float **samples_p = (float **)frame->extended_data;

    frame->nb_samples = ATRAC3P_FRAME_SAMPLES;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    if ((ret = init_get_bits8(&ctx->gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if (get_bits1(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid start bit!\n");
        return AVERROR_INVALIDDATA;
    }

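    /* a frame is a sequence of channel units, each introduced by a 2-bit
     * type ID and terminated by CH_UNIT_TERMINATOR; for mono and stereo
     * units the ID directly encodes the number of channels minus one */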
    while (get_bits_left(&ctx->gb) >= 2 &&
           (ch_unit_id = get_bits(&ctx->gb, 2)) != CH_UNIT_TERMINATOR) {
        if (ch_unit_id == CH_UNIT_EXTENSION) {
            avpriv_report_missing_feature(avctx, "Channel unit extension");
            return AVERROR_PATCHWELCOME;
        }
        if (ch_block >= ctx->num_channel_blocks ||
            ctx->channel_blocks[ch_block] != ch_unit_id) {
            av_log(avctx, AV_LOG_ERROR,
                   "Frame data doesn't match channel configuration!\n");
            return AVERROR_INVALIDDATA;
        }

        ctx->ch_units[ch_block].unit_type = ch_unit_id;
        channels_to_process               = ch_unit_id + 1;

        if ((ret = ff_atrac3p_decode_channel_unit(&ctx->gb,
                                                  &ctx->ch_units[ch_block],
                                                  channels_to_process,
                                                  avctx)) < 0)
            return ret;

        decode_residual_spectrum(&ctx->ch_units[ch_block], ctx->samples,
                                 channels_to_process, avctx);
        reconstruct_frame(ctx, &ctx->ch_units[ch_block],
                          channels_to_process, avctx);

        for (i = 0; i < channels_to_process; i++)
            memcpy(samples_p[out_ch_index + i], ctx->outp_buf[i],
                   ATRAC3P_FRAME_SAMPLES * sizeof(**samples_p));

        ch_block++;
        out_ch_index += channels_to_process;
    }

    *got_frame_ptr = 1;

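    /* every packet is expected to carry exactly one frame of block_align
     * bytes, so the whole block is reported as consumed */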
    return avctx->block_align;
}

AVCodec ff_atrac3p_decoder = {
    .name           = "atrac3plus",
    .long_name      = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ATRAC3P,
    .priv_data_size = sizeof(ATRAC3PContext),
    .init           = atrac3p_decode_init,
    .close          = atrac3p_decode_close,
    .decode         = atrac3p_decode_frame,
};