47 #define FRAGMENT_PIXELS 8
56 #define SB_NOT_CODED 0
57 #define SB_PARTIALLY_CODED 1
58 #define SB_FULLY_CODED 2
63 #define MAXIMUM_LONG_BIT_RUN 4129
65 #define MODE_INTER_NO_MV 0
67 #define MODE_INTER_PLUS_MV 2
68 #define MODE_INTER_LAST_MV 3
69 #define MODE_INTER_PRIOR_LAST 4
70 #define MODE_USING_GOLDEN 5
71 #define MODE_GOLDEN_MV 6
72 #define MODE_INTER_FOURMV 7
73 #define CODING_MODE_COUNT 8
120 {0,0}, {1,0}, {1,1}, {0,1},
121 {0,2}, {0,3}, {1,3}, {1,2},
122 {2,2}, {2,3}, {3,3}, {3,2},
123 {3,1}, {2,1}, {2,0}, {3,0}
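/* This table walks the 4x4 group of fragments inside one superblock along a
 * Hilbert curve; each entry is an (x, y) offset added to 4*sb_x / 4*sb_y when
 * the superblock-to-fragment mapping and the render loop visit fragments in
 * coding order. */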
126 #define MIN_DEQUANT_VAL 2
203 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
204 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
205 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2)
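/* The TOKEN_* macros pack one decoded DCT token into a 16-bit word: the two
 * low bits select the type (0 = end-of-block run, 1 = zero run plus
 * coefficient, 2 = plain coefficient), bits 2..8 hold a run length and the
 * coefficient value occupies the high bits.  vp3_dequant() below unpacks them
 * again with (token & 3), (token >> 2) and (token >> 9). */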
297 for (i = 0; i < 16; i++) {
325 int sb_x, sb_y, plane;
328 for (plane = 0; plane < 3; plane++) {
334 for (sb_y = 0; sb_y < sb_height; sb_y++)
335 for (sb_x = 0; sb_x < sb_width; sb_x++)
336 for (i = 0; i < 16; i++) {
338 y = 4*sb_y + hilbert_offset[i][1];
340 if (x < frag_width && y < frag_height)
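/* Fragment coordinates that fall outside the plane (the fragment width and
 * height need not be multiples of 4) get no mapping here; consumers of the
 * superblock_fragments table later skip entries marked with -1. */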
358 int i, plane, inter, qri, bmi, bmj, qistart;
360 for(inter=0; inter<2; inter++){
361 for(plane=0; plane<3; plane++){
363 for(qri=0; qri<s->qr_count[inter][plane]; qri++){
364 sum+= s->qr_size[inter][plane][qri];
365 if(s->qps[qpi] <= sum)
368 qistart= sum - s->qr_size[inter][plane][qri];
369 bmi= s->qr_base[inter][plane][qri ];
370 bmj= s->qr_base[inter][plane][qri+1];
374 + s->qr_size[inter][plane][qri])
375 / (2*s->qr_size[inter][plane][qri]);
377 int qmin= 8<<(inter + !i);
378 int qscale= i ? ac_scale_factor : dc_scale_factor;
383 s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
402 assert(filter_limit < 128);
406 for (x = 0; x < filter_limit; x++) {
407 bounding_values[-x] = -x;
408 bounding_values[x] = x;
410 for (x = value = filter_limit; x < 128 && value; x++, value--) {
411 bounding_values[ x] = value;
412 bounding_values[-x] = -value;
415 bounding_values[128] = value;
416 bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
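/* bounding_values forms a ramp: identity up to filter_limit, then it decays
 * back to zero near 2*filter_limit, so small pixel differences are smoothed
 * by the loop filter while large differences (likely real edges) pass through
 * unchanged. */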
427 int current_superblock = 0;
429 int num_partial_superblocks = 0;
432 int current_fragment;
444 while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
452 if (current_run == 34)
462 current_superblock += current_run;
464 num_partial_superblocks += current_run;
469 if (num_partial_superblocks < s->superblock_count) {
470 int superblocks_decoded = 0;
472 current_superblock = 0;
476 while (superblocks_decoded < s->superblock_count - num_partial_superblocks
486 if (current_run == 34)
489 for (j = 0; j < current_run; current_superblock++) {
501 superblocks_decoded += current_run;
507 if (num_partial_superblocks) {
522 for (plane = 0; plane < 3; plane++) {
523 int sb_start = superblock_starts[plane];
525 int num_coded_frags = 0;
527 for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
530 for (j = 0; j < 16; j++) {
534 if (current_fragment != -1) {
541 if (current_run-- == 0) {
565 for (i = 0; i < 64; i++)
579 int i, j, k, sb_x, sb_y;
581 int current_macroblock;
582 int current_fragment;
599 for (i = 0; i < 8; i++)
601 for (i = 0; i < 8; i++)
602 custom_mode_alphabet[get_bits(gb, 3)] = i;
603 alphabet = custom_mode_alphabet;
614 for (j = 0; j < 4; j++) {
615 int mb_x = 2*sb_x + (j>>1);
616 int mb_y = 2*sb_y + (((j>>1)+j)&1);
622 #define BLOCK_X (2*mb_x + (k&1))
623 #define BLOCK_Y (2*mb_y + (k>>1))
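/* The j loop above visits the four macroblocks of a superblock in coding
 * order (0,0), (0,1), (1,1), (1,0) -- that is what (((j>>1)+j)&1) yields --
 * while BLOCK_X/BLOCK_Y index the four luma fragments of one macroblock in
 * raster order via k. */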
626 for (k = 0; k < 4; k++) {
640 coding_mode = alphabet
644 for (k = 0; k < 4; k++) {
650 #define SET_CHROMA_MODES \
651 if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
652 frag[s->fragment_start[1]].coding_method = coding_mode;\
653 if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
654 frag[s->fragment_start[2]].coding_method = coding_mode;
661 for (k = 0; k < 2; k++) {
666 for (k = 0; k < 4; k++) {
685 int j, k, sb_x, sb_y;
689 int last_motion_x = 0;
690 int last_motion_y = 0;
691 int prior_last_motion_x = 0;
692 int prior_last_motion_y = 0;
693 int current_macroblock;
694 int current_fragment;
710 for (j = 0; j < 4; j++) {
711 int mb_x = 2*sb_x + (j>>1);
712 int mb_y = 2*sb_y + (((j>>1)+j)&1);
724 if (coding_mode == 0) {
735 prior_last_motion_x = last_motion_x;
736 prior_last_motion_y = last_motion_y;
737 last_motion_x = motion_x[0];
738 last_motion_y = motion_y[0];
744 prior_last_motion_x = last_motion_x;
745 prior_last_motion_y = last_motion_y;
749 for (k = 0; k < 4; k++) {
752 if (coding_mode == 0) {
759 last_motion_x = motion_x[k];
760 last_motion_y = motion_y[k];
770 motion_x[0] = last_motion_x;
771 motion_y[0] = last_motion_y;
780 motion_x[0] = prior_last_motion_x;
781 motion_y[0] = prior_last_motion_y;
784 prior_last_motion_x = last_motion_x;
785 prior_last_motion_y = last_motion_y;
786 last_motion_x = motion_x[0];
787 last_motion_y = motion_y[0];
800 for (k = 0; k < 4; k++) {
804 s->motion_val[0][current_fragment][0] = motion_x[k];
805 s->motion_val[0][current_fragment][1] = motion_y[k];
807 s->motion_val[0][current_fragment][0] = motion_x[0];
808 s->motion_val[0][current_fragment][1] = motion_y[0];
814 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
815 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
817 motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
818 motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
824 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
825 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
826 motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
827 motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
829 motion_x[1] = motion_x[0];
830 motion_y[1] = motion_y[0];
832 motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
833 motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);
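/* Chroma motion vectors are derived from the luma ones: for 4:2:0 a single
 * vector is the average of the four luma vectors, for 4:2:2 one vector per
 * horizontal pair of luma blocks.  The (v>>1) | (v&1) step halves a component
 * along the subsampled axis while keeping it odd (half-pel) whenever the luma
 * vector was odd. */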
836 for (k = 0; k < 2; k++) {
842 for (k = 0; k < 4; k++) {
862 int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
865 for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
866 i = blocks_decoded = num_blocks_at_qpi = 0;
878 if (run_length == 34)
880 blocks_decoded += run_length;
883 num_blocks_at_qpi += run_length;
885 for (j = 0; j < run_length; i++) {
894 } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
896 num_blocks -= num_blocks_at_qpi;
915 VLC *table, int coeff_index,
927 int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
937 if (eob_run > num_coeffs) {
938 coeff_i = blocks_ended = num_coeffs;
939 eob_run -= num_coeffs;
941 coeff_i = blocks_ended = eob_run;
947 dct_tokens[j++] = blocks_ended << 2;
951 token = get_vlc2(gb, vlc_table, 11, 3);
953 if ((unsigned) token <= 6U) {
960 if (eob_run > num_coeffs - coeff_i) {
961 dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
962 blocks_ended += num_coeffs - coeff_i;
963 eob_run -= num_coeffs - coeff_i;
964 coeff_i = num_coeffs;
967 blocks_ended += eob_run;
971 } else if (token >= 0) {
974 bits_to_get = get_bits(gb, bits_to_get);
989 all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
994 if (coeff_index + zero_run > 64) {
996 " %d coeffs left\n", zero_run, 64-coeff_index);
997 zero_run = 64 - coeff_index;
1002 for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
1007 "Invalid token %d\n", token);
1018 for (i = coeff_index+1; i < 64; i++)
1023 s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
1024 else if (coeff_index < 63)
1025 s->dct_tokens[0][coeff_index+1] = dct_tokens + j;
1033 int fragment_height);
1045 int residual_eob_run = 0;
1057 0, residual_eob_run);
1058 if (residual_eob_run < 0)
1059 return residual_eob_run;
1066 1, residual_eob_run);
1067 if (residual_eob_run < 0)
1068 return residual_eob_run;
1070 2, residual_eob_run);
1071 if (residual_eob_run < 0)
1072 return residual_eob_run;
1088 for (i = 1; i <= 5; i++) {
1089 y_tables[i] = &s->ac_vlc_1[ac_y_table];
1090 c_tables[i] = &s->ac_vlc_1[ac_c_table];
1092 for (i = 6; i <= 14; i++) {
1093 y_tables[i] = &s->ac_vlc_2[ac_y_table];
1094 c_tables[i] = &s->ac_vlc_2[ac_c_table];
1096 for (i = 15; i <= 27; i++) {
1097 y_tables[i] = &s->ac_vlc_3[ac_y_table];
1098 c_tables[i] = &s->ac_vlc_3[ac_c_table];
1100 for (i = 28; i <= 63; i++) {
1101 y_tables[i] = &s->ac_vlc_4[ac_y_table];
1102 c_tables[i] = &s->ac_vlc_4[ac_c_table];
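/* AC coefficient indices are split into four groups (1-5, 6-14, 15-27 and
 * 28-63); each group uses one of the four AC code books, chosen independently
 * for luma (ac_y_table) and chroma (ac_c_table). */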
1106 for (i = 1; i <= 63; i++) {
1107 residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1108 0, residual_eob_run);
1109 if (residual_eob_run < 0)
1110 return residual_eob_run;
1112 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1113 1, residual_eob_run);
1114 if (residual_eob_run < 0)
1115 return residual_eob_run;
1116 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1117 2, residual_eob_run);
1118 if (residual_eob_run < 0)
1119 return residual_eob_run;
1130 #define COMPATIBLE_FRAME(x) \
1131 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1132 #define DC_COEFF(u) s->all_fragments[u].dc
1137 int fragment_height)
1146 int i = first_fragment;
1151 int vl, vul, vu, vur;
1163 static const int predictor_transform[16][4] = {
1188 static const unsigned char compatible_frame[9] = {
1199 int current_frame_type;
1206 vul = vu = vur = vl = 0;
1207 last_dc[0] = last_dc[1] = last_dc[2] = 0;
1210 for (y = 0; y < fragment_height; y++) {
1213 for (x = 0; x < fragment_width; x++, i++) {
1218 current_frame_type =
1229 u= i-fragment_width;
1234 ul= i-fragment_width-1;
1239 if(x + 1 < fragment_width){
1240 ur= i-fragment_width+1;
1247 if (transform == 0) {
1251 predicted_dc = last_dc[current_frame_type];
1256 (predictor_transform[transform][0] * vul) +
1257 (predictor_transform[transform][1] * vu) +
1258 (predictor_transform[transform][2] * vur) +
1259 (predictor_transform[transform][3] * vl);
1261 predicted_dc /= 128;
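/* Each usable predictor_transform row sums to 128 (transform 0 instead reuses
 * the last DC value seen), so the division by 128 turns the weighted sum of
 * the left/up-left/up/up-right neighbours into a weighted average. */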
1265 if ((transform == 15) || (transform == 13)) {
1266 if (FFABS(predicted_dc - vu) > 128)
1268 else if (FFABS(predicted_dc - vl) > 128)
1270 else if (FFABS(predicted_dc - vul) > 128)
1278 last_dc[current_frame_type] = DC_COEFF(i);
1297 for (y = ystart; y < yend; y++) {
1299 for (x = 0; x < width; x++) {
1310 stride, bounding_values);
1317 stride, bounding_values);
1323 if ((x < width - 1) &&
1326 plane_data + 8*x + 8,
1327 stride, bounding_values);
1333 if ((y < height - 1) &&
1336 plane_data + 8*x + 8*stride,
1337 stride, bounding_values);
1354 int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1360 switch (token & 3) {
1369 i += (token >> 2) & 0x7f;
1374 block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1378 block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1389 block[0] = frag->dc * s->qmat[0][inter][plane][0];
1440 int border = motion_y&1;
1448 ref_row = y + (motion_y>>1);
1449 ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1460 int x, y, i, j, fragment;
1462 int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1463 int motion_halfpel_index;
1465 int plane, first_pixel;
1470 for (plane = 0; plane < 3; plane++) {
1477 int8_t (*motion_val)[2] = s->motion_val[!!plane];
1493 for (; sb_y < slice_height; sb_y++) {
1496 for (sb_x = 0; sb_x < slice_width; sb_x++) {
1499 for (j = 0; j < 16; j++) {
1501 y = 4*sb_y + hilbert_offset[j][1];
1502 fragment = y*fragment_width + x;
1504 i = fragment_start + fragment;
1507 if (x >= fragment_width || y >= fragment_height)
1510 first_pixel = 8*y*stride + 8*x;
1519 motion_source= golden_plane;
1521 motion_source= last_plane;
1523 motion_source += first_pixel;
1524 motion_halfpel_index = 0;
1531 motion_x = motion_val[fragment][0];
1532 motion_y = motion_val[fragment][1];
1534 src_x= (motion_x>>1) + 8*x;
1535 src_y= (motion_y>>1) + 8*y;
1537 motion_halfpel_index = motion_x & 0x01;
1538 motion_source += (motion_x >> 1);
1540 motion_halfpel_index |= (motion_y & 0x01) << 1;
1541 motion_source += ((motion_y >> 1) * stride);
1543 if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1545 if(stride<0) temp -= 8*stride;
1547 s->vdsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
1548 motion_source= temp;
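/* A 9x9 area is fetched because half-pel interpolation of an 8x8 fragment
 * needs one extra row and column; when the vector points outside the plane,
 * emulated_edge_mc builds a padded copy in edge_emu_buffer and that copy is
 * used as the motion source instead. */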
1560 if(motion_halfpel_index != 3){
1562 output_plane + first_pixel,
1563 motion_source, stride, 8);
1565 int d= (motion_x ^ motion_y)>>31;
1567 output_plane + first_pixel,
1569 motion_source + stride + 1 + d,
1584 output_plane + first_pixel,
1593 output_plane + first_pixel,
1604 output_plane + first_pixel,
1605 last_plane + first_pixel,
1633 int y_fragment_count, c_fragment_count;
1664 int i, inter, plane;
1667 int y_fragment_count, c_fragment_count;
1689 for (i = 0; i < 3; i++)
1728 for (i = 0; i < 64; i++) {
1737 for(inter=0; inter<2; inter++){
1738 for(plane=0; plane<3; plane++){
1740 s->qr_size [inter][plane][0]= 63;
1742 s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
1747 for (i = 0; i < 16; i++) {
1776 for (i = 0; i < 16; i++) {
1825 for (i = 0; i < 3; i++) {
1864 int qps_changed = 0, i, err;
1866 #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
1868 if (!s1->current_frame.data[0]
1879 int y_fragment_count, c_fragment_count;
1894 for (i = 0; i < 3; i++) {
1895 if (s->qps[i] != s1->qps[i]) {
1897 memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
1901 if (s->qps[0] != s1->qps[0])
1915 void *data, int *got_frame,
1919 int buf_size = avpkt->size;
1935 for (i = 0; i < 3; i++)
1941 } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
1942 for (i = s->nqps; i < 3; i++)
1947 s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);
1949 s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
1952 if (s->qps[0] != s->last_qps[0])
1955 for (i = 0; i < s->nqps; i++)
1958 if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
1964 s->current_frame.reference = 3;
1971 if (!s->edge_emu_buffer)
1972 s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));
1982 if (avctx->frame_number == 0)
1986 if (s->version || s->theora)
1993 if (!s->golden_frame.data[0]) {
1996 s->golden_frame.reference = 3;
2002 s->last_frame = s->golden_frame;
2008 memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2032 for (i = 0; i < 3; i++) {
2033 int height = s->height >> (i && s->chroma_y_shift);
2034 if (s->flipped_image)
2035 s->data_offset[i] = 0;
2037 s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
2040 s->last_slice_end = 0;
2041 for (i = 0; i < s->c_superblock_height; i++)
2045 for (i = 0; i < 3; i++) {
2046 int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
2063 avctx->release_buffer(avctx, &s->current_frame);
2079 av_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2120 #if CONFIG_THEORA_DECODER
2128 int visible_width, visible_height, colorspace;
2129 int offset_x = 0, offset_y = 0;
2137 if (s->theora < 0x030200)
2152 if (s->theora >= 0x030200) {
2162 if (fps.num && fps.den) {
2163 if (fps.num < 0 || fps.den < 0) {
2168 fps.den, fps.num, 1<<30);
2173 if (aspect.num && aspect.den) {
2176 aspect.num, aspect.den, 1<<30);
2179 if (s->theora < 0x030200)
2186 if (s->theora >= 0x030200)
2195 if ( visible_width <= s->width && visible_width > s->width-16
2196 && visible_height <= s->height && visible_height > s->height-16
2197 && !offset_x && (offset_y == s->height - visible_height))
2202 if (colorspace == 1) {
2204 } else if (colorspace == 2) {
2207 if (colorspace == 1 || colorspace == 2) {
2218 int i, n, matrices, inter, plane;
2220 if (s->theora >= 0x030200) {
2224 for (i = 0; i < 64; i++)
2228 if (s->theora >= 0x030200)
2233 for (i = 0; i < 64; i++)
2236 if (s->theora >= 0x030200)
2241 for (i = 0; i < 64; i++)
2244 if (s->theora >= 0x030200)
2254 for(n=0; n<matrices; n++){
2255 for (i = 0; i < 64; i++)
2259 for (inter = 0; inter <= 1; inter++) {
2260 for (plane = 0; plane <= 2; plane++) {
2262 if (inter || plane > 0)
2270 qtj= (3*inter + plane - 1) / 3;
2271 plj= (plane + 2) % 3;
2286 s->qr_base[inter][plane][qri]= i;
2290 s->qr_size[inter][plane][qri++]= i;
2304 for (s->hti = 0; s->hti < 80; s->hti++) {
2340 42, header_start, header_len) < 0) {
2346 if (header_len[i] <= 0)
2352 if (!(ptype & 0x80))
2364 theora_decode_header(avctx, &gb);
2371 if (theora_decode_tables(avctx, &gb))
2380 if (s->theora < 0x030200)
2392 .init = theora_decode_init,