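/*
 * Intel Indeo 3 (IV31/IV32) video decoder.
 * Decodes Indeo 3 bitstreams into planar YUV 4:1:0 (PIX_FMT_YUV410P),
 * using a pair of internally allocated frame buffers for prediction.
 */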
00028 #include "libavutil/imgutils.h"
00029 #include "avcodec.h"
00030 #include "dsputil.h"
00031 #include "bytestream.h"
00032
00033 #include "indeo3data.h"
00034
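/* One internal picture: plane pointers into the shared buffer allocated in
 * iv_alloc_frames(), plus the padded luma (y_w/y_h) and chroma (uv_w/uv_h)
 * plane dimensions. */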
00035 typedef struct
00036 {
00037 uint8_t *Ybuf;
00038 uint8_t *Ubuf;
00039 uint8_t *Vbuf;
00040 unsigned short y_w, y_h;
00041 unsigned short uv_w, uv_h;
00042 } YUVBufs;
00043
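/* Decoder context: two internal frames (iv_frame[0]/[1]) used as a
 * current/reference pair, and the ModPred and corrector_type lookup
 * tables built once in build_modpred(). */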
00044 typedef struct Indeo3DecodeContext {
00045 AVCodecContext *avctx;
00046 int width, height;
00047 AVFrame frame;
00048
00049 uint8_t *buf;
00050 YUVBufs iv_frame[2];
00051 YUVBufs *cur_frame;
00052 YUVBufs *ref_frame;
00053
00054 uint8_t *ModPred;
00055 uint8_t *corrector_type;
00056 } Indeo3DecodeContext;
00057
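/* Seed values used by build_modpred() to fill the corrector_type table. */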
00058 static const uint8_t corrector_type_0[24] = {
00059 195, 159, 133, 115, 101, 93, 87, 77,
00060 195, 159, 133, 115, 101, 93, 87, 77,
00061 128, 79, 79, 79, 79, 79, 79, 79
00062 };
00063
00064 static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
00065
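/* Builds the two lookup tables used during decoding:
 * - ModPred: eight 128-entry tables that remap reference pixels for the
 *   block modes which rescale the predictor (k == 0, 3 or 10 below),
 * - corrector_type: twenty-four 256-entry tables mapping a code byte to
 *   the correction case handled by the switch statements in
 *   iv_Decode_Chunk(). */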
00066 static av_cold int build_modpred(Indeo3DecodeContext *s)
00067 {
00068 int i, j;
00069
00070 if (!(s->ModPred = av_malloc(8 * 128)))
00071 return AVERROR(ENOMEM);
00072
00073 for (i=0; i < 128; ++i) {
00074 s->ModPred[i+0*128] = i > 126 ? 254 : 2*(i + 1 - ((i + 1) % 2));
00075 s->ModPred[i+1*128] = i == 7 ? 20 :
00076 i == 119 ||
00077 i == 120 ? 236 : 2*(i + 2 - ((i + 1) % 3));
00078 s->ModPred[i+2*128] = i > 125 ? 248 : 2*(i + 2 - ((i + 2) % 4));
00079 s->ModPred[i+3*128] = 2*(i + 1 - ((i - 3) % 5));
00080 s->ModPred[i+4*128] = i == 8 ? 20 : 2*(i + 1 - ((i - 3) % 6));
00081 s->ModPred[i+5*128] = 2*(i + 4 - ((i + 3) % 7));
00082 s->ModPred[i+6*128] = i > 123 ? 240 : 2*(i + 4 - ((i + 4) % 8));
00083 s->ModPred[i+7*128] = 2*(i + 5 - ((i + 4) % 9));
00084 }
00085
00086 if (!(s->corrector_type = av_malloc(24 * 256)))
00087 return AVERROR(ENOMEM);
00088
00089 for (i=0; i < 24; ++i) {
00090 for (j=0; j < 256; ++j) {
00091 s->corrector_type[i*256+j] = j < corrector_type_0[i] ? 1 :
00092 j < 248 || (i == 16 && j == 248) ? 0 :
00093 corrector_type_2[j - 248];
00094 }
00095 }
00096
00097 return 0;
00098 }
00099
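/* Allocates a single contiguous buffer holding the luma and chroma planes
 * of both internal frames, each dimension padded up to a multiple of 4 and
 * each plane preceded by one extra row. The padding rows are filled with
 * 0x80, apparently so that prediction from above the first line of a plane
 * reads a neutral value. */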
00100 static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
00101 {
00102 int luma_width = (s->width + 3) & ~3,
00103 luma_height = (s->height + 3) & ~3,
00104 chroma_width = ((luma_width >> 2) + 3) & ~3,
00105 chroma_height = ((luma_height >> 2) + 3) & ~3,
00106 luma_pixels = luma_width * luma_height,
00107 chroma_pixels = chroma_width * chroma_height,
00108 i;
00109 unsigned int bufsize = luma_pixels * 2 + luma_width * 3 +
00110 (chroma_pixels + chroma_width) * 4;
00111
00112 av_freep(&s->buf);
00113 if(!(s->buf = av_malloc(bufsize)))
00114 return AVERROR(ENOMEM);
00115 s->iv_frame[0].y_w = s->iv_frame[1].y_w = luma_width;
00116 s->iv_frame[0].y_h = s->iv_frame[1].y_h = luma_height;
00117 s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
00118 s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
00119
00120 s->iv_frame[0].Ybuf = s->buf + luma_width;
00121 i = luma_pixels + luma_width * 2;
00122 s->iv_frame[1].Ybuf = s->buf + i;
00123 i += (luma_pixels + luma_width);
00124 s->iv_frame[0].Ubuf = s->buf + i;
00125 i += (chroma_pixels + chroma_width);
00126 s->iv_frame[1].Ubuf = s->buf + i;
00127 i += (chroma_pixels + chroma_width);
00128 s->iv_frame[0].Vbuf = s->buf + i;
00129 i += (chroma_pixels + chroma_width);
00130 s->iv_frame[1].Vbuf = s->buf + i;
00131
00132 for(i = 1; i <= luma_width; i++)
00133 s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
00134 s->iv_frame[0].Ubuf[-i] = 0x80;
00135
00136 for(i = 1; i <= chroma_width; i++) {
00137 s->iv_frame[1].Ubuf[-i] = 0x80;
00138 s->iv_frame[0].Vbuf[-i] = 0x80;
00139 s->iv_frame[1].Vbuf[-i] = 0x80;
00140 s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
00141 }
00142
00143 return 0;
00144 }
00145
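/* Frees the frame buffer and the lookup tables allocated at init time. */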
00146 static av_cold void iv_free_func(Indeo3DecodeContext *s)
00147 {
00148 av_freep(&s->buf);
00149 av_freep(&s->ModPred);
00150 av_freep(&s->corrector_type);
00151 }
00152
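/* Strip descriptor kept on the explicit stack (strip_tbl[]) used by
 * iv_Decode_Chunk(): position and size of the region plus pending-split
 * state; usl7 appears to flag that a prediction command (2 or 3) has
 * already been seen for the strip. */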
00153 struct ustr {
00154 int xpos;
00155 int ypos;
00156 int width;
00157 int height;
00158 int split_flag;
00159 int split_direction;
00160 int usl7;
00161 };
00162
00163
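/* Helper macros shared by the block-decoding loops below. They update the
 * run-length state (rle_v1/rle_v2/rle_v3) and the line counter lp2, and
 * step buf1 back where a code byte has to be re-read. */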
00164 #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
00165 if((lv1 & 0x80) != 0) { \
00166 if(rle_v3 != 0) \
00167 rle_v3 = 0; \
00168 else { \
00169 rle_v3 = 1; \
00170 buf1 -= 2; \
00171 } \
00172 } \
00173 lp2 = 4;
00174
00175
00176 #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
00177 if(rle_v3 == 0) { \
00178 rle_v2 = *buf1; \
00179 rle_v1 = 1; \
00180 if(rle_v2 > 32) { \
00181 rle_v2 -= 32; \
00182 rle_v1 = 0; \
00183 } \
00184 rle_v3 = 1; \
00185 } \
00186 buf1--;
00187
00188
00189 #define LP2_CHECK(buf1,rle_v3,lp2) \
00190 if(lp2 == 0 && rle_v3 != 0) \
00191 rle_v3 = 0; \
00192 else { \
00193 buf1--; \
00194 rle_v3 = 1; \
00195 }
00196
00197
00198 #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
00199 rle_v2--; \
00200 if(rle_v2 == 0) { \
00201 rle_v3 = 0; \
00202 buf1 += 2; \
00203 } \
00204 lp2 = 4;
00205
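/* Decodes one plane ("chunk"). The plane is covered by strips that are
 * recursively split by 2-bit commands read from buf1; leaf cells are then
 * either copied/motion-compensated from the reference plane or rebuilt
 * from previously decoded lines plus vector-quantized corrections taken
 * from the tables in indeo3data.h. */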
00206 static void iv_Decode_Chunk(Indeo3DecodeContext *s,
00207 uint8_t *cur, uint8_t *ref, int width, int height,
00208 const uint8_t *buf1, int cb_offset, const uint8_t *hdr,
00209 const uint8_t *buf2, int min_width_160)
00210 {
00211 uint8_t bit_buf;
00212 unsigned int bit_pos, lv, lv1, lv2;
00213 int *width_tbl, width_tbl_arr[10];
00214 const signed char *ref_vectors;
00215 uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
00216 uint8_t *cur_end = cur + width*height + width;
00217 uint32_t *cur_lp, *ref_lp;
00218 const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
00219 uint8_t *correction_type_sp[2];
00220 struct ustr strip_tbl[20], *strip;
00221 int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
00222 rle_v1, rle_v2, rle_v3;
00223 unsigned short res;
00224
00225 bit_buf = 0;
00226 ref_vectors = NULL;
00227
00228 width_tbl = width_tbl_arr + 1;
00229 i = (width < 0 ? width + 3 : width)/4;
00230 for(j = -1; j < 8; j++)
00231 width_tbl[j] = i * j;
00232
00233 strip = strip_tbl;
00234
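/* The empty loop body is intentional: this just advances region_160_width
 * to the first multiple of min_width_160 reaching width - min_width_160;
 * the value is used at the bottom when clamping the right-most strip. */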
00235 for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
00236
00237 strip->ypos = strip->xpos = 0;
00238 for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
00239 strip->height = height;
00240 strip->split_direction = 0;
00241 strip->split_flag = 0;
00242 strip->usl7 = 0;
00243
00244 bit_pos = 0;
00245
00246 rle_v1 = rle_v2 = rle_v3 = 0;
00247
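/* Main strip loop, driven by a 2-bit command stream: commands 0 and 1 push
 * a copy of the current strip split in height or width respectively;
 * commands 2 and 3, the first time they are seen for a strip, select
 * prediction from the current plane or from the reference plane via a
 * motion vector taken from buf2. Everything else falls through to the
 * per-cell decoding below. */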
00248 while(strip >= strip_tbl) {
00249 if(bit_pos <= 0) {
00250 bit_pos = 8;
00251 bit_buf = *buf1++;
00252 }
00253
00254 bit_pos -= 2;
00255 cmd = (bit_buf >> bit_pos) & 0x03;
00256
00257 if(cmd == 0) {
00258 strip++;
00259 if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
00260 av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
00261 break;
00262 }
00263 memcpy(strip, strip-1, sizeof(*strip));
00264 strip->split_flag = 1;
00265 strip->split_direction = 0;
00266 strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
00267 continue;
00268 } else if(cmd == 1) {
00269 strip++;
00270 if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
00271 av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
00272 break;
00273 }
00274 memcpy(strip, strip-1, sizeof(*strip));
00275 strip->split_flag = 1;
00276 strip->split_direction = 1;
00277 strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
00278 continue;
00279 } else if(cmd == 2) {
00280 if(strip->usl7 == 0) {
00281 strip->usl7 = 1;
00282 ref_vectors = NULL;
00283 continue;
00284 }
00285 } else if(cmd == 3) {
00286 if(strip->usl7 == 0) {
00287 strip->usl7 = 1;
00288 ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
00289 buf1++;
00290 continue;
00291 }
00292 }
00293
00294 cur_frm_pos = cur + width * strip->ypos + strip->xpos;
00295
00296 if((blks_width = strip->width) < 0)
00297 blks_width += 3;
00298 blks_width >>= 2;
00299 blks_height = strip->height;
00300
00301 if(ref_vectors != NULL) {
00302 ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
00303 ref_vectors[1] + strip->xpos;
00304 } else
00305 ref_frm_pos = cur_frm_pos - width_tbl[4];
00306
00307 if(cmd == 2) {
00308 if(bit_pos <= 0) {
00309 bit_pos = 8;
00310 bit_buf = *buf1++;
00311 }
00312
00313 bit_pos -= 2;
00314 cmd = (bit_buf >> bit_pos) & 0x03;
00315
00316 if(cmd == 0 || ref_vectors != NULL) {
00317 for(lp1 = 0; lp1 < blks_width; lp1++) {
00318 for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
00319 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
00320 cur_frm_pos += 4;
00321 ref_frm_pos += 4;
00322 }
00323 } else if(cmd != 1)
00324 return;
00325 } else {
00326 k = *buf1 >> 4;
00327 j = *buf1 & 0x0f;
00328 buf1++;
00329 lv = j + cb_offset;
00330
00331 if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
00332 cp2 = s->ModPred + ((lv - 8) << 7);
00333 cp = ref_frm_pos;
00334 for(i = 0; i < blks_width << 2; i++) {
00335 int v = *cp >> 1;
00336 *(cp++) = cp2[v];
00337 }
00338 }
00339
00340 if(k == 1 || k == 4) {
00341 lv = (hdr[j] & 0xf) + cb_offset;
00342 correction_type_sp[0] = s->corrector_type + (lv << 8);
00343 correction_lp[0] = correction + (lv << 8);
00344 lv = (hdr[j] >> 4) + cb_offset;
00345 correction_lp[1] = correction + (lv << 8);
00346 correction_type_sp[1] = s->corrector_type + (lv << 8);
00347 } else {
00348 correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
00349 correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
00350 correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
00351 correction_lp[0] = correction_lp[1] = correction + (lv << 8);
00352 }
00353
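/* k selects the cell coding mode: cases 0/1 decode 4-line cells against
 * the lines above (or a motion-compensated reference), cases 3/4 decode
 * 8-line cells with every other line interpolated, case 10 handles
 * 8-pixel-wide cells and case 11 handles 8-line inter cells. */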
00354 switch(k) {
00355 case 1:
00356 case 0:
00357 for( ; blks_height > 0; blks_height -= 4) {
00358 for(lp1 = 0; lp1 < blks_width; lp1++) {
00359 for(lp2 = 0; lp2 < 4; ) {
00360 k = *buf1++;
00361 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
00362 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
00363 if ((uint8_t *)cur_lp >= cur_end-3)
00364 break;
00365
00366 switch(correction_type_sp[0][k]) {
00367 case 0:
00368 *cur_lp = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
00369 lp2++;
00370 break;
00371 case 1:
00372 res = ((av_le2ne16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
00373 ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
00374 res = ((av_le2ne16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
00375 ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
00376 buf1++;
00377 lp2++;
00378 break;
00379 case 2:
00380 if(lp2 == 0) {
00381 for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
00382 cur_lp[j] = ref_lp[j];
00383 lp2 += 2;
00384 }
00385 break;
00386 case 3:
00387 if(lp2 < 2) {
00388 for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
00389 cur_lp[j] = ref_lp[j];
00390 lp2 = 3;
00391 }
00392 break;
00393 case 8:
00394 if(lp2 == 0) {
00395 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
00396
00397 if(rle_v1 == 1 || ref_vectors != NULL) {
00398 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00399 cur_lp[j] = ref_lp[j];
00400 }
00401
00402 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
00403 break;
00404 } else {
00405 rle_v1 = 1;
00406 rle_v2 = *buf1 - 1;
00407 }
00408 case 5:
00409 LP2_CHECK(buf1,rle_v3,lp2)
00410 case 4:
00411 for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
00412 cur_lp[j] = ref_lp[j];
00413 lp2 = 4;
00414 break;
00415
00416 case 7:
00417 if(rle_v3 != 0)
00418 rle_v3 = 0;
00419 else {
00420 buf1--;
00421 rle_v3 = 1;
00422 }
00423 case 6:
00424 if(ref_vectors != NULL) {
00425 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00426 cur_lp[j] = ref_lp[j];
00427 }
00428 lp2 = 4;
00429 break;
00430
00431 case 9:
00432 lv1 = *buf1++;
00433 lv = (lv1 & 0x7F) << 1;
00434 lv += (lv << 8);
00435 lv += (lv << 16);
00436 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00437 cur_lp[j] = lv;
00438
00439 LV1_CHECK(buf1,rle_v3,lv1,lp2)
00440 break;
00441 default:
00442 return;
00443 }
00444 }
00445
00446 cur_frm_pos += 4;
00447 ref_frm_pos += 4;
00448 }
00449
00450 cur_frm_pos += ((width - blks_width) * 4);
00451 ref_frm_pos += ((width - blks_width) * 4);
00452 }
00453 break;
00454
00455 case 4:
00456 case 3:
00457 if(ref_vectors != NULL)
00458 return;
00459 flag1 = 1;
00460
00461 for( ; blks_height > 0; blks_height -= 8) {
00462 for(lp1 = 0; lp1 < blks_width; lp1++) {
00463 for(lp2 = 0; lp2 < 4; ) {
00464 k = *buf1++;
00465
00466 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
00467 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
00468
00469 switch(correction_type_sp[lp2 & 0x01][k]) {
00470 case 0:
00471 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
00472 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
00473 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00474 else
00475 cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
00476 lp2++;
00477 break;
00478
00479 case 1:
00480 res = ((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
00481 ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
00482 res = ((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
00483 ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
00484
00485 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
00486 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00487 else
00488 cur_lp[0] = cur_lp[width_tbl[1]];
00489 buf1++;
00490 lp2++;
00491 break;
00492
00493 case 2:
00494 if(lp2 == 0) {
00495 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00496 cur_lp[j] = *ref_lp;
00497 lp2 += 2;
00498 }
00499 break;
00500
00501 case 3:
00502 if(lp2 < 2) {
00503 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
00504 cur_lp[j] = *ref_lp;
00505 lp2 = 3;
00506 }
00507 break;
00508
00509 case 6:
00510 lp2 = 4;
00511 break;
00512
00513 case 7:
00514 if(rle_v3 != 0)
00515 rle_v3 = 0;
00516 else {
00517 buf1--;
00518 rle_v3 = 1;
00519 }
00520 lp2 = 4;
00521 break;
00522
00523 case 8:
00524 if(lp2 == 0) {
00525 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
00526
00527 if(rle_v1 == 1) {
00528 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
00529 cur_lp[j] = ref_lp[j];
00530 }
00531
00532 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
00533 break;
00534 } else {
00535 rle_v2 = (*buf1) - 1;
00536 rle_v1 = 1;
00537 }
00538 case 5:
00539 LP2_CHECK(buf1,rle_v3,lp2)
00540 case 4:
00541 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
00542 cur_lp[j] = *ref_lp;
00543 lp2 = 4;
00544 break;
00545
00546 case 9:
00547 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
00548 lv1 = *buf1++;
00549 lv = (lv1 & 0x7F) << 1;
00550 lv += (lv << 8);
00551 lv += (lv << 16);
00552
00553 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00554 cur_lp[j] = lv;
00555
00556 LV1_CHECK(buf1,rle_v3,lv1,lp2)
00557 break;
00558
00559 default:
00560 return;
00561 }
00562 }
00563
00564 cur_frm_pos += 4;
00565 }
00566
00567 cur_frm_pos += (((width * 2) - blks_width) * 4);
00568 flag1 = 0;
00569 }
00570 break;
00571
00572 case 10:
00573 if(ref_vectors == NULL) {
00574 flag1 = 1;
00575
00576 for( ; blks_height > 0; blks_height -= 8) {
00577 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
00578 for(lp2 = 0; lp2 < 4; ) {
00579 k = *buf1++;
00580 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
00581 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
00582 lv1 = ref_lp[0];
00583 lv2 = ref_lp[1];
00584 if(lp2 == 0 && flag1 != 0) {
00585 #if HAVE_BIGENDIAN
00586 lv1 = lv1 & 0xFF00FF00;
00587 lv1 = (lv1 >> 8) | lv1;
00588 lv2 = lv2 & 0xFF00FF00;
00589 lv2 = (lv2 >> 8) | lv2;
00590 #else
00591 lv1 = lv1 & 0x00FF00FF;
00592 lv1 = (lv1 << 8) | lv1;
00593 lv2 = lv2 & 0x00FF00FF;
00594 lv2 = (lv2 << 8) | lv2;
00595 #endif
00596 }
00597
00598 switch(correction_type_sp[lp2 & 0x01][k]) {
00599 case 0:
00600 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
00601 cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
00602 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
00603 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00604 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00605 } else {
00606 cur_lp[0] = cur_lp[width_tbl[1]];
00607 cur_lp[1] = cur_lp[width_tbl[1]+1];
00608 }
00609 lp2++;
00610 break;
00611
00612 case 1:
00613 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
00614 cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
00615 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
00616 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00617 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00618 } else {
00619 cur_lp[0] = cur_lp[width_tbl[1]];
00620 cur_lp[1] = cur_lp[width_tbl[1]+1];
00621 }
00622 buf1++;
00623 lp2++;
00624 break;
00625
00626 case 2:
00627 if(lp2 == 0) {
00628 if(flag1 != 0) {
00629 for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
00630 cur_lp[j] = lv1;
00631 cur_lp[j+1] = lv2;
00632 }
00633 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00634 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00635 } else {
00636 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
00637 cur_lp[j] = lv1;
00638 cur_lp[j+1] = lv2;
00639 }
00640 }
00641 lp2 += 2;
00642 }
00643 break;
00644
00645 case 3:
00646 if(lp2 < 2) {
00647 if(lp2 == 0 && flag1 != 0) {
00648 for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
00649 cur_lp[j] = lv1;
00650 cur_lp[j+1] = lv2;
00651 }
00652 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00653 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00654 } else {
00655 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
00656 cur_lp[j] = lv1;
00657 cur_lp[j+1] = lv2;
00658 }
00659 }
00660 lp2 = 3;
00661 }
00662 break;
00663
00664 case 8:
00665 if(lp2 == 0) {
00666 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
00667 if(rle_v1 == 1) {
00668 if(flag1 != 0) {
00669 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
00670 cur_lp[j] = lv1;
00671 cur_lp[j+1] = lv2;
00672 }
00673 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00674 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00675 } else {
00676 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
00677 cur_lp[j] = lv1;
00678 cur_lp[j+1] = lv2;
00679 }
00680 }
00681 }
00682 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
00683 break;
00684 } else {
00685 rle_v1 = 1;
00686 rle_v2 = (*buf1) - 1;
00687 }
00688 case 5:
00689 LP2_CHECK(buf1,rle_v3,lp2)
00690 case 4:
00691 if(lp2 == 0 && flag1 != 0) {
00692 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
00693 cur_lp[j] = lv1;
00694 cur_lp[j+1] = lv2;
00695 }
00696 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
00697 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
00698 } else {
00699 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
00700 cur_lp[j] = lv1;
00701 cur_lp[j+1] = lv2;
00702 }
00703 }
00704 lp2 = 4;
00705 break;
00706
00707 case 6:
00708 lp2 = 4;
00709 break;
00710
00711 case 7:
00712 if(lp2 == 0) {
00713 if(rle_v3 != 0)
00714 rle_v3 = 0;
00715 else {
00716 buf1--;
00717 rle_v3 = 1;
00718 }
00719 lp2 = 4;
00720 }
00721 break;
00722
00723 case 9:
00724 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
00725 lv1 = *buf1;
00726 lv = (lv1 & 0x7F) << 1;
00727 lv += (lv << 8);
00728 lv += (lv << 16);
00729 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
00730 cur_lp[j] = lv;
00731 LV1_CHECK(buf1,rle_v3,lv1,lp2)
00732 break;
00733
00734 default:
00735 return;
00736 }
00737 }
00738
00739 cur_frm_pos += 8;
00740 }
00741
00742 cur_frm_pos += (((width * 2) - blks_width) * 4);
00743 flag1 = 0;
00744 }
00745 } else {
00746 for( ; blks_height > 0; blks_height -= 8) {
00747 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
00748 for(lp2 = 0; lp2 < 4; ) {
00749 k = *buf1++;
00750 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
00751 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
00752
00753 switch(correction_type_sp[lp2 & 0x01][k]) {
00754 case 0:
00755 lv1 = correctionloworder_lp[lp2 & 0x01][k];
00756 lv2 = correctionhighorder_lp[lp2 & 0x01][k];
00757 cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
00758 cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
00759 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
00760 cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
00761 lp2++;
00762 break;
00763
00764 case 1:
00765 lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
00766 lv2 = correctionloworder_lp[lp2 & 0x01][k];
00767 cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
00768 cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
00769 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
00770 cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
00771 lp2++;
00772 break;
00773
00774 case 2:
00775 if(lp2 == 0) {
00776 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
00777 cur_lp[j] = ref_lp[j];
00778 cur_lp[j+1] = ref_lp[j+1];
00779 }
00780 lp2 += 2;
00781 }
00782 break;
00783
00784 case 3:
00785 if(lp2 < 2) {
00786 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
00787 cur_lp[j] = ref_lp[j];
00788 cur_lp[j+1] = ref_lp[j+1];
00789 }
00790 lp2 = 3;
00791 }
00792 break;
00793
00794 case 8:
00795 if(lp2 == 0) {
00796 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
00797 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
00798 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
00799 ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
00800 }
00801 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
00802 break;
00803 } else {
00804 rle_v1 = 1;
00805 rle_v2 = (*buf1) - 1;
00806 }
00807 case 5:
00808 case 7:
00809 LP2_CHECK(buf1,rle_v3,lp2)
00810 case 6:
00811 case 4:
00812 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
00813 cur_lp[j] = ref_lp[j];
00814 cur_lp[j+1] = ref_lp[j+1];
00815 }
00816 lp2 = 4;
00817 break;
00818
00819 case 9:
00820 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
00821 lv1 = *buf1;
00822 lv = (lv1 & 0x7F) << 1;
00823 lv += (lv << 8);
00824 lv += (lv << 16);
00825 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
00826 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
00827 LV1_CHECK(buf1,rle_v3,lv1,lp2)
00828 break;
00829
00830 default:
00831 return;
00832 }
00833 }
00834
00835 cur_frm_pos += 8;
00836 ref_frm_pos += 8;
00837 }
00838
00839 cur_frm_pos += (((width * 2) - blks_width) * 4);
00840 ref_frm_pos += (((width * 2) - blks_width) * 4);
00841 }
00842 }
00843 break;
00844
00845 case 11:
00846 if(ref_vectors == NULL)
00847 return;
00848
00849 for( ; blks_height > 0; blks_height -= 8) {
00850 for(lp1 = 0; lp1 < blks_width; lp1++) {
00851 for(lp2 = 0; lp2 < 4; ) {
00852 k = *buf1++;
00853 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
00854 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
00855
00856 switch(correction_type_sp[lp2 & 0x01][k]) {
00857 case 0:
00858 cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
00859 cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
00860 lp2++;
00861 break;
00862
00863 case 1:
00864 lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
00865 lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
00866 res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
00867 ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
00868 res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
00869 ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
00870 res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
00871 ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
00872 res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
00873 ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
00874 lp2++;
00875 break;
00876
00877 case 2:
00878 if(lp2 == 0) {
00879 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00880 cur_lp[j] = ref_lp[j];
00881 lp2 += 2;
00882 }
00883 break;
00884
00885 case 3:
00886 if(lp2 < 2) {
00887 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
00888 cur_lp[j] = ref_lp[j];
00889 lp2 = 3;
00890 }
00891 break;
00892
00893 case 8:
00894 if(lp2 == 0) {
00895 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
00896
00897 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
00898 cur_lp[j] = ref_lp[j];
00899
00900 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
00901 break;
00902 } else {
00903 rle_v1 = 1;
00904 rle_v2 = (*buf1) - 1;
00905 }
00906 case 5:
00907 case 7:
00908 LP2_CHECK(buf1,rle_v3,lp2)
00909 case 4:
00910 case 6:
00911 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
00912 cur_lp[j] = ref_lp[j];
00913 lp2 = 4;
00914 break;
00915
00916 case 9:
00917 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
00918 lv1 = *buf1++;
00919 lv = (lv1 & 0x7F) << 1;
00920 lv += (lv << 8);
00921 lv += (lv << 16);
00922 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
00923 cur_lp[j] = lv;
00924 LV1_CHECK(buf1,rle_v3,lv1,lp2)
00925 break;
00926
00927 default:
00928 return;
00929 }
00930 }
00931
00932 cur_frm_pos += 4;
00933 ref_frm_pos += 4;
00934 }
00935
00936 cur_frm_pos += (((width * 2) - blks_width) * 4);
00937 ref_frm_pos += (((width * 2) - blks_width) * 4);
00938 }
00939 break;
00940
00941 default:
00942 return;
00943 }
00944 }
00945
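/* A leaf strip is finished: walk back through the stack and turn the first
 * pending split into the remaining half of its parent strip. */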
00946 for( ; strip >= strip_tbl; strip--) {
00947 if(strip->split_flag != 0) {
00948 strip->split_flag = 0;
00949 strip->usl7 = (strip-1)->usl7;
00950
00951 if(strip->split_direction) {
00952 strip->xpos += strip->width;
00953 strip->width = (strip-1)->width - strip->width;
00954 if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
00955 strip->width = width - strip->xpos;
00956 } else {
00957 strip->ypos += strip->height;
00958 strip->height = (strip-1)->height - strip->height;
00959 }
00960 break;
00961 }
00962 }
00963 }
00964 }
00965
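/* Init: store the frame dimensions, select YUV 4:1:0 output and allocate
 * the lookup tables and the internal frame buffers. */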
00966 static av_cold int indeo3_decode_init(AVCodecContext *avctx)
00967 {
00968 Indeo3DecodeContext *s = avctx->priv_data;
00969 int ret = 0;
00970
00971 s->avctx = avctx;
00972 s->width = avctx->width;
00973 s->height = avctx->height;
00974 avctx->pix_fmt = PIX_FMT_YUV410P;
00975 avcodec_get_frame_defaults(&s->frame);
00976
00977 if (!(ret = build_modpred(s)))
00978 ret = iv_alloc_frames(s);
00979 if (ret)
00980 iv_free_func(s);
00981
00982 return ret;
00983 }
00984
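/* Parses the frame header (flags, data size, codebook offset, dimensions
 * and the Y/V/U plane offsets), picks the current/reference buffer pair
 * from bit 9 of the flags, then decodes each plane with iv_Decode_Chunk().
 * The chroma planes are skipped when CODEC_FLAG_GRAY is set. */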
00985 static int iv_decode_frame(AVCodecContext *avctx,
00986 const uint8_t *buf, int buf_size)
00987 {
00988 Indeo3DecodeContext *s = avctx->priv_data;
00989 unsigned int image_width, image_height,
00990 chroma_width, chroma_height;
00991 unsigned int flags, cb_offset, data_size,
00992 y_offset, v_offset, u_offset, mc_vector_count;
00993 const uint8_t *hdr_pos, *buf_pos;
00994
00995 buf_pos = buf;
00996 buf_pos += 18;
00997
00998 flags = bytestream_get_le16(&buf_pos);
00999 data_size = bytestream_get_le32(&buf_pos);
01000 cb_offset = *buf_pos++;
01001 buf_pos += 3;
01002 image_height = bytestream_get_le16(&buf_pos);
01003 image_width = bytestream_get_le16(&buf_pos);
01004
01005 if(av_image_check_size(image_width, image_height, 0, avctx))
01006 return -1;
01007 if (image_width != avctx->width || image_height != avctx->height) {
01008 int ret;
01009 avcodec_set_dimensions(avctx, image_width, image_height);
01010 s->width = avctx->width;
01011 s->height = avctx->height;
01012 ret = iv_alloc_frames(s);
01013 if (ret < 0) {
01014 s->width = s->height = 0;
01015 return ret;
01016 }
01017 }
01018
01019 chroma_height = ((image_height >> 2) + 3) & 0x7ffc;
01020 chroma_width = ((image_width >> 2) + 3) & 0x7ffc;
01021 y_offset = bytestream_get_le32(&buf_pos);
01022 v_offset = bytestream_get_le32(&buf_pos);
01023 u_offset = bytestream_get_le32(&buf_pos);
01024 buf_pos += 4;
01025 hdr_pos = buf_pos;
01026 if(data_size == 0x80) return 4;
01027
01028 if(FFMAX3(y_offset, v_offset, u_offset) >= buf_size-16) {
01029 av_log(s->avctx, AV_LOG_ERROR, "y/u/v offset outside buffer\n");
01030 return -1;
01031 }
01032
01033 if(flags & 0x200) {
01034 s->cur_frame = s->iv_frame + 1;
01035 s->ref_frame = s->iv_frame;
01036 } else {
01037 s->cur_frame = s->iv_frame;
01038 s->ref_frame = s->iv_frame + 1;
01039 }
01040
01041 buf_pos = buf + 16 + y_offset;
01042 mc_vector_count = bytestream_get_le32(&buf_pos);
01043 if(2LL*mc_vector_count >= buf_size-16-y_offset) {
01044 av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
01045 return -1;
01046 }
01047
01048 iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width,
01049 image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
01050 FFMIN(image_width, 160));
01051
01052 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
01053 {
01054
01055 buf_pos = buf + 16 + v_offset;
01056 mc_vector_count = bytestream_get_le32(&buf_pos);
01057 if(2LL*mc_vector_count >= buf_size-16-v_offset) {
01058 av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
01059 return -1;
01060 }
01061
01062 iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
01063 chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
01064 FFMIN(chroma_width, 40));
01065
01066 buf_pos = buf + 16 + u_offset;
01067 mc_vector_count = bytestream_get_le32(&buf_pos);
01068 if(2LL*mc_vector_count >= buf_size-16-u_offset) {
01069 av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
01070 return -1;
01071 }
01072
01073 iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
01074 chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
01075 FFMIN(chroma_width, 40));
01076
01077 }
01078
01079 return 8;
01080 }
01081
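/* Decodes one packet into the internal buffers, then copies the resulting
 * Y, U and V planes line by line into a newly acquired AVFrame (the
 * internal plane width differs from the AVFrame linesize). */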
01082 static int indeo3_decode_frame(AVCodecContext *avctx,
01083 void *data, int *data_size,
01084 AVPacket *avpkt)
01085 {
01086 const uint8_t *buf = avpkt->data;
01087 int buf_size = avpkt->size;
01088 Indeo3DecodeContext *s=avctx->priv_data;
01089 uint8_t *src, *dest;
01090 int y;
01091
01092 if (iv_decode_frame(avctx, buf, buf_size) < 0)
01093 return -1;
01094
01095 if(s->frame.data[0])
01096 avctx->release_buffer(avctx, &s->frame);
01097
01098 s->frame.reference = 0;
01099 if(avctx->get_buffer(avctx, &s->frame) < 0) {
01100 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
01101 return -1;
01102 }
01103
01104 src = s->cur_frame->Ybuf;
01105 dest = s->frame.data[0];
01106 for (y = 0; y < s->height; y++) {
01107 memcpy(dest, src, s->cur_frame->y_w);
01108 src += s->cur_frame->y_w;
01109 dest += s->frame.linesize[0];
01110 }
01111
01112 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
01113 {
01114 src = s->cur_frame->Ubuf;
01115 dest = s->frame.data[1];
01116 for (y = 0; y < s->height / 4; y++) {
01117 memcpy(dest, src, s->cur_frame->uv_w);
01118 src += s->cur_frame->uv_w;
01119 dest += s->frame.linesize[1];
01120 }
01121
01122 src = s->cur_frame->Vbuf;
01123 dest = s->frame.data[2];
01124 for (y = 0; y < s->height / 4; y++) {
01125 memcpy(dest, src, s->cur_frame->uv_w);
01126 src += s->cur_frame->uv_w;
01127 dest += s->frame.linesize[2];
01128 }
01129 }
01130
01131 *data_size=sizeof(AVFrame);
01132 *(AVFrame*)data= s->frame;
01133
01134 return buf_size;
01135 }
01136
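/* Uninit: free the internal buffers and release any pending output frame. */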
01137 static av_cold int indeo3_decode_end(AVCodecContext *avctx)
01138 {
01139 Indeo3DecodeContext *s = avctx->priv_data;
01140
01141 iv_free_func(s);
01142
01143 if (s->frame.data[0])
01144 avctx->release_buffer(avctx, &s->frame);
01145
01146 return 0;
01147 }
01148
01149 AVCodec ff_indeo3_decoder = {
01150 "indeo3",
01151 AVMEDIA_TYPE_VIDEO,
01152 CODEC_ID_INDEO3,
01153 sizeof(Indeo3DecodeContext),
01154 indeo3_decode_init,
01155 NULL,
01156 indeo3_decode_end,
01157 indeo3_decode_frame,
01158 CODEC_CAP_DR1,
01159 NULL,
01160 .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
01161 };