Commit b98e3176 authored by Aaron Holtzman

Landed the bp_frames branch. It doesn't work yet, but I needed
some of the code in order to move the codec into libmpeg2.a.
B and P frames are turned off by default. At some point in
the near future, I'll move all of the libmpeg2 files into
their own directory, possibly with its own configure
script.

aaron
parent 2f092eb2
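For orientation, the caller-facing loop after this split looks roughly like the new main() in mpeg2dec.c further down. This is only a sketch of the interface as it stands in this commit (decode_loop is a hypothetical wrapper name), not a finished API:

    #include "mpeg2.h"

    /* hypothetical caller sketch of the libmpeg2 interface introduced here */
    static int decode_loop(void)
    {
        mpeg2_frame_t *frame;

        bitstream_init(fill_buffer);   /* caller supplies the refill callback */
        mpeg2_init();                  /* allocates reference + throwaway frame buffers */

        while(1)
        {
            frame = mpeg2_decode_frame();   /* returns the frame to display */
            display_frame(frame->frame);    /* Y/Cr/Cb planes in frame[0..2] */
        }
        return 0;
    }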
......@@ -4,16 +4,19 @@ AUTOMAKE_OPTIONS = 1.3 foreign
CFLAGS = -Wall -O3 -g @CFLAGS@ @X_CFLAGS@
bin_PROGRAMS = mpeg2dec
noinst_LIBRARIES = libmpeg2.a
mpeg2dec_LIBS=
mpeg2dec_LDADD= @CONFIG_OBJS@ @X_PRE_LIBS@ @X_LIBS@ -lXext -lX11\
@X_EXTRA_LIBS@ -lm
mpeg2dec_SOURCES = getvlc.c mb_buffer.c motion.c parse.c stats.c\
libmpeg2_a_LIBADD = @LIB_CONFIG_OBJS@
libmpeg2_a_SOURCES = getvlc.c mb_buffer.c motion.c parse.c stats.c\
idct.c motion_comp.c inv_quantize.c decode.c bitstream.c debug.c\
yuv2rgb.c
yuv2rgb.c
libmpeg2_a_DEPENDENCIES = @LIB_CONFIG_OBJS@
mpeg2dec_LDADD= -L. -lmpeg2 @CONFIG_OBJS@ @X_PRE_LIBS@ @X_LIBS@ -lXext -lX11\
@X_EXTRA_LIBS@ -lm
mpeg2dec_DEPENDENCIES = @CONFIG_OBJS@
mpeg2dec_SOURCES = mpeg2dec.c
mpeg2dec_DEPENDENCIES = libmpeg2.a @CONFIG_OBJS@
EXTRA_mpeg2dec_SOURCES = display_gatos.c display_mga_vid.c display_x11.c\
display_xil.c idct_mmx.c idct_block_mmx.S idct_mlib.c\
......
......@@ -49,10 +49,10 @@ bitstream_fill_next()
}
//
// The fast paths for _show _flush and _get are in the
// The fast paths for _show, _flush, and _get are in the
// bitstream.h header file so they can be inlined.
//
// The "bottom half" of these routine are suffixed _bh
// The "bottom half" of these routines are suffixed _bh
//
// -ah
//
......
......@@ -68,6 +68,7 @@ fi
CONFIG_OBJS="$CONFIG_OBJS $DISPLAY_DRIVER $EXTENSION_DRIVER"
AC_SUBST(CONFIG_OBJS)
AC_SUBST(LIB_CONFIG_OBJS)
AC_SUBST(DRIVERS)
......@@ -95,7 +96,7 @@ dnl Set the appropriate architecture define
case "$host" in
i?86-*)
AC_DEFINE(__i386__)
CONFIG_OBJS="$CONFIG_OBJS idct_mmx.o idct_block_mmx.o motion_comp_mmx.o";;
LIB_CONFIG_OBJS="$LIB_CONFIG_OBJS idct_mmx.o idct_block_mmx.o motion_comp_mmx.o";;
alpha*-*) AC_DEFINE(__alpha__);;
sparc-*) AC_DEFINE(__sparc__);;
ppc-*) AC_DEFINE(__ppc__);;
......
......@@ -49,6 +49,9 @@ static macroblock_t *mb;
//global config struct
mpeg2_config_t config;
//frame structure to pass back to caller
mpeg2_frame_t mpeg2_frame;
static uint_32 is_display_initialized = 0;
static uint_32 is_sequence_needed = 1;
......@@ -131,9 +134,15 @@ mpeg2_init(void)
//the frame is, or size it so that it will be big enough for
//all cases
frame_size = 720 * 576;
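//720x576 covers full PAL SD; each chroma plane below is frame_size/4,
//which corresponds to 4:2:0 subsampling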
picture.current_frame[0] = malloc(frame_size);
picture.current_frame[1] = malloc(frame_size / 4);
picture.current_frame[2] = malloc(frame_size / 4);
picture.throwaway_frame[0] = malloc(frame_size);
picture.throwaway_frame[1] = malloc(frame_size / 4);
picture.throwaway_frame[2] = malloc(frame_size / 4);
picture.backward_reference_frame[0] = malloc(frame_size);
picture.backward_reference_frame[1] = malloc(frame_size / 4);
picture.backward_reference_frame[2] = malloc(frame_size / 4);
picture.forward_reference_frame[0] = malloc(frame_size);
picture.forward_reference_frame[1] = malloc(frame_size / 4);
picture.forward_reference_frame[2] = malloc(frame_size / 4);
//FIXME setup config properly
config.flags = MPEG2_MMX_ENABLE;
......@@ -147,11 +156,46 @@ mpeg2_init(void)
}
uint_32 frame_counter = 0;
void
mpeg2_decode(void)
decode_reorder_frames(void)
{
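//I- and P-frames are decoded into the buffer holding the oldest reference
//(the outgoing forward reference) and become the new backward reference;
//B-frames go into the throwaway buffer and are never used for prediction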
uint_8 *tmp[3];
if(picture.picture_coding_type != B_TYPE)
{
//reuse the soon to be outdated forward reference frame
picture.current_frame[0] = picture.forward_reference_frame[0];
picture.current_frame[1] = picture.forward_reference_frame[1];
picture.current_frame[2] = picture.forward_reference_frame[2];
//make the backward reference frame the new forward reference frame
tmp[0] = picture.forward_reference_frame[0];
tmp[1] = picture.forward_reference_frame[1];
tmp[2] = picture.forward_reference_frame[2];
picture.forward_reference_frame[0] = picture.backward_reference_frame[0];
picture.forward_reference_frame[1] = picture.backward_reference_frame[1];
picture.forward_reference_frame[2] = picture.backward_reference_frame[2];
picture.backward_reference_frame[0] = tmp[0];
picture.backward_reference_frame[1] = tmp[1];
picture.backward_reference_frame[2] = tmp[2];
}
else
{
picture.current_frame[0] = picture.throwaway_frame[0];
picture.current_frame[1] = picture.throwaway_frame[1];
picture.current_frame[2] = picture.throwaway_frame[2];
}
}
mpeg2_frame_t*
mpeg2_decode_frame(void)
{
uint_32 mba; //macroblock address
uint_32 last_mba; //last macroblock in frame
uint_32 prev_macroblock_type = 0;
uint_32 mba_inc;
uint_32 mb_width;
uint_32 code;
......@@ -174,7 +218,9 @@ mpeg2_decode(void)
//XXX We only do I-frames now
if( picture.picture_coding_type != I_TYPE)
return;
return &mpeg2_frame;
decode_reorder_frames();
last_mba = ((picture.coded_picture_height * picture.coded_picture_width) >> 8) - 1;
mb_width = picture.coded_picture_width >> 4;
......@@ -192,7 +238,6 @@ mpeg2_decode(void)
code = decode_find_header(SLICE_START_CODE_MIN,&picture);
mba = ((code &0xff) - 1) * mb_width - 1;
//printf("starting mba %d of %d mbwidth=%d\n",mba,last_mba,mb_width);
parse_slice_header(&picture,&slice);
do
......@@ -200,17 +245,42 @@ mpeg2_decode(void)
mba_inc = Get_macroblock_address_increment();
if(mba_inc > 1)
for(i=0; i< mba_inc - 1; i++)
{
//handling of skipped mb's differs between P_TYPE and B_TYPE
//pictures
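//a skipped macroblock in a P-picture has zero motion and resets the motion
//vector predictors; in a B-picture it reuses the previous macroblock's
//predictors and its forward/backward prediction type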
if(picture.picture_coding_type == P_TYPE)
{
mb->skipped = 1;
mb->mba = ++mba;
mb = mb_buffer_increment();
parse_reset_pmv(&slice);
for(i=0; i< mba_inc - 1; i++)
{
memset(mb->f_motion_vectors,0,8);
mb->skipped = 1;
mb->mba = ++mba;
mb = mb_buffer_increment();
}
}
else
{
for(i=0; i< mba_inc - 1; i++)
{
memcpy(mb->f_motion_vectors[0],slice.f_pmv,8);
memcpy(mb->f_motion_vectors[1],slice.f_pmv,8);
mb->macroblock_type = prev_macroblock_type;
mb->skipped = 1;
mb->mba = ++mba;
mb = mb_buffer_increment();
}
}
}
mb->skipped = 0;
mb->mba = ++mba;
parse_macroblock(&picture,&slice,mb);
//we store the last macroblock mv flags, as skipped b-frame blocks
//inherit them
prev_macroblock_type = mb->macroblock_type & (MACROBLOCK_MOTION_FORWARD | MACROBLOCK_MOTION_BACKWARD);
mb = mb_buffer_increment();
if(!mb)
......@@ -221,62 +291,26 @@ mpeg2_decode(void)
while(mba < last_mba);
decode_flush_buffer();
display_frame(picture.current_frame);
if(bitstream_show(32) == SEQUENCE_END_CODE)
is_sequence_needed = 1;
printf("frame_counter = %d\n",frame_counter++);
}
uint_32 buf[2048/4];
FILE *in_file;
void fill_buffer(uint_32 **start,uint_32 **end)
{
uint_32 bytes_read;
bytes_read = fread(buf,1,2048,in_file);
*start = buf;
*end = buf + bytes_read/4;
if(bytes_read != 2048)
exit(1);
}
int main(int argc,char *argv[])
{
if(argc < 2)
//decide which frame to send to the display
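//a B-frame is displayed as soon as it is decoded; I- and P-frames are
//delayed by one reference frame, so we return the previous reference
//(now the forward reference) rather than the frame just decoded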
if(picture.picture_coding_type == B_TYPE)
{
fprintf(stderr,"usage: %s video_stream\n",argv[0]);
exit(1);
mpeg2_frame.frame[0] = picture.throwaway_frame[0];
mpeg2_frame.frame[1] = picture.throwaway_frame[1];
mpeg2_frame.frame[2] = picture.throwaway_frame[2];
}
printf(PACKAGE"-"VERSION" (C) 1999 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>\n");
if(argv[1][0] != '-')
else
{
in_file = fopen(argv[1],"r");
if(!in_file)
{
fprintf(stderr,"%s - Couldn't open file ",argv[1]);
perror(0);
exit(1);
}
mpeg2_frame.frame[0] = picture.forward_reference_frame[0];
mpeg2_frame.frame[1] = picture.forward_reference_frame[1];
mpeg2_frame.frame[2] = picture.forward_reference_frame[2];
}
else
in_file = stdin;
bitstream_init(fill_buffer);
//FIXME this doesn't go here later
mpeg2_init();
while(1)
mpeg2_decode();
if(bitstream_show(32) == SEQUENCE_END_CODE)
is_sequence_needed = 1;
return 0;
printf("frame_counter = %d\n",frame_counter++);
return &mpeg2_frame;
}
......@@ -34,7 +34,7 @@
//FIXME dynamically set this
#define MACROBLOCK_BUFFER_SIZE 100
#define MACROBLOCK_BUFFER_SIZE 2000
macroblock_t *macroblocks;
uint_32 num_blocks = 0;
......
......@@ -48,7 +48,9 @@ motion_comp_init(void)
for(i=-384;i<640;i++)
clip[i] = i < 0 ? 0 : (i > 255 ? 255 : i);
#ifdef __i386__
//FIXME turn mmx back on
//#ifdef __i386__
#if 0
if(config.flags & MPEG2_MMX_ENABLE)
motion_comp = motion_comp_mmx;
else
......@@ -79,8 +81,32 @@ motion_comp_block(uint_8 *pred,sint_16 *block,uint_8 *dst,uint_32 pitch)
}
}
void
motion_comp_c(picture_t *picture,mb_buffer_t *mb_buffer)
static void
motion_comp_block_1mv_intra(uint_8 *pred,sint_16 *block,uint_8 *dst,uint_32 pitch)
{
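//add the 8x8 IDCT residual in 'block' to the motion-compensated prediction
//in 'pred', clipping to [0,255] through the clip LUT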
uint_32 i;
pitch = pitch - 8;
for(i=0;i<8;i++)
{
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
*dst++ = clip[*pred++ + *block++];
pred += pitch;
dst += pitch;
}
}
static void
motion_comp_non_intra_frame(picture_t *picture,mb_buffer_t *mb_buffer)
{
macroblock_t *mb = mb_buffer->macroblocks;
uint_32 num_blocks = mb_buffer->num_blocks;
......@@ -90,15 +116,126 @@ motion_comp_c(picture_t *picture,mb_buffer_t *mb_buffer)
uint_32 pitch;
uint_32 d;
uint_8 *dst;
uint_32 x_pred,y_pred;
uint_8 *pred;
width = picture->coded_picture_width;
mb_width = picture->coded_picture_width >> 4;
//just do backward prediction for now
for(i=0;i<num_blocks;i++)
{
if(mb[i].skipped)
continue;
//if(mb[i].skipped)
//continue;
//handle interlaced blocks
if (mb[i].dct_type)
{
d = 1;
pitch = width *2;
}
else
{
d = 8;
pitch = width;
}
//FIXME I'd really like to take these two divides out.
//maybe do fixed point mult with a LUT of the 16.16 inverse
//of common widths
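//e.g. (hypothetical sketch): with inv = ((1<<16) + mb_width - 1) / mb_width
//precomputed once per picture, y = (mb[i].mba * inv) >> 16 and
//x = mb[i].mba - y * mb_width avoids both operations; the 16.16 precision
//is enough for the macroblock-address range of SD-sized pictures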
x = mb[i].mba % mb_width;
y = mb[i].mba / mb_width;
if(mb[i].macroblock_type & MACROBLOCK_MOTION_BACKWARD)
{
//fprintf(stderr,"(motion_comp) backward_mv %d,%d\n",mb[i].b_motion_vectors[0][0] >> 1,mb[i].b_motion_vectors[0][1] >> 1);
x_pred = (mb[i].b_motion_vectors[0][0] >> 1) + x;
y_pred = (mb[i].b_motion_vectors[0][1] >> 1) + y;
//Do y component
dst = &picture->current_frame[0][x * 16 + y * width * 16];
pred =&picture->backward_reference_frame[0][x_pred * 16 + y_pred * width * 16];
motion_comp_block_1mv_intra(pred , mb[i].y_blocks , dst , pitch);
motion_comp_block_1mv_intra(pred + 8 , mb[i].y_blocks + 64, dst + 8 , pitch);
motion_comp_block_1mv_intra(pred + width * 8 , mb[i].y_blocks + 2*64, dst + width * d , pitch);
motion_comp_block_1mv_intra(pred + width * 8 + 8, mb[i].y_blocks + 3*64, dst + width * d + 8, pitch);
//Do Cr component
dst = &picture->current_frame[1][x * 8 + y * width/2 * 8];
pred =&picture->backward_reference_frame[0][x_pred * 8 + y_pred * width/2 * 8];
motion_comp_block_1mv_intra(pred, mb[i].cr_blocks, dst, width/2);
//Do Cb component
dst = &picture->current_frame[2][x * 8 + y * width/2 * 8];
pred =&picture->backward_reference_frame[0][x_pred * 8 + y_pred * width/2 * 8];
motion_comp_block_1mv_intra(pred, mb[i].cb_blocks, dst, width/2);
}
else if(mb[i].macroblock_type & MACROBLOCK_MOTION_FORWARD)
{
//fprintf(stderr,"(motion_comp) forward_mv %d,%d\n",mb[i].f_motion_vectors[0][0] >> 1,mb[i].f_motion_vectors[0][1] >> 1);
x_pred = (mb[i].f_motion_vectors[0][0] >> 1) + x;
y_pred = (mb[i].f_motion_vectors[0][1] >> 1) + y;
//Do y component
dst = &picture->current_frame[0][x * 16 + y * width * 16];
pred =&picture->forward_reference_frame[0][x_pred * 16 + y_pred * width * 16];
motion_comp_block_1mv_intra(pred , mb[i].y_blocks , dst , pitch);
motion_comp_block_1mv_intra(pred + 8 , mb[i].y_blocks + 64, dst + 8 , pitch);
motion_comp_block_1mv_intra(pred + width * 8 , mb[i].y_blocks + 2*64, dst + width * d , pitch);
motion_comp_block_1mv_intra(pred + width * 8 + 8, mb[i].y_blocks + 3*64, dst + width * d + 8, pitch);
//Do Cr component
dst = &picture->current_frame[1][x * 8 + y * width/2 * 8];
pred =&picture->forward_reference_frame[0][x_pred * 8 + y_pred * width/2 * 8];
motion_comp_block_1mv_intra(pred, mb[i].cr_blocks, dst, width/2);
//Do Cb component
dst = &picture->current_frame[2][x * 8 + y * width/2 * 8];
pred =&picture->forward_reference_frame[0][x_pred * 8 + y_pred * width/2 * 8];
motion_comp_block_1mv_intra(pred, mb[i].cb_blocks, dst, width/2);
}
else
{
//Do y component
dst = &picture->current_frame[0][x * 16 + y * width * 16];
motion_comp_block(0, mb[i].y_blocks , dst , pitch);
motion_comp_block(0, mb[i].y_blocks + 64, dst + 8 , pitch);
motion_comp_block(0, mb[i].y_blocks + 2*64, dst + width * d , pitch);
motion_comp_block(0, mb[i].y_blocks + 3*64, dst + width * d + 8, pitch);
//Do Cr component
dst = &picture->current_frame[1][x * 8 + y * width/2 * 8];
motion_comp_block(0, mb[i].cr_blocks, dst, width/2);
//Do Cb component
dst = &picture->current_frame[2][x * 8 + y * width/2 * 8];
motion_comp_block(0, mb[i].cb_blocks, dst, width/2);
}
}
}
static void
motion_comp_intra_frame(picture_t *picture,mb_buffer_t *mb_buffer)
{
macroblock_t *mb = mb_buffer->macroblocks;
uint_32 num_blocks = mb_buffer->num_blocks;
uint_32 i;
uint_32 width,x,y;
uint_32 mb_width;
uint_32 pitch;
uint_32 d;
uint_8 *dst;
width = picture->coded_picture_width;
mb_width = picture->coded_picture_width >> 4;
for(i=0;i<num_blocks;i++)
{
//handle interlaced blocks
if (mb[i].dct_type)
{
......@@ -136,4 +273,13 @@ motion_comp_c(picture_t *picture,mb_buffer_t *mb_buffer)
}
}
void
motion_comp_c(picture_t *picture,mb_buffer_t *mb_buffer)
{
if(picture->picture_coding_type == I_TYPE)
motion_comp_intra_frame(picture,mb_buffer);
else
motion_comp_non_intra_frame(picture,mb_buffer);
}
......@@ -30,6 +30,7 @@ typedef struct mpeg2_frame_s
{
uint_32 horizontal_size;
uint_32 vertical_size;
uint_8 *frame[3];
} mpeg2_frame_t;
//void mpeg2_init(mpeg2_config_t *config);
......
......@@ -194,10 +194,12 @@ typedef struct picture_s
//pointer to the zigzag scan we're supposed to be using
uint_8 *scan;
//These are pointers to the planar frame buffers (Y,Cr,CB)
//Pointer to the current planar frame buffer (Y,Cr,CB)
uint_8 *current_frame[3];
//storage for reference frames plus a b-frame
uint_8 *forward_reference_frame[3];
uint_8 *backward_reference_frame[3];
uint_8 *throwaway_frame[3];
} picture_t;
typedef struct slice_s
......@@ -209,9 +211,13 @@ typedef struct slice_s
uint_32 extra_information_slice;
//Motion vectors
sint_32 pmv[2][2][2];
sint_32 dc_dct_pred[3];
uint_32 quantizer_scale;
//The f_ and b_ correspond to the forward and backward motion
//predictors
sint_16 f_pmv[2][2];
sint_16 b_pmv[2][2];
sint_16 dc_dct_pred[3];
uint_16 quantizer_scale;
} slice_t;
typedef struct macroblock_s
......@@ -222,12 +228,20 @@ typedef struct macroblock_s
uint_16 mba;
uint_16 macroblock_type;
//Motion vector stuff
//The f_ and b_ correspond to the forward and backward motion
//predictors
uint_16 motion_type;
uint_16 motion_vector_count;
sint_16 motion_vertical_field_select[2][2];
sint_16 b_motion_vectors[2][2];
sint_16 f_motion_vectors[2][2];
sint_16 f_motion_vertical_field_select[2];
sint_16 b_motion_vertical_field_select[2];
sint_16 dmvector[2];
uint_16 mv_format;
uint_16 mvscale;
uint_16 dmv;
uint_16 dct_type;
uint_16 coded_block_pattern;
......
/*
* mpeg2dec.c
*
* Copyright (C) Aaron Holtzman <aholtzma@ess.engr.uvic.ca> - Nov 1999
*
* Decodes an MPEG-2 video stream.
*
* This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
*
* mpeg2dec is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* mpeg2dec is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mpeg2dec; see the file COPYING.  If not, write to
* the Free Software Foundation,
*
*/
#include <stdlib.h>
#include <stdio.h>
#include "config.h"
#include "mpeg2.h"
uint_32 buf[2048/4];
FILE *in_file;
void fill_buffer(uint_32 **start,uint_32 **end)
{
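//bitstream refill callback handed to bitstream_init(): reads the next
//2048-byte chunk and returns the valid range of 32-bit words via start/end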
uint_32 bytes_read;
bytes_read = fread(buf,1,2048,in_file);
*start = buf;
*end = buf + bytes_read/4;
if(bytes_read != 2048)
exit(1);
}
int main(int argc,char *argv[])
{
mpeg2_frame_t *my_frame;
if(argc < 2)
{
fprintf(stderr,"usage: %s video_stream\n",argv[0]);
exit(1);
}
printf(PACKAGE"-"VERSION" (C) 1999 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>\n");
if(argv[1][0] != '-')
{
in_file = fopen(argv[1],"r");
if(!in_file)
{
fprintf(stderr,"%s - Couldn't open file ",argv[1]);
perror(0);
exit(1);
}
}
else
in_file = stdin;
//FIXME this doesn't belong here; move it later
bitstream_init(fill_buffer);
mpeg2_init();
while(1)
{
my_frame = mpeg2_decode_frame();
display_frame(my_frame->frame);
}
return 0;
}
......@@ -282,10 +282,11 @@ parse_gop_header(picture_t *picture)
static void
parse_picture_coding_extension(picture_t *picture)
{
picture->f_code[0][0] = bitstream_get(4);
picture->f_code[0][1] = bitstream_get(4);
picture->f_code[1][0] = bitstream_get(4);
picture->f_code[1][1] = bitstream_get(4);
//pre subtract 1 for use later in compute_motion_vector
picture->f_code[0][0] = bitstream_get(4) - 1;
picture->f_code[0][1] = bitstream_get(4) - 1;
picture->f_code[1][0] = bitstream_get(4) - 1;
picture->f_code[1][1] = bitstream_get(4) - 1;
picture->intra_dc_precision = bitstream_get(2);
picture->picture_structure = bitstream_get(2);
......@@ -577,16 +578,148 @@ parse_non_intra_block(const picture_t *picture,slice_t *slice,sint_16 *dest,uint
}
}
//This should inline easily into parse_motion_vector
static inline sint_16 compute_motion_vector(sint_16 vec,uint_16 r_size,sint_16 motion_code,
sint_16 motion_residual)
{
sint_16 lim;
lim = 16<<r_size;
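//a decoded vector must lie in [-lim, lim); values outside that range wrap
//around by 2*lim, which is what the corrections below implement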
if (motion_code>0)
{
vec+= ((motion_code-1)<<r_size) + motion_residual + 1;
if (vec>=lim)
vec-= lim + lim;
}
else if (motion_code<0)
{
vec-= ((-motion_code-1)<<r_size) + motion_residual + 1;
if (vec<-lim)
vec+= lim + lim;
}
return vec;
}
static void parse_motion_vector(sint_16 *prev_mv, sint_16 *curr_mv,const uint_8 *f_code,
macroblock_t *mb)
{
sint_16 motion_code, motion_residual;
sint_16 r_size;
//fprintf(stderr,"motion_vec: h_r_size %d v_r_size %d\n",f_code[0],f_code[1]);
// horizontal component
r_size = f_code[0];
motion_code = Get_motion_code();
motion_residual = 0;
if (r_size!=0 && motion_code!=0)
motion_residual = bitstream_get(r_size);
curr_mv[0] = compute_motion_vector(prev_mv[0],r_size,motion_code,motion_residual);
prev_mv[0] = curr_mv[0];
//XXX dmvectors are unused right now...
if (mb->dmv)
mb->dmvector[0] = Get_dmvector();
// vertical component
r_size = f_code[1];
motion_code = Get_motion_code();
motion_residual = 0;
if (r_size!=0 && motion_code!=0)
motion_residual = bitstream_get(r_size);
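//when mvscale is set (field vectors in a frame picture) the vertical
//predictor is stored at frame scale, so it is halved for prediction and
//restored afterwards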
if (mb->mvscale)
prev_mv[1] >>= 1;
curr_mv[1] = compute_motion_vector(prev_mv[1],r_size,motion_code,motion_residual);
prev_mv[1] = curr_mv[1];
if (mb->mvscale)
prev_mv[1] <<= 1;
//XXX dmvectors are unused right now...
if (mb->dmv)
mb->dmvector[1] = Get_dmvector();
}
//These next two functions are nearly identical; keeping them separate means
//each one doesn't have to switch between the forward and backward data
//structures. The jury is still out on whether it was worth it.
static void parse_forward_motion_vectors(const picture_t *picture,slice_t *slice,
macroblock_t *mb)
{
if (mb->motion_vector_count==1)
{
if (mb->mv_format==MV_FIELD && !mb->dmv)
{
fprintf(stderr,"field based mv\n");
mb->f_motion_vertical_field_select[1] =
mb->f_motion_vertical_field_select[0] = bitstream_get(1);
}
parse_motion_vector(slice->f_pmv[0],mb->f_motion_vectors[0],picture->f_code[0],mb);
/* update other motion vector predictors */
slice->f_pmv[1][0] = slice->f_pmv[0][0];
slice->f_pmv[1][1] = slice->f_pmv[0][1];
}
else
{