summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'media-video/mplayer/files/vaapi-take3/04-mplayer-vdpau.patch')
-rw-r--r--media-video/mplayer/files/vaapi-take3/04-mplayer-vdpau.patch1231
1 files changed, 1231 insertions, 0 deletions
diff --git a/media-video/mplayer/files/vaapi-take3/04-mplayer-vdpau.patch b/media-video/mplayer/files/vaapi-take3/04-mplayer-vdpau.patch
new file mode 100644
index 0000000..5e91da0
--- /dev/null
+++ b/media-video/mplayer/files/vaapi-take3/04-mplayer-vdpau.patch
@@ -0,0 +1,1231 @@
+diff --git a/configure b/configure
+index 91fd164..1f4434e 100755
+--- a/configure
++++ b/configure
+@@ -597,7 +597,7 @@ libavparsers=$libavparsers_all
+ libavbsfs_all=$(sed -n 's/^[^#]*BSF.*(.*, *\(.*\)).*/\1_bsf/p' ffmpeg/libavcodec/allcodecs.c | tr '[a-z]' '[A-Z]')
+ libavbsfs=$libavbsfs_all
+ libavhwaccels_all=$(sed -n 's/^[^#]*HWACCEL.*(.*, *\(.*\)).*/\1_hwaccel/p' ffmpeg/libavcodec/allcodecs.c | tr '[a-z]' '[A-Z]')
+-libavhwaccels=$(for h in $libavhwaccels_all; do case $h in (*_VAAPI_HWACCEL) echo $h;; esac; done)
++libavhwaccels=$(for h in $libavhwaccels_all; do case $h in (*_VAAPI_HWACCEL|*_VDPAU_HWACCEL) echo $h;; esac; done)
+ libavdemuxers_all=$(sed -n 's/^[^#]*DEMUX.*(.*, *\(.*\)).*/\1_demuxer/p' ffmpeg/libavformat/allformats.c | tr '[a-z]' '[A-Z]')
+ libavdemuxers=$(echo $libavdemuxers_all | sed -e 's/ LIB[A-Z0-9_]*_DEMUXER//g' -e s/REDIR_DEMUXER// -e s/AVISYNTH_DEMUXER//)
+ libavmuxers_all=$(sed -n 's/^[^#]*_MUX.*(.*, *\(.*\)).*/\1_muxer/p' ffmpeg/libavformat/allformats.c | tr '[a-z]' '[A-Z]')
+@@ -4466,7 +4466,7 @@ if test "$_vdpau" = yes ; then
+ else
+ def_vdpau='#define CONFIG_VDPAU 0'
+ novomodules="vdpau $novomodules"
+- libavdecoders=$(echo $libavdecoders | sed -e 's/[A-Z0-9]*_VDPAU_DECODER//g')
++ libavhwaccels=$(echo $libavhwaccels | sed -e 's/[A-Z0-9]*_VDPAU_DECODER//g')
+ fi
+ echores "$_vdpau"
+
+diff --git a/etc/codecs.conf b/etc/codecs.conf
+index b0d00f4..745d3e4 100644
+--- a/etc/codecs.conf
++++ b/etc/codecs.conf
+@@ -176,6 +176,7 @@ videocodec ffmpeg1
+ fourcc m1v1
+ driver ffmpeg
+ dll "mpeg1video"
++ out VDPAU_MPEG1
+ out YV12,I420,IYUV
+
+ videocodec ffmpeg2
+@@ -213,6 +214,7 @@ videocodec ffmpeg2
+ driver ffmpeg
+ dll "mpeg2video"
+ out VAAPI_MPEG2
++ out VDPAU_MPEG2
+ out YV12,I420,IYUV
+ out 422P,444P
+
+@@ -253,6 +255,7 @@ videocodec ffmpeg12
+ fourcc slif ; SoftLab MPEG-2 I-frames Codec
+ driver ffmpeg
+ dll "mpegvideo"
++ out VDPAU_MPEG1,VDPAU_MPEG2
+ out YV12,I420,IYUV
+ out 422P,444P
+
+@@ -332,41 +335,6 @@ videocodec ffmpeg12mc
+ out IDCT_MPEG2
+ out MOCO_MPEG2
+
+-videocodec ffmpeg12vdpau
+- info "FFmpeg MPEG-1/2 (VDPAU)"
+- status working
+- format 0x10000001 ; MPEG-1
+- format 0x10000002 ; MPEG-2
+- fourcc mpg1,mpg2,MPG2
+- fourcc PIM1 ; Pinnacle hardware-MPEG-1
+- fourcc PIM2 ; Pinnacle hardware-MPEG-2
+- fourcc "DVR "
+- fourcc hdv2
+- fourcc MPEG
+- fourcc hdv1
+- fourcc hdv3 ; HDV 1080i50
+- fourcc hdv5 ; HDV 720p25
+- fourcc mx5p ; MPEG IMX 625/50 (50 Mb/s)
+- fourcc hdv6,hdv7,hdv8
+- fourcc xdv1,xdv2,xdv3
+- fourcc xdv4,xdv5,xdv6
+- fourcc xdv7,xdv8,xdv9
+- fourcc xdva,xdvb,xdvc
+- fourcc xdvd,xdve,xdvf
+- fourcc xd5a,xd5b,xd5c
+- fourcc xd5d,xd5e,xd5f
+- fourcc xd59,xd54,xd55
+- fourcc mx5n,mx4n,mx4p
+- fourcc mx3n,mx3p
+- fourcc AVmp
+- fourcc mp2v,mpgv
+- fourcc LMP2 ; Lead mpeg2 in avi
+- fourcc m2v1,m1v1
+- driver ffmpeg
+- dll "mpegvideo_vdpau"
+- out VDPAU_MPEG1
+- out VDPAU_MPEG2
+-
+ videocodec mpegpes
+ info "MPEG-PES output (.mpg or DXR3/IVTV/DVB/V4L2 card)"
+ comment "for hardware decoding"
+@@ -943,6 +911,7 @@ videocodec ffwmv3
+ driver ffmpeg
+ dll wmv3
+ out VAAPI_WMV3
++ out VDPAU_WMV3
+ out YV12,I420,IYUV
+
+ videocodec ffwmvp
+@@ -952,15 +921,8 @@ videocodec ffwmvp
+ fourcc wmvp,WMVP
+ driver ffmpeg
+ dll wmv3
+- out YV12,I420,IYUV
+-
+-videocodec ffwmv3vdpau
+- info "FFmpeg WMV3/WMV9 (VDPAU)"
+- status buggy
+- fourcc WMV3,wmv3
+- driver ffmpeg
+- dll wmv3_vdpau
+ out VDPAU_WMV3
++ out YV12,I420,IYUV
+
+ videocodec ffvc1
+ info "FFmpeg WVC1"
+@@ -970,16 +932,8 @@ videocodec ffvc1
+ driver ffmpeg
+ dll vc1
+ out VAAPI_VC1
+- out YV12,I420,IYUV
+-
+-videocodec ffvc1vdpau
+- info "FFmpeg WVC1 (VDPAU)"
+- status buggy
+- fourcc WVC1,wvc1,WMVA
+- fourcc vc-1,VC-1
+- driver ffmpeg
+- dll vc1_vdpau
+ out VDPAU_VC1
++ out YV12,I420,IYUV
+
+ videocodec ffh264
+ info "FFmpeg H.264"
+@@ -993,21 +947,8 @@ videocodec ffh264
+ driver ffmpeg
+ dll h264
+ out VAAPI_H264
+- out YV12,I420,IYUV
+-
+-videocodec ffh264vdpau
+- info "FFmpeg H.264 (VDPAU)"
+- status working
+- fourcc H264,h264
+- fourcc X264,x264
+- fourcc avc1,AVC1
+- fourcc davc,DAVC
+- fourcc ai55,ai15 ; flip4mac avc intra
+- fourcc ai1q,ai5q ; flip4mac avc intra
+- format 0x10000005
+- driver ffmpeg
+- dll h264_vdpau
+ out VDPAU_H264
++ out YV12,I420,IYUV
+
+ videocodec coreavcwindows
+ info "CoreAVC H.264 for x86 - http://corecodec.org/"
+@@ -1063,40 +1006,8 @@ videocodec ffodivx
+ driver ffmpeg
+ dll mpeg4 ;opendivx
+ out VAAPI_MPEG4
+- out YV12,I420,IYUV
+-
+-videocodec ffodivxvdpau
+- info "FFmpeg MPEG-4,DIVX-4/5 (VDPAU)"
+- status working
+- fourcc FMP4,fmp4
+- fourcc DIVX,divx
+- fourcc DIV1,div1 divx
+- fourcc MP4S,mp4s ; ISO MPEG-4 Video V1
+- fourcc M4S2,m4s2
+- fourcc xvid,XVID,XviD,XVIX
+- fourcc DX50,dx50,BLZ0 DX50
+- fourcc mp4v,MP4V
+- format 0x4
+- fourcc UMP4
+- fourcc RMP4
+- fourcc 3IV2,3iv2 ; 3ivx Delta 4
+- fourcc DXGM
+- fourcc SEDG ; diskless camcorder Samsung Miniket VP-M110
+- fourcc SMP4,smp4 ; Samsung SMP4 video codec
+- fourcc VIDM ; vidm 4.01 codec
+- format 0x10000004 ; mpeg 4 es
+- fourcc m4cc,M4CC
+- fourcc hdx4,HDX4
+- fourcc FVFW,fvfw
+- fourcc FFDS
+- fourcc DCOD,MVXM,EM4A,PM4V
+- fourcc M4T3,DMK2,DIGI,INMC
+- fourcc EPHV,SN40,WAWV
+- fourcc uldx,ULDX,VSPX
+- fourcc SIPP ; Samsung SHR-6040
+- driver ffmpeg
+- dll mpeg4_vdpau
+ out VDPAU_MPEG4
++ out YV12,I420,IYUV
+
+ videocodec ffwv1f
+ info "WV1F MPEG-4"
+diff --git a/ffmpeg/libavcodec/allcodecs.c b/ffmpeg/libavcodec/allcodecs.c
+index fbae0f6..4b6d2e9 100644
+--- a/ffmpeg/libavcodec/allcodecs.c
++++ b/ffmpeg/libavcodec/allcodecs.c
+@@ -116,7 +123,6 @@ void avcodec_register_all(void)
+ REGISTER_DECODER (H263I, h263i);
+ REGISTER_ENCODER (H263P, h263p);
+ REGISTER_DECODER (H264, h264);
+- REGISTER_DECODER (H264_VDPAU, h264_vdpau);
+ REGISTER_ENCDEC (HUFFYUV, huffyuv);
+ REGISTER_DECODER (IDCIN, idcin);
+ REGISTER_DECODER (IFF_BYTERUN1, iff_byterun1);
+@@ -140,10 +146,7 @@ void avcodec_register_all(void)
+ REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video);
+ REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
+ REGISTER_ENCDEC (MPEG4, mpeg4);
+- REGISTER_DECODER (MPEG4_VDPAU, mpeg4_vdpau);
+ REGISTER_DECODER (MPEGVIDEO, mpegvideo);
+- REGISTER_DECODER (MPEG_VDPAU, mpeg_vdpau);
+- REGISTER_DECODER (MPEG1_VDPAU, mpeg1_vdpau);
+ REGISTER_ENCDEC (MSMPEG4V1, msmpeg4v1);
+ REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2);
+ REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3);
+@@ -196,7 +199,6 @@ void avcodec_register_all(void)
+ REGISTER_DECODER (V210X, v210x);
+ REGISTER_DECODER (VB, vb);
+ REGISTER_DECODER (VC1, vc1);
+- REGISTER_DECODER (VC1_VDPAU, vc1_vdpau);
+ REGISTER_DECODER (VCR1, vcr1);
+ REGISTER_DECODER (VMDVIDEO, vmdvideo);
+ REGISTER_DECODER (VMNC, vmnc);
+@@ -210,7 +212,6 @@ void avcodec_register_all(void)
+ REGISTER_ENCDEC (WMV1, wmv1);
+ REGISTER_ENCDEC (WMV2, wmv2);
+ REGISTER_DECODER (WMV3, wmv3);
+- REGISTER_DECODER (WMV3_VDPAU, wmv3_vdpau);
+ REGISTER_DECODER (WNV1, wnv1);
+ REGISTER_DECODER (XAN_WC3, xan_wc3);
+ REGISTER_DECODER (XL, xl);
+diff --git a/ffmpeg/libavcodec/avcodec.h b/ffmpeg/libavcodec/avcodec.h
+index bff9477..1e77fa6 100644
+--- a/ffmpeg/libavcodec/avcodec.h
++++ b/ffmpeg/libavcodec/avcodec.h
+@@ -687,10 +687,6 @@ typedef struct RcOverride{
+ */
+ #define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+ /**
+- * Codec can export data for HW decoding (VDPAU).
+- */
+-#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+-/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time, demuxers which do not do
+ * are connected to a parser to split what they return into proper frames.
+diff --git a/ffmpeg/libavcodec/error_resilience.c b/ffmpeg/libavcodec/error_resilience.c
+index dc015b9..a36f7ac 100644
+--- a/ffmpeg/libavcodec/error_resilience.c
++++ b/ffmpeg/libavcodec/error_resilience.c
+@@ -759,7 +759,6 @@ void ff_er_frame_end(MpegEncContext *s){
+
+ if(!s->error_recognition || s->error_count==0 || s->avctx->lowres ||
+ s->avctx->hwaccel ||
+- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
+ s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled
+ s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
+
+diff --git a/ffmpeg/libavcodec/h263dec.c b/ffmpeg/libavcodec/h263dec.c
+index b0a3a8a..b5b9026 100644
+--- a/ffmpeg/libavcodec/h263dec.c
++++ b/ffmpeg/libavcodec/h263dec.c
+@@ -34,7 +34,6 @@
+ #include "h263_parser.h"
+ #include "mpeg4video_parser.h"
+ #include "msmpeg4.h"
+-#include "vdpau_internal.h"
+ #include "flv.h"
+ #include "mpeg4video.h"
+
+@@ -620,11 +619,6 @@ retry:
+ if(MPV_frame_start(s, avctx) < 0)
+ return -1;
+
+- if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) {
+- ff_vdpau_mpeg4_decode_picture(s, s->gb.buffer, s->gb.buffer_end - s->gb.buffer);
+- goto frame_end;
+- }
+-
+ if (avctx->hwaccel) {
+ if (avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer) < 0)
+ return -1;
+diff --git a/ffmpeg/libavcodec/h264.c b/ffmpeg/libavcodec/h264.c
+index f99f7ea..faafa97 100644
+--- a/ffmpeg/libavcodec/h264.c
++++ b/ffmpeg/libavcodec/h264.c
+@@ -37,7 +37,6 @@
+ #include "golomb.h"
+ #include "mathops.h"
+ #include "rectangle.h"
+-#include "vdpau_internal.h"
+ #include "libavutil/avassert.h"
+
+ #include "cabac.h"
+@@ -1647,9 +1646,6 @@ static void field_end(H264Context *h){
+ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->pict_type= s->pict_type;
+
+- if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- ff_vdpau_h264_set_reference_frames(s);
+-
+ if(!s->dropable) {
+ ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
+ h->prev_poc_msb= h->poc_msb;
+@@ -1663,9 +1659,6 @@ static void field_end(H264Context *h){
+ av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n");
+ }
+
+- if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- ff_vdpau_h264_picture_complete(s);
+-
+ /*
+ * FIXME: Error handling code does not seem to support interlaced
+ * when slices span multiple rows
+@@ -2722,8 +2715,6 @@ static void execute_decode_slices(H264Context *h, int context_count){
+
+ if (s->avctx->hwaccel)
+ return;
+- if(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- return;
+ if(context_count == 1) {
+ decode_slice(avctx, &h);
+ } else {
+@@ -2859,8 +2850,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
+ if (h->current_slice == 1) {
+ if (s->avctx->hwaccel && s->avctx->hwaccel->start_frame(s->avctx, NULL, 0) < 0)
+ return -1;
+- if(CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- ff_vdpau_h264_picture_start(s);
+ }
+
+ s->current_picture_ptr->key_frame |=
+@@ -2875,11 +2864,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
+ if (avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed) < 0)
+ return -1;
+ }else
+- if(CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
+- static const uint8_t start_code[] = {0x00, 0x00, 0x01};
+- ff_vdpau_add_data_chunk(s, start_code, sizeof(start_code));
+- ff_vdpau_add_data_chunk(s, &buf[buf_index - consumed], consumed );
+- }else
+ context_count++;
+ }
+ break;
+@@ -3409,21 +3393,3 @@ AVCodec h264_decoder = {
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
+ };
+-
+-#if CONFIG_H264_VDPAU_DECODER
+-AVCodec ff_h264_vdpau_decoder = {
+- "h264_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_H264,
+- sizeof(H264Context),
+- ff_h264_decode_init,
+- NULL,
+- ff_h264_decode_end,
+- decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+- .flush= flush_dpb,
+- .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
+- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_H264, PIX_FMT_NONE},
+- .profiles = NULL_IF_CONFIG_SMALL(profiles),
+-};
+-#endif
+diff --git a/ffmpeg/libavcodec/mpeg12.c b/ffmpeg/libavcodec/mpeg12.c
+index 6a331eb..c226797 100644
+--- a/ffmpeg/libavcodec/mpeg12.c
++++ b/ffmpeg/libavcodec/mpeg12.c
+@@ -35,7 +35,6 @@
+ #include "mpeg12data.h"
+ #include "mpeg12decdata.h"
+ #include "bytestream.h"
+-#include "vdpau_internal.h"
+ #include "xvmc_internal.h"
+
+ //#undef NDEBUG
+@@ -1226,12 +1225,7 @@ static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){
+
+ if(avctx->xvmc_acceleration)
+ return avctx->get_format(avctx,pixfmt_xvmc_mpg2_420);
+- else if(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
+- if(avctx->codec_id == CODEC_ID_MPEG1VIDEO)
+- return PIX_FMT_VDPAU_MPEG1;
+- else
+- return PIX_FMT_VDPAU_MPEG2;
+- }else{
++ else{
+ if(s->chroma_format < 2)
+ return avctx->get_format(avctx,ff_hwaccel_pixfmt_list_420);
+ else if(s->chroma_format == 2)
+@@ -1324,9 +1318,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
+ avctx->pix_fmt = mpeg_get_pixelformat(avctx);
+ avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
+ //until then pix_fmt may be changed right after codec init
+- if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
+- avctx->hwaccel ||
+- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
++ if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel )
+ if( avctx->idct_algo == FF_IDCT_AUTO )
+ avctx->idct_algo = FF_IDCT_SIMPLE;
+
+@@ -2065,8 +2057,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
+ avctx->pix_fmt = mpeg_get_pixelformat(avctx);
+ avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
+
+- if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel ||
+- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
++ if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel )
+ if( avctx->idct_algo == FF_IDCT_AUTO )
+ avctx->idct_algo = FF_IDCT_SIMPLE;
+
+@@ -2297,9 +2288,6 @@ static int decode_chunks(AVCodecContext *avctx,
+ s2->error_count += s2->thread_context[i]->error_count;
+ }
+
+- if (CONFIG_MPEG_VDPAU_DECODER && avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);
+-
+ if (slice_end(avctx, picture)) {
+ if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
+ *data_size = sizeof(AVPicture);
+@@ -2446,11 +2434,6 @@ static int decode_chunks(AVCodecContext *avctx,
+ return -1;
+ }
+
+- if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
+- s->slice_count++;
+- break;
+- }
+-
+ if(avctx->thread_count > 1){
+ int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
+ if(threshold <= mb_y){
+@@ -2578,36 +2561,3 @@ AVCodec mpeg_xvmc_decoder = {
+ };
+
+ #endif
+-
+-#if CONFIG_MPEG_VDPAU_DECODER
+-AVCodec ff_mpeg_vdpau_decoder = {
+- "mpegvideo_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_MPEG2VIDEO,
+- sizeof(Mpeg1Context),
+- mpeg_decode_init,
+- NULL,
+- mpeg_decode_end,
+- mpeg_decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+- .flush= flush,
+- .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
+-};
+-#endif
+-
+-#if CONFIG_MPEG1_VDPAU_DECODER
+-AVCodec ff_mpeg1_vdpau_decoder = {
+- "mpeg1video_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_MPEG1VIDEO,
+- sizeof(Mpeg1Context),
+- mpeg_decode_init,
+- NULL,
+- mpeg_decode_end,
+- mpeg_decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+- .flush= flush,
+- .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
+-};
+-#endif
+-
+diff --git a/ffmpeg/libavcodec/mpeg4videodec.c b/ffmpeg/libavcodec/mpeg4videodec.c
+index b339f78..1928d03 100644
+--- a/ffmpeg/libavcodec/mpeg4videodec.c
++++ b/ffmpeg/libavcodec/mpeg4videodec.c
+@@ -2249,20 +2249,3 @@ AVCodec mpeg4_decoder = {
+ .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+ .pix_fmts= ff_hwaccel_pixfmt_list_420,
+ };
+-
+-
+-#if CONFIG_MPEG4_VDPAU_DECODER
+-AVCodec ff_mpeg4_vdpau_decoder = {
+- "mpeg4_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_MPEG4,
+- sizeof(MpegEncContext),
+- decode_init,
+- NULL,
+- ff_h263_decode_end,
+- ff_h263_decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+- .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
+- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE},
+-};
+-#endif
+diff --git a/ffmpeg/libavcodec/mpegvideo.c b/ffmpeg/libavcodec/mpegvideo.c
+index 9650066..5f8440b 100644
+--- a/ffmpeg/libavcodec/mpegvideo.c
++++ b/ffmpeg/libavcodec/mpegvideo.c
+@@ -117,6 +117,7 @@ const enum PixelFormat ff_pixfmt_list_420[] = {
+ const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
+ PIX_FMT_DXVA2_VLD,
+ PIX_FMT_VAAPI_VLD,
++ PIX_FMT_VDPAU,
+ PIX_FMT_YUV420P,
+ PIX_FMT_NONE
+ };
+@@ -1064,7 +1065,6 @@ void MPV_frame_end(MpegEncContext *s)
+ if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
+ ff_xvmc_field_end(s);
+ }else if(!s->avctx->hwaccel
+- && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+ && s->unrestricted_mv
+ && s->current_picture.reference
+ && !s->intra_only
+diff --git a/ffmpeg/libavcodec/vc1dec.c b/ffmpeg/libavcodec/vc1dec.c
+index c9a547b..71c50e7 100644
+--- a/ffmpeg/libavcodec/vc1dec.c
++++ b/ffmpeg/libavcodec/vc1dec.c
+@@ -37,7 +37,6 @@
+ #include "unary.h"
+ #include "simple_idct.h"
+ #include "mathops.h"
+-#include "vdpau_internal.h"
+
+ #undef NDEBUG
+ #include <assert.h>
+@@ -3167,13 +3166,6 @@ static int vc1_decode_frame(AVCodecContext *avctx,
+ s->current_picture_ptr= &s->picture[i];
+ }
+
+- if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
+- if (v->profile < PROFILE_ADVANCED)
+- avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
+- else
+- avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
+- }
+-
+ //for advanced profile we may need to parse and unescape data
+ if (avctx->codec_id == CODEC_ID_VC1) {
+ int buf_size2 = 0;
+@@ -3190,8 +3182,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
+ if(size <= 0) continue;
+ switch(AV_RB32(start)){
+ case VC1_CODE_FRAME:
+- if (avctx->hwaccel ||
+- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
++ if (avctx->hwaccel)
+ buf_start = start;
+ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
+ break;
+@@ -3283,10 +3274,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
+ s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+
+- if ((CONFIG_VC1_VDPAU_DECODER)
+- &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+- ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
+- else if (avctx->hwaccel) {
++ if (avctx->hwaccel) {
+ if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
+ return -1;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+@@ -3377,39 +3365,3 @@ AVCodec wmv3_decoder = {
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
+ };
+ #endif
+-
+-#if CONFIG_WMV3_VDPAU_DECODER
+-AVCodec ff_wmv3_vdpau_decoder = {
+- "wmv3_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_WMV3,
+- sizeof(VC1Context),
+- vc1_decode_init,
+- NULL,
+- vc1_decode_end,
+- vc1_decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+- NULL,
+- .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
+- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
+- .profiles = NULL_IF_CONFIG_SMALL(profiles)
+-};
+-#endif
+-
+-#if CONFIG_VC1_VDPAU_DECODER
+-AVCodec ff_vc1_vdpau_decoder = {
+- "vc1_vdpau",
+- AVMEDIA_TYPE_VIDEO,
+- CODEC_ID_VC1,
+- sizeof(VC1Context),
+- vc1_decode_init,
+- NULL,
+- vc1_decode_end,
+- vc1_decode_frame,
+- CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+- NULL,
+- .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
+- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
+- .profiles = NULL_IF_CONFIG_SMALL(profiles)
+-};
+-#endif
+diff --git a/ffmpeg/libavcodec/vdpau.c b/ffmpeg/libavcodec/vdpau.c
+index bd721e8..09ca45e 100644
+--- a/ffmpeg/libavcodec/vdpau.c
++++ b/ffmpeg/libavcodec/vdpau.c
+@@ -30,7 +30,6 @@
+ #include <assert.h>
+
+ #include "vdpau.h"
+-#include "vdpau_internal.h"
+
+ /**
+ * \addtogroup VDPAU_Decoding
+@@ -38,15 +37,57 @@
+ * @{
+ */
+
+-void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
++static void vdpau_h264_fill_field_order_cnt(int32_t field_order_cnt[2], Picture *pic, int pic_structure)
+ {
+- H264Context *h = s->avctx->priv_data;
++ int i;
++ for (i = 0; i < 2; i++) {
++ const int poc = pic->field_poc[i];
++ field_order_cnt[i] = poc != INT_MAX ? poc : 0;
++ }
++}
++
++static void vdpau_h264_init_picture(VdpReferenceFrameH264 *rf)
++{
++ rf->surface = VDP_INVALID_HANDLE;
++ rf->is_long_term = 0;
++ rf->top_is_reference = 0;
++ rf->bottom_is_reference = 0;
++ rf->field_order_cnt[0] = 0;
++ rf->field_order_cnt[1] = 0;
++ rf->frame_idx = 0;
++}
++
++static void vdpau_h264_fill_picture(VdpReferenceFrameH264 *rf, Picture *pic, int pic_structure)
++{
++ struct vdpau_render_state *render;
++
++ assert(rf);
++ assert(pic);
++
++ if (pic_structure == 0)
++ pic_structure = pic->reference;
++
++ render = (struct vdpau_render_state *)pic->data[3];
++ assert(render);
++
++ rf->surface = render->surface;
++ rf->is_long_term = pic->reference && pic->long_ref;
++ rf->top_is_reference = (pic_structure & PICT_TOP_FIELD) != 0;
++ rf->bottom_is_reference = (pic_structure & PICT_BOTTOM_FIELD) != 0;
++ rf->frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
++
++ vdpau_h264_fill_field_order_cnt(rf->field_order_cnt, pic, pic_structure);
++}
++
++static void vdpau_h264_set_reference_frames(H264Context *h)
++{
++ MpegEncContext * const s = &h->s;
+ struct vdpau_render_state *render, *render_ref;
+ VdpReferenceFrameH264 *rf, *rf2;
+ Picture *pic;
+ int i, list, pic_frame_idx;
+
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+ rf = &render->info.h264.referenceFrames[0];
+@@ -62,7 +103,7 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
+ continue;
+ pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
+
+- render_ref = (struct vdpau_render_state *)pic->data[0];
++ render_ref = (struct vdpau_render_state *)pic->data[3];
+ assert(render_ref);
+
+ rf2 = &render->info.h264.referenceFrames[0];
+@@ -84,81 +125,93 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
+ if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
+ continue;
+
+- rf->surface = render_ref->surface;
+- rf->is_long_term = pic->long_ref;
+- rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+- rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+- rf->field_order_cnt[0] = pic->field_poc[0];
+- rf->field_order_cnt[1] = pic->field_poc[1];
+- rf->frame_idx = pic_frame_idx;
++ vdpau_h264_fill_picture(rf, pic, pic->reference);
+
+ ++rf;
+ }
+ }
+
+- for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
+- rf->surface = VDP_INVALID_HANDLE;
+- rf->is_long_term = 0;
+- rf->top_is_reference = 0;
+- rf->bottom_is_reference = 0;
+- rf->field_order_cnt[0] = 0;
+- rf->field_order_cnt[1] = 0;
+- rf->frame_idx = 0;
+- }
++ for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf)
++ vdpau_h264_init_picture(rf);
+ }
+
+-void ff_vdpau_add_data_chunk(MpegEncContext *s,
+- const uint8_t *buf, int buf_size)
++static int vdpau_ensure_bitstream_buffers(struct vdpau_render_state *render)
+ {
+- struct vdpau_render_state *render;
+-
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+- assert(render);
+-
+ render->bitstream_buffers= av_fast_realloc(
+ render->bitstream_buffers,
+ &render->bitstream_buffers_allocated,
+ sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
+ );
+
+- render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+- render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
+- render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
+- render->bitstream_buffers_used++;
++ if (!render->bitstream_buffers)
++ return -1;
++
++ return 0;
+ }
+
+-void ff_vdpau_h264_picture_start(MpegEncContext *s)
++static int vdpau_common_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+ {
+- H264Context *h = s->avctx->priv_data;
++ MpegEncContext * const s = avctx->priv_data;
+ struct vdpau_render_state *render;
+- int i;
+
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+- for (i = 0; i < 2; ++i) {
+- int foc = s->current_picture_ptr->field_poc[i];
+- if (foc == INT_MAX)
+- foc = 0;
+- render->info.h264.field_order_cnt[i] = foc;
++ render->bitstream_buffers_used = 0;
++ return 0;
++}
++
++static int vdpau_common_end_frame(AVCodecContext *avctx)
++{
++ MpegEncContext * const s = avctx->priv_data;
++ struct vdpau_render_state *render;
++
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
++ assert(render);
++
++ if (render->bitstream_buffers_used) {
++ ff_draw_horiz_band(s, 0, s->avctx->height);
++ render->bitstream_buffers_used = 0;
+ }
++ return 0;
++}
+
+- render->info.h264.frame_num = h->frame_num;
++static int vdpau_common_decode_slice(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
++{
++ MpegEncContext * const s = avctx->priv_data;
++ struct vdpau_render_state *render;
++
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
++ assert(render);
++
++ if (vdpau_ensure_bitstream_buffers(render) < 0)
++ return -1;
++
++ render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
++ render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
++ render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
++ render->bitstream_buffers_used++;
++ return 0;
+ }
+
+-void ff_vdpau_h264_picture_complete(MpegEncContext *s)
++static int vdpau_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+ {
+- H264Context *h = s->avctx->priv_data;
++ H264Context * const h = avctx->priv_data;
++ MpegEncContext * const s = &h->s;
+ struct vdpau_render_state *render;
+
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+- render->info.h264.slice_count = h->slice_num;
+- if (render->info.h264.slice_count < 1)
+- return;
++ vdpau_h264_set_reference_frames(h);
+
+- render->info.h264.is_reference = (s->current_picture_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
++ vdpau_h264_fill_field_order_cnt(render->info.h264.field_order_cnt,
++ s->current_picture_ptr,
++ s->picture_structure);
++
++ /* fill VdpPictureInfoH264 struct */
++ render->info.h264.is_reference = h->nal_ref_idc != 0;
++ render->info.h264.frame_num = h->frame_num;
+ render->info.h264.field_pic_flag = s->picture_structure != PICT_FRAME;
+ render->info.h264.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD;
+ render->info.h264.num_ref_frames = h->sps.ref_frame_count;
+@@ -185,19 +238,44 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s)
+ memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
+ memcpy(render->info.h264.scaling_lists_8x8, h->pps.scaling_matrix8, sizeof(render->info.h264.scaling_lists_8x8));
+
+- ff_draw_horiz_band(s, 0, s->avctx->height);
+- render->bitstream_buffers_used = 0;
++ render->info.h264.slice_count = 0;
++
++ return vdpau_common_start_frame(avctx, buffer, size);
+ }
+
+-void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+- int buf_size, int slice_count)
++static int vdpau_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+ {
++ MpegEncContext * const s = avctx->priv_data;
++ struct vdpau_render_state *render;
++ static const uint8_t start_code_prefix_one_3byte[3] = { 0x00, 0x00, 0x01 };
++
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
++ assert(render);
++
++ if (vdpau_ensure_bitstream_buffers(render) < 0)
++ return -1;
++
++ render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
++ render->bitstream_buffers[render->bitstream_buffers_used].bitstream = start_code_prefix_one_3byte;
++ render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = sizeof(start_code_prefix_one_3byte);
++ render->bitstream_buffers_used++;
++
++ if (vdpau_common_decode_slice(avctx, buffer, size) < 0)
++ return -1;
++
++ ++render->info.h264.slice_count;
++ return 0;
++}
++
++static int vdpau_mpeg2_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
++{
++ MpegEncContext * const s = avctx->priv_data;
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+- if (!s->current_picture_ptr) return;
++ if (!s->current_picture_ptr) return 0;
+
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG1Or2 struct */
+@@ -226,36 +304,47 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+
+ switch(s->pict_type){
+ case FF_B_TYPE:
+- next = (struct vdpau_render_state *)s->next_picture.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.data[3];
+ assert(next);
+ render->info.mpeg.backward_reference = next->surface;
+ // no return here, going to set forward prediction
+ case FF_P_TYPE:
+- last = (struct vdpau_render_state *)s->last_picture.data[0];
++ last = (struct vdpau_render_state *)s->last_picture.data[3];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.mpeg.forward_reference = last->surface;
+ }
+
+- ff_vdpau_add_data_chunk(s, buf, buf_size);
++ render->info.mpeg.slice_count = 0;
+
+- render->info.mpeg.slice_count = slice_count;
++ return vdpau_common_start_frame(avctx, buffer, size);
++}
+
+- if (slice_count)
+- ff_draw_horiz_band(s, 0, s->avctx->height);
+- render->bitstream_buffers_used = 0;
++static int vdpau_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
++{
++ MpegEncContext * const s = avctx->priv_data;
++ struct vdpau_render_state *render;
++
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
++ assert(render);
++
++ if (vdpau_common_decode_slice(avctx, buffer, size) < 0)
++ return -1;
++
++ ++render->info.mpeg.slice_count;
++ return 0;
+ }
+
+-void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
+- int buf_size)
++static int vdpau_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+ {
+- VC1Context *v = s->avctx->priv_data;
++ VC1Context * const v = avctx->priv_data;
++ MpegEncContext * const s = &v->s;
+ struct vdpau_render_state *render, *last, *next;
+
+- render = (struct vdpau_render_state *)s->current_picture.data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+- /* fill LvPictureInfoVC1 struct */
++ /* fill VdpPictureInfoVC1 struct */
+ render->info.vc1.frame_coding_mode = v->fcm;
+ render->info.vc1.postprocflag = v->postprocflag;
+ render->info.vc1.pulldown = v->broadcast;
+@@ -296,34 +385,47 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
+
+ switch(s->pict_type){
+ case FF_B_TYPE:
+- next = (struct vdpau_render_state *)s->next_picture.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.data[3];
+ assert(next);
+ render->info.vc1.backward_reference = next->surface;
+ // no break here, going to set forward prediction
+ case FF_P_TYPE:
+- last = (struct vdpau_render_state *)s->last_picture.data[0];
++ last = (struct vdpau_render_state *)s->last_picture.data[3];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.vc1.forward_reference = last->surface;
+ }
+
+- ff_vdpau_add_data_chunk(s, buf, buf_size);
++ render->info.vc1.slice_count = 0;
+
+- render->info.vc1.slice_count = 1;
++ return vdpau_common_start_frame(avctx, buffer, size);
++}
+
+- ff_draw_horiz_band(s, 0, s->avctx->height);
+- render->bitstream_buffers_used = 0;
++static int vdpau_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
++{
++ VC1Context * const v = avctx->priv_data;
++ MpegEncContext * const s = &v->s;
++ struct vdpau_render_state *render;
++
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
++ assert(render);
++
++ if (vdpau_common_decode_slice(avctx, buffer, size) < 0)
++ return -1;
++
++ ++render->info.vc1.slice_count;
++ return 0;
+ }
+
+-void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
+- int buf_size)
++static int vdpau_mpeg4_decode_picture(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+ {
++ MpegEncContext * const s = avctx->priv_data;
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+- if (!s->current_picture_ptr) return;
++ if (!s->current_picture_ptr) return 0;
+
+- render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->data[3];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG4Part2 struct */
+@@ -352,21 +454,122 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
+
+ switch (s->pict_type) {
+ case FF_B_TYPE:
+- next = (struct vdpau_render_state *)s->next_picture.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.data[3];
+ assert(next);
+ render->info.mpeg4.backward_reference = next->surface;
+ render->info.mpeg4.vop_coding_type = 2;
+ // no break here, going to set forward prediction
+ case FF_P_TYPE:
+- last = (struct vdpau_render_state *)s->last_picture.data[0];
++ last = (struct vdpau_render_state *)s->last_picture.data[3];
+ assert(last);
+ render->info.mpeg4.forward_reference = last->surface;
+ }
+
+- ff_vdpau_add_data_chunk(s, buf, buf_size);
++ if (vdpau_common_start_frame(avctx, buffer, size) < 0)
++ return -1;
+
+- ff_draw_horiz_band(s, 0, s->avctx->height);
+- render->bitstream_buffers_used = 0;
++ if (vdpau_common_decode_slice(avctx, buffer, size) < 0)
++ return -1;
++
++ return vdpau_common_end_frame(avctx);
++}
++
++static int vdpau_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
++{
++ /* The `buffer' can be modified beyond this point so we pass the
++     * whole picture now to the decoder and let user applications catch
++ * up immediately */
++ return vdpau_mpeg4_decode_picture(avctx, buffer, size);
+ }
+
++static int vdpau_mpeg4_decode_slice(av_unused AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
++{
++ /* SKIP: everything is done in vdpau_mpeg4_start_frame() */
++ return 0;
++}
++
++static int vdpau_mpeg4_end_frame(av_unused AVCodecContext *avctx)
++{
++ /* SKIP: everything is done in vdpau_mpeg4_start_frame() */
++ return 0;
++}
++
++#if CONFIG_MPEG1_VDPAU_HWACCEL
++AVHWAccel mpeg1_vdpau_hwaccel = {
++ .name = "mpeg1_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_MPEG1VIDEO,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_mpeg2_start_frame,
++ .end_frame = vdpau_common_end_frame,
++ .decode_slice = vdpau_mpeg2_decode_slice,
++};
++#endif
++
++#if CONFIG_MPEG2_VDPAU_HWACCEL
++AVHWAccel mpeg2_vdpau_hwaccel = {
++ .name = "mpeg2_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_MPEG2VIDEO,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_mpeg2_start_frame,
++ .end_frame = vdpau_common_end_frame,
++ .decode_slice = vdpau_mpeg2_decode_slice,
++};
++#endif
++
++#if CONFIG_H264_VDPAU_HWACCEL
++AVHWAccel h264_vdpau_hwaccel = {
++ .name = "h264_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_H264,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_h264_start_frame,
++ .end_frame = vdpau_common_end_frame,
++ .decode_slice = vdpau_h264_decode_slice,
++};
++#endif
++
++#if CONFIG_WMV3_VDPAU_HWACCEL
++AVHWAccel wmv3_vdpau_hwaccel = {
++ .name = "wmv3_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_WMV3,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_vc1_start_frame,
++ .end_frame = vdpau_common_end_frame,
++ .decode_slice = vdpau_vc1_decode_slice,
++};
++#endif
++
++#if CONFIG_VC1_VDPAU_HWACCEL
++AVHWAccel vc1_vdpau_hwaccel = {
++ .name = "vc1_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_VC1,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_vc1_start_frame,
++ .end_frame = vdpau_common_end_frame,
++ .decode_slice = vdpau_vc1_decode_slice,
++};
++#endif
++
++#if CONFIG_MPEG4_VDPAU_HWACCEL
++AVHWAccel mpeg4_vdpau_hwaccel = {
++ .name = "mpeg4_vdpau",
++ .type = CODEC_TYPE_VIDEO,
++ .id = CODEC_ID_MPEG4,
++ .pix_fmt = PIX_FMT_VDPAU,
++ .capabilities = 0,
++ .start_frame = vdpau_mpeg4_start_frame,
++ .end_frame = vdpau_mpeg4_end_frame,
++ .decode_slice = vdpau_mpeg4_decode_slice,
++};
++#endif
++
+ /* @}*/
+diff --git a/ffmpeg/libavutil/pixdesc.c b/ffmpeg/libavutil/pixdesc.c
+index 54f1d74..f01d16c 100644
+--- a/ffmpeg/libavutil/pixdesc.c
++++ b/ffmpeg/libavutil/pixdesc.c
+@@ -800,6 +800,12 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[PIX_FMT_NB] = {
+ {0,1,2,0,7}, /* A */
+ },
+ },
++ [PIX_FMT_VDPAU] = {
++ .name = "vdpau",
++ .log2_chroma_w = 1,
++ .log2_chroma_h = 1,
++ .flags = PIX_FMT_HWACCEL,
++ },
+ };
+
+ static enum PixelFormat get_pix_fmt_internal(const char *name)
+diff --git a/ffmpeg/libavutil/pixfmt.h b/ffmpeg/libavutil/pixfmt.h
+index 8ec91c8..2386a81 100644
+--- a/ffmpeg/libavutil/pixfmt.h
++++ b/ffmpeg/libavutil/pixfmt.h
+@@ -133,6 +133,7 @@ enum PixelFormat {
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
++ PIX_FMT_VDPAU, ///< HW decoding with VDPAU, Picture.data[3] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+ };
+
+diff --git a/fmt-conversion.c b/fmt-conversion.c
+index bd32ce8..8994caa 100644
+--- a/fmt-conversion.c
++++ b/fmt-conversion.c
+@@ -91,12 +91,6 @@ static const struct {
+
+ {IMGFMT_XVMC_MOCO_MPEG2, PIX_FMT_XVMC_MPEG2_MC},
+ {IMGFMT_XVMC_IDCT_MPEG2, PIX_FMT_XVMC_MPEG2_IDCT},
+- {IMGFMT_VDPAU_MPEG1, PIX_FMT_VDPAU_MPEG1},
+- {IMGFMT_VDPAU_MPEG2, PIX_FMT_VDPAU_MPEG2},
+- {IMGFMT_VDPAU_H264, PIX_FMT_VDPAU_H264},
+- {IMGFMT_VDPAU_WMV3, PIX_FMT_VDPAU_WMV3},
+- {IMGFMT_VDPAU_VC1, PIX_FMT_VDPAU_VC1},
+- {IMGFMT_VDPAU_MPEG4, PIX_FMT_VDPAU_MPEG4},
+
+ /* VA-API formats */
+ {IMGFMT_VAAPI_MPEG2, PIX_FMT_VAAPI_VLD, CODEC_ID_MPEG2VIDEO},
+@@ -108,6 +102,14 @@ static const struct {
+ {IMGFMT_VAAPI_WMV3, PIX_FMT_VAAPI_VLD, CODEC_ID_WMV3},
+ {IMGFMT_VAAPI_VC1, PIX_FMT_VAAPI_VLD, CODEC_ID_VC1},
+
++ /* VDPAU formats */
++ {IMGFMT_VDPAU_MPEG1, PIX_FMT_VDPAU, CODEC_ID_MPEG1VIDEO},
++ {IMGFMT_VDPAU_MPEG2, PIX_FMT_VDPAU, CODEC_ID_MPEG2VIDEO},
++ {IMGFMT_VDPAU_MPEG4, PIX_FMT_VDPAU, CODEC_ID_MPEG4},
++ {IMGFMT_VDPAU_H264, PIX_FMT_VDPAU, CODEC_ID_H264},
++ {IMGFMT_VDPAU_WMV3, PIX_FMT_VDPAU, CODEC_ID_WMV3},
++ {IMGFMT_VDPAU_VC1, PIX_FMT_VDPAU, CODEC_ID_VC1},
++
+ {0, PIX_FMT_NONE}
+ };
+
+diff --git a/libmpcodecs/vd_ffmpeg.c b/libmpcodecs/vd_ffmpeg.c
+index 84b5430..aef79e9 100644
+--- a/libmpcodecs/vd_ffmpeg.c
++++ b/libmpcodecs/vd_ffmpeg.c
+@@ -303,7 +303,7 @@ static int init(sh_video_t *sh){
+ }
+ #endif /* CONFIG_VAAPI */
+ #if CONFIG_VDPAU
+- if(lavc_codec->capabilities & CODEC_CAP_HWACCEL_VDPAU){
++ if(get_video_hwaccel() == HWACCEL_VDPAU){
+ avctx->get_format = get_format;
+ }
+ #endif /* CONFIG_VDPAU */
+diff --git a/libvo/vo_vdpau.c b/libvo/vo_vdpau.c
+index 5133b66..0ce6104 100644
+--- a/libvo/vo_vdpau.c
++++ b/libvo/vo_vdpau.c
+@@ -979,7 +979,7 @@ static int draw_slice(uint8_t *image[], int stride[], int w, int h,
+ int x, int y)
+ {
+ VdpStatus vdp_st;
+- struct vdpau_render_state *rndr = (struct vdpau_render_state *)image[0];
++ struct vdpau_render_state *rndr = (struct vdpau_render_state *)image[3];
+ int max_refs = image_format == IMGFMT_VDPAU_H264 ? rndr->info.h264.num_ref_frames : 2;
+
+ if (handle_preemption() < 0)
+@@ -1082,10 +1082,10 @@ static uint32_t get_image(mp_image_t *mpi)
+ return VO_FALSE;
+ }
+ mpi->flags |= MP_IMGFLAG_DIRECT;
+- mpi->stride[0] = mpi->stride[1] = mpi->stride[2] = 0;
+- mpi->planes[0] = mpi->planes[1] = mpi->planes[2] = NULL;
++ mpi->stride[0] = mpi->stride[1] = mpi->stride[2] = mpi->stride[3] = 0;
++ mpi->planes[0] = mpi->planes[1] = mpi->planes[2] = mpi->planes[3] = NULL;
+ // hack to get around a check and to avoid a special-case in vd_ffmpeg.c
+- mpi->planes[0] = (void *)rndr;
++ mpi->planes[0] = mpi->planes[3] = (void *)rndr;
+ mpi->num_planes = 1;
+ mpi->priv = rndr;
+ return VO_TRUE;