Commits

Anonymous committed 98dca64

Cleared out old, non-working code.

Files changed (5)

src/_ff_all.h

-#ifndef _FF_ALL_H_
-#define _FF_ALL_H_
-typedef struct PacketQueue {
-    AVPacketList *first_pkt, *last_pkt;
-    int nb_packets;
-    int size;
-    int abort_request;
-    SDL_mutex *mutex;
-    SDL_cond *cond;
-} PacketQueue;
-
-
-
-#if defined(__ICC) || defined(__SUNPRO_C)
-    #define DECLARE_ALIGNED(n,t,v)      t v __attribute__ ((aligned (n)))
-    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v
-#elif defined(__GNUC__)
-    #define DECLARE_ALIGNED(n,t,v)      t v __attribute__ ((aligned (n)))
-    #define DECLARE_ASM_CONST(n,t,v)    static const t v attribute_used __attribute__ ((aligned (n)))
-#elif defined(_MSC_VER)
-    #define DECLARE_ALIGNED(n,t,v)      __declspec(align(n)) t v
-    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v
-#elif HAVE_INLINE_ASM
-    #error The asm code needs alignment, but we do not know how to do it for this compiler.
-#else
-    #define DECLARE_ALIGNED(n,t,v)      t v
-    #define DECLARE_ASM_CONST(n,t,v)    static const t v
-#endif
-
-
-#endif /*_FF_ALL_H_*/
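
For reference, the DECLARE_ALIGNED macro removed here is what let the stream headers declare SIMD-safe buffers. A minimal sketch of the GCC expansion in use (the buffer name and size are illustrative, not from this commit):

    #include <stdint.h>

    /* GCC/ICC branch of the removed macro: request n-byte alignment so
     * aligned SSE loads/stores on the buffer are legal. */
    #define DECLARE_ALIGNED(n,t,v) t v __attribute__ ((aligned (n)))

    /* A 16-byte-aligned decode buffer, in the style of the audio_buf1
     * and audio_buf2 arrays in _ffmovie_aud.h below. */
    DECLARE_ALIGNED(16, uint8_t, decode_buf[4096]);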

src/_ffmovie_aud.h

-#ifndef _FFMOVIE_AUD_H_
-#define _FFMOVIE_AUD_H_
-
-#include <libavformat/avformat.h>
-#include <Python.h>
-#include "pygame.h"
-#include "_ff_all.h"
-
-
-typedef struct PyAudioStream
-{
-    PyObject_HEAD
-
-    //state control variables for pausing/seeking.
-    int paused;
-    int last_paused;
-    int seek_req;
-    int seek_flags;
-    int64_t seek_pos;
-    
-    //time-keeping values
-    double audio_clock;
-    double audio_diff_cum; /* used for AV difference average computation */
-    double audio_diff_avg_coef;
-    double audio_diff_threshold;
-    int audio_diff_avg_count;
-    
-    
-    AVStream *audio_st;     //audio stream
-    PacketQueue audioq;     //packet queue for audio packets
-    int audio_hw_buf_size;  //the size of the audio hardware buffer
-    /* samples output by the codec. we reserve more space for avsync
-       compensation */
-    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
-    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
-    uint8_t *audio_buf;
-
-    unsigned int audio_buf_size; /* in bytes */
-    int audio_buf_index; /* in bytes */
-    AVPacket audio_pkt;
-    uint8_t *audio_pkt_data;
-    int audio_pkt_size;
-    enum SampleFormat audio_src_fmt;
-    AVAudioConvert *reformat_ctx;
-
-    /*time-keeping values
-     *By default, all operating streams work on the same external clock
-     *It is only when streams are controlled individually that an individual clock is needed.
-     */
-    int av_sync_type;      /* Normally external. */
-    double external_clock; /* external clock base */
-    int64_t external_clock_time;
-    int64_t offset;        /*Offset for when the individual clock is used. This way if all the streams are playing, we can keep them synced up, but shifted. */
-
-    /* Frame-tracker values 
-     * Needed for syncing, time delay
-     */
-    double frame_timer;
-    double frame_last_pts;
-    double frame_last_delay;
-    double frame_offset;
-    int pts;
-    
-    int playing;
-    int loops;
-
-} PyAudioStream;
-
-
-// stream python stuff 
-
-static PyObject* _aud_stream_init_internal(PyObject *self); //expects file to have been opened in _aud_stream_new
-static PyObject* _aud_stream_init (PyObject *self, PyObject *args, PyObject *kwds);
-//static void _aud_stream_dealloc (PyAudioStream *audio);
-static void _dealloc_aud_stream(PyAudioStream *pas);
-static PyObject* _aud_stream_repr (PyAudioStream *audio);
-//static PyObject* _aud_stream_str (PyAudioStream *audio);
-static PyObject* _aud_stream_play(PyAudioStream *audio, PyObject* args);
-static PyObject* _aud_stream_stop(PyAudioStream *audio);
-static PyObject* _aud_stream_pause(PyAudioStream *audio);
-static PyObject* _aud_stream_rewind(PyAudioStream *audio, PyObject* args);
-
-/* Getters/setters */
-static PyObject* _aud_stream_get_paused (PyAudioStream *pvs, void *closure);
-static PyObject* _aud_stream_get_playing (PyAudioStream *pvs, void *closure);
-
-static PyMethodDef _audio_methods[] = {
-   { "play",    (PyCFunction) _aud_stream_play, METH_VARARGS,
-               "Play the movie file from current time-mark. If loop<0, then it will loop infinitely. If there is no loop value, then it will play once." },
-   { "stop", (PyCFunction) _aud_stream_stop, METH_NOARGS,
-                "Stop the movie, and set time-mark to 0:0"},
-   { "pause", (PyCFunction) _aud_stream_pause, METH_NOARGS,
-                "Pause movie."},
-   { "rewind", (PyCFunction) _aud_stream_rewind, METH_VARARGS,
-                "Rewind movie to time_pos. If there is no time_pos, same as stop."},
-   { NULL, NULL, 0, NULL }
-};
-
-static PyGetSetDef _audio_getsets[] =
-{
-    { "paused", (getter) _aud_stream_get_paused, NULL, NULL, NULL },
-    { "playing", (getter) _aud_stream_get_playing, NULL, NULL, NULL },
-    { NULL, NULL, NULL, NULL, NULL }
-};
-
-static PyTypeObject PyAudioStream_Type =
-{
-    PyObject_HEAD_INIT(NULL)
-    0, 
-    "pygame.gmovie.AudioStream",          /* tp_name */
-    sizeof (PyAudioStream),           /* tp_basicsize */
-    0,                          /* tp_itemsize */
-    (destructor) _dealloc_aud_stream,/* tp_dealloc */
-    0,                          /* tp_print */
-    0,                          /* tp_getattr */
-    0,                          /* tp_setattr */
-    0,                          /* tp_compare */
-    (reprfunc) _aud_stream_repr,     /* tp_repr */
-    0,                          /* tp_as_number */
-    0,                          /* tp_as_sequence */
-    0,                          /* tp_as_mapping */
-    0,                          /* tp_hash */
-    0,                          /* tp_call */
-    0,                          /* tp_str */
-    0,                          /* tp_getattro */
-    0,                          /* tp_setattro */
-    0,                          /* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
-    0,                          /* tp_doc */
-    0,                          /* tp_traverse */
-    0,                          /* tp_clear */
-    0,                          /* tp_richcompare */
-    0,                          /* tp_weaklistoffset */
-    0,                          /* tp_iter */
-    0,                          /* tp_iternext */
-    _audio_methods,             /* tp_methods */
-    0,                          /* tp_members */
-    _audio_getsets,             /* tp_getset */
-    0,                          /* tp_base */
-    0,                          /* tp_dict */
-    0,                          /* tp_descr_get */
-    0,                          /* tp_descr_set */
-    0,                          /* tp_dictoffset */
-    (initproc) _aud_stream_init,                          /* tp_init */
-    0,                          /* tp_alloc */
-    0,                 /* tp_new */
-    0,                          /* tp_free */
-    0,                          /* tp_is_gc */
-    0,                          /* tp_bases */
-    0,                          /* tp_mro */
-    0,                          /* tp_cache */
-    0,                          /* tp_subclasses */
-    0,                          /* tp_weaklist */
-    0                           /* tp_del */
-};
-
-
-static PyObject* _aud_stream_init_internal(PyObject *self)
-{
-    /* No arguments are expected here; the file is opened in _aud_stream_new. */
-    PySys_WriteStdout("Within _aud_stream_init_internal\n");    
-    
-    Py_INCREF(self);
-	//do stuff
-    
-    
-    //Py_DECREF((PyObject *) movie);
-    PySys_WriteStdout("_aud_stream_init_internal: Returning from _aud_stream_init_internal\n");
-    Py_DECREF(self);
-    return self;
-}
-
-static PyObject* _aud_stream_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-	Py_INCREF(self);
-    PySys_WriteStdout("Within _aud_stream_init\n");
-
-    PySys_WriteStdout("_aud_stream_init: Before _aud_stream_init_internal\n");
-    self = _aud_stream_init_internal(self);
-    PySys_WriteStdout("_aud_stream_init: After _aud_stream_init_internal\n");
-    PyObject *er;
-    er = PyErr_Occurred();
-    if(er)
-    {
-        PyErr_Print();
-    }
-    if(!self)
-    {
-        PyErr_SetString(PyExc_IOError, "No audio stream created.");
-        PyErr_Print();
-        Py_XDECREF(self);
-        Py_RETURN_NONE;
-    }
-    PySys_WriteStdout("Returning from _aud_stream_init\n");
-    return self;
-}
-
-static PyObject* _aud_stream_repr (PyAudioStream *audio)
-{
-    /*Eventually add a time-code call */
-    char buf[100];
-    //PySys_WriteStdout("_aud_stream_repr: %10s\n", audio->filename); 
-    PyOS_snprintf(buf, sizeof(buf), "(Audio Stream: %p)", audio);
-    return PyString_FromString(buf);
-}
-
-
-static PyObject* _aud_stream_play(PyAudioStream *audio, PyObject* args)
-{
-    PySys_WriteStdout("In _aud_stream_play\n");
-    int loops;
-    if(!PyArg_ParseTuple(args, "i", &loops))
-    {
-        PyErr_SetString(PyExc_TypeError, "Not a valid argument.");
-        Py_RETURN_NONE;
-    }
-    PySys_WriteStdout("_aud_stream_play: loops set to: %i\n", loops);
-    audio->loops = loops;
-    audio->paused = 0;
-    audio->playing = 1;
-    Py_RETURN_NONE;
-}
-
-static PyObject* _aud_stream_stop(PyAudioStream *audio)
-{
-    //stream_pause(audio);
-    audio->seek_req = 1;
-    audio->seek_pos = 0;
-    audio->seek_flags = AVSEEK_FLAG_BACKWARD;
-    Py_RETURN_NONE;
-}  
-
-static PyObject* _aud_stream_pause(PyAudioStream *audio)
-{
-    //stream_pause(audio); 
-    Py_RETURN_NONE;
-}
-
-static PyObject* _aud_stream_rewind(PyAudioStream *audio, PyObject* args)
-{
-    /* For now, just alias rewind to stop */
-    return _aud_stream_stop(audio);
-}
-
-static PyObject* _aud_stream_get_paused (PyAudioStream *audio, void *closure)
-{
-    return PyInt_FromLong((long)audio->paused);
-}
-static PyObject* _aud_stream_get_playing (PyAudioStream *audio, void *closure)
-{
-    PyObject *pyo;
-    pyo= PyInt_FromLong((long)audio->playing);
-    return pyo;
-}
-
-
-
-#endif /*_FFMOVIE_AUD_H_*/
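
A note on the type above: PyAudioStream_Type leaves tp_new and tp_alloc as 0, so whichever module included this header had to ready and register the type itself. A minimal sketch of that step under the Python 2.x C API (the function name and the "AudioStream" attribute spelling are assumptions, not from this commit):

    #include <Python.h>

    extern PyTypeObject PyAudioStream_Type;

    static int register_audio_stream(PyObject *module)
    {
        /* Fill inherited slots from the base type and sanity-check
         * the static type object. */
        if (PyType_Ready(&PyAudioStream_Type) < 0)
            return -1;
        /* PyModule_AddObject steals a reference on success. */
        Py_INCREF(&PyAudioStream_Type);
        return PyModule_AddObject(module, "AudioStream",
                                  (PyObject *)&PyAudioStream_Type);
    }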

src/_ffmovie_vid.h

-#ifndef _FFMOVIE_VID_H_
-#define _FFMOVIE_VID_H_
-
-#include <libavformat/avformat.h>
-#include <Python.h>
-#include "pygame.h"
-#include "_ff_all.h"
-
-//Struct to display the video image. 
-typedef struct {
-    double pts;         //presentation time stamp for this picture, used for syncing
-    int width, height;
-    int allocated;      //nonzero once the structure has been allocated, 0 if not.
-} VideoPicture;
-
-#define VIDEO_PICTURE_QUEUE_SIZE 1
-
-typedef struct PyVideoStream
-{
-    PyObject_HEAD
-
-    SDL_Surface *out_surf; /*surface to output video to. If surface is the display surface, 
-                         * then we can use overlay code. Otherwise, we use the python interface.
-                         */
-    SDL_Overlay *bmp;
-    SDL_Thread *video_tid;  //thread id for the video thread
-    int rgb;                //if true, must convert image data to rgb before writing to it. 
-    //int no_background;    //Not needed or relevant when we're working with pygame. ;)
-    
-    //state values for pausing and seeking
-    int paused;          
-    int playing;
-    int last_paused;
-    int seek_req;
-    int seek_flags;
-    int64_t seek_pos;
-
-    /*time-keeping values
-     *By default, all operating streams work on the same external clock
-     *It is only when streams are controlled individually that an individual clock is needed.
-     */
-    int av_sync_type;      /* Normally external. */
-    double external_clock; /* external clock base */
-    int64_t external_clock_time;
-    int64_t offset;        /*Offset for when the individual clock is used. This way if all the streams are playing, we can keep them synced up, but shifted. */
-
-    /* Frame-tracker values 
-     * Needed for syncing, time delay
-     */
-    double frame_timer;
-    double frame_last_pts;
-    double frame_last_delay;
-    double frame_offset;
-    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
-    
-    //Video stream struct.
-    AVStream *video_st;
-    //video queue, with video packets
-    PacketQueue videoq;
-    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
-    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
-    
-    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; //queue of VideoPicture objects, ring-buffer structure. Normally size of 1.
-    int pictq_size, pictq_rindex, pictq_windex;
-    SDL_mutex *pictq_mutex;
-    SDL_cond *pictq_cond;
-
-    //    QETimer *video_timer;
-    int width, height, xleft, ytop;
-
-    int pts;
-    
-    int overlay;
-    
-    int loops;
-
-} PyVideoStream;
-
-
-// stream python stuff 
-
-static PyObject* _vid_stream_init_internal(PyObject *self,  PyObject* surface); //expects file to have been opened in _vid_stream_new
-static PyObject* _vid_stream_init (PyObject *self, PyObject *args, PyObject *kwds);
-//static void _vid_stream_dealloc (PyVideoStream *video);
-static void _dealloc_vid_stream(PyVideoStream *pvs);
-static PyObject* _vid_stream_repr (PyVideoStream *video);
-//static PyObject* _vid_stream_str (PyVideoStream *video);
-static PyObject* _vid_stream_play(PyVideoStream *video, PyObject* args);
-static PyObject* _vid_stream_stop(PyVideoStream *video);
-static PyObject* _vid_stream_pause(PyVideoStream *video);
-static PyObject* _vid_stream_rewind(PyVideoStream *video, PyObject* args);
-
-/* Getters/setters */
-static PyObject* _vid_stream_get_paused (PyVideoStream *pvs, void *closure);
-static PyObject* _vid_stream_get_playing (PyVideoStream *pvs, void *closure);
-
-static PyMethodDef _video_methods[] = {
-   { "play",    (PyCFunction) _vid_stream_play, METH_VARARGS,
-               "Play the movie file from current time-mark. If loop<0, then it will loop infinitely. If there is no loop value, then it will play once." },
-   { "stop", (PyCFunction) _vid_stream_stop, METH_NOARGS,
-                "Stop the movie, and set time-mark to 0:0"},
-   { "pause", (PyCFunction) _vid_stream_pause, METH_NOARGS,
-                "Pause movie."},
-   { "rewind", (PyCFunction) _vid_stream_rewind, METH_VARARGS,
-                "Rewind movie to time_pos. If there is no time_pos, same as stop."},
-   { NULL, NULL, 0, NULL }
-};
-
-static PyGetSetDef _video_getsets[] =
-{
-    { "paused", (getter) _vid_stream_get_paused, NULL, NULL, NULL },
-    { "playing", (getter) _vid_stream_get_playing, NULL, NULL, NULL },
-    { NULL, NULL, NULL, NULL, NULL }
-};
-
-static PyTypeObject PyVideoStream_Type =
-{
-    PyObject_HEAD_INIT(NULL)
-    0, 
-    "pygame.gmovie.VideoStream",          /* tp_name */
-    sizeof (PyVideoStream),           /* tp_basicsize */
-    0,                          /* tp_itemsize */
-    (destructor) _dealloc_vid_stream,/* tp_dealloc */
-    0,                          /* tp_print */
-    0,                          /* tp_getattr */
-    0,                          /* tp_setattr */
-    0,                          /* tp_compare */
-    (reprfunc) _vid_stream_repr,     /* tp_repr */
-    0,                          /* tp_as_number */
-    0,                          /* tp_as_sequence */
-    0,                          /* tp_as_mapping */
-    0,                          /* tp_hash */
-    0,                          /* tp_call */
-    0,                          /* tp_str */
-    0,                          /* tp_getattro */
-    0,                          /* tp_setattro */
-    0,                          /* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
-    0,                          /* tp_doc */
-    0,                          /* tp_traverse */
-    0,                          /* tp_clear */
-    0,                          /* tp_richcompare */
-    0,                          /* tp_weaklistoffset */
-    0,                          /* tp_iter */
-    0,                          /* tp_iternext */
-    _video_methods,             /* tp_methods */
-    0,                          /* tp_members */
-    _video_getsets,             /* tp_getset */
-    0,                          /* tp_base */
-    0,                          /* tp_dict */
-    0,                          /* tp_descr_get */
-    0,                          /* tp_descr_set */
-    0,                          /* tp_dictoffset */
-    (initproc)_vid_stream_init,                          /* tp_init */
-    0,                          /* tp_alloc */
-    0,                 /* tp_new */
-    0,                          /* tp_free */
-    0,                          /* tp_is_gc */
-    0,                          /* tp_bases */
-    0,                          /* tp_mro */
-    0,                          /* tp_cache */
-    0,                          /* tp_subclasses */
-    0,                          /* tp_weaklist */
-    0                           /* tp_del */
-};
-
-
-static PyObject* _vid_stream_init_internal(PyObject *self, PyObject *surface)
-{
-	    /*Expects filename. If surface is null, then it sets overlay to >0. */
-    PySys_WriteStdout("Within _vid_stream_init_internal\n");    
-    Py_INCREF(self);
-	PyVideoStream *pvs;
-	pvs=(PyVideoStream *)self;
-	Py_INCREF(pvs);
-    if(!surface)
-    {
-        PySys_WriteStdout("_vid_stream_init_internal: Overlay=True\n");
-        pvs->overlay=1;
-    }
-    else
-    {
-        PySys_WriteStdout("_vid_stream_init_internal: Overlay=False\n");
-        SDL_Surface *surf;
-        surf = PySurface_AsSurface(surface);
-        pvs->out_surf=surf;
-        pvs->overlay=0;
-    }
-    
-    //Py_DECREF((PyObject *) movie);
-    Py_DECREF(pvs);
-    Py_DECREF(self);
-    PySys_WriteStdout("_vid_stream_init_internal: Returning from _vid_stream_init_internal\n");
-    return (PyObject *)pvs;
-}
-
-static PyObject* _vid_stream_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-	Py_INCREF(self);
-    PyObject *obj1;
-    PySys_WriteStdout("Within _vid_stream_init\n");
-    if (!PyArg_ParseTuple (args, "O", &obj1))
-    {
-        PyErr_SetString(PyExc_TypeError, "No valid arguments");
-        //Py_RETURN_NONE;
-    	return 0;
-    }
-    PySys_WriteStdout("_vid_stream_init: after PyArg_ParseTuple\n"); 
-
-    PySys_WriteStdout("_vid_stream_init: Before _vid_stream_init_internal\n");
-    self = _vid_stream_init_internal(self, obj1);
-    PySys_WriteStdout("_vid_stream_init: After _vid_stream_init_internal\n");
-    PyObject *er;
-    er = PyErr_Occurred();
-    if(er)
-    {
-        PyErr_Print();
-    }
-    if(!self)
-    {
-        PyErr_SetString(PyExc_IOError, "No video stream created.");
-        PyErr_Print();
-    }
-    PySys_WriteStdout("Returning from _vid_stream_init\n");
-    return self;
-}
-
-static PyObject* _vid_stream_repr (PyVideoStream *video)
-{
-    /*Eventually add a time-code call */
-    char buf[100];
-    //PySys_WriteStdout("_vid_stream_repr: %10s\n", video->filename); 
-    PyOS_snprintf(buf, sizeof(buf), "(Video Stream: %p)", video);
-    return PyString_FromString(buf);
-}
-
-
-static PyObject* _vid_stream_play(PyVideoStream *video, PyObject* args)
-{
-    PySys_WriteStdout("In _vid_stream_play\n");
-    int loops;
-    if(!PyArg_ParseTuple(args, "i", &loops))
-    {
-        PyErr_SetString(PyExc_TypeError, "Not a valid argument.");
-        Py_RETURN_NONE;
-    }
-    PySys_WriteStdout("_vid_stream_play: loops set to: %i\n", loops);
-    video->loops = loops;
-    video->paused = 0;
-    video->playing = 1;
-    Py_RETURN_NONE;
-}
-
-static PyObject* _vid_stream_stop(PyVideoStream *video)
-{
-    //stream_pause(video);
-    video->seek_req = 1;
-    video->seek_pos = 0;
-    video->seek_flags = AVSEEK_FLAG_BACKWARD;
-    Py_RETURN_NONE;
-}  
-
-static PyObject* _vid_stream_pause(PyVideoStream *video)
-{
-    //stream_pause(video); 
-    Py_RETURN_NONE;
-}
-
-static PyObject* _vid_stream_rewind(PyVideoStream *video, PyObject* args)
-{
-    /* For now, just alias rewind to stop */
-    return _vid_stream_stop(video);
-}
-
-static PyObject* _vid_stream_get_paused (PyVideoStream *video, void *closure)
-{
-    return PyInt_FromLong((long)video->paused);
-}
-static PyObject* _vid_stream_get_playing (PyVideoStream *video, void *closure)
-{
-    PyObject *pyo;
-    pyo= PyInt_FromLong((long)video->playing);
-    return pyo;
-}
-
-	
-
-
-
-#endif /*_FFMOVIE_VID_H_*/
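
One design point worth preserving from this header: pictq is a single-slot ring buffer (VIDEO_PICTURE_QUEUE_SIZE is 1), coordinated through the pictq_mutex/pictq_cond pair. A stripped-down sketch of the producer-side handshake, mirroring the wait loop that queue_picture() in ff_movie.c used (struct and function names here are shortened stand-ins):

    #include <SDL.h>

    #define PICTQ_SIZE 1

    typedef struct {
        int size, windex;
        int abort_request;
        SDL_mutex *mutex;
        SDL_cond *cond;
    } PictQ;

    /* Decoder thread: block until the display thread frees the slot,
     * then return the write index (or -1 if playback is aborting). */
    static int pictq_wait_writable(PictQ *q)
    {
        SDL_LockMutex(q->mutex);
        while (q->size >= PICTQ_SIZE && !q->abort_request)
            SDL_CondWait(q->cond, q->mutex);
        SDL_UnlockMutex(q->mutex);
        return q->abort_request ? -1 : q->windex;
    }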

src/ff_movie.c

-/* Tyler Laing
- * May 16, 2009
- * ff_movie.c
- * Wrapper around ffmpeg libraries for use with pygame/python
- * Imported by movie.py(hopefully)
- */
-
-#include "ff_movie.h"
-
-#ifdef __MINGW32__
-#undef main /* We don't want SDL to override our main() */
-#endif
-
-#undef exit
-
-//#define DEBUG_SYNC
-/* options specified by the user */
-
-static int frame_width = 600;
-static int frame_height = 420;
-static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
-static int audio_disable;
-static int video_disable;
-static int wanted_audio_stream= 1;
-static int wanted_video_stream= 1;
-static int wanted_subtitle_stream= 0;
-static int av_sync_type = AV_SYNC_AUDIO_MASTER;
-static int64_t start_time = AV_NOPTS_VALUE;
-static int debug = 0;
-static int debug_mv = 0;
-static int step = 0;
-static int thread_count = 1;
-static int workaround_bugs = 1;
-static int fast = 0;
-static int genpts = 0;
-static int lowres = 0;
-static int idct = FF_IDCT_AUTO;
-static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
-static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
-static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
-static int error_recognition = FF_ER_CAREFUL;
-static int error_concealment = 3;
-static int decoder_reorder_pts= 0;
-
-/* current context */
-static int64_t audio_callback_time;
-
-static AVPacket flush_pkt;
-
-#define FF_ALLOC_EVENT   (SDL_USEREVENT)
-#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
-#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
-
-static PyAudioStream* _new_audio_stream(void);
-static PyVideoStream* _new_video_stream(void);
-static PySubtitleStream* _new_sub_stream(void);
-
-static int sws_flags = SWS_BICUBIC; /* referenced by sws_getCachedContext() in queue_picture() */
-
-
-/*internal functions for video playing. Not accessible to Python */
-
-
-/* packet queue handling */
-static void packet_queue_init(PacketQueue *q)
-{
-    memset(q, 0, sizeof(PacketQueue));
-    q->mutex = SDL_CreateMutex();
-    q->cond = SDL_CreateCond();
-}
-
-static void packet_queue_flush(PacketQueue *q)
-{
-    AVPacketList *pkt, *pkt1;
-
-    SDL_LockMutex(q->mutex);
-    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
-        pkt1 = pkt->next;
-        av_free_packet(&pkt->pkt);
-        av_freep(&pkt);
-    }
-    q->last_pkt = NULL;
-    q->first_pkt = NULL;
-    q->nb_packets = 0;
-    q->size = 0;
-    SDL_UnlockMutex(q->mutex);
-}
-
-static void packet_queue_end(PacketQueue *q)
-{
-    packet_queue_flush(q);
-    SDL_DestroyMutex(q->mutex);
-    SDL_DestroyCond(q->cond);
-}
-
-static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
-{
-    AVPacketList *pkt1;
-
-    /* duplicate the packet */
-    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
-        return -1;
-
-    pkt1 = av_malloc(sizeof(AVPacketList));
-    if (!pkt1)
-        return -1;
-    pkt1->pkt = *pkt;
-    pkt1->next = NULL;
-
-
-    SDL_LockMutex(q->mutex);
-
-    if (!q->last_pkt)
-
-        q->first_pkt = pkt1;
-    else
-        q->last_pkt->next = pkt1;
-    q->last_pkt = pkt1;
-    q->nb_packets++;
-    q->size += pkt1->pkt.size + sizeof(*pkt1);
-    /* XXX: should duplicate packet data in DV case */
-    SDL_CondSignal(q->cond);
-
-    SDL_UnlockMutex(q->mutex);
-    return 0;
-}
-
-static void packet_queue_abort(PacketQueue *q)
-{
-    SDL_LockMutex(q->mutex);
-
-    q->abort_request = 1;
-
-    SDL_CondSignal(q->cond);
-
-    SDL_UnlockMutex(q->mutex);
-}
-
-/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
-static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
-{
-    AVPacketList *pkt1;
-    int ret;
-
-    SDL_LockMutex(q->mutex);
-
-    for(;;) {
-        if (q->abort_request) {
-            ret = -1;
-            break;
-        }
-
-        pkt1 = q->first_pkt;
-        if (pkt1) {
-            q->first_pkt = pkt1->next;
-            if (!q->first_pkt)
-                q->last_pkt = NULL;
-            q->nb_packets--;
-            q->size -= pkt1->pkt.size + sizeof(*pkt1);
-            *pkt = pkt1->pkt;
-            av_free(pkt1);
-            ret = 1;
-            break;
-        } else if (!block) {
-            ret = 0;
-            break;
-        } else {
-            SDL_CondWait(q->cond, q->mutex);
-        }
-    }
-    SDL_UnlockMutex(q->mutex);
-    return ret;
-}
-
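
The queue helpers above form a blocking producer/consumer channel: the demux loop calls packet_queue_put(), each decoder thread calls packet_queue_get() with block=1, and packet_queue_abort() wakes everyone for shutdown. A minimal consumer sketch against the removed API (the decode step is elided):

    static int drain_queue(PacketQueue *q)
    {
        AVPacket pkt;
        for (;;) {
            /* Blocks until a packet arrives; < 0 means abort was requested. */
            if (packet_queue_get(q, &pkt, 1) < 0)
                return -1;
            /* ... hand pkt.data / pkt.size to the codec here ... */
            av_free_packet(&pkt); /* release the duplicated payload */
        }
    }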
-#define BPP 1
-
-static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
-{
-    int wrap, wrap3, width2, skip2;
-    int y, u, v, a, u1, v1, a1, w, h;
-    uint8_t *lum, *cb, *cr;
-    const uint8_t *p;
-    const uint32_t *pal;
-    int dstx, dsty, dstw, dsth;
-
-    dstw = av_clip(rect->w, 0, imgw);
-    dsth = av_clip(rect->h, 0, imgh);
-    dstx = av_clip(rect->x, 0, imgw - dstw);
-    dsty = av_clip(rect->y, 0, imgh - dsth);
-    lum = dst->data[0] + dsty * dst->linesize[0];
-    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
-    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
-
-    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
-    skip2 = dstx >> 1;
-    wrap = dst->linesize[0];
-    wrap3 = rect->pict.linesize[0];
-    p = rect->pict.data[0];
-    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
-
-    if (dsty & 1) {
-        lum += dstx;
-        cb += skip2;
-        cr += skip2;
-
-        if (dstx & 1) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-            cb++;
-            cr++;
-            lum++;
-            p += BPP;
-        }
-        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += 2 * BPP;
-            lum += 2;
-        }
-        if (w) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-            p++;
-            lum++;
-        }
-        p += wrap3 - dstw * BPP;
-        lum += wrap - dstw - dstx;
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
-    }
-    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
-        lum += dstx;
-        cb += skip2;
-        cr += skip2;
-
-        if (dstx & 1) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            p += wrap3;
-            lum += wrap;
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += -wrap3 + BPP;
-            lum += -wrap + 1;
-        }
-        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-            p += wrap3;
-            lum += wrap;
-
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
-
-            cb++;
-            cr++;
-            p += -wrap3 + 2 * BPP;
-            lum += -wrap + 2;
-        }
-        if (w) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            p += wrap3;
-            lum += wrap;
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += -wrap3 + BPP;
-            lum += -wrap + 1;
-        }
-        p += wrap3 + (wrap3 - dstw * BPP);
-        lum += wrap + (wrap - dstw - dstx);
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
-    }
-    /* handle odd height */
-    if (h) {
-        lum += dstx;
-        cb += skip2;
-        cr += skip2;
-
-        if (dstx & 1) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-            cb++;
-            cr++;
-            lum++;
-            p += BPP;
-        }
-        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += 2 * BPP;
-            lum += 2;
-        }
-        if (w) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-        }
-    }
-}
-static void free_subpicture(SubPicture *sp)
-{
-    int i;
-
-    for (i = 0; i < sp->sub.num_rects; i++)
-    {
-        av_freep(&sp->sub.rects[i]->pict.data[0]);
-        av_freep(&sp->sub.rects[i]->pict.data[1]);
-        av_freep(&sp->sub.rects[i]);
-    }
-
-    av_free(sp->sub.rects);
-
-    memset(&sp->sub, 0, sizeof(AVSubtitle));
-}
-
-static void video_image_display(PyMovie *is)
-{
-    Py_INCREF( is);
-    VideoPicture *vp;
-    SubPicture *sp;
-    AVPicture pict;
-    float aspect_ratio;
-    int width, height, x, y;
-    SDL_Rect rect;
-    int i;
-
-    //vp = &is->pictq[is->pictq_rindex];
-    PyVideoStream *pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-    
-    vp = &pvs->pictq[pvs->pictq_rindex];
-    if (pvs->out_surf || pvs->bmp) {
-        /* XXX: use variable in the frame */
-        if (pvs->video_st->sample_aspect_ratio.num)
-            aspect_ratio = av_q2d(pvs->video_st->sample_aspect_ratio);
-        else if (pvs->video_st->codec->sample_aspect_ratio.num)
-            aspect_ratio = av_q2d(pvs->video_st->codec->sample_aspect_ratio);
-        else
-            aspect_ratio = 0;
-        if (aspect_ratio <= 0.0)
-            aspect_ratio = 1.0;
-        aspect_ratio *= (float)pvs->video_st->codec->width / pvs->video_st->codec->height;
-        /* if an active format is indicated, then it overrides the
-           mpeg format */
-#if 0
-        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
-            is->dtg_active_format = is->video_st->codec->dtg_active_format;
-            printf("dtg_active_format=%d\n", is->dtg_active_format);
-        }
-#endif
-#if 0
-        switch(is->video_st->codec->dtg_active_format) {
-        case FF_DTG_AFD_SAME:
-        default:
-            /* nothing to do */
-            break;
-        case FF_DTG_AFD_4_3:
-            aspect_ratio = 4.0 / 3.0;
-            break;
-        case FF_DTG_AFD_16_9:
-            aspect_ratio = 16.0 / 9.0;
-            break;
-        case FF_DTG_AFD_14_9:
-            aspect_ratio = 14.0 / 9.0;
-            break;
-        case FF_DTG_AFD_4_3_SP_14_9:
-            aspect_ratio = 14.0 / 9.0;
-            break;
-        case FF_DTG_AFD_16_9_SP_14_9:
-            aspect_ratio = 14.0 / 9.0;
-            break;
-        case FF_DTG_AFD_SP_4_3:
-            aspect_ratio = 4.0 / 3.0;
-            break;
-        }
-#endif
-        
-        if (is->subtitle_stream>-1)
-        {
-            PySubtitleStream *pss;
-            pss = (PySubtitleStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->subtitle_stream);
-            Py_INCREF(pss);
-            
-            if (pss->subpq_size > 0)
-            {
-                sp = &pss->subpq[pss->subpq_rindex];
-
-                if (pvs->pts >= pss->pts + ((float) sp->sub.start_display_time / 1000))
-                {
-
-                    if(is->overlay>0)
-                    {
-                        SDL_LockYUVOverlay (pvs->bmp);
-
-                        pict.data[0] = pvs->bmp->pixels[0];
-                        pict.data[1] = pvs->bmp->pixels[2];
-                        pict.data[2] = pvs->bmp->pixels[1];
-
-                        pict.linesize[0] = pvs->bmp->pitches[0];
-                        pict.linesize[1] = pvs->bmp->pitches[2];
-                        pict.linesize[2] = pvs->bmp->pitches[1];
-
-                        for (i = 0; i < sp->sub.num_rects; i++)
-                            blend_subrect(&pict, sp->sub.rects[i],
-                                          pvs->bmp->w, pvs->bmp->h);
-
-                        SDL_UnlockYUVOverlay (pvs->bmp);
-                    }
-                    else
-                    {
-                        /*if (pvs->out_surf->flags & SDL_OPENGL && !(pvs->out_surf->flags & (SDL_OPENGLBLIT & ~SDL_OPENGL)))
-                                return RAISE (PyExc_SDLError,
-                                              "Cannot blit to OPENGL Surfaces (OPENGLBLIT is ok)");*/
-                        //TODO:fix blitting to surface, and blend_subrect
-                        #if 0
-                        SDL_LockSurface(pvs->out_surf);
-                        pict.data[0] = (Uint8 *)pvs->out_surf->pixels[0];
-                        pict.data[1] = pvs->out_surf->pixels[1];
-                        pict.data[2] = pvs->out_surf->pixels[2];
-
-                        pict.linesize[0] = pvs->out_surf->pitch;
-                        pict.linesize[1] = pvs->out_surf->pitch;
-                        pict.linesize[2] = pvs->out_surf->pitch;
-
-                        for (i = 0; i < sp->sub.num_rects; i++)
-                            //TODO:check if blend_subrect works with RGB
-                            blend_subrect(&pict, sp->sub.rects[i],
-                                          pvs->out_surf->w, pvs->out_surf->h);
-                                              
-                        SDL_UnlockSurface(pvs->out_surf);
-                        #endif
-                    }
-                }
-            }
-        Py_DECREF(pss);
-        }
-
-
-        /* XXX: we suppose the screen has a 1.0 pixel ratio */
-        height = pvs->height;
-        width = ((int)rint(height * aspect_ratio)) & ~1;
-        if (width > pvs->width) {
-            width = pvs->width;
-            height = ((int)rint(width / aspect_ratio)) & ~1;
-        }
-        x = (pvs->width - width) / 2;
-        y = (pvs->height - height) / 2;
-       
-        rect.x = pvs->xleft + x;
-        rect.y = pvs->ytop  + y;
-        rect.w = width;
-        rect.h = height;
-        if(is->overlay>0) 
-        {       
-            SDL_DisplayYUVOverlay(pvs->bmp, &rect);
-        }
-        
-    } else {
-#if 0
-        fill_rectangle(screen,
-                       is->xleft, is->ytop, is->width, is->height,
-                       QERGB(0x00, 0x00, 0x00));
-#endif
-    }
-    Py_DECREF(pvs);
-    Py_DECREF( is);
-}
-
-
-static inline int compute_mod(int a, int b)
-{
-    a = a % b;
-    if (a >= 0)
-        return a;
-    else
-        return a + b;
-}
-
-static int video_open(PyMovie *is){
-    int w,h;
-    Py_INCREF( is);
-    PyVideoStream *pvs;
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF( pvs);
-    
-    w = pvs->video_st->codec->width;
-    h = pvs->video_st->codec->height;
-
-    if(!pvs->bmp && is->overlay>0)
-    {
-        //now we have to open an overlay up
-        SDL_Surface *screen;
-        if (!SDL_WasInit (SDL_INIT_VIDEO))
-        {
-        	RAISE(PyExc_SDLError,"cannot create overlay without pygame.display initialized");
-        	return -1;
-        }
-        screen = SDL_GetVideoSurface ();
-        if (!screen)
-		{
-            RAISE (PyExc_SDLError, "Display mode not set");
-        	return -1;
-		}
-        pvs->bmp = SDL_CreateYUVOverlay (w, h, SDL_YV12_OVERLAY, screen);
-        if (!pvs->bmp)
-        {
-            RAISE (PyExc_SDLError, "Cannot create overlay");
-			return -1;
-        }
-    } 
-    else if (!pvs->out_surf && is->overlay<=0)
-    {
-        int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
-        //we create a pygame surface
-        SDL_Surface *screen;
-        #ifndef __APPLE__
-        screen = SDL_SetVideoMode(w, h, 0, flags);
-        #else
-        /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
-        screen = SDL_SetVideoMode(w, h, 24, flags);
-        #endif
-        pvs->out_surf = screen; /* surface returned by SDL_SetVideoMode above */
-        if (!pvs->out_surf)
-        {
-            RAISE (PyExc_SDLError, "Could not create Surface object");
-        	return -1;
-        }
-    }
-
-
-    pvs->width = w;
-    pvs->height = h;
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-    return 0;
-}
-
-/* display the current picture, if any */
-static void video_display(PyMovie *is)
-{
-    Py_INCREF( is);
-    PyVideoStream *pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF( pvs);
-    
-    if (!pvs->out_surf||!pvs->bmp)
-        video_open(is);
-
-    else if (is->vid_stream_ix>0)
-        video_image_display(is);
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-}
-
-static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
-{
-    Py_INCREF((PyObject *) opaque);
-    SDL_Event event;
-    event.type = FF_REFRESH_EVENT;
-    event.user.data1 = opaque;
-    SDL_PushEvent(&event);
-    Py_DECREF((PyObject *) opaque);
-    return 0; /* 0 means stop timer */
-}
-
-/* schedule a video refresh in 'delay' ms */
-static void schedule_refresh(PyMovie *is, int delay)
-{
-    Py_INCREF( is);
-    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
-    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
-    Py_DECREF( is);
-}
-
-
-static int audio_write_get_buf_size(PyMovie *is)
-{
-    Py_INCREF( is);
-    PyAudioStream *pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF( pas);
-    int temp = pas->audio_buf_size - pas->audio_buf_index;
-    Py_DECREF( pas);
-    Py_DECREF( is);
-    return temp;
-}
-
-/* get the current audio clock value */
-static double get_audio_clock(PyMovie *is)
-{
-    Py_INCREF( is);
-    double pts;
-    int hw_buf_size, bytes_per_sec;
-
-    PyAudioStream *pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF( pas);
-    
-    pts = pas->audio_clock;
-    hw_buf_size = audio_write_get_buf_size(is);
-    bytes_per_sec = 0;
-    if (pas->audio_st) {
-        bytes_per_sec = pas->audio_st->codec->sample_rate *
-            2 * pas->audio_st->codec->channels;
-    }
-    if (bytes_per_sec)
-        pts -= (double)hw_buf_size / bytes_per_sec;
-    Py_DECREF( pas);
-    Py_DECREF( is);
-    return pts;
-}
-
-/* get the current video clock value */
-static double get_video_clock(PyMovie *is)
-{
-    Py_INCREF( is);
-    double delta;
-    PyVideoStream *pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-    
-    if (pvs->paused) {
-        delta = 0;
-    } else {
-        delta = (av_gettime() - pvs->video_current_pts_time) / 1000000.0;
-    }
-    double temp = pvs->video_current_pts+delta;
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-    return temp;
-}
-
-/* get the current external clock value */
-static double get_external_clock(PyMovie *is)
-{
-    Py_INCREF( is);
-    int64_t ti;
-    ti = av_gettime();
-    double res = is->external_clock + ((ti - is->external_clock_time) * 1e-6);
-    Py_DECREF( is);
-    return res;
-}
-
-/* get the current master clock value */
-static double get_master_clock(PyMovie *is)
-{
-    Py_INCREF( is);
-    double val;
-    PyVideoStream *pvs;
-    PyAudioStream *pas;
-    
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-    
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-    
-    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
-        if (pvs->video_st)
-            val = get_video_clock(is);
-        else
-            val = get_audio_clock(is);
-    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
-        if (pas->audio_st)
-            val = get_audio_clock(is);
-        else
-            val = get_video_clock(is);
-    } else {
-        val = get_external_clock(is);
-    }
-    Py_DECREF(pvs);
-    Py_DECREF(pas);
-    Py_DECREF( is);
-    return val;
-}
-
-/* seek in the stream */
-static void stream_seek(PyMovie *is, int64_t pos, int rel)
-{
-    Py_INCREF( is);
-    if (!is->seek_req) {
-        is->seek_pos = pos;
-        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
-
-        is->seek_req = 1;
-    }
-    Py_DECREF( is);
-}
-
-/* pause or resume the video */
-static void stream_pause(PyMovie *is)
-{
-    Py_INCREF( is);
-    is->paused = !is->paused;
-    if (!is->paused) {
-        PyVideoStream *pvs;
-        PyAudioStream *pas;
-        pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-        Py_INCREF(pvs);
-        
-        pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-        Py_INCREF(pas);
-
-        pvs->video_current_pts = get_video_clock(is);
-        
-        is->frame_timer += (av_gettime() - pvs->video_current_pts_time) / 1000000.0;
-        Py_DECREF( pvs);
-        Py_DECREF( pas);
-    }
-    Py_DECREF( is);
-}
-
-static double compute_frame_delay(double frame_current_pts, PyMovie *is)
-{
-    Py_INCREF( is);
-
-    double actual_delay, delay, sync_threshold, ref_clock, diff;
-
-    /* compute nominal delay */
-    delay = frame_current_pts - is->frame_last_pts;
-    if (delay <= 0 || delay >= 10.0) {
-        /* if incorrect delay, use previous one */
-        delay = is->frame_last_delay;
-    } else {
-        is->frame_last_delay = delay;
-    }
-    is->frame_last_pts = frame_current_pts;
-
-    PyVideoStream *pvs;
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-       
-    PyAudioStream *pas; 
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-   
-    /* update delay to follow master synchronisation source */
-    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && pas->audio_st) ||
-         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
-        /* if video is slave, we try to correct big delays by
-           duplicating or deleting a frame */
-        ref_clock = get_master_clock(is);
-        diff = frame_current_pts - ref_clock;
-
-        /* skip or repeat frame. We take into account the
-           delay to compute the threshold. I still don't know
-           if it is the best guess */
-        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
-        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
-            if (diff <= -sync_threshold)
-                delay = 0;
-            else if (diff >= sync_threshold)
-                delay = 2 * delay;
-        }
-    }
-
-    is->frame_timer += delay;
-    /* compute the REAL delay (we need to do that to avoid
-       long term errors) */
-    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
-    if (actual_delay < 0.010) {
-        /* XXX: should skip picture */
-        actual_delay = 0.010;
-    }
-
-#if defined(DEBUG_SYNC)
-    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
-            delay, actual_delay, frame_current_pts, -diff);
-#endif
-    Py_DECREF( pvs);
-    Py_DECREF( pas);
-    Py_DECREF( is);
-    return actual_delay;
-}
-
-
-
-/* called to display each frame */
-static void video_refresh_timer(void *opaque)
-{
-    PyMovie *is = opaque;
-    Py_INCREF( is);
-    
-    VideoPicture *vp;
-
-    SubPicture *sp, *sp2;
-
-    PyVideoStream *pvs;
-    PyAudioStream *pas;
-    PySubtitleStream   *pss;
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs); 
-        
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-
-    if (pvs->video_st) {
-        if (pvs->pictq_size == 0) {
-            /* if no picture, need to wait */
-            schedule_refresh(is, 1);
-        } else {
-            /* dequeue the picture */
-            vp = &pvs->pictq[pvs->pictq_rindex];
-
-            /* update current video pts */
-            pvs->video_current_pts = vp->pts;
-            pvs->video_current_pts_time = av_gettime();
-
-            /* launch timer for next picture */
-            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
-
-            if(is->subtitle_stream>-1) {
-                pss = (PySubtitleStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->subtitle_stream);
-                Py_INCREF(pss);
-                
-                if (pss->subtitle_stream_changed) {
-                    SDL_LockMutex(pss->subpq_mutex);
-
-                    while (pss->subpq_size) {
-                        free_subpicture(&pss->subpq[pss->subpq_rindex]);
-
-                        /* update queue size and signal for next picture */
-                        if (++pss->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
-                            pss->subpq_rindex = 0;
-
-                        pss->subpq_size--;
-                    }
-                    pss->subtitle_stream_changed = 0;
-
-                    SDL_CondSignal(pss->subpq_cond);
-                    SDL_UnlockMutex(pss->subpq_mutex);
-                } else {
-                    if (pss->subpq_size > 0) {
-                        sp = &pss->subpq[pss->subpq_rindex];
-
-                        if (pss->subpq_size > 1)
-                            sp2 = &pss->subpq[(pss->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
-                        else
-                            sp2 = NULL;
-
-                        if ((pvs->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
-                                || (sp2 && pvs->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
-                        {
-                            free_subpicture(sp);
-
-                            /* update queue size and signal for next picture */
-                            if (++pss->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
-                                pss->subpq_rindex = 0;
-
-                            SDL_LockMutex(pss->subpq_mutex);
-                            pss->subpq_size--;
-                            SDL_CondSignal(pss->subpq_cond);
-                            SDL_UnlockMutex(pss->subpq_mutex);
-                        }
-                    }
-                }
-                Py_DECREF(pss);
-            }
-
-            /* display picture */
-            video_display(is);
-
-            /* update queue size and signal for next picture */
-            pvs->pictq_rindex++;
-            if (pvs->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
-                pvs->pictq_rindex = 0;
-
-            SDL_LockMutex(pvs->pictq_mutex);
-            pvs->pictq_size--;
-            SDL_CondSignal(pvs->pictq_cond);
-            SDL_UnlockMutex(pvs->pictq_mutex);
-        }
-    } else if (pas->audio_st) {
-        /* draw the next audio frame */
-
-        schedule_refresh(is, 40);
-
-        /* if only audio stream, then display the audio bars (better
-           than nothing, just to test the implementation) */
-
-        /* display picture */
-        video_display(is);
-    } else {
-        schedule_refresh(is, 100);
-    }
-    Py_DECREF( pvs);
-    Py_DECREF( pas);
-    Py_DECREF( is);
-}
-
-/* allocate a picture (this needs to be done in the main thread to avoid
-   potential locking problems) */
-static void alloc_picture(void *opaque)
-{
-    PyMovie *is = opaque;
-    Py_INCREF( is);
-    VideoPicture *vp;
-    
-    PyVideoStream *pvs;
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-    
-    vp = &pvs->pictq[pvs->pictq_windex];
-
-    if (pvs->bmp)
-        SDL_FreeYUVOverlay(pvs->bmp);
-    if (pvs->out_surf)
-        SDL_FreeSurface(pvs->out_surf);
-#if 0
-    /* XXX: use generic function */
-    /* XXX: disable overlay if no hardware acceleration or if RGB format */
-    switch(is->video_st->codec->pix_fmt) {
-    case PIX_FMT_YUV420P:
-    case PIX_FMT_YUV422P:
-    case PIX_FMT_YUV444P:
-    case PIX_FMT_YUYV422:
-    case PIX_FMT_YUV410P:
-    case PIX_FMT_YUV411P:
-        is_yuv = 1;
-        break;
-    default:
-        is_yuv = 0;
-        break;
-    }
-#endif
-    if(is->overlay>0)
-    {
-        SDL_Surface *screen = SDL_GetVideoSurface ();
-        pvs->bmp = SDL_CreateYUVOverlay(pvs->video_st->codec->width,
-                                   pvs->video_st->codec->height,
-                                   SDL_YV12_OVERLAY,
-                                   screen);
-    }
-    else
-    {
-        pvs->out_surf= SDL_GetVideoSurface ();
-    }
-    vp->width = pvs->video_st->codec->width;
-    vp->height = pvs->video_st->codec->height;
-
-    SDL_LockMutex(pvs->pictq_mutex);
-    vp->allocated = 1;
-    SDL_CondSignal(pvs->pictq_cond);
-    SDL_UnlockMutex(pvs->pictq_mutex);
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-}
-
-/**
- *
- * @param pts the dts of the pkt / pts of the frame and guessed if not known
- */
-static int queue_picture(PyMovie *is, AVFrame *src_frame, double pts)
-{
-    Py_INCREF( is);
-    VideoPicture *vp;
-
-    PyVideoStream *pvs;
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-
-    int dst_pix_fmt;
-    AVPicture pict;
-    static struct SwsContext *img_convert_ctx;
-
-    /* wait until we have space to put a new picture */
-    SDL_LockMutex(pvs->pictq_mutex);
-    while (pvs->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
-           !pvs->videoq.abort_request) {
-        SDL_CondWait(pvs->pictq_cond, pvs->pictq_mutex);
-    }
-    SDL_UnlockMutex(pvs->pictq_mutex);
-
-    if (pvs->videoq.abort_request)
-        return -1;
-
-    vp = &pvs->pictq[pvs->pictq_windex];
-
-    /* alloc or resize hardware picture buffer */
-    if (!pvs->bmp || !pvs->out_surf ||
-        vp->width != pvs->video_st->codec->width ||
-        vp->height != pvs->video_st->codec->height) {
-        SDL_Event event;
-
-        vp->allocated = 0;
-
-        /* the allocation must be done in the main thread to avoid
-           locking problems */
-        event.type = FF_ALLOC_EVENT;
-        event.user.data1 = is;
-        SDL_PushEvent(&event);
-
-        /* wait until the picture is allocated */
-        SDL_LockMutex(pvs->pictq_mutex);
-        while (!vp->allocated && !pvs->videoq.abort_request) {
-            SDL_CondWait(pvs->pictq_cond, pvs->pictq_mutex);
-        }
-        SDL_UnlockMutex(pvs->pictq_mutex);
-
-        if (pvs->videoq.abort_request)
-        {
-        	Py_DECREF(pvs);
-        	Py_DECREF(is);
-            return -1;
-        }
-    }
-
-    /* if the frame is not skipped, then display it */
-    if (pvs->bmp||pvs->out_surf) {
-        /* get a pointer on the bitmap */
-        if(is->overlay>0)
-        {
-            dst_pix_fmt = PIX_FMT_YUV420P; /* match the SDL_YV12_OVERLAY layout */
-              
-            SDL_LockYUVOverlay (pvs->bmp);
-
-            pict.data[0] = pvs->bmp->pixels[0];
-            pict.data[1] = pvs->bmp->pixels[2];
-            pict.data[2] = pvs->bmp->pixels[1];
-
-            pict.linesize[0] = pvs->bmp->pitches[0];
-            pict.linesize[1] = pvs->bmp->pitches[2];
-            pict.linesize[2] = pvs->bmp->pitches[1];
-            //sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
-            img_convert_ctx = sws_getCachedContext(img_convert_ctx,
-                pvs->video_st->codec->width, pvs->video_st->codec->height,
-                pvs->video_st->codec->pix_fmt,
-                pvs->video_st->codec->width, pvs->video_st->codec->height,
-                dst_pix_fmt, sws_flags, NULL, NULL, NULL);
-            if (img_convert_ctx == NULL) {
-                fprintf(stderr, "Cannot initialize the conversion context\n");
-                exit(1);
-            }
-            sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
-                      0, pvs->video_st->codec->height, pict.data, pict.linesize);
-            /* update the bitmap content */
-            SDL_UnlockYUVOverlay(pvs->bmp);
-        	video_refresh_timer(is);
-        }
-        else
-        {
-            //TODO:fix this as well
-            #if 0
-            dst_pix_fmt = PIX_FMT_RGB24;
-              
-            SDL_LockSurface (pvs->out_surf);
-
-            pict.data[0] = pvs->out_surf->pixels[0];
-            pict.data[1] = pvs->out_surf->pixels[1];
-            pict.data[2] = pvs->out_surf->pixels[2];
-
-            pict.linesize[0] = pvs->out_surf->pitch;
-            pict.linesize[1] = pvs->out_surf->pitch;
-            pict.linesize[2] = pvs->out_surf->pitch;
-            //sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
-            img_convert_ctx = sws_getCachedContext(img_convert_ctx,
-                pvs->video_st->codec->width, pvs->video_st->codec->height,
-                pvs->video_st->codec->pix_fmt,
-                pvs->video_st->codec->width, pvs->video_st->codec->height,
-                dst_pix_fmt, sws_flags, NULL, NULL, NULL);
-            if (img_convert_ctx == NULL) {
-                
-                PyErr_SetString(PyExc_MemoryError ,"Cannot initialize the conversion context.");
-                //fprintf(stderr, "Cannot initialize the conversion context\n");
-                //exit(1);
-            }
-            sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
-                      0, pvs->video_st->codec->height, pict.data, pict.linesize);
-            /* update the bitmap content */
-            SDL_UnlockSurface(pvs->out_surf);
-            video_refresh_timer(is);
-            #endif
-        }
-        
-        vp->pts = pts;
-
-        /* now we can update the picture count */
-        if (++pvs->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
-            pvs->pictq_windex = 0;
-        SDL_LockMutex(pvs->pictq_mutex);
-        pvs->pictq_size++;
-        SDL_UnlockMutex(pvs->pictq_mutex);
-    }
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-    return 0;
-}
-
-
-/**
- * compute the exact PTS for the picture if it is omitted in the stream
- * @param pts1 the dts of the pkt / pts of the frame
- */
-static int output_picture2(PyMovie *is, AVFrame *src_frame, double pts1)
-{
-    Py_INCREF( is);
-    
-    double frame_delay, pts;
-
-    pts = pts1;
-
-    PyVideoStream *pvs;
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF( pvs);
-    
-    if (pts != 0) {
-        /* update video clock with pts, if present */
-        pvs->video_clock = pts;
-    } else {
-        pts = pvs->video_clock;
-    }
-    /* update video clock for next frame */
-    frame_delay = av_q2d(pvs->video_st->codec->time_base);
-    /* for MPEG2, the frame can be repeated, so we update the
-       clock accordingly */
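-    /* per the libavcodec docs, each repeat_pict unit delays the next frame
-       by half a frame period: extra_delay = repeat_pict / (2*fps) */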
-    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
-    pvs->video_clock += frame_delay;
-
-#if defined(DEBUG_SYNC) && 0
-    {
-        int ftype;
-        if (src_frame->pict_type == FF_B_TYPE)
-            ftype = 'B';
-        else if (src_frame->pict_type == FF_I_TYPE)
-            ftype = 'I';
-        else
-            ftype = 'P';
-        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
-               ftype, pts, pts1);
-    }
-#endif
-    Py_DECREF(pvs);
-    Py_DECREF( is);
-    return queue_picture(is, src_frame, pts);
-}
-
-static int video_thread(void *arg)
-{
-    PyMovie *is = arg;
-    Py_INCREF( is);
-    AVPacket pkt1, *pkt = &pkt1;
-    int len1, got_picture;
-    AVFrame *frame= avcodec_alloc_frame();
-    double pts;
-
-    PyVideoStream *pvs;
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-    
-    for(;;) {
-        while (pvs->paused && !pvs->videoq.abort_request) {
-            SDL_Delay(10);
-        }
-        if (packet_queue_get(&pvs->videoq, pkt, 1) < 0)
-            break;
-
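-        /* a flush packet is queued on seek; drop the codec's buffered
-           frames so no stale pictures are shown */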
-        if(pkt->data == flush_pkt.data){
-            avcodec_flush_buffers(pvs->video_st->codec);
-            continue;
-        }
-
-        /* NOTE: pkt->pts is the PTS of the _first_ picture beginning in
-           this packet, if any */
-        pvs->video_st->codec->reordered_opaque= pkt->pts;
-        len1 = avcodec_decode_video(pvs->video_st->codec,
-                                    frame, &got_picture,
-                                    pkt->data, pkt->size);
-
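-        /* the decoder copies reordered_opaque from the input into the frame
-           it returns, so frame->reordered_opaque recovers the pts of the
-           packet this picture actually came from */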
-        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
-           && frame->reordered_opaque != AV_NOPTS_VALUE)
-            pts= frame->reordered_opaque;
-        else if(pkt->dts != AV_NOPTS_VALUE)
-            pts= pkt->dts;
-        else
-            pts= 0;
-        pts *= av_q2d(pvs->video_st->time_base);
-
-//            if (len1 < 0)
-//                break;
-        if (got_picture) {
-            if (output_picture2(is, frame, pts) < 0)
-                goto the_end;
-        }
-        av_free_packet(pkt);
-        if (step && is)
-            stream_pause(is);
-    }
- the_end:
-    Py_DECREF( pvs);
-    Py_DECREF( is);
-    av_free(frame);
-    return 0;
-}
-
-static int subtitle_thread(void *arg)
-{
-    PyMovie *is = arg;
-    Py_INCREF( is);
-    SubPicture *sp;
-    AVPacket pkt1, *pkt = &pkt1;
-    int len1, got_subtitle;
-    double pts;
-    int i, j;
-    int r, g, b, y, u, v, a;
-
-    PySubtitleStream   *pss;
-
-    pss = (PySubtitleStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->subtitle_stream);
-    Py_INCREF(pss);
-    
-    for(;;) {
-        while (pss->paused && !pss->subtitleq.abort_request) {
-            SDL_Delay(10);
-        }
-        if (packet_queue_get(&pss->subtitleq, pkt, 1) < 0)
-            break;
-
-        if(pkt->data == flush_pkt.data){
-            avcodec_flush_buffers(pss->subtitle_st->codec);
-            continue;
-        }
-        SDL_LockMutex(pss->subpq_mutex);
-        while (pss->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
-               !pss->subtitleq.abort_request) {
-            SDL_CondWait(pss->subpq_cond, pss->subpq_mutex);
-        }
-        SDL_UnlockMutex(pss->subpq_mutex);
-
-        if (pss->subtitleq.abort_request)
-            goto the_end;
-
-        sp = &pss->subpq[pss->subpq_windex];
-
-       /* NOTE: pkt->pts, if set, gives the presentation time of this
-           subtitle */
-        pts = 0;
-        if (pkt->pts != AV_NOPTS_VALUE)
-            pts = av_q2d(pss->subtitle_st->time_base)*pkt->pts;
-
-        len1 = avcodec_decode_subtitle(pss->subtitle_st->codec,
-                                    &sp->sub, &got_subtitle,
-                                    pkt->data, pkt->size);
-//            if (len1 < 0)
-//                break;
-        if (got_subtitle && sp->sub.format == 0) {
-            sp->pts = pts;
-
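-            /* bitmap subtitles carry an RGBA palette in pict.data[1];
-               convert each entry to YUVA so the rects can be blended
-               onto the YUV overlay */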
-            for (i = 0; i < sp->sub.num_rects; i++)
-            {
-                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
-                {
-                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
-                    y = RGB_TO_Y_CCIR(r, g, b);
-                    u = RGB_TO_U_CCIR(r, g, b, 0);
-                    v = RGB_TO_V_CCIR(r, g, b, 0);
-                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
-                }
-            }
-
-            /* now we can update the picture count */
-            if (++pss->subpq_windex == SUBPICTURE_QUEUE_SIZE)
-                pss->subpq_windex = 0;
-            SDL_LockMutex(pss->subpq_mutex);
-            pss->subpq_size++;
-            SDL_UnlockMutex(pss->subpq_mutex);
-        }
-        av_free_packet(pkt);
-//        if (step)
-//            if (cur_stream)
-//                stream_pause(cur_stream);
-    }
- the_end:
-    Py_DECREF( pss);
-    Py_DECREF( is);
-    return 0;
-}
-
-/* return the new audio buffer size (samples can be added or deleted
-   to get better sync when video or the external clock is the master) */
-static int synchronize_audio(PyMovie *is, short *samples,
-                             int samples_size1, double pts)
-{
-    Py_INCREF( is);
-    
-    int n, samples_size;
-    double ref_clock;
-
-    PyAudioStream   *pas;
-    PyVideoStream   *pvs;
-
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-
-    pvs = (PyVideoStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->video_stream);
-    Py_INCREF(pvs);
-
-    n = 2 * pas->audio_st->codec->channels;
-    samples_size = samples_size1;
-
-    /* if not master, then we try to remove or add samples to correct the clock */
-    if (((pas->av_sync_type == AV_SYNC_VIDEO_MASTER && pvs->video_st) ||
-         pas->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
-        double diff, avg_diff;
-        int wanted_size, min_size, max_size, nb_samples;
-
-        ref_clock = get_master_clock(is);
-        diff = get_audio_clock(is) - ref_clock;
-
-        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
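-            /* exponentially weighted sum of the A-V differences; multiplying
-               by (1 - audio_diff_avg_coef) below normalizes it into an
-               average, since the weights sum to 1/(1 - coef) */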
-            pas->audio_diff_cum = diff + pas->audio_diff_avg_coef * pas->audio_diff_cum;
-            if (pas->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
-                /* not enough measures to have a correct estimate */
-                pas->audio_diff_avg_count++;
-            } else {
-                /* estimate the A-V difference */
-                avg_diff = pas->audio_diff_cum * (1.0 - pas->audio_diff_avg_coef);
-
-                if (fabs(avg_diff) >= pas->audio_diff_threshold) {
-                    wanted_size = samples_size + ((int)(diff * pas->audio_st->codec->sample_rate) * n);
-                    nb_samples = samples_size / n;
-
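-                    /* clamp the correction to +/- SAMPLE_CORRECTION_PERCENT_MAX
-                       percent of the buffer so one bad clock reading cannot
-                       cause an audible glitch */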
-                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
-                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
-                    if (wanted_size < min_size)
-                        wanted_size = min_size;
-                    else if (wanted_size > max_size)
-                        wanted_size = max_size;
-
-                    /* add or remove samples to correct the sync */
-                    if (wanted_size < samples_size) {
-                        /* remove samples */
-                        samples_size = wanted_size;
-                    } else if (wanted_size > samples_size) {
-                        uint8_t *samples_end, *q;
-                        int nb;
-
-                        /* add samples */
-                        nb = (wanted_size - samples_size); /* bytes still to add */
-                        samples_end = (uint8_t *)samples + samples_size - n;
-                        q = samples_end + n;
-                        while (nb > 0) {
-                           
-                            memcpy(q, samples_end, n);
-                            q += n;
-                            nb -= n;
-                        }
-                        samples_size = wanted_size;
-                    }
-                }
-#if 0
-                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
-                       diff, avg_diff, samples_size - samples_size1,
-                       pas->audio_clock, pvs->video_clock, pas->audio_diff_threshold);
-#endif
-            }
-        } else {
-            /* too big difference : may be initial PTS errors, so
-               reset A-V filter */
-            pas->audio_diff_avg_count = 0;
-            pas->audio_diff_cum = 0;
-        }
-    }
-    Py_DECREF(pas);
-    Py_DECREF(pvs);
-    Py_DECREF( is);
-    return samples_size;
-}
-
-/* decode one audio frame and return its uncompressed size */
-static int audio_decode_frame(PyMovie *is, double *pts_ptr)
-{
-    Py_INCREF( is);
-    PyAudioStream   *pas;
-
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-    
-    AVPacket *pkt = &pas->audio_pkt;
-    AVCodecContext *dec= pas->audio_st->codec;
-    int n, len1, data_size;
-    double pts;
-
-    for(;;) {
-        /* NOTE: the audio packet can contain several frames */
-        while (pas->audio_pkt_size > 0) {
-            data_size = sizeof(pas->audio_buf1);
-            len1 = avcodec_decode_audio2(dec,
-                                        (int16_t *)pas->audio_buf1, &data_size,
-                                        pas->audio_pkt_data, pas->audio_pkt_size);
-            if (len1 < 0) {
-                /* if error, we skip the frame */
-                pas->audio_pkt_size = 0;
-                break;
-            }
-
-            pas->audio_pkt_data += len1;
-            pas->audio_pkt_size -= len1;
-            if (data_size <= 0)
-                continue;
-
-            if (dec->sample_fmt != pas->audio_src_fmt) {
-                if (pas->reformat_ctx)
-                    av_audio_convert_free(pas->reformat_ctx);
-                pas->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
-                                                         dec->sample_fmt, 1, NULL, 0);
-                if (!pas->reformat_ctx) {
-                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
-                        avcodec_get_sample_fmt_name(dec->sample_fmt),
-                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
-                    break;
-                }
-                pas->audio_src_fmt= dec->sample_fmt;
-            }
-
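-            /* convert the decoded samples to signed 16-bit; strides are
-               bytes per sample (input format size in, 2 bytes out) */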
-            if (pas->reformat_ctx) {
-                const void *ibuf[6]= {pas->audio_buf1};
-                void *obuf[6]= {pas->audio_buf2};
-                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
-                int ostride[6]= {2};
-                int len= data_size/istride[0];
-                if (av_audio_convert(pas->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
-                    PyErr_WarnEx(NULL, "av_audio_convert() failed", 1);
-                    //printf("av_audio_convert() failed\n");
-                    break;
-                }
-                pas->audio_buf= pas->audio_buf2;
-                /* FIXME: existing code assumes that data_size equals framesize*channels*2
-                          remove this legacy cruft */
-                data_size= len*2;
-            }else{
-                pas->audio_buf= pas->audio_buf1;
-            }
-
-            /* if no pts, then compute it */
-            pts = pas->audio_clock;
-            *pts_ptr = pts;
-            n = 2 * dec->channels;
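-            /* advance the clock by the duration of this buffer:
-               data_size bytes / (n bytes per sample frame * sample_rate) */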
-            pas->audio_clock += (double)data_size /
-                (double)(n * dec->sample_rate);
-#if defined(DEBUG_SYNC)
-            {
-                static double last_clock;
-                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
-                       pas->audio_clock - last_clock,
-                       pas->audio_clock, pts);
-                last_clock = pas->audio_clock;
-            }
-#endif
-            Py_DECREF(pas);
-            Py_DECREF( is);
-            return data_size;
-        }
-
-        /* free the current packet */
-        if (pkt->data)
-            av_free_packet(pkt);
-
-        if (pas->paused || pas->audioq.abort_request) {
-            Py_DECREF(pas);
-            Py_DECREF( is);
-            return -1;
-        }
-
-        /* read next packet */
-        if (packet_queue_get(&pas->audioq, pkt, 1) < 0) {
-            Py_DECREF(pas);
-            Py_DECREF( is);
-            return -1;
-        }
-        if(pkt->data == flush_pkt.data){
-            avcodec_flush_buffers(dec);
-            continue;
-        }
-
-        pas->audio_pkt_data = pkt->data;
-        pas->audio_pkt_size = pkt->size;
-
-        /* update the audio clock with the pts, if present */
-        if (pkt->pts != AV_NOPTS_VALUE) {
-            pas->audio_clock = av_q2d(pas->audio_st->time_base)*pkt->pts;
-        }
-    }
-    /* not reached: the loop above only exits via the returns inside it */
-    Py_DECREF( pas);
-    Py_DECREF( is);
-    return -1;
-}
-
-
-
-/* prepare a new audio buffer */
-static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
-{
-    PyMovie *is = opaque;
-    Py_INCREF( is);
-    int audio_size, len1;
-    double pts;
-
-    PyAudioStream   *pas;
-
-    pas = (PyAudioStream *)PySequence_GetItem(is->streams, (Py_ssize_t) is->audio_stream);
-    Py_INCREF(pas);
-
-    audio_callback_time = av_gettime();
-
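-    /* SDL requests exactly len bytes; keep decoding frames into audio_buf
-       and copying slices out until the request is satisfied */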
-    while (len > 0) {
-        if (pas->audio_buf_index >= pas->audio_buf_size) {
-           audio_size = audio_decode_frame(is, &pts);
-           if (audio_size < 0) {
-                /* if error, just output silence */
-               pas->audio_buf = pas->audio_buf1;
-               pas->audio_buf_size = 1024;
-               memset(pas->audio_buf, 0, pas->audio_buf_size);
-           } else {
-               
-               audio_size = synchronize_audio(is, (int16_t *)pas->audio_buf, audio_size,
-                                              pts);
-               pas->audio_buf_size = audio_size;
-           }
-           pas->audio_buf_index = 0;
-        }
-        len1 = pas->audio_buf_size - pas->audio_buf_index;
-        if (len1 > len)
-            len1 = len;
-        memcpy(stream, (uint8_t *)pas->audio_buf + pas->audio_buf_index, len1);
-        len -= len1;
-        stream += len1;
-        pas->audio_buf_index += len1;
-    }
-    Py_DECREF( pas);
-    Py_DECREF( is);
-}
-
-/* open a given stream. Return 0 if OK */
-static int stream_component_open(PyMovie *is, int stream_index)
-{
-    Py_INCREF( is);
-    AVFormatContext *ic = is->ic;
-    AVCodecContext *enc;
-    AVCodec *codec;
-    SDL_AudioSpec wanted_spec, spec;
-
-    PyAudioStream* pas; 
-    PyVideoStream* pvs;
-    PySubtitleStream*   pss;
-
-    if (stream_index < 0 || stream_index >= ic->nb_streams) {
-        Py_DECREF( is);
-        return -1;
-    }
-    enc = ic->streams[stream_index]->codec;
-
-    /* prepare audio output */
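-    /* cap decoding at two channels; the audio output path
-       expects at most stereo */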
-    if (enc->codec_type == CODEC_TYPE_AUDIO) {
-        if (enc->channels > 0) {
-            enc->request_channels = FFMIN(2, enc->channels);
-        } else {
-            enc->request_channels = 2;
-        }
-    }
-
-    codec = avcodec_find_decoder(enc->codec_id);
-    enc->debug_mv = debug_mv;
-    enc->debug = debug;