Commits

zeroth  committed ec60de7

Removed audioconvert.h as it was not needed.
Macroed out the subtitle code, as that is the biggest difference between
versions of ffmpeg. Have investigated and found that all ffmpeg APIs
referenced in the module are supported as far back as Ubuntu 8.04 and
Debian Lenny.

Found a small issue where calling pause shortly after a seek causes
spotty sound due to a thread conflict. The current workaround is to wait
at least half a second before calling pause. Will consider how to make
pause and seek safe to call in close succession.

  • Participants
  • Parent commits 19252bb
  • Branches tylerthemovie

Comments (0)

Files changed (8)

File config_unix.py

 
 #these get prefixes with '/usr' and '/usr/local' or the $LOCALBASE
 origincdirs = ['/include', '/include/SDL', '/include/SDL',
-               '/include/smpeg', ]
+               '/include/smpeg' ]
 origlibdirs = ['/lib','/lib64','/X11R6/lib']
 
 def confirm(message):

File movie_test.py

 print "Testing seek..."
 m.easy_seek(second=10, minute=5, reverse=0)
 time.sleep(5)
+m.easy_seek(second=10, minute=5, reverse=0)
+time.sleep(1)
+m.pause()
+time.sleep(5)
+m.pause()
+time.sleep(10000)
 
 print "Altering xleft and ytop..."
 m.xleft += 10

File src/_gmovie.c

     return ret;
 }
 
+#if 0
 void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
 {
     int wrap, wrap3, width2, skip2;
 
     memset(&sp->sub, 0, sizeof(AVSubtitle));
 }
-
+#endif
 
 /* Sets the value of the variable width. Acts like a macro */
 void inline get_width(PyMovie *movie, int *width)
     Py_INCREF( movie);
     RELEASEGIL
     VidPicture *vp;
-    SubPicture *sp;
+    //SubPicture *sp;
     float aspect_ratio;
     int width, height, x, y;
     vp = &movie->pictq[movie->pictq_rindex];
 
     if (vp->dest_overlay && vp->overlay>0)
     {
+     #if 0 
         if (movie->sub_st)
         {
             if (movie->subpq_size > 0)
                 }
             }
         }
-        
+        #endif
         if(vp->overlay>0)
         {
             SDL_LockYUVOverlay(vp->dest_overlay);
             actual_delay = 0.010;
         }
         GRABGIL
-        PySys_WriteStdout("Actual Delay: %f\ndelay: %f\ndiff: %f\n", actual_delay, delay, diff);
+        //PySys_WriteStdout("Actual Delay: %f\ndelay: %f\ndiff: %f\n", actual_delay, delay, diff);
         movie->timing = (actual_delay*1000.0)+1;
         RELEASEGIL
     }
         movie->video_stream = stream_index;
         movie->video_st = ic->streams[stream_index];
         break;
+    #if 0    
 	case CODEC_TYPE_SUBTITLE:
 		movie->sub_stream = stream_index;
 		movie->sub_st     = ic->streams[stream_index];
+    #endif
     default:
         break;
     }
         movie->video_current_pts_time = av_gettime();
         packet_queue_init(&movie->videoq);
         break;
+    #if 0
 	case CODEC_TYPE_SUBTITLE:
 		if(movie->replay)
 		{
 			movie->sub_st     = ic->streams[stream_index];
 		}
 		packet_queue_init(&movie->subq);
+    #endif
     default:
         break;
     }
     case CODEC_TYPE_AUDIO:
         packet_queue_abort(&movie->audioq);
         SDL_WaitThread(movie->audio_tid, NULL);
+        SDL_DestroyMutex(movie->audio_mutex);
         soundEnd();
         memset(&movie->audio_buf1, 0, sizeof(movie->audio_buf1));
         packet_queue_flush(&movie->audioq);
         packet_queue_abort(&movie->videoq);
         packet_queue_flush(&movie->videoq);
         break;
+    #if 0
     case CODEC_TYPE_SUBTITLE:
         packet_queue_abort(&movie->subq);
         packet_queue_flush(&movie->subq);
         break;
-        
+    #endif    
     default:
         break;
     }
     case CODEC_TYPE_AUDIO:
         soundQuit();
         packet_queue_end(&movie->audioq, end);
-        if (movie->reformat_ctx)
-            av_audio_convert_free(movie->reformat_ctx);
+        //if (movie->reformat_ctx)
+        //    av_audio_convert_free(movie->reformat_ctx);
         break;
     case CODEC_TYPE_VIDEO:
         packet_queue_end(&movie->videoq, end);
         break;
     case CODEC_TYPE_SUBTITLE:
-    	packet_queue_end(&movie->subq, end);
+    	//packet_queue_end(&movie->subq, end);
     default:
         break;
     }
         movie->video_st = NULL;
         movie->video_stream = -1;
         break;
+    #if 0
     case CODEC_TYPE_SUBTITLE:
     	movie->sub_st=NULL;
     	movie->sub_stream = -1;
+    #endif
     default:
         break;
     }
     subtitle_index = -1;
     movie->video_stream = -1;
     movie->audio_stream = -1;
-    movie->sub_stream = -1;
+    //movie->sub_stream = -1;
 
     initialize_context(movie, threaded); //moved a bunch of convenience stuff out of here for access at other times
 
                 video_index = i;
             break;
         case CODEC_TYPE_SUBTITLE:
-        	if(wanted_subti_stream -- >= 0 && !movie->subtitle_disable)
-        		subtitle_index=i;
+        	//if(wanted_subti_stream -- >= 0 && !movie->subtitle_disable)
+        	//	subtitle_index=i;
         default:
             break;
         }
     {
         stream_component_close(movie, movie->video_stream, threaded);
     }
+    #if 0
     if (movie->sub_stream >= 0)
     {
         stream_component_close(movie, movie->sub_stream, threaded);
     }
+    #endif
     if (movie->ic)
     {
         av_close_input_file(movie->ic);
             stream_component_start(movie, movie->video_stream, 1);
         if(movie->audio_st)
             stream_component_start(movie, movie->audio_stream, 1);
+        #if 0
         if(movie->sub_st)
         	stream_component_start(movie, movie->sub_stream, 1);
+        #endif
         /* Now we call the function that does the ACTUAL work. Just like us to loaf 
          * around while we make decoder work so hard...
          */
             stream_component_end(movie, movie->video_st->index, 1);
         if(movie->audio_st)
             stream_component_end(movie, movie->audio_st->index, 1);
+    	#if 0
     	if(movie->sub_st)
     		stream_component_end(movie, movie->sub_st->index, 1);
+    	#endif
     }
     GRABGIL
     Py_DECREF(movie);
     movie->finished =0;
     ic=movie->ic;
     int co=0;
+    SDL_Delay(100);
     movie->last_showtime = av_gettime()/1000.0;
     for(;;)
     {
         if (movie->seek_req)
         {
             int64_t seek_target= movie->seek_pos;
+			seek_target-=1*AV_TIME_BASE;
 			int aud_stream_index=-1;
 			int vid_stream_index=-1;
 			int64_t vid_seek_target=seek_target;
 			
 			if(vid_stream_index>=0)
 			{
-            	ret = av_seek_frame(movie->ic, vid_stream_index, vid_seek_target, movie->seek_flags|AVSEEK_FLAG_ANY);
+            	ret = av_seek_frame(movie->ic, vid_stream_index, vid_seek_target, movie->seek_flags);
 	            if (ret < 0)
 	            {
 	                PyErr_Format(PyExc_IOError, "%s: error while seeking", movie->ic->filename);
 			}
 			else if(aud_stream_index>=0)
 			{
-				ret = av_seek_frame(movie->ic, aud_stream_index, aud_seek_target, movie->seek_flags|AVSEEK_FLAG_ANY);
+				ret = av_seek_frame(movie->ic, aud_stream_index, aud_seek_target, movie->seek_flags);
 	            if (ret < 0)
 	            {
 	                PyErr_Format(PyExc_IOError, "%s: error while seeking", movie->ic->filename);
             }
      
             movie->seek_req = 0;
+            //now we need to seek to a keyframe...
+         	while(av_read_frame(ic, pkt)>=0)
+         	{
+         		if(pkt->stream_index == movie->video_stream)
+         		{
+         			int bytesDecoded, frameFinished;
+         			AVFrame *frame;
+         			frame=avcodec_alloc_frame();
+         			bytesDecoded = avcodec_decode_video(movie->video_st->codec, frame, &frameFinished, pkt->data, pkt->size);
+         			if(frameFinished)
+         			{
+         				if(pkt->pts >= vid_seek_target)
+         					break;
+         			}
+         		}
+         		av_free_packet(pkt);
+         	}
         }
         /* if the queue are full, no need to read more */
         if ( //yay for short circuit logic testing
             {
                 packet_queue_put(&movie->videoq, pkt);
             }
+            #if 0
             else if (pkt->stream_index == movie->sub_stream)
             {
             	packet_queue_put(&movie->subq, pkt);
             }
+            #endif
             else if(pkt)
             {
                 av_free_packet(pkt);
             }
         }
+        #if 0
         SubPicture *sp;
         SubPicture *sp2;
         if(movie->sub_stream>=0) {
                     }
                 }
             }
+         #endif
         if(movie->video_st)
             video_render(movie);
         /*if(movie->audio_st)
             audio_thread(movie);*/
+        #if 0
         if(movie->sub_st)
         	subtitle_render(movie);
+        #endif
         if(co<2)
             video_refresh_timer(movie);
         if(movie->timing>0)
     av_free(frame);
     return 0;
 }
-
+#if 0
 int subtitle_render(void *arg){
 	PyMovie *movie = arg;
 	DECLAREGIL
  	RELEASEGIL
     return 0;
 }
+#endif

File src/_gmovie.h

 #include "pygamedocs.h"
 #include "pygame.h"
 #include "pgcompat.h"
-#include "audioconvert.h"
+//#include "audioconvert.h"
 #include "surface.h"
 #include "_gsound.h"
 #include "structmember.h"
 }
 VidPicture;
 
+#if 0
 typedef struct SubPicture
 {
 	double pts;
 	AVSubtitle sub;
 } SubPicture;
-
+#endif
 enum {
     AV_SYNC_AUDIO_MASTER, /* default choice */
     AV_SYNC_VIDEO_MASTER,
     int64_t  audio_pts;      
     //int audio_volume; /*must self implement*/
     enum SampleFormat audio_src_fmt;
-    AVAudioConvert   *reformat_ctx;
+    //AVAudioConvert   *reformat_ctx;
     int               audio_stream;
     int               audio_disable;
     SDL_mutex        *audio_mutex;
     SDL_mutex  *videoq_mutex;
     SDL_cond   *videoq_cond;
     struct SwsContext *img_convert_ctx;
-
+#if 0
 	/*subtitle */
 	int sub_stream;
 	int sub_stream_changed;
 	int subpq_rindex, subpq_windex, subpq_size;
 	SDL_mutex *subpq_mutex;
 	int subtitle_disable;
+#endif
 #ifdef PROFILE
 	ImageScaleStats *istats;
 #endif
 double get_external_clock (PyMovie *is);
 double get_master_clock   (PyMovie *is);
 
-
+#if 0
 /*		Subtitle Management*/
 int subtitle_render(void *arg);
 void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh);
 void free_subpicture(SubPicture *sp);
+#endif
 #endif /*_GMOVIE_H_*/

File src/_gsound.c

 #endif
 
     }
-    ainfo.channel = 0;
-    ainfo.channels = channels;
-    ainfo.audio_clock=0.0;
-    ainfo.queue.size=0;
-    ainfo.queue.first=ainfo.queue.last=NULL;
-    ainfo.sample_rate=freq;
-    ainfo.mutex = SDL_CreateMutex();
-    ainfo.queue.mutex = SDL_CreateMutex();
-    ainfo.ended=1;
+    ainfo = (AudioInfo *)PyMem_Malloc(sizeof(AudioInfo));
+    ainfo->channel = 0;
+    ainfo->channels = channels;
+    ainfo->audio_clock=0.0;
+    ainfo->queue.size=0;
+    ainfo->queue.first=ainfo->queue.last=NULL;
+    ainfo->sample_rate=freq;
+    ainfo->mutex = SDL_CreateMutex();
+    ainfo->queue.mutex = SDL_CreateMutex();
+    ainfo->ended=1;
     Mix_VolumeMusic (127);
     
-    //ainfo._tstate = _tstate;
+    //ainfo->_tstate = _tstate;
     return 0;
 }
 
     if (SDL_WasInit (SDL_INIT_AUDIO))
     {
         Mix_HaltMusic ();
-        queue_flush(&ainfo.queue);
+        queue_flush(&ainfo->queue);
         Mix_ChannelFinished(NULL);
         Mix_CloseAudio ();
         SDL_QuitSubSystem (SDL_INIT_AUDIO);
 int soundStart (void)
 {
     Mix_ChannelFinished(&cb_mixer);
-    ainfo.ended=0;
-    ainfo.audio_clock =0.0;
-    ainfo.playing=0;
-    ainfo.channel=0;
-    ainfo.current_frame_size=1;
+    ainfo->ended=0;
+    ainfo->audio_clock =0.0;
+    ainfo->playing=0;
+    ainfo->channel=0;
+    ainfo->current_frame_size=1;
     return 0;
 }
 
 int soundEnd   (void)
 {
-    ainfo.ended = 1;
-    ainfo.restart=1;
-    queue_flush(&ainfo.queue);
+    ainfo->ended = 1;
+    ainfo->restart=1;
+    queue_flush(&ainfo->queue);
     return 0;
 }
 
 /* Play a sound buffer, with a given length */
 int playBuffer (uint8_t *buf, uint32_t len, int channel, int64_t pts)
 {
-	//SDL_mutexP(ainfo.mutex);
+	//SDL_mutexP(ainfo->mutex);
     Mix_Chunk *mix;
     int false=0;
     int allocated=0;
     
-    if(!ainfo.ended && (ainfo.queue.size>0||ainfo.playing))
+    if(!ainfo->ended && (ainfo->queue.size>0||ainfo->playing))
     {
         if(buf)
         {   
             node->len = len;
             node->next =NULL;
             node->pts = pts;
-            queue_put(&ainfo.queue, node);
-            //SDL_mutexV(ainfo.mutex);
-            if(ainfo.channel<0)
-            	ainfo.channel=channel;
-            return ainfo.channel;
+            queue_put(&ainfo->queue, node);
+            //SDL_mutexV(ainfo->mutex);
+            if(ainfo->channel<0)
+            	ainfo->channel=channel;
+            return ainfo->channel;
         }
-        else if(!buf && ainfo.queue.size==0)
+        else if(!buf && ainfo->queue.size==0)
         {  
             
             //callback call but when the queue is empty, so we just load a short empty sound.
             buf = (uint8_t *) PyMem_Malloc((size_t)1024);
             memset(buf, 0, (size_t)1024);
-            ainfo.current_frame_size=1;
+            ainfo->current_frame_size=1;
             len=1024;
             allocated =1;
         	false=1;
         {
             //callback call, and convenienty enough, the queue has a buffer ready to go, so we copy it into buf
             BufferNode *new;
-            queue_get(&ainfo.queue, &new);
+            queue_get(&ainfo->queue, &new);
             if(!new)
             {
-                //SDL_mutexV(ainfo.mutex);
+                //SDL_mutexV(ainfo->mutex);
                 return -1;
             }
-            ainfo.current_frame_size=new->len;
+            ainfo->current_frame_size=new->len;
             buf = (uint8_t *)PyMem_Malloc((size_t)new->len);
             memcpy(buf, new->buf, new->len);
             len=new->len;
     }
     
     //we assume that if stopped is true, then
-    if(ainfo.ended && !buf)
+    if(ainfo->ended && !buf)
     {
         //callback call but when the queue is empty, so we just load a short empty sound.
         buf = (uint8_t *) PyMem_Malloc((size_t)1024);
         memset(buf, 0, (size_t)1024);
-        ainfo.current_frame_size=1;
+        ainfo->current_frame_size=1;
         len=1024;
         allocated =1;
         false=1;
     }
-    
+    else if(ainfo->ended && buf)
+    {
+    	//toss the buffer out. we don't need it.
+    	return ainfo->channel;
+    }
     //regardless of 1st call, or a callback, we load the data from buf into a newly allocated block.
     mix= (Mix_Chunk *)PyMem_Malloc(sizeof(Mix_Chunk));
     mix->allocated=0;
     memcpy(mix->abuf, buf, len);
     mix->alen = (Uint32 )len;
     mix->volume = 127;
-    ainfo.playing = 1;
- 	if(!ainfo.ended)
+    ainfo->playing = 1;
+ 	if(!ainfo->ended)
 	{
-    	int bytes_per_sec = ainfo.channels*ainfo.sample_rate*2;
-    	ainfo.audio_clock+= (double) len/(double) bytes_per_sec;
+    	int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
+    	ainfo->audio_clock+= (double) len/(double) bytes_per_sec;
 	}
-    ainfo.current_frame_size =len;
-    int chan = ainfo.channel;
+    ainfo->current_frame_size =len;
+    int chan = ainfo->channel;
     
-    //SDL_mutexV(ainfo.mutex);
+    //SDL_mutexV(ainfo->mutex);
     
     int playing = Mix_Playing(chan);
     if(playing && allocated &&false)
     	return chan;
     }
     int ret = Mix_PlayChannel(chan, mix, 0);
-    ainfo.channel = ret;
+    ainfo->channel = ret;
     if(allocated)
     {
         PyMem_Free(buf);
     int paused = Mix_Paused(channel);
     if(paused)
     {
-    	ainfo.audio_clock=ainfo.old_clock;
-    	ainfo.ended=0;
+    	ainfo->audio_clock=ainfo->old_clock;
+    	ainfo->ended=0;
         Mix_Resume(-1);
     }
     else
     {
-    	ainfo.old_clock = ainfo.audio_clock;
-    	ainfo.ended=1;
+    	ainfo->old_clock = ainfo->audio_clock;
+    	ainfo->ended=1;
         Mix_Pause(-1);
     }
     return 0;
 
 double getAudioClock(void)
 {
-    //SDL_mutexP(ainfo.mutex);//lock
-    int bytes_per_sec = ainfo.channels*ainfo.sample_rate*2;
-    double pts = ainfo.audio_clock;
-    pts -= (double) ainfo.current_frame_size/(double) bytes_per_sec;
-    //SDL_mutexV(ainfo.mutex);
+    //SDL_mutexP(ainfo->mutex);//lock
+    int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
+    double pts = ainfo->audio_clock;
+    pts -= (double) ainfo->current_frame_size/(double) bytes_per_sec;
+    //SDL_mutexV(ainfo->mutex);
     return pts;
 }
 
 int getBufferQueueSize(void)
 {
-	return ainfo.queue.size;
+	return ainfo->queue.size;
 }

File src/_gsound.h

 }
 BufferNode;
 
-
 typedef struct BufferQueue
 {
     BufferNode *first, *last;
 }
 AudioInfo;
 
-AudioInfo ainfo;
+AudioInfo *ainfo;
 
 int soundInit     (int freq, int size, int channels, int chunksize);
 int soundQuit     (void);

File src/audioconvert.h

-/*
- * audio conversion
- * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
- * Copyright (c) 2008 Peter Ross
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_AUDIOCONVERT_H
-#define AVCODEC_AUDIOCONVERT_H
-
-/**
- * @file libavcodec/audioconvert.h
- * Audio format conversion routines
- */
-
-
-#include <libavcodec/avcodec.h>
-
-
-/**
- * Generate string corresponding to the sample format with
- * number sample_fmt, or a header if sample_fmt is negative.
- *
- * @param[in] buf the buffer where to write the string
- * @param[in] buf_size the size of buf
- * @param[in] sample_fmt the number of the sample format to print the corresponding info string, or
- * a negative value to print the corresponding header.
- * Meaningful values for obtaining a sample format info vary from 0 to SAMPLE_FMT_NB -1.
- */
-void avcodec_sample_fmt_string(char *buf, int buf_size, int sample_fmt);
-
-/**
- * @return NULL on error
- */
-const char *avcodec_get_sample_fmt_name(int sample_fmt);
-
-/**
- * @return SAMPLE_FMT_NONE on error
- */
-enum SampleFormat avcodec_get_sample_fmt(const char* name);
-
-/**
- * @return NULL on error
- */
-const char *avcodec_get_channel_name(int channel_id);
-
-/**
- * Return description of channel layout
- */
-void avcodec_get_channel_layout_string(char *buf, int buf_size, int nb_channels, int64_t channel_layout);
-
-/**
- * Guess the channel layout
- * @param nb_channels
- * @param codec_id Codec identifier, or CODEC_ID_NONE if unknown
- * @param fmt_name Format name, or NULL if unknown
- * @return Channel layout mask
- */
-int64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, const char *fmt_name);
-
-
-struct AVAudioConvert;
-typedef struct AVAudioConvert AVAudioConvert;
-
-/**
- * Create an audio sample format converter context
- * @param out_fmt Output sample format
- * @param out_channels Number of output channels
- * @param in_fmt Input sample format
- * @param in_channels Number of input channels
- * @param[in] matrix Channel mixing matrix (of dimension in_channel*out_channels). Set to NULL to ignore.
- * @param flags See FF_MM_xx
- * @return NULL on error
- */
-AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels,
-                                       enum SampleFormat in_fmt, int in_channels,
-                                       const float *matrix, int flags);
-
-/**
- * Free audio sample format converter context
- */
-void av_audio_convert_free(AVAudioConvert *ctx);
-
-/**
- * Convert between audio sample formats
- * @param[in] out array of output buffers for each channel. set to NULL to ignore processing of the given channel.
- * @param[in] out_stride distance between consecutive input samples (measured in bytes)
- * @param[in] in array of input buffers for each channel
- * @param[in] in_stride distance between consecutive output samples (measured in bytes)
- * @param len length of audio frame size (measured in samples)
- */
-int av_audio_convert(AVAudioConvert *ctx,
-                           void * const out[6], const int out_stride[6],
-                     const void * const  in[6], const int  in_stride[6], int len);
-
-#endif /* AVCODEC_AUDIOCONVERT_H */

File src/gmovie.c

 	int minute=0;
 	int second=0;
 	int reverse=0;
+	//int relative=0;
 	char *keywords[4] = {"second", "minute", "hour", "reverse"};
 	if(!PyArg_ParseTupleAndKeywords(args, kwds, "|iiii", keywords, &second, &minute, &hour, &reverse))
 	{