Commits

Lenard Lindstrom  committed 23f848d

Clean up white space in C source files

For the *.c and *.h files in the src and src/freetype directories replace
tabs with spaces and remove trailing whitespace.

  • Participants
  • Parent commits 8610c95

Comments (0)

Files changed (64)

File src/_camera.c

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  * pixelformats, add them to v4l2_init_device and v4l2_process_image, and add
  * functions to convert the format to packed RGB, YUV, and HSV.
  */
- 
+
 #include "camera.h"
 #include "pgcompat.h"
 
 
 /*
  * Functions available to pygame users.  The idea is to make these as simple as
- * possible, and merely have them call functions specific to the type of 
+ * possible, and merely have them call functions specific to the type of
  * camera being used to do all the real work.  It currently just calls v4l2_*
  * functions, but it could check something like self->cameratype and depending
  * on the result, call v4l, v4l2, vfw, or other functions.
     surfobj2 = NULL;
 
     /*get all the arguments*/
-    if (!PyArg_ParseTuple (arg, "O!s|O!", &PySurface_Type, &surfobj, 
+    if (!PyArg_ParseTuple (arg, "O!s|O!", &PySurface_Type, &surfobj,
                            &color, &PySurface_Type, &surfobj2))
         return NULL;
 
     }
 
     surf = PySurface_AsSurface (surfobj);
-	
+
     if (!surfobj2) {
         newsurf = SDL_CreateRGBSurface (0, surf->w, surf->h,
             surf->format->BitsPerPixel, surf->format->Rmask,
 
     /* check to see if the size is the same. */
     if (newsurf->w != surf->w || newsurf->h != surf->h)
-        return RAISE (PyExc_ValueError, 
+        return RAISE (PyExc_ValueError,
                       "Surfaces not the same width and height.");
 
     /* check to see if the format of the surface is the same. */
     PyObject* string;
     char** devices;
     int num_devices, i;
-    
+
     num_devices = 0;
     ret_list = NULL;
     ret_list = PyList_New (0);
     if (!ret_list)
         return NULL;
-    
+
     #if defined(__unix__)
     devices = v4l2_list_cameras(&num_devices);
     # elif defined(PYGAME_MAC_CAMERA_OLD)
     devices = mac_list_cameras(&num_devices);
     # endif
-    
+
     for(i = 0; i < num_devices; i++) {
         string = Text_FromUTF8(devices[i]);
         PyList_Append(ret_list, string);
         free(devices[i]);
     }
     free(devices);
-    
+
     return ret_list;
 #else
-	Py_RETURN_NONE;
+    Py_RETURN_NONE;
 #endif
 }
 
 /* get_controls() - gets current values of user controls */
 /* TODO: Support brightness, contrast, and other common controls */
 PyObject* camera_get_controls (PyCameraObject* self) {
-#if defined(__unix__)    
+#if defined(__unix__)
     int value;
     if (v4l2_get_control(self->fd, V4L2_CID_HFLIP, &value))
         self->hflip = value;
-    
+
     if (v4l2_get_control(self->fd, V4L2_CID_VFLIP, &value))
         self->vflip = value;
 
     if (v4l2_get_control(self->fd, V4L2_CID_BRIGHTNESS, &value))
         self->brightness = value;
-    
+
     return Py_BuildValue ("(NNN)", PyBool_FromLong(self->hflip), PyBool_FromLong(self->vflip), PyInt_FromLong(self->brightness));
 #elif defined(PYGAME_MAC_CAMERA_OLD)
     return Py_BuildValue ("(NNN)", PyBool_FromLong(self->hflip), PyBool_FromLong(self->vflip), PyInt_FromLong(-1));
     hflip = self->hflip;
     vflip = self->vflip;
     brightness = self->brightness;
-    
+
     if (!PyArg_ParseTupleAndKeywords(arg, kwds, "|iii", kwids, &hflip, &vflip, &brightness))
         return NULL;
-        
+
 /* #if defined(__unix__)         */
     if (v4l2_set_control(self->fd, V4L2_CID_HFLIP, hflip))
         self->hflip = hflip;
-        
+
     if (v4l2_set_control(self->fd, V4L2_CID_VFLIP, vflip))
         self->vflip = vflip;
-        
+
     if (v4l2_set_control(self->fd, V4L2_CID_BRIGHTNESS, brightness))
         self->brightness = brightness;
-           
+
     return Py_BuildValue ("(NNN)", PyBool_FromLong(self->hflip), PyBool_FromLong(self->vflip), PyInt_FromLong(self->brightness));
 
 #elif defined(PYGAME_MAC_CAMERA_OLD)
     hflip = self->hflip;
     vflip = self->vflip;
     brightness = -1;
-    
+
     if (!PyArg_ParseTupleAndKeywords(arg, kwds, "|iii", kwids, &hflip, &vflip, &brightness))
         return NULL;
-        
+
     self->hflip = hflip;
     self->vflip = vflip;
-           
+
     return Py_BuildValue ("(NNN)", PyBool_FromLong(self->hflip), PyBool_FromLong(self->vflip), PyInt_FromLong(-1));
 #endif
     Py_RETURN_NONE;
 #if defined(__unix__)
     SDL_Surface* surf = NULL;
     PyObject *surfobj = NULL;
-    
+
     if (!PyArg_ParseTuple (arg, "|O!", &PySurface_Type, &surfobj))
         return NULL;
 
     if (!surfobj) {
-        surf = SDL_CreateRGBSurface (0, self->width, self->height, 24, 0xFF<<16, 
+        surf = SDL_CreateRGBSurface (0, self->width, self->height, 24, 0xFF<<16,
                                  0xFF<<8, 0xFF, 0);
     } else {
         surf = PySurface_AsSurface (surfobj);
     }
-    
+
     if (!surf)
         return NULL;
-        
+
     if (surf->w != self->width || surf->h != self->height) {
-        return RAISE (PyExc_ValueError, 
+        return RAISE (PyExc_ValueError,
                       "Destination surface not the correct width or height.");
     }
-    
+
     Py_BEGIN_ALLOW_THREADS;
     if (!v4l2_read_frame(self, surf))
         return NULL;
     Py_END_ALLOW_THREADS;
-    
+
     if (!surf)
         return NULL;
-        
+
     if (surfobj) {
         Py_INCREF (surfobj);
         return surfobj;
                 0xFF<<8,
                 0xFF,
                 0);
-            
+
         } else {
             surf = PySurface_AsSurface(surfobj);
         }
             return NULL;
 
         if (surf->w != self->boundsRect.right || surf->h != self->boundsRect.bottom) {
-            return RAISE (PyExc_ValueError, 
+            return RAISE (PyExc_ValueError,
                           "Destination surface not the correct width or height.");
         }
         /*is dit nodig op osx... */
         Py_BEGIN_ALLOW_THREADS;
-        
+
         if (!mac_read_frame(self, surf))
             return NULL;
         Py_END_ALLOW_THREADS;
 /*
  * Pixelformat conversion functions
  */
- 
+
 /* converts from rgb Surface to yuv or hsv */
 /* TODO: Allow for conversion from yuv and hsv to all */
-void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace) {   
+void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace) {
     switch (cspace) {
         case YUV_OUT:
             rgb_to_yuv (src->pixels, dst->pixels, src->h * src->w, 0, src->format);
 }
 
 /* converts packed rgb to packed hsv. formulas modified from wikipedia */
-void rgb_to_hsv (const void* src, void* dst, int length, 
+void rgb_to_hsv (const void* src, void* dst, int length,
                  unsigned long source, SDL_PixelFormat* format)
 {
     Uint8 *s8, *d8;
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     /* you could stick the if statement inside the loop, but I'm sacrificing a
        a few hundred bytes for a little performance */
     if (source == V4L2_PIX_FMT_RGB444) {
 
 /* convert packed rgb to yuv. Note that unlike many implementations of YUV,
    this has a full range of 0-255 for Y, not 16-235. Formulas from wikipedia */
-void rgb_to_yuv (const void* src, void* dst, int length, 
+void rgb_to_yuv (const void* src, void* dst, int length,
                  unsigned long source, SDL_PixelFormat* format)
 {
     Uint8 *s8, *d8;
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
-    if (source == V4L2_PIX_FMT_RGB444) {    
+
+    if (source == V4L2_PIX_FMT_RGB444) {
         while (length--) {
             p1 = *s8++;
             p2 = *s8++;
     Uint32 *d32;
     Uint8 p1, p2, r, g, b;
     int rshift, gshift, bshift, rloss, gloss, bloss;
-    
+
     s = (Uint8 *) src;
     rshift = format->Rshift;
     gshift = format->Gshift;
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     switch (format->BytesPerPixel) {
         case 1:
             d8 = (Uint8 *) dst;
             }
             break;
         case 3:
-            d8 = (Uint8 *) dst;    
+            d8 = (Uint8 *) dst;
             while (length--) {
                 p1 = *s++;
                 p2 = *s++;
             }
             break;
     }
-}   
+}
 
 /* convert from 4:2:2 YUYV interlaced to RGB */
 /* colorspace conversion routine from libv4l. Licensed LGPL 2.1
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     d8 = (Uint8 *) dst;
     d16 = (Uint16 *) dst;
     d32 = (Uint32 *) dst;
         r1 = SAT2(y1 + v1);
         g1 = SAT2(y1 - rg);
         b1 = SAT2(y1 + u1);
-  
+
         r2 = SAT2(y2 + v1);
         g2 = SAT2(y2 - rg);
         b2 = SAT2(y2 + u1);
     Uint32 *d32;
     int i = length >> 1;
     int rshift, gshift, bshift, rloss, gloss, bloss;
-    
+
     rshift = format->Rshift;
     gshift = format->Gshift;
     bshift = format->Bshift;
     }
 }
 
-/* Converts from 8 bit Bayer (BA81) to rgb24 (RGB3), based on: 
+/* Converts from 8 bit Bayer (BA81) to rgb24 (RGB3), based on:
  * Sonix SN9C101 based webcam basic I/F routines
  * Copyright (C) 2004 Takafumi Mizuno <taka-qce@ls-a.jp>
  *
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     d8 = (Uint8 *) dst;
     d16 = (Uint16 *) dst;
     d32 = (Uint32 *) dst;
-        
+
     while (i--) {
         if ( (i/width) % 2 == 0 ) {
             /* even row (BGBGBGBG)*/
             } else {
                 /* R */
                 if ( i < (width*(height-1)) && ((i % width) < (width-1)) ) {
-                    b = (*(rawpt-width-1)+*(rawpt-width+1)+                    
+                    b = (*(rawpt-width-1)+*(rawpt-width+1)+
                     *(rawpt+width-1)+*(rawpt+width+1))/4;  /* B */
                     g = (*(rawpt-1)+*(rawpt+1)+
                     *(rawpt-width)+*(rawpt+width))/4;      /* G */
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     /* see http://en.wikipedia.org/wiki/YUV for an explanation of YUV420 */
     y1 = (Uint8*) src;
     y2 = y1 + width;
     d32_1 = (Uint32 *) dst;
     d32_2 = d32_1 + width;
 
-    
+
     /* for the sake of speed, the nested while loops are inside of the switch
       statement for the different surface bit depths */
     switch(format->BytesPerPixel) {
                     /* These formulas are from libv4l */
                     u1 = (((*u - 128) << 7) +  (*u - 128)) >> 6;
                     rg = (((*u - 128) << 1) +  (*u - 128) +
-                   		((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
+                           ((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
                     v1 = (((*v - 128) << 1) +  (*v - 128)) >> 1;
                     u++;
                     v++;
                     /* do the pixels on row 1 */
                     y = *y1++;
-                    *d8_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d8_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y1++;
-                    *d8_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d8_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     /* do the pixels on row 2 */
                     y = *y2++;
-                    *d8_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d8_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y2++;
-                    *d8_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d8_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                 }
                 /* y2 is at the beginning of a new row, make it the new row 1 */
                     /* These formulas are from libv4l */
                     u1 = (((*u - 128) << 7) +  (*u - 128)) >> 6;
                     rg = (((*u - 128) << 1) +  (*u - 128) +
-                   		((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
+                           ((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
                     v1 = (((*v - 128) << 1) +  (*v - 128)) >> 1;
                     u++;
                     v++;
                     /* do the pixels on row 1 */
                     y = *y1++;
-                    *d16_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d16_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y1++;
-                    *d16_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d16_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     /* do the pixels on row 2 */
                     y = *y2++;
-                    *d16_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d16_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y2++;
-                    *d16_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d16_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                 }
                 /* y2 is at the beginning of a new row, make it the new row 1 */
                     /* These formulas are from libv4l */
                     u1 = (((*u - 128) << 7) +  (*u - 128)) >> 6;
                     rg = (((*u - 128) << 1) +  (*u - 128) +
-                   		((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
+                           ((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
                     v1 = (((*v - 128) << 1) +  (*v - 128)) >> 1;
                     u++;
                     v++;
                     /* do the pixels on row 1 */
                     y = *y1++;
-                    *d8_1++ = SAT2(y + u1); 
+                    *d8_1++ = SAT2(y + u1);
                     *d8_1++ = SAT2(y - rg);
                     *d8_1++ = SAT2(y + v1);
                     y = *y1++;
-                    *d8_1++ = SAT2(y + u1); 
+                    *d8_1++ = SAT2(y + u1);
                     *d8_1++ = SAT2(y - rg);
                     *d8_1++ = SAT2(y + v1);
                     /* do the pixels on row 2 */
                     y = *y2++;
-                    *d8_2++ = SAT2(y + u1); 
+                    *d8_2++ = SAT2(y + u1);
                     *d8_2++ = SAT2(y - rg);
                     *d8_2++ = SAT2(y + v1);
                     y = *y2++;
-                    *d8_2++ = SAT2(y + u1); 
+                    *d8_2++ = SAT2(y + u1);
                     *d8_2++ = SAT2(y - rg);
                     *d8_2++ = SAT2(y + v1);
                 }
                     /* These formulas are from libv4l */
                     u1 = (((*u - 128) << 7) +  (*u - 128)) >> 6;
                     rg = (((*u - 128) << 1) +  (*u - 128) +
-                   		((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
+                           ((*v - 128) << 2) + ((*v - 128) << 1)) >> 3;
                     v1 = (((*v - 128) << 1) +  (*v - 128)) >> 1;
                     u++;
                     v++;
                     /* do the pixels on row 1 */
                     y = *y1++;
-                    *d32_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d32_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y1++;
-                    *d32_1++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d32_1++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     /* do the pixels on row 2 */
                     y = *y2++;
-                    *d32_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d32_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                     y = *y2++;
-                    *d32_2++ = ((SAT2(y + v1) >> rloss) << rshift) | 
-                      ((SAT2(y - rg) >> gloss) << gshift) | 
+                    *d32_2++ = ((SAT2(y + v1) >> rloss) << rshift) |
+                      ((SAT2(y - rg) >> gloss) << gshift) |
                       ((SAT2(y + u1) >> bloss) << bshift);
                 }
                 /* y2 is at the beginning of a new row, make it the new row 1 */
 }
 
 /* turn yuv420 into packed yuv. */
-void yuv420_to_yuv (const void* src, void* dst, int width, int height, SDL_PixelFormat* format) {   
+void yuv420_to_yuv (const void* src, void* dst, int width, int height, SDL_PixelFormat* format) {
     const Uint8 *y1, *y2, *u, *v;
     Uint8 *d8_1, *d8_2;
     Uint16 *d16_1, *d16_2;
     rloss = format->Rloss;
     gloss = format->Gloss;
     bloss = format->Bloss;
-    
+
     d8_1 = (Uint8 *) dst;
     d8_2 = d8_1 + (format->BytesPerPixel == 3 ? width*3 : 3);
     d16_1 = (Uint16 *) dst;
     y1 = (Uint8 *) src;
     y2 = y1 + width;
     u = y1 + width * height;
-    v = u + (width * height) / 4;    
+    v = u + (width * height) / 4;
     j = height / 2;
 
     switch (format->BytesPerPixel) {
     0,
     camera_dealloc,
     0,
-    0,                      /*camera_getattr */
-    NULL,			        /*setattr*/
-    NULL,			        /*compare*/
-    NULL,			        /*repr*/
-    NULL,			        /*as_number*/
-    NULL,			        /*as_sequence*/
-    NULL,			        /*as_mapping*/
-    (hashfunc)NULL, 		/*hash*/
-    (ternaryfunc)NULL,		/*call*/
-    (reprfunc)NULL, 		/*str*/
+    0,                          /*camera_getattr */
+    NULL,                       /*setattr*/
+    NULL,                       /*compare*/
+    NULL,                       /*repr*/
+    NULL,                       /*as_number*/
+    NULL,                       /*as_sequence*/
+    NULL,                       /*as_mapping*/
+    (hashfunc)NULL,             /*hash*/
+    (ternaryfunc)NULL,          /*call*/
+    (reprfunc)NULL,             /*str*/
     0L,0L,0L,0L,
-    DOC_PYGAMECAMERACAMERA,  /* Documentation string */
+    DOC_PYGAMECAMERACAMERA,     /* Documentation string */
     0,                          /* tp_traverse */
     0,                          /* tp_clear */
     0,                          /* tp_richcompare */
     char* dev_name = NULL;
     char* color = NULL;
     PyCameraObject *cameraobj;
-    
+
     w = DEFAULT_WIDTH;
     h = DEFAULT_HEIGHT;
-    
+
     if (!PyArg_ParseTuple(arg, "s|(ii)s", &dev_name, &w, &h, &color))
         return NULL;
-    
+
     cameraobj = PyObject_NEW (PyCameraObject, &PyCamera_Type);
-    
+
     if (cameraobj) {
         cameraobj->device_name = (char*) malloc((strlen(dev_name)+1)*sizeof(char));
         strcpy(cameraobj->device_name, dev_name);
         cameraobj->brightness = 0;
         cameraobj->fd = -1;
     }
-    
+
     return (PyObject*)cameraobj;
-    
+
 # elif defined(PYGAME_MAC_CAMERA_OLD)
     int w, h;
     char* dev_name = NULL;
     char* color = NULL;
     PyCameraObject *cameraobj;
-    
+
     w = DEFAULT_WIDTH;
     h = DEFAULT_HEIGHT;
-    
+
     if (!PyArg_ParseTuple(arg, "s|(ii)s", &dev_name, &w, &h, &color))
         return NULL;
-    
+
     cameraobj = PyObject_NEW (PyCameraObject, &PyCamera_Type);
-    
+
     if (cameraobj) {
         cameraobj->device_name = (char*) malloc((strlen(dev_name)+1)*sizeof(char));
         strcpy(cameraobj->device_name, dev_name);
         cameraobj->component = NULL;
         cameraobj->channel = NULL;
         cameraobj->gworld = NULL;
-        cameraobj->boundsRect.top = 0;                   
+        cameraobj->boundsRect.top = 0;
         cameraobj->boundsRect.left = 0;
         cameraobj->boundsRect.bottom = h;
         cameraobj->boundsRect.right = w;
         cameraobj->hflip = 0;
         cameraobj->vflip = 0;
     }
-    
-    return (PyObject*)cameraobj;    
+
+    return (PyObject*)cameraobj;
     # endif
 }
 
     {"Camera", (PyCFunction) Camera, METH_VARARGS, DOC_PYGAMECAMERACAMERA },
     {NULL, NULL, 0, NULL }
 };
- 
+
 MODINIT_DEFINE (_camera) {
     PyObject *module;
     /* imported needed apis; Do this first so if there is an error
      * the module is not loaded.
      */
-     
+
 #if PY3
     static struct PyModuleDef _module = {
         PyModuleDef_HEAD_INIT,
     {
         MODINIT_ERROR;
     }
-  
+
     /* create the module */
 #if PY3
     module = PyModule_Create(&_module);

File src/_gcommand.c

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
 /* command queue management routines... I make a lot of queues, don't I? */
 void addCommand(CommandQueue *q, Command *comm)
 {
-	SDL_LockMutex(q->q_mutex);
-	comm->next=NULL;
-	q->registry[comm->type]++;
-	if(!q->size)
-	{
-		q->first=comm;
-		q->size++;
-		SDL_UnlockMutex(q->q_mutex);
-		return;
-	}
-	if(q->size==1)
-	{
-		q->last=comm;
-		q->first->next=comm;
-		q->size++;
-		SDL_UnlockMutex(q->q_mutex);
-		return;
-	}
-	q->last->next=comm;
-	q->last=comm;
-	q->size++;
-	SDL_UnlockMutex(q->q_mutex);
-	return;
+    SDL_LockMutex(q->q_mutex);
+    comm->next=NULL;
+    q->registry[comm->type]++;
+    if(!q->size)
+    {
+        q->first=comm;
+        q->size++;
+        SDL_UnlockMutex(q->q_mutex);
+        return;
+    }
+    if(q->size==1)
+    {
+        q->last=comm;
+        q->first->next=comm;
+        q->size++;
+        SDL_UnlockMutex(q->q_mutex);
+        return;
+    }
+    q->last->next=comm;
+    q->last=comm;
+    q->size++;
+    SDL_UnlockMutex(q->q_mutex);
+    return;
 }
 
 Command *getCommand(CommandQueue *q)
 {
-	SDL_LockMutex(q->q_mutex);
-	Command *comm;
-	if(!q->last && q->first)
-	{
-		comm=q->first;
-		q->size--;
-		SDL_UnlockMutex(q->q_mutex);
-		return comm;
-	}
-	else if (!q->last && !q->first)
-	{
-		SDL_UnlockMutex(q->q_mutex);
-		return NULL;
-	}
-	comm=q->first;
-	q->first=q->first->next;
-	q->size--;
-	SDL_UnlockMutex(q->q_mutex);
-	return comm;
+    SDL_LockMutex(q->q_mutex);
+    Command *comm;
+    if(!q->last && q->first)
+    {
+        comm=q->first;
+        q->size--;
+        SDL_UnlockMutex(q->q_mutex);
+        return comm;
+    }
+    else if (!q->last && !q->first)
+    {
+        SDL_UnlockMutex(q->q_mutex);
+        return NULL;
+    }
+    comm=q->first;
+    q->first=q->first->next;
+    q->size--;
+    SDL_UnlockMutex(q->q_mutex);
+    return comm;
 }
 
 inline int hasCommand(CommandQueue *q)
 {
-	if(q->size>0)
-		return 1;
-	return 0;
+    if(q->size>0)
+        return 1;
+    return 0;
 }
 
 void flushCommands(CommandQueue *q)
 {
-	SDL_LockMutex(q->q_mutex);
-	Command *prev;
-	Command *cur = q->first;
-	while(cur!=NULL)
-	{
-		prev=cur;
-		cur=cur->next;
-		PyMem_Free(prev);
-		q->size--;
-	}
-	SDL_UnlockMutex(q->q_mutex);
+    SDL_LockMutex(q->q_mutex);
+    Command *prev;
+    Command *cur = q->first;
+    while(cur!=NULL)
+    {
+        prev=cur;
+        cur=cur->next;
+        PyMem_Free(prev);
+        q->size--;
+    }
+    SDL_UnlockMutex(q->q_mutex);
 }
 
-/* registers a command with a particular movie object's command queue. 
- *  Basically, this means, theoretically, different movie objects could have different commands... 
+/* registers a command with a particular movie object's command queue.
+ *  Basically, this means, theoretically, different movie objects could have different commands...
  */
 int registerCommand(CommandQueue *q)
 {
-	//int cur = q->reg_ix;
-	if(q->reg_ix>=1024)
-		q->reg_ix=0;
-	q->registry[q->reg_ix]=0;
-	q->reg_ix++;
-	return q->reg_ix-1;
+    //int cur = q->reg_ix;
+    if(q->reg_ix>=1024)
+        q->reg_ix=0;
+    q->registry[q->reg_ix]=0;
+    q->reg_ix++;
+    return q->reg_ix-1;
 }

File src/_gcommand.h

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  *  of a video file. Any format supported by ffmpeg is supported by this
  *  video player. Any bugs, please email trinioler@gmail.com :)
  */
- 
+
 
 #ifndef _GCOMMAND_H_
 #define _GCOMMAND_H_
 
 
 /* Documentation: Command Queue Infrastructure
- *  This lower-level infrastructure code is meant to provide greater stability and 
+ *  This lower-level infrastructure code is meant to provide greater stability and
  *  thread safety to the _movie module. Since we cannot manipulate the SDL event queue
- *  we have to use our own hand-rolled solution. It is just a singly linked list, 
+ *  we have to use our own hand-rolled solution. It is just a singly linked list,
  *  with references to the first and last item, allowing us to do a
  *  simple push/pop implementation. The items in the list are structs that have
- *  all the first members of the default Command struct, making it safe to 
- *  cast the pointers from the pseudo-Command structs to a pointer to 
+ *  all the first members of the default Command struct, making it safe to
+ *  cast the pointers from the pseudo-Command structs to a pointer to
  *  Command struct. Realistically, you can cast any pointer to any other kind of
  *  pointer(as long as they are the same size!), and C will let you. This is dangerous,
- *  and should only be done very, very carefully. This facility is only useful 
+ *  and should only be done very, very carefully. This facility is only useful
  *  when you need a OO approach, like we did here.
- *  
- *  When making new commands, use the FULL_COMMAND macro, and add a line to registerCommands 
- *  in _gmovie.c to add a new type value. This also enables future proofing as any changes to 
+ *
+ *  When making new commands, use the FULL_COMMAND macro, and add a line to registerCommands
+ *  in _gmovie.c to add a new type value. This also enables future proofing as any changes to
  *  the Command struct will be opaque to the user... mostly.
- * 
+ *
  *  -Tyler Laing, August 4th, 2009
  */
 
 typedef struct Command
 {
-	int type;
-	struct Command *next;
+    int type;
+    struct Command *next;
 } Command;
 
 #define FULL_COMMAND \
-	int type;\
-	struct Command *next;
+    int type;\
+    struct Command *next;
 
 typedef struct CommandQueue
 {
-	int size;
-	SDL_mutex *q_mutex;
-	Command *first;
-	Command *last;
-	int registry[1024];
-	int reg_ix;
+    int size;
+    SDL_mutex *q_mutex;
+    Command *first;
+    Command *last;
+    int registry[1024];
+    int reg_ix;
 } CommandQueue;
 
 
 void flushCommands(CommandQueue *q);
 int registerCommand(CommandQueue *q);
 
-//convience function for allocating a new command, and ensuring its type is set properly. 
+//convience function for allocating a new command, and ensuring its type is set properly.
 #define ALLOC_COMMAND(command, name) command* name = (command *)PyMem_Malloc(sizeof(command)); name->type=movie->command##Type;
 
 #endif /*_GCOMMAND_H_*/

File src/_gmovie.c

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  *  of a video file. Any format supported by ffmpeg is supported by this
  *  video player. Any bugs, please email trinioler@gmail.com :)
  */
- 
+
 
 #ifndef _GMOVIE_H_
 #include "_gmovie.h"
 void WritePicture2Surface(AVPicture *picture, SDL_Surface *surface, int w, int h)
 {
     /* AVPicture initialized with PIX_FMT_RGBA only fills pict->data[0]
-     *  This however is only in {R,G,B, A} format. So we just copy the data over. 
+     *  This however is only in {R,G,B, A} format. So we just copy the data over.
      */
     /* Loop unrolling:
-     * 	We define a blocksize, and so we increment the index counter by blocksize*rgbstep
-     * 	All common resolutions are nicely divisible by 8(because 8 is a power of 2...)
-     *  An uncommon resolution  could have between 1 and 7 bytes left to convert... 
+     *     We define a blocksize, and so we increment the index counter by blocksize*rgbstep
+     *     All common resolutions are nicely divisible by 8(because 8 is a power of 2...)
+     *  An uncommon resolution  could have between 1 and 7 bytes left to convert...
      *   which I guess we'll leave alone. Its just 1-2 pixels in the lower right corner.
-     *  So we repeat the same actions blocksize times.  
+     *  So we repeat the same actions blocksize times.
      */
     int64_t   blocksize     = 8;
     uint32_t *rgb           = surface->pixels;
     //RELEASEGIL
     //set up the aspect ratio values..
 #if LIBAVFORMAT_VERSION_INT>= 3415808
-    
+
         if (movie->video_st->sample_aspect_ratio.num)
             aspect_ratio = av_q2d(movie->video_st->sample_aspect_ratio);
         else if (movie->video_st->codec->sample_aspect_ratio.num)
             aspect_ratio = 0;
         if (aspect_ratio <= 0.0)
             aspect_ratio = 1.0;
-    
+
 #else
-    
+
         aspect_ratio = 1.0;
-    
+
 #endif
     //then we load in width and height values based on the aspect ration and w/h.
     int w=0;
     y = (vp->height - height) / 2;
 
     //we set the rect to have the values we need for blitting/overlay display
-    	
-	    vp->dest_rect.x = vp->xleft + x;
-    	vp->dest_rect.y = vp->ytop  + y;
 
-    	vp->dest_rect.w=width;
- 	   	vp->dest_rect.h=height;
-	
+    vp->dest_rect.x = vp->xleft + x;
+    vp->dest_rect.y = vp->ytop  + y;
+
+    vp->dest_rect.w=width;
+    vp->dest_rect.h=height;
+
     if (vp->dest_overlay && vp->overlay>0 && !movie->skip_frame)
     {
         //SDL_Delay(10);
                 {
                     GRABGIL
                     RAISE(PyExc_SDLError, "No video surface given."); //ideally this should have
-                    RELEASEGIL										  // happen if there's some cleaning up.
+                    RELEASEGIL                                          // happen if there's some cleaning up.
                     return -1;
                 }
                 if(screen->h!=h)
             //printf("asdf\n");
             /* TODO: attempt to fix below... but this is just wrong... */
             data_size = sizeof(movie->audio_buf1);
-            len1 = avcodec_decode_audio4(dec, 
-                                         frame, 
-                                         &data_size, 
+            len1 = avcodec_decode_audio4(dec,
+                                         frame,
+                                         &data_size,
                                          pkt);
 
             movie->audio_pkt_size= frame->nb_samples;
             if (len1 < 0)
             {
-                // if error, we skip the frame 
+                // if error, we skip the frame
                 movie->audio_pkt_size = 0;
                 break;
             }
 
             /* TODO: FIXME uses old functions.
             data_size = sizeof(movie->audio_buf1);
-            len1 += avcodec_decode_audio2(dec, 
-                                          (int16_t *)movie->audio_buf1, 
-                                          &data_size, 
-                                          movie->audio_pkt_data, 
+            len1 += avcodec_decode_audio2(dec,
+                                          (int16_t *)movie->audio_buf1,
+                                          &data_size,
+                                          movie->audio_pkt_data,
                                           movie->audio_pkt_size);
             if (len1 < 0)
             {
-                // if error, we skip the frame 
+                // if error, we skip the frame
                 movie->audio_pkt_size = 0;
                 break;
             }
     {
         if(threaded)
         {
-	    GRABGIL
-	} 
+            GRABGIL
+        }
         Py_DECREF(movie);
         if(threaded)
-	{
+        {
             RELEASEGIL
-	}
+        }
         return -1;
     }
     initialize_codec(movie, stream_index, threaded);
             Py_DECREF(movie);
         }
         if(threaded)
-        {        
+        {
             RELEASEGIL
-	}
+    }
         return;
     }
     movie->replay=1;
     if(threaded)
     {
         GRABGIL
-    }    
+    }
     if(movie->ob_refcnt!=0)
     {
         Py_DECREF( movie);
     }
     if(threaded)
-    {    
-    	RELEASEGIL
-    }    
+    {
+        RELEASEGIL
+    }
 }
 void stream_component_close(PyMovie *movie, int stream_index, int threaded)
 {
     if (stream_index < 0 || stream_index >= ic->nb_streams)
     {
         if(threaded)
-	{
-		GRABGIL
-	}
-    	if(movie->ob_refcnt!=0)
-    	{
-     	   Py_DECREF(movie);
-    	}
+        {
+            GRABGIL
+        }
+        if(movie->ob_refcnt!=0)
+        {
+            Py_DECREF(movie);
+        }
         if(threaded)
-    	{
-        	RELEASEGIL
-    	}
+        {
+            RELEASEGIL
+        }
         return;
     }
     enc = ic->streams[stream_index]->codec;
     if(threaded)
     {
         RELEASEGIL
-    }    
+    }
 }
 
 void stream_open(PyMovie *movie, const char *filename, AVInputFormat *iformat, int threaded)
 
     int wanted_video_stream=1;
     int wanted_audio_stream=1;
-    
+
     /* if seeking requested, we execute it */
     if (movie->start_time != AV_NOPTS_VALUE)
     {
             if(threaded)
             {
                 GRABGIL
- 	    }           
-	    PyErr_Format(PyExc_IOError, "%s: could not seek to position %0.3f", movie->filename, (double)timestamp/AV_TIME_BASE);
+            }
+            PyErr_Format(PyExc_IOError, "%s: could not seek to position %0.3f",
+                         movie->filename, (double)timestamp/AV_TIME_BASE);
             if(threaded)
-	    {
+            {
                 RELEASEGIL
- 	    }           
-	}
+            }
+        }
     }
     for(i = 0; i < movie->ic->nb_streams; i++)
     {
             break;
         case PYG_MEDIA_TYPE_SUBTITLE:
             //if(wanted_subti_stream -- >= 0 && !movie->subtitle_disable)
-            //	subtitle_index=i;
+            //    subtitle_index=i;
         default:
             break;
         }
         }
         PyErr_Format(PyExc_IOError, "There was a problem opening up %s, due to %i", movie->filename, err);
         if(threaded)
- 	{       
-	    RELEASEGIL
-	}           
-	ret = -1;
+        {
+            RELEASEGIL
+        }
+        ret = -1;
         goto fail;
     }
     err = av_find_stream_info(ic);
         {
             GRABGIL
         }
-            PyErr_Format(PyExc_IOError, "%s: could not find codec parameters", movie->filename);
+        PyErr_Format(PyExc_IOError, "%s: could not find codec parameters", movie->filename);
         if(threaded)
- 	{       
-	    RELEASEGIL
-	}           
+        {
+            RELEASEGIL
+        }
         ret = -1;
         goto fail;
     }
     enc->skip_idct= AVDISCARD_DEFAULT;
     enc->skip_loop_filter= AVDISCARD_DEFAULT;
 #if LIBAVCODEC_VERSION_INT>=3412992 //(52<<16)+(20<<8)+0 ie 52.20.0
-    
+
         enc->error_recognition= FF_ER_CAREFUL;
 #endif
     enc->error_concealment= 3;
         if(threaded)
         {
             GRABGIL
-    	}    
+        }
         Py_DECREF(movie);
         if(threaded)
-	{        
-    	    RELEASEGIL
+        {
+            RELEASEGIL
         }
-     }
+    }
 }
 
 void stream_cycle_channel(PyMovie *movie, int codec_type)
             initialize_context(movie, 1);
         /*starting a stream is different from opening it.
          * *nix weenies see that they are the same, when really, they're not.
-         * In this case, starting a stream means we set all the values and stuff we need 
+         * In this case, starting a stream means we set all the values and stuff we need
          * to play it as if it had just started, like initial time values, etc.
          */
 
         /* We've returned... for any number of reasons, like stopping, we're finished
          * or as an omen of the impending apocalypse. I recommend you use the
          * necronomicon to diagnose this.
-         * 
-         * And now, we need to end a stream. Again, this is different from closing a stream. 
+         *
+         * And now, we need to end a stream. Again, this is different from closing a stream.
          * This just sets various variables and structs to their ended state, but they're still
-         * ready to be started again. We only close streams when we dealloc the movie. We want 
+         * ready to be started again. We only close streams when we dealloc the movie. We want
          * to be able to reuse the memory.  Every memory page we reuse is another
          * electronic tree saved!
          */
 
             }
             /*if (SDL_WasInit (SDL_INIT_VIDEO))
-            	SDL_QuitSubSystem (SDL_INIT_VIDEO);*/
+                SDL_QuitSubSystem (SDL_INIT_VIDEO);*/
         }
         if(state==-1)
         {
-        	if(PyErr_Occurred())
-        	{
-        		PyErr_Print();
-        	}
-        	break;
+            if(PyErr_Occurred())
+            {
+                PyErr_Print();
+            }
+            break;
         }
     }
     GRABGIL
     /* This is the most-hardworking function in the entire module. So respect it!
      * He's the blue collar worker amongst the white-collar data pushing functions. There's
      * a few other blue collar functions, but decoder is boss o' them all.
-     * 
+     *
      * Here's decoder's work schedule:
-     * 	loop:
-     * 		checks status
-     * 		deals with seeking
-     * 		handles eofs and stuff
-     * 		read frame
-     * 		load frame into A/V queue
-     * 		video_render()
-     * 		//audio_thread()
-     * 		first two loops:
-     * 			video_refresh_timer <--- we do this or else we'd never start display frames
-     * 		if timing AND timing >=now:
-     * 			video_display
-     * 
+     *     loop:
+     *         checks status
+     *         deals with seeking
+     *         handles eofs and stuff
+     *         read frame
+     *         load frame into A/V queue
+     *         video_render()
+     *         //audio_thread()
+     *         first two loops:
+     *             video_refresh_timer <--- we do this or else we'd never start display frames
+     *         if timing AND timing >=now:
+     *             video_display
+     *
      * And thats it! decoder does this till it ends. Then it cleans some stuff up, and exits gracefully for most any situation.
      */
     PyMovie *movie = arg;
     ic=movie->ic;
     int co=0;
     int video_packet=0;
-	//we do video open as a batch, instead of on-demand. Much better performance that way.
+    //we do video open as a batch, instead of on-demand. Much better performance that way.
     video_open(movie, 0);
     movie->last_showtime = av_gettime()/1000.0;
     int seeking =0;
                         AVFrame *frame;
                         frame=avcodec_alloc_frame();
                         /* TODO: FIXME avcodec_decode_video is old api.
-                        bytesDecoded = avcodec_decode_video(movie->video_st->codec, 
-                                                            frame, 
-                                                            &frameFinished, 
-                                                            pkt->data, 
+                        bytesDecoded = avcodec_decode_video(movie->video_st->codec,
+                                                            frame,
+                                                            &frameFinished,
+                                                            pkt->data,
                                                             pkt->size);
                         */
-                        bytesDecoded = avcodec_decode_video2(movie->video_st->codec, 
-                                                             frame, 
-                                                             &frameFinished, 
+                        bytesDecoded = avcodec_decode_video2(movie->video_st->codec,
+                                                             frame,
+                                                             &frameFinished,
                                                              pkt);
 
                         if(frameFinished)
             if (ret < 0)
             {
 #if LIBAVCODEC_VERSION_INT>=3412992
-				if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0)
+                if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0)
                 {
                     goto fail;
                 }
                     break;
                 }
 #else
-				goto fail;
+                goto fail;
 #endif
-				}
+                }
             if (pkt->stream_index == movie->audio_stream)
             {
                 if(seeking)
             video_render(movie);
             video_packet=0;
         }
-		/*This is very important: without this if check, 
-		 * The video frames will not be displayed, ever. */
+        /*This is very important: without this if check,
+         * The video frames will not be displayed, ever. */
         if(co<1)
             movie->timing=40;
         co++;
 
         if(movie->timing>0)
         {
-			/* Here, we check if the current time in milliseconds exceeds the scheduled time 
-			 * and if so, we display the frame. */ 
+            /* Here, we check if the current time in milliseconds exceeds the scheduled time
+             * and if so, we display the frame. */
             double showtime = movie->timing+movie->last_showtime;
             double now = av_gettime()/1000.0;
             if(now >= showtime)
 
 int video_render(PyMovie *movie)
 {
-	/* Video render function. Only executed when there is a packet to decode. 
-	 *  Here, we get the packet, check it, decode the packet, and queue it up.
-	 */
+    /* Video render function. Only executed when there is a packet to decode.
+     *  Here, we get the packet, check it, decode the packet, and queue it up.
+     */
     AVPacket pkt1, *pkt = &pkt1;
     int len1, got_picture;
     AVFrame *frame= avcodec_alloc_frame();
 
 
 #if LIBAVCODEC_VERSION_INT<3412992
-		if((pkt->dts == AV_NOPTS_VALUE)) //(52<<16)+(20<<8)+0 ie 52.20.0
+        if((pkt->dts == AV_NOPTS_VALUE)) //(52<<16)+(20<<8)+0 ie 52.20.0
         {
             //due to short circuiting this checks first, then if that fails it does the invalid old checks. :)
             pts=opaque;

File src/_gmovie.h

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  *  of a video file. Any format supported by ffmpeg is supported by this
  *  video player. Any bugs, please email trinioler@gmail.com :)
  */
- 
+
 
 #ifndef _GMOVIE_H_
 #define _GMOVIE_H_
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
 
-	
+
 /*constant definitions */
 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
 
 /*Thread management macros to save me a load of typing...*/
 #if THREADFREE!=1
-	#define DECLAREGIL PyThreadState *_oldtstate;
-	#define GRABGIL    PyEval_AcquireLock();_oldtstate = PyThreadState_Swap(movie->_tstate);
-	#define RELEASEGIL PyThreadState_Swap(_oldtstate); PyEval_ReleaseLock();
+    #define DECLAREGIL PyThreadState *_oldtstate;
+    #define GRABGIL    PyEval_AcquireLock();_oldtstate = PyThreadState_Swap(movie->_tstate);
+    #define RELEASEGIL PyThreadState_Swap(_oldtstate); PyEval_ReleaseLock();
 #else
-	#define DECLAREGIL
-	#define GRABGIL
-	#define RELEASEGIL
+    #define DECLAREGIL
+    #define GRABGIL
+    #define RELEASEGIL
 #endif
 
 /* Used to indicate when to flush the queues for seeking */
 #define RGBA  0
 
 #if RGB24
-	#define RGBSTEP 3
+    #define RGBSTEP 3
 #else
-	#define RGBSTEP 4
+    #define RGBSTEP 4
 #endif
 
 #define BPP 1
 
 #ifdef PROFILE
 #include <math.h>
-	typedef struct TimeSampleNode
-	{
-		int64_t sample;
-		struct TimeSampleNode *next;
-	}TimeSampleNode;
-	typedef struct ImageScaleStats
-	{
-		double mean;
-		int64_t max;
-		int64_t min;
-		double stdev;
-		double median;
-		double firstqu;
-		double thirdqu;
-		double serror;
-		int64_t n_samples;
-		struct TimeSampleNode *first;
-		struct TimeSampleNode *last;
-	}ImageScaleStats;
+    typedef struct TimeSampleNode
+    {
+        int64_t sample;
+        struct TimeSampleNode *next;
+    }TimeSampleNode;
+    typedef struct ImageScaleStats
+    {
+        double mean;
+        int64_t max;
+        int64_t min;
+        double stdev;
+        double median;
+        double firstqu;
+        double thirdqu;
+        double serror;
+        int64_t n_samples;
+        struct TimeSampleNode *first;
+        struct TimeSampleNode *last;
+    }ImageScaleStats;
 #endif
 //included from ffmpeg header files, as the header file is not publicly available.
 #if defined(__ICC) || defined(__SUNPRO_C)
 PacketQueue;
 
 /* Holds already loaded pictures, so that decoding, and writing to a overlay/surface can happen while waiting
- * the <strong> very </strong> long time(in computer terms) to show the next frame. 
+ * the <strong> very </strong> long time(in computer terms) to show the next frame.
  */
 typedef struct VidPicture
 {
     SDL_Overlay *dest_overlay; /* Overlay for fast speedy yuv-rendering of the video */
     SDL_Surface *dest_surface; /* Surface for other desires, for example, rendering a video in a small portion of the screen */
-    SDL_Rect    dest_rect;	   /* Dest-rect, which tells where to locate the video */
+    SDL_Rect    dest_rect;     /* Dest-rect, which tells where to locate the video */
     int         width;         /* Width and height */
     int         height;
-    int         xleft;		   /* Where left border of video is located */
-    int         ytop;		   /* Where top border of video is located */
-    int         overlay;	   /* Whether or not to use the overlay */
-    int         ready; 		   /* Boolean to indicate this picture is ready to be used. After displaying the contents, it changes to False */
-    double      pts;		   /* presentation time-stamp of the picture */
+    int         xleft;         /* Where left border of video is located */
+    int         ytop;          /* Where top border of video is located */
+    int         overlay;       /* Whether or not to use the overlay */
+    int         ready;         /* Boolean to indicate this picture is ready to be used. After displaying the contents, it changes to False */
+    double      pts;           /* presentation time-stamp of the picture */
 }
 VidPicture;
 
 #if 0
 typedef struct SubPicture
 {
-	double pts;
-	AVSubtitle sub;
+    double pts;
+    AVSubtitle sub;
 } SubPicture;
 #endif
 enum {
     /* General purpose members */
     SDL_Thread      *parse_tid;      /* Thread id for the decode_thread call */
     int              abort_request;  /* Tells whether or not to stop playing and return */
-    int              paused; 		 /* Boolean for communicating to the threads to pause playback */
+    int              paused;         /* Boolean for communicating to the threads to pause playback */
     int              last_paused;    /* For comparing the state of paused to what it was last time around. */
-    int 			 working;
+    int              working;
     char             filename[1024];
     char            *_backend;       //"FFMPEG_WRAPPER";
     int              overlay;        //>0 if we are to use the overlay, otherwise <=0
     int              loops;          //Number of times to play the video
     int              resize_h;       //Indicator values that we have resized the video screen from the default
     int              resize_w;
-    int 		     replay;         //we've played this once before, we're playing it again.
+    int              replay;         //we've played this once before, we're playing it again.
     AVInputFormat   *iformat;        //Format of the file
     SDL_mutex       *dest_mutex;     //mutex to control access to important info
     int              av_sync_type;   //determines the clock type we use
     int              stop;           //whether we're in a stop state...
     SDL_Surface     *canon_surf;     //pointer to the surface given by the programmer. We do NOT free this... it is not ours. We just write to it.
     PyThreadState   *_tstate;        //really do not touch this unless you have to. This is used for threading control and primitives.
-	int finished;
-	int skip_frame;
-	
-	/* command queue stuff */
+    int finished;
+    int skip_frame;
+
+    /* command queue stuff */
     CommandQueue *commands;
-	int seekCommandType;   
-	int pauseCommandType; 
+    int seekCommandType;
+    int pauseCommandType;
     int stopCommandType;
     int resizeCommandType;
     int shiftCommandType;
 
     /* Seek-info */
     int      seek_req;
-    int      seek_flags; 
+    int      seek_flags;
     int64_t  seek_pos;
-	int64_t  start_time;     //used for seeking
-    
+    int64_t  start_time;     //used for seeking
+
     /* external clock members */
     double  external_clock; /* external clock base */
     int64_t external_clock_time;
 
     /* Audio stream members */
-    double      audio_clock; 
+    double      audio_clock;
     AVStream   *audio_st;    //audio stream struct
     PacketQueue audioq;      //audio packets
     /* samples output by the codec. we reserve more space for avsync compensation */
     AVPacket audio_pkt;      //current packet
     uint8_t *audio_pkt_data; //current packet data
     int      audio_pkt_size; //current packet size
-    int64_t  audio_pts;      
+    int64_t  audio_pts;
     //int audio_volume; /*must self implement*/
     enum SampleFormat audio_src_fmt;
     //AVAudioConvert   *reformat_ctx;
     SDL_cond   *videoq_cond;
     struct SwsContext *img_convert_ctx;
 #if 0
-	/*subtitle */
-	int sub_stream;
-	int sub_stream_changed;
-	AVStream *sub_st;
-	PacketQueue subq;
-	SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
-	int subpq_rindex, subpq_windex, subpq_size;
-	SDL_mutex *subpq_mutex;
-	int subtitle_disable;
+    /*subtitle */
+    int sub_stream;
+    int sub_stream_changed;
+    AVStream *sub_st;
+    PacketQueue subq;
+    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
+    int subpq_rindex, subpq_windex, subpq_size;
+    SDL_mutex *subpq_mutex;
+    int subtitle_disable;
 #endif
 #ifdef PROFILE
-	ImageScaleStats *istats;
+    ImageScaleStats *istats;
 #endif
 
 }
     char *filename;//check
     double aspect_ratio;//check
 } PyMovieInfo;
-    
+
 
 
 /*command definitions */
 typedef struct seekCommand
 {
-	FULL_COMMAND
-	int64_t pos;
-	int rel;
+    FULL_COMMAND
+    int64_t pos;
+    int rel;
 } seekCommand;
 
 
 typedef struct pauseCommand
 {
-	FULL_COMMAND
+    FULL_COMMAND
 } pauseCommand;
 
 typedef struct stopCommand
 {
-	FULL_COMMAND
+    FULL_COMMAND
 } stopCommand;
 
 typedef struct resizeCommand
 {
-	FULL_COMMAND
-	int h;
-	int w;
+    FULL_COMMAND
+    int h;
+    int w;
 } resizeCommand;
 
 typedef struct shiftCommand
 {
-	FULL_COMMAND
-	int barrier;
-	int ytop;
-	int xleft;	
+    FULL_COMMAND
+    int barrier;
+    int ytop;
+    int xleft;
 } shiftCommand;
 
 typedef struct surfaceCommand
 {
-	FULL_COMMAND
-	SDL_Surface *surface;	
+    FULL_COMMAND
+    SDL_Surface *surface;
 } surfaceCommand;
 
 /* end of struct definitions */
 /* function definitions */
 
-/* 		PacketQueue Management */
+/*         PacketQueue Management */
 void packet_queue_init  (PacketQueue *q);
 void packet_queue_flush (PacketQueue *q);
 void packet_queue_end   (PacketQueue *q, int end);
 void packet_queue_abort (PacketQueue *q);
 int  packet_queue_get   (PacketQueue *q, AVPacket *pkt, int block);
 
-/* 		Misc*/
+/*         Misc*/
 int  initialize_context     (PyMovie *movie, int threaded);
 int  initialize_codec       (PyMovie *movie, int stream_index, int threaded);
-/* 		Video Management */
+/*         Video Management */
 int  video_open          (PyMovie *is, int index);
 int  video_image_display (PyMovie *is);
 int  video_display       (PyMovie *is);
 void update_video_clock  (PyMovie *movie, AVFrame* frame, double pts);
 void video_refresh_timer (PyMovie *movie); //unlike in ffplay, this does the job of compute_frame_delay
 
-/* 		Audio management */
+/*         Audio management */
 int  synchronize_audio        (PyMovie *is, short *samples, int samples_size1, double pts);
 int  audio_decode_frame       (PyMovie *is, double *pts_ptr);
 
-/* 		General Movie Management */
+/*         General Movie Management */
 void stream_seek            (PyMovie *is, int64_t pos, int rel);
 void stream_pause           (PyMovie *is);
 int  stream_component_open  (PyMovie *is, int stream_index, int threaded); //TODO: break down into separate functions
 void stream_cycle_channel   (PyMovie *is, int codec_type);
 int  decoder_wrapper        (void *arg);
 
-/* 		Clock Management */
+/*         Clock Management */
 double get_audio_clock    (PyMovie *is);
 double get_video_clock    (PyMovie *is);
 double get_external_clock (PyMovie *is);
 double get_master_clock   (PyMovie *is);
 
-/* 		Command management */
+/*         Command management */
 void registerCommands(PyMovie *movie);
 
-/* 		MovieInfo management*/
+/*         MovieInfo management*/
 int _info_init(PyObject *self, PyObject *args, PyObject *kwds);
 void _info_init_internal(PyMovieInfo *self, const char *filename);
 void _info_dealloc(PyMovieInfo *info);
 
 
 #if 0
-/*		Subtitle Management*/
+/*        Subtitle Management*/
 int subtitle_render(void *arg);
 void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh);
 void free_subpicture(SubPicture *sp);

File src/_gsound.c

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  *  We use SDL_mixer to manage things, and provide a simple callback.
  *  The interface is simple, to avoid the mess of dealing with sound and
  *  channel management within _gmovie.c which was getting long enough as it was.
- */ 
+ */
 
 #include "_gsound.h"
 
 }
 
 /* SDL mixer callback function:
- *  The idea is to get the next sound sample played as quickly as possible. 
+ *  The idea is to get the next sound sample played as quickly as possible.
  *  We free the last played chunk, then call playBufferQueue, explained '
- *  below. 
+ *  below.
  */
 void cb_mixer(int channel)
 {
-	Mix_Chunk *mix;
-	mix= Mix_GetChunk(channel);
-	if(mix->abuf)
-		PyMem_Free(mix->abuf);
-	if(mix)
-		PyMem_Free(mix);
+    Mix_Chunk *mix;
+    mix= Mix_GetChunk(channel);
+    if(mix->abuf)
+        PyMem_Free(mix->abuf);
+    if(mix)
+        PyMem_Free(mix);
     playBufferQueue(channel);
-    
+
 }
 
 //initialize the mixer audio subsystem, code cribbed from mixer.c
         /* A bug in sdl_mixer where the stereo is reversed for 8 bit.
            So we use this CPU hogging effect to reverse it for us.
            Hopefully this bug is fixed in SDL_mixer 1.2.9
-        printf("MIX_MAJOR_VERSION :%d: MIX_MINOR_VERSION :%d: MIX_PATCHLEVEL :%d: \n", 
+        printf("MIX_MAJOR_VERSION :%d: MIX_MINOR_VERSION :%d: MIX_PATCHLEVEL :%d: \n",
                MIX_MAJOR_VERSION, MIX_MINOR_VERSION, MIX_PATCHLEVEL);
         */
 
     ainfo->ended=1;
     ainfo->time_base=time_base;
     Mix_VolumeMusic (127);
-    
+
     //ainfo->_tstate = _tstate;
     return 0;
 }
     ainfo->current_frame_size=1;
     return 0;
 }
-/*basic sound end routine, used for pauses, the movie object ends, but the 
+/*basic sound end routine, used for pauses, the movie object ends, but the
  * movie object isn't deallocated yet, etc */
 int soundEnd   (void)
 {
     return 0;
 }
 
-/* Play a sound buffer, with a given length 
- *  This will place the buffer on the queue, if a sound is already playing. 
+/* Play a sound buffer, with a given length
+ *  This will place the buffer on the queue, if a sound is already playing.
  */
 int playBuffer (uint8_t *buf, uint32_t len, int channel, int64_t pts)
 {
-	//SDL_mutexP(ainfo->mutex);
+    //SDL_mutexP(ainfo->mutex);
     Mix_Chunk *mix;
     if(!ainfo->ended && (ainfo->queue.size>0||ainfo->playing))
     {
-    	//not a callback call, so we copy the buffer into a buffernode and add it to the queue.
+        //not a callback call, so we copy the buffer into a buffernode and add it to the queue.
         BufferNode *node;
         node = (BufferNode *)PyMem_Malloc(sizeof(BufferNode));
         node->buf = (uint8_t *)PyMem_Malloc((size_t)len);
         queue_put(&ainfo->queue, node);
         //SDL_mutexV(ainfo->mutex);
         if(ainfo->channel<0)
-        	ainfo->channel=channel;
+            ainfo->channel=channel;
         return ainfo->channel;
-    }     
+    }
     //regardless of 1st call, or a callback, we load the data from buf into a newly allocated block.
     mix= (Mix_Chunk *)PyMem_Malloc(sizeof(Mix_Chunk));
     mix->allocated=1;
-	mix->abuf = (Uint8 *)PyMem_Malloc((size_t)len);
+    mix->abuf = (Uint8 *)PyMem_Malloc((size_t)len);
     memcpy(mix->abuf, buf, len);
     mix->alen = (Uint32 )len;
     mix->volume = 127;
     ainfo->playing = 1;
- 	if(!ainfo->ended && len!=0)
-	{
-    	int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
-    	ainfo->audio_clock+= (double) len/(double) bytes_per_sec;
-    	
-	}
+    if(!ainfo->ended && len!=0)
+    {
+        int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
+        ainfo->audio_clock+= (double) len/(double) bytes_per_sec;
+
+    }
     ainfo->current_frame_size =len;
     int playing = Mix_Playing(channel);
     if(playing)
     {
-    	return channel;
+        return channel;
     }
     int ret = Mix_PlayChannel(channel, mix, 0);
     ainfo->channel = ret;
 
 int stopBuffer (int channel)
 {
-	queue_flush(&ainfo->queue);
+    queue_flush(&ainfo->queue);
     return 0;
 }
 
 void playBufferQueue(int channel)
 {
-	uint8_t *buf;
-	int len=0;
-	int64_t pts;
-	int playing = Mix_Playing(channel);
-	if (playing)
-		return;
-	if(!ainfo->ended && ainfo->queue.size<=0)
-	{            
+    uint8_t *buf;
+    int len=0;
+    int64_t pts;
+    int playing = Mix_Playing(channel);
+    if (playing)
+        return;
+    if(!ainfo->ended && ainfo->queue.size<=0)
+    {
         //callback call but when the queue is empty, so we just load a short empty sound.
         buf = (uint8_t *) PyMem_Malloc((size_t)128);
         memset(buf, 0, (size_t)128);
         PyMem_Free(newNode);
         newNode=NULL;
     }
-    
+
     //we assume that if stopped is true, then
     if(ainfo->ended && !buf)
     {
     Mix_Chunk *mix;
     mix= (Mix_Chunk *)PyMem_Malloc(sizeof(Mix_Chunk));
     mix->allocated=1;
-	mix->abuf = buf;
+    mix->abuf = buf;
     mix->alen = (Uint32 )len;
     mix->volume = 127;
- 	if(!ainfo->ended && len!=0)
-	{
-    	int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
-    	ainfo->audio_clock+= (double) len/(double) bytes_per_sec;    	
-	}
+    if(!ainfo->ended && len!=0)
+    {
+        int bytes_per_sec = ainfo->channels*ainfo->sample_rate*2;
+        ainfo->audio_clock+= (double) len/(double) bytes_per_sec;
+    }
     ainfo->current_frame_size =len;
     int chan = ainfo->channel;
-    
+
     int ret = Mix_PlayChannel(chan, mix, 0);
     ainfo->channel = ret;
     return;
     int paused = Mix_Paused(channel);
     if(paused)
     {
-    	ainfo->audio_clock=ainfo->old_clock;
-    	ainfo->ended=0;
+        ainfo->audio_clock=ainfo->old_clock;
+        ainfo->ended=0;
         Mix_Resume(-1);
     }
     else
     {
-    	ainfo->old_clock = ainfo->audio_clock;
-    	ainfo->ended=1;
+        ainfo->old_clock = ainfo->audio_clock;
+        ainfo->ended=1;
         Mix_Pause(-1);
     }
     return 0;
 }
 int seekBuffer (double pts)
 {
-	/*we need to flush our buffer */
-	queue_flush(&ainfo->queue);
-	if (pts != AV_NOPTS_VALUE) 
-	{
-            ainfo->audio_clock = ainfo->time_base*pts;
+    /*we need to flush our buffer */
+    queue_flush(&ainfo->queue);
+    if (pts != AV_NOPTS_VALUE)
+    {
+        ainfo->audio_clock = ainfo->time_base*pts;
     }
-	//ainfo->ended=1;
-	return 1;
+    //ainfo->ended=1;
+    return 1;
 }
 
 int setCallback(void (*callback)(int channel))
 
 int getBufferQueueSize(void)
 {
-	return ainfo->queue.size;
+    return ainfo->queue.size;
 }

File src/_gsound.h

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-  
+
 */
 
 /*
  *  of a video file. Any format supported by ffmpeg is supported by this
  *  video player. Any bugs, please email trinioler@gmail.com :)
  */
- 
+
 
 #ifndef _GSOUND_H_
 #define _GSOUND_H_
 }
 BufferNode;
 
-/* Queue Struct for handling sound samples. This enables one thread pushing 
+/* Queue Struct for handling sound samples. This enables one thread pushing
  * samples onto the queue with minimal interruption of grabbing a sample off the queue
  */
 typedef struct BufferQueue
     int         playing;            //if we've started playing any buffers
     int         channel;            //what channel the last buffer played on
     int         ended;              //whether or not we've "ended", so we know to output silence.
-	int			paused;
+    int            paused;
     BufferQueue queue;              //queue of our buffers
     SDL_mutex   *mutex;
-	//PyThreadState *_tstate;
-	int restart;
-	double time_base;
+    //PyThreadState *_tstate;
+    int restart;
+    double time_base;
 }
 AudioInfo;
 

File src/_numericsndarray.c

     numdims = (numchannels > 1) ? 2 : 1;
     dim[0] = chunk->alen / (numchannels*formatbytes);
     dim[1] = numchannels;
-    
+
     array = PyArray_FromDimsAndData (numdims, dim, type, (char*)chunk->abuf);
     if(array)
     {
 sndarray_array (PyObject* self, PyObject* arg)
 {
     PyObject *array, *arraycopy=NULL;
-    
+
     /*we'll let numeric do the copying for us*/
     array = sndarray_samples (self, arg);
     if(array)
     Uint8 *src, *dst;
 
     if (!PyArg_ParseTuple (arg, "O!", &PyArray_Type, &arrayobj))
-	return NULL;
+        return NULL;
     array = (PyArrayObject*) arrayobj;
-    
+
     if (!Mix_QuerySpec (NULL, &format, &numchannels))
         return RAISE (PyExc_SDLError, "Mixer not initialized");
     if (array->descr->type_num > PyArray_LONG)
         return RAISE (PyExc_ValueError, "Invalid array datatype for sound");
-    
+
     if (format==AUDIO_S8 || format==AUDIO_U8)
         mixerbytes = 1;