Commits

Anonymous committed d892d13

Began subtitle support work. Still need to alter the decoder and add a
subtitle_render function.

  • Parent commits cd5f532
  • Branches tylerthemovie
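
Since subtitle_render is so far only declared in the header, here is a rough
sketch of what it might look like, modeled on ffplay's subtitle thread. The
packet_queue_get() return convention and the avcodec_decode_subtitle() call
are assumptions based on the rest of this codebase and the ffmpeg API of the
time, not part of this commit:

    /* hypothetical sketch -- not in this commit */
    void subtitle_render(PyMovie *movie)
    {
        SubPicture *sp;
        AVPacket pkt;
        int got_subtitle;

        /* assumed: returns <= 0 when no packet is available */
        if (packet_queue_get(&movie->subq, &pkt, 0) <= 0)
            return;

        sp = &movie->subpq[movie->subpq_windex];
        avcodec_decode_subtitle(movie->sub_st->codec, &sp->sub,
                                &got_subtitle, pkt.data, pkt.size);
        if (got_subtitle)
        {
            sp->pts = 0;
            if (pkt.pts != AV_NOPTS_VALUE)
                sp->pts = pkt.pts * av_q2d(movie->sub_st->time_base);
            if (++movie->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                movie->subpq_windex = 0;
            SDL_LockMutex(movie->subpq_mutex);
            movie->subpq_size++;
            SDL_UnlockMutex(movie->subpq_mutex);
        }
        av_free_packet(&pkt);
    }

After decoding, each rect's RGBA palette would still have to be converted to
YCbCr with the RGB_TO_*_CCIR macros added to _gmovie.h below, which is what
the "Now in YCrCb!" comment in blend_subrect assumes has already happened.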

Files changed (2)

File src/_gmovie.c

 
     return ret;
 }
+
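+/* Alpha-blend one palettized subtitle rect into a YUV420 picture (as in
+ * ffplay's blend_subrect): luma is blended per pixel, chroma per 2x2 block,
+ * which is why odd leading rows and columns get their own special cases. */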
+void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+{
+    int wrap, wrap3, width2, skip2;
+    int y, u, v, a, u1, v1, a1, w, h;
+    uint8_t *lum, *cb, *cr;
+    const uint8_t *p;
+    const uint32_t *pal;
+    int dstx, dsty, dstw, dsth;
+
+    dstw = av_clip(rect->w, 0, imgw);
+    dsth = av_clip(rect->h, 0, imgh);
+    dstx = av_clip(rect->x, 0, imgw - dstw);
+    dsty = av_clip(rect->y, 0, imgh - dsth);
+    lum = dst->data[0] + dsty * dst->linesize[0];
+    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
+    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+
+    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
+    skip2 = dstx >> 1;
+    wrap = dst->linesize[0];
+    wrap3 = rect->pict.linesize[0];
+    p = rect->pict.data[0];
+    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
+
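+    /* blend a leading odd row on its own so the main loop starts on an even (chroma-aligned) row */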
+    if (dsty & 1) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = _ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            cb++;
+            cr++;
+            lum++;
+            p += BPP;
+        }
+        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = _ALPHA_BLEND(a, lum[1], y, 0);
+            cb[0] = _ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = _ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += 2 * BPP;
+            lum += 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = _ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            p++;
+            lum++;
+        }
+        p += wrap3 - dstw * BPP;
+        lum += wrap - dstw - dstx;
+        cb += dst->linesize[1] - width2 - skip2;
+        cr += dst->linesize[2] - width2 - skip2;
+    }
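+    /* main loop: two rows per pass, so each 2x2 block of luma shares one blended chroma sample */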
+    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            p += wrap3;
+            lum += wrap;
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = _ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += -wrap3 + BPP;
+            lum += -wrap + 1;
+        }
+        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = _ALPHA_BLEND(a, lum[1], y, 0);
+            p += wrap3;
+            lum += wrap;
+
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = _ALPHA_BLEND(a, lum[1], y, 0);
+
+            cb[0] = _ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
+            cr[0] = _ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+
+            cb++;
+            cr++;
+            p += -wrap3 + 2 * BPP;
+            lum += -wrap + 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            p += wrap3;
+            lum += wrap;
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = _ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += -wrap3 + BPP;
+            lum += -wrap + 1;
+        }
+        p += wrap3 + (wrap3 - dstw * BPP);
+        lum += wrap + (wrap - dstw - dstx);
+        cb += dst->linesize[1] - width2 - skip2;
+        cr += dst->linesize[2] - width2 - skip2;
+    }
+    /* handle odd height */
+    if (h) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = _ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            cb++;
+            cr++;
+            lum++;
+            p += BPP;
+        }
+        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = _ALPHA_BLEND(a, lum[1], y, 0);
+            cb[0] = _ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = _ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += 2 * BPP;
+            lum += 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = _ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = _ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = _ALPHA_BLEND(a >> 2, cr[0], v, 0);
+        }
+    }
+}
+
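+/* free each rect's pixel and palette buffers, then zero the AVSubtitle so its queue slot can be reused */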
+void free_subpicture(SubPicture *sp)
+{
+    int i;
+
+    for (i = 0; i < sp->sub.num_rects; i++)
+    {
+        av_freep(&sp->sub.rects[i]->pict.data[0]);
+        av_freep(&sp->sub.rects[i]->pict.data[1]);
+        av_freep(&sp->sub.rects[i]);
+    }
+
+    av_free(sp->sub.rects);
+
+    memset(&sp->sub, 0, sizeof(AVSubtitle));
+}
+
+
 /* Sets the value of the variable width. Acts like a macro */
 void inline get_width(PyMovie *movie, int *width)
 {
     Py_INCREF( movie);
     RELEASEGIL
     VidPicture *vp;
+    SubPicture *sp;
     float aspect_ratio;
     int width, height, x, y;
     vp = &movie->pictq[movie->pictq_rindex];
 
     if (vp->dest_overlay && vp->overlay>0)
     {
+        if (movie->sub_st)
+        {
+            if (movie->subpq_size > 0)
+            {
+                AVPicture pict;
+                int i;
+                sp = &movie->subpq[movie->subpq_rindex];
+
+                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
+                {
+                    SDL_LockYUVOverlay (vp->dest_overlay);
+
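+                    /* the overlay keeps its V plane before U (YV12), hence planes 1 and 2 are swapped */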
+                    pict.data[0] = vp->dest_overlay->pixels[0];
+                    pict.data[1] = vp->dest_overlay->pixels[2];
+                    pict.data[2] = vp->dest_overlay->pixels[1];
+
+                    pict.linesize[0] = vp->dest_overlay->pitches[0];
+                    pict.linesize[1] = vp->dest_overlay->pitches[2];
+                    pict.linesize[2] = vp->dest_overlay->pitches[1];
+
+                    for (i = 0; i < sp->sub.num_rects; i++)
+                        blend_subrect(&pict, sp->sub.rects[i],
+                                      vp->dest_overlay->w, vp->dest_overlay->h);
+
+                    SDL_UnlockYUVOverlay (vp->dest_overlay);
+                }
+            }
+        }
+
         if(vp->overlay>0)
         {
             SDL_LockYUVOverlay(vp->dest_overlay);
         movie->video_stream = stream_index;
         movie->video_st = ic->streams[stream_index];
         break;
+    case CODEC_TYPE_SUBTITLE:
+        movie->sub_stream = stream_index;
+        movie->sub_st     = ic->streams[stream_index];
+        break;
     default:
         break;
     }
         movie->video_current_pts_time = av_gettime();
         packet_queue_init(&movie->videoq);
         break;
+    case CODEC_TYPE_SUBTITLE:
+        if(movie->replay)
+        {
+            movie->sub_stream = stream_index;
+            movie->sub_st     = ic->streams[stream_index];
+        }
+        packet_queue_init(&movie->subq);
+        break;
     default:
         break;
     }
     enc = ic->streams[stream_index]->codec;
     int i;
     VidPicture *vp;
+    SubPicture *sp;
     switch(enc->codec_type)
     {
     case CODEC_TYPE_AUDIO:
         packet_queue_abort(&movie->videoq);
         packet_queue_flush(&movie->videoq);
         break;
+    case CODEC_TYPE_SUBTITLE:
+        packet_queue_abort(&movie->subq);
+        packet_queue_flush(&movie->subq);
+        break;
+
     default:
         break;
     }
     case CODEC_TYPE_VIDEO:
         packet_queue_end(&movie->videoq, end);
         break;
+    case CODEC_TYPE_SUBTITLE:
+        packet_queue_end(&movie->subq, end);
+        break;
     default:
         break;
     }
         movie->video_st = NULL;
         movie->video_stream = -1;
         break;
+    case CODEC_TYPE_SUBTITLE:
+        movie->sub_st = NULL;
+        movie->sub_stream = -1;
+        break;
     default:
         break;
     }
     subtitle_index = -1;
     movie->video_stream = -1;
     movie->audio_stream = -1;
+    movie->sub_stream = -1;
 
     initialize_context(movie, threaded); //moved a bunch of convenience stuff out of here for access at other times
 
     int wanted_video_stream=1;
     int wanted_audio_stream=1;
+    int wanted_subtitle_stream=1;
     /* if seeking requested, we execute it */
     if (movie->start_time != AV_NOPTS_VALUE)
     {
             if (wanted_video_stream-- >= 0 && !movie->video_disable)
                 video_index = i;
             break;
+        case CODEC_TYPE_SUBTITLE:
+            if(wanted_subtitle_stream-- >= 0 && !movie->subtitle_disable)
+                subtitle_index = i;
+            break;
         default:
             break;
         }
     {
         stream_component_open(movie, video_index, threaded);
     }
+    if(subtitle_index >= 0)
+    {
+        stream_component_open(movie, subtitle_index, threaded);
+    }
 
     if (movie->video_stream < 0 && movie->audio_stream < 0)
     {
     {
         stream_component_close(movie, movie->video_stream);
     }
+    if (movie->sub_stream >= 0)
+    {
+        stream_component_close(movie, movie->sub_stream);
+    }
     if (movie->ic)
     {
         av_close_input_file(movie->ic);
                 av_free_packet(pkt);
             }
         }
+        SubPicture *sp;
+        SubPicture *sp2;
+        if(movie->sub_stream>=0) {
+                if (movie->sub_stream_changed) {
+                    SDL_LockMutex(movie->subpq_mutex);
+
+                    while (movie->subpq_size) {
+                        free_subpicture(&movie->subpq[movie->subpq_rindex]);
+
+                        /* advance the read index and shrink the queue */
+                        if (++movie->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
+                            movie->subpq_rindex = 0;
+
+                        movie->subpq_size--;
+                    }
+                    movie->sub_stream_changed = 0;
+
+                    SDL_UnlockMutex(movie->subpq_mutex);
+                } else {
+                    if (movie->subpq_size > 0) {
+                        sp = &movie->subpq[movie->subpq_rindex];
+
+                        if (movie->subpq_size > 1)
+                            sp2 = &movie->subpq[(movie->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
+                        else
+                            sp2 = NULL;
+
+                        if ((movie->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
+                                || (sp2 && movie->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
+                        {
+                            free_subpicture(sp);
+
+                            /* advance the read index and shrink the queue */
+                            if (++movie->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
+                                movie->subpq_rindex = 0;
+
+                            SDL_LockMutex(movie->subpq_mutex);
+                            movie->subpq_size--;
+                            SDL_UnlockMutex(movie->subpq_mutex);
+                        }
+                    }
+                }
+            }
         if(movie->video_st)
             video_render(movie);
         if(movie->audio_st)

File src/_gmovie.h

 
 /* Queues for already-loaded pictures, for rapid display */
 #define VIDEO_PICTURE_QUEUE_SIZE 8
-
+#define SUBPICTURE_QUEUE_SIZE    4
 /* RGB24 or RGBA... */
 /* In this case I've chosen RGB24 because its smaller */
 #define RGB24 1
 #define RGBA  0
 
+#define BPP 1   /* bytes per pixel in the palettized subtitle bitmaps */
+
 //included from ffmpeg header files, as the header file is not publically available.
 #if defined(__ICC) || defined(__SUNPRO_C)
     #define DECLARE_ALIGNED(n,t,v)      t v __attribute__ ((aligned (n)))
     #define DECLARE_ASM_CONST(n,t,v)    static const t v
 #endif
 
+#define SCALEBITS 10
+#define ONE_HALF  (1 << (SCALEBITS - 1))
+#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
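+/* fixed-point CCIR 601 RGB -> YCbCr conversion (as in ffmpeg's colorspace.h),
+ * scaled by 2^SCALEBITS and rounded via ONE_HALF */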
+#define RGB_TO_Y_CCIR(r, g, b) \
+((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
+  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
+(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
+     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
+   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
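+/* blend newp over oldp with alpha a; s adds fraction bits so pre-summed
+ * 2- or 4-sample chroma values keep the right scale */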
+#define _ALPHA_BLEND(a, oldp, newp, s)\
+((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
+
+#define RGBA_IN(r, g, b, a, s)\
+{\
+    unsigned int v = ((const uint32_t *)(s))[0];\
+    a = (v >> 24) & 0xff;\
+    r = (v >> 16) & 0xff;\
+    g = (v >> 8) & 0xff;\
+    b = v & 0xff;\
+}
+
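+/* fetch the palette entry for one 8-bit pixel and unpack it as YUVA */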
+#define YUVA_IN(y, u, v, a, s, pal)\
+{\
+    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
+    a = (val >> 24) & 0xff;\
+    y = (val >> 16) & 0xff;\
+    u = (val >> 8) & 0xff;\
+    v = val & 0xff;\
+}
+
+#define YUVA_OUT(d, y, u, v, a)\
+{\
+    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
+}
+
+
 /* structure definitions */
 /* PacketQueue to hold incoming ffmpeg packets from the stream */
 typedef struct PacketQueue
 }
 VidPicture;
 
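+/* one decoded subtitle and the presentation time it was queued with */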
+typedef struct SubPicture
+{
+    double pts;
+    AVSubtitle sub;
+} SubPicture;
 
 enum {
     AV_SYNC_AUDIO_MASTER, /* default choice */
     SDL_cond   *videoq_cond;
     struct SwsContext *img_convert_ctx;
 
+    /* subtitle state */
+    int sub_stream;
+    int sub_stream_changed;
+    AVStream *sub_st;
+    PacketQueue subq;
+    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
+    int subpq_rindex, subpq_windex, subpq_size;
+    SDL_mutex *subpq_mutex;
+    int subtitle_disable;
 }
 PyMovie;
 /* end of struct definitions */
 void video_refresh_timer (PyMovie *movie); //unlike in ffplay, this does the job of compute_frame_delay
 
 /* 		Audio management */
-int  audio_write_get_buf_size (PyMovie *is);
 int  synchronize_audio        (PyMovie *is, short *samples, int samples_size1, double pts);
 int  audio_decode_frame       (PyMovie *is, double *pts_ptr);
 
 double get_master_clock   (PyMovie *is);
 
 
+/* 		Subtitle management */
+void subtitle_render(PyMovie *movie);
+void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh);
+void free_subpicture(SubPicture *sp);
 #endif /*_GMOVIE_H_*/