gnash-commit
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Gnash-commit] /srv/bzr/gnash/trunk r9648: Convert VP6A video to image::


From: Benjamin Wolsey
Subject: [Gnash-commit] /srv/bzr/gnash/trunk r9648: Convert VP6A video to image::ImageRGBA. This only succeeds with SwScale,
Date: Thu, 28 Aug 2008 21:59:28 +0200
User-agent: Bazaar (1.5)

------------------------------------------------------------
revno: 9648
committer: Benjamin Wolsey <address@hidden>
branch nick: trunk
timestamp: Thu 2008-08-28 21:59:28 +0200
message:
  Convert VP6A video to image::ImageRGBA. This only succeeds with SwScale,
  not with older versions of ffmpeg.
  
  Make renderers accept but ignore RGBA data. It is easy to get agg to render
  the RGBA images, slightly harder to get it to handle both RGB and RGBA, and
  so far I don't see how to get it to handle transparency correctly.
modified:
  backend/render_handler_agg.cpp
  backend/render_handler_cairo.cpp
  backend/render_handler_ogl.cpp
  libmedia/FLVParser.cpp
  libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
  libmedia/ffmpeg/VideoDecoderFfmpeg.h
    ------------------------------------------------------------
    revno: 9646.1.2
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Thu 2008-08-28 18:11:42 +0200
    message:
      Replace convertRGB24 with a function to return an image::ImageBase,
      either with or without alpha depending on the codec.
    modified:
      libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
      libmedia/ffmpeg/VideoDecoderFfmpeg.h
    ------------------------------------------------------------
    revno: 9646.1.3
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Thu 2008-08-28 18:30:58 +0200
    message:
      Rename function.
    modified:
      libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
      libmedia/ffmpeg/VideoDecoderFfmpeg.h
    ------------------------------------------------------------
    revno: 9646.1.4
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Thu 2008-08-28 18:58:46 +0200
    message:
      Start a byte later for VP6A too.
      
      A SWS_FAST_BILINEAR transform crashes for alpha, so use SWS_BICUBIC
      (SWS_BILINEAR is fine too).
    modified:
      libmedia/FLVParser.cpp
      libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
    ------------------------------------------------------------
    revno: 9646.1.5
    committer: Benjamin Wolsey <address@hidden>
    branch nick: work
    timestamp: Thu 2008-08-28 20:53:43 +0200
    message:
      Use BILINEAR instead of BICUBIC transform as it's probably faster.
      
      Log error and return in the renderers when given an RGBA video frame
      to render.
    modified:
      backend/render_handler_agg.cpp
      backend/render_handler_cairo.cpp
      backend/render_handler_ogl.cpp
      libmedia/ffmpeg/VideoDecoderFfmpeg.cpp
=== modified file 'backend/render_handler_agg.cpp'
--- a/backend/render_handler_agg.cpp    2008-08-18 23:53:04 +0000
+++ b/backend/render_handler_agg.cpp    2008-08-28 18:53:43 +0000
@@ -429,7 +429,7 @@
     return new agg_bitmap_info<agg::pixfmt_rgb24_pre> (0, 0, 0, &dummy, 24);
   }
 
-  void drawVideoFrame(image::ImageBase* baseframe, const matrix* source_mat, 
+  void drawVideoFrame(image::ImageBase* frame, const matrix* source_mat, 
     const rect* bounds) {
   
     // NOTE: Assuming that the source image is RGB 8:8:8
@@ -444,12 +444,15 @@
     
     // TODO: Maybe implement specialization for 1:1 scaled videos
     
+    if (frame->type() == GNASH_IMAGE_RGBA)
+    {
+        LOG_ONCE(log_error(_("Can't render videos with alpha")));
+        return;
+    }
       
     typedef agg::pixfmt_rgb24_pre baseformat;
-    
-    image::ImageRGB* frame = dynamic_cast<image::ImageRGB*>(baseframe);
 
-    assert(frame);
+    assert(frame->type() == GNASH_IMAGE_RGB);
     
     matrix mat = stage_matrix;
     mat.concatenate(*source_mat);

=== modified file 'backend/render_handler_cairo.cpp'
--- a/backend/render_handler_cairo.cpp  2008-08-18 23:53:04 +0000
+++ b/backend/render_handler_cairo.cpp  2008-08-28 18:53:43 +0000
@@ -446,11 +446,18 @@
 
   virtual void drawVideoFrame(image::ImageBase* baseframe, const matrix* m, 
const rect* bounds)
   {
-    // Extract frame attributes
+
+    if (baseframe->type() == GNASH_IMAGE_RGBA)
+    {
+        LOG_ONCE(log_error(_("Can't render videos with alpha")));
+        return;
+    }
+
     image::ImageRGB* frame = dynamic_cast<image::ImageRGB*>(baseframe);
-    
+
     assert(frame);
-    
+
+    // Extract frame attributes
     int         w = frame->width();
     int         h = frame->height();
 

=== modified file 'backend/render_handler_ogl.cpp'
--- a/backend/render_handler_ogl.cpp    2008-08-18 23:53:04 +0000
+++ b/backend/render_handler_ogl.cpp    2008-08-28 18:53:43 +0000
@@ -657,7 +657,7 @@
   // anti-aliased with the rest of the drawing. Since display lists cannot be
   // concatenated this means we'll add up with several display lists for normal
   // drawing operations.
-  virtual void drawVideoFrame(image::ImageBase* baseframe, const matrix* m, 
const rect* bounds)
+  virtual void drawVideoFrame(image::ImageBase* frame, const matrix* m, const 
rect* bounds)
   {
     GLint index;
 
@@ -678,7 +678,7 @@
     glNewList(index, GL_COMPILE);
     _video_indices.push_back(index);
 
-    reallyDrawVideoFrame(baseframe, m, bounds);
+    reallyDrawVideoFrame(frame, m, bounds);
 
     glEndList();
 
@@ -688,11 +688,16 @@
     _render_indices.push_back(index);
   }
   
-  virtual void reallyDrawVideoFrame(image::ImageBase* baseframe, const matrix* 
m, const rect* bounds)
+  virtual void reallyDrawVideoFrame(image::ImageBase* frame, const matrix* m, 
const rect* bounds)
   {
-    image::ImageRGB* frame = dynamic_cast<image::ImageRGB*>(baseframe);
-    
-    assert(frame);
+  
+    if (frame->type() == GNASH_IMAGE_RGBA)
+    {
+        LOG_ONCE(log_error(_("Can't render videos with alpha")));
+        return;
+    }
+  
+    assert(frame->type() == GNASH_IMAGE_RGB);
 
     glPushAttrib(GL_ENABLE_BIT | GL_COLOR_BUFFER_BIT);
 

=== modified file 'libmedia/FLVParser.cpp'
--- a/libmedia/FLVParser.cpp    2008-08-28 13:56:42 +0000
+++ b/libmedia/FLVParser.cpp    2008-08-28 16:58:46 +0000
@@ -337,7 +337,7 @@
 
                boost::uint16_t codec = (tag[11] & 0x0f) >> 0;
 
-        if (codec == VIDEO_CODEC_VP6)
+        if (codec == VIDEO_CODEC_VP6 || codec == VIDEO_CODEC_VP6A)
         {
             _stream->read_byte();
             --bodyLength;

=== modified file 'libmedia/ffmpeg/VideoDecoderFfmpeg.cpp'
--- a/libmedia/ffmpeg/VideoDecoderFfmpeg.cpp    2008-08-28 09:54:06 +0000
+++ b/libmedia/ffmpeg/VideoDecoderFfmpeg.cpp    2008-08-28 18:53:43 +0000
@@ -153,68 +153,93 @@
   }
 }
 
-AVPicture
-VideoDecoderFfmpeg::convertRGB24(AVCodecContext* srcCtx,
+std::auto_ptr<image::ImageBase>
+VideoDecoderFfmpeg::frameToImage(AVCodecContext* srcCtx,
                                  const AVFrame& srcFrame)
 {
-  AVPicture picture;
-  int width = srcCtx->width, height = srcCtx->height;
-  
-  picture.data[0] = NULL;
-  
-  int bufsize = avpicture_get_size(PIX_FMT_RGB24, width, height);
-  if (bufsize == -1) {
-    return picture;
-  }
-
-  boost::uint8_t* buffer = new boost::uint8_t[bufsize];
-
-  avpicture_fill(&picture, buffer, PIX_FMT_RGB24, width, height);
-
-#ifndef HAVE_SWSCALE_H
-  img_convert(&picture, PIX_FMT_RGB24, (AVPicture*) &srcFrame,
-      srcCtx->pix_fmt, width, height);
-#else
-
+
+  const int width = srcCtx->width;
+  const int height = srcCtx->height;
+
+  PixelFormat pixFmt;
+  std::auto_ptr<image::ImageBase> im;
+
+  if (srcCtx->codec->id == CODEC_ID_VP6A)
+  {
+    // Expect RGBA data
+    //log_debug("alpha image");
+    pixFmt = PIX_FMT_RGBA;
+    im.reset(new image::ImageRGBA(width, height));    
+  }
+  else
+  {
+    // Expect RGB data
+    pixFmt = PIX_FMT_RGB24;
+    im.reset(new image::ImageRGB(width, height));
+  }
+
+#ifdef HAVE_SWSCALE_H
   // Check whether the context wrapper exists
   // already.
   if (!_swsContext.get()) {
-    // FIXME: this leads to wrong results (read: segfaults) if this method
-    //        is called from two unrelated video contexts, for example from
-    //        a NetStreamFfmpeg and an embedded video context. Or two
-    //        separate instances of one of the former two.    
+
     _swsContext.reset(
             new SwsContextWrapper(
                 sws_getContext(width, height, srcCtx->pix_fmt,
-                width, height, PIX_FMT_RGB24,
-                SWS_FAST_BILINEAR, NULL, NULL, NULL)
+                width, height, pixFmt,
+                SWS_BILINEAR, NULL, NULL, NULL)
             ));
     
     // Check that the context was assigned.
     if (!_swsContext->getContext()) {
-      delete [] buffer;
 
       // This means we will try to assign the 
       // context again next time.
       _swsContext.reset();
-      return picture;
+      
+      // Can't do anything now, though.
+      im.reset();
+      return im;
     }
   }
+#endif
+
+  int bufsize = avpicture_get_size(pixFmt, width, height);
+      if (bufsize == -1) {
+        im.reset();
+        return im;
+      }
+
+  boost::uint8_t* buffer = new boost::uint8_t[bufsize];
+
+  AVPicture picture;
+  picture.data[0] = NULL;
+
+  avpicture_fill(&picture, buffer, pixFmt, width, height);
 
   // Is it possible for the context to be reset
   // to NULL once it's been created?
   assert(_swsContext->getContext());
 
+
+#ifndef HAVE_SWSCALE_H
+  img_convert(&picture, PIX_FMT_RGB24, (AVPicture*) &srcFrame,
+      srcCtx->pix_fmt, width, height);
+#else
   int rv = sws_scale(_swsContext->getContext(), 
const_cast<uint8_t**>(srcFrame.data),
     const_cast<int*>(srcFrame.linesize), 0, height, picture.data,
     picture.linesize);
 
   if (rv == -1) {
     delete [] buffer;
+    im.reset();
+    return im;
   }
-
-#endif // HAVE_SWSCALE_H
-  return picture;
+#endif
+
+  im->update(picture.data[0]);
+  return im;
+
 }
 
 std::auto_ptr<image::ImageBase>
@@ -238,10 +263,7 @@
     return ret;
   }
 
-  AVPicture rgbpicture = convertRGB24(_videoCodecCtx, *frame);
-  
-  ret.reset(new image::ImageRGB(rgbpicture.data[0], _videoCodecCtx->width,
-                           _videoCodecCtx->height, rgbpicture.linesize[0]));
+  ret = frameToImage(_videoCodecCtx, *frame);
 
   // FIXME: av_free doesn't free frame->data!
   av_free(frame);

=== modified file 'libmedia/ffmpeg/VideoDecoderFfmpeg.h'
--- a/libmedia/ffmpeg/VideoDecoderFfmpeg.h      2008-08-28 09:54:06 +0000
+++ b/libmedia/ffmpeg/VideoDecoderFfmpeg.h      2008-08-28 16:30:58 +0000
@@ -24,6 +24,7 @@
 #include "gnashconfig.h"
 #endif
 
+#include <memory>
 #include "dsodefs.h" //For DSOEXPORT
 #include "log.h"
 #include "VideoDecoder.h"
@@ -52,10 +53,12 @@
 
 
 /// Forward declarations
+class image::ImageBase;
 #ifdef HAVE_SWSCALE_H
 class SwsContextWrapper;
 #endif
 
+
 class VideoDecoderFfmpeg : public VideoDecoder {
     
 public:
@@ -82,7 +85,7 @@
     ///                 caller owns that pointer, which must be freed with 
delete [].
     ///                 It is advised to wrap the pointer in a 
boost::scoped_array.
     ///                 If conversion fails, AVPicture::data[0] will be NULL.
-    AVPicture convertRGB24(AVCodecContext* srcCtx, const AVFrame& srcFrame);
+    std::auto_ptr<image::ImageBase> frameToImage(AVCodecContext* srcCtx, const 
AVFrame& srcFrame);
 
     /// Convert FLASH codec id to FFMPEG codec id
     //


reply via email to

[Prev in Thread] Current Thread [Next in Thread]