From David Longest, "I have updated the FFmpeg plugin to support the 1.0 release version of FFmpeg. The files attached were modified in order to facilitate the update. Below are the details for all changes made.

Header update

FindFFmpeg.cmake has been changed in order to support the new header include format for FFmpeg. In the 1.0 release, a new file had been added with the name “time.h” in the avutil library. The previous method of adding includes caused conflicts with the ANSI C “time.h” file. Now the include directive will only use the main include folder. All files using the old include format have been updated to reflect the change.



Added __STDC_CONSTANT_MACROS define to CMakeLists.txt

Since there is no guarantee that FFmpegHeaders.hpp will be included before stdint.h is included, the define has been moved from FFmpegHeaders.hpp to be part of the CMakeLists.txt for the FFmpeg plugin. This will allow the define to work on all compilers regardless of include order.



Replaced AVFormatParameters with AVDictionary

AVFormatParameters is no longer supported in FFmpeg and has been replaced with a key/value map of strings for each setting. FFmpegParameters and FFmpegDecoder have been updated to reflect this.



Replaced av_open_input_file with avformat_open_input

FFmpeg now opens files using avformat_open_input. Since the av_open_input_file method is deprecated, the FFmpegDecoder class has been updated to reflect this change.



Added custom AVIOContext field to options

Since some formats and inputs may not be supported by FFmpeg, I have added a new parameter that allows a user to allocate their own AVIOContext. This class will allow for creating a read, seek, and write callback if they desire.



Checking for start_time validity

It is possible for some file formats to not provide a start_time to FFmpeg. This would cause stuttering in the video since the clocks class would be incorrect.



Removed findVideoStream and findAudioStream

The new FFmpeg release already has a function that will find the best audio and video stream. The code has been replaced with this function.



Updated error reporting

Some functions would not log an error when opening a file or modifying a file failed. New logs have been added as well as a function to convert error numbers to their string descriptions.



decode_video has been replaced

The old decode_video function would remove extra data that some decoders use in order to properly decode a packet. Now avcodec_decode_video2 has replaced that function.



Picture format changed from RGBA32 to RGB24

Since most video will not contain an alpha channel, using a 24-bit texture will use less memory."
This commit is contained in:
Robert Osfield 2013-02-06 12:46:03 +00:00
parent 19bfa92c91
commit 29eb65c77d
10 changed files with 202 additions and 188 deletions

View File

@ -11,7 +11,8 @@
#In ffmpeg code, old version use "#include <header.h>" and newer use "#include <libname/header.h>" #In ffmpeg code, old version use "#include <header.h>" and newer use "#include <libname/header.h>"
#In OSG ffmpeg plugin, we use "#include <header.h>" for compatibility with old version of ffmpeg #In OSG ffmpeg plugin, we used "#include <header.h>" for compatibility with old version of ffmpeg
#With the new version of FFmpeg, a file named "time.h" was added that breaks compatibility with the old version of ffmpeg.
#We have to search the path which contain the header.h (usefull for old version) #We have to search the path which contain the header.h (usefull for old version)
#and search the path which contain the libname/header.h (usefull for new version) #and search the path which contain the libname/header.h (usefull for new version)
@ -131,20 +132,17 @@ IF (FFMPEG_LIBAVFORMAT_FOUND AND FFMPEG_LIBAVDEVICE_FOUND AND FFMPEG_LIBAVCODE
SET(FFMPEG_FOUND "YES") SET(FFMPEG_FOUND "YES")
SET(FFMPEG_INCLUDE_DIRS SET(FFMPEG_INCLUDE_DIRS
${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}/libavformat ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS}
${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}/libavdevice ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS}
${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}/libavcodec ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS}
${FFMPEG_LIBAVUTIL_INCLUDE_DIRS} ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}/libavutil ${FFMPEG_LIBAVUTIL_INCLUDE_DIRS}
) )
# Using the new include style for FFmpeg prevents issues with #include <time.h>
IF (FFMPEG_STDINT_INCLUDE_DIR) IF (FFMPEG_STDINT_INCLUDE_DIR)
SET(FFMPEG_INCLUDE_DIRS SET(FFMPEG_INCLUDE_DIRS
${FFMPEG_INCLUDE_DIRS} ${FFMPEG_INCLUDE_DIRS}
${FFMPEG_STDINT_INCLUDE_DIR} ${FFMPEG_STDINT_INCLUDE_DIR}
${FFMPEG_STDINT_INCLUDE_DIR}/libavformat
${FFMPEG_STDINT_INCLUDE_DIR}/libavdevice
${FFMPEG_STDINT_INCLUDE_DIR}/libavcodec
${FFMPEG_STDINT_INCLUDE_DIR}/libavutil
) )
ENDIF() ENDIF()

View File

@ -13,6 +13,8 @@ IF(FFMPEG_LIBSWSCALE_FOUND)
ENDIF() ENDIF()
ADD_DEFINITIONS(-D__STDC_CONSTANT_MACROS)
# MESSAGE("FFMPEG_LIBAVFORMAT_INCLUDE_DIRS = " ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} ) # MESSAGE("FFMPEG_LIBAVFORMAT_INCLUDE_DIRS = " ${FFMPEG_LIBAVFORMAT_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBAVDEVICE_INCLUDE_DIRS = " ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} ) # MESSAGE("FFMPEG_LIBAVDEVICE_INCLUDE_DIRS = " ${FFMPEG_LIBAVDEVICE_INCLUDE_DIRS} )
# MESSAGE("FFMPEG_LIBAVCODEC_INCLUDE_DIRS = " ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} ) # MESSAGE("FFMPEG_LIBAVCODEC_INCLUDE_DIRS = " ${FFMPEG_LIBAVCODEC_INCLUDE_DIRS} )

View File

@ -28,7 +28,12 @@
namespace osgFFmpeg { namespace osgFFmpeg {
static std::string AvStrError(int errnum)
{
char buf[128];
av_strerror(errnum, buf, sizeof(buf));
return std::string(buf);
}
FFmpegDecoder::FFmpegDecoder() : FFmpegDecoder::FFmpegDecoder() :
m_audio_stream(0), m_audio_stream(0),
@ -60,25 +65,20 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
if (filename.compare(0, 5, "/dev/")==0) if (filename.compare(0, 5, "/dev/")==0)
{ {
#ifdef ANDROID
throw std::runtime_error("Device not supported on Android");
#else
avdevice_register_all(); avdevice_register_all();
OSG_NOTICE<<"Attempting to stream "<<filename<<std::endl; OSG_NOTICE<<"Attempting to stream "<<filename<<std::endl;
AVFormatParameters formatParams;
memset(&formatParams, 0, sizeof(AVFormatParameters));
AVInputFormat *iformat; AVInputFormat *iformat;
formatParams.channel = 0;
formatParams.standard = 0;
#if 1 #if 1
formatParams.width = 320; av_dict_set(parameters->getOptions(), "video_size", "320x240", 0);
formatParams.height = 240;
#else #else
formatParams.width = 640; av_dict_set(parameters->getOptions(), "video_size", "640x480", 0);
formatParams.height = 480;
#endif #endif
formatParams.time_base.num = 1; av_dict_set(parameters->getOptions(), "framerate", "1:30", 0);
formatParams.time_base.den = 30;
std::string format = "video4linux2"; std::string format = "video4linux2";
iformat = av_find_input_format(format.c_str()); iformat = av_find_input_format(format.c_str());
@ -92,7 +92,7 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
OSG_NOTICE<<"Failed to find input format: "<<format<<std::endl; OSG_NOTICE<<"Failed to find input format: "<<format<<std::endl;
} }
int error = av_open_input_file(&p_format_context, filename.c_str(), iformat, 0, &formatParams); int error = avformat_open_input(&p_format_context, filename.c_str(), iformat, parameters->getOptions());
if (error != 0) if (error != 0)
{ {
std::string error_str; std::string error_str;
@ -112,34 +112,53 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
throw std::runtime_error("av_open_input_file() failed : " + error_str); throw std::runtime_error("av_open_input_file() failed : " + error_str);
} }
#endif
} }
else else
{ {
AVInputFormat* av_format = (parameters ? parameters->getFormat() : 0); AVInputFormat* iformat = (parameters ? parameters->getFormat() : 0);
AVFormatParameters* av_params = (parameters ? parameters->getFormatParameter() : 0); AVIOContext* context = parameters->getContext();
if (av_open_input_file(&p_format_context, filename.c_str(), av_format, 0, av_params) !=0 ) if (context != NULL)
{
p_format_context = avformat_alloc_context();
p_format_context->pb = context;
}
if (avformat_open_input(&p_format_context, filename.c_str(), iformat, parameters->getOptions()) != 0)
throw std::runtime_error("av_open_input_file() failed"); throw std::runtime_error("av_open_input_file() failed");
} }
m_format_context.reset(p_format_context); m_format_context.reset(p_format_context);
// Retrieve stream info // Retrieve stream info
if (av_find_stream_info(p_format_context) < 0) // Only buffer up to one and a half seconds
p_format_context->max_analyze_duration = AV_TIME_BASE * 1.5f;
if (avformat_find_stream_info(p_format_context, NULL) < 0)
throw std::runtime_error("av_find_stream_info() failed"); throw std::runtime_error("av_find_stream_info() failed");
m_duration = double(m_format_context->duration) / AV_TIME_BASE; m_duration = double(m_format_context->duration) / AV_TIME_BASE;
m_start = double(m_format_context->start_time) / AV_TIME_BASE; if (m_format_context->start_time != AV_NOPTS_VALUE)
m_start = double(m_format_context->start_time) / AV_TIME_BASE;
else
m_start = 0;
// TODO move this elsewhere // TODO move this elsewhere
m_clocks.reset(m_start); m_clocks.reset(m_start);
// Dump info to stderr // Dump info to stderr
dump_format(p_format_context, 0, filename.c_str(), false); av_dump_format(p_format_context, 0, filename.c_str(), false);
// Find and open the first video and audio streams (note that audio stream is optional and only opened if possible) // Find and open the first video and audio streams (note that audio stream is optional and only opened if possible)
if ((m_video_index = av_find_best_stream(m_format_context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0)) < 0)
throw std::runtime_error("Could not open video stream");
m_video_stream = m_format_context->streams[m_video_index];
findVideoStream(); if ((m_audio_index = av_find_best_stream(m_format_context.get(), AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) >= 0)
findAudioStream(); m_audio_stream = m_format_context->streams[m_audio_index];
else
{
m_audio_stream = 0;
m_audio_index = std::numeric_limits<unsigned int>::max();
}
m_video_decoder.open(m_video_stream); m_video_decoder.open(m_video_stream);
@ -196,6 +215,7 @@ bool FFmpegDecoder::readNextPacket()
return readNextPacketSeeking(); return readNextPacketSeeking();
default: default:
OSG_FATAL << "unknown decoder state " << m_state << std::endl;
assert(false); assert(false);
return false; return false;
} }
@ -230,40 +250,6 @@ void FFmpegDecoder::pause()
m_state = PAUSE; m_state = PAUSE;
} }
void FFmpegDecoder::findAudioStream()
{
for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
{
if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
{
m_audio_stream = m_format_context->streams[i];
m_audio_index = i;
return;
}
}
m_audio_stream = 0;
m_audio_index = std::numeric_limits<unsigned int>::max();
}
void FFmpegDecoder::findVideoStream()
{
for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
{
if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
{
m_video_stream = m_format_context->streams[i];
m_video_index = i;
return;
}
}
throw std::runtime_error("could not find a video stream");
}
inline void FFmpegDecoder::flushAudioQueue() inline void FFmpegDecoder::flushAudioQueue()
{ {
@ -290,12 +276,15 @@ bool FFmpegDecoder::readNextPacketNormal()
bool end_of_stream = false; bool end_of_stream = false;
// Read the next frame packet // Read the next frame packet
if (av_read_frame(m_format_context.get(), &packet) < 0) int error = av_read_frame(m_format_context.get(), &packet);
if (error < 0)
{ {
if (url_ferror(m_format_context->pb) == 0) if (error == AVERROR_EOF || url_feof(m_format_context.get()->pb))
end_of_stream = true; end_of_stream = true;
else else {
OSG_FATAL << "av_read_frame() returned " << AvStrError(error) << std::endl;
throw std::runtime_error("av_read_frame() failed"); throw std::runtime_error("av_read_frame() failed");
}
} }
if (end_of_stream) if (end_of_stream)
@ -314,8 +303,10 @@ bool FFmpegDecoder::readNextPacketNormal()
else else
{ {
// Make the packet data available beyond av_read_frame() logical scope. // Make the packet data available beyond av_read_frame() logical scope.
if (av_dup_packet(&packet) < 0) if ((error = av_dup_packet(&packet)) < 0) {
OSG_FATAL << "av_dup_packet() returned " << AvStrError(error) << std::endl;
throw std::runtime_error("av_dup_packet() failed"); throw std::runtime_error("av_dup_packet() failed");
}
m_pending_packet = FFmpegPacket(packet); m_pending_packet = FFmpegPacket(packet);
} }
@ -381,8 +372,11 @@ void FFmpegDecoder::rewindButDontFlushQueues()
const int64_t pos = int64_t(m_clocks.getStartTime() * double(AV_TIME_BASE)); const int64_t pos = int64_t(m_clocks.getStartTime() * double(AV_TIME_BASE));
const int64_t seek_target = av_rescale_q(pos, AvTimeBaseQ, m_video_stream->time_base); const int64_t seek_target = av_rescale_q(pos, AvTimeBaseQ, m_video_stream->time_base);
if (av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/) < 0) int error = 0;
if ((error = av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/)) < 0) {
OSG_FATAL << "av_seek_frame returned " << AvStrError(error) << std::endl;
throw std::runtime_error("av_seek_frame failed()"); throw std::runtime_error("av_seek_frame failed()");
}
m_clocks.rewind(); m_clocks.rewind();
m_state = REWINDING; m_state = REWINDING;
@ -407,8 +401,11 @@ void FFmpegDecoder::seekButDontFlushQueues(double time)
m_clocks.setSeekTime(time); m_clocks.setSeekTime(time);
if (av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/) < 0) int error = 0;
if ((error = av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/)) < 0) {
OSG_FATAL << "av_seek_frame() returned " << AvStrError(error) << std::endl;
throw std::runtime_error("av_seek_frame failed()"); throw std::runtime_error("av_seek_frame failed()");
}
m_clocks.seek(time); m_clocks.seek(time);
m_state = SEEKING; m_state = SEEKING;

View File

@ -46,8 +46,13 @@ class FormatContextPtr
{ {
if (_ptr) if (_ptr)
{ {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 17, 0)
OSG_NOTICE<<"Calling avformat_close_input("<<&_ptr<<")"<<std::endl;
avformat_close_input(&_ptr);
#else
OSG_NOTICE<<"Calling av_close_input_file("<<_ptr<<")"<<std::endl; OSG_NOTICE<<"Calling av_close_input_file("<<_ptr<<")"<<std::endl;
av_close_input_file(_ptr); av_close_input_file(_ptr);
#endif
} }
_ptr = 0; _ptr = 0;
} }
@ -100,8 +105,6 @@ protected:
typedef BoundedMessageQueue<FFmpegPacket> PacketQueue; typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;
void findAudioStream();
void findVideoStream();
void flushAudioQueue(); void flushAudioQueue();
void flushVideoQueue(); void flushVideoQueue();
bool readNextPacketNormal(); bool readNextPacketNormal();
@ -151,7 +154,7 @@ inline bool FFmpegDecoder::loop() const
inline double FFmpegDecoder::creation_time() const inline double FFmpegDecoder::creation_time() const
{ {
if(m_format_context) return m_format_context->timestamp; if(m_format_context) return m_format_context->start_time;
else return HUGE_VAL; else return HUGE_VAL;
} }

View File

@ -8,27 +8,6 @@
namespace osgFFmpeg { namespace osgFFmpeg {
static int decode_video(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
const uint8_t *buf, int buf_size)
{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=32)
// following code segment copied from ffmpeg avcodec_decode_video() implementation
// to avoid warnings about deprecated function usage.
AVPacket avpkt;
av_init_packet(&avpkt);
avpkt.data = const_cast<uint8_t *>(buf);
avpkt.size = buf_size;
// HACK for CorePNG to decode as normal PNG by default
avpkt.flags = AV_PKT_FLAG_KEY;
return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt);
#else
// fallback for older versions of ffmpeg that don't have avcodec_decode_video2.
return avcodec_decode_video(avctx, picture, got_picture_ptr, buf, buf_size);
#endif
}
FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) : FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
m_packets(packets), m_packets(packets),
@ -105,7 +84,7 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
// m_context->flags |= CODEC_FLAG_TRUNCATED; // m_context->flags |= CODEC_FLAG_TRUNCATED;
// Open codec // Open codec
if (avcodec_open(m_context, m_codec) < 0) if (avcodec_open2(m_context, m_codec, NULL) < 0)
throw std::runtime_error("avcodec_open() failed"); throw std::runtime_error("avcodec_open() failed");
// Allocate video frame // Allocate video frame
@ -113,11 +92,11 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
// Allocate converted RGB frame // Allocate converted RGB frame
m_frame_rgba.reset(avcodec_alloc_frame()); m_frame_rgba.reset(avcodec_alloc_frame());
m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height())); m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB24, width(), height()));
m_buffer_rgba[1].resize(m_buffer_rgba[0].size()); m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
// Assign appropriate parts of the buffer to image planes in m_frame_rgba // Assign appropriate parts of the buffer to image planes in m_frame_rgba
avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height()); avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB24, width(), height());
// Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame. // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
m_context->opaque = this; m_context->opaque = this;
@ -183,7 +162,8 @@ void FFmpegDecoderVideo::decodeLoop()
int frame_finished = 0; int frame_finished = 0;
const int bytes_decoded = decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining); // We want to use the entire packet since some codecs will require extra information for decoding
const int bytes_decoded = avcodec_decode_video2(m_context, m_frame.get(), &frame_finished, &(packet.packet));
if (bytes_decoded < 0) if (bytes_decoded < 0)
throw std::runtime_error("avcodec_decode_video failed()"); throw std::runtime_error("avcodec_decode_video failed()");
@ -191,29 +171,37 @@ void FFmpegDecoderVideo::decodeLoop()
m_bytes_remaining -= bytes_decoded; m_bytes_remaining -= bytes_decoded;
m_packet_data += bytes_decoded; m_packet_data += bytes_decoded;
// Find out the frame pts
if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
m_frame->opaque != 0 &&
*reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
{
pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
}
else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
{
pts = packet.packet.dts;
}
else
{
pts = 0;
}
pts *= av_q2d(m_stream->time_base);
// Publish the frame if we have decoded a complete frame // Publish the frame if we have decoded a complete frame
if (frame_finished) if (frame_finished)
{ {
const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts); AVRational timebase;
// Find out the frame pts
if (m_frame->pts != int64_t(AV_NOPTS_VALUE))
{
pts = m_frame->pts;
timebase = m_context->time_base;
}
else if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
m_frame->opaque != 0 &&
*reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
{
pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
timebase = m_stream->time_base;
}
else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
{
pts = packet.packet.dts;
timebase = m_stream->time_base;
}
else
{
pts = 0;
timebase = m_context->time_base;
}
pts *= av_q2d(timebase);
const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(timebase), pts);
const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts); const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);
publishFrame(frame_delay, m_clocks.audioDisabled()); publishFrame(frame_delay, m_clocks.audioDisabled());
@ -278,21 +266,21 @@ int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
} }
OSG_INFO<<"Using sws_scale "; OSG_DEBUG<<"Using sws_scale ";
int result = sws_scale(m_swscale_ctx, int result = sws_scale(m_swscale_ctx,
(src->data), (src->linesize), 0, src_height, (src->data), (src->linesize), 0, src_height,
(dst->data), (dst->linesize)); (dst->data), (dst->linesize));
#else #else
OSG_INFO<<"Using img_convert "; OSG_DEBUG<<"Using img_convert ";
int result = img_convert(dst, dst_pix_fmt, src, int result = img_convert(dst, dst_pix_fmt, src,
src_pix_fmt, src_width, src_height); src_pix_fmt, src_width, src_height);
#endif #endif
osg::Timer_t endTick = osg::Timer::instance()->tick(); osg::Timer_t endTick = osg::Timer::instance()->tick();
OSG_INFO<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl; OSG_DEBUG<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;
return result; return result;
} }
@ -320,14 +308,14 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
AVPicture * const dst = (AVPicture *) m_frame_rgba.get(); AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
// Assign appropriate parts of the buffer to image planes in m_frame_rgba // Assign appropriate parts of the buffer to image planes in m_frame_rgba
avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height()); avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB24, width(), height());
// Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
if (m_context->pix_fmt == PIX_FMT_YUVA420P) if (m_context->pix_fmt == PIX_FMT_YUVA420P)
yuva420pToRgba(dst, src, width(), height()); yuva420pToRgba(dst, src, width(), height());
else else
convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height()); convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
// Wait 'delay' seconds before publishing the picture. // Wait 'delay' seconds before publishing the picture.
int i_delay = static_cast<int>(delay * 1000000 + 0.5); int i_delay = static_cast<int>(delay * 1000000 + 0.5);
@ -354,7 +342,7 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height) void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{ {
convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height); convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
const size_t bpp = 4; const size_t bpp = 4;

View File

@ -5,17 +5,20 @@
extern "C" extern "C"
{ {
#define __STDC_CONSTANT_MACROS
#define FF_API_OLD_SAMPLE_FMT 0 #define FF_API_OLD_SAMPLE_FMT 0
#include <errno.h> // for error codes defined in avformat.h #include <errno.h> // for error codes defined in avformat.h
#include <stdint.h> #include <stdint.h>
#include <avcodec.h> #include <libavcodec/avcodec.h>
#include <avformat.h> #include <libavformat/avformat.h>
#include <avdevice.h>
#include <mathematics.h> #ifndef ANDROID
#include <libavdevice/avdevice.h>
#endif
#include <libavutil/mathematics.h>
#ifdef USE_SWSCALE #ifdef USE_SWSCALE
#include <swscale.h> #include <libswscale/swscale.h>
#endif #endif
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(50,38,0) #if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(50,38,0)

View File

@ -69,7 +69,7 @@ bool FFmpegImageStream::open(const std::string & filename, FFmpegParameters* par
return false; return false;
setImage( setImage(
m_decoder->video_decoder().width(), m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE, m_decoder->video_decoder().width(), m_decoder->video_decoder().height(), 1, GL_RGB, GL_RGB, GL_UNSIGNED_BYTE,
const_cast<unsigned char *>(m_decoder->video_decoder().image()), NO_DELETE const_cast<unsigned char *>(m_decoder->video_decoder().image()), NO_DELETE
); );
@ -329,7 +329,7 @@ void FFmpegImageStream::publishNewFrame(const FFmpegDecoderVideo &, void * user_
#if 1 #if 1
this_->setImage( this_->setImage(
this_->m_decoder->video_decoder().width(), this_->m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE, this_->m_decoder->video_decoder().width(), this_->m_decoder->video_decoder().height(), 1, GL_RGB, GL_RGB, GL_UNSIGNED_BYTE,
const_cast<unsigned char *>(this_->m_decoder->video_decoder().image()), NO_DELETE const_cast<unsigned char *>(this_->m_decoder->video_decoder().image()), NO_DELETE
); );
#else #else

View File

@ -8,25 +8,18 @@
#if LIBAVCODEC_VERSION_MAJOR >= 53 #if LIBAVCODEC_VERSION_MAJOR >= 53
extern "C" extern "C"
{ {
#include <parseutils.h> #include <libavutil/parseutils.h>
} }
#define av_parse_video_frame_size av_parse_video_size #define av_parse_video_frame_size av_parse_video_size
#define av_parse_video_frame_rate av_parse_video_rate #define av_parse_video_frame_rate av_parse_video_rate
#endif #endif
#if LIBAVCODEC_VERSION_MAJOR >= 53 || \ extern "C"
(LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=49) {
#include <libavutil/pixdesc.h>
}
extern "C" inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
{
#include <pixdesc.h>
}
inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
#else
inline PixelFormat osg_av_get_pix_fmt(const char *name) { return avcodec_get_pix_fmt(name); }
#endif
namespace osgFFmpeg { namespace osgFFmpeg {
@ -34,14 +27,18 @@ namespace osgFFmpeg {
FFmpegParameters::FFmpegParameters() : FFmpegParameters::FFmpegParameters() :
m_context(0),
m_options(0),
m_format(0) m_format(0)
{ {
memset(&m_parameters, 0, sizeof(m_parameters)); // Initialize the dictionary
av_dict_set(&m_options, "foo", "bar", 0);
} }
FFmpegParameters::~FFmpegParameters() FFmpegParameters::~FFmpegParameters()
{} {
av_dict_free(&m_options);
}
void FFmpegParameters::parse(const std::string& name, const std::string& value) void FFmpegParameters::parse(const std::string& name, const std::string& value)
@ -50,50 +47,19 @@ void FFmpegParameters::parse(const std::string& name, const std::string& value)
{ {
return; return;
} }
else if (name == "format") if (name == "format")
{ {
#ifndef ANDROID
avdevice_register_all(); avdevice_register_all();
#endif
m_format = av_find_input_format(value.c_str()); m_format = av_find_input_format(value.c_str());
if (!m_format) if (!m_format)
OSG_NOTICE<<"Failed to apply input video format: "<<value.c_str()<<std::endl; OSG_NOTICE<<"Failed to apply input video format: "<<value.c_str()<<std::endl;
} }
else if (name == "pixel_format")
{
m_parameters.pix_fmt = osg_av_get_pix_fmt(value.c_str());
}
else if (name == "frame_size")
{
int frame_width = 0, frame_height = 0;
if (av_parse_video_frame_size(&frame_width, &frame_height, value.c_str()) < 0)
{
OSG_NOTICE<<"Failed to apply frame size: "<<value.c_str()<<std::endl;
return;
}
if ((frame_width % 2) != 0 || (frame_height % 2) != 0)
{
OSG_NOTICE<<"Frame size must be a multiple of 2: "<<frame_width<<"x"<<frame_height<<std::endl;
return;
}
m_parameters.width = frame_width;
m_parameters.height = frame_height;
}
else if (name == "frame_rate") else if (name == "frame_rate")
{ av_dict_set(&m_options, "framerate", value.c_str(), 0);
AVRational frame_rate; else
if (av_parse_video_frame_rate(&frame_rate, value.c_str()) < 0) av_dict_set(&m_options, name.c_str(), value.c_str(), 0);
{
OSG_NOTICE<<"Failed to apply frame rate: "<<value.c_str()<<std::endl;
return;
}
m_parameters.time_base.den = frame_rate.num;
m_parameters.time_base.num = frame_rate.den;
}
else if (name == "audio_sample_rate")
{
int audio_sample_rate = 44100;
std::stringstream ss(value); ss >> audio_sample_rate;
m_parameters.sample_rate = audio_sample_rate;
}
} }

View File

@ -21,14 +21,17 @@ public:
bool isFormatAvailable() const { return m_format!=NULL; } bool isFormatAvailable() const { return m_format!=NULL; }
AVInputFormat* getFormat() { return m_format; } AVInputFormat* getFormat() { return m_format; }
AVFormatParameters* getFormatParameter() { return &m_parameters; } AVDictionary** getOptions() { return &m_options; }
void setContext(AVIOContext* context) { m_context = context; }
AVIOContext* getContext() { return m_context; }
void parse(const std::string& name, const std::string& value); void parse(const std::string& name, const std::string& value);
protected: protected:
AVInputFormat* m_format; AVInputFormat* m_format;
AVFormatParameters m_parameters; AVIOContext* m_context;
AVDictionary* m_options;
}; };

View File

@ -26,7 +26,46 @@
#define USE_AV_LOCK_MANAGER #define USE_AV_LOCK_MANAGER
#endif #endif
extern "C" {
static void log_to_osg(void *ptr, int level, const char *fmt, va_list vl)
{
char logbuf[256];
vsnprintf(logbuf, sizeof(logbuf), fmt, vl);
logbuf[sizeof(logbuf) - 1] = '\0';
osg::NotifySeverity severity = osg::DEBUG_FP;
switch (level) {
case AV_LOG_PANIC:
severity = osg::ALWAYS;
break;
case AV_LOG_FATAL:
severity = osg::FATAL;
break;
case AV_LOG_ERROR:
severity = osg::WARN;
break;
case AV_LOG_WARNING:
severity = osg::NOTICE;
break;
case AV_LOG_INFO:
severity = osg::INFO;
break;
case AV_LOG_VERBOSE:
severity = osg::DEBUG_INFO;
break;
default:
case AV_LOG_DEBUG:
severity = osg::DEBUG_FP;
break;
}
// Most av_logs have a trailing newline already
osg::notify(severity) << logbuf;
}
} // extern "C"
/** Implementation heavily inspired by http://www.dranger.com/ffmpeg/ */ /** Implementation heavily inspired by http://www.dranger.com/ffmpeg/ */
@ -38,6 +77,8 @@ public:
{ {
supportsProtocol("http","Read video/audio from http using ffmpeg."); supportsProtocol("http","Read video/audio from http using ffmpeg.");
supportsProtocol("rtsp","Read video/audio from rtsp using ffmpeg."); supportsProtocol("rtsp","Read video/audio from rtsp using ffmpeg.");
supportsProtocol("rtp","Read video/audio from rtp using ffmpeg.");
supportsProtocol("tcp","Read video/audio from tcp using ffmpeg.");
supportsExtension("ffmpeg", ""); supportsExtension("ffmpeg", "");
supportsExtension("avi", ""); supportsExtension("avi", "");
@ -61,6 +102,9 @@ public:
supportsOption("frame_size", "Set frame size (e.g. 320x240)"); supportsOption("frame_size", "Set frame size (e.g. 320x240)");
supportsOption("frame_rate", "Set frame rate (e.g. 25)"); supportsOption("frame_rate", "Set frame rate (e.g. 25)");
supportsOption("audio_sample_rate", "Set audio sampling rate (e.g. 44100)"); supportsOption("audio_sample_rate", "Set audio sampling rate (e.g. 44100)");
supportsOption("context", "AVIOContext* for custom IO");
av_log_set_callback(log_to_osg);
#ifdef USE_AV_LOCK_MANAGER #ifdef USE_AV_LOCK_MANAGER
// enable thread locking // enable thread locking
@ -68,6 +112,8 @@ public:
#endif #endif
// Register all FFmpeg formats/codecs // Register all FFmpeg formats/codecs
av_register_all(); av_register_all();
avformat_network_init();
} }
virtual ~ReaderWriterFFmpeg() virtual ~ReaderWriterFFmpeg()
@ -135,6 +181,14 @@ private:
parameters->parse(name, options->getPluginStringData(name)); parameters->parse(name, options->getPluginStringData(name));
} }
} }
if (options && options->getNumPluginData()>0)
{
AVIOContext* context = (AVIOContext*)options->getPluginData("context");
if (context != NULL)
{
parameters->setContext(context);
}
}
} }
#ifdef USE_AV_LOCK_MANAGER #ifdef USE_AV_LOCK_MANAGER