From Tanguy Fautre (Aris Technologies), ffmpeg plugin

This commit is contained in:
Robert Osfield 2009-02-25 16:04:48 +00:00
parent 9d91163508
commit 6680ee2b10
18 changed files with 2734 additions and 0 deletions

View File

@ -0,0 +1,40 @@
#ifndef HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H
#define HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H
#include <osg/Object>
namespace osgFFmpeg
{
/// Abstract interface for an audio output ("sink") fed by the FFmpeg decoder.
/// Concrete sinks implement startPlaying()/playing(); the decoder reads
/// getDelay() to compensate the A/V clocks for the sink's output latency.
class AudioSinkInterface : public osg::Object
{
public:

    AudioSinkInterface() :
        m_delay(0.0) { }

    /// Start audio playback (invoked once by the audio decoder thread).
    virtual void startPlaying() = 0;

    /// True while the sink is actively playing.
    virtual bool playing() const = 0;

    /// Output latency in seconds, used for audio/video synchronization.
    virtual double getDelay() const { return m_delay; }
    virtual void setDelay(const double delay) { m_delay = delay; }

    virtual const char * libraryName() const { return "osgFFmpeg"; }
    virtual const char * className() const { return "AudioSinkInterface"; }

private:

    // Cloning is deliberately disabled: a sink wraps a live audio device,
    // which cannot be meaningfully copied.
    virtual AudioSinkInterface * cloneType() const { return 0; }
    virtual AudioSinkInterface * clone(const osg::CopyOp &) const { return 0; }

    double m_delay;
};
}
#endif // HEADER_GUARD_OSGFFMPEG_AUDIO_SINK_INTERFACE_H

View File

@ -0,0 +1,322 @@
#ifndef HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H
#define HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H
#include <OpenThreads/Condition>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include <cassert>
#include <algorithm>
#include <vector>
namespace osgFFmpeg {
/// Fixed-capacity, thread-safe FIFO message queue backed by a circular buffer.
/// Producers block (or time out) while the queue is full; consumers block
/// (or time out) while it is empty. T must be default-constructible and
/// copy-assignable.
template <class T>
class BoundedMessageQueue
{
public:

    typedef T value_type;
    typedef size_t size_type;

    explicit BoundedMessageQueue(size_type capacity);
    ~BoundedMessageQueue();

    /// Discard all pending messages.
    void clear();

    /// Discard all pending messages, passing each one to 'destructor' so the
    /// caller can release whatever the message owns.
    template <class Destructor>
    void flush(const Destructor destructor);

    /// Blocking push: waits until a slot is free.
    void push(const value_type & value);
    /// Non-blocking push: returns false if the queue is full.
    bool tryPush(const value_type & value);
    /// Push with a timeout in milliseconds: returns false if no slot freed up.
    bool timedPush(const value_type & value, unsigned long ms);

    /// Blocking pop: waits until a message is available.
    value_type pop();
    /// Non-blocking pop: sets 'is_empty' and returns a default value if empty.
    value_type tryPop(bool & is_empty);
    /// Pop with a timeout in milliseconds: sets 'is_empty' on timeout.
    value_type timedPop(bool & is_empty, unsigned long ms);

private:

    // Non-copyable (declared, never defined).
    BoundedMessageQueue(const BoundedMessageQueue &);
    BoundedMessageQueue & operator = (const BoundedMessageQueue &);

    typedef std::vector<T> Buffer;
    typedef OpenThreads::Condition Condition;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::ScopedLock<Mutex> ScopedLock;

    // The following helpers require m_mutex to be held by the caller.
    bool isFull() const;
    bool isEmpty() const;
    void unsafePush(const value_type & value);
    value_type unsafePop();

    Buffer m_buffer;        // circular storage; m_buffer.size() is the capacity
    size_type m_begin;      // index of the oldest queued message
    size_type m_end;        // index one past the newest message (wraps around)
    size_type m_size;       // number of messages currently queued
    Mutex m_mutex;
    Condition m_not_empty;  // signalled after each successful push
    Condition m_not_full;   // signalled/broadcast after each pop/clear/flush
};
/// Construct a queue holding at most 'capacity' messages.
/// Note: the vector is sized (not merely reserved) because the capacity is
/// derived from m_buffer.size() in isFull().
template <class T>
BoundedMessageQueue<T>::BoundedMessageQueue(const size_type capacity) :
    m_buffer(capacity),
    m_begin(0),
    m_end(0),
    m_size(0)
{
}
/// Destructor. Threads still blocked in push()/pop() must be stopped by the
/// caller before the queue is destroyed.
template <class T>
BoundedMessageQueue<T>::~BoundedMessageQueue()
{
}
/// Discard all pending messages and wake up any blocked producers.
///
/// Bug fix: the previous implementation called m_buffer.clear(), which set the
/// vector's size() to zero. Since isFull() compares m_size against
/// m_buffer.size(), the queue then reported "full" forever (0 == 0) and every
/// subsequent push() deadlocked. We keep the storage intact and only reset the
/// indices; slots are overwritten with default-constructed values so that any
/// resources held by the queued objects are dropped, matching the element
/// destruction the old code performed.
template <class T>
void BoundedMessageQueue<T>::clear()
{
    {
        ScopedLock lock(m_mutex);

        std::fill(m_buffer.begin(), m_buffer.end(), value_type());
        m_begin = 0;
        m_end = 0;
        m_size = 0;
    }

    m_not_full.broadcast();
}
/// Discard all pending messages, invoking 'destructor' on each one so the
/// caller can release resources the messages own (e.g. FFmpeg packet data).
template <class T>
template <class Destructor>
void BoundedMessageQueue<T>::flush(const Destructor destructor)
{
    {
        ScopedLock lock(m_mutex);

        // Drain every queued message through the caller's functor.
        while (! isEmpty())
        {
            value_type value = unsafePop();
            destructor(value);
        }

        // Normalize the circular-buffer indices now that the queue is empty.
        m_begin = 0;
        m_end = 0;
        m_size = 0;
    }

    // Wake producers outside the lock.
    m_not_full.broadcast();
}
/// Append 'value', blocking while the queue is full.
template <class T>
void BoundedMessageQueue<T>::push(const value_type & value)
{
    {
        ScopedLock lock(m_mutex);

        // Wait in a loop to guard against spurious wakeups.
        while (isFull())
            m_not_full.wait(&m_mutex);

        unsafePush(value);
    }

    // Signal after releasing the lock so the woken consumer can run at once.
    m_not_empty.signal();
}
/// Append 'value' without blocking.
/// Returns false (and leaves the queue untouched) if the queue is full.
template <class T>
bool BoundedMessageQueue<T>::tryPush(const value_type & value)
{
    {
        ScopedLock lock(m_mutex);

        if (isFull())
            return false;

        unsafePush(value);
    }

    m_not_empty.signal();
    return true;
}
/// Append 'value', waiting up to 'ms' milliseconds for a free slot.
/// Returns false if the queue is still full after the wait.
template <class T>
bool BoundedMessageQueue<T>::timedPush(const value_type & value, const unsigned long ms)
{
    // We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
    // This means that timedPush() could return false before the timeout has been hit.

    {
        ScopedLock lock(m_mutex);

        if (isFull())
            m_not_full.wait(&m_mutex, ms);

        // Re-check after the (possibly spurious or timed-out) wakeup.
        if (isFull())
            return false;

        unsafePush(value);
    }

    m_not_empty.signal();
    return true;
}
/// Remove and return the oldest message, blocking while the queue is empty.
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::pop()
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        // Wait in a loop to guard against spurious wakeups.
        while (isEmpty())
            m_not_empty.wait(&m_mutex);

        value = unsafePop();
    }

    // Signal after releasing the lock so a blocked producer can run at once.
    m_not_full.signal();
    return value;
}
/// Remove and return the oldest message without blocking.
/// Sets 'is_empty' to true and returns a default-constructed value if the
/// queue has no messages.
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::tryPop(bool & is_empty)
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        is_empty = isEmpty();

        if (is_empty)
            return value_type();

        value = unsafePop();
    }

    m_not_full.signal();
    return value;
}
/// Remove and return the oldest message, waiting up to 'ms' milliseconds for
/// one to arrive. Sets 'is_empty' to true and returns a default-constructed
/// value if the queue is still empty after the wait.
template <class T>
typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::timedPop(bool & is_empty, const unsigned long ms)
{
    value_type value;

    {
        ScopedLock lock(m_mutex);

        // We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
        // This means that timedPop() could return with (is_empty = true) before the timeout has been hit.

        if (isEmpty())
            m_not_empty.wait(&m_mutex, ms);

        // Re-check after the (possibly spurious or timed-out) wakeup.
        is_empty = isEmpty();

        if (is_empty)
            return value_type();

        value = unsafePop();
    }

    m_not_full.signal();
    return value;
}
// True when every slot of the circular buffer is occupied.
// Caller must hold m_mutex.
template <class T>
inline bool BoundedMessageQueue<T>::isFull() const
{
    return m_size == m_buffer.size();
}
// True when no message is queued. Caller must hold m_mutex.
template <class T>
inline bool BoundedMessageQueue<T>::isEmpty() const
{
    return m_size == 0;
}
// Store 'value' at the tail of the circular buffer and advance the tail
// index (wrapping around). Caller must hold m_mutex.
template <class T>
inline void BoundedMessageQueue<T>::unsafePush(const value_type & value)
{
    // Note: this shall never be called if the queue is full.
    assert(! isFull());

    m_buffer[m_end++] = value;

    if (m_end == m_buffer.size())
        m_end = 0;

    ++m_size;
}
// Remove and return the value at the head of the circular buffer, advancing
// the head index (wrapping around). Caller must hold m_mutex.
template <class T>
inline typename BoundedMessageQueue<T>::value_type BoundedMessageQueue<T>::unsafePop()
{
    // Note: this shall never be called if the queue is empty.
    assert(! isEmpty());

    const size_t pos = m_begin;

    ++m_begin;
    --m_size;

    if (m_begin == m_buffer.size())
        m_begin = 0;

    return m_buffer[pos];
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_BOUNDED_MESSAGE_QUEUE_H

View File

@ -0,0 +1,28 @@
# Build configuration for the osgdb_ffmpeg plugin.
INCLUDE_DIRECTORIES( ${FFMPEG_INCLUDE_DIRS} )

# Plugin source files.
SET(TARGET_SRC
    FFmpegClocks.cpp
    FFmpegDecoderAudio.cpp
    FFmpegDecoder.cpp
    FFmpegDecoderVideo.cpp
    FFmpegImageStream.cpp
    ReaderWriterFFmpeg.cpp
)

# Plugin headers (listed so IDE project generators include them).
SET(TARGET_H
    AudioSinkInterface.hpp
    BoundedMessageQueue.hpp
    FFmpegClocks.hpp
    FFmpegDecoderAudio.hpp
    FFmpegDecoder.hpp
    FFmpegDecoderVideo.hpp
    FFmpegHeaders.hpp
    FFmpegPacket.hpp
    MessageQueue.hpp
)

SET(TARGET_EXTERNAL_LIBRARIES ${FFMPEG_LIBRARIES} )

#### end var setup  ###
SETUP_PLUGIN(ffmpeg)

View File

@ -0,0 +1,217 @@
#include "FFmpegClocks.hpp"
#include <cmath>
#include <algorithm>
// DEBUG
//#include <iostream>
namespace osgFFmpeg {
namespace
{

    // Audio/video synchronization thresholds, in seconds.
    const double AV_SYNC_THRESHOLD = 0.01;
    const double AV_NOSYNC_THRESHOLD = 10.0;

    // Constrain 'value' to the closed interval [min, max].
    inline double clamp(const double value, const double min, const double max)
    {
        if (value < min)
            return min;
        if (value > max)
            return max;
        return value;
    }

}
/// Construct the clock set with all clocks at zero.
/// 0.040 s is the initial frame-delay guess (i.e. 25 fps) used until a real
/// inter-frame delay has been measured.
FFmpegClocks::FFmpegClocks() :
    m_video_clock(0),
    m_start_time(0),
    m_last_frame_delay(0.040),
    m_last_frame_pts(0),
    m_last_actual_delay(0),
    m_frame_time(0),
    m_audio_buffer_end_pts(0),
    m_audio_delay(0.0),
    m_audio_disabled(false),
    m_rewind(false)
{
}
/// Re-initialize every clock to 'start_time' (stream start, in seconds) and
/// restart the wall-clock audio timer.
void FFmpegClocks::reset(const double start_time)
{
    ScopedLock lock(m_mutex);

    m_video_clock = start_time;

    m_start_time = start_time;
    m_last_frame_delay = 0.040;  // back to the 25 fps default guess
    m_last_frame_pts = start_time - m_last_frame_delay;
    m_frame_time = start_time;

    m_audio_buffer_end_pts = start_time;
    m_audio_timer.setStartTick();
}
/// Rewind the audio clock to the stream start and toggle the rewind
/// handshake flag (the flag flips once when audio rewinds and once when
/// video does; while they disagree a rewind is in progress).
void FFmpegClocks::rewindAudio()
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts = m_start_time;
    m_audio_timer.setStartTick();

    m_rewind = ! m_rewind;
}
/// Rewind the video clock to the stream start and toggle the rewind
/// handshake flag.
void FFmpegClocks::rewindVideo()
{
    ScopedLock lock(m_mutex);

    // NOTE(review): when audio is disabled this returns without rewinding the
    // video clock or toggling m_rewind — presumably because the rewind
    // handshake with the audio side is unnecessary then; confirm intentional.
    if (m_audio_disabled)
        return;

    m_video_clock = m_start_time;

    m_last_frame_delay = 0.040;
    m_last_frame_pts = m_start_time - m_last_frame_delay;
    m_frame_time = m_start_time;

    m_rewind = ! m_rewind;
}
/// Record the presentation timestamp of the end of the buffered audio data
/// and restart the wall-clock timer measuring time elapsed since then.
void FFmpegClocks::audioSetBufferEndPts(const double pts)
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts = pts;
    m_audio_timer.setStartTick();
}
/// Advance the buffered-audio end timestamp by 'increment' seconds (called as
/// decoded samples are consumed) and restart the elapsed-time timer.
void FFmpegClocks::audioAdjustBufferEndPts(double increment)
{
    ScopedLock lock(m_mutex);

    m_audio_buffer_end_pts += increment;
    m_audio_timer.setStartTick();
}
/// Record the audio sink's output latency, in seconds.
/// NOTE(review): unlike the other mutators this takes no lock; a plain double
/// store is not guaranteed atomic — confirm the race with getAudioTime() is
/// acceptable.
void FFmpegClocks::audioSetDelay(const double delay)
{
    m_audio_delay = delay;
}
/// Mark the audio clock as unavailable (no audio stream / sink).
void FFmpegClocks::audioDisable()
{
    ScopedLock lock(m_mutex);

    m_audio_disabled = true;
}
/// Reconcile a decoded frame's PTS with the internal video clock.
/// Returns the effective PTS for the frame: the given 'pts' when non-zero,
/// otherwise the current video clock. The video clock is then advanced by
/// one frame duration ('time_base', stretched for repeated fields).
/// NOTE(review): no lock is taken here — presumably only the video decoder
/// thread touches m_video_clock; confirm.
double FFmpegClocks::videoSynchClock(const AVFrame * const frame, const double time_base, double pts)
{
    if (pts != 0)
    {
        // If we have a PTS, set the video clock to it.
        m_video_clock = pts;
    }
    else
    {
        // Else, if we don't, use the video clock value.
        pts = m_video_clock;
    }

    // Update the video clock to take into account the frame delay
    double frame_delay = time_base;
    frame_delay += frame->repeat_pict * (frame_delay * 0.5);

    m_video_clock += frame_delay;

    return pts;
}
/// Compute how long (in seconds) the video thread should wait before showing
/// the frame with timestamp 'pts', steering the video towards the audio clock.
double FFmpegClocks::videoRefreshSchedule(const double pts)
{
    ScopedLock lock(m_mutex);

    // DEBUG
    //std::cerr << "ftime / dpts / delay / audio_time / adelay: ";

    // Nominal delay: time between this frame's PTS and the previous one's.
    double delay = pts - m_last_frame_pts;

    //std::cerr << m_frame_time << " / ";
    //std::cerr << delay << " / ";

    // If incorrect delay, use previous one
    if (delay <= 0.0 || delay >= 1.0)
        delay = m_last_frame_delay;

    // Save for next time
    m_last_frame_delay = delay;
    m_last_frame_pts = pts;

    // Update the delay to synch to the audio stream

    // Ideally the frame time should be incremented after the actual delay is computed.
    // But because of the sound latency, it seems better to keep some latency in the video too.
    m_frame_time += delay;

    const double audio_time = getAudioTime();

    // Steer towards the audio clock, but never drift more than [-0.5, +2.5]
    // frame durations away in a single step.
    const double actual_delay = (! m_rewind) ?
        clamp(m_frame_time - audio_time, -0.5*delay, 2.5*delay) :
        m_last_actual_delay; // when rewinding audio or video (but the other has yet to be), get the last used delay

    //m_frame_time += delay;

    // DEBUG
    //std::cerr << delay << " / ";
    //std::cerr << audio_time << " / ";
    //std::cerr << actual_delay << std::endl;

    m_last_actual_delay = actual_delay;

    return actual_delay;
}
/// Stream start time in seconds, as set by reset().
double FFmpegClocks::getStartTime() const
{
    return m_start_time;
}
// Current audio playback position: PTS of the end of the buffered audio data,
// plus wall-clock time elapsed since it was recorded, minus the sink's output
// latency. Caller must hold m_mutex (private helper).
double FFmpegClocks::getAudioTime() const
{
    return m_audio_buffer_end_pts + m_audio_timer.time_s() - m_audio_delay;
}
} // namespace osgFFmpeg

View File

@ -0,0 +1,69 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H
#include <osg/Timer>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include "FFmpegHeaders.hpp"
namespace osgFFmpeg {
/// Shared audio/video clock state used to keep the two decoder threads in
/// sync. The audio clock is the master: video frame scheduling
/// (videoRefreshSchedule) steers towards getAudioTime().
class FFmpegClocks
{
public:

    FFmpegClocks();

    /// Re-initialize all clocks to 'start_time' (seconds).
    void reset(double start_time);
    /// Rewind the audio/video clock to the stream start (rewind handshake).
    void rewindAudio();
    void rewindVideo();

    /// Set/advance the PTS of the end of the buffered audio data.
    void audioSetBufferEndPts(double pts);
    void audioAdjustBufferEndPts(double increment);
    /// Record the audio sink's output latency.
    void audioSetDelay(double delay);
    /// Mark audio as unavailable.
    void audioDisable();

    /// Reconcile a decoded frame's PTS with the video clock.
    double videoSynchClock(const AVFrame * frame, double time_base, double pts);
    /// Delay (seconds) before the frame with 'pts' should be displayed.
    double videoRefreshSchedule(double pts);

    double getStartTime() const;

private:

    double getAudioTime() const;

    typedef osg::Timer Timer;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::ScopedLock<Mutex> ScopedLock;

    mutable Mutex m_mutex;

    double m_video_clock;           // running video PTS estimate

    double m_start_time;            // stream start (seconds)
    double m_last_frame_delay;      // last measured inter-frame delay
    double m_last_frame_pts;        // PTS of the previously scheduled frame
    double m_last_actual_delay;     // last delay actually returned
    double m_frame_time;            // scheduled display time of the next frame
    double m_audio_buffer_end_pts;  // PTS at the end of buffered audio data
    double m_audio_delay;           // audio sink output latency
    Timer m_audio_timer;            // wall clock since m_audio_buffer_end_pts
    bool m_audio_disabled;
    bool m_rewind;                  // rewind-in-progress handshake flag
};
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_CLOCKS_H

View File

@ -0,0 +1,290 @@
#include "FFmpegDecoder.hpp"
#include <osg/Notify>
#include <cassert>
#include <limits>
#include <stdexcept>
#include <string.h>
namespace osgFFmpeg {
/// Construct the demuxer with empty 100-slot packet queues and wire both
/// stream decoders to their queue and the shared clock set.
FFmpegDecoder::FFmpegDecoder() :
    m_audio_stream(0),
    m_video_stream(0),
    m_audio_queue(100),
    m_video_queue(100),
    m_audio_decoder(m_audio_queue, m_clocks),
    m_video_decoder(m_video_queue, m_clocks),
    m_state(NORMAL),
    m_loop(false)
{
}
/// Destructor: release any packets still queued.
FFmpegDecoder::~FFmpegDecoder()
{
    close();
}
/// Open the media file 'filename', locate its video and audio streams and
/// open a decoder for each. A video stream is required; audio is optional
/// (a failure to open audio only logs a warning).
/// Returns false (after logging) if the file or the video stream cannot be
/// opened.
bool FFmpegDecoder::open(const std::string & filename)
{
    try
    {
        // Open video file
        AVFormatContext * p_format_context = 0;

        if (av_open_input_file(&p_format_context, filename.c_str(), 0, 0, 0) != 0)
            throw std::runtime_error("av_open_input_file() failed");

        // Hand the context to a shared_ptr so it is closed automatically.
        m_format_context.reset(p_format_context, av_close_input_file);

        // Retrieve stream info
        if (av_find_stream_info(p_format_context) < 0)
            throw std::runtime_error("av_find_stream_info() failed");

        m_duration = double(m_format_context->duration) / AV_TIME_BASE;
        m_start = double(m_format_context->start_time) / AV_TIME_BASE;

        // TODO move this elsewhere
        m_clocks.reset(m_start);

        // Dump info to stderr
        dump_format(p_format_context, 0, filename.c_str(), false);

        // Find and open the first video and audio streams (note that audio stream is optional and only opened if possible)

        findVideoStream();
        findAudioStream();

        m_video_decoder.open(m_video_stream);

        try
        {
            m_audio_decoder.open(m_audio_stream);
        }

        catch (const std::runtime_error & error)
        {
            osg::notify(osg::WARN) << "FFmpegImageStream::open audio failed, audio stream will be disabled: " << error.what() << std::endl;
        }
    }

    catch (const std::runtime_error & error)
    {
        osg::notify(osg::WARN) << "FFmpegImageStream::open : " << error.what() << std::endl;
        return false;
    }

    return true;
}
/// Release all queued packets. The AVFormatContext itself is closed by the
/// shared_ptr deleter installed in open().
void FFmpegDecoder::close()
{
    flushAudioQueue();
    flushVideoQueue();
}
/// Read/dispatch one packet according to the decoder state machine.
/// Returns true if a packet was delivered to one of the stream queues.
bool FFmpegDecoder::readNextPacket()
{
    if (m_state == NORMAL)
        return readNextPacketNormal();

    if (m_state == END_OF_STREAM)
        return readNextPacketEndOfStream();

    if (m_state == REWINDING)
        return readNextPacketRewinding();

    // Unknown state: programming error.
    assert(false);
    return false;
}
/// Seek back to the start of the stream, dropping any packet held locally and
/// everything still queued for the stream decoders.
void FFmpegDecoder::rewind()
{
    m_pending_packet.clear();

    flushAudioQueue();
    flushVideoQueue();
    rewindButDontFlushQueues();
}
/// Locate the first audio stream, storing its AVStream and index.
/// Audio is optional: when absent, the stream pointer is null and the index
/// is set to an impossible value so no packet ever matches it.
void FFmpegDecoder::findAudioStream()
{
    for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
    {
        if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
        {
            m_audio_stream = m_format_context->streams[i];
            m_audio_index = i;
            return;
        }
    }

    m_audio_stream = 0;
    m_audio_index = std::numeric_limits<unsigned int>::max();
}
/// Locate the first video stream, storing its AVStream and index.
/// Throws std::runtime_error when the file has no video stream (video is
/// mandatory for this plugin).
void FFmpegDecoder::findVideoStream()
{
    for (unsigned int i = 0; i < m_format_context->nb_streams; ++i)
    {
        if (m_format_context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
        {
            m_video_stream = m_format_context->streams[i];
            m_video_index = i;
            return;
        }
    }

    throw std::runtime_error("could not find a video stream");
}
// Drop all queued audio packets, releasing each packet's FFmpeg data.
inline void FFmpegDecoder::flushAudioQueue()
{
    FFmpegPacketClear pc;
    m_audio_queue.flush(pc);
}
// Drop all queued video packets, releasing each packet's FFmpeg data.
inline void FFmpegDecoder::flushVideoQueue()
{
    FFmpegPacketClear pc;
    m_video_queue.flush(pc);
}
/// NORMAL-state step: read the next packet from the demuxer (or retry the one
/// still pending from a previous full-queue attempt) and route it to the
/// matching stream queue. Returns true once the packet has been handed off.
/// At end of stream, either seeks back (loop mode) or enters END_OF_STREAM.
bool FFmpegDecoder::readNextPacketNormal()
{
    AVPacket packet;

    if (! m_pending_packet)
    {
        bool end_of_stream = false;

        // Read the next frame packet
        if (av_read_frame(m_format_context.get(), &packet) < 0)
        {
            // A negative return is end-of-stream only if no I/O error occurred.
            if (url_ferror(m_format_context->pb) == 0)
                end_of_stream = true;
            else
                throw std::runtime_error("av_read_frame() failed");
        }

        if (end_of_stream)
        {
            // If we reach the end of the stream, change the decoder state
            if (loop())
                rewindButDontFlushQueues();
            else
                m_state = END_OF_STREAM;

            return false;
        }
        else
        {
            // Make the packet data available beyond av_read_frame() logical scope.
            if (av_dup_packet(&packet) < 0)
                throw std::runtime_error("av_dup_packet() failed");

            m_pending_packet = FFmpegPacket(packet);
        }
    }

    // Send data packet
    if (m_pending_packet.type == FFmpegPacket::PACKET_DATA)
    {
        if (m_pending_packet.packet.stream_index == m_audio_index)
        {
            // Audio packet: hand off (with timeout so we never block forever).
            if (m_audio_queue.timedPush(m_pending_packet, 10)) {
                m_pending_packet.release();  // queue owns the data now
                return true;
            }
        }
        else if (m_pending_packet.packet.stream_index == m_video_index)
        {
            if (m_video_queue.timedPush(m_pending_packet, 10)) {
                m_pending_packet.release();  // queue owns the data now
                return true;
            }
        }
        else
        {
            // Packet belongs to a stream we don't decode: drop it.
            m_pending_packet.clear();
            return true;
        }
    }

    // Queue was full; keep the packet pending and retry next call.
    return false;
}
/// END_OF_STREAM-state step: keep feeding end-of-stream markers to both
/// decoder threads (timed pushes, so full queues are simply retried later).
bool FFmpegDecoder::readNextPacketEndOfStream()
{
    const FFmpegPacket packet(FFmpegPacket::PACKET_END_OF_STREAM);

    m_audio_queue.timedPush(packet, 10);
    m_video_queue.timedPush(packet, 10);

    return false;
}
/// REWINDING-state step: push a flush marker to both decoder threads; only
/// once BOTH pushes succeed does the decoder return to NORMAL.
/// NOTE(review): if only one push succeeds, that queue receives a second
/// flush marker on the retry — presumably harmless; confirm.
bool FFmpegDecoder::readNextPacketRewinding()
{
    const FFmpegPacket packet(FFmpegPacket::PACKET_FLUSH);

    if (m_audio_queue.timedPush(packet, 10) && m_video_queue.timedPush(packet, 10))
        m_state = NORMAL;

    return false;
}
/// Seek the demuxer back to the stream start time and switch to the
/// REWINDING state (flush markers are then delivered by readNextPacket()).
/// Queues are intentionally left untouched; see rewind() for the full flush.
/// Fix: the error message read "av_seek_frame failed()" — parentheses were
/// attached to the wrong word.
void FFmpegDecoder::rewindButDontFlushQueues()
{
    const AVRational AvTimeBaseQ = { 1, AV_TIME_BASE }; // = AV_TIME_BASE_Q

    // Convert the start time (seconds) into the video stream's time base.
    const int64_t pos = m_clocks.getStartTime() * AV_TIME_BASE;
    const int64_t seek_target = av_rescale_q(pos, AvTimeBaseQ, m_video_stream->time_base);

    if (av_seek_frame(m_format_context.get(), m_video_index, seek_target, 0/*AVSEEK_FLAG_BYTE |*/ /*AVSEEK_FLAG_BACKWARD*/) < 0)
        throw std::runtime_error("av_seek_frame() failed");

    m_state = REWINDING;
}
} // namespace osgFFmpeg

View File

@ -0,0 +1,133 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H
#include <boost/shared_ptr.hpp>
#include "FFmpegDecoderAudio.hpp"
#include "FFmpegDecoderVideo.hpp"
namespace osgFFmpeg {
/// Demuxer front-end: opens a media file, reads packets on the caller's
/// thread and dispatches them to the audio and video decoder threads via
/// bounded queues, sharing a single FFmpegClocks instance for A/V sync.
class FFmpegDecoder
{
public:

    FFmpegDecoder();
    ~FFmpegDecoder();

    /// Open 'filename'; returns false (after logging) on failure.
    bool open(const std::string & filename);
    void close();

    /// Read/dispatch one packet; see the state machine below.
    bool readNextPacket();
    /// Seek back to the beginning of the stream.
    void rewind();

    void loop(bool loop);
    bool loop() const;

    double duration() const;

    FFmpegDecoderAudio & audio_decoder();
    FFmpegDecoderVideo & video_decoder();
    FFmpegDecoderAudio const & audio_decoder() const;
    FFmpegDecoderVideo const & video_decoder() const;

protected:

    // Demuxer state machine driven by readNextPacket().
    enum State
    {
        NORMAL,          // reading data packets
        END_OF_STREAM,   // feeding end-of-stream markers to the decoders
        REWINDING        // feeding flush markers after a seek
    };

    typedef boost::shared_ptr<AVFormatContext> FormatContextPtr;
    typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;

    void findAudioStream();
    void findVideoStream();
    void flushAudioQueue();
    void flushVideoQueue();
    bool readNextPacketNormal();
    bool readNextPacketEndOfStream();
    bool readNextPacketRewinding();
    void rewindButDontFlushQueues();

    FormatContextPtr m_format_context;
    AVStream * m_audio_stream;   // null when the file has no audio
    AVStream * m_video_stream;
    unsigned int m_audio_index;  // stream indices used to route packets
    unsigned int m_video_index;
    FFmpegClocks m_clocks;
    FFmpegPacket m_pending_packet;  // packet awaiting room in a full queue
    PacketQueue m_audio_queue;
    PacketQueue m_video_queue;
    FFmpegDecoderAudio m_audio_decoder;
    FFmpegDecoderVideo m_video_decoder;
    double m_duration;  // media duration in seconds
    double m_start;     // media start time in seconds
    State m_state;
    bool m_loop;        // restart from the beginning at end of stream?
};
/// Enable/disable looping at end of stream.
inline void FFmpegDecoder::loop(const bool loop)
{
    m_loop = loop;
}

inline bool FFmpegDecoder::loop() const
{
    return m_loop;
}

/// Media duration in seconds (recomputed from the format context).
inline double FFmpegDecoder::duration() const
{
    return double(m_format_context->duration) / AV_TIME_BASE;
}

// Accessors for the two stream decoders.

inline FFmpegDecoderAudio & FFmpegDecoder::audio_decoder()
{
    return m_audio_decoder;
}

inline FFmpegDecoderVideo & FFmpegDecoder::video_decoder()
{
    return m_video_decoder;
}

inline FFmpegDecoderAudio const & FFmpegDecoder::audio_decoder() const
{
    return m_audio_decoder;
}

inline FFmpegDecoderVideo const & FFmpegDecoder::video_decoder() const
{
    return m_video_decoder;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_H

View File

@ -0,0 +1,306 @@
#include "FFmpegDecoderAudio.hpp"
#include <osg/Notify>
#include <stdexcept>
#include <string.h>
//DEBUG
//#include <iostream>
namespace osgFFmpeg {
/// Construct the audio decoder thread object.
/// 'packets' is the queue fed by the demuxer; 'clocks' is the shared A/V
/// clock set. The pre-decode buffer is sized at 1.5x the maximum frame size
/// so one decoded frame always fits.
FFmpegDecoderAudio::FFmpegDecoderAudio(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_audio_buffer((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2),
    m_audio_buf_size(0),
    m_audio_buf_index(0),
    m_end_of_stream(false),
    m_exit(false)
{
}
/// Destructor: ask the decode thread to exit and wait for it.
FFmpegDecoderAudio::~FFmpegDecoderAudio()
{
    if (isRunning())
    {
        m_exit = true;
        join();
    }
}
/// Prepare the audio decoder for 'stream'. A null stream is accepted (audio
/// is optional) and leaves the object in the "no audio" state.
/// Throws std::runtime_error when the codec is invalid or cannot be opened;
/// on any failure m_context is reset so validContext() remains false.
/// Fix: removed a stray empty statement (double semicolon) after the
/// "invalid audio codec" throw.
void FFmpegDecoderAudio::open(AVStream * const stream)
{
    try
    {
        // Sound can be optional (i.e. no audio stream is present)
        if (stream == 0)
            return;

        m_stream = stream;
        m_context = stream->codec;

        m_frequency = m_context->sample_rate;
        m_nb_channels = m_context->channels;
        m_sample_format = FFmpegSampleFormat(m_context->sample_fmt);

        // Check stream sanity
        if (m_context->codec_id == CODEC_ID_NONE)
            throw std::runtime_error("invalid audio codec");

        // Find the decoder for the audio stream
        AVCodec * const p_codec = avcodec_find_decoder(m_context->codec_id);

        if (p_codec == 0)
            throw std::runtime_error("avcodec_find_decoder() failed");

        // Inform the codec that we can handle truncated bitstreams
        //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
        //    m_context->flags |= CODEC_FLAG_TRUNCATED;

        // Open codec
        if (avcodec_open(m_context, p_codec) < 0)
            throw std::runtime_error("avcodec_open() failed");
    }

    catch (...)
    {
        // Leave a clean "no audio" state rather than a half-initialized one.
        m_context = 0;
        throw;
    }
}
/// Thread entry point. Never lets an exception escape the thread; failures
/// are logged through osg::notify.
void FFmpegDecoderAudio::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderAudio::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderAudio::run : unhandled exception" << std::endl;
    }
}
/// Install the audio output sink. Must be set before the thread starts for
/// audio to be played.
void FFmpegDecoderAudio::setAudioSink(osg::ref_ptr<AudioSinkInterface> audio_sink)
{
    // The FFmpegDecoderAudio object takes the responsability of destroying the audio_sink.
    m_audio_sink = audio_sink;
}
/// Fill 'buffer' with exactly 'size' bytes of decoded audio (called by the
/// sink's audio callback). Decodes frames on demand into the internal
/// pre-fetch buffer; when nothing can be decoded, silence is output so the
/// callback never starves. The audio clock is advanced for every byte copied.
void FFmpegDecoderAudio::fillBuffer(void * const buffer, size_t size)
{
    size_t filled = 0;
    uint8_t * dst_buffer = reinterpret_cast<uint8_t*>(buffer);

    while (size != 0)
    {
        // Refill the pre-fetch buffer once it has been fully consumed.
        if (m_audio_buf_index == m_audio_buf_size)
        {
            m_audio_buf_index = 0;

            // Pre-fetch audio buffer is empty, refill it.
            const size_t bytes_decoded = decodeFrame(&m_audio_buffer[0], m_audio_buffer.size());

            // If nothing could be decoded (e.g. error or no packet available), output a bit of silence
            if (bytes_decoded == 0)
            {
                m_audio_buf_size = std::min(Buffer::size_type(1024), m_audio_buffer.size());
                memset(&m_audio_buffer[0], 0, m_audio_buf_size);
            }
            else
            {
                m_audio_buf_size = bytes_decoded;
            }
        }

        // Copy as much as both the request and the pre-fetch buffer allow.
        const size_t fill_size = std::min(m_audio_buf_size - m_audio_buf_index, size);

        memcpy(dst_buffer, &m_audio_buffer[m_audio_buf_index], fill_size);

        size -= fill_size;
        dst_buffer += fill_size;

        m_audio_buf_index += fill_size;

        // Advance the audio clock by the playing time of the copied bytes.
        adjustBufferEndTps(fill_size);
    }
}
/// Main loop of the audio thread. If a usable sink exists, playback is
/// started (actual decoding then happens in the sink's callback via
/// fillBuffer()) and this thread mostly idles. Without a usable sink, the
/// audio clock is disabled and packets are drained so the demuxer's audio
/// queue never stalls.
void FFmpegDecoderAudio::decodeLoop()
{
    const bool skip_audio = ! validContext() || ! m_audio_sink.valid();

    if (! skip_audio && ! m_audio_sink->playing())
    {
        m_clocks.audioSetDelay(m_audio_sink->getDelay());
        m_audio_sink->startPlaying();
    }
    else
    {
        m_clocks.audioDisable();
    }

    while (! m_exit)
    {
        // If skipping audio, make sure the audio stream is still consumed.
        if (skip_audio)
        {
            bool is_empty;
            FFmpegPacket packet = m_packets.timedPop(is_empty, 10);

            if (packet.valid())
                packet.clear();
        }

        // Else, just idle in this thread.
        // Note: If m_audio_sink has an audio callback, this thread will still be awaken
        // from time to time to refill the audio buffer.
        else
        {
            OpenThreads::Thread::microSleep(10000);
        }
    }
}
/// Advance the audio clock by the playing time of 'buffer_size' bytes.
/// The byte rate is channels * sample_rate * bytes_per_sample; the increment
/// handed to the clocks is therefore buffer_size / byte_rate seconds.
void FFmpegDecoderAudio::adjustBufferEndTps(const size_t buffer_size)
{
    // Bytes occupied by one sample of the current format.
    int bytes_per_sample;

    switch (sampleFormat())
    {
    case SAMPLE_FORMAT_U8:
        bytes_per_sample = 1;
        break;

    case SAMPLE_FORMAT_S16:
        bytes_per_sample = 2;
        break;

    case SAMPLE_FORMAT_S24:
        bytes_per_sample = 3;
        break;

    case SAMPLE_FORMAT_S32:
        bytes_per_sample = 4;
        break;

    case SAMPLE_FORMAT_F32:
        bytes_per_sample = 4;
        break;

    default:
        throw std::runtime_error("unsupported audio sample format");
    }

    const int bytes_per_second = nbChannels() * frequency() * bytes_per_sample;

    m_clocks.audioAdjustBufferEndPts(double(buffer_size) / double(bytes_per_second));
}
/// Decode one chunk of audio into 'buffer' (capacity 'size' bytes) and
/// return the number of bytes produced. Returns 0 when no packet is
/// available or the thread is exiting; returns 'size' bytes of silence once
/// the end-of-stream marker has been seen. Also services flush markers
/// (codec flush + audio clock rewind) and updates the audio clock from
/// packet PTS values.
size_t FFmpegDecoderAudio::decodeFrame(void * const buffer, const size_t size)
{
    for (;;)
    {
        // Decode current packet
        while (m_bytes_remaining > 0)
        {
            int data_size = size;

            const int bytes_decoded = avcodec_decode_audio2(m_context, reinterpret_cast<int16_t*>(buffer), &data_size, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
            {
                // if error, skip frame
                m_bytes_remaining = 0;
                break;
            }

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // If we have some data, return it and come back for more later.
            if (data_size > 0)
                return data_size;
        }

        // Get next packet

        if (m_packet.valid())
            m_packet.clear();

        if (m_exit)
            return 0;

        bool is_empty = true;
        m_packet = m_packets.tryPop(is_empty);

        if (is_empty)
            return 0;

        if (m_packet.type == FFmpegPacket::PACKET_DATA)
        {
            if (m_packet.packet.pts != AV_NOPTS_VALUE)
            {
                // Re-anchor the audio clock on this packet's timestamp.
                const double pts = av_q2d(m_stream->time_base) * m_packet.packet.pts;
                m_clocks.audioSetBufferEndPts(pts);
            }

            m_bytes_remaining = m_packet.packet.size;
            m_packet_data = m_packet.packet.data;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_END_OF_STREAM)
        {
            m_end_of_stream = true;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_FLUSH)
        {
            // Rewind in progress: drop codec state and reset the audio clock.
            avcodec_flush_buffers(m_context);
            m_clocks.rewindAudio();
        }

        // just output silence when we reached the end of stream
        if (m_end_of_stream)
        {
            memset(buffer, 0, size);
            return size;
        }
    }
}
} // namespace osgFFmpeg

View File

@ -0,0 +1,106 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H
#include <OpenThreads/Thread>
#include "AudioSinkInterface.hpp"
#include "BoundedMessageQueue.hpp"
#include "FFmpegClocks.hpp"
#include "FFmpegPacket.hpp"
#include "FFmpegSampleFormat.hpp"
namespace osgFFmpeg {
/// Audio decoding thread: consumes packets from the demuxer's queue and
/// produces PCM data on demand through fillBuffer(), which the audio sink's
/// callback invokes. Also maintains the audio side of the shared clocks.
class FFmpegDecoderAudio : public OpenThreads::Thread
{
public:

    typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;
    typedef void (* PublishFunc) (const FFmpegDecoderAudio & decoder, void * user_data);

    FFmpegDecoderAudio(PacketQueue & packets, FFmpegClocks & clocks);
    ~FFmpegDecoderAudio();

    /// Prepare the decoder for 'stream' (null = no audio); throws on failure.
    void open(AVStream * stream);

    /// Thread entry point (OpenThreads::Thread override).
    virtual void run();

    /// Install the audio output sink (owned from now on by this object).
    void setAudioSink(osg::ref_ptr<AudioSinkInterface> audio_sink);
    /// Fill 'buffer' with 'size' bytes of decoded PCM (sink callback hook).
    void fillBuffer(void * buffer, size_t size);

    /// True once open() succeeded on a real stream.
    bool validContext() const;
    int frequency() const;
    int nbChannels() const;
    FFmpegSampleFormat sampleFormat() const;

private:

    //typedef boost::shared_ptr<AVFrame> FramePtr;
    typedef osg::ref_ptr<AudioSinkInterface> SinkPtr;
    typedef std::vector<uint8_t> Buffer;

    void decodeLoop();
    void adjustBufferEndTps(size_t buffer_size);
    size_t decodeFrame(void * buffer, size_t size);

    PacketQueue & m_packets;        // demuxer -> audio packet queue
    FFmpegClocks & m_clocks;
    AVStream * m_stream;
    AVCodecContext * m_context;
    FFmpegPacket m_packet;          // packet currently being decoded
    const uint8_t * m_packet_data;  // read cursor inside m_packet
    int m_bytes_remaining;          // undecoded bytes left in m_packet
    Buffer m_audio_buffer;          // pre-fetch buffer of decoded PCM
    size_t m_audio_buf_size;        // valid bytes in m_audio_buffer
    size_t m_audio_buf_index;       // consumed bytes in m_audio_buffer
    int m_frequency;
    int m_nb_channels;
    FFmpegSampleFormat m_sample_format;
    SinkPtr m_audio_sink;
    bool m_end_of_stream;
    volatile bool m_exit;           // set by the destructor to stop the thread
};
// Inline accessors; values are fixed by open() and constant afterwards.

inline bool FFmpegDecoderAudio::validContext() const
{
    return m_context != 0;
}

inline int FFmpegDecoderAudio::frequency() const
{
    return m_frequency;
}

inline int FFmpegDecoderAudio::nbChannels() const
{
    return m_nb_channels;
}

inline FFmpegSampleFormat FFmpegDecoderAudio::sampleFormat() const
{
    return m_sample_format;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_AUDIO_H

View File

@ -0,0 +1,310 @@
#include "FFmpegDecoderVideo.hpp"
#include <osg/Notify>
#include <stdexcept>
#include <string.h>
namespace osgFFmpeg {
/// Construct the video decoder thread object.
/// 'packets' is the queue fed by the demuxer; 'clocks' the shared A/V clocks.
FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
{
}
/// Destructor: ask the decode thread to exit and wait for it.
FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    if (isRunning())
    {
        m_exit = true;
        join();
    }
}
/// Prepare the video decoder for 'stream': open the codec, allocate the
/// decode and RGBA conversion frames, and hook get_buffer()/release_buffer()
/// so each decoded frame carries the PTS of the packet it came from.
/// Throws std::runtime_error if the codec cannot be found or opened.
void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support Alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate video frame
    m_frame.reset(avcodec_alloc_frame(), av_free);

    // Allocate converted RGB frame
    m_frame_rgba.reset(avcodec_alloc_frame(), av_free);
    m_buffer_rgba.resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba_public.resize(m_buffer_rgba.size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &m_buffer_rgba[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}
/// Thread entry point. Never lets an exception escape the thread; failures
/// are logged through osg::notify.
void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}
/// Main loop of the video thread: pops packets from the queue, decodes them
/// into frames, resolves each frame's PTS, synchronizes against the shared
/// clocks and publishes finished frames. Flush markers reset the codec and
/// rewind the video clock.
void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video failed()");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame pts
            // Prefer the PTS smuggled through the frame's opaque pointer (set
            // by getBuffer()), fall back to the packet DTS, else zero.

            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            // Convert from stream time-base units to seconds.
            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                // Rewind in progress: drop codec state, reset the video clock.
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}
/// Compute the display aspect ratio of the stream.
/// Uses the codec's sample (pixel) aspect ratio when available; otherwise
/// falls back to the plain width/height ratio.
void FFmpegDecoderVideo::findAspectRatio()
{
    double ar = 0.0;

    if (m_context->sample_aspect_ratio.num != 0)
        ar = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;

    if (ar <= 0.0)
        ar = double(m_width) / double(m_height);

    m_aspect_ratio = ar;
}
/// Convert the decoded frame to RGBA, wait 'delay' seconds (the schedule
/// computed by the clocks) and invoke the publish callback. Frames that are
/// already more than 10 ms late are dropped.
void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we better skip the frame.
    if (delay < -0.010)
        return;

    const AVPicture * const src = (const AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Flip and swap buffer
    swapBuffers();

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    // Sleep in bounded slices so m_exit can interrupt a long wait.
    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    m_publish_func(* this, m_user_data);
}
// Copy the converted RGBA image into the public buffer, flipping it
// vertically (4 bytes per pixel) so it matches OSG's bottom-up image layout.
void FFmpegDecoderVideo::swapBuffers()
{
    for (int h = 0; h < height(); ++h)
        memcpy(&m_buffer_rgba_public[(height() - h - 1) * width() * 4], &m_buffer_rgba[h * width() * 4], width() * 4);
}
/// Convert a YUVA420P picture to RGBA: let img_convert() handle the YUV
/// color planes, then copy the alpha plane (plane 3) into every 4th byte of
/// the destination ourselves, since img_convert() ignores alpha.
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
{
    img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;  // bytes per RGBA pixel

    uint8_t * a_dst = dst->data[0] + 3;  // first alpha byte in the RGBA output

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}
// Codec callback replacing avcodec_default_get_buffer(): attach the PTS of
// the packet being decoded (saved in m_packet_pts) to the frame via its
// opaque pointer, so decodeLoop() can recover per-frame timestamps.
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    // Custom release_buffer() callback: free the PTS value attached by
    // getBuffer() before handing the frame back to FFmpeg's default release.
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}
} // namespace osgFFmpeg

View File

@ -0,0 +1,141 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H
#include <boost/shared_ptr.hpp>
#include <OpenThreads/Thread>
#include <vector>
#include "BoundedMessageQueue.hpp"
#include "FFmpegClocks.hpp"
#include "FFmpegPacket.hpp"
namespace osgFFmpeg {
// Decodes the video stream of a media file on a dedicated thread, converting
// each frame to RGBA and publishing it through a user-supplied callback.
class FFmpegDecoderVideo : public OpenThreads::Thread
{
public:

    typedef BoundedMessageQueue<FFmpegPacket> PacketQueue;

    // Callback invoked from the decoder thread whenever a new frame is ready.
    typedef void (* PublishFunc) (const FFmpegDecoderVideo & decoder, void * user_data);

    FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks);
    ~FFmpegDecoderVideo();

    // Bind this decoder to a video stream (call before starting the thread).
    void open(AVStream * stream);

    // Thread entry point: runs the decode loop.
    virtual void run();

    void setUserData(void * user_data);
    void setPublishCallback(PublishFunc function);

    int width() const;
    int height() const;
    double aspectRatio() const;
    bool alphaChannel() const;
    double frameRate() const;

    // Pointer to the published (bottom-up) RGBA image data.
    const uint8_t * image() const;

private:

    typedef boost::shared_ptr<AVFrame> FramePtr;
    typedef std::vector<uint8_t> Buffer;

    void decodeLoop();
    void findAspectRatio();
    void publishFrame(double delay);
    void swapBuffers();
    double synchronizeVideo(double pts);
    void yuva420pToRgba(AVPicture *dst, const AVPicture *src, int width, int height);

    // FFmpeg get/release buffer callbacks, used to tag frames with their PTS.
    static int getBuffer(AVCodecContext * context, AVFrame * picture);
    static void releaseBuffer(AVCodecContext * context, AVFrame * picture);

    PacketQueue & m_packets;
    FFmpegClocks & m_clocks;
    AVStream * m_stream;
    AVCodecContext * m_context;
    AVCodec * m_codec;
    const uint8_t * m_packet_data;
    int m_bytes_remaining;
    int64_t m_packet_pts;

    FramePtr m_frame;                // decoded frame (native pixel format)
    FramePtr m_frame_rgba;           // frame converted to RGBA
    Buffer m_buffer_rgba;            // backing storage for m_frame_rgba
    Buffer m_buffer_rgba_public;     // flipped copy handed out via image()

    void * m_user_data;
    PublishFunc m_publish_func;

    double m_frame_rate;
    double m_aspect_ratio;
    int m_width;
    int m_height;
    size_t m_next_frame_index;
    bool m_alpha_channel;

    volatile bool m_exit;            // set to request decode-loop termination
};
// Opaque pointer passed back to the publish callback.
inline void FFmpegDecoderVideo::setUserData(void * const user_data)
{
    m_user_data = user_data;
}

inline void FFmpegDecoderVideo::setPublishCallback(const PublishFunc function)
{
    m_publish_func = function;
}

// Frame width, in pixels.
inline int FFmpegDecoderVideo::width() const
{
    return m_width;
}

// Frame height, in pixels.
inline int FFmpegDecoderVideo::height() const
{
    return m_height;
}

inline double FFmpegDecoderVideo::aspectRatio() const
{
    return m_aspect_ratio;
}

// True when the stream carries an alpha channel (YUVA420P input).
inline bool FFmpegDecoderVideo::alphaChannel() const
{
    return m_alpha_channel;
}

// Nominal frame rate, in frames per second.
inline double FFmpegDecoderVideo::frameRate() const
{
    return m_frame_rate;
}

// Published RGBA pixels (bottom-up row order).
inline const uint8_t * FFmpegDecoderVideo::image() const
{
    return &m_buffer_rgba_public[0];
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_DECODER_VIDEO_H

View File

@ -0,0 +1,14 @@
#ifndef HEADER_GUARD_FFMPEG_HEADERS_H
#define HEADER_GUARD_FFMPEG_HEADERS_H
// FFmpeg is a C library: wrap its headers for C++ linkage.
extern "C"
{
// Required so the C99 INT64_C/UINT64_C macros used by the FFmpeg headers are
// defined in C++. NOTE(review): this only works if <stdint.h> has not already
// been included earlier in the translation unit -- confirm include order.
#define __STDC_CONSTANT_MACROS
#include <avcodec.h>
#include <avformat.h>
}
#endif // HEADER_GUARD_FFMPEG_HEADERS_H

View File

@ -0,0 +1,321 @@
#include "FFmpegImageStream.hpp"
#include <OpenThreads/ScopedLock>
#include <osg/Notify>
#include "FFmpegDecoder.hpp"
#include "MessageQueue.hpp"
#include <memory>
namespace osgFFmpeg {
FFmpegImageStream::FFmpegImageStream() :
    m_decoder(0),
    m_commands(0),
    m_frame_published_flag(false)
{
    setOrigin(osg::Image::BOTTOM_LEFT);

    // Construct both objects through auto_ptr first so neither leaks if the
    // second allocation throws; ownership is transferred to the raw members
    // only once both constructions have succeeded.
    std::auto_ptr<FFmpegDecoder> decoder(new FFmpegDecoder);
    std::auto_ptr<CommandQueue> commands(new CommandQueue);

    m_decoder = decoder.release();
    m_commands = commands.release();
}
FFmpegImageStream::FFmpegImageStream(const FFmpegImageStream & image, const osg::CopyOp & copyop) :
    osg::ImageStream(image, copyop),
    m_decoder(0),
    m_commands(0),
    m_frame_published_flag(false)
{
    // TODO: probably incorrect or incomplete
    // The decoder and command queue are not shared with (nor copied from) the
    // source stream. The members must nonetheless be initialized: the
    // destructor unconditionally deletes m_commands and m_decoder, which was
    // undefined behaviour while they were left uninitialized here.
}
FFmpegImageStream::~FFmpegImageStream()
{
    // Stop the producer thread (waiting for it to exit) before tearing down
    // the command queue and the decoder it uses.
    quit(true);

    delete m_commands;
    delete m_decoder;
}
// Open the media file, point this image at the video decoder's RGBA buffer,
// register the frame callback and start the producer thread (paused).
// Returns false if the decoder cannot open the file.
bool FFmpegImageStream::open(const std::string & filename)
{
    setFileName(filename);

    if (! m_decoder->open(filename))
        return false;

    // The image data points straight into the decoder's public buffer;
    // NO_DELETE because the decoder owns that memory.
    setImage(
        m_decoder->video_decoder().width(), m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE,
        const_cast<unsigned char *>(m_decoder->video_decoder().image()), NO_DELETE
    );

    m_decoder->video_decoder().setUserData(this);
    m_decoder->video_decoder().setPublishCallback(publishNewFrame);

    _status = PAUSED;
    applyLoopingMode();

    start(); // start thread

    return true;
}
void FFmpegImageStream::play()
{
    // Asynchronous: the command is handled by the producer thread in run().
    m_commands->push(CMD_PLAY);

#if 0
    // Wait for at least one frame to be published before returning the call
    OpenThreads::ScopedLock<Mutex> lock(m_mutex);

    while (duration() > 0 && ! m_frame_published_flag)
        m_frame_published_cond.wait(&m_mutex);
#endif
}
void FFmpegImageStream::pause()
{
    // Asynchronous: handled by the producer thread in run().
    m_commands->push(CMD_PAUSE);
}

void FFmpegImageStream::rewind()
{
    // Asynchronous: handled by the producer thread in run().
    m_commands->push(CMD_REWIND);
}
// Terminate the producer thread, optionally blocking until it has exited,
// then flush the decoder. Safe to call when the thread is not running.
void FFmpegImageStream::quit(bool waitForThreadToExit)
{
    // Stop the packet producer thread
    if (isRunning())
    {
        m_commands->push(CMD_STOP);

        if (waitForThreadToExit)
            join();
    }

    // Close the decoder (i.e. flush the decoder packet queues)
    m_decoder->close();
}
// Forward the audio sink to the audio decoder, which drives it during playback.
void FFmpegImageStream::setAudioSink(osg::ref_ptr<AudioSinkInterface> audio_sink)
{
    m_decoder->audio_decoder().setAudioSink(audio_sink);
}

// Called by the audio backend to pull 'size' bytes of decoded samples.
void FFmpegImageStream::fillAudioBuffer(void * const buffer, const size_t size)
{
    m_decoder->audio_decoder().fillBuffer(buffer, size);
}
// The following accessors simply forward to the decoder's sub-objects.

// Total duration of the media, in seconds.
double FFmpegImageStream::duration() const
{
    return m_decoder->duration();
}

bool FFmpegImageStream::videoAlphaChannel() const
{
    return m_decoder->video_decoder().alphaChannel();
}

double FFmpegImageStream::videoAspectRatio() const
{
    return m_decoder->video_decoder().aspectRatio();
}

double FFmpegImageStream::videoFrameRate() const
{
    return m_decoder->video_decoder().frameRate();
}

// True when the file contains an audio stream.
bool FFmpegImageStream::audioStream() const
{
    return m_decoder->audio_decoder().validContext();
}

int FFmpegImageStream::audioFrequency() const
{
    return m_decoder->audio_decoder().frequency();
}

int FFmpegImageStream::audioNbChannels() const
{
    return m_decoder->audio_decoder().nbChannels();
}

FFmpegSampleFormat FFmpegImageStream::audioSampleFormat() const
{
    return m_decoder->audio_decoder().sampleFormat();
}
// Producer thread main loop: while playing, alternate between feeding packets
// to the decoders and polling for commands; while paused, block until the next
// command arrives. Exits when handleCommand() returns false (CMD_STOP).
// Exceptions are caught here so they never escape the thread entry point.
void FFmpegImageStream::run()
{
    try
    {
        bool done = false;

        while (! done)
        {
            if (_status == PLAYING)
            {
                bool no_cmd;
                // Poll with a 1 ms timeout so packet production keeps going.
                const Command cmd = m_commands->timedPop(no_cmd, 1);

                if (no_cmd)
                {
                    m_decoder->readNextPacket();
                }
                else
                    done = ! handleCommand(cmd);
            }
            else
            {
                // Paused: nothing to produce, so block waiting for a command.
                done = ! handleCommand(m_commands->pop());
            }
        }
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegImageStream::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegImageStream::run : unhandled exception" << std::endl;
    }
}
void FFmpegImageStream::applyLoopingMode()
{
    // Translate the osg::ImageStream looping mode into the decoder's flag.
    m_decoder->loop(getLoopingMode() == LOOPING);
}
bool FFmpegImageStream::handleCommand(const Command cmd)
{
    // Execute one queued command; the return value tells run() whether the
    // producer loop should keep going (true) or terminate (false).
    if (cmd == CMD_PLAY)
    {
        cmdPlay();
        return true;
    }

    if (cmd == CMD_PAUSE)
    {
        cmdPause();
        return true;
    }

    if (cmd == CMD_REWIND)
    {
        cmdRewind();
        return true;
    }

    if (cmd == CMD_STOP)
        return false;

    // Unknown command: programming error.
    assert(false);
    return false;
}
void FFmpegImageStream::cmdPlay()
{
    // When leaving pause, lazily start the decoder threads the first time
    // playback begins (start() only applies to threads not yet running).
    if (_status == PAUSED)
    {
        if (! m_decoder->audio_decoder().isRunning())
            m_decoder->audio_decoder().start();

        if (! m_decoder->video_decoder().isRunning())
            m_decoder->video_decoder().start();
    }

    _status = PLAYING;
}
void FFmpegImageStream::cmdPause()
{
    // Placeholder: no decoder-side action is taken yet when entering pause;
    // only the status flag changes (which stops packet production in run()).
    if (_status == PLAYING)
    {

    }

    _status = PAUSED;
}
void FFmpegImageStream::cmdRewind()
{
    // Seek the decoder back to the beginning of the stream.
    m_decoder->rewind();
}
// Invoked from the video decoder thread each time a new frame has been
// written into the public RGBA buffer.
void FFmpegImageStream::publishNewFrame(const FFmpegDecoderVideo &, void * user_data)
{
    FFmpegImageStream * const this_ = reinterpret_cast<FFmpegImageStream*>(user_data);

    /** \bug If viewer.realize() hasn't been already called, this doesn't work? */
    this_->dirty();

    // Signal (only once) that the first frame is available, for anyone
    // blocked on m_frame_published_cond (see the disabled wait in play()).
    OpenThreads::ScopedLock<Mutex> lock(this_->m_mutex);

    if (! this_->m_frame_published_flag)
    {
        this_->m_frame_published_flag = true;
        this_->m_frame_published_cond.signal();
    }
}
} // namespace osgFFmpeg

View File

@ -0,0 +1,108 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H
#include <osg/ImageStream>
#include <OpenThreads/Condition>
#include <OpenThreads/Thread>
#include "AudioSinkInterface.hpp"
#include "FFmpegSampleFormat.hpp"
#ifdef _WIN32
#if defined OSG_LIBRARY_STATIC
#define OSGFFMPEG_EXPORT_API
#elif defined OSG_LIBRARY || defined osgFFmpeg_EXPORTS
#define OSGFFMPEG_EXPORT_API __declspec(dllexport)
#else
#define OSGFFMPEG_EXPORT_API __declspec(dllimport)
#endif
#else
#define OSGFFMPEG_EXPORT_API
#endif
namespace osgFFmpeg
{
class FFmpegDecoder;
class FFmpegDecoderAudio;
class FFmpegDecoderVideo;
template <class T>
class MessageQueue;
// osg::ImageStream backed by the FFmpeg decoder. The stream itself is a
// thread (the packet producer); play/pause/rewind/quit are asynchronous and
// are forwarded to that thread through a command queue.
class OSGFFMPEG_EXPORT_API FFmpegImageStream : public osg::ImageStream, public OpenThreads::Thread
{
public:

    FFmpegImageStream();
    FFmpegImageStream(const FFmpegImageStream & image, const osg::CopyOp & copyop = osg::CopyOp::SHALLOW_COPY);

    META_Object(osgFFmpeg, FFmpegImageStream);

    // Open the media file and start the producer thread (paused).
    bool open(const std::string & filename);

    // Asynchronous playback controls (handled on the producer thread).
    virtual void play();
    virtual void pause();
    virtual void rewind();
    virtual void quit(bool waitForThreadToExit = true);

    void setAudioSink(osg::ref_ptr<AudioSinkInterface> audio_sink);
    void fillAudioBuffer(void * const buffer, const size_t size);

    double duration() const;

    bool videoAlphaChannel() const;
    double videoAspectRatio() const;
    double videoFrameRate() const;

    bool audioStream() const;
    int audioFrequency() const;
    int audioNbChannels() const;
    FFmpegSampleFormat audioSampleFormat() const;

private:

    // Commands sent from the public API to the producer thread.
    enum Command
    {
        CMD_PLAY,
        CMD_PAUSE,
        CMD_STOP,
        CMD_REWIND
    };

    typedef MessageQueue<Command> CommandQueue;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::Condition Condition;

    virtual ~FFmpegImageStream();
    virtual void run();
    virtual void applyLoopingMode();

    bool handleCommand(Command cmd);

    void cmdPlay();
    void cmdPause();
    void cmdRewind();

    // Callback registered with the video decoder; runs on its thread.
    static void publishNewFrame(const FFmpegDecoderVideo &, void * user_data);

    FFmpegDecoder * m_decoder;       // owned
    CommandQueue * m_commands;       // owned

    Mutex m_mutex;                   // guards the first-frame flag below
    Condition m_frame_published_cond;
    bool m_frame_published_flag;     // set once the first frame has arrived
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_IMAGE_STREAM_H

View File

@ -0,0 +1,82 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H
#include "FFmpegHeaders.hpp"
namespace osgFFmpeg
{
// Tagged wrapper around AVPacket: either a real data packet (PACKET_DATA,
// with a payload) or an in-band control message for the decoder queues
// (end-of-stream / flush, with no payload).
struct FFmpegPacket
{
    enum Type
    {
        PACKET_DATA,
        PACKET_END_OF_STREAM,
        PACKET_FLUSH
    };

    FFmpegPacket() :
        type(PACKET_DATA)
    {
        packet.data = 0;
    }

    explicit FFmpegPacket(const Type type) :
        type(type)
    {
        packet.data = 0;
    }

    explicit FFmpegPacket(const AVPacket & packet) :
        packet(packet),
        type(PACKET_DATA)
    {
    }

    // Free the owned payload (if any) and reset to the default state.
    void clear()
    {
        if (packet.data != 0)
            av_free_packet(&packet);

        release();
    }

    // Drop the payload reference without freeing it.
    void release()
    {
        packet.data = 0;
        type = PACKET_DATA;
    }

    // Valid when exactly one of the two holds: a control message carries no
    // payload, a data packet carries one.
    bool valid() const
    {
        return (type != PACKET_DATA) ^ (packet.data != 0);
    }

    bool operator ! () const
    {
        return ! valid();
    }

    AVPacket packet;
    Type type;
};
// Functor used when flushing packet queues: frees each packet's payload.
struct FFmpegPacketClear
{
    void operator () (FFmpegPacket & packet) const
    {
        packet.clear();
    }
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_PACKET_H

View File

@ -0,0 +1,23 @@
#ifndef HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H
#define HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H
namespace osgFFmpeg
{
// FFmpeg-independent mirror of the libavcodec sample-format enumeration, so
// that public headers do not have to include the FFmpeg headers.
enum FFmpegSampleFormat
{
    SAMPLE_FORMAT_U8, //= SAMPLE_FMT_U8,
    SAMPLE_FORMAT_S16, //= SAMPLE_FMT_S16,
    SAMPLE_FORMAT_S24, //= SAMPLE_FMT_S24,
    SAMPLE_FORMAT_S32, //= SAMPLE_FMT_S32,
    SAMPLE_FORMAT_F32 //= SAMPLE_FMT_FLT
};
}
#endif // HEADER_GUARD_OSGFFMPEG_FFMPEG_SAMPLE_FORMAT_H

View File

@ -0,0 +1,156 @@
#ifndef HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H
#define HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H
#include <OpenThreads/Condition>
#include <OpenThreads/Mutex>
#include <OpenThreads/ScopedLock>
#include <deque>
namespace osgFFmpeg {
// Unbounded thread-safe FIFO used to pass messages between threads.
template <class T>
class MessageQueue
{
public:

    typedef T value_type;
    typedef size_t size_type;

    MessageQueue();
    ~MessageQueue();

    void clear();

    void push(const T & value);

    // Blocking, non-blocking and timed variants of pop.
    value_type pop();
    value_type tryPop(bool & is_empty);
    value_type timedPop(bool & is_empty, unsigned long ms);

private:

    // Non-copyable.
    MessageQueue(const MessageQueue &);
    MessageQueue & operator = (const MessageQueue &);

    typedef std::deque<T> Queue;
    typedef OpenThreads::Condition Condition;
    typedef OpenThreads::Mutex Mutex;
    typedef OpenThreads::ScopedLock<Mutex> ScopedLock;

    Mutex m_mutex;             // guards m_queue
    Condition m_not_empty;     // signaled on push
    Queue m_queue;
};
// The queue starts empty; all members are default-constructed.
template <class T>
MessageQueue<T>::MessageQueue()
{
}

template <class T>
MessageQueue<T>::~MessageQueue()
{
}
// Discard all pending messages. Note: waiting consumers are not signaled.
template <class T>
void MessageQueue<T>::clear()
{
    ScopedLock lock(m_mutex);
    m_queue.clear();
}
// Enqueue a copy of 'value' and wake one waiting consumer.
template <class T>
void MessageQueue<T>::push(const T & value)
{
    {
        ScopedLock lock(m_mutex);
        m_queue.push_back(value);
    }

    // Signal outside the lock, so the awakened consumer does not immediately
    // block again on the mutex we would still be holding.
    m_not_empty.signal();
}
// Blocking pop: waits until a message is available, then removes and
// returns the front of the queue.
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::pop()
{
    ScopedLock lock(m_mutex);

    // Wait in a loop to guard against spurious wakeups.
    while (m_queue.empty())
        m_not_empty.wait(&m_mutex);

    const value_type value = m_queue.front();
    m_queue.pop_front();

    return value;
}
// Non-blocking pop: sets is_empty and returns a default-constructed value
// when the queue holds no message, otherwise removes and returns the front.
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::tryPop(bool & is_empty)
{
    ScopedLock lock(m_mutex);

    if (m_queue.empty())
    {
        is_empty = true;
        return value_type();
    }

    is_empty = false;

    const value_type front = m_queue.front();
    m_queue.pop_front();

    return front;
}
// Pop with a timeout: waits up to 'ms' milliseconds for a message; sets
// is_empty and returns a default-constructed value if none arrived.
template <class T>
typename MessageQueue<T>::value_type MessageQueue<T>::timedPop(bool & is_empty, const unsigned long ms)
{
    ScopedLock lock(m_mutex);

    // We don't wait in a loop to avoid an infinite loop (as the ms timeout would not be decremented).
    // This means that timedPop() could return with (is_empty = true) before the timeout has been hit.
    if (m_queue.empty())
        m_not_empty.wait(&m_mutex, ms);

    is_empty = m_queue.empty();

    if (is_empty)
        return value_type();

    const value_type value = m_queue.front();
    m_queue.pop_front();

    return value;
}
} // namespace osgFFmpeg
#endif // HEADER_GUARD_OSGFFMPEG_MESSAGE_QUEUE_H

View File

@ -0,0 +1,68 @@
#include <osgDB/Registry>
#include <osgDB/FileNameUtils>
#include <osgDB/FileUtils>
#include "FFmpegHeaders.hpp"
#include "FFmpegImageStream.hpp"
/** Implementation heavily inspired by http://www.dranger.com/ffmpeg/ */
/** osgDB plugin that reads movie files through FFmpeg and returns an
  * osgFFmpeg::FFmpegImageStream for each successfully opened file. */
class ReaderWriterFFmpeg : public osgDB::ReaderWriter
{
public:

    ReaderWriterFFmpeg()
    {
        supportsExtension("avi", "");
        supportsExtension("flv", "");
        supportsExtension("mov", "");
        supportsExtension("mpg", "Mpeg movie format");
        supportsExtension("mpv", "Mpeg movie format");
        supportsExtension("wmv", "");

        // Register all FFmpeg formats/codecs
        av_register_all();
    }

    virtual ~ReaderWriterFFmpeg()
    {
    }

    virtual const char * className() const
    {
        return "ReaderWriterFFmpeg";
    }

    virtual ReadResult readImage(const std::string & filename, const osgDB::ReaderWriter::Options * options) const
    {
        // Reject extensions we did not register for.
        const std::string ext = osgDB::getLowerCaseFileExtension(filename);
        if (! acceptsExtension(ext))
            return ReadResult::FILE_NOT_HANDLED;

        const std::string path = osgDB::findDataFile(filename, options);
        if (path.empty())
            return ReadResult::FILE_NOT_FOUND;

        osg::notify(osg::INFO) << "ReaderWriterFFmpeg::readImage " << path << std::endl;

        // The stream opens the file and starts its producer thread (paused).
        osg::ref_ptr<osgFFmpeg::FFmpegImageStream> image_stream(new osgFFmpeg::FFmpegImageStream);

        if (! image_stream->open(path))
            return ReadResult::FILE_NOT_HANDLED;

        return image_stream.release();
    }

private:

};

// Register the plugin with osgDB under the "ffmpeg" name.
REGISTER_OSGPLUGIN(ffmpeg, ReaderWriterFFmpeg)