// OpenSceneGraph/include/osg/ImageStream

/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2006 Robert Osfield
*
* This library is open source and may be redistributed and/or modified under
* the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or
* (at your option) any later version. The full license is in LICENSE file
* included with this distribution, and on the openscenegraph.org website.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* OpenSceneGraph Public License for more details.
*/
#ifndef OSG_IMAGESTREAM
#define OSG_IMAGESTREAM 1
#include <osg/Image>
#include <osg/AudioStream>
namespace osg {
/* Submission note from Stephan Huber (2012-10-24): "attached you'll find the
 * latest versions of the QTKit + the AVFoundation-plugin, some changes to
 * osgPresentation and a small enhancement for ImageIO. I fixed some bugs and
 * did some more tests with both of the video-plugins. I integrated CoreVideo
 * with osgPresentation, ImageStream has a new virtual method called
 * createSuitableTexture which returns NULL for default implementations.
 * Specialized implementations like the QTKit-plugin return a
 * CoreVideo-texture. I refactored the code in
 * SlideShowConstructor::createTexturedQuad to use a texture returned from
 * ImageStream::createSuitableTexture. I did not use osgDB::readObjectFile to
 * get the texture-object, as a lot of image-related code in
 * SlideShowConstructor had to be refactored to use a texture. My changes are
 * minimal and should not break existing code. There's one minor issue with
 * CoreVideo in general: As the implementation is asynchronous, there might be
 * no texture available, when first showing the video the first frame. I am a
 * bit unsure how to tackle this problem, any input on this is appreciated.
 * Back to the AVFoundation-plugin: the current implementation does not
 * support CoreVideo as the QTKit-plugin supports it. There's no way to get
 * decoded frames from AVFoundation stored on the GPU, which is kind of sad.
 * I added some support for CoreVideo to transfer decoded frames back to the
 * GPU, but in my testings the performance was worse than using the normal
 * approach using glTexSubImage. This is why I disabled CoreVideo for
 * AVFoundation. You can still request a CoreVideoTexture via readObjectFile,
 * though."
 */
// forward declare of osg::Texture
class Texture;

/**
 * Image Stream class.
 *
 * Base class for images whose pixel contents change over time (movies,
 * video capture streams, ...). Adds a play/pause/rewind/seek control
 * interface, looping control, optional attached audio streams, and a hook
 * for plugins to supply a specialized texture (e.g. CoreVideo). The base
 * class implementations only record status; plugin subclasses override the
 * virtual methods to drive actual decoding.
 */
class OSG_EXPORT ImageStream : public Image
{
    public:

        ImageStream();

        /** Copy constructor using CopyOp to manage deep vs shallow copy. */
        ImageStream(const ImageStream& image,const CopyOp& copyop=CopyOp::SHALLOW_COPY);

        virtual Object* cloneType() const { return new ImageStream(); }
        virtual Object* clone(const CopyOp& copyop) const { return new ImageStream(*this,copyop); }
        virtual bool isSameKindAs(const Object* obj) const { return dynamic_cast<const ImageStream*>(obj)!=0; }
        virtual const char* libraryName() const { return "osg"; }
        virtual const char* className() const { return "ImageStream"; }

        /** Return -1 if *this < *rhs, 0 if *this==*rhs, 1 if *this>*rhs. */
        virtual int compare(const Image& rhs) const;

        /** Playback state of the stream. */
        enum StreamStatus
        {
            INVALID,
            PLAYING,
            PAUSED,
            REWINDING
        };

        /** Seek to the given time; no-op in the base class. */
        virtual void seek(double /*time*/) {}

        /** Start playback; the base class only records the new status. */
        virtual void play() { _status=PLAYING; }

        /** Pause playback; the base class only records the new status. */
        virtual void pause() { _status=PAUSED; }

        /** Rewind the stream; the base class only records the new status. */
        virtual void rewind() { _status=REWINDING; }

        /** Shut the stream down; waitForThreadToExit is a hint for threaded subclasses. */
        virtual void quit(bool /*waitForThreadToExit*/ = true) {}

        StreamStatus getStatus() const { return _status; }

        enum LoopingMode
        {
            NO_LOOPING,
            LOOPING
        };

        /** Set the looping mode, notifying subclasses via applyLoopingMode()
          * only when the mode actually changes. */
        void setLoopingMode(LoopingMode mode)
        {
            if (_loopingMode == mode) return;

            _loopingMode = mode;
            applyLoopingMode();
        }

        LoopingMode getLoopingMode() const { return _loopingMode; }

        // Timing/rate queries; base class returns inert defaults that
        // subclasses are expected to override.
        virtual double getCreationTime() const { return HUGE_VAL; }
        virtual double getLength() const { return 0.0; }
        virtual double getFrameRate() const { return 0.0; }
        virtual double getCurrentTime() const { return 0.0; }

        virtual void setReferenceTime(double) {}
        virtual double getReferenceTime() const { return 0.0; }

        virtual void setTimeMultiplier(double) {}
        virtual double getTimeMultiplier() const { return 0.0; }

        virtual void setVolume(float) {}
        virtual float getVolume() const { return 0.0f; }

        /// set the balance of the audio: -1 = left, 0 = center, 1 = right
        virtual float getAudioBalance() { return 0.0f; }
        virtual void setAudioBalance(float /*b*/) {}

        typedef std::vector< osg::ref_ptr<osg::AudioStream> > AudioStreams;

        void setAudioStreams(const AudioStreams& asl) { _audioStreams = asl; }
        AudioStreams& getAudioStreams() { return _audioStreams; }
        const AudioStreams& getAudioStreams() const { return _audioStreams; }

        /** create a suitable texture for this imagestream, return NULL, if not supported
          * implement this method in subclasses to use special technologies like CoreVideo
          * or similar.
          */
        virtual osg::Texture* createSuitableTexture() { return NULL; }

    protected:

        /** Hook called by setLoopingMode() when the mode changes; no-op by default. */
        virtual void applyLoopingMode() {}

        virtual ~ImageStream() {}

        StreamStatus    _status;
        LoopingMode     _loopingMode;

        AudioStreams    _audioStreams;
};
} // namespace
#endif