Added a performance test section to osgunittests, currently just does basic C/C++ tests.

This commit is contained in:
Robert Osfield 2006-07-03 13:53:39 +00:00
parent 07ac167fa8
commit c986f6ea41
5 changed files with 220 additions and 0 deletions

View File

@ -85,14 +85,31 @@ LINK32=link.exe
# Name "Example osgunittests - Win32 Release"
# Name "Example osgunittests - Win32 Debug"
# Begin Group "Source Files"
# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
# Begin Source File
SOURCE=..\..\..\examples\osgunittests\osgunittests.cpp
# End Source File
# Begin Source File
SOURCE=..\..\..\examples\osgunittests\performance.cpp
# End Source File
# End Group
# Begin Group "Header Files"
# PROP Default_Filter ";h;hpp;hxx;hm;inl"
# Begin Source File
SOURCE=..\..\..\examples\osgunittests\performance.h
# End Source File
# Begin Source File
SOURCE=..\..\icons\osg_icon.rc
# End Source File
# End Group
# End Target
# Begin Group "Resource Files"

View File

@ -2,6 +2,7 @@ TOPDIR = ../..
include $(TOPDIR)/Make/makedefs
CXXFILES =\
performance.cpp\
osgunittests.cpp\
LIBS += -losgProducer -lProducer -losgText -losgGA -losgDB -losgUtil -losg $(GL_LIBS) $(X_LIBS) $(OTHER_LIBS)

View File

@ -6,6 +6,8 @@
#include <osg/Matrix>
#include <osg/io_utils>
#include "performance.h"
#include <iostream>
void testFrustum(double left,double right,double bottom,double top,double zNear,double zFar)
@ -192,6 +194,7 @@ int main( int argc, char** argv )
arguments.getApplicationUsage()->addCommandLineOption("qt","Display qualified tests.");
arguments.getApplicationUsage()->addCommandLineOption("sizeof","Display sizeof tests.");
arguments.getApplicationUsage()->addCommandLineOption("matrix","Display qualified tests.");
arguments.getApplicationUsage()->addCommandLineOption("performance","Run performance tests.");
if (arguments.argc()<=1)
@ -212,6 +215,9 @@ int main( int argc, char** argv )
bool printQuatTest = false;
while (arguments.read("quat")) printQuatTest = true;
bool performanceTest = false;
while (arguments.read("p") || arguments.read("performance")) performanceTest = true;
// if user request help write it out to cout.
if (arguments.read("-h") || arguments.read("--help"))
{
@ -264,6 +270,14 @@ int main( int argc, char** argv )
}
if (performanceTest)
{
std::cout<<"**** performance tests ******"<<std::endl;
runPerformanceTests();
}
if (printQualifiedTest)
{
std::cout<<"***** Qualified Tests ******"<<std::endl;

View File

@ -0,0 +1,182 @@
#include "performance.h"
#include <osg/Timer>
#include <iostream>
// Micro-benchmark helper: measures a begin()/end() interval with osg::Timer
// and subtracts the average cost of the timing calls themselves, estimated
// once by calibrate().
struct Benchmark
{
    Benchmark()
    {
        calibrate();
        // Initialize the tick pair so time() is well defined even before
        // the first begin()/end() pair is issued.
        _beginTick = _timer.tick();
        _endTick = _timer.tick();
    }

    // Estimate the overhead of one begin()+end() pair by averaging over
    // numLoops empty measurements; stored in _averageDelay (seconds).
    void calibrate(unsigned int numLoops = 100000)
    {
        osg::Timer_t beginTick = _timer.tick();
        for(unsigned int i=0;i<numLoops;++i)
        {
            begin();
            end();
        }
        osg::Timer_t endTick = _timer.tick();
        _averageDelay = _timer.delta_s(beginTick,endTick)/(double)numLoops;
    }

    inline void begin()
    {
        _beginTick = _timer.tick();
    }

    inline void end()
    {
        _endTick = _timer.tick();
    }

    // Elapsed seconds between the last begin()/end(), with the calibrated
    // timing overhead removed; clamped at zero so timer noise cannot
    // produce a negative duration.
    inline double time()
    {
        double t = _timer.delta_s(_beginTick,_endTick) - _averageDelay;
        return t<0.0 ? 0.0 : t;
    }

    // Print the labelled per-iteration time with an auto-selected unit.
    // Fixed unit labels: the 10^-6 band is microseconds (us), not "ns",
    // and the 10^-9 band is nanoseconds (ns), not "ps".
    inline void output(const char* str, double numIterations=1.0)
    {
        std::cout<<str<<"\t";
        double s = time()/numIterations;
        if (s>=1.0) std::cout<<s<<" s"<<std::endl;
        else if (s>=0.001) std::cout<<s*1000.0<<" ms (10 ^ -3)"<<std::endl;
        else if (s>=0.000001) std::cout<<s*1000000.0<<" us (10 ^ -6)"<<std::endl;
        else std::cout<<s*1000000000.0<<" ns (10 ^ -9)"<<std::endl;
    }

    osg::Timer _timer;
    osg::Timer_t _beginTick;
    osg::Timer_t _endTick;
    double _averageDelay;
};
// RUN: time D iterations of statement B using Benchmark A, then print the
// per-iteration result labelled with the stringized statement text (#B).
#define RUN(A,B,D) { A.begin(); for(unsigned int i=0;i<D;++i) B; A.end(); A.output(#B,D); }
// Global counter mutated by every benchmarked operation; the visible global
// side effect helps keep the compiler from eliminating the timed loops.
static int v = 0;
// The single unit of work that every call-mechanism benchmark performs.
#define OPERATION { v=v+1; }
// The same operation exposed through different call mechanisms so their
// relative invocation overheads can be compared by runPerformanceTests().
inline void inline_increment() { OPERATION }
void function_increment() { OPERATION }
typedef void ( * IncrementProc) ();
IncrementProc s_functionIncrement = &function_increment;
inline void functionPointer_increment() { s_functionIncrement(); }
// Forward declarations for the method-call benchmark classes defined below.
struct InlineMethod;
struct Method;
struct VirtualMethod;
struct VirtualMethod2;
// Double-dispatch visitor used to measure accept()/apply() call overhead;
// the apply() bodies are defined out of line after the visited classes.
struct Visitor
{
virtual void apply(InlineMethod& m);
virtual void apply(Method& m);
virtual void apply(VirtualMethod& m);
virtual void apply(VirtualMethod2& m);
virtual ~Visitor() {}
};
// Benchmark fixture: method() is non-virtual and defined inline in the
// class body, so the compiler may inline it at the call site.
struct InlineMethod
{
void method() { OPERATION }
virtual void accept(Visitor& visitor) { visitor.apply(*this); }
virtual ~InlineMethod() {}
};
// Benchmark fixture: method() is non-virtual but defined out of line,
// preventing inlining across the call in the timed loop.
struct Method
{
virtual void accept(Visitor& visitor) { visitor.apply(*this); }
void method();
virtual ~Method() {}
};
void Method::method() { OPERATION }
// Benchmark fixture: method() is virtual, measuring vtable dispatch cost.
struct VirtualMethod
{
virtual void accept(Visitor& visitor) { visitor.apply(*this); }
virtual void method();
virtual ~VirtualMethod() {}
};
void VirtualMethod::method() { OPERATION }
// Benchmark fixture: derived class overriding the virtual method, with a
// 100-byte payload — presumably to enlarge the object for the cast and
// stack construction tests (TODO confirm intent).
struct VirtualMethod2 : public VirtualMethod
{
VirtualMethod2() { }
virtual void accept(Visitor& visitor) { visitor.apply(*this); }
virtual void method();
virtual ~VirtualMethod2() { }
char a[100];
};
void VirtualMethod2::method() { OPERATION }
// Out-of-line visitor dispatch: each apply() forwards to the visited
// object's method(), completing the double dispatch being timed.
void Visitor::apply(Method& m) { m.method(); }
void Visitor::apply(VirtualMethod& m) { m.method(); }
void Visitor::apply(InlineMethod& m) { m.method(); }
void Visitor::apply(VirtualMethod2& m) { m.method(); }
// Variant of Visitor with the apply() bodies defined inline.
// NOTE(review): not exercised by runPerformanceTests() in this file —
// either wire it into a benchmark or confirm it is intentionally spare.
struct CustomVisitor
{
virtual void apply(InlineMethod& m) { m.method(); }
virtual void apply(Method& m) { m.method(); }
virtual void apply(VirtualMethod& m) { m.method(); }
virtual void apply(VirtualMethod2& m) { m.method(); }
virtual ~CustomVisitor() {}
};
// Entry point (declared in performance.h): times the same increment
// operation invoked through progressively more indirect call mechanisms
// and prints one line per test via Benchmark::output().
void runPerformanceTests()
{
Benchmark benchmark;
unsigned int iterations = 10000000;
// Baseline: empty statement, i.e. pure loop overhead.
RUN(benchmark, {} , iterations)
v = 0;
// Direct statement vs. function-pointer, inline and plain function calls.
RUN(benchmark, OPERATION , iterations)
RUN(benchmark, functionPointer_increment() , iterations)
RUN(benchmark, inline_increment() , iterations)
RUN(benchmark, function_increment() , iterations)
// Member-function calls on the different fixture classes.
VirtualMethod2 m4;
RUN(benchmark, m4.method() , iterations)
InlineMethod m1;
RUN(benchmark, m1.method() , iterations)
Method m2;
RUN(benchmark, m2.method() , iterations)
VirtualMethod m3;
RUN(benchmark, m3.method() , iterations)
// NOTE(review): m3.method() is timed twice — looks like a deliberate
// repeat to gauge run-to-run variability; confirm it is not a duplicate.
RUN(benchmark, m3.method() , iterations)
// Double dispatch through the visitor; m4 is timed first and last,
// again apparently as a repeatability check (verify).
Visitor visitor;
RUN(benchmark, m4.accept(visitor), iterations)
RUN(benchmark, m1.accept(visitor), iterations)
RUN(benchmark, m2.accept(visitor), iterations)
RUN(benchmark, m3.accept(visitor), iterations)
RUN(benchmark, m4.accept(visitor), iterations)
// Cast cost: checked dynamic_cast vs. unchecked static_cast down to the
// same derived type before the virtual call.
VirtualMethod* vm4 = &m4;
RUN(benchmark, (dynamic_cast<VirtualMethod2*>(vm4))->method(), iterations)
RUN(benchmark, (static_cast<VirtualMethod2*>(vm4))->method(), iterations)
// Stack construction + destruction + call, small vs. large object.
RUN(benchmark, { VirtualMethod mm; mm.method(); }, iterations)
RUN(benchmark, { VirtualMethod2 mm; mm.method(); }, iterations)
}

View File

@ -0,0 +1,6 @@
#ifndef PERFORMANCE_H
#define PERFORMANCE_H 1
// Runs the C/C++ call-overhead micro-benchmarks implemented in performance.cpp.
extern void runPerformanceTests();
#endif