Commit f4757136 authored by Sebastian Eichelbaum

[MERGE]

parents b7c89451 6951a84d
#! /path/to/openwalnut/bin/openwalnut-script -f
# 1. Introduction
#
# This is an example of how to use the script interpreter that comes with OpenWalnut.
# We will load all NIfTI datasets in a directory given via a script parameter, apply a
# Gaussian blur and then save the results to another directory, also given via a script parameter.
#
# However, before we start, let's have a look at OpenWalnut's script interpreter.
# 2. Script interpreter
#
# The script interpreter can be started via
#
# openwalnut-script -i language-name
#
# where 'language-name' is the name of the language to use (currently "lua" and/or "python", depending on which
# prerequisite libraries were available when compiling).
# To start in Python mode, use:
#
# openwalnut-script -i python
#
# You can use this Python interpreter just as you would use the normal one. For example, type
#
# print i
#
# This yields an error, as 'i' was not defined. Try
#
# i = 1
# print i
#
# This prints the expected value. You can also use all modules available to your Python installation:
#
# import datetime
# print datetime.datetime.now().strftime( "%A %Y-%m-%d %H:%M" )
#
# This will print the current day and time. You can also define and use functions and classes. Feel free to experiment a bit.
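# For example (this is plain Python, nothing OpenWalnut-specific yet):
#
# def addOne( x ):
#     return x + 1
#
# print addOne( 41 )
#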
# You can quit by typing 'quit'.
# 3. Executing script files
#
# To execute a script, switch to your OpenWalnut installation and execute:
#
# openwalnut-script -f script-name
#
# where 'script-name' is the path to your script file. For example, to execute this script, type
#
# openwalnut-script -f template.py
#
# Depending on whether you installed OpenWalnut or built it yourself, you might need to change the path to
# something like:
#
# openwalnut-script -f ../resources/scripting/template.py
#
# The script will complain that too few parameters were given, and OpenWalnut will quit afterwards.
# The directories can be set via script parameters:
#
# openwalnut-script -f ../resources/scripting/template.py ./inDir ./outDir
#
# or
#
# ../resources/scripting/template.py ./inDir ./outDir
#
# (This requires the script to be executable and the path to your OpenWalnut installation/build
# directory to be set correctly in the first line of the script.)
# 4. Implementing a batch processing pipeline for OpenWalnut
#
# Now that we know how to execute the script, let's have a look at what it actually does.
# The first part is importing what we need:
import sys
import os
import signal
import time
# This makes the interpreter react to Ctrl+C.
signal.signal( signal.SIGINT, signal.SIG_DFL )
# Now we can proceed by checking the parameters:
if len( sys.argv ) < 3:
    print "Too few parameters, quitting."
    exit( 1 )
# This checks the input dir:
inputDir = sys.argv[ 1 ]
print "Input dir was set to", inputDir
if not os.path.exists( inputDir ):
    print "The directory", inputDir, "does not exist!"
    exit( 1 )
if not os.path.isdir( inputDir ):
    print inputDir, "is not a directory!"
    exit( 1 )
# Now the same for our output directory:
outputDir = sys.argv[ 2 ]
print "Output dir was set to", outputDir
if not os.path.exists( outputDir ):
    print "The directory", outputDir, "does not exist!"
    exit( 1 )
if not os.path.isdir( outputDir ):
    print outputDir, "is not a directory!"
    exit( 1 )
# The next step is to find all the files that we want to process:
niftiList = list()
filesList = os.listdir( inputDir )
for f in filesList:
    if not os.path.isdir( os.path.join( inputDir, f ) ):
        filename, extension = os.path.splitext( f )
        if extension is not None and extension == '.nii':
            niftiList.append( f )
        if extension is not None and extension == '.gz':
            filename, extension = os.path.splitext( filename )
            if extension is not None and extension == '.nii':
                niftiList.append( f )
print "List of nifti files in input directory:", niftiList
if len( niftiList ) == 0:
    print "No nifti files found!"
    exit( 1 )
# Now that we have found all the datasets, we can proceed by initializing the
# OpenWalnut module pipeline that will apply the Gaussian filter to our datasets. The
# pipeline will look like this:
#
# 'Data Module' -> 'Gauss Filtering' -> 'Write NIfTI'
#
# We start by creating the 'Gauss Filtering' and 'Write NIfTI' modules using the
# 'rootContainer' global:
gauss = rootContainer.create( "Gauss Filtering" )
writer = rootContainer.create( "Write NIfTI" )
# We can now change some properties of the modules we just created. We get access
# to a module's properties with the 'getProperties()' and 'getInformationProperties()' functions.
# These return the respective property groups.
# The groups can then be asked for properties or nested property groups via the 'getProperty()'
# and 'getGroup()' functions, both taking a string parameter denoting the name of the property
# or group to get.
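# As a small illustration - the group and property names used here are made up, the real
# names can be looked up in the GUI version of OpenWalnut:
#
# someGroup = gauss.getProperties().getGroup( "Some group" )
# someProp = someGroup.getProperty( "Some property" )
#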
iterProp = gauss.getProperties().getProperty( "Iterations" )
# Let's apply the filter twice, which effectively widens the smoothing:
iterProp.setInt( 2 )
# The properties provide various functions for getting and setting, for example the
# 'setInt()' function we just used. There are more such functions, such as
#
# 'setBool()'
# 'setDouble()'
# 'setFilename()' - the parameter is a string denoting a path/filename
# 'setSelection()' - the parameter is an integer denoting which element of a selection to select
# 0 is the first one, 1 the second one, and so on.
#
# There are also the respective getters.
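# For illustration, a few of these in action ('Filename' and 'Iterations' are real properties used
# elsewhere in this script; the selection property name is made up, and 'getInt()' is assumed to be
# the getter matching 'setInt()'):
#
# writer.getProperties().getProperty( "Filename" ).setFilename( "/tmp/out.nii.gz" )
# gauss.getProperties().getProperty( "Some selection" ).setSelection( 0 )  # select the first entry
# print gauss.getProperties().getProperty( "Iterations" ).getInt()         # read the value back
#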
# We now need to connect the modules. To be more precise, we need to connect
# the writer's input connector 'in' to the gauss filter's output connector 'out'.
# The names of the connectors (just like the names of the properties) can be looked up
# in the GUI version of OpenWalnut.
writer.getInputConnector( "in" ).connect( gauss.getOutputConnector( "out" ) )
# The modules can be disconnected again using the 'disconnect()' function of either
# the input- or the output connectors. We'll see this in action later.
# Now that we have done the setup, let's start working on the datasets.
for dataset in niftiList:
    fileToLoad = os.path.join( inputDir, dataset )

    # We start with loading the dataset, which is done by creating a data module:
    data = rootContainer.createDataModule( fileToLoad )

    # Now we check if the data we loaded is a scalar dataset:
    if not data.getInformationProperties().getProperty( "Dataset type" ).getString( True ) == "WDataSetScalar":
        print fileToLoad, "does not contain scalar data! Skipping."
        continue

    # If the dataset is a scalar dataset, we proceed by setting the output filename in the writer:
    fileToSave = os.path.join( outputDir, dataset )
    writer.getProperties().getProperty( "Filename" ).setFilename( fileToSave )

    # Now we connect the data module; this starts the pipeline:
    gauss.getInputConnector( "in" ).connect( data.getOutputConnector( "out" ) )

    # The tricky part is waiting for the correct events, as all OpenWalnut modules run
    # in their own threads. We cannot just keep loading data and passing it to the gauss
    # module, as it would simply ignore new inputs until it has finished calculating. By the
    # time that happens, we might already be done loading the datasets, and OpenWalnut would
    # close because we reached the end of the script without waiting for the calculations to finish.
    #
    # So what we need to do now is wait for the input of the writer to update:
    writer.getInputConnector( "in" ).waitForInput()

    # We can now trigger the saving of the result by pushing the 'Save' button.
    writer.getProperties().getProperty( "Do save" ).click()

    # We could now wait for the button to be reset, which means the saving is done:
    #
    # writer.getProperties().getProperty( "Do save" ).waitForUpdate()
    #
    # You can wait for any property to be changed via its 'waitForUpdate()' function.
    #
    # However, for really small datasets, the saving might be done so quickly that the update
    # happens before we actually start waiting for it, leaving us waiting forever. And we most
    # certainly do not have that much time. So we just wait a few seconds instead.
    time.sleep( 3 )

    # Now our data should be written to disk, so we can delete the dataset from the pipeline
    # by removing the data module. This is done via the 'remove()' function of the 'rootContainer'
    # global. We might also want to disconnect the data and gauss modules beforehand.
    data.getOutputConnector( "out" ).disconnect()
    rootContainer.remove( data )

    # Now we are ready for the next dataset.
# Well, and that's it for this simple batch processing example.
time.sleep( 3 )
print "Everything done, bye!"
# As python provides quite a lot of functionality, you can write lots of cool scripts to control
# your pipelines. For example, using sockets, you could control multiple OpenWalnut instances on
# remote computers. Have an entire army of OpenWalnuts do as you command!
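#
# As a very rough sketch (using only Python's standard 'socket' module; the port and the tiny
# one-message protocol are completely made up), a script could receive a filename from a remote
# controller and feed it into the pipeline like this:
#
# import socket
# server = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
# server.bind( ( "", 12345 ) )
# server.listen( 1 )
# connection, address = server.accept()
# fileToLoad = connection.recv( 1024 ).strip()
# data = rootContainer.createDataModule( fileToLoad )
# connection.close()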
#
# Have fun!
......@@ -141,13 +141,19 @@ ADD_SUBDIRECTORY( core )
OPTION( OW_GUI_QT4 "Enable this to build the QT4-based OpenWalnut GUI." ON )
IF( OW_GUI_QT4 )
SET( OWQt4GuiName "qt4gui" )
SET( OWBinaryName "openwalnut-qt4" )
# build
ADD_SUBDIRECTORY( qt4gui )
ENDIF()
# -----------------------------------------------------------------------------------------------------------------------------------------------
# Scripting GUI
OPTION( OW_GUI_SCRIPT "Enable this to build the script-based OpenWalnut interface for commandline-only use." ON )
IF( OW_GUI_SCRIPT )
# build
ADD_SUBDIRECTORY( scriptgui )
ENDIF()
# -----------------------------------------------------------------------------------------------------------------------------------------------
# Modules
......
......@@ -37,6 +37,21 @@ ADD_DEFINITIONS( '-DW_LIB_SUFFIX="${CMAKE_SHARED_LIBRARY_SUFFIX}"' )
OPTION( OW_STATIC_BUILD "Enable this to build the core library as static library." OFF )
# the files to link against if we found the necessary libs for a script interpreter
SET( INTERPRETER_LINK_LIBRARIES )
IF( BUILD_PYTHON_INTERPRETER )
# Python
FIND_PACKAGE( PythonLibs REQUIRED )
# Python found?
IF( PYTHONLIBS_FOUND AND Boost_FOUND )
INCLUDE_DIRECTORIES( ${Boost_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS} )
ADD_DEFINITIONS( -DPYTHON_FOUND )
SET( INTERPRETER_LINK_LIBRARIES ${INTERPRETER_LINK_LIBRARIES} ${PYTHON_LIBRARIES} )
ENDIF()
ENDIF() # BUILD_PYTHON_INTERPRETER
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Add sources as target
# ---------------------------------------------------------------------------------------------------------------------------------------------------
......@@ -51,7 +66,7 @@ ELSE()
ADD_LIBRARY( ${LibName} SHARED ${TARGET_CPP_FILES} ${TARGET_H_FILES} ${OW_VERSION_HEADER} )
ENDIF()
TARGET_LINK_LIBRARIES( ${LibName} ${Boost_LIBRARIES} ${CMAKE_STANDARD_LIBRARIES} ${CMAKE_DL_LIBS} ${OPENGL_gl_LIBRARY} ${OPENSCENEGRAPH_LIBRARIES}
TARGET_LINK_LIBRARIES( ${LibName} ${Boost_LIBRARIES} ${CMAKE_STANDARD_LIBRARIES} ${CMAKE_DL_LIBS} ${OPENGL_gl_LIBRARY} ${OPENSCENEGRAPH_LIBRARIES} ${INTERPRETER_LINK_LIBRARIES}
)
# Tell CMake that someone creates this file for us. See doc of SETUP_VERSION_HEADER for details why this is needed.
......@@ -156,4 +171,3 @@ SETUP_SHADERS( "${TARGET_GLSL_FILES}" "${OW_SHARE_DIR_RELATIVE}/shaders" "CORE"
SETUP_STYLECHECKER( "${LibName}"
"${TARGET_CPP_FILES};${TARGET_H_FILES};${TARGET_TEST_FILES};${TARGET_GLSL_FILES}" # add all these files to the stylechecker
"" ) # exlude some ugly files
......@@ -28,7 +28,6 @@
WCreateColorArraysThread::WCreateColorArraysThread( int left, int right, boost::shared_ptr< std::vector< float > >vertices,
boost::shared_ptr< std::vector< size_t > > lineStartIndexes,
boost::shared_ptr< std::vector< size_t > > lineLengths,
boost::shared_ptr< std::vector< float > > globalColors,
boost::shared_ptr< std::vector< float > > localColors,
......@@ -41,7 +40,6 @@ WCreateColorArraysThread::WCreateColorArraysThread( int left, int right, boost:
m_tangents( tangents ),
m_globalColors( globalColors ),
m_localColors( localColors ),
m_lineStartIndexes( lineStartIndexes ),
m_lineLengths( lineLengths )
{
}
......@@ -52,17 +50,12 @@ WCreateColorArraysThread::~WCreateColorArraysThread()
void WCreateColorArraysThread::threadMain()
{
if( !m_vertices || !m_tangents || !m_globalColors || !m_localColors || !m_lineStartIndexes || !m_lineLengths )
if( !m_vertices || !m_tangents || !m_globalColors || !m_localColors || !m_lineLengths )
{
return;
}
if( !m_vertices->size() ||
!m_tangents->size() ||
!m_globalColors->size() ||
!m_localColors->size() ||
!m_lineStartIndexes->size() ||
!m_lineLengths->size() )
if( !m_vertices->size() || !m_tangents->size() || !m_globalColors->size() || !m_localColors->size() || !m_lineLengths->size() )
{
return;
}
......
......@@ -29,9 +29,8 @@
#include "../common/WThreadedRunner.h"
/**
* implements a thread that updates the fiber selection bit field
* Thread for computing directional color coding of fibers.
*/
class WCreateColorArraysThread: public WThreadedRunner // NOLINT
{
......@@ -39,17 +38,15 @@ public:
/**
* default constructor
*
* \param left
* \param right
* \param vertices
* \param lineStartIndexes
* \param lineLengths
* \param globalColors
* \param localColors
* \param left start position of the first line to compute colors for
* \param right last line for which the color is computed
* \param vertices vertices of all lines
* \param lineLengths line length in vertex array
* \param globalColors where to write global coloring
* \param localColors where to write local coloring
* \param tangents
*/
WCreateColorArraysThread( int left, int right, boost::shared_ptr< std::vector< float > >vertices,
boost::shared_ptr< std::vector< size_t > > lineStartIndexes,
boost::shared_ptr< std::vector< size_t > > lineLengths,
boost::shared_ptr< std::vector< float > > globalColors,
boost::shared_ptr< std::vector< float > > localColors,
......@@ -83,7 +80,7 @@ private:
/**
* Point vector for all fibers
*/
boost::shared_ptr< std::vector< float > > m_vertices;
boost::shared_ptr< const std::vector< float > > m_vertices;
/**
* Point vector for tangents at each vertex, used for fake tubes
......@@ -99,19 +96,12 @@ private:
* Storing the local color value of the fibers for each point.
* \note it is mutable to allow getLocalColors creating it on demand.
*/
mutable boost::shared_ptr< std::vector< float > > m_localColors;
/**
* Line vector that contains the start index of its first point for each line.
* \warning The index returned cannot be used in the vertices array until
* the number of components for each point is multiplied.
*/
boost::shared_ptr< std::vector< size_t > > m_lineStartIndexes;
boost::shared_ptr< std::vector< float > > m_localColors;
/**
* Line vector that contains the number of vertices for each line
*/
boost::shared_ptr< std::vector< size_t > > m_lineLengths;
boost::shared_ptr< const std::vector< size_t > > m_lineLengths;
};
#endif // WCREATECOLORARRAYSTHREAD_H
......@@ -41,6 +41,11 @@
class WDataSetFiberVector : public WMixinVector< WFiber >, public WDataSet // NOLINT
{
public:
/**
* Short hand for a boost::shared_ptr on such classes.
*/
typedef boost::shared_ptr< WDataSetFiberVector > SPtr;
/**
* Default constructor for creating an empty fiber vector.
*/
......
......@@ -136,16 +136,15 @@ void WDataSetFibers::init()
boost::shared_ptr< std::vector< float > > localColors = boost::shared_ptr< std::vector< float > >( new std::vector<float>( size ) );
boost::shared_ptr< std::vector< float > > customColors = boost::shared_ptr< std::vector< float > >( new std::vector<float>( size ) );
// TODO(all): use the new WThreadedJobs functionality
WCreateColorArraysThread* t1 = new WCreateColorArraysThread( 0, m_lineLengths->size()/4, m_vertices,
m_lineStartIndexes, m_lineLengths, globalColors, localColors, m_tangents );
m_lineLengths, globalColors, localColors, m_tangents );
WCreateColorArraysThread* t2 = new WCreateColorArraysThread( m_lineLengths->size()/4+1, m_lineLengths->size()/2, m_vertices,
m_lineStartIndexes, m_lineLengths, globalColors, localColors, m_tangents );
m_lineLengths, globalColors, localColors, m_tangents );
WCreateColorArraysThread* t3 = new WCreateColorArraysThread( m_lineLengths->size()/2+1, m_lineLengths->size()/4*3, m_vertices,
m_lineStartIndexes, m_lineLengths, globalColors, localColors, m_tangents );
m_lineLengths, globalColors, localColors, m_tangents );
WCreateColorArraysThread* t4 = new WCreateColorArraysThread( m_lineLengths->size()/4*3+1, m_lineLengths->size()-1, m_vertices,
m_lineStartIndexes, m_lineLengths, globalColors, localColors, m_tangents );
m_lineLengths, globalColors, localColors, m_tangents );
t1->run();
t2->run();
t3->run();
......@@ -186,6 +185,8 @@ void WDataSetFibers::init()
m_colorProp = m_properties->addProperty( "Color Scheme", "Determines the coloring scheme to use for this data.", m_colors->getSelectorFirst() );
WPropertyHelper::PC_SELECTONLYONE::addTo( m_colorProp );
WPropertyHelper::PC_NOTEMPTY::addTo( m_colorProp );
m_infoProperties->addProperty( "#Fibers", "The number of fibers", static_cast< WPVBaseTypes::PV_INT >( m_lineLengths->size() ) );
m_infoProperties->addProperty( "#Vertices", "The number of vertices", static_cast< WPVBaseTypes::PV_INT >( m_vertices->size() ) );
}
bool WDataSetFibers::isTexture() const
......@@ -243,16 +244,6 @@ WDataSetFibers::TangentArray WDataSetFibers::getTangents() const
return m_tangents;
}
WDataSetFibers::ColorArray WDataSetFibers::getGlobalColors() const
{
return boost::shared_static_cast< const ColorScheme >( ( *m_colors )[0] )->getColor();
}
WDataSetFibers::ColorArray WDataSetFibers::getLocalColors() const
{
return boost::shared_static_cast< const ColorScheme >( ( *m_colors )[1] )->getColor();
}
void WDataSetFibers::addColorScheme( WDataSetFibers::ColorArray colors, std::string name, std::string description )
{
ColorScheme::ColorMode mode = ColorScheme::GRAY;
......
......@@ -296,20 +296,6 @@ public:
*/
TangentArray getTangents() const;
/**
* Reference to the vector storing the global colors.
*
* \return Pointer to the float array. This always is RGB.
*/
ColorArray getGlobalColors() const;
/**
* Reference to the vector storing the local colors.
*
* \return Pointer to the float array. This always is RGB.
*/
ColorArray getLocalColors() const;
/**
* Get the parameter values for each vertex. Same indexing as vertices. Used to store additional scalar values for each vertex.
*
......
......@@ -70,6 +70,16 @@ WDataSetSingle::SPtr WDataSetScalar::clone() const
return WDataSetSingle::SPtr( new WDataSetScalar( getValueSet(), getGrid() ) );
}
std::string const WDataSetScalar::getName() const
{
return "WDataSetScalar";
}
std::string const WDataSetScalar::getDescription() const
{
return "A scalar dataset, i.e. one scalar value per voxel.";
}
double WDataSetScalar::getMax() const
{
return m_valueSet->getMaximumValue();
......
......@@ -26,6 +26,7 @@
#define WDATASETSCALAR_H
#include <map>
#include <string>
#include <boost/thread.hpp>
#include <boost/shared_ptr.hpp>
......@@ -114,6 +115,20 @@ public:
*/
double getMin() const;
/**
* Gets the name of this prototype.
*
* \return the name.
*/
virtual const std::string getName() const;
/**
* Gets the description for this prototype.
*
* \return the description
*/
virtual const std::string getDescription() const;
/**
* Returns the histogram of this dataset's valueset. If it does not exist yet, it will be created and cached. It does NOT make use of the
* WValueSetHistogram down scaling feature even though the number of buckets might be lower than the default as the down scaling might
......
......@@ -108,6 +108,8 @@ void WKernel::init()
// load all modules
m_moduleFactory->load();
m_scriptEngine = boost::shared_ptr< WScriptEngine >( new WScriptEngine( m_moduleContainer ) );
}
WKernel* WKernel::getRunningKernel()
......@@ -146,10 +148,16 @@ void WKernel::threadMain()
WLogger::getLogger()->addLogMessage( "Starting Kernel", "Kernel", LL_INFO );
// wait for GUI to be initialized properly
m_gui->isInitialized().wait();
if( m_gui )
{
m_gui->isInitialized().wait();
}
// start GE
m_graphicsEngine->run();
if( m_graphicsEngine )
{
m_graphicsEngine->run();
}
// actually there is nothing more to do here
waitForStop();
......@@ -187,6 +195,11 @@ boost::shared_ptr< WSelectionManager>WKernel::getSelectionManager()
return m_selectionManager;
}
boost::shared_ptr<WScriptEngine> WKernel::getScriptEngine()
{
return m_scriptEngine;
}
WTimer::ConstSPtr WKernel::getTimer() const
{
return m_timer;
......
......@@ -32,6 +32,7 @@
#include "../common/WTimer.h"
#include "../common/WLogger.h"
#include "../scripting/WScriptEngine.h"
#include "../graphicsEngine/WGraphicsEngine.h"
#include "WBatchLoader.h"
......@@ -60,7 +61,7 @@ class WThreadedRunner;
* GUI, GE and DataHandler
* \ingroup kernel
*/
class WKernel: public WThreadedRunner
class WKernel: public WThreadedRunner
{
public:
/**
......@@ -169,6 +170,13 @@ public:
*/
boost::shared_ptr< WSelectionManager> getSelectionManager();
/**
* Get the script engine of this kernel.
*
* \return A pointer to the script engine.
*/
boost::shared_ptr< WScriptEngine > getScriptEngine();
/**
* Returns the system timer. If you need timing for animations and similar, use this one. This timer can change to frame based timing if the
* user plays back some animation. So, everything which uses this timer can always do accurate per-frame animations even if frame time and
......@@ -223,6 +231,11 @@ protected:
*/
boost::shared_ptr< WModuleContainer > m_moduleContainer;
/**
* The script engine to use.
*/
boost::shared_ptr< WScriptEngine > m_scriptEngine;
private:
/**
* Loads all the modules it can find.
......
//---------------------------------------------------------------------------
//
// Project: OpenWalnut ( http://www.openwalnut.org )
//