GClasses
GClasses::GNeuralNetOptimizer Class Reference [abstract]

Detailed Description

Optimizes the parameters of a differentiable function using an objective function.

#include <GOptimizer.h>

Inheritance diagram for GClasses::GNeuralNetOptimizer:
GClasses::GAdamOptimizer GClasses::GRMSPropOptimizer GClasses::GSGDOptimizer

Public Member Functions

 GNeuralNetOptimizer (GNeuralNet &model, GRand &rand, GObjective *objective=NULL)
 
virtual ~GNeuralNetOptimizer ()
 
size_t batchesPerEpoch () const
 
size_t batchSize () const
 
virtual void computeGradient (const GVec &feat, const GVec &lab)=0
 Evaluate feat and lab, and update the model's gradient. More...
 
GContextNeuralNet & context ()
 Returns the default context for training the model. (Note: It is allocated lazily. This should not be called before layers are added to the model. For multi-threaded optimization, a separate context should be allocated for each thread.) More...
 
virtual void descendGradient (double learningRate)=0
 Step the model's parameters in the direction of the calculated gradient scaled by learningRate. More...
 
size_t epochs () const
 
double improvementThresh () const
 
double learningRate () const
 
GNeuralNet & model ()
 
GObjective * objective ()
 
void optimize (const GMatrix &features, const GMatrix &labels)
 
virtual void optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start, size_t batchSize)
 Update and apply the gradient for a single batch in order. More...
 
void optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start)
 
virtual void optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii, size_t batchSize)
 Update and apply the gradient for a single batch in randomized order. More...
 
void optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii)
 
virtual void optimizeIncremental (const GVec &feat, const GVec &lab)
 Update and apply the gradient for a single training sample (on-line). More...
 
void optimizeWithValidation (const GMatrix &features, const GMatrix &labels, const GMatrix &validationFeat, const GMatrix &validationLab)
 
void optimizeWithValidation (const GMatrix &features, const GMatrix &labels, double validationPortion=0.35)
 
virtual void prepareForOptimizing ()=0
 Prepare for optimization (i.e. allocate delta vectors). More...
 
GRand & rand ()
 
void resetState ()
 Flushes the memory in any recurrent units in the network. This method should be called when beginning a new training sequence with neural networks that contain any recurrent blocks. More...
 
void setBatchesPerEpoch (size_t b)
 
void setBatchSize (size_t b)
 
void setEpochs (size_t e)
 
void setImprovementThresh (double m)
 
void setLearningRate (double l)
 
void setObjective (GObjective *objective)
 
void setWindowSize (size_t w)
 
double sumLoss (const GMatrix &features, const GMatrix &labels)
 
size_t windowSize () const
 

Protected Attributes

size_t m_batchesPerEpoch
 
size_t m_batchSize
 
size_t m_epochs
 
double m_learningRate
 
double m_minImprovement
 
GNeuralNet & m_model
 
GObjective * m_objective
 
GContextNeuralNet * m_pContext
 
GRand & m_rand
 
size_t m_windowSize
 

Constructor & Destructor Documentation

GClasses::GNeuralNetOptimizer::GNeuralNetOptimizer ( GNeuralNet & model,
GRand & rand,
GObjective * objective = NULL 
)
virtual GClasses::GNeuralNetOptimizer::~GNeuralNetOptimizer ( )
virtual

Member Function Documentation

size_t GClasses::GNeuralNetOptimizer::batchesPerEpoch ( ) const
inline
size_t GClasses::GNeuralNetOptimizer::batchSize ( ) const
inline
virtual void GClasses::GNeuralNetOptimizer::computeGradient ( const GVec & feat,
const GVec & lab 
)
pure virtual

Evaluate feat and lab, and update the model's gradient.

Implemented in GClasses::GRMSPropOptimizer, GClasses::GAdamOptimizer, and GClasses::GSGDOptimizer.

GContextNeuralNet& GClasses::GNeuralNetOptimizer::context ( )

Returns the default context for training the model. (Note: It is allocated lazily. This should not be called before layers are added to the model. For multi-threaded optimization, a separate context should be allocated for each thread.)

virtual void GClasses::GNeuralNetOptimizer::descendGradient ( double  learningRate)
pure virtual

Step the model's parameters in the direction of the calculated gradient scaled by learningRate.

Implemented in GClasses::GRMSPropOptimizer, GClasses::GAdamOptimizer, and GClasses::GSGDOptimizer.

size_t GClasses::GNeuralNetOptimizer::epochs ( ) const
inline
double GClasses::GNeuralNetOptimizer::improvementThresh ( ) const
inline
double GClasses::GNeuralNetOptimizer::learningRate ( ) const
inline
GNeuralNet& GClasses::GNeuralNetOptimizer::model ( )
inline
GObjective* GClasses::GNeuralNetOptimizer::objective ( )
inline
void GClasses::GNeuralNetOptimizer::optimize ( const GMatrix & features,
const GMatrix & labels 
)
virtual void GClasses::GNeuralNetOptimizer::optimizeBatch ( const GMatrix & features,
const GMatrix & labels,
size_t  start,
size_t  batchSize 
)
virtual

Update and apply the gradient for a single batch in order.

void GClasses::GNeuralNetOptimizer::optimizeBatch ( const GMatrix & features,
const GMatrix & labels,
size_t  start 
)
virtual void GClasses::GNeuralNetOptimizer::optimizeBatch ( const GMatrix & features,
const GMatrix & labels,
GRandomIndexIterator & ii,
size_t  batchSize 
)
virtual

Update and apply the gradient for a single batch in randomized order.

void GClasses::GNeuralNetOptimizer::optimizeBatch ( const GMatrix & features,
const GMatrix & labels,
GRandomIndexIterator & ii 
)
virtual void GClasses::GNeuralNetOptimizer::optimizeIncremental ( const GVec & feat,
const GVec & lab 
)
virtual

Update and apply the gradient for a single training sample (on-line).

void GClasses::GNeuralNetOptimizer::optimizeWithValidation ( const GMatrix & features,
const GMatrix & labels,
const GMatrix & validationFeat,
const GMatrix & validationLab 
)
void GClasses::GNeuralNetOptimizer::optimizeWithValidation ( const GMatrix & features,
const GMatrix & labels,
double  validationPortion = 0.35 
)
virtual void GClasses::GNeuralNetOptimizer::prepareForOptimizing ( )
pure virtual

Prepare for optimization (i.e. allocate delta vectors).

Implemented in GClasses::GRMSPropOptimizer, GClasses::GAdamOptimizer, and GClasses::GSGDOptimizer.

GRand& GClasses::GNeuralNetOptimizer::rand ( )
inline
void GClasses::GNeuralNetOptimizer::resetState ( )

Flushes the memory in any recurrent units in the network. This method should be called when beginning a new training sequence with neural networks that contain any recurrent blocks.

void GClasses::GNeuralNetOptimizer::setBatchesPerEpoch ( size_t  b)
inline
void GClasses::GNeuralNetOptimizer::setBatchSize ( size_t  b)
inline
void GClasses::GNeuralNetOptimizer::setEpochs ( size_t  e)
inline
void GClasses::GNeuralNetOptimizer::setImprovementThresh ( double  m)
inline
void GClasses::GNeuralNetOptimizer::setLearningRate ( double  l)
inline
void GClasses::GNeuralNetOptimizer::setObjective ( GObjective *  objective)
inline
void GClasses::GNeuralNetOptimizer::setWindowSize ( size_t  w)
inline
double GClasses::GNeuralNetOptimizer::sumLoss ( const GMatrix & features,
const GMatrix & labels 
)
size_t GClasses::GNeuralNetOptimizer::windowSize ( ) const
inline

Member Data Documentation

size_t GClasses::GNeuralNetOptimizer::m_batchesPerEpoch
protected
size_t GClasses::GNeuralNetOptimizer::m_batchSize
protected
size_t GClasses::GNeuralNetOptimizer::m_epochs
protected
double GClasses::GNeuralNetOptimizer::m_learningRate
protected
double GClasses::GNeuralNetOptimizer::m_minImprovement
protected
GNeuralNet& GClasses::GNeuralNetOptimizer::m_model
protected
GObjective* GClasses::GNeuralNetOptimizer::m_objective
protected
GContextNeuralNet* GClasses::GNeuralNetOptimizer::m_pContext
protected
GRand& GClasses::GNeuralNetOptimizer::m_rand
protected
size_t GClasses::GNeuralNetOptimizer::m_windowSize
protected