GClasses
GClasses::GAdamOptimizer Class Reference

Detailed Description

Trains a neural network by ADAM. See Diederik P. Kingma and Jimmy Lei Ba, "Adam: A Method for Stochastic Optimization", 2015.

#include <GOptimizer.h>

Inheritance diagram for GClasses::GAdamOptimizer:
GClasses::GNeuralNetOptimizer

Public Member Functions

 GAdamOptimizer (GNeuralNet &model, GRand &rand, GObjective *error=NULL)
 
double beta1 () const
 
double beta2 () const
 
virtual void computeGradient (const GVec &feat, const GVec &lab) override
 Evaluate feat and lab, and update the model's gradient. More...
 
virtual void descendGradient (double learningRate) override
 Step the model's parameters in the direction of the calculated gradient scaled by learningRate. More...
 
double epsilon () const
 
virtual void prepareForOptimizing () override
 Prepare for optimization (i.e. allocate buffers). More...
 
void setBeta1 (double b)
 
void setBeta2 (double b)
 
void setEpsilon (double e)
 
- Public Member Functions inherited from GClasses::GNeuralNetOptimizer
 GNeuralNetOptimizer (GNeuralNet &model, GRand &rand, GObjective *objective=NULL)
 
virtual ~GNeuralNetOptimizer ()
 
size_t batchesPerEpoch () const
 
size_t batchSize () const
 
GContextNeuralNet * context ()
 Returns the default context for training the model. (Note: It is allocated lazily. This should not be called before layers are added to the model. For multi-threaded optimization, a separate context should be allocated for each thread.) More...
 
size_t epochs () const
 
double improvementThresh () const
 
double learningRate () const
 
GNeuralNet & model ()
 
GObjective * objective ()
 
void optimize (const GMatrix &features, const GMatrix &labels)
 
virtual void optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start, size_t batchSize)
 Update and apply the gradient for a single batch in order. More...
 
void optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start)
 
virtual void optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii, size_t batchSize)
 Update and apply the gradient for a single batch in randomized order. More...
 
void optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii)
 
virtual void optimizeIncremental (const GVec &feat, const GVec &lab)
 Update and apply the gradient for a single training sample (on-line). More...
 
void optimizeWithValidation (const GMatrix &features, const GMatrix &labels, const GMatrix &validationFeat, const GMatrix &validationLab)
 
void optimizeWithValidation (const GMatrix &features, const GMatrix &labels, double validationPortion=0.35)
 
GRand & rand ()
 
void resetState ()
 Flushes the memory in any recurrent units in the network. This method should be called when beginning a new training sequence with neural networks that contain any recurrent blocks. More...
 
void setBatchesPerEpoch (size_t b)
 
void setBatchSize (size_t b)
 
void setEpochs (size_t e)
 
void setImprovementThresh (double m)
 
void setLearningRate (double l)
 
void setObjective (GObjective *objective)
 
void setWindowSize (size_t w)
 
double sumLoss (const GMatrix &features, const GMatrix &labels)
 
size_t windowSize () const
 

Additional Inherited Members

- Protected Attributes inherited from GClasses::GNeuralNetOptimizer
size_t m_batchesPerEpoch
 
size_t m_batchSize
 
size_t m_epochs
 
double m_learningRate
 
double m_minImprovement
 
GNeuralNet & m_model
 
GObjective * m_objective
 
GContextNeuralNet * m_pContext
 
GRand & m_rand
 
size_t m_windowSize
 

Constructor & Destructor Documentation

GClasses::GAdamOptimizer::GAdamOptimizer ( GNeuralNet & model,
GRand & rand,
GObjective * error = NULL 
)

Member Function Documentation

double GClasses::GAdamOptimizer::beta1 ( ) const
inline
double GClasses::GAdamOptimizer::beta2 ( ) const
inline
virtual void GClasses::GAdamOptimizer::computeGradient ( const GVec & feat,
const GVec & lab 
)
overridevirtual

Evaluate feat and lab, and update the model's gradient.

Implements GClasses::GNeuralNetOptimizer.

virtual void GClasses::GAdamOptimizer::descendGradient ( double  learningRate)
overridevirtual

Step the model's parameters in the direction of the calculated gradient scaled by learningRate.

Implements GClasses::GNeuralNetOptimizer.

double GClasses::GAdamOptimizer::epsilon ( ) const
inline
virtual void GClasses::GAdamOptimizer::prepareForOptimizing ( )
overridevirtual

Prepare for optimization (i.e. allocate buffers).

Implements GClasses::GNeuralNetOptimizer.

void GClasses::GAdamOptimizer::setBeta1 ( double  b)
inline
void GClasses::GAdamOptimizer::setBeta2 ( double  b)
inline
void GClasses::GAdamOptimizer::setEpsilon ( double  e)
inline