Trains a neural network with the Adam optimization algorithm. See Diederik P. Kingma and Jimmy Lei Ba, "Adam: A Method for Stochastic Optimization," ICLR, 2015.
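For reference (standard notation from the Adam paper, not taken from the class documentation itself): `beta1`, `beta2`, and `epsilon` below correspond to β₁, β₂, and ε in the Adam update. With parameters θ, gradient g_t at step t, and learning rate α, each step computes:

```latex
% Standard Adam update; t = step index, g_t = gradient, \alpha = learning rate
\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1 - \beta_1)\, g_t             && \text{first-moment estimate}\\
v_t &= \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2           && \text{second-moment estimate}\\
\hat{m}_t &= \frac{m_t}{1 - \beta_1^t}, \quad
\hat{v}_t = \frac{v_t}{1 - \beta_2^t}                    && \text{bias correction}\\
\theta_t &= \theta_{t-1} - \alpha\, \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon} && \text{parameter step}
\end{aligned}
```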
Public member functions:

| Return type | Member | Description |
| --- | --- | --- |
| | `GAdamOptimizer (GNeuralNet &model, GRand &rand, GObjective *error=NULL)` | |
| `double` | `beta1 () const` | |
| `double` | `beta2 () const` | |
| `virtual void` | `computeGradient (const GVec &feat, const GVec &lab) override` | Evaluate feat and lab, and update the model's gradient. |
| `virtual void` | `descendGradient (double learningRate) override` | Step the model's parameters in the direction of the computed gradient, scaled by learningRate. |
| `double` | `epsilon () const` | |
| `virtual void` | `prepareForOptimizing () override` | Prepare for optimization (i.e., allocate buffers). |
| `void` | `setBeta1 (double b)` | |
| `void` | `setBeta2 (double b)` | |
| `void` | `setEpsilon (double e)` | |
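A minimal sketch of constructing and configuring the optimizer with the accessors above. The header paths are an assumption (adjust to your GClasses installation), and the hyperparameter values are merely the defaults suggested in the Adam paper:

```cpp
// Hypothetical usage sketch; header names assumed, not confirmed by this page.
#include <GClasses/GNeuralNet.h>
#include <GClasses/GOptimizer.h>
#include <GClasses/GRand.h>

using namespace GClasses;

void configureAdam(GNeuralNet& model, GRand& rand)
{
	GAdamOptimizer opt(model, rand);  // default objective (error = NULL)
	opt.setLearningRate(0.001);       // inherited from GNeuralNetOptimizer
	opt.setBeta1(0.9);                // decay rate of the first-moment estimate
	opt.setBeta2(0.999);              // decay rate of the second-moment estimate
	opt.setEpsilon(1e-8);             // numerical stabilizer in the denominator
}
```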
Public member functions inherited from GNeuralNetOptimizer:

| Return type | Member | Description |
| --- | --- | --- |
| | `GNeuralNetOptimizer (GNeuralNet &model, GRand &rand, GObjective *objective=NULL)` | |
| `virtual` | `~GNeuralNetOptimizer ()` | |
| `size_t` | `batchesPerEpoch () const` | |
| `size_t` | `batchSize () const` | |
| `GContextNeuralNet &` | `context ()` | Returns the default context for training the model. (It is allocated lazily, so this should not be called before layers are added to the model. For multi-threaded optimization, allocate a separate context for each thread.) |
| `size_t` | `epochs () const` | |
| `double` | `improvementThresh () const` | |
| `double` | `learningRate () const` | |
| `GNeuralNet &` | `model ()` | |
| `GObjective *` | `objective ()` | |
| `void` | `optimize (const GMatrix &features, const GMatrix &labels)` | |
| `virtual void` | `optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start, size_t batchSize)` | Update and apply the gradient for a single batch in order. |
| `void` | `optimizeBatch (const GMatrix &features, const GMatrix &labels, size_t start)` | |
| `virtual void` | `optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii, size_t batchSize)` | Update and apply the gradient for a single batch in randomized order. |
| `void` | `optimizeBatch (const GMatrix &features, const GMatrix &labels, GRandomIndexIterator &ii)` | |
| `virtual void` | `optimizeIncremental (const GVec &feat, const GVec &lab)` | Update and apply the gradient for a single training sample (on-line). |
| `void` | `optimizeWithValidation (const GMatrix &features, const GMatrix &labels, const GMatrix &validationFeat, const GMatrix &validationLab)` | |
| `void` | `optimizeWithValidation (const GMatrix &features, const GMatrix &labels, double validationPortion=0.35)` | |
| `GRand &` | `rand ()` | |
| `void` | `resetState ()` | Flushes the memory in any recurrent units in the network. Call this when beginning a new training sequence with a network that contains recurrent blocks. |
| `void` | `setBatchesPerEpoch (size_t b)` | |
| `void` | `setBatchSize (size_t b)` | |
| `void` | `setEpochs (size_t e)` | |
| `void` | `setImprovementThresh (double m)` | |
| `void` | `setLearningRate (double l)` | |
| `void` | `setObjective (GObjective *objective)` | |
| `void` | `setWindowSize (size_t w)` | |
| `double` | `sumLoss (const GMatrix &features, const GMatrix &labels)` | |
| `size_t` | `windowSize () const` | |
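A hedged sketch of how the inherited entry points above might be driven. It assumes `features` and `labels` are row-aligned `GMatrix` objects, that `model` already has its layers added, and that `GMatrix::row()` yields the `GVec` expected by `optimizeIncremental`; the epoch and batch-size values are illustrative:

```cpp
// Illustrative only, not a definitive recipe from the library docs.
#include <GClasses/GMatrix.h>
#include <GClasses/GNeuralNet.h>
#include <GClasses/GOptimizer.h>
#include <GClasses/GRand.h>

using namespace GClasses;

void trainExamples(GNeuralNet& model, GRand& rand,
                   const GMatrix& features, const GMatrix& labels)
{
	GAdamOptimizer opt(model, rand);
	opt.setEpochs(100);    // illustrative value
	opt.setBatchSize(32);  // illustrative value

	// Full training run over the whole data set:
	opt.optimize(features, labels);

	// Or hold out a validation portion (here 35%) for early stopping:
	opt.optimizeWithValidation(features, labels, 0.35);

	// Or present one sample at a time (on-line learning). With recurrent
	// blocks, flush recurrent state before each new sequence:
	opt.resetState();
	opt.optimizeIncremental(features.row(0), labels.row(0));
}
```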