@article{Sma1994,
  Author   = {Smagt, Patrick van der},
  Title    = {Minimisation methods for training feed-forward networks},
  Journal  = {Neural Networks},
  Year     = {1994},
  Volume   = {7},
  Number   = {1},
  Pages    = {1--11},
  Keywords = {brml machine-learning},
  Abstract = {Minimisation methods for training feed-forward networks with back-propagation are compared. Feed-forward neural network training is a special case of function minimisation, where no explicit model of the data is assumed. Therefore, and due to the high dimensionality of the data, linearisation of the training problem through the use of orthogonal basis functions is not desirable. The focus is on function minimisation on any basis. Quasi-Newton and conjugate gradient methods are reviewed, and the latter are shown to be a special case of error back-propagation with a momentum term. Three feed-forward learning problems are tested with five methods. It is shown that, due to its fixed step size, standard error back-propagation performs well in avoiding local minima. However, methods that use not only the local gradient but also the second derivative of the error function require much shorter training times. Conjugate gradient with Powell restarts proves to be the superior method.}
}

@COMMENT{Bibtex file generated on 2018-10-9 with typo3 si_bibtex plugin. Data from https://brml.org/publications/publications/}