More back-propagation code, calculation of output-neuron gradients

mandlm 2015-10-15 22:16:34 +02:00
parent 7ba16e9e9d
commit ce88f690cf
3 changed files with 34 additions and 4 deletions

Net.cpp

@@ -59,7 +59,7 @@ std::vector<double> Net::getOutput()
 void Net::backProp(const std::vector<double> &targetValues)
 {
-    const Layer &outputLayer = back();
+    Layer &outputLayer = back();
     if (targetValues.size() != outputLayer.size())
     {
@@ -67,12 +67,30 @@ void Net::backProp(const std::vector<double> &targetValues)
     }
     std::vector<double> resultValues = getOutput();
+    unsigned int numResultValues = resultValues.size();
     double rmsError = 0.0;
-    for (unsigned int i = 0; i < resultValues.size(); ++i)
+    for (unsigned int i = 0; i < numResultValues; ++i)
     {
         double delta = resultValues[i] - targetValues[i];
         rmsError += delta * delta;
     }
-    rmsError = sqrt(rmsError / resultValues.size());
+    rmsError = sqrt(rmsError / numResultValues);
+    for (unsigned int i = 0; i < numResultValues; ++i)
+    {
+        outputLayer[i].calcOutputGradients(targetValues[i]);
+    }
+    for (auto it = end() - 1; it != begin(); --it)
+    {
+        Layer &hiddenLayer = *it;
+        Layer &prevLayer = *(it - 1);
+        for (auto &neuron : hiddenLayer)  // by reference, so gradient updates persist
+        {
+            //neuron.calcHiddenGradients(prevLayer);
+        }
+    }
 }
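
For reference, the error the first loop accumulates is the root-mean-square of the per-neuron deltas over the n output neurons:

    E_{rms} = \sqrt{ \tfrac{1}{n} \sum_{i=1}^{n} (o_i - t_i)^2 }

where o_i is the network's i-th output and t_i the corresponding target. The second loop then seeds back-propagation by computing each output neuron's gradient; the hidden-layer pass is still stubbed out in this commit.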

Neuron.cpp

@@ -4,6 +4,7 @@
 Neuron::Neuron(double value)
     : outputValue(value)
+    , gradient(0)
 {
 }
@@ -52,3 +53,10 @@ double Neuron::getOutputValue() const
 {
     return outputValue;
 }
+
+void Neuron::calcOutputGradients(double targetValue)
+{
+    double delta = targetValue - outputValue;
+    gradient = delta * transferFunctionDerivative(outputValue);
+}
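
The value stored here is the standard output-neuron error term: the difference between target and actual output, scaled by the derivative of the transfer function at the neuron's current output,

    \delta = (t - o) \cdot f'(o)

`transferFunctionDerivative` is not part of this diff and is presumably defined elsewhere in Neuron.cpp.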

Neuron.h

@@ -7,6 +7,7 @@ class Neuron
 private:
     double outputValue;
     std::vector<double> outputWeights;
+    double gradient;
 public:
     Neuron(double value = 1.0);
@@ -18,4 +19,7 @@ public:
     double getWeightedOutputValue(unsigned int outputNeuron) const;
     void createRandomOutputWeights(unsigned int numberOfWeights);
     double getOutputValue() const;
+    void calcOutputGradients(double targetValue);
+    //void calcHiddenGradients(const Layer &prevLayer);
 };
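
The commented-out `calcHiddenGradients` declaration hints at the next step. For orientation only, a minimal sketch of the textbook hidden-layer gradient follows; it is not this repository's code. It assumes `Layer` is a `std::vector<Neuron>`, that `outputWeights[n]` is the weight from this neuron to neuron n of the layer it feeds into, and that the argument is that downstream layer (the stub's signature passes `prevLayer` instead):

// Hypothetical sketch (not from this commit): textbook hidden-neuron gradient.
// Assumes outputWeights[n] connects this neuron to neuron n of nextLayer,
// the layer this neuron feeds into.
void Neuron::calcHiddenGradients(const Layer &nextLayer)
{
    // Sum how much this neuron contributed to the downstream errors.
    double downstream = 0.0;
    for (unsigned int n = 0; n < nextLayer.size(); ++n)
    {
        downstream += outputWeights[n] * nextLayer[n].gradient;
    }
    // Scale by the transfer function's derivative, as in calcOutputGradients.
    gradient = downstream * transferFunctionDerivative(outputValue);
}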