Finished the max-value net (2/3/1 neurons) with 10k learning iterations. No good.
commit 6ed30e56c4
parent d4a22ecae7
Layer.cpp | 12
@@ -54,16 +54,16 @@ void Layer::updateInputWeights(Layer & prevLayer)
 {
     static const double trainingRate = 0.2;
 
-    for (size_t currentLayerIndex = 0; currentLayerIndex < sizeWithoutBiasNeuron(); ++currentLayerIndex)
+    for (size_t targetLayerIndex = 0; targetLayerIndex < sizeWithoutBiasNeuron(); ++targetLayerIndex)
     {
-        Neuron &targetNeuron = at(currentLayerIndex);
+        const Neuron &targetNeuron = at(targetLayerIndex);
 
-        for (size_t prevLayerIndex = 0; prevLayerIndex < prevLayer.size(); ++prevLayerIndex)
+        for (size_t sourceLayerIndex = 0; sourceLayerIndex < prevLayer.size(); ++sourceLayerIndex)
         {
-            Neuron &sourceNeuron = prevLayer.at(prevLayerIndex);
+            Neuron &sourceNeuron = prevLayer.at(sourceLayerIndex);
 
-            sourceNeuron.setOutputWeight(currentLayerIndex,
-                sourceNeuron.getOutputWeight(currentLayerIndex) +
+            sourceNeuron.setOutputWeight(targetLayerIndex,
+                sourceNeuron.getOutputWeight(targetLayerIndex) +
                 sourceNeuron.getOutputValue() * targetNeuron.getGradient() * trainingRate);
         }
     }
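Note: the hunk above only renames the loop indices (current/prev to target/source) and makes targetNeuron const; the update applied to each connection weight is unchanged. A minimal standalone sketch of that rule, with illustrative names that are not part of the repository:

// Hypothetical helper, not from the repository: returns the new weight of one
// connection after the same incremental step the diff above applies in place.
double updatedWeight(double currentWeight,
                     double sourceOutput,
                     double targetGradient,
                     double trainingRate = 0.2)
{
    // w_new = w_old + (source output) * (target gradient) * (learning rate)
    return currentWeight + sourceOutput * targetGradient * trainingRate;
}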
Net.cpp | 5
@@ -73,11 +73,10 @@ void Net::backProp(const std::vector<double> &targetValues)
 
     for (unsigned int i = 0; i < numResultValues; ++i)
     {
-        double delta = resultValues[i] - targetValues[i];
-        rmsError += delta * delta;
+        rmsError += std::pow(resultValues[i] - targetValues[i], 2);
     }
 
-    rmsError = sqrt(rmsError / numResultValues);
+    rmsError = std::sqrt(rmsError / numResultValues);
 
     // calculate output neuron gradients
     for (unsigned int i = 0; i < numResultValues; ++i)
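Note: the Net.cpp hunk keeps the same error measure and only switches to std::pow and the std::-qualified sqrt; the value is still the root-mean-square error over the output neurons. A minimal sketch of that computation on its own, assuming matching, non-empty vectors (the free function below is illustrative, not part of the repository):

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical helper, not from the repository: root-mean-square error between
// the network's outputs and the target values, as accumulated in Net::backProp.
double rmsError(const std::vector<double> &results, const std::vector<double> &targets)
{
    double sum = 0.0;
    for (std::size_t i = 0; i < results.size(); ++i)
    {
        sum += std::pow(results[i] - targets[i], 2);
    }
    return std::sqrt(sum / results.size());
}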
Neuro.cpp | 31
@@ -12,25 +12,32 @@ int main()
 
     Net myNet({ 2, 3, 1 });
 
-    std::vector<double> inputValues =
-    {
-        0.1,
-        0.7,
-    };
-
-    std::vector<double> targetValues = { 0.7 };
-
-    myNet.feedForward(inputValues);
-
-    std::vector<double> outputValues = myNet.getOutput();
-
-    double error = outputValues[0] - targetValues[0];
-
-    std::cout << "Error: ";
-    std::cout << std::abs(error);
-    std::cout << std::endl;
-
-    myNet.backProp(targetValues);
+    size_t numIterations = 10000;
+    for (size_t iteration = 0; iteration < numIterations; ++iteration)
+    {
+        std::vector<double> inputValues =
+        {
+            std::rand() / (double)RAND_MAX,
+            std::rand() / (double)RAND_MAX
+        };
+
+        std::vector<double> targetValues =
+        {
+            *std::max_element(inputValues.begin(), inputValues.end())
+        };
+
+        myNet.feedForward(inputValues);
+
+        std::vector<double> outputValues = myNet.getOutput();
+
+        double error = outputValues[0] - targetValues[0];
+
+        std::cout << "Error: ";
+        std::cout << std::abs(error);
+        std::cout << std::endl;
+
+        myNet.backProp(targetValues);
+    }
 }
 catch (std::exception &ex)
 {