First implementation of weight updates. Very slow rate of change in the output value.
parent de06daaad3
commit a79abb5db1
5 changed files with 43 additions and 12 deletions
Layer.cpp (17 changes)
@@ -51,6 +51,21 @@ void Layer::connectTo(const Layer & nextLayer)
     }
 }
 
-void Layer::updateInputWeights(const Layer & prevLayer)
+void Layer::updateInputWeights(Layer & prevLayer)
 {
+    static const double trainingRate = 0.8;
+
+    for (size_t currentLayerIndex = 0; currentLayerIndex < size() - 1; ++currentLayerIndex)
+    {
+        Neuron &targetNeuron = at(currentLayerIndex);
+
+        for (size_t prevLayerIndex = 0; prevLayerIndex < prevLayer.size(); ++prevLayerIndex)
+        {
+            Neuron &sourceNeuron = prevLayer.at(prevLayerIndex);
+
+            sourceNeuron.setOutputWeight(currentLayerIndex,
+                sourceNeuron.getOutputWeight(currentLayerIndex) +
+                sourceNeuron.getOutputValue() * targetNeuron.getGradient() * trainingRate);
+        }
+    }
 }
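Read as math, the new loop appears to apply a plain delta-rule step per connection, with the training rate eta = trainingRate = 0.8, o_i the source neuron's output value, and delta_j the target neuron's gradient (this reading is an interpretation of the loop above, not something stated in the code):

    w_{ij} \leftarrow w_{ij} + \eta \, o_i \, \delta_j , \quad \eta = 0.8

With no momentum term and a single fixed-rate step per backProp pass, each individual weight moves only a little, which is consistent with the slow rate of change in the output value mentioned in the commit message.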
Layer.h (2 changes)
@@ -14,5 +14,5 @@ public:
     double getWeightedSum(int outputNeuron) const;
     void connectTo(const Layer & nextLayer);
 
-    void updateInputWeights(const Layer &prevLayer);
+    void updateInputWeights(Layer &prevLayer);
 };
Neuro.cpp (23 changes)
@@ -14,18 +14,21 @@ int main()
 
     Net myNet({ inputValues.size(), 4, targetValues.size() });
 
-    myNet.feedForward(inputValues);
-
-    std::vector<double> outputValues = myNet.getOutput();
-
-    std::cout << "Result: ";
-    for (double &value : outputValues)
+    for (int i = 0; i < 20; ++i)
     {
-        std::cout << value << " ";
-    }
-    std::cout << std::endl;
+        myNet.feedForward(inputValues);
 
-    myNet.backProp(targetValues);
+        std::vector<double> outputValues = myNet.getOutput();
+
+        std::cout << "Result: ";
+        for (double &value : outputValues)
+        {
+            std::cout << value << " ";
+        }
+        std::cout << std::endl;
+
+        myNet.backProp(targetValues);
+    }
 }
 catch (std::exception &ex)
 {
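The loop now prints the raw output values twenty times. A minimal sketch of how the per-pass error could be printed instead, to make the slow convergence measurable; it assumes only the Net calls already used above (feedForward, getOutput, backProp) plus standard C++, and rmsError is a hypothetical helper, not part of the repository:

#include <cmath>
#include <iostream>
#include <vector>

// Hypothetical helper: root-mean-square error between the network's output
// and the target values (assumes both vectors have the same, non-zero length).
static double rmsError(const std::vector<double> &output,
                       const std::vector<double> &target)
{
    double sum = 0.0;
    for (size_t i = 0; i < output.size(); ++i)
    {
        const double delta = target[i] - output[i];
        sum += delta * delta;
    }
    return std::sqrt(sum / static_cast<double>(output.size()));
}

// Possible use inside the training loop from Neuro.cpp:
//
//     myNet.feedForward(inputValues);
//     std::vector<double> outputValues = myNet.getOutput();
//     std::cout << "Pass " << i << " RMS error: "
//               << rmsError(outputValues, targetValues) << std::endl;
//     myNet.backProp(targetValues);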
Neuron.cpp (10 changes)
@@ -84,3 +84,13 @@ double Neuron::getGradient() const
     return gradient;
 }
 
+double Neuron::getOutputWeight(size_t index) const
+{
+    return outputWeights.at(index);
+}
+
+void Neuron::setOutputWeight(size_t index, double value)
+{
+    outputWeights.at(index) = value;
+}
+
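The accessor pair keeps outputWeights private while still letting Layer::updateInputWeights adjust individual connections. A small usage sketch under that assumption; nudgeWeight is a hypothetical free function, while getOutputValue, getGradient, getOutputWeight, and setOutputWeight are the Neuron members already used or added in this commit:

// Hypothetical helper: apply one delta-rule step to a single connection,
// using only the accessors introduced in this commit.
void nudgeWeight(Neuron &source, const Neuron &target,
                 size_t connection, double trainingRate)
{
    const double delta =
        source.getOutputValue() * target.getGradient() * trainingRate;
    source.setOutputWeight(connection, source.getOutputWeight(connection) + delta);
}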
Neuron.h (3 changes)
@@ -25,6 +25,9 @@ public:
 
     double getGradient() const;
 
+    double getOutputWeight(size_t index) const;
+    void setOutputWeight(size_t index, double value);
+
 private:
     static double transferFunction(double inputValue);
     static double transferFunctionDerivative(double inputValue);