Implemented dynamic learning

main
mandlm 2015-10-18 22:05:18 +02:00
parent 6ef1f9657c
commit 3d30346f2d
3 changed files with 18 additions and 13 deletions

View File

@@ -53,7 +53,7 @@ void Layer::connectTo(const Layer & nextLayer)
void Layer::updateInputWeights(Layer & prevLayer)
{
static const double trainingRate = 0.5;
static const double trainingRate = 0.2;
for (size_t currentLayerIndex = 0; currentLayerIndex < sizeWithoutBiasNeuron(); ++currentLayerIndex)
{

View File

@@ -2,9 +2,9 @@
Net::Net(std::initializer_list<size_t> layerSizes)
{
if (layerSizes.size() < 3)
if (layerSizes.size() < 2)
{
throw std::exception("A net needs at least 3 layers");
throw std::exception("A net needs at least 2 layers");
}
for (size_t numNeurons : layerSizes)

View File

@@ -9,22 +9,27 @@ int main()
{
std::cout << "Neuro running" << std::endl;
std::vector<double> inputValues = { 0.1, 0.2, 0.8 };
std::vector<double> targetValues = { 0.8 };
Net myNet({ 3, 2, 1 });
Net myNet({ inputValues.size(), 4, targetValues.size() });
for (int i = 0; i < 200; ++i)
for (int i = 0; i < 100000; ++i)
{
std::vector<double> inputValues =
{
std::rand() / (double)RAND_MAX,
std::rand() / (double)RAND_MAX,
std::rand() / (double)RAND_MAX
};
std::vector<double> targetValues = { inputValues[2] };
myNet.feedForward(inputValues);
std::vector<double> outputValues = myNet.getOutput();
std::cout << "Result: ";
for (double &value : outputValues)
{
std::cout << value << " ";
}
double error = outputValues[0] - targetValues[0];
std::cout << "Error: ";
std::cout << std::abs(error);
std::cout << std::endl;
myNet.backProp(targetValues);