perceptron/main.cpp


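// A from-scratch multilayer perceptron, trained with backpropagation to learn XOR.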
#include <cmath>
#include <cstdlib> // rand()
#include <vector>
// #include <execution>
// #include <algorithm>
// #include <atomic>
#include <iostream>
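// A single unit: a weight vector and a bias, evaluated through an activation.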
struct Perceptron {
    std::vector<float> _weights;
    float _bias;
    // Initialize each weight and the bias uniformly in [-1, 1).
    Perceptron(size_t n) : _weights(n) {
        for (size_t i = 0; i < n; ++i) {
            _weights[i] = (rand() % 2000 - 1000) / 1000.0f;
        }
        _bias = (rand() % 2000 - 1000) / 1000.0f;
    }
    // Weighted sum of the inputs plus the bias, squashed by the activation.
    float evaluate(const std::vector<float>& inputs, float (*const activation)(float)) {
        float sum = 0.0f;
        for (size_t i = 0; i < inputs.size(); ++i) {
            sum += inputs[i] * _weights[i];
        }
        return activation(sum + _bias);
    }
};
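// A fully connected layer: n perceptrons over i inputs, sharing one
// activation function and its derivative (the derivative is used in training).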
struct Layer {
    std::vector<Perceptron> _perceptrons;
    float (*const _activation)(float);
    float (*const _actDeriv)(float);
    Layer(size_t n, size_t i, float (*const activation)(float), float (*const actDeriv)(float))
        : _activation(activation), _actDeriv(actDeriv) {
        // Construct each perceptron separately so every one draws its own
        // random weights (a fill-constructed vector would copy one perceptron
        // n times, giving the whole layer identical initial weights).
        _perceptrons.reserve(n);
        for (size_t j = 0; j < n; ++j) {
            _perceptrons.emplace_back(i);
        }
    }
    std::vector<float> evaluate(const std::vector<float>& inputs) {
        std::vector<float> outputs;
        outputs.reserve(_perceptrons.size());
        for (size_t i = 0; i < _perceptrons.size(); ++i) {
            outputs.push_back(_perceptrons[i].evaluate(inputs, _activation));
        }
        // Parallel alternative (requires <execution> and <algorithm>, and
        // outputs must be pre-sized rather than empty):
        // std::transform(std::execution::par_unseq, _perceptrons.begin(), _perceptrons.end(), outputs.begin(), [&](Perceptron& perceptron) {
        //     return perceptron.evaluate(inputs, _activation);
        // });
        return outputs;
    }
};
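// A feed-forward network built as a stack of fully connected layers.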
struct NeuralNetwork {
private:
    std::vector<Layer> _layers;
public:
    NeuralNetwork() = default;
    // n: perceptrons in the new layer; i: inputs per perceptron, which must
    // match the previous layer's width (or the network's input width).
    void addLayer(size_t n, size_t i, float (*const activation)(float), float (*const actDeriv)(float)) {
        _layers.emplace_back(n, i, activation, actDeriv);
    }
    // Forward pass: feed the inputs through each layer in order.
    std::vector<float> eval(const std::vector<float>& inputs) {
        std::vector<float> outputs = inputs;
        for (size_t i = 0; i < _layers.size(); ++i) {
            outputs = _layers[i].evaluate(outputs);
        }
        return outputs;
    }
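    // Backpropagation, one sample at a time (stochastic gradient descent).
    // With t the target, y a unit's post-activation output, and f' the
    // activation derivative supplied to the layer:
    //   output layer:  delta_k = (t_k - y_k) * f'(y_k)
    //   hidden layer:  delta_k = (sum_l w_lk * delta_l) * f'(y_k)
    // Every weight then moves by learningRate * delta * (its input).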
    void train(const std::vector<std::vector<float>>& inputs, const std::vector<std::vector<float>>& outputs, float learningRate, int epochs) {
        for (int i = 0; i < epochs; ++i) {
            float avgError = 0.0f;
            for (size_t sample = 0; sample < inputs.size(); ++sample) {
                // Forward pass, recording every layer's inputs and outputs.
                std::vector<std::vector<float>> layerOutputs;
                std::vector<std::vector<float>> layerInputs;
                layerInputs.push_back(inputs[sample]);
                for (size_t j = 0; j < _layers.size(); ++j) {
                    layerOutputs.push_back(_layers[j].evaluate(layerInputs[j]));
                    layerInputs.push_back(layerOutputs[j]);
                }
                // Output error: target minus prediction. Accumulate the
                // reported error here, at the network's output.
                std::vector<float> error(outputs[sample].size());
                for (size_t j = 0; j < outputs[sample].size(); ++j) {
                    error[j] = outputs[sample][j] - layerOutputs.back()[j];
                    avgError += std::abs(error[j]);
                }
                // Backward pass: compute every layer's delta with the current
                // weights, before any weight is updated.
                std::vector<std::vector<float>> deltas(_layers.size());
                for (int j = static_cast<int>(_layers.size()) - 1; j >= 0; --j) {
                    deltas[j].resize(_layers[j]._perceptrons.size());
                    for (size_t k = 0; k < _layers[j]._perceptrons.size(); ++k) {
                        if (j == static_cast<int>(_layers.size()) - 1) {
                            deltas[j][k] = error[k] * _layers[j]._actDeriv(layerOutputs[j][k]);
                        } else {
                            float sum = 0.0f;
                            for (size_t l = 0; l < _layers[j + 1]._perceptrons.size(); ++l) {
                                sum += _layers[j + 1]._perceptrons[l]._weights[k] * deltas[j + 1][l];
                            }
                            deltas[j][k] = sum * _layers[j]._actDeriv(layerOutputs[j][k]);
                        }
                    }
                }
                // Gradient step on every weight and bias.
                for (size_t j = 0; j < _layers.size(); ++j) {
                    for (size_t k = 0; k < _layers[j]._perceptrons.size(); ++k) {
                        for (size_t l = 0; l < layerInputs[j].size(); ++l) {
                            _layers[j]._perceptrons[k]._weights[l] += learningRate * deltas[j][k] * layerInputs[j][l];
                        }
                        _layers[j]._perceptrons[k]._bias += learningRate * deltas[j][k];
                    }
                }
            }
            avgError /= outputs[0].size() * outputs.size();
            if (i % 1000 == 0)
                std::cout << "epoch: " << i << " error: " << avgError << "\n";
        }
    }
};
// Logistic activation in (0, 1). std::exp's float overload is used here;
// std::expf is not reliably declared by <cmath> on all implementations.
float sigmoid(float x) {
    return 1.0f / (1.0f + std::exp(-x));
}
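// Expects the sigmoid's *output*, not its input: since the training loop
// stores post-activation values, s'(z) = s(z) * (1 - s(z)) applies directly.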
float sigmoidDerivative(float x) {
    return x * (1 - x);
}
int main() {
    // 2 inputs -> 2 hidden sigmoid units -> 1 sigmoid output.
    NeuralNetwork nn;
    nn.addLayer(2, 2, sigmoid, sigmoidDerivative);
    nn.addLayer(1, 2, sigmoid, sigmoidDerivative);
    // The XOR truth table: inputs and matching targets.
    std::vector<std::vector<float>> ref = {
        {0, 0},
        {0, 1},
        {1, 0},
        {1, 1}
    };
    std::vector<std::vector<float>> out = {
        {0},
        {1},
        {1},
        {0}
    };
    nn.train(ref, out, 0.1f, 100000);
    std::cout << "0, 0: " << nn.eval({0, 0})[0] << "\n";
    std::cout << "0, 1: " << nn.eval({0, 1})[0] << "\n";
    std::cout << "1, 0: " << nn.eval({1, 0})[0] << "\n";
    std::cout << "1, 1: " << nn.eval({1, 1})[0] << "\n";
    return 0;
}
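
// A typical build line, assuming a C++17 compiler (the commented-out parallel
// std::transform would additionally need -ltbb when using libstdc++):
//   g++ -std=c++17 -O2 main.cpp -o perceptron
// After training, the four printed values should sit near 0, 1, 1, 0.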