GTK+ IOStream  Beta
<< GTK+ >> add C++ IOStream operators to GTK+. Now with extra abilities ... like network serialisation
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
NeuralNetwork.H
Go to the documentation of this file.
1 /* Copyright 2000-2013 Matt Flax <flatmax@flatmax.org>
2  This file is part of GTK+ IOStream class set
3 
4  GTK+ IOStream is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; either version 2 of the License, or
7  (at your option) any later version.
8 
9  GTK+ IOStream is distributed in the hope that it will be useful,
10  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  GNU General Public License for more details.
13 
14  You should have received a copy of the GNU General Public License
15  along with GTK+ IOStream
16  */
17 #ifndef NEURALNETWORK_H_
18 #define NEURALNETWORK_H_
19 
20 #include <Eigen/Dense>
21 #include <vector>
22 using namespace std;
23 
27 template<typename TYPE>
28 class NeuralLayer {
29 protected:
30  Eigen::Matrix<TYPE, Eigen::Dynamic, Eigen::Dynamic> weights;
31  Eigen::Matrix<TYPE, Eigen::Dynamic, 1> bias;
32 public:
33 
34  Eigen::Matrix<TYPE, Eigen::Dynamic, 1> output;
35 
40  NeuralLayer(int inputSize, int outputSize) {
41  weights.resize(inputSize, outputSize);
42  bias.resize(1,outputSize);
43  output.resize(1,outputSize);
44  }
45 
51  template <typename Derived>
52  NeuralLayer(const Eigen::MatrixBase<Derived> &weightsIn, const Eigen::MatrixBase<Derived> &biasIn) {
53  weights=weightsIn;
54  bias=biasIn;
55  output.resize(bias.rows(),1);
56  }
57 
59  virtual ~NeuralLayer(void) {}
60 
66  virtual Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &activate(const Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &input) {
67 // cout<<"weights r,c "<<weights.rows()<<'\t'<<weights.cols()<<endl;
68 // cout<<"bias r,c "<<bias.rows()<<'\t'<<bias.cols()<<endl;
69 // cout<<"input r,c "<<input.rows()<<'\t'<<input.cols()<<endl;
70 // cout<<"output r,c "<<output.rows()<<'\t'<<output.cols()<<endl;
71  output=bias;
72  output.noalias()+=weights*input;
73  return output;
74  }
75 };
76 
80 template<typename TYPE>
81 class SigmoidLayer : public NeuralLayer<TYPE> {
82 public:
87  SigmoidLayer(int inputSize, int outputSize) : NeuralLayer<TYPE>(inputSize, outputSize) {
88  }
89 
95  template <typename Derived>
96  SigmoidLayer(const Eigen::MatrixBase<Derived> &weightsIn, const Eigen::MatrixBase<Derived> &biasIn) : NeuralLayer<TYPE>(weightsIn, biasIn) {
97  }
98 
100  virtual ~SigmoidLayer(void) {}
101 
107  virtual Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &activate(const Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &input) {
109  NeuralLayer<TYPE>::output=1./(1.+(-NeuralLayer<TYPE>::output).array().exp());
110 // cout<<"output "<<NeuralLayer<TYPE>::output<<endl;
112  }
113 };
114 
118 template<typename TYPE>
119 class TanhLayer : public NeuralLayer<TYPE> {
120 public:
125  TanhLayer(int inputSize, int outputSize) : NeuralLayer<TYPE>(inputSize, outputSize) {
126  }
127 
133  template <typename Derived>
134  TanhLayer(const Eigen::MatrixBase<Derived> &weightsIn, const Eigen::MatrixBase<Derived> &biasIn) : NeuralLayer<TYPE>(weightsIn, biasIn) {
135  }
136 
138  virtual ~TanhLayer(void) {}
139 
145  virtual Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &activate(const Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &input) {
147  NeuralLayer<TYPE>::output=2./(1.+(-2.*NeuralLayer<TYPE>::output).array().exp())-1.;
149  }
150 };
151 
152 /* Implements a neural layer with a scaled and offset tanh activation function
153 \tparam TYPE the precision of the data to use, e.g. float, double
154 */
155 //template<typename TYPE>
156 //class PosLayer : public NeuralLayer<TYPE> {
157 //public:
158  /* Generate a neural layer of particular size
159  \param inputSize The number of the inputs
160  \param outputSize The number of outputs
161  */
162 // PosLayer(int inputSize, int outputSize) : NeuralLayer<TYPE>(inputSize, outputSize) {
163 // }
164 
165  /* Generate a neural layer of particular size providing the weights
166  \param weightsIn The weights to set
167  \param biasIn The biases to set
168  \tparam Derived is used by Eigen's Curiously recurring template pattern (CRTP)
169  */
170 // template <typename Derived>
171 // PosLayer(const Eigen::MatrixBase<Derived> &weightsIn, const Eigen::MatrixBase<Derived> &biasIn) : NeuralLayer<TYPE>(weightsIn, biasIn) {
172 // }
173 
174  // Destructor
175 // virtual ~PosLayer(void) {}
176 
177  /* The positive only layer
178  Evaluate the neural network using the sigmoid as the activation function
179  \param input The input to this layer
180  \return The result of the layer after processing the input
181  */
182 // virtual Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &activate(const Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &input) {
183 // NeuralLayer<TYPE>::activate(input);
184 // NeuralLayer<TYPE>::output=(NeuralLayer<TYPE>::output>0.).select(NeuralLayer<TYPE>::output,0.);
185 // }
186 //};
187 
209 template<typename TYPE>
211 public:
213  NeuralNetwork(void) {}
214 
216  virtual ~NeuralNetwork(void) {}
217 
223  void activate(vector<NeuralLayer<TYPE> *> &layers, Eigen::Matrix<TYPE, Eigen::Dynamic, 1> &input) {
224  int layerCount=layers.size();
225  if (layerCount>0) {
226  // process the first layer
227  layers[0]->activate(input);
228  for (int i=1; i<layerCount; i++) {
229 // cout<<i<<endl;
230  layers[i]->activate(layers[i-1]->NeuralLayer<TYPE>::output);
231  }
232  }
233  }
234 };
235 #endif // NEURALNETWORK_H_