Main Page   Namespace List   Class Hierarchy   Alphabetical List   Compound List   File List   Namespace Members   Compound Members   File Members   Related Pages  

colvar_neuralnetworkcompute.h

Go to the documentation of this file.
00001 // -*- c++ -*-
00002 
00003 // This file is part of the Collective Variables module (Colvars).
00004 // The original version of Colvars and its updates are located at:
00005 // https://github.com/Colvars/colvars
00006 // Please update all Colvars source files before making any changes.
00007 // If you wish to distribute your changes, please submit them to the
00008 // Colvars repository at GitHub.
00009 
00010 #if (__cplusplus >= 201103L)
00011 #ifndef NEURALNETWORKCOMPUTE_H
00012 #define NEURALNETWORKCOMPUTE_H
00013 
00014 #include <vector>
00015 #include <functional>
00016 #include <string>
00017 #include <cmath>
00018 #include <memory>
00019 #include <map>
00020 
00021 #ifdef LEPTON
00022 #include "Lepton.h"
00023 #endif
00024 
00025 namespace neuralnetworkCV {
00026 /// Map from the name of an activation function to a {f, df} pair of function
00026 /// and first derivative (cf. denseLayer::setActivationFunction)
00027 extern std::map<std::string, std::pair<std::function<double(double)>, std::function<double(double)>>> activation_function_map;
00028 
00029 #ifdef LEPTON
00030 // Custom activation function defined by a user-supplied Lepton expression string
00031 class customActivationFunction {
00032 public:
00033     /// Default constructor (expression string default-constructs to empty)
00034     customActivationFunction();
00035     /// Construct from a Lepton mathematical expression string
00036     customActivationFunction(const std::string& expression_string);
00037     /// Copy constructor
00038     customActivationFunction(const customActivationFunction& source);
00039     /// Copy assignment
00040     customActivationFunction& operator=(const customActivationFunction& source);
00041     /// Replace the current expression string
00042     void setExpression(const std::string& expression_string);
00043     /// Return the current expression string
00044     std::string getExpression() const;
00045     /// Evaluate the function value f(x)
00046     double evaluate(double x) const;
00047     /// Evaluate the derivative f'(x)
00048     double derivative(double x) const;
00049 private:
00050     std::string expression;                                          // the Lepton expression string
00051     std::unique_ptr<Lepton::CompiledExpression> value_evaluator;     // compiled evaluator for f(x)
00052     std::unique_ptr<Lepton::CompiledExpression> gradient_evaluator;  // compiled evaluator for f'(x)
00053     double* input_reference;       // non-owning; presumably points into value_evaluator's variable storage — confirm in the .cpp
00054     double* derivative_reference;  // non-owning; presumably points into gradient_evaluator's variable storage — confirm in the .cpp
00055 };
00056 #endif
00057 
00058 class denseLayer {
00059 private:
00060     size_t m_input_size;       // number of input nodes
00061     size_t m_output_size;      // number of output nodes
00062     std::function<double(double)> m_activation_function;             // element-wise activation f
00063     std::function<double(double)> m_activation_function_derivative;  // its derivative df
00064 #ifdef LEPTON
00065     bool m_use_custom_activation;  // true when a Lepton custom expression is used instead of f/df
00066     customActivationFunction m_custom_activation_function;
00067 #else
00068     static const bool m_use_custom_activation = false;  // without Lepton, a custom activation is never available
00069 #endif
00070 
00071     std::vector<std::vector<double>> m_weights;  // weight matrix of this layer
00072     /// Biases, one per output node
00073     std::vector<double> m_biases;
00074 public:
00076     denseLayer() {}  // NOTE(review): leaves m_input_size/m_output_size (and m_use_custom_activation with LEPTON) uninitialized until readFromFile()/setActivationFunction() — confirm callers always do both
00077     /// Construct a dense layer from files on disk
00078     /// \param weights_file file containing the weight matrix
00079     /// \param biases_file  file containing the bias vector
00080     /// \param f            activation function
00081     /// \param df           derivative of the activation function
00082     denseLayer(const std::string& weights_file, const std::string& biases_file, const std::function<double(double)>& f, const std::function<double(double)>& df);
00083 #ifdef LEPTON
00084 
00085     /// Construct a dense layer from files on disk, with a custom activation
00086     /// function given as a Lepton expression string
00087     /// (see customActivationFunction above)
00088     denseLayer(const std::string& weights_file, const std::string& biases_file, const std::string& custom_activation_expression);
00089 #endif
00090 
00091     void readFromFile(const std::string& weights_file, const std::string& biases_file);  // load the weight matrix and bias vector from files
00092     /// Set the activation function f and its derivative df
00093     void setActivationFunction(const std::function<double(double)>& f, const std::function<double(double)>& df);
00094     /// Compute the output of this layer for a given input vector
00095     void compute(const std::vector<double>& input, std::vector<double>& output) const;
00096     /// Compute a single gradient element (presumably d output[i] / d input[j]) — confirm in the .cpp
00097     double computeGradientElement(const std::vector<double>& input, const size_t i, const size_t j) const;
00098     /// Compute the gradient of all outputs with respect to all inputs into output_grad
00099     void computeGradient(const std::vector<double>& input, std::vector<std::vector<double>>& output_grad) const;
00100     /// Number of input nodes
00101     size_t getInputSize() const {
00102         return m_input_size;
00103     }
00104     /// Number of output nodes
00105     size_t getOutputSize() const {
00106         return m_output_size;
00107     }
00108     /// Element (i,j) of the weight matrix (unchecked access)
00109     double getWeight(size_t i, size_t j) const {
00110         return m_weights[i][j];
00111     }
00112     double getBias(size_t i) const {  // bias of the i-th output node (unchecked access)
00113         return m_biases[i];
00114     }
00115     ~denseLayer() {}
00116 };
00117 
00118 class neuralNetworkCompute {
00119 private:
00120     std::vector<denseLayer> m_dense_layers;  // the layers, in forward order
00121     std::vector<double> m_input;             // input vector of the whole network
00122     /// Output vectors of every layer
00123     std::vector<std::vector<double>> m_layers_output;
00124     std::vector<std::vector<std::vector<double>>> m_grads_tmp;  // per-layer gradient scratch space
00125     std::vector<std::vector<double>> m_chained_grad;            // gradients chained through all layers (read by getGradient())
00126 private:
00127     /// Helper: matrix product A*B (presumably used to chain per-layer gradients — confirm in the .cpp)
00128     static std::vector<std::vector<double>> multiply_matrix(const std::vector<std::vector<double>>& A, const std::vector<std::vector<double>>& B);
00129 public:
00130     neuralNetworkCompute(): m_dense_layers(0), m_layers_output(0) {}  // empty network
00131     neuralNetworkCompute(const std::vector<denseLayer>& dense_layers);  // construct from a prebuilt list of layers
00132     bool addDenseLayer(const denseLayer& layer);  // append a layer; bool result semantics defined in the .cpp
00133     // direct (mutable) access to the input buffer, avoiding copies, for faster computation
00134     const std::vector<double>& input() const {return m_input;}
00135     std::vector<double>& input() {return m_input;}
00136     /// Run the forward pass on the current m_input (presumably also fills m_chained_grad — confirm in the .cpp)
00137     void compute();
00138     double getOutput(const size_t i) const {return m_layers_output.back()[i];}  // i-th final output; valid only after compute()
00139     double getGradient(const size_t i, const size_t j) const {return m_chained_grad[i][j];}  // reads m_chained_grad; presumably d output[i] / d input[j] after compute() — confirm
00140     /// Access the i-th dense layer (unchecked access)
00141     const denseLayer& getLayer(const size_t i) const {return m_dense_layers[i];}
00142     /// Number of dense layers in the network
00143     size_t getNumberOfLayers() const {return m_dense_layers.size();}
00144 };
00145 
00146 }
00147 #endif
00148 #endif

Generated on Tue Apr 30 02:43:26 2024 for VMD (current) by doxygen 1.2.14, written by Dimitri van Heesch, © 1997-2002