
colvarcomp_neuralnetwork.C

#if (__cplusplus >= 201103L)

#include "colvarmodule.h"
#include "colvarvalue.h"
#include "colvarparse.h"
#include "colvar.h"
#include "colvarcomp.h"
#include "colvar_neuralnetworkcompute.h"

using namespace neuralnetworkCV;

colvar::neuralNetwork::neuralNetwork(std::string const &conf): linearCombination(conf) {
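    // A hypothetical configuration sketch (file names invented for illustration;
    // the keywords match the ones parsed below, and "tanh" is assumed to be a
    // key registered in activation_function_map):
    //
    //   neuralNetwork {
    //       // ... sub-component definitions inherited from linearCombination ...
    //       output_component    0
    //       layer1_WeightsFile  dense1_weights.txt
    //       layer1_BiasesFile   dense1_biases.txt
    //       layer1_activation   tanh
    //       layer2_WeightsFile  dense2_weights.txt
    //       layer2_BiasesFile   dense2_biases.txt
    //       layer2_activation   tanh
    //   }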
    set_function_type("neuralNetwork");
    // the output of the neural network consists of multiple values;
    // read the "output_component" key to determine which one to use
    get_keyval(conf, "output_component", m_output_index);
    // read weight files
    bool has_weight_files = true;
    size_t num_layers_weight = 0;
    std::vector<std::string> weight_files;
    while (has_weight_files) {
        std::string lookup_key = std::string{"layer"} + cvm::to_str(num_layers_weight + 1) + std::string{"_WeightsFile"};
        if (key_lookup(conf, lookup_key.c_str())) {
            std::string weight_filename;
            get_keyval(conf, lookup_key.c_str(), weight_filename, std::string(""));
            weight_files.push_back(weight_filename);
            cvm::log(std::string{"Will read layer["} + cvm::to_str(num_layers_weight + 1) + std::string{"] weights from "} + weight_filename + '\n');
            ++num_layers_weight;
        } else {
            has_weight_files = false;
        }
    }
    // read bias files
    bool has_bias_files = true;
    size_t num_layers_bias = 0;
    std::vector<std::string> bias_files;
    while (has_bias_files) {
        std::string lookup_key = std::string{"layer"} + cvm::to_str(num_layers_bias + 1) + std::string{"_BiasesFile"};
        if (key_lookup(conf, lookup_key.c_str())) {
            std::string bias_filename;
            get_keyval(conf, lookup_key.c_str(), bias_filename, std::string(""));
            bias_files.push_back(bias_filename);
            cvm::log(std::string{"Will read layer["} + cvm::to_str(num_layers_bias + 1) + std::string{"] biases from "} + bias_filename + '\n');
            ++num_layers_bias;
        } else {
            has_bias_files = false;
        }
    }
    // read activation function strings
    bool has_activation_functions = true;
    size_t num_activation_functions = 0;
    // pair(is_custom_function, function_string)
    std::vector<std::pair<bool, std::string>> activation_functions;
    while (has_activation_functions) {
        std::string lookup_key = std::string{"layer"} + cvm::to_str(num_activation_functions + 1) + std::string{"_activation"};
        std::string lookup_key_custom = std::string{"layer"} + cvm::to_str(num_activation_functions + 1) + std::string{"_custom_activation"};
        if (key_lookup(conf, lookup_key.c_str())) {
            // Ok, this is not a custom function
            std::string function_name;
            get_keyval(conf, lookup_key.c_str(), function_name, std::string(""));
            if (activation_function_map.find(function_name) == activation_function_map.end()) {
                cvm::error("Unknown activation function name: \"" + function_name + "\".\n");
                return;
            }
            activation_functions.push_back(std::make_pair(false, function_name));
            cvm::log(std::string{"The activation function for layer["} + cvm::to_str(num_activation_functions + 1) + std::string{"] is "} + function_name + '\n');
            ++num_activation_functions;
#ifdef LEPTON
        } else if (key_lookup(conf, lookup_key_custom.c_str())) {
            std::string function_expression;
            get_keyval(conf, lookup_key_custom.c_str(), function_expression, std::string(""));
            activation_functions.push_back(std::make_pair(true, function_expression));
            cvm::log(std::string{"The custom activation function for layer["} + cvm::to_str(num_activation_functions + 1) + std::string{"] is "} + function_expression + '\n');
            ++num_activation_functions;
#endif
        } else {
            has_activation_functions = false;
        }
    }
    // expect the three counts to be equal
    if ((num_layers_weight != num_layers_bias) || (num_layers_bias != num_activation_functions)) {
        cvm::error("Error: the numbers of weights, biases and activation functions do not match.\n");
        return;
    }
    // std::make_unique is only available in C++14, so construct with new:
    // nn = std::make_unique<neuralnetworkCV::neuralNetworkCompute>();
    nn = std::unique_ptr<neuralnetworkCV::neuralNetworkCompute>(new neuralnetworkCV::neuralNetworkCompute());
    for (size_t i_layer = 0; i_layer < num_layers_weight; ++i_layer) {
        denseLayer d;
#ifdef LEPTON
        if (activation_functions[i_layer].first) {
            // use custom function as activation function
            try {
                d = denseLayer(weight_files[i_layer], bias_files[i_layer], activation_functions[i_layer].second);
            } catch (std::exception &ex) {
                cvm::error("Error on initializing layer " + cvm::to_str(i_layer) + " (" + ex.what() + ")\n", COLVARS_INPUT_ERROR);
                return;
            }
        } else {
#endif
            // query the map of supported activation functions
            const auto& f = activation_function_map[activation_functions[i_layer].second].first;
            const auto& df = activation_function_map[activation_functions[i_layer].second].second;
            try {
                d = denseLayer(weight_files[i_layer], bias_files[i_layer], f, df);
            } catch (std::exception &ex) {
                cvm::error("Error on initializing layer " + cvm::to_str(i_layer) + " (" + ex.what() + ")\n", COLVARS_INPUT_ERROR);
                return;
            }
#ifdef LEPTON
        }
#endif
        // add a new dense layer to network
        if (nn->addDenseLayer(d)) {
            if (cvm::debug()) {
                // show information about the neural network
                cvm::log("Layer " + cvm::to_str(i_layer) + " : has " + cvm::to_str(d.getInputSize()) + " input nodes and " + cvm::to_str(d.getOutputSize()) + " output nodes.\n");
                for (size_t i_output = 0; i_output < d.getOutputSize(); ++i_output) {
                    for (size_t j_input = 0; j_input < d.getInputSize(); ++j_input) {
                        cvm::log("    weights[" + cvm::to_str(i_output) + "][" + cvm::to_str(j_input) + "] = " + cvm::to_str(d.getWeight(i_output, j_input)));
                    }
                    cvm::log("    biases[" + cvm::to_str(i_output) + "] = " + cvm::to_str(d.getBias(i_output)) + "\n");
                }
            }
        } else {
            cvm::error("Error: failed to add a new dense layer.\n");
            return;
        }
    }
    nn->input().resize(cv.size());
}

colvar::neuralNetwork::~neuralNetwork() {
}

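// calc_value() evaluates x = NN_k(c_1 * s_1^p_1, ..., c_n * s_n^p_n), where k is
// m_output_index and, for each scalar sub-CV s_i, c_i is sup_coeff and p_i is
// sup_np (a summary of the code below; the input scaling is inherited from
// linearCombination).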
void colvar::neuralNetwork::calc_value() {
    x.reset();
    for (size_t i_cv = 0; i_cv < cv.size(); ++i_cv) {
        cv[i_cv]->calc_value();
        const colvarvalue& current_cv_value = cv[i_cv]->value();
        // the current NN implementation assumes that all sub-CV values are scalars
        if (current_cv_value.type() == colvarvalue::type_scalar) {
            nn->input()[i_cv] = cv[i_cv]->sup_coeff * (cvm::pow(current_cv_value.real_value, cv[i_cv]->sup_np));
        } else {
            cvm::error("Error: neuralNetwork expects scalar components, but a non-scalar component was used.\n");
            return;
        }
    }
    nn->compute();
    x = nn->getOutput(m_output_index);
}

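// Gradient chain rule (a sketch of what the loop below computes): for each atom
// position r,
//   dx/dr = sum_i dNN_k/d(input_i) * d(c_i * s_i^p_i)/d(s_i) * d(s_i)/dr,
// where nn->getGradient(m_output_index, i_cv) supplies dNN_k/d(input_i) and
// getPolynomialFactorOfCVGradient(i_cv) is assumed to supply c_i * p_i * s_i^(p_i - 1).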
void colvar::neuralNetwork::calc_gradients() {
    for (size_t i_cv = 0; i_cv < cv.size(); ++i_cv) {
        cv[i_cv]->calc_gradients();
        if (cv[i_cv]->is_enabled(f_cvc_explicit_gradient)) {
            const cvm::real factor = nn->getGradient(m_output_index, i_cv);
            const cvm::real factor_polynomial = getPolynomialFactorOfCVGradient(i_cv);
            for (size_t j_elem = 0; j_elem < cv[i_cv]->value().size(); ++j_elem) {
                for (size_t k_ag = 0 ; k_ag < cv[i_cv]->atom_groups.size(); ++k_ag) {
                    for (size_t l_atom = 0; l_atom < (cv[i_cv]->atom_groups)[k_ag]->size(); ++l_atom) {
                        (*(cv[i_cv]->atom_groups)[k_ag])[l_atom].grad = factor_polynomial * factor * (*(cv[i_cv]->atom_groups)[k_ag])[l_atom].grad;
                    }
                }
            }
        }
    }
}

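// apply_force() propagates a scalar force on x back to the sub-CVs using the
// same chain-rule factors as calc_gradients(); components with explicit
// gradients receive it directly through their atom groups.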
void colvar::neuralNetwork::apply_force(colvarvalue const &force) {
    for (size_t i_cv = 0; i_cv < cv.size(); ++i_cv) {
        // If this CV uses explicit gradients, the atomic gradients have already been calculated,
        // so we can apply the force to the atom groups directly
        if (cv[i_cv]->is_enabled(f_cvc_explicit_gradient)) {
            for (size_t k_ag = 0 ; k_ag < cv[i_cv]->atom_groups.size(); ++k_ag) {
                (cv[i_cv]->atom_groups)[k_ag]->apply_colvar_force(force.real_value);
            }
        } else {
            // Compute factors for polynomial combinations
            const cvm::real factor_polynomial = getPolynomialFactorOfCVGradient(i_cv);
            const cvm::real factor = nn->getGradient(m_output_index, i_cv);
            colvarvalue cv_force = force.real_value * factor * factor_polynomial;
            cv[i_cv]->apply_force(cv_force);
        }
    }
}

#endif
