c++调用tensorflow源码编译自己程序不通过的问题(急!)

老师您好!
    本人的目的是创建一个 C++ 程序,调用 TensorFlow 训练出来的 .pb 模型文件进行预测,但是在调用 TensorFlow 源码进行编译时不通过,程序包含的头文件如下:
#include <iostream>
#include <vector>
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/platform/env.h"

using namespace tensorflow;
 
编译报错为:
1>  main.cpp
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\io\coded_stream.h(870): warning C4800: “google::protobuf::internal::AtomicWord”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\io\coded_stream.h(874): warning C4800: “google::protobuf::internal::Atomic32”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\generated_message_util.h(160): warning C4800: “google::protobuf::uint32”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\third_party\eigen3\unsupported\eigen\cxx11\tensor(1): fatal error C1014: 包含文件太多: 深度 = 1024
1>  ann_model_loader.cpp
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\io\coded_stream.h(870): warning C4800: “google::protobuf::internal::AtomicWord”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\io\coded_stream.h(874): warning C4800: “google::protobuf::internal::Atomic32”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\tensorflow\google\protobuf\generated_message_util.h(160): warning C4800: “google::protobuf::uint32”: 将值强制为布尔值“true”或“false”(性能警告)
1>g:\study\tensorflow\src_code\tensorflow\third_party\eigen3\unsupported\eigen\cxx11\tensor(1): fatal error C1014: 包含文件太多: 深度 = 1024
 

fish - Hadooper

赞同来自:

"包含文件太多"大多是因为头文件被递归包含了。在头文件里加上 #ifndef 包含保护试试?

yuanmartin

赞同来自:

/*
 *  ann_model_loader.h
 *
 *  Created on: 2017年7月7日
 *      Author: Derek
 */
#ifndef ANN_MODEL_LOADER_H_
#define ANN_MODEL_LOADER_H_

#include "model_loader_base.h"
//#include "tensorflow/core/public/session.h"
//#include "tensorflow/core/platform/env.h"

using namespace tensorflow;

namespace tf_model {

    /**
     * @brief: Model Loader for Feed Forward Neural Network
     * */
    class ANNFeatureAdapter : public FeatureAdapterBase {
    public:
        ANNFeatureAdapter();
        ~ANNFeatureAdapter();
        void assign(std::string tname, std::vector<double>*) override; // (tensor_name, tensor)
    };

    class ANNModelLoader : public ModelLoaderBase {
    public:
        ANNModelLoader();
        ~ANNModelLoader();
        int load(tensorflow::Session*, const std::string) override;    //Load graph file and new session
        int predict(tensorflow::Session*, const FeatureAdapterBase&, const std::string, double*) override;
    };

}
#endif /* ANN_MODEL_LOADER_H_ */

我的代码都做了这个限制。主要是 TensorFlow 源码中 tensorflow\third_party\eigen3\unsupported\eigen\cxx11\tensor 这个文件的内容如下:

#include "unsupported/Eigen/CXX11/Tensor"

#ifdef _WIN32
#ifndef SLEEP_FUNC_HEADER_GUARD
#define SLEEP_FUNC_HEADER_GUARD
inline void sleep(unsigned int seconds) { Sleep(1000*seconds); }
#endif

// On Windows, Eigen will include Windows.h, which defines various
// macros that conflict with TensorFlow symbols. Undefine them here to
// prevent clashes.
#undef DeleteFile
#undef ERROR
#undef LoadLibrary
#endif  // _WIN32

(注:这个包装文件本身没有包含保护,而 Windows 文件系统不区分大小写——如果 third_party\eigen3 目录也在工程的附加包含目录里,上面的 #include "unsupported/Eigen/CXX11/Tensor" 会再次匹配到这个包装文件自身,形成递归包含,最终报 fatal error C1014"包含文件太多: 深度 = 1024"。应调整包含目录顺序,让它解析到真正的 Eigen 头文件。)

yuanmartin

赞同来自:

我的源码如下: /* *  model_loader_base.h * *  Created on: 2017年7月7日 *      Author: Derek */ #ifndef MODEL_LOADER_BASE_H_ #define MODEL_LOADER_BASE_H_ #include <iostream> #include <vector> #include "tensorflow/core/public/session.h" #include "tensorflow/core/platform/env.h" using namespace tensorflow; namespace tf_model {     /**     * Base Class for feature adapter, common interface convert input format to tensors     * */     class FeatureAdapterBase {     public:         FeatureAdapterBase() {};         virtual ~FeatureAdapterBase() {};         virtual void assign(std::string, std::vector<double>*) = 0;  // tensor_name, tensor_double_vector         std::vector<std::pair<std::string, tensorflow::Tensor> > input;     };     class ModelLoaderBase {     public:         ModelLoaderBase() {};         virtual ~ModelLoaderBase() {};         virtual int load(tensorflow::Session*, const std::string) = 0;     //pure virutal function load method         virtual int predict(tensorflow::Session*, const FeatureAdapterBase&, const std::string, double*) = 0;         tensorflow::GraphDef graphdef; //Graph Definition for current model     }; } #endif /* MODEL_LOADER_BASE_H_ */ /* *  ann_model_loader.h * *  Created on: 2017年7月7日 *      Author: Derek */ #ifndef ANN_MODEL_LOADER_H_ #define ANN_MODEL_LOADER_H_ #include "model_loader_base.h" //#include "tensorflow/core/public/session.h" //#include "tensorflow/core/platform/env.h" using namespace tensorflow; namespace tf_model {     /**     * @brief: Model Loader for Feed Forward Neural Network     * */     class ANNFeatureAdapter : public FeatureAdapterBase {     public:         ANNFeatureAdapter();         ~ANNFeatureAdapter();         void assign(std::string tname, std::vector<double>*) override; // (tensor_name, tensor)     };     class ANNModelLoader : public ModelLoaderBase {     public:         ANNModelLoader();         ~ANNModelLoader();         int load(tensorflow::Session*, const std::string) override;    //Load graph file and new 
session         int predict(tensorflow::Session*, const FeatureAdapterBase&, const std::string, double*) override;     }; } #endif /* ANN_MODEL_LOADER_H_ */     /* * ann_model_loader.cpp * *  Created on: 2017年7月7日 *      Author: Derek */ #include <iostream> #include <vector> #include <map> #include "ann_model_loader.h" //#include <tensor_shape.h> using namespace tensorflow; namespace tf_model {     /**     * ANNFeatureAdapter Implementation     * */     ANNFeatureAdapter::ANNFeatureAdapter() {     }     ANNFeatureAdapter::~ANNFeatureAdapter() {     }     /*     * @brief: Feature Adapter: convert 1-D double vector to Tensor, shape [1, ndim]     * @param: std::string tname, tensor name;     * @parma: std::vector<double>*, input vector;     * */     void ANNFeatureAdapter::assign(std::string tname, std::vector<double>* vec) {         //Convert input 1-D double vector to Tensor         int ndim = vec->size();         if (ndim == 0) {             std::cout << "WARNING: Input Vec size is 0 ..." 
<< std::endl;             return;         }         // Create New tensor and set value         Tensor x(tensorflow::DT_FLOAT, tensorflow::TensorShape({ 1, ndim })); // New Tensor shape [1, ndim]         auto x_map = x.tensor<float, 2>();         for (int j = 0; j < ndim; j++) {             x_map(0, j) = (*vec)[j];         }         // Append <tname, Tensor> to input         input.push_back(std::pair<std::string, tensorflow::Tensor>(tname, x));     }     /**     * ANN Model Loader Implementation     * */     ANNModelLoader::ANNModelLoader() {     }     ANNModelLoader::~ANNModelLoader() {     }     /**     * @brief: load the graph and add to Session     * @param: Session* session, add the graph to the session     * @param: model_path absolute path to exported protobuf file *.pb     * */     int ANNModelLoader::load(tensorflow::Session* session, const std::string model_path) {         //Read the pb file into the grapgdef member         tensorflow::Status status_load = ReadBinaryProto(Env::Default(), model_path, &graphdef);         if (!status_load.ok()) {             std::cout << "ERROR: Loading model failed..." << model_path << std::endl;             std::cout << status_load.ToString() << "\n";             return -1;         }         // Add the graph to the session         tensorflow::Status status_create = session->Create(graphdef);         if (!status_create.ok()) {             std::cout << "ERROR: Creating graph in session failed..." 
<< status_create.ToString() << std::endl;             return -1;         }         return 0;     }     /**     * @brief: Making new prediction     * @param: Session* session     * @param: FeatureAdapterBase, common interface of input feature     * @param: std::string, output_node, tensorname of output node     * @param: double, prediction values     * */     int ANNModelLoader::predict(tensorflow::Session* session, const FeatureAdapterBase& input_feature,         const std::string output_node, double* prediction) {         // The session will initialize the outputs         std::vector<tensorflow::Tensor> outputs;         //shape  [batch_size]                                                          // @input: vector<pair<string, tensor> >, feed_dict                                                          // @output_node: std::string, name of the output node op, defined in the protobuf file         tensorflow::Status status = session->Run(input_feature.input, { output_node }, {}, &outputs);         if (!status.ok()) {             std::cout << "ERROR: prediction failed..." 
<< status.ToString() << std::endl;             return -1;         }         //Fetch output value         std::cout << "Output tensor size:" << outputs.size() << std::endl;         for (std::size_t i = 0; i < outputs.size(); i++) {             std::cout << outputs[i].DebugString();         }         std::cout << std::endl;         Tensor t = outputs[0];                   // Fetch the first tensor         int ndim = t.shape().dims();             // Get the dimension of the tensor         auto tmap = t.tensor<float, 2>();        // Tensor Shape: [batch_size, target_class_num]         int output_dim = t.shape().dim_size(1);  // Get the target_class_num from 1st dimension         std::vector<double> tout;         // Argmax: Get Final Prediction Label and Probability         int output_class_id = -1;         double output_prob = 0.0;         for (int j = 0; j < output_dim; j++) {             std::cout << "Class " << j << " prob:" << tmap(0, j) << "," << std::endl;             if (tmap(0, j) >= output_prob) {                 output_class_id = j;                 output_prob = tmap(0, j);             }         }         // Log         std::cout << "Final class id: " << output_class_id << std::endl;         std::cout << "Final class prob: " << output_prob << std::endl;         (*prediction) = output_prob;   // Assign the probability to prediction         return 0;     } }   #include <iostream> #include "ann_model_loader.h" using namespace tensorflow; int main(int argc, char* argv[])  {     if (argc != 2)      {         std::cout << "WARNING: Input Args missing" << std::endl;         return 0;     }     std::string model_path = argv[1];  // Model_path *.pb file                                        // TensorName pre-defined in python file, Need to extract values from tensors     std::string input_tensor_name = "inputs";     std::string output_tensor_name = "output_node";     // Create New Session     Session* session;     Status status = NewSession(SessionOptions(), 
&session);     if (!status.ok())      {         std::cout << status.ToString() << "\n";         return 0;     }     // Create prediction demo     tf_model::ANNModelLoader model;  //Create demo for prediction     if (0 != model.load(session, model_path))      {         std::cout << "Error: Model Loading failed..." << std::endl;         return 0;     }     // Define Input tensor and Feature Adapter     // Demo example: [1.0, 1.0, 1.0, 1.0, 1.0] for Iris Example, including bias     int ndim = 5;     std::vector<double> input;     for (int i = 0; i < ndim; i++)      {         input.push_back(1.0);     }     // New Feature Adapter to convert vector to tensors dictionary     tf_model::ANNFeatureAdapter input_feat;     input_feat.assign(input_tensor_name, &input);   //Assign vec<double> to tensor                                                     // Make New Prediction     double prediction = 0.0;     if (0 != model.predict(session, input_feat, output_tensor_name, &prediction))      {         std::cout << "WARNING: Prediction failed..." << std::endl;     }     std::cout << "Output Prediction Value:" << prediction << std::endl;     return 0; }   老师辛苦帮忙看看,看看问题出在那

要回复问题请先登录注册