
TensorFlow - Setting up the build environment for developing programs with TensorFlow

2017-08-05 17:27

flyfish

Environment: Windows 7, Microsoft Visual Studio 2017 Community, x64 platform

Language: C++

The compiler settings are as follows.

C/C++ -> General -> Additional Include Directories (a quick include check appears after the list)

E:\lib\tensorflow

E:\lib\tensorflow\tensorflow\contrib\cmake\build

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\zlib_archive

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\gif_archive\giflib-5.1.4

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\png_archive

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\jpeg_archive

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\eigen_archive

E:\lib\tensorflow\third_party\eigen3

E:\lib\tensorflow\tensorflow\contrib\cmake\build\gemmlowp\src\gemmlowp

E:\lib\tensorflow\tensorflow\contrib\cmake\build\jsoncpp\src\jsoncpp

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\farmhash_archive

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\farmhash_archive\util

E:\lib\tensorflow\tensorflow\contrib\cmake\build\external\highwayhash

E:\lib\tensorflow\tensorflow\contrib\cmake\build\protobuf\src\protobuf\src

E:\lib\tensorflow\tensorflow\contrib\cmake\build\grpc\src\grpc\include
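
If these directories are picked up correctly, a header-only translation unit should already compile. The sketch below is a hypothetical check_includes.cpp (not part of the TensorFlow sources), built in a scratch project that uses the same Additional Include Directories; each #include maps to one of the paths above, so a missing directory shows up as a failing include rather than a confusing error deep inside a template.

// check_includes.cpp -- hypothetical compile-only smoke test for the include paths above.
#include "tensorflow/core/framework/graph.pb.h"  // generated header, lives in the cmake build tree
#include "tensorflow/core/framework/tensor.h"    // needs the TensorFlow source root
#include "unsupported/Eigen/CXX11/Tensor"        // needs third_party\eigen3 / eigen_archive
#include "google/protobuf/message.h"             // needs protobuf\src\protobuf\src

// Touch a few declarations so the headers are actually parsed into real types.
static_assert(sizeof(tensorflow::int64) == 8, "tensorflow platform types are visible");

inline tensorflow::int64 NumElementsOf(const tensorflow::TensorShape& shape) {
  return shape.num_elements();
}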

C/C++ -> Preprocessor -> Preprocessor Definitions (a compile-time check appears after the list)

WIN32

_WINDOWS

_DEBUG

EIGEN_AVOID_STL_ARRAY

NOMINMAX

_WIN32_WINNT=0x0A00

LANG_CXX11

COMPILER_MSVC

OS_WIN

_MBCS

WIN64

WIN32_LEAN_AND_MEAN

NOGDI

PLATFORM_WINDOWS

TENSORFLOW_USE_EIGEN_THREADPOOL

EIGEN_HAS_C99_MATH

TF_COMPILE_LIBRARY

CMAKE_INTDIR="Debug"
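
These macros match the ones TensorFlow's own CMake build passes to cl.exe. When one of them is dropped, the failure usually appears far from the cause (for example, the min/max macros from windows.h colliding with std::min/std::max when NOMINMAX is missing). A hypothetical check_defines.cpp like the sketch below, compiled with the same project settings, turns that into an immediate, readable error:

// check_defines.cpp -- hypothetical compile-time check that the project-level
// preprocessor definitions listed above actually reach the compiler.
#if !defined(COMPILER_MSVC) || !defined(PLATFORM_WINDOWS) || !defined(OS_WIN)
#error "TensorFlow platform macros (COMPILER_MSVC / PLATFORM_WINDOWS / OS_WIN) are missing"
#endif

#if !defined(NOMINMAX) || !defined(WIN32_LEAN_AND_MEAN) || !defined(NOGDI)
#error "Windows header tuning macros (NOMINMAX / WIN32_LEAN_AND_MEAN / NOGDI) are missing"
#endif

#if !defined(EIGEN_AVOID_STL_ARRAY)
#error "EIGEN_AVOID_STL_ARRAY is missing; Eigen tensor code may not build cleanly with MSVC"
#endif

#include <windows.h>
#include <algorithm>

// With NOMINMAX in place, windows.h does not define min/max macros,
// so the standard algorithm below compiles as expected.
static const int kMaxCheck = std::max(1, 2);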

Linker -> Input -> Additional Dependencies (a minimal link test appears after the list)

Some of the .lib files below are the standard Windows libraries that Visual Studio adds by default.

kernel32.lib

user32.lib

gdi32.lib

winspool.lib

shell32.lib

ole32.lib

oleaut32.lib

uuid.lib

comdlg32.lib

advapi32.lib

Debug\tf_protos_cc.lib

zlib\install\lib\zlibstaticd.lib

gif\install\lib\giflib.lib

png\install\lib\libpng12_staticd.lib

jpeg\install\lib\libjpeg.lib

jsoncpp\src\jsoncpp\src\lib_json$(Configuration)\jsoncpp.lib

farmhash\install\lib\farmhash.lib

fft2d\src\lib\fft2d.lib

highwayhash\install\lib\highwayhash.lib

protobuf\src\protobuf$(Configuration)\libprotobufd.lib

grpc\src\grpc$(Configuration)\grpc++_unsecure.lib

grpc\src\grpc$(Configuration)\grpc_unsecure.lib

grpc\src\grpc$(Configuration)\gpr.lib

wsock32.lib

ws2_32.lib

shlwapi.lib
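
With the libraries above in place, a quick end-to-end check of the link line (tf_protos_cc.lib, the protobuf libraries, and the Winsock libraries) is a program that only creates and closes a Session. This is a sketch built with the same project settings, not part of the TensorFlow sources; link_check.cpp is a hypothetical file name:

// link_check.cpp -- hypothetical minimal program to verify the linker inputs resolve.
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"

#include <cstdio>
#include <memory>

int main(int argc, char* argv[]) {
  tensorflow::port::InitMain(argv[0], &argc, &argv);

  // Creating a Session exercises the core runtime; feeding it an empty graph
  // additionally touches the generated protobuf code in tf_protos_cc.lib.
  tensorflow::SessionOptions options;
  std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession(options));
  if (!session) {
    std::fprintf(stderr, "NewSession failed\n");
    return 1;
  }

  tensorflow::GraphDef empty_graph;
  tensorflow::Status s = session->Create(empty_graph);
  std::printf("Session::Create returned: %s\n", s.ToString().c_str());
  TF_CHECK_OK(session->Close());
  return s.ok() ? 0 : 1;
}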

Then add the required project references.



The code below is the sample program that ships with TensorFlow.

// MyTFTest.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"

using tensorflow::string;
using tensorflow::int32;

namespace tensorflow {
namespace example {

struct Options {
  int num_concurrent_sessions = 1;   // The number of concurrent sessions
  int num_concurrent_steps = 10;     // The number of concurrent steps
  int num_iterations = 100;          // Each step repeats this many times
  bool use_gpu = false;              // Whether to use gpu in the training
};

// A = [3 2; -1 0]; x = rand(2, 1);
// We want to compute the largest eigenvalue for A.
// repeat x = y / y.norm(); y = A * x; end
GraphDef CreateGraphDef() {
  // TODO(jeff,opensource): This should really be a more interesting
  // computation.  Maybe turn this into an mnist model instead?
  Scope root = Scope::NewRootScope();
  using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)

  // A = [3 2; -1 0].  Using Const<float> means the result will be a
  // float tensor even though the initializer has integers.
  auto a = Const<float>(root, { { 3, 2 },{ -1, 0 } });

  // x = [1.0; 1.0]
  auto x = Const(root.WithOpName("x"), { { 1.f },{ 1.f } });

  // y = A * x
  auto y = MatMul(root.WithOpName("y"), a, x);

  // y2 = y.^2
  auto y2 = Square(root, y);

  // y2_sum = sum(y2).  Note that you can pass constants directly as
  // inputs.  Sum() will automatically create a Const node to hold the
  // 0 value.
  auto y2_sum = Sum(root, y2, 0);

  // y_norm = sqrt(y2_sum)
  auto y_norm = Sqrt(root, y2_sum);

  // y_normalized = y ./ y_norm
  Div(root.WithOpName("y_normalized"), y, y_norm);

  GraphDef def;
  TF_CHECK_OK(root.ToGraphDef(&def));

  return def;
}

string DebugString(const Tensor& x, const Tensor& y) {
  CHECK_EQ(x.NumElements(), 2);
  CHECK_EQ(y.NumElements(), 2);
  auto x_flat = x.flat<float>();
  auto y_flat = y.flat<float>();
  // Compute an estimate of the eigenvalue via
  //      (x' A x) / (x' x) = (x' y) / (x' x)
  // and exploit the fact that x' x = 1 by assumption
  Eigen::Tensor<float, 0, Eigen::RowMajor> lambda = (x_flat * y_flat).sum();
  return strings::Printf("lambda = %8.6f x = [%8.6f %8.6f] y = [%8.6f %8.6f]",
                         lambda(), x_flat(0), x_flat(1), y_flat(0), y_flat(1));
}

void ConcurrentSteps(const Options* opts, int session_index) {
  // Creates a session.
  SessionOptions options;
  std::unique_ptr<Session> session(NewSession(options));
  GraphDef def = CreateGraphDef();
  if (options.target.empty()) {
    graph::SetDefaultDevice(opts->use_gpu ? "/gpu:0" : "/cpu:0", &def);
  }

  TF_CHECK_OK(session->Create(def));

  // Spawn M threads for M concurrent steps.
  const int M = opts->num_concurrent_steps;
  std::unique_ptr<thread::ThreadPool> step_threads(
      new thread::ThreadPool(Env::Default(), "trainer", M));

  for (int step = 0; step < M; ++step) {
    step_threads->Schedule([&session, opts, session_index, step]() {
      // Randomly initialize the input.
      Tensor x(DT_FLOAT, TensorShape({ 2, 1 }));
      auto x_flat = x.flat<float>();
      x_flat.setRandom();
      Eigen::Tensor<float, 0, Eigen::RowMajor> inv_norm =
          x_flat.square().sum().sqrt().inverse();
      x_flat = x_flat * inv_norm();

      // Iterations.
      std::vector<Tensor> outputs;
      for (int iter = 0; iter < opts->num_iterations; ++iter) {
        outputs.clear();
        TF_CHECK_OK(
            session->Run({ { "x", x } }, { "y:0", "y_normalized:0" }, {}, &outputs));
        CHECK_EQ(size_t{ 2 }, outputs.size());

        const Tensor& y = outputs[0];
        const Tensor& y_norm = outputs[1];
        // Print out lambda, x, and y.
        std::printf("%06d/%06d %s\n", session_index, step,
                    DebugString(x, y).c_str());
        // Copies y_normalized to x.
        x = y_norm;
      }
    });
  }

  // Delete the threadpool, thus waiting for all threads to complete.
  step_threads.reset(nullptr);
  TF_CHECK_OK(session->Close());
}

void ConcurrentSessions(const Options& opts) {
  // Spawn N threads for N concurrent sessions.
  const int N = opts.num_concurrent_sessions;

  // At the moment our Session implementation only allows
  // one concurrently computing Session on GPU.
  CHECK_EQ(1, N) << "Currently can only have one concurrent session.";

  thread::ThreadPool session_threads(Env::Default(), "trainer", N);
  for (int i = 0; i < N; ++i) {
    session_threads.Schedule(std::bind(&ConcurrentSteps, &opts, i));
  }
}

}  // end namespace example
}  // end namespace tensorflow

namespace {

bool ParseInt32Flag(tensorflow::StringPiece arg, tensorflow::StringPiece flag,
                    int32* dst) {
  if (arg.Consume(flag) && arg.Consume("=")) {
    char extra;
    return (sscanf(arg.data(), "%d%c", dst, &extra) == 1);
  }

  return false;
}

bool ParseBoolFlag(tensorflow::StringPiece arg, tensorflow::StringPiece flag,
                   bool* dst) {
  if (arg.Consume(flag)) {
    if (arg.empty()) {
      *dst = true;
      return true;
    }

    if (arg == "=true") {
      *dst = true;
      return true;
    } else if (arg == "=false") {
      *dst = false;
      return true;
    }
  }

  return false;
}

}  // namespace

int main(int argc, char* argv[]) {
  tensorflow::example::Options opts;
  std::vector<char*> unknown_flags;
  for (int i = 1; i < argc; ++i) {
    if (string(argv[i]) == "--") {
      while (i < argc) {
        unknown_flags.push_back(argv[i]);
        ++i;
      }
      break;
    }

    if (ParseInt32Flag(argv[i], "--num_concurrent_sessions",
                       &opts.num_concurrent_sessions) ||
        ParseInt32Flag(argv[i], "--num_concurrent_steps",
                       &opts.num_concurrent_steps) ||
        ParseInt32Flag(argv[i], "--num_iterations", &opts.num_iterations) ||
        ParseBoolFlag(argv[i], "--use_gpu", &opts.use_gpu)) {
      continue;
    }

    fprintf(stderr, "Unknown flag: %s\n", argv[i]);
    return -1;
  }

  // Passthrough any unknown flags.
  int dst = 1;  // Skip argv[0]
  for (char* f : unknown_flags) {
    argv[dst++] = f;
  }
  argv[dst++] = nullptr;
  argc = static_cast<int>(unknown_flags.size() + 1);
  tensorflow::port::InitMain(argv[0], &argc, &argv);
  tensorflow::example::ConcurrentSessions(opts);
}


Code execution
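
The flags parsed in main() map directly onto the Options struct, so a typical run from the x64 Debug output directory looks something like this (the executable name follows from MyTFTest.cpp, and the flag values are only an example):

MyTFTest.exe --num_concurrent_steps=4 --num_iterations=50 --use_gpu=false

Each worker thread prints one line per iteration in the format produced by DebugString(): the session index and step number followed by "lambda = ... x = [...] y = [...]". Because the graph implements power iteration on A = [3 2; -1 0], the reported lambda should converge towards 2, the dominant eigenvalue of A.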
