
Things to know about TensorFlow programming - 5: implementing a logistic classifier

2017-06-30 16:14
Let's write a simple logistic classifier.
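The model below computes p = sigmoid(w1*x1 + w2*x2 + w3*x3 + w4*x4 + b) over four features and thresholds a "cost" column into a binary label. The code reads a file named test.csv; the exact contents are not shown in the original, but from the column names used in load() it must look roughly like this (the values here are made up purely to illustrate the expected layout):

length,width,height,fps,cost
1920,1080,24,30,72.5
640,480,16,25,41.0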

import tensorflow as tf
import numpy as np
import pandas as pd

class Trainer(object):
    def __init__(self):
        self.load()
        print(self.x_data.shape, self.y_data.shape)

    def build_graph(self):
        # First, build the computation graph.
        self.x = tf.placeholder(shape=[None, None], dtype=tf.float32, name='x')  # self.x is a placeholder that receives the input data
        w1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="weights1")  # trainable parameter 1
        w2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="weights2")  # trainable parameter 2
        w3 = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="weights3")  # trainable parameter 3
        w4 = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="weights4")  # trainable parameter 4
        b = tf.Variable(tf.zeros([1]), name="biases")  # trainable parameter 5
        feat1 = tf.nn.l2_normalize(self.x[:, 0], 0)  # L2-normalize each feature column
        feat2 = tf.nn.l2_normalize(self.x[:, 1], 0)
        feat3 = tf.nn.l2_normalize(self.x[:, 2], 0)
        feat4 = tf.nn.l2_normalize(self.x[:, 3], 0)
        self.y = w1 * feat1 + w2 * feat2 + w3 * feat3 + w4 * feat4 + b
        self.logits = tf.sigmoid(self.y)  # logistic function: squashes the linear output into (0, 1)
        #self.loss = tf.losses.sigmoid_cross_entropy(self.y_data, self.y)
        #self.loss = tf.losses.mean_squared_error(self.y_data, self.y)
        #self.loss = tf.reduce_mean(tf.square(self.logits - self.y_data))
        self.loss = tf.reduce_mean(tf.abs(self.logits - self.y_data))  # mean absolute error
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        self.train = optimizer.minimize(self.loss)
        tf.summary.histogram("model/input", self.x)
        tf.summary.scalar("loss", self.loss)
        self.summaries = tf.summary.merge_all()
        self.saver = tf.train.Saver()

    def load(self):
        # Training data, loaded with pandas; the expected file layout follows
        # from the column names below.
        data_raw = pd.read_csv('test.csv')
        self.x_data = data_raw[['length', 'width', 'height', 'fps']].values

        def func(row):
            # Binarize the label: cost above 60 is class 1, otherwise class 0.
            if row > 60:
                return 1
            else:
                return 0
        self.y_data = data_raw['cost'].apply(func).values

    def run(self):
        with tf.Graph().as_default() as graph:
            self.build_graph()
            init_op = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init_op)
                writer = tf.summary.FileWriter("/tmp/summ", sess.graph)  # create a summary writer for TensorBoard
                step = 0
                while step < 2500:  # 2500 iterations in total
                    _, current_summ, loss = sess.run([self.train, self.summaries, self.loss],
                                                     feed_dict={self.x: self.x_data})
                    writer.add_summary(current_summ, global_step=step)
                    step += 1
                    print(step, loss)
                save_path = self.saver.save(sess, "/tmp/summ/model.ckpt")  # save the model

    def predict(self):
        with tf.Graph().as_default() as graph:
            self.build_graph()
            with tf.Session() as sess:
                self.saver.restore(sess, "/tmp/summ/model.ckpt")  # restore the model
                result, loss = sess.run([self.logits, self.loss], feed_dict={self.x: self.x_data})
                print("predict loss:", loss)
                for i in range(len(self.y_data)):
                    print(result[i], self.y_data[i])

the_trainer = Trainer()
the_trainer.run()
#the_trainer.predict()
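A note on the loss: the listing trains with mean absolute error between the sigmoid output and the 0/1 labels, and the commented-out lines show that cross-entropy variants were also tried. For a logistic classifier the numerically stable choice is tf.nn.sigmoid_cross_entropy_with_logits, which takes the pre-sigmoid value (self.y here, despite self.logits being the post-sigmoid probability). A minimal sketch of a drop-in replacement inside build_graph(), not the author's final choice:

    # Sketch: cross-entropy on the pre-sigmoid value self.y.
    # The labels must be float32 to match the logits' dtype.
    labels = tf.cast(tf.convert_to_tensor(self.y_data), tf.float32)
    self.loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=self.y))

After training finishes and the checkpoint is saved, comment out the_trainer.run() and uncomment the_trainer.predict() to reload /tmp/summ/model.ckpt and print the predicted probability next to the true label for each row.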


[Figure: the "loss" scalar summary, i.e. the training loss curve, as displayed in TensorBoard.]
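The loss curve comes from the tf.summary.scalar("loss", ...) op written by the FileWriter above; to view it, point TensorBoard at the summary directory and open the page it serves (http://localhost:6006 by default):

    tensorboard --logdir /tmp/summ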