我用tensorflow建立6层全连接神经网络,网络的输入数据2625 个,输出数据1个。理想情况下输出数据应该按照一定规律随着输入数据的变化而变化的,但是训练到最后,无论输入数据是多少,输出数据都是同一个数值,loss在来回跳动,没有减小。
我的模型代码如下:
import sys
sys.path.insert(0, '/home/wcsmomo/workspace/tutorials')
#from __future__ import print_function, division
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
from my_ant_v3 import FetchDataSets as fds
import DBUtil as dbu
from my_ant_v3 import ant_loger
class Network():
    """Six-layer fully-connected regression network.

    Maps a 2625-dimensional (375*7) input vector to a single continuous
    output value, built on the TensorFlow 1.x graph/session API.
    """

    def __init__(self, num_hidden, num_hidden_2, num_hidden_3, num_hidden_4, num_hidden_5, batch_size):
        """Store layer widths and batch size; the graph itself is built by define_graph().

        Args:
            num_hidden .. num_hidden_5: widths of hidden layers 1-5.
            batch_size: mini-batch size (the test batch uses the same size).
        """
        self.batch_size = batch_size
        self.test_batch_size = batch_size
        # Hyper parameters: hidden-layer widths.
        self.num_hidden = num_hidden
        self.num_hidden_2 = num_hidden_2
        self.num_hidden_3 = num_hidden_3
        self.num_hidden_4 = num_hidden_4
        self.num_hidden_5 = num_hidden_5
        # Graph-related tensors, filled in by define_graph().
        self.graph = tf.Graph()
        self.tf_train_samples = None
        self.tf_train_labels = None
        self.tf_test_samples = None
        self.tf_test_labels = None
        self.tf_test_prediction = None

    def define_graph(self):
        """Build the graph: five ReLU hidden layers with dropout, linear output, MSE loss."""
        with self.graph.as_default():
            self.tf_train_samples = tf.placeholder(tf.float32, shape=(self.batch_size, 375 * 7))
            self.tf_train_labels = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.tf_test_samples = tf.placeholder(tf.float32, shape=(self.test_batch_size, 375 * 7))
            self.tf_test_labels = tf.placeholder(tf.float32, shape=(self.test_batch_size, 1))
            # fully connected layer 1
            fc1_weights = tf.Variable(tf.truncated_normal([375 * 7, self.num_hidden], stddev=0.1))
            fc1_biases = tf.Variable(tf.constant(0.0, shape=[self.num_hidden]))
            # fully connected layer 2
            fc2_weights = tf.Variable(tf.truncated_normal([self.num_hidden, self.num_hidden_2], stddev=0.1))
            fc2_biases = tf.Variable(tf.constant(0.0, shape=[self.num_hidden_2]))
            # fully connected layer 3
            fc3_weights = tf.Variable(tf.truncated_normal([self.num_hidden_2, self.num_hidden_3], stddev=0.1))
            fc3_biases = tf.Variable(tf.constant(0.0, shape=[self.num_hidden_3]))
            # fully connected layer 4
            fc4_weights = tf.Variable(tf.truncated_normal([self.num_hidden_3, self.num_hidden_4], stddev=0.1))
            fc4_biases = tf.Variable(tf.constant(0.0, shape=[self.num_hidden_4]))
            # fully connected layer 5
            fc5_weights = tf.Variable(tf.truncated_normal([self.num_hidden_4, self.num_hidden_5], stddev=0.1))
            fc5_biases = tf.Variable(tf.constant(0.0, shape=[self.num_hidden_5]))
            # fully connected layer 6 --> output layer.
            # BUG FIX: stddev was 0.0, which initialized every output-layer
            # weight to exactly zero; use 0.1 like the other layers so the
            # output is not a constant at the start of training.
            fc6_weights = tf.Variable(tf.truncated_normal([self.num_hidden_5, 1], stddev=0.1))
            fc6_biases = tf.Variable(tf.constant(0.0, shape=[1]))

            def model(input_data, train=True):
                """Forward pass; dropout (keep_prob=0.75) is applied only when train=True."""
                # BUG FIX: the original applied dropout unconditionally, so the
                # test prediction was also randomly perturbed; disable it at
                # inference time.
                keep_prob = 0.75 if train else 1.0
                hidden = tf.nn.relu(tf.matmul(input_data, fc1_weights) + fc1_biases)
                hidden = tf.nn.dropout(hidden, keep_prob)
                # fully connected layer 2
                hidden_2 = tf.nn.relu(tf.matmul(hidden, fc2_weights) + fc2_biases)
                # BUG FIX: closing parenthesis was missing on this call.
                hidden_2 = tf.nn.dropout(hidden_2, keep_prob)
                # fully connected layer 3
                hidden_3 = tf.nn.relu(tf.matmul(hidden_2, fc3_weights) + fc3_biases)
                hidden_3 = tf.nn.dropout(hidden_3, keep_prob)
                # fully connected layer 4
                hidden_4 = tf.nn.relu(tf.matmul(hidden_3, fc4_weights) + fc4_biases)
                hidden_4 = tf.nn.dropout(hidden_4, keep_prob)
                # fully connected layer 5
                hidden_5 = tf.nn.relu(tf.matmul(hidden_4, fc5_weights) + fc5_biases)
                # Linear output: this is a regression, so no activation here.
                return tf.matmul(hidden_5, fc6_weights) + fc6_biases

            # Training computation: mean squared error against the labels.
            self.logits = model(self.tf_train_samples)
            self.loss = tf.reduce_mean(tf.squared_difference(self.logits, self.tf_train_labels))
            # Optimizer (alternatives the author experimented with are kept commented out).
            # self.optimizer = tf.train.RMSPropOptimizer(0.00005).minimize(self.loss)
            # self.optimizer = tf.train.AdadeltaOptimizer(0.00005).minimize(self.loss)
            self.optimizer = tf.train.AdagradOptimizer(0.00001).minimize(self.loss)
            # self.optimizer = tf.train.AdamOptimizer(0.00005).minimize(self.loss)
            # Test-time prediction shares the same weights, dropout disabled.
            self.test_prediction = model(self.tf_test_samples, train=False)

    def run(self):
        """Train the network, periodically printing test predictions and saving checkpoints."""
        loger = ant_loger.Loger("/home/wcsmomo/workspace/tutorials/ant.log")
        self.session = tf.Session(graph=self.graph)
        with self.session as session:
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            i = 0
            count = 0
            ck = fds.CreateKindleLines(self.batch_size, data_type='train')
            test_ck = fds.CreateKindleLines(self.test_batch_size, data_type='test')
            while i < ck.avg_k_count:
                # Wrap back to the start of the data once an epoch completes.
                if i == ck.avg_k_count - 1:
                    i = 0
                    ck.k = 0
                    test_ck.k = 0
                i += 1
                count += 1
                bat_x, bat_y = ck.fetch_batch_slope_continue_time()
                bat_x = np.reshape(bat_x, [self.batch_size, 2625])
                bat_y = np.reshape(bat_y, [self.batch_size, 1])
                logits = session.run(self.logits,
                                     feed_dict={self.tf_train_samples: bat_x, self.tf_train_labels: bat_y})
                # print(logits)
                _, los = session.run([self.optimizer, self.loss],
                                     feed_dict={self.tf_train_samples: bat_x, self.tf_train_labels: bat_y})
                if i % 10 == 0:
                    test_bat_x, test_bat_y = test_ck.fetch_batch_slope_continue_time()
                    test_bat_x = np.reshape(test_bat_x, [self.test_batch_size, 2625])
                    test_bat_y = np.reshape(test_bat_y, [self.test_batch_size, 1])
                    pred = session.run(self.test_prediction,
                                       feed_dict={self.tf_test_samples: test_bat_x,
                                                  self.tf_test_labels: test_bat_y})
                    # accuracy = self.accuracy(pred, test_bat_y, True)
                    loger.info("bat_y: " + str(bat_y))
                    loger.info("pred: " + str(pred))
                    print(bat_y)
                    print(pred)
                    print('Minibatch loss at step %d lost: %f' % (i, los))
                    # print('Minibatch accuracy: %.1f%%' % accuracy[0])
                # Checkpoint every 100 steps when the DB flag allows it.
                if count % 100 == 0 and self.is_save():
                    path = '/home/wcsmomo/workspace/MultLayerNet.ckpt'
                    save_path = saver.save(self.session, path)
                    print("save_path:", save_path)

    def accuracy(self, predictions, labels, need_confusion_matrix=False):
        '''
        @return: accuracy and confusionMatrix as a tuple
        '''
        # NOTE(review): argmax-based accuracy only makes sense for one-hot
        # classification outputs; per the author this method is unused for
        # the regression task, and is kept for reference only.
        _predictions = np.argmax(predictions, 1)
        _labels = np.argmax(labels, 1)
        cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
        # == is overloaded for numpy arrays: elementwise compare, then count.
        accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
        return accuracy, cm

    def is_save(self):
        """Return True when the IS_SAVE flag in table ba_site equals 'Y'."""
        conn = dbu.Conn().get_conn()
        # BUG FIX: this string literal was unterminated (missing closing quote).
        sql = "select value from ba_site where key_name='IS_SAVE'"
        cur = conn.cursor()
        try:
            cur.execute(sql)
            results = cur.fetchall()
            return results[0][0] == 'Y'
        finally:
            # BUG FIX: the original leaked the cursor and connection.
            cur.close()
            conn.close()
if __name__ == '__main__':
    # Hidden-layer widths taper from 3000 down to 50 neurons.
    layer_widths = dict(num_hidden=3000, num_hidden_2=1000, num_hidden_3=500,
                        num_hidden_4=200, num_hidden_5=50)
    network = Network(batch_size=100, **layer_widths)
    network.define_graph()
    network.run()
3个回答
遇到了这个问题,我的loss值最开始是在比较大的值上一直无法收敛,查看网络权值梯度,最开始的梯度返回已经是e-3级别了,因此网络基本没调整。
目前知道的方法有:
初始化不对,查看每层网络的输入方差,如果方差越来越小,可以判定为初始化不对,而你的初始化也是采用的基本的高斯函数,有很大可能是初始化的问题;
采用BN(批量标准化),可以稍微降低初始化对网络的影响;
如果你有更好的办法,求告知。
好长好长的code。我没有逐行看,但是有几个可能性:
1、你的预测输出标签,而不是概率。如果你的数据是非常倾斜,1:20什么的,基本上不管怎么预测,预测标签都会是0。
2、你的bias项一直都是0,tf.Variable(tf.constant(0.0, shape=[self.num_hidden]))。这也许是一个可能。
3、你的模型层数比较多,激活函数都是一样的。可以试试两层、三层的,效果如何。也可以换个激活函数试试。
我输出的确是值,不是概率。我处理的不是分类问题,我输出的值是连续的。这样的话,不能这么处理吗?
-
wcsmomo
2017-05-14 11:19
因为我输出的结果是在零的左右分布的,所以我把bias设置为0了,这样能从一个比较好的起点开始训练,如果不是0的话,loss会从很大的数慢慢下降。
-
wcsmomo
2017-05-14 11:25
不行的,bias是1,如果是0,模型会自动把bias的权重训练为0。如果你处理的不是分类问题,里面不该出现confusion matrix和logit function之类的代码。
-
batmanX
2017-05-14 12:54
我把网络变成了3层,并且降低了隐藏层的神经元个数,发现loss在随机跳动,似乎没有学到任何特征,应该是神经网络层数低了导致网络的学习能力不足。
-
wcsmomo
2017-05-14 14:00
sorry , def accuracy(self, predictions, labels, need_confusion_matrix=False) 这个函数是一个废弃的,没有被调用的函数。logits 也仅仅用于表示输出层的输出结果而已。
-
wcsmomo
2017-05-14 14:05
bias换成1了吗?试了其他激活函数吗?你的激活函数全是relu,relu无法输出负数吧,你的真实值都是正数?
-
batmanX
2017-05-14 14:18
还有个情况就是,最开始我设置bias = 0.1 ,然后训练到最后所有的输出值也全是一个。
-
wcsmomo
2017-05-14 14:19
把bias换成1了,激活函数去掉了,直接输出y=w*x+b的值,还是一样的情况,我现在有点怀疑,是不是batch_size设置为100导致每次更新误差是按这次训练的误差均值去更新权重的。所以它就学习不到更细节的特征,导致它输出的结果都是接近的值。
-
wcsmomo
2017-05-14 22:15
您好,请问您的问题解决了么,我也遇到了类似的问题
-
lixixi
2020-05-06 19:49