InvalidArgumentError: You must feed a value for placeholder tensor

Problem

While running the program, the following exception was thrown:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input/y-input' with dtype float and shape [?,10]
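This error is raised whenever an op passed to sess.run depends on a placeholder that is missing from feed_dict. A minimal sketch that reproduces the same class of error (assuming TensorFlow 1.x, which the APIs below belong to):

import tensorflow as tf

# A placeholder matching the dtype and shape reported in the error message.
y_ = tf.placeholder(dtype=tf.float32, shape=(None, 10), name='y-input')
loss = tf.reduce_sum(y_)

with tf.Session() as sess:
    # Fetching 'loss' without feeding y_ raises:
    # InvalidArgumentError: You must feed a value for placeholder tensor 'y-input'
    # with dtype float and shape [?,10]
    sess.run(loss)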

The original code is as follows:


import tensorflow as tf
from tensorflow.contrib.layers import l2_regularizer
from tensorflow.examples.tutorials.mnist import input_data

REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
INPUT_NODE = 28 * 28
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable('weights', shape=shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# Define the forward-propagation pass of the network.
def inference(input_tensor, regularizer=None):
    # Declare the first layer and compute its forward pass.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable(shape=[INPUT_NODE, LAYER1_NODE], regularizer=regularizer)
        biases = tf.get_variable(name='biases', shape=[LAYER1_NODE],
                                 initializer=tf.constant_initializer(value=0.0))
        layer1 = tf.nn.relu(tf.matmul(a=input_tensor, b=weights) + biases)
    # Declare the second layer and compute its forward pass.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable(shape=[LAYER1_NODE, OUTPUT_NODE], regularizer=regularizer)
        biases = tf.get_variable(name='biases', shape=[OUTPUT_NODE],
                                 initializer=tf.constant_initializer(value=0.0))
        layer2 = tf.nn.relu(tf.matmul(a=layer1, b=weights) + biases)
    # Return the result of the forward pass.
    return layer2


def train(mnist):
    # Put the input-processing ops in the 'input' name scope.
    with tf.name_scope('input'):
        x = tf.placeholder(dtype=tf.float32, shape=(None, INPUT_NODE), name='x-input')
        y_ = tf.placeholder(dtype=tf.float32, shape=(None, OUTPUT_NODE), name='y-input')

    regularizer = l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer)
    global_step = tf.Variable(initial_value=0, trainable=False)

    # Put the moving-average ops in the 'moving_average' name scope.
    with tf.name_scope('moving_average'):
        variable_averages = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY,
                                                              num_updates=global_step)
        variables_average_op = variable_averages.apply(tf.trainable_variables())

    # Put the loss computation in the 'loss_function' name scope.
    with tf.name_scope('loss_function'):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,
                                                                       labels=tf.argmax(y_, 1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = cross_entropy_mean + tf.add_n(inputs=tf.get_collection('losses'))

    # Put the learning rate, optimizer, and per-step training ops in the 'train_step' name scope.
    with tf.name_scope('train_step'):
        learning_rate = tf.train.exponential_decay(learning_rate=LEARNING_RATE_BASE,
                                                   global_step=global_step,
                                                   decay_steps=mnist.train.num_examples / BATCH_SIZE,
                                                   decay_rate=LEARNING_RATE_DECAY,
                                                   staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(
            loss=loss, global_step=global_step)
        with tf.control_dependencies(control_inputs=[train_step, variables_average_op]):
            train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(fetches=[train_op, loss, global_step],
                                           feed_dict={x: xs, y: ys})
            if i % 1000 == 0:
                print('After %d training steps, loss on training batch is %d.' % (step, loss_value))

    # Write the current graph to a TensorBoard log file.
    writer = tf.summary.FileWriter(logdir='./log/', graph=tf.get_default_graph())
    writer.close()


def main(argv=None):
    mnist = input_data.read_data_sets('./mnist_data/', one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()

Solution

After tracing through the code, the offending line turned out to be:

_, loss_value, step = sess.run(fetches=[train_op, loss, global_step], feed_dict={x: xs, y: ys})

In feed_dict, ys should be fed to the y_ placeholder, but it was mistakenly written as y (the output of inference). As a result, the y_ placeholder never received a value, even though the fetched loss op depends on it through labels=tf.argmax(y_, 1), which triggers the error above. After the fix, the program runs normally.
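The corrected call feeds ys to the y_ placeholder:

_, loss_value, step = sess.run(fetches=[train_op, loss, global_step],
                               feed_dict={x: xs, y_: ys})

As a side note, in TensorFlow 1.x feed_dict keys can also be tensor names rather than Python variables. Since both placeholders live in the 'input' name scope, feed_dict={'input/x-input:0': xs, 'input/y-input:0': ys} is equivalent and makes a mix-up between y and y_ harder to type.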
