Hvass-Labs / TensorFlow-Tutorials

TensorFlow Tutorials with YouTube Videos

Cannot feed value of shape (36, 784) for Tensor 'Reshape:0', which has shape '(?, 28, 28, 1)'

Ranjithavidyashankar opened this issue · comments

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

img_h = img_w = 28             # MNIST images are 28x28 pixels
img_size_flat = img_h * img_w  # 28x28 = 784, the total number of pixels
n_classes = 10                 # Number of classes, one class per digit
n_channels = 1                 # Grayscale images have a single channel
learning_rate = 0.0001

Define placeholders

x = tf.placeholder(tf.float32, shape=[None, img_h, img_w, n_channels], name='X')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')
# NOTE: this rebinds the Python name `x` to the output of the reshape op
# ('Reshape:0'), so the feed_dict below targets that tensor, not the placeholder.
x = tf.reshape(x, shape=[-1, img_h, img_w, n_channels])

def conv(input_img, output_dim=64, batch_norm=False, name='conv'):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        lay_1 = tf.contrib.layers.conv2d(input_img, num_outputs=32, kernel_size=(3, 3), stride=1)
        lay_2 = tf.contrib.layers.max_pool2d(lay_1, kernel_size=(2, 2), stride=1)
        lay_3 = tf.contrib.layers.conv2d(lay_2, num_outputs=64, kernel_size=(3, 3), stride=1)
        lay_4 = tf.contrib.layers.max_pool2d(lay_3, kernel_size=(2, 2), stride=1)
        lay_5 = tf.contrib.layers.conv2d(lay_4, num_outputs=128, kernel_size=(3, 3), stride=1)
        lay_6 = tf.contrib.layers.max_pool2d(lay_5, kernel_size=(2, 2), stride=1)
        lay_7 = tf.contrib.layers.flatten(lay_6)
    return lay_7

def fc(input_img, num_unit=10, batch_norm=False, name='dense', activation_fn=tf.nn.relu):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        lay_8 = tf.layers.dense(input_img, units=1024, activation=tf.nn.relu)
        lay_9 = tf.layers.dense(lay_8, units=256, activation=tf.nn.relu)
        lay_10 = tf.layers.dense(lay_9, units=10, activation=tf.nn.sigmoid)
    return lay_10
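
One thing worth flagging in the snippet above: tf.nn.softmax_cross_entropy_with_logits (used in the Train scope below) expects raw, unactivated logits, so the sigmoid on the final dense layer is likely unintended. A minimal variant of that last line, assuming raw logits were the intent:

        # Hypothetical fix (not in the original post): no activation,
        # so the layer returns raw logits for softmax_cross_entropy_with_logits.
        lay_10 = tf.layers.dense(lay_9, units=10, activation=None)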

conv1 = conv(x)
output_last = fc(conv1)

with tf.variable_scope('Train'):
    # Loss function
    with tf.variable_scope('Loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_last), name='loss')
        tf.summary.scalar('loss', loss)
    # Optimizer
    with tf.variable_scope('Optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam_op').minimize(loss)
    # Calculate accuracy
    with tf.variable_scope('Accuracy'):
        correct_prediction = tf.equal(tf.argmax(output_last, 1), tf.argmax(y, 1), name='correct_pred')
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
        tf.summary.scalar('accuracy', accuracy)

Initializer of variables

init = tf.global_variables_initializer()

Merge all summaries

merged = tf.summary.merge_all()

Ops for saving checkpoints

ckpt = tf.train.get_checkpoint_state('./checkpoints/')
saver = tf.train.Saver()

Training Parameters

display_step = 4000
chk_save = 4000
batch_size = 36
iteration = 40000
total_batch = int(mnist.train.num_examples / batch_size)

with tf.Session() as sess:
    if ckpt:
        saver.restore(sess, './checkpoints/-38000')
        print("Successfully restored model")
    else:
        # Initialize all variables
        sess.run(init)

    writer = tf.summary.FileWriter('./graphs', sess.graph)

    for iter in range(iteration):
        # Iterate over the total number of batches
        for i in range(total_batch):
            # batch_x has shape (batch_size, 784): flat images, not 28x28x1.
            # Feeding it to the reshaped `x` raises the error in the title.
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run the optimization op (backprop) and compute the merged summary
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            summary_op = sess.run(merged, feed_dict={x: batch_x, y: batch_y})
            writer.add_summary(summary_op, iter)

        # Display the loss every display_step iterations
        if iter % display_step == 0:
            print("Iter %d: loss %f" % (iter + 1, sess.run(loss, feed_dict={x: batch_x, y: batch_y})))

print("Optimization Finished!")

This doesn't look like the code from my tutorials. What is it?