I'm trying to test my CNN by writing its output to an image and displaying it.
The source code is from https://xueyangfu.github.io/projects/tip2017.html. The original code runs fine. I then changed the structure of the CNN: I added some layers and changed some parameters.
Training works correctly, and I use some images to test the model. When I print the output ndarray it contains correct values. However, the image shown by cv2 or plt is the same dark color in every pixel. The output ndarray has the same shape and type as the input ndarray, (height, width, 3), but its values don't seem to get written into the image. When I read back the solid-color image saved from the output ndarray, its shape becomes (height, width, 4).
A value of 1.0 is appended at each pixel (an alpha channel), and the first three values differ from my output ndarray. I have wasted about a week on this strange problem. Please give me some advice about what the problem is or how to solve it. Thank you for helping.
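Here is roughly how I checked the saved file (a minimal sketch; h_11.jpg is the file written by the test script below):

import matplotlib.image as img

saved = img.imread("h_11.jpg")   # read back the image saved from the output ndarray
print(saved.shape)               # prints (height, width, 4): an extra channel appears
print(saved[0, 0])               # the fourth value is 1.0 and the first three differ from my output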
Below is the testing code:
import os
import training_0116 as DerainNet
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import cv2

##################### Select GPU device ####################################
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

# read the test image and scale it to [0, 1]
file = "h.jpg"
ori = img.imread(file)
ori = ori / 255.0

# copy the three channels into the network input array
detail = ori
details = np.zeros([ori.shape[0], ori.shape[1], ori.shape[2]])
for j in range(3):
    tmp = detail[:, :, j]
    details[:, :, j] = np.pad(tmp, 0, 'symmetric')

details = np.expand_dims(details[:, :, :], axis=0)

image = tf.placeholder(tf.float32, shape=(1, details.shape[1], details.shape[2], details.shape[3]))
out = DerainNet.inference(image)
saver = tf.train.Saver()

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True

with tf.Session(config=config) as sess:
    if tf.train.get_checkpoint_state('./model/'):
        ckpt = tf.train.latest_checkpoint('./model/')
        saver.restore(sess, ckpt)
        print("load new model")
    else:
        # this model uses 128 feature maps and is for debug only
        saver.restore(sess, "./model/test-model/model")
        print("load pre-trained model")

    # run inference and clip the output to [0, 1]
    detail_out = sess.run(out, feed_dict={image: details})
    derained = detail_out[0, :, :, :]
    derained[np.where(derained < 0.)] = 0.
    derained[np.where(derained > 1.)] = 1.

    # save the result and show it next to the input
    img.imsave("h_11.jpg", derained)
    plt.subplot(1, 2, 1)
    plt.imshow(ori)
    plt.title('input')
    plt.subplot(1, 2, 2)
    plt.imshow(derained)
    plt.title('output')
    plt.show()
Below is the part of the training code:

def inference(images):
    # conv1
    with tf.variable_scope('conv_1'):
        kernel = tf.Variable(tf.random_normal([16, 16, FLAGS.num_channels, FLAGS.num_feature],
                                              dtype=tf.float32, stddev=1e-3),
                             trainable=True, name='weights1')
        biases = tf.Variable(tf.constant(0.0, shape=[FLAGS.num_feature], dtype=tf.float32),
                             trainable=True, name='biases1')
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.tanh(bias)
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

    # conv2
    with tf.variable_scope('conv_2'):
        kernel = tf.Variable(tf.random_normal([1, 1, FLAGS.num_feature, FLAGS.num_feature],
                                              dtype=tf.float32, stddev=1e-3),
                             trainable=True, name='weights2')
        biases = tf.Variable(tf.constant(0.0, shape=[FLAGS.num_feature], dtype=tf.float32),
                             trainable=True, name='biases2')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.tanh(bias)
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

    # conv3
    with tf.variable_scope('conv_3'):
        kernel = tf.Variable(tf.random_normal([4, 4, FLAGS.num_feature, FLAGS.num_feature],
                                              dtype=tf.float32, stddev=1e-3),
                             trainable=True, name='weights2')
        biases = tf.Variable(tf.constant(0.0, shape=[FLAGS.num_feature], dtype=tf.float32),
                             trainable=True, name='biases2')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.tanh(bias)
        pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

    # conv4
    with tf.variable_scope('conv_4'):
        kernel = tf.Variable(tf.random_normal([6, 6, FLAGS.num_feature, FLAGS.num_feature],
                                              dtype=tf.float32, stddev=1e-3),
                             trainable=True, name='weights2')
        biases = tf.Variable(tf.constant(0.0, shape=[FLAGS.num_feature], dtype=tf.float32),
                             trainable=True, name='biases2')
        conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.tanh(bias)
        pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

    # conv6: transposed convolution back to num_channels
    with tf.variable_scope('conv_6'):
        kernel = tf.Variable(tf.random_normal([8, 8, FLAGS.num_channels, FLAGS.num_feature],
                                              dtype=tf.float32, stddev=1e-3),
                             trainable=True, name='weights3')
        biases = tf.Variable(tf.constant(0.0, shape=[FLAGS.num_channels], dtype=tf.float32),
                             trainable=True, name='biases3')
        conv = tf.nn.conv2d_transpose(conv4, kernel,
                                      [tf.shape(images)[0], tf.shape(images)[1], tf.shape(images)[2], FLAGS.num_channels],
                                      [1, 1, 1, 1], padding='SAME')
        conv5 = tf.nn.bias_add(conv, biases)
        out = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

    return out