Fully Convolutional Networks for Segmentation
Table of Contents
To obtain a segmentation map (output), segmentation networks usually have 2 parts: an encoder that downsamples the input into coarse feature maps, and a decoder that upsamples those maps back to a per-pixel class prediction.
# Mount Google Drive and load the segmentation dataset:
#   seg_train_imgs   - training images
#   seg_train_labels - per-pixel one-hot training masks
#   seg_test_imgs    - held-out test images
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from google.colab import drive

drive.mount('/content/drive')

seg_train_imgs = np.load('/content/drive/MyDrive/DL_Colab/DL_data/seg_train_imgs.npy')
seg_train_labels = np.load('/content/drive/MyDrive/DL_Colab/DL_data/seg_train_labels.npy')
seg_test_imgs = np.load('/content/drive/MyDrive/DL_Colab/DL_data/seg_test_imgs.npy')

n_train = seg_train_imgs.shape[0]
# BUG FIX: n_test previously read seg_train_imgs.shape[0], so the test-set
# count reported below was actually the training-set count.
n_test = seg_test_imgs.shape[0]

print ("The number of training images : {}, shape : {}".format(n_train, seg_train_imgs.shape))
print ("The number of segmented images : {}, shape : {}".format(n_train, seg_train_labels.shape))
print ("The number of testing images : {}, shape : {}".format(n_test, seg_test_imgs.shape))
## binary segmentation and one-hot encoding in this case
# Pick one random training sample and show the image alongside the two
# one-hot channels of its label mask.
idx = np.random.randint(n_train)

panels = [
    seg_train_imgs[idx],             # input image
    seg_train_labels[idx][:, :, 0],  # label mask, channel 0
    seg_train_labels[idx][:, :, 1],  # label mask, channel 1
]

plt.figure(figsize = (10, 4))
for pos, panel in enumerate(panels, start = 1):
    plt.subplot(1, 3, pos)
    plt.imshow(panel)
    plt.axis('off')
plt.show()
# --- FCN-style segmentation model ---
# Encoder: frozen, ImageNet-pretrained VGG16.
# Decoder: a 7x7 conv + 1x1 convs, then transposed convolutions (2x, 2x, 8x)
# with additive skip connections from intermediate VGG feature maps, ending in
# a 2-channel softmax (one channel per class for binary segmentation).
model_type = tf.keras.applications.vgg16
base_model = model_type.VGG16()
# Freeze every pretrained weight; only the decoder layers added below train.
base_model.trainable = False
base_model.summary()
# layers[-5] is taken as the deepest convolutional feature map before the
# flatten/dense classifier head — NOTE(review): confirm against the summary
# printed above, since this index depends on the exact VGG16 layer list.
map5 = base_model.layers[-5].output
# sixth convolution layer
conv6 = tf.keras.layers.Conv2D(filters = 4096,
kernel_size = (7,7),
padding = 'SAME',
activation = 'relu')(map5)
# 1x1 convolution layers
fcn4 = tf.keras.layers.Conv2D(filters = 4096,
kernel_size = (1,1),
padding = 'SAME',
activation = 'relu')(conv6)
# 1x1 conv down to 2 channels (one score map per class).
fcn3 = tf.keras.layers.Conv2D(filters = 2,
kernel_size = (1,1),
padding = 'SAME',
activation = 'relu')(fcn4)
# Upsampling layers
# 2x upsample; 512 filters presumably chosen to match the channel count of
# base_model.layers[14].output for the elementwise add below — TODO confirm.
fcn2 = tf.keras.layers.Conv2DTranspose(filters = 512,
kernel_size = (4,4),
strides = (2,2),
padding = 'SAME')(fcn3)
# Skip connection: add an intermediate VGG feature map (layers[14]), then
# upsample 2x again; 256 filters assumed to match layers[10]'s channels.
fcn1 = tf.keras.layers.Conv2DTranspose(filters = 256,
kernel_size = (4,4),
strides = (2,2),
padding = 'SAME')(fcn2 + base_model.layers[14].output)
# Second skip connection (layers[10]) and a final 8x upsample back to the
# input resolution, with softmax over the 2 class channels.
output = tf.keras.layers.Conv2DTranspose(filters = 2,
kernel_size = (16,16),
strides = (8,8),
padding = 'SAME',
activation = 'softmax')(fcn1 + base_model.layers[10].output)
model = tf.keras.Model(inputs = base_model.inputs, outputs = output)
model.summary()
# One-hot labels + softmax output -> categorical cross-entropy.
model.compile(optimizer = 'adam',
loss = 'categorical_crossentropy',
metrics = 'accuracy')
model.fit(seg_train_imgs, seg_train_labels, batch_size = 5, epochs = 5)
# Run the trained model on one test image and visualize the predicted mask.
test_img = seg_test_imgs[[1]]  # fancy index keeps the batch axis: (1, H, W, C)
test_segmented = model.predict(test_img)

# Threshold the class-1 probability map into a binary mask.
# BUG FIX: reshape to (224, 224) rather than (224, 224, 1) — matplotlib's
# imshow only accepts (M, N), (M, N, 3) or (M, N, 4) arrays and raises
# "Invalid shape (224, 224, 1) for image data" on a trailing singleton channel.
seg_mask = (test_segmented[:, :, :, 1] > 0.5).reshape(224, 224).astype(float)

plt.figure(figsize = (8, 8))
plt.subplot(2, 2, 1)
plt.imshow(test_img[0])        # original image
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(seg_mask, cmap = 'Blues')  # predicted mask alone
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(test_img[0])
plt.imshow(seg_mask, cmap = 'Blues', alpha = 0.5)  # mask overlaid on image
plt.axis('off')
plt.show()
%%javascript
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')