practice/python/watermark.ipynb

In [1]:
import os, shutil
import string

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFile, ImageFont, ImageOps

from tensorflow import keras
from tensorflow.keras.layers import Dropout
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

Generate input images with watermarks

In [2]:
# characters usable as watermark text: the first 84 entries of
# string.printable (letters, digits, and common punctuation),
# stopping short of the whitespace characters
printable = list(string.printable)[0:84]
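
Since np.random.choice draws from this pool, the slice matters: it ends at "@", so no whitespace (or backslashes) can appear in a watermark. An optional check:

In [ ]:
# Optional: confirm the character pool contains no whitespace
assert not any(c.isspace() for c in printable)
print("".join(printable))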
In [3]:
def gen_watermark(img_str):
    # open the source image
    img = Image.open("/home/tensorflow/Downloads/VOCdevkit/RAWS/{}".format(img_str))

    # reseed from OS entropy so every call draws fresh parameters
    np.random.seed()

    # random font size between 30 and 100
    font_size = np.random.randint(low=30, high=100)
    font = ImageFont.truetype("/home/tensorflow/Downloads/VOCdevkit/abel-regular.ttf", font_size)

    # grayscale canvas to hold the watermark text
    img_temp = Image.new('L', (350, 350))

    # random watermark string of 5 to 9 characters
    text_str = "".join(np.random.choice(printable, np.random.randint(low=5, high=10)))

    # draw the text onto the canvas with a random opacity level
    draw_temp = ImageDraw.Draw(img_temp)
    opacity = np.random.randint(low=90, high=120)
    draw_temp.text((0, 0), text_str, font=font, fill=opacity)

    # rotate the text by a random angle
    rot_int = np.random.randint(low=0, high=40)
    rotated_text = img_temp.rotate(rot_int, expand=1)

    # white text color
    col_1 = (255, 255, 255)
    col_2 = (255, 255, 255)

    # paste the colorized text at a random location; the text canvas
    # doubles as the alpha mask, so only the glyph pixels are blended
    rand_loc = tuple(np.random.randint(low=10, high=100, size=(2,)))
    img.paste(ImageOps.colorize(rotated_text, col_1, col_2), rand_loc, rotated_text)

    # save the watermarked image under the same file name
    img.save("/home/tensorflow/Downloads/VOCdevkit/WATS/{}".format(img_str))
In [4]:
# Prepare the output directory, recreating it if it already exists

wat_path = "/home/tensorflow/Downloads/VOCdevkit/WATS"

if os.path.exists(wat_path):
    shutil.rmtree(wat_path)
os.mkdir(wat_path)
In [5]:
# Some of the source JPEGs are truncated; without this flag PIL raises
# an error when it loads one
ImageFile.LOAD_TRUNCATED_IMAGES = True
In [6]:
# Generate a watermarked copy of every raw image

img_list = os.listdir("/home/tensorflow/Downloads/VOCdevkit/RAWS")

for img_str in img_list:
    gen_watermark(img_str)
In [7]:
# Show a few raw/watermarked pairs side by side

raw_str = "/home/tensorflow/Downloads/VOCdevkit/RAWS/"
wat_str = "/home/tensorflow/Downloads/VOCdevkit/WATS/"

# sort both listings so raw and watermarked files line up by name
dir_raw = sorted(os.listdir(raw_str))
dir_wat = sorted(os.listdir(wat_str))

f, axs = plt.subplots(3, 2, figsize=(25, 25), sharey=True, sharex=True)
axs = axs.ravel()

# left column: raw image, right column: its watermarked counterpart
for row, idx in enumerate([4, 5, 10]):
    axs[2 * row].imshow(Image.open(raw_str + dir_raw[idx]))
    axs[2 * row + 1].imshow(Image.open(wat_str + dir_wat[idx]))

plt.show()

Network Code

Transform inputs and labels

In [8]:
train_wat_path = "/home/tensorflow/Downloads/VOCdevkit/train/wat_imgs"
train_raw_path = "/home/tensorflow/Downloads/VOCdevkit/train/raw_imgs"
val_wat_path = "/home/tensorflow/Downloads/VOCdevkit/val/wat_imgs"
val_raw_path = "/home/tensorflow/Downloads/VOCdevkit/val/raw_imgs"
In [9]:
# Separate training and validation inputs/labels, recreating each
# directory if it already exists

for path in (train_raw_path, val_raw_path, train_wat_path, val_wat_path):
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
In [10]:
# Copy raw images over; sorting makes the 4900-image train/val split
# deterministic so the raw and watermarked splits stay aligned

raw_list = sorted(os.listdir("/home/tensorflow/Downloads/VOCdevkit/RAWS"))

for ind, img_str in enumerate(raw_list):
    img = Image.open("/home/tensorflow/Downloads/VOCdevkit/RAWS/{}".format(img_str))
    if ind < 4900:
        img.save("/home/tensorflow/Downloads/VOCdevkit/train/raw_imgs/{}".format(img_str))
    else:
        img.save("/home/tensorflow/Downloads/VOCdevkit/val/raw_imgs/{}".format(img_str))
In [11]:
# Copy watermarked images over, using the same sorted order

wat_list = sorted(os.listdir("/home/tensorflow/Downloads/VOCdevkit/WATS"))

for ind, img_str in enumerate(wat_list):
    img = Image.open("/home/tensorflow/Downloads/VOCdevkit/WATS/{}".format(img_str))
    if ind < 4900:
        img.save("/home/tensorflow/Downloads/VOCdevkit/train/wat_imgs/{}".format(img_str))
    else:
        img.save("/home/tensorflow/Downloads/VOCdevkit/val/wat_imgs/{}".format(img_str))
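
The cutoff at index 4900 leaves the remaining images for validation, and the counts must match the array shapes preallocated in the next cell (4900 training pairs, 196 validation pairs). An optional count check:

In [ ]:
# Optional: directory counts should be 4900/4900 and 196/196
for p in (train_wat_path, train_raw_path, val_wat_path, val_raw_path):
    print(p, len(os.listdir(p)))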
In [12]:
# Resize to 128x128 and scale pixel values to [0, 1]. Iterating in
# sorted order keeps each watermarked input aligned with its clean
# label, since the pairs share a file name.

x_train = np.ndarray(shape=(4900, 128, 128, 3), dtype=np.float32)
y_train = np.ndarray(shape=(4900, 128, 128, 3), dtype=np.float32)
x_val = np.ndarray(shape=(196, 128, 128, 3), dtype=np.float32)
y_val = np.ndarray(shape=(196, 128, 128, 3), dtype=np.float32)

def load_images(dir_path, arr):
    for i, file in enumerate(sorted(os.listdir(dir_path))):
        img = Image.open(os.path.join(dir_path, file))
        arr[i] = np.asarray(img.resize((128, 128)), dtype='float32') / 255

load_images(train_wat_path, x_train)
load_images(train_raw_path, y_train)
load_images(val_wat_path, x_val)
load_images(val_raw_path, y_val)
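
Preallocating these arrays costs about 4900 × 128 × 128 × 3 × 4 bytes ≈ 0.96 GB each for x_train and y_train, which fits in memory here. As an alternative sketch (not used in this notebook; make_dataset and load_pair are hypothetical names), a tf.data pipeline could stream the pairs from disk instead:

In [ ]:
import tensorflow as tf

def make_dataset(wat_dir, raw_dir, batch_size=32):
    # pair watermarked inputs with clean labels via their shared file names
    files = sorted(os.listdir(wat_dir))

    def load_pair(name):
        name = name.numpy().decode()
        x = np.asarray(Image.open(os.path.join(wat_dir, name)).resize((128, 128)), dtype='float32') / 255
        y = np.asarray(Image.open(os.path.join(raw_dir, name)).resize((128, 128)), dtype='float32') / 255
        return x, y

    def set_shapes(x, y):
        # tf.py_function erases static shapes; restore them for Keras
        x.set_shape((128, 128, 3))
        y.set_shape((128, 128, 3))
        return x, y

    ds = tf.data.Dataset.from_tensor_slices(files)
    ds = ds.map(lambda n: tf.py_function(load_pair, [n], (tf.float32, tf.float32)))
    ds = ds.map(set_shapes)
    return ds.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

Such a dataset can be passed to model.fit directly in place of the NumPy arrays.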
In [13]:
# Inspect a training input/label pair after the transform

fig, axs = plt.subplots(1, 2, figsize=(25, 25), sharey=True, sharex=True)

axs[0].imshow(x_train[0])
axs[1].imshow(y_train[0])
Out[13]:
<matplotlib.image.AxesImage at 0x7f0c99cd56d0>

Create model

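The model below follows the standard U-Net pattern: a four-stage convolutional encoder halves the spatial resolution and doubles the filter count at each stage, a 512-filter bottleneck sits at 8x8, and a four-stage decoder upsamples back to 128x128 with transposed convolutions. Each decoder stage concatenates the matching encoder feature map (a skip connection), letting the network reuse fine spatial detail when reconstructing the clean pixels.
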
In [14]:
class inpaintingModel:
  '''
  Build a U-Net-like model for the image inpainting task.
  '''
  def prepare_model(self, input_size=(128,128,3)):
    inputs = keras.layers.Input(input_size)

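    # encoder: successive blocks halve the spatial size and double the filters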
    conv1, pool1 = self.__ConvBlock(32, (3,3), (2,2), 'relu', 'same', inputs)
    pool1 = Dropout(0.25)(pool1)
    conv2, pool2 = self.__ConvBlock(64, (3,3), (2,2), 'relu', 'same', pool1)
    pool2 = Dropout(0.5)(pool2)
    conv3, pool3 = self.__ConvBlock(128, (3,3), (2,2), 'relu', 'same', pool2)
    pool3 = Dropout(0.5)(pool3)
    conv4, pool4 = self.__ConvBlock(256, (3,3), (2,2), 'relu', 'same', pool3)
    pool4 = Dropout(0.5)(pool4)
    
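    # decoder: upsample and concatenate the matching encoder features (skip connections)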
    conv5, up6 = self.__UpConvBlock(512, 256, (3,3), (2,2), (2,2), 'relu', 'same', pool4, conv4)
    up6 = Dropout(0.5)(up6)
    conv6, up7 = self.__UpConvBlock(256, 128, (3,3), (2,2), (2,2), 'relu', 'same', up6, conv3)
    up7 = Dropout(0.5)(up7)
    conv7, up8 = self.__UpConvBlock(128, 64, (3,3), (2,2), (2,2), 'relu', 'same', up7, conv2)
    up8 = Dropout(0.5)(up8)
    conv8, up9 = self.__UpConvBlock(64, 32, (3,3), (2,2), (2,2), 'relu', 'same', up8, conv1)
    up9 = Dropout(0.25)(up9)
    conv9 = self.__ConvBlock(32, (3,3), (2,2), 'relu', 'same', up9, False)
    
    outputs = keras.layers.Conv2D(3, (3, 3), activation='sigmoid', padding='same')(conv9)

    return keras.models.Model(inputs=[inputs], outputs=[outputs])  

  def __ConvBlock(self, filters, kernel_size, pool_size, activation, padding, connecting_layer, pool_layer=True):
    conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(connecting_layer)
    conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(conv)
    if pool_layer:
      pool = keras.layers.MaxPooling2D(pool_size)(conv)
      return conv, pool
    else:
      return conv

  def __UpConvBlock(self, filters, up_filters, kernel_size, up_kernel, up_stride, activation, padding, connecting_layer, shared_layer):
    conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(connecting_layer)
    conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(conv)
    up = keras.layers.Conv2DTranspose(filters=up_filters, kernel_size=up_kernel, strides=up_stride, padding=padding)(conv)
    up = keras.layers.concatenate([up, shared_layer], axis=3)

    return conv, up
In [15]:
model = inpaintingModel().prepare_model()
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 128, 128, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 128, 128, 32) 896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 128, 128, 32) 9248        conv2d[0][0]                     
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 64, 64, 32)   0           conv2d_1[0][0]                   
__________________________________________________________________________________________________
dropout (Dropout)               (None, 64, 64, 32)   0           max_pooling2d[0][0]              
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 64, 64, 64)   18496       dropout[0][0]                    
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 64, 64, 64)   36928       conv2d_2[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 32, 32, 64)   0           conv2d_3[0][0]                   
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 32, 32, 64)   0           max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 32, 32, 128)  73856       dropout_1[0][0]                  
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 32, 32, 128)  147584      conv2d_4[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 16, 16, 128)  0           conv2d_5[0][0]                   
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 16, 16, 128)  0           max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 16, 16, 256)  295168      dropout_2[0][0]                  
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 16, 16, 256)  590080      conv2d_6[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 8, 8, 256)    0           conv2d_7[0][0]                   
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 8, 8, 256)    0           max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 8, 8, 512)    1180160     dropout_3[0][0]                  
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 8, 8, 512)    2359808     conv2d_8[0][0]                   
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 16, 16, 256)  524544      conv2d_9[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 16, 16, 512)  0           conv2d_transpose[0][0]           
                                                                 conv2d_7[0][0]                   
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 16, 16, 512)  0           concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 16, 16, 256)  1179904     dropout_4[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 16, 16, 256)  590080      conv2d_10[0][0]                  
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 32, 32, 128)  131200      conv2d_11[0][0]                  
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 32, 32, 256)  0           conv2d_transpose_1[0][0]         
                                                                 conv2d_5[0][0]                   
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 32, 32, 256)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 32, 32, 128)  295040      dropout_5[0][0]                  
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 32, 32, 128)  147584      conv2d_12[0][0]                  
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 64, 64, 64)   32832       conv2d_13[0][0]                  
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 64, 64, 128)  0           conv2d_transpose_2[0][0]         
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 64, 64, 128)  0           concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 64, 64, 64)   73792       dropout_6[0][0]                  
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 64, 64, 64)   36928       conv2d_14[0][0]                  
__________________________________________________________________________________________________
conv2d_transpose_3 (Conv2DTrans (None, 128, 128, 32) 8224        conv2d_15[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 128, 128, 64) 0           conv2d_transpose_3[0][0]         
                                                                 conv2d_1[0][0]                   
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 128, 128, 64) 0           concatenate_3[0][0]              
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 128, 128, 32) 18464       dropout_7[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 128, 128, 32) 9248        conv2d_16[0][0]                  
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 128, 128, 3)  867         conv2d_17[0][0]                  
==================================================================================================
Total params: 7,760,931
Trainable params: 7,760,931
Non-trainable params: 0
__________________________________________________________________________________________________

Training

In [16]:
early_stopping = EarlyStopping(patience=10, verbose=1)
model_checkpoint = ModelCheckpoint("./keras.model", save_best_only=True, verbose=1)
reduce_lr = ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.00001, verbose=1)

history = model.fit(x_train, y_train,
                   validation_data=(x_val, y_val),
                   epochs=30,
                   batch_size=32,
                   callbacks=[early_stopping, model_checkpoint, reduce_lr])
Epoch 1/30
154/154 [==============================] - 132s 764ms/step - loss: 0.5882 - accuracy: 0.5445 - val_loss: 0.5275 - val_accuracy: 0.8164

Epoch 00001: val_loss improved from inf to 0.52747, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 2/30
154/154 [==============================] - 113s 735ms/step - loss: 0.5266 - accuracy: 0.7876 - val_loss: 0.5246 - val_accuracy: 0.8419

Epoch 00002: val_loss improved from 0.52747 to 0.52456, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 3/30
154/154 [==============================] - 114s 739ms/step - loss: 0.5215 - accuracy: 0.8043 - val_loss: 0.5226 - val_accuracy: 0.8693

Epoch 00003: val_loss improved from 0.52456 to 0.52256, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 4/30
154/154 [==============================] - 114s 738ms/step - loss: 0.5216 - accuracy: 0.8182 - val_loss: 0.5210 - val_accuracy: 0.8521

Epoch 00004: val_loss improved from 0.52256 to 0.52101, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 5/30
154/154 [==============================] - 114s 740ms/step - loss: 0.5205 - accuracy: 0.8236 - val_loss: 0.5200 - val_accuracy: 0.8616

Epoch 00005: val_loss improved from 0.52101 to 0.52001, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 6/30
154/154 [==============================] - 114s 742ms/step - loss: 0.5196 - accuracy: 0.8239 - val_loss: 0.5194 - val_accuracy: 0.8534

Epoch 00006: val_loss improved from 0.52001 to 0.51936, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 7/30
154/154 [==============================] - 114s 744ms/step - loss: 0.5193 - accuracy: 0.8306 - val_loss: 0.5199 - val_accuracy: 0.8710

Epoch 00007: val_loss did not improve from 0.51936
Epoch 8/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5190 - accuracy: 0.8328 - val_loss: 0.5202 - val_accuracy: 0.8559

Epoch 00008: val_loss did not improve from 0.51936
Epoch 9/30
154/154 [==============================] - 115s 746ms/step - loss: 0.5192 - accuracy: 0.8367 - val_loss: 0.5195 - val_accuracy: 0.8664

Epoch 00009: val_loss did not improve from 0.51936
Epoch 10/30
154/154 [==============================] - 115s 748ms/step - loss: 0.5188 - accuracy: 0.8373 - val_loss: 0.5198 - val_accuracy: 0.8769

Epoch 00010: val_loss did not improve from 0.51936
Epoch 11/30
154/154 [==============================] - 115s 748ms/step - loss: 0.5163 - accuracy: 0.8358 - val_loss: 0.5190 - val_accuracy: 0.8400

Epoch 00011: val_loss improved from 0.51936 to 0.51905, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 12/30
154/154 [==============================] - 115s 744ms/step - loss: 0.5186 - accuracy: 0.8335 - val_loss: 0.5193 - val_accuracy: 0.8613

Epoch 00012: val_loss did not improve from 0.51905
Epoch 13/30
154/154 [==============================] - 115s 745ms/step - loss: 0.5177 - accuracy: 0.8381 - val_loss: 0.5185 - val_accuracy: 0.8720

Epoch 00013: val_loss improved from 0.51905 to 0.51849, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 14/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5185 - accuracy: 0.8377 - val_loss: 0.5188 - val_accuracy: 0.8541

Epoch 00014: val_loss did not improve from 0.51849
Epoch 15/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5168 - accuracy: 0.8358 - val_loss: 0.5215 - val_accuracy: 0.8176

Epoch 00015: val_loss did not improve from 0.51849
Epoch 16/30
154/154 [==============================] - 115s 748ms/step - loss: 0.5181 - accuracy: 0.8273 - val_loss: 0.5177 - val_accuracy: 0.8593

Epoch 00016: val_loss improved from 0.51849 to 0.51772, saving model to ./keras.model
INFO:tensorflow:Assets written to: ./keras.model/assets
Epoch 17/30
154/154 [==============================] - 116s 750ms/step - loss: 0.5159 - accuracy: 0.8350 - val_loss: 0.5185 - val_accuracy: 0.8332

Epoch 00017: val_loss did not improve from 0.51772
Epoch 18/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5184 - accuracy: 0.8389 - val_loss: 0.5183 - val_accuracy: 0.8424

Epoch 00018: val_loss did not improve from 0.51772
Epoch 19/30
154/154 [==============================] - 115s 746ms/step - loss: 0.5178 - accuracy: 0.8434 - val_loss: 0.5183 - val_accuracy: 0.8688

Epoch 00019: val_loss did not improve from 0.51772
Epoch 20/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5170 - accuracy: 0.8327 - val_loss: 0.5182 - val_accuracy: 0.8720

Epoch 00020: val_loss did not improve from 0.51772
Epoch 21/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5168 - accuracy: 0.8462 - val_loss: 0.5183 - val_accuracy: 0.8682

Epoch 00021: val_loss did not improve from 0.51772

Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
Epoch 22/30
154/154 [==============================] - 115s 749ms/step - loss: 0.5172 - accuracy: 0.8465 - val_loss: 0.5179 - val_accuracy: 0.8762

Epoch 00022: val_loss did not improve from 0.51772
Epoch 23/30
154/154 [==============================] - 116s 750ms/step - loss: 0.5179 - accuracy: 0.8504 - val_loss: 0.5180 - val_accuracy: 0.8705

Epoch 00023: val_loss did not improve from 0.51772
Epoch 24/30
154/154 [==============================] - 115s 747ms/step - loss: 0.5171 - accuracy: 0.8483 - val_loss: 0.5179 - val_accuracy: 0.8772

Epoch 00024: val_loss did not improve from 0.51772
Epoch 25/30
154/154 [==============================] - 115s 748ms/step - loss: 0.5169 - accuracy: 0.8478 - val_loss: 0.5181 - val_accuracy: 0.8749

Epoch 00025: val_loss did not improve from 0.51772
Epoch 26/30
154/154 [==============================] - 115s 748ms/step - loss: 0.5177 - accuracy: 0.8507 - val_loss: 0.5179 - val_accuracy: 0.8740

Epoch 00026: val_loss did not improve from 0.51772

Epoch 00026: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
Epoch 00026: early stopping
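
The best validation loss (0.51772) is reached at epoch 16; with patience=10, EarlyStopping halts the run at epoch 26, after ReduceLROnPlateau has cut the learning rate twice.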
In [17]:
# Visualize the training process

fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15, 5))
ax_loss.plot(history.epoch, history.history["loss"], label="Train loss")
ax_loss.plot(history.epoch, history.history["val_loss"], label="Validation loss")
ax_loss.legend()
ax_acc.plot(history.epoch, history.history["accuracy"], label="Train accuracy")
ax_acc.plot(history.epoch, history.history["val_accuracy"], label="Validation accuracy")
ax_acc.legend()
plt.show()
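
One caveat when reading these curves: binary cross-entropy against real-valued pixel targets has a nonzero minimum, so the loss flattening near 0.52 does not by itself mean learning has stalled; the accuracy curves and the reconstructions below are more informative.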

Test

In [ ]:
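# Reload the best checkpoint saved by ModelCheckpoint during training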
model = load_model("./keras.model")
In [18]:
# Watermark generator for the test set: same procedure as
# gen_watermark, except the source image is first resized to a
# 512-pixel width and the rotation angle is capped at 30 degrees

def watermark(img_str):
    img = Image.open("/home/tensorflow/Downloads/VOCdevkit/TEST/RAW/{}".format(img_str))
    img = img.resize((512, round(img.size[1] * 512 / img.size[0])))

    # fresh random parameters on every call
    np.random.seed()

    font_size = np.random.randint(low=30, high=100)
    font = ImageFont.truetype("/home/tensorflow/Downloads/VOCdevkit/abel-regular.ttf", font_size)

    img_temp = Image.new('L', (350, 350))

    text_str = "".join(np.random.choice(printable, np.random.randint(low=5, high=10)))

    draw_temp = ImageDraw.Draw(img_temp)
    opacity = np.random.randint(low=90, high=120)
    draw_temp.text((0, 0), text_str, font=font, fill=opacity)

    rot_int = np.random.randint(low=0, high=30)
    rotated_text = img_temp.rotate(rot_int, expand=1)

    col_1 = (255, 255, 255)
    col_2 = (255, 255, 255)

    rand_loc = tuple(np.random.randint(low=10, high=100, size=(2,)))
    img.paste(ImageOps.colorize(rotated_text, col_1, col_2), rand_loc, rotated_text)

    img.save("/home/tensorflow/Downloads/VOCdevkit/TEST/WAT/{}".format(img_str))
    return img
In [27]:
fig, axs = plt.subplots(1, 3, figsize=(25, 25), sharey=True, sharex=True)

# The watermarked input
wat_img = watermark("1.jpg")
wat_img = np.asarray(wat_img.resize((128, 128)), dtype='float32') / 255

# The model's reconstruction (add a batch dimension for predict, then drop it)
rec_img = model.predict(wat_img[np.newaxis, ...])[0]

# The original image, resized for comparison
tr_img = Image.open("/home/tensorflow/Downloads/VOCdevkit/TEST/RAW/1.jpg")
tr_img = np.asarray(tr_img.resize((128, 128)), dtype='float32') / 255

axs[0].imshow(rec_img)
axs[1].imshow(wat_img)
axs[2].imshow(tr_img)
Out[27]:
<matplotlib.image.AxesImage at 0x7f0d0c947ee0>
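
As a rough quantitative check (not part of the original run), the reconstruction can be scored with peak signal-to-noise ratio. The arrays are already scaled to [0, 1], so the peak value is 1:

In [ ]:
# Hypothetical check: PSNR between the reconstruction and the clean image
# (higher is better)
mse = np.mean((rec_img - tr_img) ** 2)
psnr = 10 * np.log10(1.0 / mse)
print("PSNR: {:.2f} dB".format(psnr))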