How to save a TensorFlow model using estimator.export_savedmodel()

How can I save a TensorFlow model using estimator.export_savedmodel()?

In particular, what should I put inside serving_input_receiver_fn()?

I have created a custom Estimator based on the VGGNet architecture. I am using my own images and applying some transformations to them (you can see these in _parse_function()).

I have read the documentation here, but I am not exactly sure what to write for my own code (please see below). Ultimately, I want to save the model and serve it with TensorFlow Serving. My rough attempt at the serving function is at the end of the post.

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    import tensorflow as tf
    import numpy as np
    from sklearn.model_selection import train_test_split
    import os
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import scipy
    from scipy import ndimage
    import scipy.misc

    tf.logging.set_verbosity(tf.logging.INFO)

    def cnn_model_fn(features, labels, mode):
        """Model function for CNN."""
        # Input Layer
        input_layer = tf.reshape(features, [-1, 224, 224, 3])

        # Convolutional Layer #1
        conv1 = tf.layers.conv2d(
          inputs=input_layer,
          filters=64,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Convolutional Layer #2
        conv2 = tf.layers.conv2d(
          inputs=conv1,
          filters=64,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Pooling Layer #1
        pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

        # Convolutional Layer #3
        conv3 = tf.layers.conv2d(
          inputs=pool1,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #4
        conv4 = tf.layers.conv2d(
          inputs=conv3,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Pooling Layer #2
        pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

        # Convolutional Layer #5
        conv5 = tf.layers.conv2d(
          inputs=pool2,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #6
        conv6 = tf.layers.conv2d(
          inputs=conv5,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #7
        conv7 = tf.layers.conv2d(
          inputs=conv6,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Pooling Layer #3
        pool3 = tf.layers.max_pooling2d(inputs=conv7, pool_size=[2, 2], strides=2)

        # Convolutional Layer #8
        conv8 = tf.layers.conv2d(
          inputs=pool3,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #9
        conv9 = tf.layers.conv2d(
          inputs=conv8,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #10
        conv10 = tf.layers.conv2d(
          inputs=conv9,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Pooling Layer #4
        pool4 = tf.layers.max_pooling2d(inputs=conv10, pool_size=[2, 2], strides=2)

        # Convolutional Layer #11
        conv11 = tf.layers.conv2d(
          inputs=pool4,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #12
        conv12 = tf.layers.conv2d(
          inputs=conv11,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)

        # Convolutional Layer #13
        conv13 = tf.layers.conv2d(
          inputs=conv12,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Pooling Layer #5
        pool5 = tf.layers.max_pooling2d(inputs=conv13, pool_size=[2, 2], strides=2)

        # Dense Layers
        pool5_flat = tf.reshape(pool5, [-1, 7 * 7 * 512])
        dense1 = tf.layers.dense(inputs=pool5_flat, units=4096, activation=tf.nn.relu)
        dense2 = tf.layers.dense(inputs=dense1, units=4096, activation=tf.nn.relu)
        dense3 = tf.layers.dense(inputs=dense2, units=1024, activation=tf.nn.relu)

        dropout = tf.layers.dropout(
          inputs=dense3, rate=0.001, training=mode == tf.estimator.ModeKeys.TRAIN)

        # Two heads: front/back (2 classes) and rotation (4 classes)
        logits1 = tf.layers.dense(inputs=dropout, units=2)
        logits2 = tf.layers.dense(inputs=dropout, units=4)

        predictions = {
            "classes1": tf.argmax(input=logits1, axis=1),
            "classes2": tf.argmax(input=logits2, axis=1),
            "probabilities1": tf.nn.softmax(logits1, name="softmax_tensor_1"),
            "probabilities2": tf.nn.softmax(logits2, name="softmax_tensor_2")
        }

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

        # Calculate Loss (for both TRAIN and EVAL modes)
        loss1 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:, 0], logits=logits1)
        loss2 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:, 1], logits=logits2)
        loss = loss1 + loss2

        # Configure the Training Op (for TRAIN mode)
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            train_op = optimizer.minimize(
                loss=loss,
                global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

        # Add evaluation metrics (for EVAL mode)
        eval_metric_ops = {
            "accuracy1": tf.metrics.accuracy(
                labels=labels[:, 0], predictions=predictions["classes1"]),
            "accuracy2": tf.metrics.accuracy(
                labels=labels[:, 1], predictions=predictions["classes2"]),
            "precision1": tf.metrics.precision(labels=labels[:, 0], predictions=predictions["classes1"]),
            "precision2": tf.metrics.precision(labels=labels[:, 1], predictions=predictions["classes2"]),
            "recall1": tf.metrics.recall(labels=labels[:, 0], predictions=predictions["classes1"]),
            "recall2": tf.metrics.recall(labels=labels[:, 1], predictions=predictions["classes2"])
        }

        return tf.estimator.EstimatorSpec(
          mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

    def _parse_function(filename, label):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_image(image_string)
        image_typecasted = tf.cast(image_decoded, tf.float32)
        image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
        return image_reshaped, label

    # Variant without a label, used by the prediction input function below.
    def _parse_function_predict(filename):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_image(image_string)
        image_typecasted = tf.cast(image_decoded, tf.float32)
        image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
        return image_reshaped

    def stratified_train_test_split_():
        filenamelist = []
        labelslist = []
        DIRECTORY = 'path_to'

        for filename in os.listdir(DIRECTORY):
            fullfilename = DIRECTORY + filename
            if filename.endswith('.back.0.jpg'):
                # back image, original orientation
                filenamelist.append(fullfilename)
                temp = [0, 0]
                labelslist.append(temp)

            elif filename.endswith('.back.90.jpg'):
                # back image, rotated clockwise 90
                filenamelist.append(fullfilename)
                temp = [0, 1]
                labelslist.append(temp)

            elif filename.endswith('.back.180.jpg'):
                # back image, rotated clockwise 180
                filenamelist.append(fullfilename)
                temp = [0, 2]
                labelslist.append(temp)

            elif filename.endswith('.back.270.jpg'):
                # back image, rotated clockwise 270
                filenamelist.append(fullfilename)
                temp = [0, 3]
                labelslist.append(temp)

            elif filename.endswith('.front.0.jpg'):
                # front image, original orientation
                filenamelist.append(fullfilename)
                temp = [1, 0]
                labelslist.append(temp)

            elif filename.endswith('.front.90.jpg'):
                # front image, rotated clockwise 90
                filenamelist.append(fullfilename)
                temp = [1, 1]
                labelslist.append(temp)

            elif filename.endswith('.front.180.jpg'):
                # front image, rotated clockwise 180
                filenamelist.append(fullfilename)
                temp = [1, 2]
                labelslist.append(temp)

            elif filename.endswith('.front.270.jpg'):
                # front image, rotated clockwise 270
                filenamelist.append(fullfilename)
                temp = [1, 3]
                labelslist.append(temp)

        X_train, X_test, y_train, y_test = train_test_split(
            filenamelist, labelslist, test_size=0.20, random_state=42,
            shuffle=True, stratify=labelslist)
        return X_train, X_test, y_train, y_test


    def my_input_fn_train(X_train, y_train):
        filenames = tf.constant(X_train)
        labels = tf.constant(y_train)
        dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
        dataset = dataset.map(_parse_function)
        # Shuffle, repeat, and batch the examples.
        dataset = dataset.shuffle(5000).repeat().batch(64)
        # Build the Iterator, and return the read end of the pipeline.
        return dataset.make_one_shot_iterator().get_next()

    def my_input_fn_test(X_test, y_test):
        filenames = tf.constant(X_test)
        labels = tf.constant(y_test)
        dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
        dataset = dataset.map(_parse_function)
        # Shuffle and batch the examples; a single pass over the test set.
        dataset = dataset.shuffle(5000).repeat(1).batch(64)
        # Build the Iterator, and return the read end of the pipeline.
        return dataset.make_one_shot_iterator().get_next()

    def my_input_fn_predict(filename):
        filenames = tf.constant(filename)
        dataset = tf.data.Dataset.from_tensors(filenames)
        dataset = dataset.map(_parse_function_predict)
        return dataset.make_one_shot_iterator().get_next()

    def main(unused_argv):
        # Create the Estimator
        mnist_classifier = tf.estimator.Estimator(
            model_fn=cnn_model_fn,
            model_dir="path_to_model_directory",
            config=tf.estimator.RunConfig(
                save_checkpoints_steps=None,
                save_checkpoints_secs=600,
                save_summary_steps=5))

        # Set up logging for predictions
        tensors_to_log_1 = {"probabilities1": "softmax_tensor_1"}
        tensors_to_log_2 = {"probabilities2": "softmax_tensor_2"}
        logging_hook_1 = tf.train.LoggingTensorHook(
            tensors=tensors_to_log_1, every_n_iter=100)
        logging_hook_2 = tf.train.LoggingTensorHook(
            tensors=tensors_to_log_2, every_n_iter=100)

        # Split the data into stratified train and test sets
        X_train, X_test, y_train, y_test = stratified_train_test_split_()

        # Training, evaluation and prediction calls removed for brevity.

        # Code for exporting the model with export_savedmodel()
        def serving_input_receiver_fn():
            # ???? <- this is the part I don't know how to write
            pass

        mnist_classifier.export_savedmodel("path_to_export_directory", serving_input_receiver_fn)

    if __name__ == "__main__":
        tf.app.run()
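
Based on the SavedModel documentation, here is my current guess for the serving function. It is only a sketch under several of my own assumptions: that clients send a batch of serialized JPEG/PNG bytes under a key I arbitrarily named "image", that the incoming images are already 224x224 RGB, and that my TensorFlow version has tf.estimator.export.TensorServingInputReceiver (I picked it over ServingInputReceiver because cnn_model_fn treats features as a single tensor rather than a dict). Is something like this correct?

    def serving_input_receiver_fn():
        # Assumption: the client sends a batch of serialized image bytes.
        serialized_images = tf.placeholder(
            dtype=tf.string, shape=[None], name="input_image_bytes")

        def _decode_and_preprocess(image_string):
            # Mirrors the preprocessing in _parse_function().
            image_decoded = tf.image.decode_image(image_string, channels=3)
            image_typecasted = tf.cast(image_decoded, tf.float32)
            # Assumption: the images are already 224x224 RGB.
            return tf.reshape(image_typecasted, [224, 224, 3])

        images = tf.map_fn(
            _decode_and_preprocess, serialized_images, dtype=tf.float32)

        # TensorServingInputReceiver passes `images` to the model_fn as a
        # single tensor; ServingInputReceiver would wrap a lone tensor in a
        # dict, which cnn_model_fn does not expect.
        return tf.estimator.export.TensorServingInputReceiver(
            features=images,
            receiver_tensors={"image": serialized_images})

If that is on the right track, I would export with export_savedmodel() as above and check the resulting signatures with saved_model_cli show --dir path_to_export_directory/<timestamp> --all before pointing TensorFlow Serving at the directory.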

