No gradients provided for any variable when using Lambda to round model output

I need to predict some integers from an image, and some of those integers are negative. I did some research and came across the Poisson loss, which handles count regression, but it does not work here because I also need to predict negative integers, which makes the Poisson loss output NaN. I then thought of using a Lambda layer to round the output of my model, but that resulted in this error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/var/folders/nc/c4mgwn897qbg8g52tp3mhbjr0000gp/T/ipykernel_8618/1788039059.py in <module>
----> 1 model.fit(x_train, y_train,callbacks=[callback], epochs = 999)

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1181                 _r=1):
   1182               callbacks.on_train_batch_begin(step)
-> 1183               tmp_logs = self.train_function(iterator)
   1184               if data_handler.should_sync:
   1185                 context.async_wait()

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
    887 
    888       with OptionalXlaContext(self._jit_compile):
--> 889         result = self._call(*args, **kwds)
    890 
    891       new_tracing_count = self.experimental_get_tracing_count()

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
    931       # This is the first call of __call__, so we have to initialize.
    932       initializers = []
--> 933       self._initialize(args, kwds, add_initializers_to=initializers)
    934     finally:
    935       # At this point we know that the initialization is complete (or less

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
    761     self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
    762     self._concrete_stateful_fn = (
--> 763         self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
    764             *args, **kwds))
    765 

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
   3048       args, kwargs = None, None
   3049     with self._lock:
-> 3050       graph_function, _ = self._maybe_define_function(args, kwargs)
   3051     return graph_function
   3052 

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
   3442 
   3443           self._function_cache.missed.add(call_context_key)
-> 3444           graph_function = self._create_graph_function(args, kwargs)
   3445           self._function_cache.primary[cache_key] = graph_function
   3446 

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   3277     arg_names = base_arg_names + missing_arg_names
   3278     graph_function = ConcreteFunction(
-> 3279         func_graph_module.func_graph_from_py_func(
   3280             self._name,
   3281             self._python_function,

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    997         _, original_func = tf_decorator.unwrap(python_func)
    998 
--> 999       func_outputs = python_func(*func_args, **func_kwargs)
   1000 
   1001       # invariant: `func_outputs` contains only Tensors, CompositeTensors,

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
    670         # the function a weak reference to itself to avoid a reference cycle.
    671         with OptionalXlaContext(compile_with_xla):
--> 672           out = weak_wrapped_fn().__wrapped__(*args, **kwds)
    673         return out
    674 

~/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
    984           except Exception as e:  # pylint:disable=broad-except
    985             if hasattr(e, "ag_error_metadata"):
--> 986               raise e.ag_error_metadata.to_exception(e)
    987             else:
    988               raise

ValueError: in user code:

    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:855 train_function  *
        return step_function(self, iterator)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:845 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1285 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2833 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:3608 _call_for_each_replica
        return fn(*args, **kwargs)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:838 run_step  **
        outputs = model.train_step(data)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:799 train_step
        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:530 minimize
        return self.apply_gradients(grads_and_vars, name=name)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:630 apply_gradients
        grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
    /Users/jr123456jr987654321/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/utils.py:75 filter_empty_gradients
        raise ValueError("No gradients provided for any variable: %s." %

    ValueError: No gradients provided for any variable: ['conv2d_2/kernel:0', 'conv2d_2/bias:0', 'conv2d_3/kernel:0', 'conv2d_3/bias:0', 'dense_3/kernel:0', 'dense_3/bias:0', 'dense_4/kernel:0', 'dense_4/bias:0', 'dense_5/kernel:0', 'dense_5/bias:0'].
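
While debugging, I noticed that TensorFlow does not seem to register any gradient for rounding at all, which is probably related (a minimal check, assuming my rounding helper wraps tf.round):

import tensorflow as tf

x = tf.Variable([0.3, 1.7, -2.4])
with tf.GradientTape() as tape:
    y = tf.round(x)              # piecewise constant, so not differentiable
    loss = tf.reduce_sum(y ** 2)

print(tape.gradient(loss, x))    # prints None: no gradient flows through tf.round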

Here is my implementation of the Lambda layer approach (imports included; rounding is shown as a plain tf.round wrapper):

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers
from tensorflow.keras.layers import GRU, add
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.models import Model

def rounding(x):
    # rounds each output to the nearest integer
    return tf.round(x)

filter_size = (3, 3)
filters = 32
pool = 2

input_layer = keras.Input(shape=(100, 300, 1))

conv_extractor = layers.Conv2D(filters, filter_size, activation='relu')(input_layer)
conv_extractor = layers.MaxPooling2D(pool_size=(pool, pool))(conv_extractor)
conv_extractor = layers.Conv2D(filters, filter_size, activation='relu')(conv_extractor)
conv_extractor = layers.MaxPooling2D(pool_size=(pool, pool))(conv_extractor)

# two (3x3 conv -> 2x2 pool) blocks turn (100, 300, 1) into (23, 73, 32),
# which is reshaped into 23 timesteps of 73 * 32 = 2336 features
conv_extractor = layers.Reshape(target_shape=(23, 2336))(conv_extractor)

gru_1 = GRU(512, return_sequences=True)(conv_extractor)
gru_1b = GRU(512, return_sequences=True, go_backwards=True)(conv_extractor)
gru1_merged = add([gru_1, gru_1b])
gru_2 = GRU(512, return_sequences=True)(gru1_merged)
gru_2b = GRU(512, return_sequences=True, go_backwards=True)(gru1_merged)

x = layers.concatenate([gru_2, gru_2b])
x = layers.Flatten()(x)
inner = layers.Dense(30, activation=layers.LeakyReLU())(x)
inner = layers.Dense(10, activation=layers.LeakyReLU())(inner)
inner = layers.Dense(3, activation=layers.LeakyReLU())(inner)
inner = layers.Lambda(rounding)(inner)

model = Model(input_layer, inner)
model.compile(loss=MeanSquaredError(), optimizer=optimizers.Adam(2e-4), metrics=['accuracy'])
model.fit(x_train, y_train, epochs=999)
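
One idea I came across while searching is a straight-through estimator: round in the forward pass, but let gradients pass through as if the layer were the identity (an untested sketch on my part, not something from the docs):

import tensorflow as tf

def rounding_ste(x):
    # forward pass: round(x); backward pass: the stop_gradient term
    # contributes no gradient, so backprop sees the identity function
    return x + tf.stop_gradient(tf.round(x) - x)

# drop-in replacement for the Lambda layer above:
# inner = layers.Lambda(rounding_ste)(inner)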

Why did I get this error, and how can I fix it? If it's not fixable, is there another way of solving my problem (e.g. by modifying the Poisson loss function)?
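
For the Poisson route, the modification I have in mind (my own assumption, with OFFSET as a hypothetical bound on the most negative target) would shift the targets into the non-negative range and constrain the predicted rate to be positive:

import tensorflow as tf

OFFSET = 10.0  # hypothetical: any constant >= |most negative target|

def shifted_poisson(y_true, y_pred):
    # shift targets so they are non-negative, and squash raw model
    # outputs through softplus so the predicted Poisson rate is > 0
    rate = tf.nn.softplus(y_pred)
    return tf.keras.losses.poisson(y_true + OFFSET, rate)

# model.compile(loss=shifted_poisson, optimizer=optimizers.Adam(2e-4))
# at inference time: tf.round(tf.nn.softplus(model(x))) - OFFSET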

Topic: ocr, keras, convolutional-neural-network, tensorflow, neural-network
