import tensorflow as tf


def corr2d(X, K):
    """Compute the 2D cross-correlation of input `X` with kernel `K`.

    The kernel is slid across every valid position of `X`; at each position
    the sum of the elementwise product of the window and `K` is stored, so
    the result has shape (X.rows - K.rows + 1, X.cols - K.cols + 1).

    Args:
        X: 2D tensor, the input.
        K: 2D tensor, the kernel; must be no larger than `X` in either dim.

    Returns:
        A `tf.Variable` holding the cross-correlation output.
    """
    kernel_h, kernel_w = K.shape
    out_h = X.shape[0] - kernel_h + 1
    out_w = X.shape[1] - kernel_w + 1
    Y = tf.Variable(tf.zeros((out_h, out_w)))
    for row in range(out_h):
        for col in range(out_w):
            window = X[row:row + kernel_h, col:col + kernel_w]
            Y[row, col].assign(tf.reduce_sum(window * K))
    return Y
deflearning_a_kernel(input: Union[tf.Tensor, tf.Variable], wanted_output: Union[tf.Tensor, tf.Variable], kernel_shape): # Construct a two-dimensional convolutional layer with 1 output channel and a # kernel of kernal_shape. For the sake of simplicity, we ignore the bias here conv2d = tf.keras.layers.Conv2D(1, shape=kernel_shape, use_bias=False)
reshaped_input = tf.reshape(input, (1, input.shape[0], input.shape[1], 1)) reshaped_wanted_output = tf.reshape(wanted_output, (1, wanted_output.shape[0], wanted_output.shape[1], 1)) _ = conv2d(reshaped_input) # this is use to gen weights in layer for i inrange(10): with tf.GradientTape(watch_accessed_variables=False) as g: g.watch(conv2d.weights[0]) calculated_output = conv2d(reshaped_input) loss = (abs(calculated_output - reshaped_wanted_output)) ** 2