ETRI IVCL 1.0.0
Acceleration SW Platform for Ondevice
Classes | Functions | Variables
total_loss_layer Namespace Reference

Classes

class  TotalLossLayer
 

Functions

def forward (self, bottom, top)
 
def backward (self, bottom, top)
 

Variables

 res = absolute_difference(labels, predictions)
 
int variance_loss = 0
 
 temp1 = tf.reduce_mean(y_GT[:,:,:,(i*8):(i*8)+8], axis=-1)
 
 temp2 = tf.reduce_mean(pred_LF_loss[:,:,:,(i*8):(i*8)+8], axis=-1)
 
 temp3 = tf.reduce_mean(y_GT_H[:,:,:,(i*8):(i*8)+8], axis=-1)
 
 temp4 = tf.reduce_mean(pred_LF_loss_H[:,:,:,(i*8):(i*8)+8], axis=-1)
 
 mean = tf.where(tf.is_nan(mean), tf.zeros_like(mean), mean)
 
 variance = tf.where(tf.is_nan(variance), tf.zeros_like(variance), variance)
 
 mean2 = tf.where(tf.is_nan(mean2), tf.zeros_like(mean2), mean2)
 
 variance2 = tf.where(tf.is_nan(variance2), tf.zeros_like(variance2), variance2)
 
 mean_loss = tf.losses.absolute_difference(mean, mean2)
 
tuple tv_loss_x = (total_variation_self(flow_LF[:,:,:,0::2]))
 
tuple tv_loss_y = (total_variation_self(flow_LF[:,:,:,1::2]))
 
 tv_loss = tf.reduce_mean(tv_loss_x) + tf.reduce_mean(tv_loss_y)
 
tuple Total_Loss = (LAMBDA_L1 * pixel_loss_V) + (LAMBDA_L1 * pixel_loss_H) \
 
 src_h
 L_Loss. More...
 
 gt_h
 

Function Documentation

◆ backward()

def total_loss_layer.backward (   self,
  bottom,
  top 
)

◆ forward()

def total_loss_layer.forward (   self,
  bottom,
  top 
)

Variable Documentation

◆ gt_h

total_loss_layer.gt_h

◆ mean

total_loss_layer.mean = tf.where(tf.is_nan(mean), tf.zeros_like(mean), mean)

◆ mean2

total_loss_layer.mean2 = tf.where(tf.is_nan(mean2), tf.zeros_like(mean2), mean2)

◆ mean_loss

total_loss_layer.mean_loss = tf.losses.absolute_difference(mean, mean2)

◆ res

total_loss_layer.res = absolute_difference(labels, predictions)

◆ src_h

total_loss_layer.src_h

L_Loss.

◆ temp1

total_loss_layer.temp1 = tf.reduce_mean(y_GT[:,:,:,(i*8):(i*8)+8], axis=-1)

◆ temp2

total_loss_layer.temp2 = tf.reduce_mean(pred_LF_loss[:,:,:,(i*8):(i*8)+8], axis=-1)

◆ temp3

total_loss_layer.temp3 = tf.reduce_mean(y_GT_H[:,:,:,(i*8):(i*8)+8], axis=-1)

◆ temp4

total_loss_layer.temp4 = tf.reduce_mean(pred_LF_loss_H[:,:,:,(i*8):(i*8)+8], axis=-1)

◆ Total_Loss

tuple total_loss_layer.Total_Loss = (LAMBDA_L1 * pixel_loss_V) + (LAMBDA_L1 * pixel_loss_H) \

◆ tv_loss

total_loss_layer.tv_loss = tf.reduce_mean(tv_loss_x) + tf.reduce_mean(tv_loss_y)

◆ tv_loss_x

tuple total_loss_layer.tv_loss_x = (total_variation_self(flow_LF[:,:,:,0::2]))

◆ tv_loss_y

tuple total_loss_layer.tv_loss_y = (total_variation_self(flow_LF[:,:,:,1::2]))

◆ variance

total_loss_layer.variance = tf.where(tf.is_nan(variance), tf.zeros_like(variance), variance)

◆ variance2

total_loss_layer.variance2 = tf.where(tf.is_nan(variance2), tf.zeros_like(variance2), variance2)

◆ variance_loss

int total_loss_layer.variance_loss = 0