Added comments

Jordon Brooks 2023-08-13 17:46:14 +01:00
parent c157549fe4
commit b02503dca8
3 changed files with 47 additions and 24 deletions

View file

@@ -52,4 +52,4 @@ def preprocess_frame(frame):
     combined_feature = np.stack([edge_feature, histogram_feature_image], axis=-1)
     compressed_frame = frame / 255.0  # Assuming the frame is uint8, scale to [0, 1]
-    return combined_feature, compressed_frame
+    return compressed_frame
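
With this change preprocess_frame returns only the [0, 1]-scaled frame instead of a (combined_feature, compressed_frame) tuple, so callers drop the tuple unpacking. A minimal sketch of the new calling convention ("example.mp4" is a placeholder path, not a file from the repository):

import cv2
from featureExtraction import preprocess_frame

cap = cv2.VideoCapture("example.mp4")
ret, raw_frame = cap.read()
if ret:
    frame = preprocess_frame(raw_frame)  # now returns only the normalised frame
    print(frame.shape, frame.min(), frame.max())  # values lie in [0, 1]
cap.release()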

View file

@@ -1,8 +1,12 @@
+# gobalVars.py
 import log
 
 LOGGER = log.Logger(level="DEBUG", logfile="training.log", reset_logfile=True)
 
-NUM_CHANNELS = 3
+PRESET_SPEED_CATEGORIES = ["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow"]
+NUM_PRESET_SPEEDS = len(PRESET_SPEED_CATEGORIES)
+NUM_COLOUR_CHANNELS = 3
 WIDTH = 640
 HEIGHT = 360
 MAX_FRAMES = 0
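
For reference, the new globals are plain module-level constants; a small sanity-check sketch (the asserted values follow directly from the definitions above):

from globalVars import PRESET_SPEED_CATEGORIES, NUM_PRESET_SPEEDS, NUM_COLOUR_CHANNELS

assert NUM_PRESET_SPEEDS == 9        # nine preset names in the list
assert NUM_COLOUR_CHANNELS == 3      # matches the model's new input depth
print(PRESET_SPEED_CATEGORIES.index("medium"))  # prints 5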

View file

@@ -5,11 +5,8 @@ import cv2
 import numpy as np
 import tensorflow as tf
 from featureExtraction import preprocess_frame
-from globalVars import HEIGHT, LOGGER, WIDTH
-
-#PRESET_SPEED_CATEGORIES = ["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow"]
-#NUM_PRESET_SPEEDS = len(PRESET_SPEED_CATEGORIES)
+from globalVars import HEIGHT, LOGGER, NUM_COLOUR_CHANNELS, WIDTH
 
 #from tensorflow.keras.mixed_precision import Policy
@@ -17,43 +14,62 @@ from globalVars import HEIGHT, LOGGER, WIDTH
 #tf.keras.mixed_precision.set_global_policy(policy)
 
 def data_generator(videos, batch_size):
+    # Infinite loop to keep generating batches
     while True:
+        # Iterate over each video
         for video_details in videos:
+            # Get the paths for compressed and original (uncompressed) video files
             video_path = os.path.join(os.path.dirname("test_data/validation/validation.json"), video_details["compressed_video_file"])
-            cap = cv2.VideoCapture(video_path)
+            uncompressed_video_path = os.path.join(os.path.dirname("test_data/validation/validation.json"), video_details["original_video_file"])
 
-            feature_batch = []
-            compressed_frame_batch = []
+            # Open the video files
+            cap_compressed = cv2.VideoCapture(video_path)
+            cap_uncompressed = cv2.VideoCapture(uncompressed_video_path)
 
-            while cap.isOpened():
-                ret, frame = cap.read()
-                if not ret:
+            # Lists to store the processed frames
+            compressed_frame_batch = []  # Input data (Training)
+            uncompressed_frame_batch = []  # Target data (Target)
+
+            # Read and process frames from both videos
+            while cap_compressed.isOpened() and cap_uncompressed.isOpened():
+                ret_compressed, compressed_frame = cap_compressed.read()
+                ret_uncompressed, uncompressed_frame = cap_uncompressed.read()
+                if not ret_compressed or not ret_uncompressed:
                     break
 
-                combined_feature, compressed_frame = preprocess_frame(frame)
-
-                feature_batch.append(combined_feature)
+                # Preprocess the compressed frame (input)
+                compressed_frame = preprocess_frame(compressed_frame)
+
+                # Preprocess the uncompressed frame (target)
+                uncompressed_frame = preprocess_frame(uncompressed_frame)  # Modify if different preprocessing is needed for target frames
+
+                # Append processed frames to batches
                 compressed_frame_batch.append(compressed_frame)
+                uncompressed_frame_batch.append(uncompressed_frame)
 
-                if len(feature_batch) == batch_size:
-                    yield (np.array(feature_batch), np.array(compressed_frame_batch))
-                    feature_batch = []
+                # If batch is complete, yield it
+                if len(compressed_frame_batch) == batch_size:
+                    yield (np.array(compressed_frame_batch), np.array(uncompressed_frame_batch))  # Yielding Training and Target data
                     compressed_frame_batch = []
+                    uncompressed_frame_batch = []
 
-            cap.release()
+            # Release video files
+            cap_compressed.release()
+            cap_uncompressed.release()
 
             # If there are frames left that don't fill a whole batch, send them anyway
-            if len(feature_batch) > 0:
-                yield (np.array(feature_batch), np.array(compressed_frame_batch))
+            if len(compressed_frame_batch) > 0:
+                yield (np.array(compressed_frame_batch), np.array(uncompressed_frame_batch))
 
 
 class VideoCompressionModel(tf.keras.Model):
     def __init__(self):
         super(VideoCompressionModel, self).__init__()
         LOGGER.debug("Initializing VideoCompressionModel.")
 
-        # Add an additional channel for the histogram features
-        input_shape_with_histogram = (HEIGHT, WIDTH, 2)  # 1 channel for edges, 1 for histogram
+        # Input shape (includes channels for edges and histogram)
+        input_shape_with_histogram = (HEIGHT, WIDTH, NUM_COLOUR_CHANNELS)
 
+        # Encoder part of the model
         self.encoder = tf.keras.Sequential([
             tf.keras.layers.InputLayer(input_shape=input_shape_with_histogram),
             tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
@@ -62,6 +78,7 @@ class VideoCompressionModel(tf.keras.Model):
             tf.keras.layers.MaxPooling2D((2, 2), padding='same')
         ])
 
+        # Decoder part of the model
         self.decoder = tf.keras.Sequential([
             tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='relu', padding='same'),
             tf.keras.layers.UpSampling2D((2, 2)),
@@ -71,6 +88,8 @@ class VideoCompressionModel(tf.keras.Model):
         ])
 
     def call(self, inputs):
+        # Encode the input
         encoded = self.encoder(inputs)
+        # Decode the encoded representation
         decoded = self.decoder(encoded)
         return decoded
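
Taken together, data_generator yields (compressed, uncompressed) frame batches and VideoCompressionModel maps the compressed frames back towards the originals. A minimal sketch of wiring the two into a training run, assuming this file is importable as video_compression_model and that validation.json is a JSON list of entries with compressed_video_file / original_video_file keys; the optimizer, loss, batch size and step counts are illustrative assumptions, not the repository's actual training script:

import json
from video_compression_model import VideoCompressionModel, data_generator  # module name assumed

# Load the manifest the generator builds its paths from (path taken from the diff above).
with open("test_data/validation/validation.json") as f:
    videos = json.load(f)

model = VideoCompressionModel()
model.compile(optimizer="adam", loss="mse")  # assumed settings

# steps_per_epoch is needed because the generator loops forever.
model.fit(data_generator(videos, batch_size=4), steps_per_epoch=100, epochs=1)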