working
parent f4512bba99
commit db43239b3d
5 changed files with 311 additions and 197 deletions
@@ -5,6 +5,7 @@ import os
 import cv2
 import numpy as np
 import tensorflow as tf
+from tensorflow.keras import layers
 from featureExtraction import preprocess_frame, scale_crf, scale_speed_preset
 from globalVars import HEIGHT, LOGGER, NUM_COLOUR_CHANNELS, NUM_PRESET_SPEEDS, PRESET_SPEED_CATEGORIES, WIDTH
 
@@ -28,36 +29,6 @@ def combine_batch(frame, crf, speed, include_controls=True, resize=True):
     return np.concatenate(combined, axis=-1)
 
 
-def process_video(video):
-    base_dir = os.path.dirname("test_data/validation/validation.json")
-
-    cap_compressed = cv2.VideoCapture(os.path.join(base_dir, video["compressed_video_file"]))
-    cap_uncompressed = cv2.VideoCapture(os.path.join(base_dir, video["original_video_file"]))
-
-    compressed_frames = []
-    uncompressed_frames = []
-
-    while True:
-        ret_compressed, compressed_frame = cap_compressed.read()
-        ret_uncompressed, uncompressed_frame = cap_uncompressed.read()
-
-        if not ret_compressed or not ret_uncompressed:
-            break
-
-        CRF = scale_crf(video["crf"])
-        SPEED = scale_speed_preset(PRESET_SPEED_CATEGORIES.index(video["preset_speed"]))
-
-        compressed_combined = combine_batch(compressed_frame, CRF, SPEED, include_controls=False)
-        uncompressed_combined = combine_batch(uncompressed_frame, 0, scale_speed_preset(PRESET_SPEED_CATEGORIES.index("veryslow")))
-
-        compressed_frames.append(compressed_combined)
-        uncompressed_frames.append(uncompressed_combined)
-
-    cap_compressed.release()
-    cap_uncompressed.release()
-
-    return uncompressed_frames, compressed_frames
-
 
 def frame_generator(videos, max_frames=None):
     base_dir = "test_data/validation/"
@@ -76,10 +47,10 @@ def frame_generator(videos, max_frames=None):
             CRF = scale_crf(video["crf"])
             SPEED = scale_speed_preset(PRESET_SPEED_CATEGORIES.index(video["preset_speed"]))
 
-            compressed_combined = combine_batch(compressed_frame, CRF, SPEED, include_controls=False)
-            uncompressed_combined = combine_batch(uncompressed_frame, 10, scale_speed_preset(PRESET_SPEED_CATEGORIES.index("veryslow")))
+            validation = combine_batch(compressed_frame, CRF, SPEED, include_controls=False)
+            training = combine_batch(uncompressed_frame, 10, scale_speed_preset(PRESET_SPEED_CATEGORIES.index("veryslow")))
 
-            yield uncompressed_combined, compressed_combined
+            yield training, validation
 
             frame_count += 1
             if max_frames is not None and frame_count >= max_frames:
@@ -104,7 +75,7 @@ def create_dataset(videos, batch_size, max_frames=None):
         output_signature=output_signature
     )
 
-    dataset = dataset.shuffle(100).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
+    dataset = dataset.batch(batch_size).shuffle(20).prefetch(1) #.prefetch(tf.data.experimental.AUTOTUNE)
 
     return dataset
 
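Aside (illustration only, not part of the commit): moving shuffle() after batch() changes what gets shuffled. The old pipeline shuffled individual frames with a 100-element buffer; the new one shuffles whole batches with a 20-batch buffer. A minimal tf.data sketch of the difference:

import tensorflow as tf

ds = tf.data.Dataset.range(8)

# shuffle() before batch(): individual elements are mixed across batches
for b in ds.shuffle(8, seed=1).batch(4):
    print(b.numpy())  # e.g. [5 2 7 0], [3 6 1 4]

# batch() before shuffle(), as in this commit: each batch stays contiguous,
# only the order of the batches themselves is shuffled
for b in ds.batch(4).shuffle(2, seed=1):
    print(b.numpy())  # e.g. [4 5 6 7], [0 1 2 3]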
@@ -113,29 +84,36 @@ def create_dataset(videos, batch_size, max_frames=None):
 class VideoCompressionModel(tf.keras.Model):
     def __init__(self):
         super(VideoCompressionModel, self).__init__()
         LOGGER.debug("Initializing VideoCompressionModel.")
 
         # Input shape (includes channels for CRF and SPEED_PRESET)
-        input_shape_with_histogram = (None, None, NUM_COLOUR_CHANNELS + 2)
+        input_shape = (None, None, NUM_COLOUR_CHANNELS + 2)
 
         # Encoder part of the model
         self.encoder = tf.keras.Sequential([
-            tf.keras.layers.InputLayer(input_shape=input_shape_with_histogram),
-            tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
-            tf.keras.layers.MaxPooling2D((2, 2), padding='same'),
-            tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
-            tf.keras.layers.MaxPooling2D((2, 2), padding='same')
+            layers.InputLayer(input_shape=input_shape),
+            layers.Conv2D(64, (3, 3), padding='same'),
+            #layers.BatchNormalization(),
+            layers.LeakyReLU(),
+            layers.MaxPooling2D((2, 2), padding='same'),
+            layers.SeparableConv2D(32, (3, 3), padding='same'), # Using Separable Convolution
+            #layers.BatchNormalization(),
+            layers.LeakyReLU(),
+            layers.MaxPooling2D((2, 2), padding='same')
         ])
 
         # Decoder part of the model
         self.decoder = tf.keras.Sequential([
-            tf.keras.layers.Conv2DTranspose(32, (3, 3), activation='relu', padding='same'),
-            tf.keras.layers.UpSampling2D((2, 2)),
-            tf.keras.layers.Conv2DTranspose(64, (3, 3), activation='relu', padding='same'),
-            tf.keras.layers.UpSampling2D((2, 2)),
-            tf.keras.layers.Conv2DTranspose(NUM_COLOUR_CHANNELS, (3, 3), activation='sigmoid', padding='same')
+            layers.Conv2DTranspose(32, (3, 3), padding='same'),
+            #layers.BatchNormalization(),
+            layers.LeakyReLU(),
+            layers.Conv2DTranspose(64, (3, 3), dilation_rate=2, padding='same'), # Using Dilated Convolution
+            #layers.BatchNormalization(),
+            layers.LeakyReLU(),
+            # Use Sub-Pixel Convolutional Layer
+            layers.Conv2DTranspose(NUM_COLOUR_CHANNELS * 16, (3, 3), padding='same'), # 16 times the number of color channels
+            layers.Lambda(lambda x: tf.nn.depth_to_space(x, block_size=4)) # Sub-Pixel Convolutional Layer with block_size=4
         ])
 
     def call(self, inputs):
-        return self.decoder(self.encoder(inputs))
+        encoded = self.encoder(inputs)
+        return self.decoder(encoded)
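Note on the new decoder tail (an illustrative sketch, not taken from this repository): a Conv2DTranspose with NUM_COLOUR_CHANNELS * 16 filters followed by tf.nn.depth_to_space(block_size=4) implements sub-pixel (pixel-shuffle) upsampling: the 16x channel expansion is traded for a 4x larger spatial grid, which undoes the two 2x2 max-pooling steps in the encoder. Assuming NUM_COLOUR_CHANNELS is 3:

import tensorflow as tf

num_colour_channels = 3  # assumed value of NUM_COLOUR_CHANNELS
x = tf.zeros((1, 32, 32, num_colour_channels * 16))  # decoder features before the Lambda layer
y = tf.nn.depth_to_space(x, block_size=4)  # rearrange each 48-channel pixel into a 4x4 patch of 3 channels
print(y.shape)  # (1, 128, 128, 3)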
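Similarly, the SeparableConv2D swapped into the encoder factors a standard convolution into a depthwise pass plus a 1x1 pointwise pass, which needs far fewer parameters than a full Conv2D. A small comparison sketch (illustration only, assuming a 64-channel input feature map):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.zeros((1, 16, 16, 64))  # toy feature map with 64 channels
conv = layers.Conv2D(32, (3, 3), padding='same')
sep = layers.SeparableConv2D(32, (3, 3), padding='same')
conv(x)  # build the layers so their weights exist
sep(x)
print(conv.count_params())  # 18464
print(sep.count_params())   # 2656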