To extract features from 22241.mp4 with a pretrained video model in PyTorch, you can read the frames with OpenCV, stack them into a tensor, and run a single forward pass.

First, import the required libraries:

import cv2
import numpy as np
import torch
from torchvision import models

Next, define a helper that reads the video with OpenCV, converts each frame from BGR to RGB, resizes it, and subsamples the frames to an approximate target frame rate:

def load_video(video_path, target_resolution=(224, 224), frame_rate=16):
    cap = cv2.VideoCapture(video_path)
    src_fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 fps if the source rate is unknown
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR; the model expects RGB
        frame = cv2.resize(frame, target_resolution)
        frames.append(frame)
    cap.release()
    if not frames:
        raise ValueError(f'No frames could be read from {video_path}')
    # Keep every n-th frame to approximate the target frame rate
    step = max(1, round(src_fps / frame_rate))
    selected_frames = frames[::step]
    # Stack into a (C, T, H, W) float tensor scaled to [0, 1]
    frames_tensor = torch.from_numpy(np.stack(selected_frames)).permute(3, 0, 1, 2).float() / 255.
    return frames_tensor
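Pretrained video models are typically trained on inputs normalized with dataset statistics rather than raw [0, 1] values. As an optional refinement, here is a minimal normalization sketch; normalize_clip is a helper name introduced here, and the constants are the Kinetics-400 mean/std that torchvision documents for its video models:

def normalize_clip(frames_tensor,
                   mean=(0.43216, 0.394666, 0.37645),
                   std=(0.22803, 0.22145, 0.216989)):
    # frames_tensor: (C, T, H, W) in [0, 1]; normalize each colour channel
    mean = torch.tensor(mean).view(3, 1, 1, 1)
    std = torch.tensor(std).view(3, 1, 1, 1)
    return (frames_tensor - mean) / std

If you use it, apply it to the output of load_video before extracting features: frames_tensor = normalize_clip(frames_tensor).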

Load a pretrained video model and set it to evaluation mode. torchvision does not ship a slowfast_r50_2x16x32_featurizer entry point; a pretrained r3d_18 with its classification head replaced by an identity is a readily available stand-in that yields 512-dimensional clip features (SlowFast variants can be loaded through PyTorchVideo's torch.hub entry points if you specifically need them):

def prepare_model():
    # r3d_18 pretrained on Kinetics-400; swapping the final fc layer for an
    # identity turns the classifier into a feature extractor
    model = models.video.r3d_18(pretrained=True)
    model.fc = torch.nn.Identity()
    model.eval()  # set the model to evaluation mode
    return model
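If a GPU is available you can run the forward pass there; this is an optional sketch, and the only requirement is that the model and the clip tensor end up on the same device:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = prepare_model().to(device)
# move each clip tensor to the same device before the forward pass,
# e.g. frames_tensor = frames_tensor.to(device)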

Then define the feature extraction itself. The clip tensor gets a batch dimension, and gradients are disabled since this is pure inference (this may need to be adjusted based on the model and its input requirements):

def extract_features(model, video_tensor):
    # video_tensor: (C, T, H, W); add a batch dimension -> (1, C, T, H, W)
    inputs = video_tensor.unsqueeze(0)
    with torch.no_grad():
        features = model(inputs)
    return features.squeeze()  # drop the batch dimension

Putting it together:

video_path = '22241.mp4'
frames_tensor = load_video(video_path)
model = prepare_model()
features = extract_features(model, frames_tensor)
print(features.shape)

You might want to save these features for later use, for example with torch.save:

torch.save(features, '22241_features.pt')  # the output filename is just an example
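If you need features for many clips, the same pieces compose into a simple loop; the folder layout and output naming below are illustrative assumptions:

import glob
import os

model = prepare_model()
for path in glob.glob('videos/*.mp4'):  # hypothetical location of the clips
    clip = load_video(path)
    feats = extract_features(model, clip)
    torch.save(feats, os.path.splitext(path)[0] + '_features.pt')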