Parallelize YouTube video frame download using yt-dlp and cv2

My task is to download multiple sequences of successive low-resolution frames from YouTube videos.

Here is a summary of the main parts of the process:

  • Each bag of shots spans half a second of video (so its frame count depends on the video's fps)
  • To grab useful frames I remove the initial and final 10% of each video, since it is common to have an intro and an outro
  • I build an array of [start_frame, end_frame] pairs to distribute the load over multiple processes using ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) (see the sketch after this list)
  • In case of failure/exception I completely remove the corresponding directory
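
A sketch of the chunk arithmetic with illustrative values (fps = 30 and a 100 s video are assumed here; the percentages are the parameters defined below):

fps, duration = 30, 100                       # illustrative values
shots = fps // 2                              # half a second -> 15 frames per bag
video_length = duration * fps                 # 3000 frames
to_ignore = (video_length * 10) // 100        # 300 frames trimmed at each end
new_len = video_length - 2 * to_ignore        # 2400 usable frames
tot_bags = ((new_len // shots) * 20) // 100   # keep 20% of possible bags -> 32
skip = (new_len - tot_bags * shots) // (tot_bags - 1)   # 61 frames between bags
chunks = [[to_ignore + b * (skip + shots),
           to_ignore + b * (skip + shots) + shots] for b in range(tot_bags)]
# chunks[0] == [300, 315], chunks[1] == [376, 391], ...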

The problem is that it does not scale: while running, I noticed that every CPU constantly sat at a load of roughly 20% or less. In addition, since I have to train multiple CNNs on these shots, it is recommended to have a big dataset to prevent overfitting, not just a bunch of shots.

Here is the code:

import yt_dlp
import os
from tqdm import tqdm
import cv2
import shutil
import time
import random
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
import pandas as pd
import numpy as np
from pathlib import Path
import zipfile


# PARAMETERS
percentage_train_test = 50
percentage_bag_shots = 20
percentage_to_ignore = 10

zip_f_name = f'VideoClassificationDataset_{percentage_train_test}_{percentage_bag_shots}_{percentage_to_ignore}'
dataset_path = Path('/content/VideoClassificationDataset')

# DOWNLOAD ZIP FILES
!wget --no-verbose https://github.com/gtoderici/sports-1m-dataset/archive/refs/heads/master.zip

# EXTRACT AND DELETE THEM
!unzip -qq -o '/content/master.zip' 
!rm '/content/master.zip'

DATA = {'train_partition.txt': {},
        'test_partition.txt': {}}

LABELS = []

train_dict = {}
test_dict = {}

path = '/content/sports-1m-dataset-master/original'

for f in os.listdir(path):
  with open(path + '/' + f) as f_txt:
    lines = f_txt.readlines()
    for line in lines:
      # each line has the form: <video_url> <label_idx1,label_idx2,...>
      splitted_line = line.split(' ')
      label_indices = splitted_line[1].rstrip('\n').split(',')
      DATA[f][splitted_line[0]] = list(map(int, label_indices))

with open('/content/sports-1m-dataset-master/labels.txt') as f_labels:
  LABELS = f_labels.read().splitlines()


TRAIN = DATA['train_partition.txt']
TEST = DATA['test_partition.txt']
print('Original Train Test length: ', len(TRAIN), len(TEST))

# sample a subset percentage_train_test
# random.sample needs a sequence in recent Python versions, hence the list(...)
TRAIN = dict(random.sample(list(TRAIN.items()), (len(TRAIN)*percentage_train_test)//100))
TEST = dict(random.sample(list(TEST.items()), (len(TEST)*percentage_train_test)//100))

print(f'Sampled {percentage_train_test}% Train Test length: ', len(TRAIN), len(TEST))


os.makedirs(dataset_path, exist_ok=True)
os.makedirs(f'{dataset_path}/train', exist_ok=True)
os.makedirs(f'{dataset_path}/test', exist_ok=True)

Function to extract a sequence of continuous frames:

def extract_frames(directory, url, idx_bag, start_frame, end_frame):
  capture = cv2.VideoCapture(url)
  count = start_frame

  # seek to the first frame of the bag
  capture.set(cv2.CAP_PROP_POS_FRAMES, count)
  os.makedirs(f'{directory}/bag_of_shots{idx_bag}')

  while count < end_frame:

    ret, frame = capture.read()

    if not ret:
      capture.release()   # release the capture before cleaning up
      shutil.rmtree(f'{directory}/bag_of_shots{idx_bag}')
      return False

    filename = f'{directory}/bag_of_shots{idx_bag}/shot{count - start_frame}.png'

    cv2.imwrite(filename, frame)
    count += 1

  capture.release()
  return True
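
For example, a single call (with a hypothetical url_id and a direct stream url obtained from yt-dlp) grabs one half-second bag at 30 fps:

# hypothetical values: frames 300-315 are half a second at 30 fps
ok = extract_frames(directory = f'{dataset_path}/train/abc123',   # abc123 is a made-up url_id
                    url = stream_url,                             # direct format url from yt-dlp
                    idx_bag = 0, start_frame = 300, end_frame = 315)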

Function to spread the load across multiple processes:

def video_to_frames(video_url, labels_list, directory, dic, percentage_of_bags):
  url_id = video_url.split('=')[1]
  path_until_url_id = f'{dataset_path}/{directory}/{url_id}'
  try:   

    ydl_opts = {
        'ignoreerrors': True,
        'quiet': True,
        'no_warnings': True,
        'simulate': True,
        'ignore_no_formats_error': True,
        'verbose': False,
        'cookiefile': '/content/all_cookies.txt',
        # https://stackoverflow.com/questions/63329412/how-can-i-solve-this-youtube-dl-429
    }
    ydl = yt_dlp.YoutubeDL(ydl_opts)
    info_dict = ydl.extract_info(video_url, download=False)

    if info_dict is not None and (info_dict.get('fps') or 0) >= 20:
      # I need at least 20 frames per second, since every bag of shots spans half a second of video

      formats = info_dict.get('formats', None)

      # excluding the initial and final 10% of each video to avoid noise
      video_length = info_dict['duration'] * info_dict['fps']

      shots = info_dict['fps'] // 2

      to_ignore = (video_length * percentage_to_ignore) // 100
      new_len = video_length - (to_ignore * 2)
      tot_stored_bags = ((new_len // shots) * percentage_of_bags) // 100   # (total_possible_bags * percentage_of_bags) // 100
      if tot_stored_bags == 0: tot_stored_bags = 1 # minimum 1 bag of shots

      skip_rate_between_bags = (new_len - (tot_stored_bags * shots)) // (tot_stored_bags-1) if tot_stored_bags > 1 else 0

      chunks = [[to_ignore+(bag*(skip_rate_between_bags+shots)), to_ignore+(bag*(skip_rate_between_bags+shots))+shots] for bag in range(tot_stored_bags)]
      # sequence of [[start_frame, end_frame], [start_frame, end_frame], [start_frame, end_frame], ...]


      # ----------- For the moment I download only shots from videos that have 144p resolution -----------

      res = {
          '160': '144p',
          '133': '240p',
          '134': '360p',
          '135': '360p',
          '136': '720p'
      }

      wanted_itag = list(res.keys())[0]   # '160' -> 144p
      format_id = {f['format_id']: f for f in formats}
      if wanted_itag in format_id:
          video = format_id[wanted_itag]
          url = video.get('url', None)
          if url != video.get('manifest_url', None):

            if not os.path.exists(path_until_url_id): os.makedirs(path_until_url_id)

            with ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
              for idx_bag, (start_f, end_f) in enumerate(chunks):
                future = executor.submit(
                  extract_frames, directory = path_until_url_id, url = url, idx_bag = idx_bag, start_frame = start_f, end_frame = end_f)

                if future.result() is True:
                  l = np.zeros(len(LABELS), dtype=int)
                  for label in labels_list: l[label] = 1
                  l = np.append(l, [shots]) # append the number of shots taken before adding the vector to the dictionary

                  dic[f'{directory}/{url_id}/bag_of_shots{idx_bag}'] = l.tolist()


  except Exception:
    # on any failure, remove whatever was written for this video
    shutil.rmtree(path_until_url_id, ignore_errors=True)
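
Each stored dictionary value is therefore a multi-hot label vector with the bag length appended; for illustration, assuming 3 labels and fps = 30:

l = np.zeros(3, dtype=int)          # [0, 0, 0]
for label in [0, 2]: l[label] = 1   # video tagged with labels 0 and 2 -> [1, 0, 1]
l = np.append(l, [15])              # shots = 30 // 2 = 15 -> [1, 0, 1, 15]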

Download of TRAIN bags of shots:

start_time = time.time()
pbar = tqdm(TRAIN.items(), total = len(TRAIN), leave=False)

for url, labels_list in pbar: video_to_frames(
  video_url = url, labels_list = labels_list, directory = 'train', dic = train_dict, percentage_of_bags = percentage_bag_shots)

print("--- %s seconds ---" % (time.time() - start_time))

Download of TEST bags of shots:

start_time = time.time()
pbar = tqdm(TEST.items(), total = len(TEST), leave=False)

for url, labels_list in pbar: video_to_frames(
  video_url = url, labels_list = labels_list, directory = 'test', dic = test_dict, percentage_of_bags = percentage_bag_shots)

print("--- %s seconds ---" % (time.time() - start_time))

Save the .csv files:

train_df = pd.DataFrame.from_dict(train_dict, orient='index', dtype=int).reset_index(level=0)
train_df = train_df.rename(columns={train_df.columns[-1]: 'shots'})
train_df.to_csv('/content/VideoClassificationDataset/train.csv', index=True)

test_df = pd.DataFrame.from_dict(test_dict, orient='index', dtype=int).reset_index(level=0)
test_df = test_df.rename(columns={test_df.columns[-1]: 'shots'})
test_df.to_csv('/content/VideoClassificationDataset/test.csv', index=True)
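
As a quick sanity check of the output (the 'index' column holds the bag path, the last column the shot count):

df = pd.read_csv(f'{dataset_path}/train.csv', index_col=0)
print(df.head())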

this [the capture.set(cv2.CAP_PROP_POS_FRAMES, count) seek] is VERY expensive & should be avoided
(the set() itself is almost free, but the following read() might have to decode maybe 100 frames manually, from the last keyframe found up to the desired position)

if you could do the “slice some frames out of the middle” part from the downloader (maybe this?), it would be far more efficient i guess
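
something like this could do the cutting on the downloader side (an untested sketch; download_ranges, download_range_func and force_keyframes_at_cuts are the yt-dlp options I'd expect to use here, check the docs):

import yt_dlp
from yt_dlp.utils import download_range_func

def download_clip(video_url, start_sec, end_sec, out_path):
  # fetch only the [start_sec, end_sec] window of the video,
  # instead of seeking through the whole remote stream with cv2
  ydl_opts = {
      'quiet': True,
      'format': '160',  # 144p, as in the code above
      'download_ranges': download_range_func(None, [(start_sec, end_sec)]),
      'force_keyframes_at_cuts': True,  # re-encode the cut so the clip starts on a keyframe
      'outtmpl': out_path,
  }
  with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download([video_url])

extract_frames could then read the short local clip from frame 0, with no seek at all.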

First of all, thanks for replying.

You mean that, before actually reading the wanted frame, it has to decode all the others from the start up to the n-th frame (with n the frame that precedes the first frame I want to download)?

Moreover, if I'm able to implement the download_range, hopefully it will cut the video using the start_time and end_time parameters, and then I can use my extract_frames function to download continuous frames. Are you suggesting this? Or have I misunderstood?