Downloading Videos From Put.io Using Their API

Automated downloading of videos from Put.io

I wanted to automatically download new videos from Put.io as they got added to my account, so I took a look at their API and built a Python script to do it. The script descends into the parent folder and any child folders and looks for video files that are above a certain size, and don’t contain the word “sample” in them. After all the videos that meet that criteria have been downloaded (using aria2c), the script deletes all the folders and cleans the History and Transfers tabs.

Here is the script:

import requests
import json
import time
import subprocess
import os
import sys
import datetime

# monitoring and downloading

# --- Lock file handling ---
# A lock file prevents overlapping cron runs from downloading the same
# videos twice.  A lock older than one day is treated as stale (the
# previous run presumably crashed without cleaning up) and is ignored.

# Path of the lock file
file = "/tmp/tv_download"
# Timestamp one day in the past; a lock newer than this is still "fresh"
one_day = datetime.datetime.now() - datetime.timedelta(days=1)

# If a fresh lock file exists, another run is still active: bail out
if os.path.isfile(file):
    filetime = datetime.datetime.fromtimestamp(os.path.getctime(file))
    if filetime > one_day:
        sys.exit(0)

# Create (or refresh) the lock file to mark this run as active
with open(file, "w") as use_file:
    use_file.write("In use")

# Base URL of the put.io v2 API, and the OAuth token query string
# appended to every request
url = "https://api.put.io/v2/"
oauth = "?oauth_token=<INSERT OAUTH TOKEN>"
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
# Maps file_id -> download URL for wanted videos, or the string "null"
# for folders/junk files that should simply be deleted server-side
file_urls = {}

# This function adds {file_id: download_url} of files we want to file_urls,
# recursively descending through folders.
def get_video_urls(file):
    """Recursively collect download URLs for wanted videos under *file*.

    file -- a put.io file dict (as returned by files/list) with at least
            the keys 'id', 'file_type', 'name' and 'size'.

    Side effect: updates the module-level file_urls dict.  Videos map to
    their download URL; folders and junk files map to the string "null"
    so the cleanup pass knows to just delete them.
    """
    # If the file type is a FOLDER, recursively descend into it
    if file['file_type'] == 'FOLDER':
        # Grab the folder id
        folder_id = str(file['id'])

        # If it's not an empty folder
        if file['size'] != 0:
            # Get the list of children in the folder
            folder_list = requests.get(url + "files/list" + oauth,
                                       params={'parent_id': folder_id}).json()

            # Process each child recursively
            for child in folder_list['files']:
                get_video_urls(child)

        # Mark the folder itself for deletion (empty now, or emptied
        # once its downloaded children are removed)
        file_urls.update({folder_id: "null"})

    # If it's a video we want: big enough (> ~50 MB) and not a sample clip
    elif file['file_type'] == "VIDEO" and "sample" not in file['name'] and file['size'] > 50000000:
        # Grab its ID
        video_id = str(file['id'])

        # A HEAD request to the download endpoint redirects to the real
        # file; the download URL is in the Location header
        video_url_request = requests.head(url + "files/" + video_id + "/download" + oauth, headers=headers)
        video_url = str(video_url_request.headers['Location'])

        # Add the ID and URL to file_urls
        file_urls.update({video_id: video_url})

    # Else it's a junk file (sample clip, nfo, ...): mark it for deletion
    else:
        junk_id = str(file['id'])
        file_urls.update({junk_id: "null"})

# Get files/folders in the specific watched folder
other_child_list = requests.get(url + "files/list" + oauth,
                                params={'parent_id': '<FOLDER ID>'}).json()

# Kick off the descent into the watched folder
for child in other_child_list['files']:
    get_video_urls(child)

# For each entry collected in the file_urls dict
for file_url in file_urls:
    download_url = file_urls.get(file_url)

    # If we have a video file to download
    if download_url != "null":
        # Download it with aria2c (-c resume, -q quiet, -x8 up to eight
        # connections) and store the return code
        return_code = subprocess.call(["aria2c", "-c", "-q", "-x8", "-d",
                                       "/downloads", "--log-level=error", download_url])

        # If aria2c completed successfully, delete the file on put.io
        if return_code == 0:
            file_deletion_data = {'file_ids': str(file_url)}
            requests.post(url + "files/delete" + oauth,
                          data=file_deletion_data)
    # Else it's an empty folder / junk entry: just delete it server-side
    else:
        file_deletion_data = {'file_ids': str(file_url)}
        requests.post(url + "files/delete" + oauth,
                      data=file_deletion_data)

# Pause to let the API catch up (original delay unknown — TODO confirm)
time.sleep(30)

# Clean the History tab
requests.post(url + "events/delete" + oauth)

# Pause again to let the request kick in
time.sleep(30)

# Clean the Transfers tab
requests.post(url + "transfers/clean" + oauth)

# Remove the lock file now that we are done
os.remove(file)
559 Words

2017-06-15 20:32 +0000