2015-02-17 15:37:48 -05:00
|
|
|
from __future__ import division, unicode_literals
|
2013-12-23 10:39:49 -05:00
|
|
|
|
|
|
|
import base64
|
|
|
|
import io
|
|
|
|
import itertools
|
|
|
|
import os
|
|
|
|
import time
|
|
|
|
import xml.etree.ElementTree as etree
|
|
|
|
|
|
|
|
from .common import FileDownloader
|
|
|
|
from .http import HttpFD
|
2014-12-13 06:24:42 -05:00
|
|
|
from ..compat import (
|
|
|
|
compat_urlparse,
|
|
|
|
)
|
2013-12-23 10:39:49 -05:00
|
|
|
from ..utils import (
|
2014-02-15 10:24:43 -05:00
|
|
|
struct_pack,
|
|
|
|
struct_unpack,
|
2013-12-23 10:39:49 -05:00
|
|
|
encodeFilename,
|
|
|
|
sanitize_open,
|
2014-09-21 09:43:09 -04:00
|
|
|
xpath_text,
|
2013-12-23 10:39:49 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        # 64-bit big-endian unsigned integer
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        # 32-bit big-endian unsigned integer
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        # 8-bit unsigned integer
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        # Read a NUL-terminated byte string; the terminator is consumed
        # but not included in the returned value.
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        # A size field of 1 signals an extended-size box: the real size
        # follows as a 64-bit value, making the header 16 bytes long.
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size - header_end)

    def read_asrt(self):
        """Parse a Segment Run Table (asrt) box body.

        Returns a dict with 'segment_run': a list of
        (first_segment, fragments_per_segment) tuples.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount (entries are read and discarded)
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        """Parse a Fragment Run Table (afrt) box body.

        Returns a dict with 'fragments': a list of dicts with keys
        'first', 'ts', 'duration' and 'discontinuity_indicator'.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers (read and discarded)
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            # A zero-duration entry carries an extra discontinuity
            # indicator byte.
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }

    def read_abst(self):
        """Parse a Bootstrap Info (abst) box body.

        Skips the metadata fields and returns a dict with the parsed
        'segments' (asrt) and 'fragments' (afrt) child tables.
        """
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)

        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()

        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable (read and discarded)
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable (read and discarded)
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        """Read the top-level abst box and return its parsed contents."""
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()
|
|
|
|
|
|
|
|
|
|
|
|
def read_bootstrap_info(bootstrap_bytes):
    """Parse a raw bootstrap info (abst) blob into its dict representation."""
    reader = FlvReader(bootstrap_bytes)
    return reader.read_bootstrap_info()
|
|
|
|
|
|
|
|
|
|
|
|
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    # Fragment numbers are consecutive across the whole video, starting at
    # the first fragment number advertised by the fragment run table.
    frag_run_entries = boot_info['fragments'][0]['fragments']
    frag_numbers = itertools.count(frag_run_entries[0]['first'])
    seg_run_table = boot_info['segments'][0]
    return [
        (segment, next(frag_numbers))
        for segment, n_frags in seg_run_table['segment_run']
        for _ in range(n_frags)
    ]
|
|
|
|
|
|
|
|
|
2015-01-05 13:22:17 -05:00
|
|
|
def write_unsigned_int(stream, val):
    """Write *val* to *stream* as a 32-bit big-endian unsigned integer."""
    packed = struct_pack('!I', val)
    stream.write(packed)
|
|
|
|
|
|
|
|
|
2015-01-05 13:30:40 -05:00
|
|
|
def write_unsigned_int_24(stream, val):
    """Write the low 24 bits of *val* to *stream*, big-endian."""
    # Pack as 32 bits and drop the most significant byte.
    packed = struct_pack('!I', val)
    stream.write(packed[1:])
|
|
|
|
|
|
|
|
|
2015-01-05 13:12:29 -05:00
|
|
|
def write_flv_header(stream):
    """Writes the FLV header to stream"""
    # FLV header
    header_parts = (
        b'FLV\x01',           # signature 'FLV' + version 1
        b'\x05',              # type flags: audio + video present
        b'\x00\x00\x00\x09',  # header size: 9 bytes
        b'\x00\x00\x00\x00',  # PreviousTagSize0
    )
    for part in header_parts:
        stream.write(part)
|
2015-01-05 13:12:29 -05:00
|
|
|
|
|
|
|
|
|
|
|
def write_metadata_tag(stream, metadata):
    """Writes optional metadata tag to stream"""
    SCRIPT_TAG = b'\x12'
    FLV_TAG_HEADER_LEN = 11

    if not metadata:
        # Nothing to write when no metadata payload was supplied.
        return
    body_len = len(metadata)
    stream.write(SCRIPT_TAG)
    write_unsigned_int_24(stream, body_len)
    # timestamp (3 bytes + 1 extended byte) and stream id (3 bytes), all zero
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # trailing PreviousTagSize: tag header plus payload
    write_unsigned_int(stream, FLV_TAG_HEADER_LEN + body_len)
|
2013-12-23 10:39:49 -05:00
|
|
|
|
|
|
|
|
|
|
|
def _add_ns(prop):
|
|
|
|
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
|
|
|
|
|
|
|
|
|
|
|
|
class HttpQuietDownloader(HttpFD):
    """HttpFD variant that suppresses all screen output.

    Used for the per-fragment downloads so that only the aggregate
    progress of the whole f4m download is reported.
    """

    def to_screen(self, *args, **kargs):
        # Intentionally discard every message.
        pass
|
|
|
|
|
|
|
|
|
|
|
|
class F4mFD(FileDownloader):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    def _get_unencrypted_media(self, doc):
        """Return the <media> nodes of manifest *doc* that are not DRM-protected.

        Reports an error (via self.report_error) when the manifest has no
        media at all, when a DRM additional-header node lacks an id, or
        when every media node turns out to be DRM-protected.
        """
        media = doc.findall(_add_ns('media'))
        if not media:
            self.report_error('No media found')
        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
            # If id attribute is missing it's valid for all media nodes
            # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
            if 'id' not in e.attrib:
                self.report_error('Missing ID in f4m DRM')
        media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
                                      'drmAdditionalHeaderSetId' not in e.attrib,
                            media))
        if not media:
            self.report_error('Unsupported DRM')
        return media

    def real_download(self, filename, info_dict):
        """Download the HDS stream described by *info_dict* into *filename*.

        Fetches the f4m manifest, picks a media entry (honouring the
        requested bitrate in info_dict['tbr'] when set, otherwise the
        highest bitrate), downloads each fragment with a quiet HTTP
        downloader and concatenates the fragments' mdat payloads into a
        single FLV file. Returns True on success, False when a fragment
        download fails.
        """
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[download] Downloading f4m manifest')
        manifest = self.ydl.urlopen(man_url).read()

        doc = etree.fromstring(manifest)
        # Pair each unencrypted media node with its advertised bitrate
        # (-1 when the bitrate attribute is missing).
        formats = [(int(f.attrib.get('bitrate', -1)), f)
                   for f in self._get_unencrypted_media(doc)]
        if requested_bitrate is None:
            # get the best format
            formats = sorted(formats, key=lambda f: f[0])
            rate, media = formats[-1]
        else:
            rate, media = list(filter(
                lambda f: int(f[0]) == requested_bitrate, formats))[0]

        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        # The bootstrap info is either referenced by URL or inlined as
        # base64 text in the bootstrapInfo node.
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        if bootstrap_node.text is None:
            bootstrap_url = compat_urlparse.urljoin(
                base_url, bootstrap_node.attrib['url'])
            bootstrap = self.ydl.urlopen(bootstrap_url).read()
        else:
            bootstrap = base64.b64decode(bootstrap_node.text)
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = base64.b64decode(metadata_node.text)
        else:
            metadata = None
        boot_info = read_bootstrap_info(bootstrap)

        fragments_list = build_fragments_list(boot_info)
        if self.params.get('test', False):
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        self.report_destination(filename)
        http_dl = HttpQuietDownloader(
            self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'ratelimit': self.params.get('ratelimit', None),
                'test': self.params.get('test', False),
            }
        )
        tmpfilename = self.temp_name(filename)
        (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')

        write_flv_header(dest_stream)
        write_metadata_tag(dest_stream, metadata)

        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'status': 'downloading',
            'downloaded_bytes': 0,
            'frag_index': 0,
            'frag_count': total_frags,
            'filename': filename,
            'tmpfilename': tmpfilename,
        }
        start = time.time()

        def frag_progress_hook(s):
            # Translate per-fragment progress events coming from the inner
            # HTTP downloader into whole-download progress for our hooks.
            if s['status'] not in ('downloading', 'finished'):
                return

            frag_total_bytes = s.get('total_bytes', 0)
            if s['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_index'] += 1

            # Extrapolate the total size from the average fragment size
            # seen so far.
            estimated_size = (
                (state['downloaded_bytes'] + frag_total_bytes) /
                (state['frag_index'] + 1) * total_frags)
            time_now = time.time()
            state['total_bytes_estimate'] = estimated_size
            state['elapsed'] = time_now - start

            # NOTE(review): `progress` is computed here but never stored in
            # `state` nor otherwise used — looks like a leftover; confirm
            # against the progress-hook consumers before removing.
            if s['status'] == 'finished':
                progress = self.calc_percent(state['frag_index'], total_frags)
            else:
                frag_downloaded_bytes = s['downloaded_bytes']
                frag_progress = self.calc_percent(frag_downloaded_bytes,
                                                  frag_total_bytes)
                progress = self.calc_percent(state['frag_index'], total_frags)
                progress += frag_progress / float(total_frags)

                state['eta'] = self.calc_eta(
                    start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
            state['speed'] = s.get('speed')
            self._hook_progress(state)

        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
        for (seg_i, frag_i) in fragments_list:
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            url = base_url + name
            if akamai_pv:
                url += '?' + akamai_pv.strip(';')
            frag_filename = '%s-%s' % (tmpfilename, name)
            success = http_dl.download(frag_filename, {'url': url})
            if not success:
                return False
            with open(frag_filename, 'rb') as down:
                down_data = down.read()
                reader = FlvReader(down_data)
                while True:
                    # Only the mdat box payload is copied into the output;
                    # everything before it is skipped.
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
            frags_filenames.append(frag_filename)

        dest_stream.close()

        elapsed = time.time() - start
        self.try_rename(tmpfilename, filename)
        # Fragment files are only needed until they have been merged.
        for frag_file in frags_filenames:
            os.remove(frag_file)

        fsize = os.path.getsize(encodeFilename(filename))
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
            'elapsed': elapsed,
        })

        return True
|