diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index 07f514a462..c04791997f 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -1,11 +1,10 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..compat import compat_str
 
 
 class SohuIE(InfoExtractor):
@@ -29,60 +28,73 @@ class SohuIE(InfoExtractor):
                 base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
             else:
                 base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
-            data_url = base_data_url + str(vid_id)
-            data_json = self._download_webpage(
-                data_url, video_id,
-                note='Downloading JSON data for ' + str(vid_id))
-            return json.loads(data_json)
+
+            return self._download_json(
+                base_data_url + vid_id, video_id,
+                'Downloading JSON data for %s' % vid_id)
 
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         mytv = mobj.group('mytv') is not None
 
         webpage = self._download_webpage(url, video_id)
-        raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
-                                            webpage, 'video title')
+        raw_title = self._html_search_regex(
+            r'(?s)<title>(.+?)</title>',
+            webpage, 'video title')
         title = raw_title.partition('-')[0].strip()
 
-        vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,
-                                      'video path')
-        data = _fetch_data(vid, mytv)
+        vid = self._html_search_regex(
+            r'var vid ?= ?["\'](\d+)["\']',
+            webpage, 'video path')
+        vid_data = _fetch_data(vid, mytv)
 
-        QUALITIES = ('ori', 'super', 'high', 'nor')
-        vid_ids = [data['data'][q + 'Vid']
-                   for q in QUALITIES
-                   if data['data'][q + 'Vid'] != 0]
-        if not vid_ids:
-            raise ExtractorError('No formats available for this video')
+        formats_json = {}
+        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
+            vid_id = vid_data['data'].get('%sVid' % format_id)
+            if not vid_id:
+                continue
+            vid_id = compat_str(vid_id)
+            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)
 
-        # For now, we just pick the highest available quality
-        vid_id = vid_ids[-1]
-
-        format_data = data if vid == vid_id else _fetch_data(vid_id, mytv)
-        part_count = format_data['data']['totalBlocks']
-        allot = format_data['allot']
-        prot = format_data['prot']
-        clipsURL = format_data['data']['clipsURL']
-        su = format_data['data']['su']
+        part_count = vid_data['data']['totalBlocks']
 
         playlist = []
         for i in range(part_count):
-            part_url = ('http://%s/?prot=%s&file=%s&new=%s' %
-                        (allot, prot, clipsURL[i], su[i]))
-            part_str = self._download_webpage(
-                part_url, video_id,
-                note='Downloading part %d of %d' % (i + 1, part_count))
+            formats = []
+            for format_id, format_data in formats_json.items():
+                allot = format_data['allot']
+                prot = format_data['prot']
 
-            part_info = part_str.split('|')
-            video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+                data = format_data['data']
+                clips_url = data['clipsURL']
+                su = data['su']
 
-            video_info = {
-                'id': '%s_part%02d' % (video_id, i + 1),
+                part_str = self._download_webpage(
+                    'http://%s/?prot=%s&file=%s&new=%s' %
+                    (allot, prot, clips_url[i], su[i]),
+                    video_id,
+                    'Downloading %s video URL part %d of %d'
+                    % (format_id, i + 1, part_count))
+
+                part_info = part_str.split('|')
+                video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'filesize': data['clipsBytes'][i],
+                    'width': data['width'],
+                    'height': data['height'],
+                    'fps': data['fps'],
+                })
+            self._sort_formats(formats)
+
+            playlist.append({
+                'id': '%s_part%d' % (video_id, i + 1),
                 'title': title,
-                'url': video_url,
-                'ext': 'mp4',
-            }
-            playlist.append(video_info)
+                'duration': vid_data['data']['clipsDuration'][i],
+                'formats': formats,
+            })
 
         if len(playlist) == 1:
             info = playlist[0]