2014-01-07 04:04:48 -05:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2016-11-18 16:18:21 -05:00
|
|
|
import random
|
2013-06-23 16:24:58 -04:00
|
|
|
import re
|
2016-11-18 16:18:21 -05:00
|
|
|
import time
|
2013-06-23 16:24:58 -04:00
|
|
|
|
|
|
|
from .common import InfoExtractor
|
2014-12-13 06:24:42 -05:00
|
|
|
from ..compat import (
|
2013-11-22 11:44:55 -05:00
|
|
|
compat_str,
|
2013-11-22 10:05:14 -05:00
|
|
|
compat_urlparse,
|
2014-12-13 06:24:42 -05:00
|
|
|
)
|
|
|
|
from ..utils import (
|
2013-06-23 16:24:58 -04:00
|
|
|
ExtractorError,
|
2015-10-16 14:51:35 -04:00
|
|
|
float_or_none,
|
|
|
|
int_or_none,
|
2017-06-04 12:21:30 -04:00
|
|
|
KNOWN_EXTENSIONS,
|
2016-11-18 16:18:21 -05:00
|
|
|
parse_filesize,
|
2018-08-30 16:35:55 -04:00
|
|
|
str_or_none,
|
|
|
|
try_get,
|
2016-11-18 16:18:21 -05:00
|
|
|
unescapeHTML,
|
|
|
|
update_url_query,
|
2017-06-04 09:47:05 -04:00
|
|
|
unified_strdate,
|
2018-08-30 16:35:55 -04:00
|
|
|
unified_timestamp,
|
2018-07-21 08:08:28 -04:00
|
|
|
url_or_none,
|
2013-06-23 16:24:58 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2020-10-27 22:21:34 -04:00
|
|
|
class BandcampBaseIE(InfoExtractor):
    """Shared helpers for the Bandcamp track and album extractors."""

    def _extract_json_from_html_data_attribute(self, webpage, suffix, video_id):
        """Return the parsed JSON embedded in a data-<suffix> HTML attribute.

        Bandcamp pages carry their metadata as HTML-escaped JSON in
        data-* attributes (e.g. data-tralbum, data-embed).  Falls back to
        an empty object when the attribute is absent.
        """
        raw = self._html_search_regex(
            r' data-%s="([^"]*)' % suffix,
            webpage, '%s json' % suffix, default='{}')
        return self._parse_json(raw, video_id)

    def _parse_json_track(self, json):
        """Map one trackinfo JSON entry onto an intermediate track dict."""
        formats = []
        streams = json.get('file')
        if isinstance(streams, dict):
            for format_id, format_url in streams.items():
                if not url_or_none(format_url):
                    continue
                # Stream keys look like 'mp3-128': extension, then bitrate.
                ext, abr_str = format_id.split('-', 1)
                formats.append({
                    'format_id': format_id,
                    'url': self._proto_relative_url(format_url, 'http:'),
                    'ext': ext,
                    'vcodec': 'none',
                    'acodec': ext,
                    'abr': int_or_none(abr_str),
                })

        return {
            'duration': float_or_none(json.get('duration')),
            'id': str_or_none(json.get('track_id') or json.get('id')),
            'title': json.get('title'),
            'title_link': json.get('title_link'),
            'number': int_or_none(json.get('track_num')),
            'formats': formats
        }
|
|
|
|
|
|
|
|
|
|
|
|
class BandcampIE(BandcampBaseIE):
    """Extract a single track from a Bandcamp track page."""
    IE_NAME = 'Bandcamp:track'
    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        'md5': 'c557841d5e50261777a6585648adf439',
        'info_dict': {
            'id': '1812978515',
            'ext': 'mp3',
            'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
            'duration': 9.8485,
            'uploader': "youtube-dl \"'/\\\u00e4\u21ad",
            'timestamp': 1354224127,
            'upload_date': '20121129',
        },
        # was '_skip', which the test harness silently ignores; the
        # recognized key is 'skip'
        'skip': 'There is a limit of 200 free downloads / month for the test song'
    }, {
        # free download
        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
        'md5': '5d92af55811e47f38962a54c30b07ef0',
        'info_dict': {
            'id': '2650410135',
            'ext': 'aiff',
            'title': 'Ben Prunty - Lanius (Battle)',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Ben Prunty',
            'timestamp': 1396508491,
            'upload_date': '20140403',
            'release_date': '20140403',
            'duration': 260.877,
            'track': 'Lanius (Battle)',
            'track_number': 1,
            'track_id': '2650410135',
            'artist': 'Ben Prunty',
            'album': 'FTL: Advanced Edition Soundtrack',
        },
    }, {
        # no free download, mp3 128
        'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire',
        'md5': 'fec12ff55e804bb7f7ebeb77a800c8b7',
        'info_dict': {
            'id': '2584466013',
            'ext': 'mp3',
            'title': 'Mastodon - Hail to Fire',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Mastodon',
            'timestamp': 1322005399,
            'upload_date': '20111122',
            'release_date': '20040207',
            'duration': 120.79,
            'track': 'Hail to Fire',
            'track_number': 5,
            'track_id': '2584466013',
            'artist': 'Mastodon',
            'album': 'Call of the Mastodon',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        url_track_title = title
        webpage = self._download_webpage(url, title)
        thumbnail = self._html_search_meta('og:image', webpage, default=None)

        # Page metadata is embedded as JSON in data-tralbum/data-embed
        json_tralbum = self._extract_json_from_html_data_attribute(webpage, "tralbum", url_track_title)
        json_embed = self._extract_json_from_html_data_attribute(webpage, "embed", url_track_title)

        json_tracks = json_tralbum.get('trackinfo')
        if not json_tracks:
            raise ExtractorError('Could not extract track')

        track = self._parse_json_track(json_tracks[0])
        artist = json_tralbum.get('artist')
        album_title = json_embed.get('album_title')

        json_album = json_tralbum.get('packages')
        if json_album:
            json_album = json_album[0]
            album_publish_date = json_album.get('album_publish_date')
            album_release_date = json_album.get('album_release_date')
        else:
            album_publish_date = None
            album_release_date = json_tralbum.get('album_release_date')

        # 'current' can be present but null in the tralbum JSON; 'or {}'
        # keeps .get() from blowing up with AttributeError in that case
        timestamp = unified_timestamp(
            (json_tralbum.get('current') or {}).get('publish_date') or album_publish_date)
        release_date = unified_strdate(album_release_date)

        # The quote around freeDownloadPage may appear attribute-escaped
        # (&quot;) in the raw page, hence the entity alternations
        download_link = self._search_regex(
            r'freeDownloadPage(?:["\']|&quot;):\s*(["\']|&quot;)(?P<url>(?:(?!\1).)+)\1', webpage,
            'download link', default=None, group='url')
        if download_link:
            track_id = self._search_regex(
                r'\?id=(?P<id>\d+)&',
                download_link, 'track id')

            download_webpage = self._download_webpage(
                download_link, track_id, 'Downloading free downloads page')

            blob = self._parse_json(
                self._search_regex(
                    r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
                    'blob', group='blob'),
                track_id, transform_source=unescapeHTML)

            info = try_get(
                blob, (lambda x: x['digital_items'][0],
                       lambda x: x['download_items'][0]), dict)
            if info:
                downloads = info.get('downloads')
                if isinstance(downloads, dict):
                    if not artist:
                        artist = info.get('artist')
                    if not thumbnail:
                        thumbnail = info.get('thumb_url')

                    # Map human-readable format name -> file extension
                    download_formats = {}
                    download_formats_list = blob.get('download_formats')
                    if isinstance(download_formats_list, list):
                        for f in blob['download_formats']:
                            name, ext = f.get('name'), f.get('file_extension')
                            if all(isinstance(x, compat_str) for x in (name, ext)):
                                download_formats[name] = ext.strip('.')

                    for format_id, f in downloads.items():
                        format_url = f.get('url')
                        if not format_url:
                            continue
                        # Stat URL generation algorithm is reverse engineered from
                        # download_*_bundle_*.js
                        stat_url = update_url_query(
                            format_url.replace('/download/', '/statdownload/'), {
                                '.rand': int(time.time() * 1000 * random.random()),
                            })
                        format_id = f.get('encoding_name') or format_id
                        stat = self._download_json(
                            stat_url, track_id, 'Downloading %s JSON' % format_id,
                            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
                            fatal=False)
                        if not stat:
                            continue
                        retry_url = url_or_none(stat.get('retry_url'))
                        if not retry_url:
                            continue
                        track['formats'].append({
                            'url': self._proto_relative_url(retry_url, 'http:'),
                            'ext': download_formats.get(format_id),
                            'format_id': format_id,
                            'format_note': f.get('description'),
                            'filesize': parse_filesize(f.get('size_mb')),
                            'vcodec': 'none',
                        })

        self._sort_formats(track['formats'])

        title = '%s - %s' % (artist, track.get('title')) if artist else track.get('title')

        return {
            'album': album_title,
            'artist': artist,
            'duration': track['duration'],
            'formats': track['formats'],
            'id': track['id'],
            'release_date': release_date,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'title': title,
            'track': track['title'],
            'track_id': track['id'],
            'track_number': track['number'],
            'uploader': artist
        }
|
2013-11-22 10:05:14 -05:00
|
|
|
|
|
|
|
|
2020-10-27 22:21:34 -04:00
|
|
|
class BandcampAlbumIE(BandcampBaseIE):
    """Extract all playable tracks from a Bandcamp album page as a playlist."""
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        'playlist': [
            {
                'md5': '39bc1eded3476e927c724321ddf116cf',
                'info_dict': {
                    'id': '1353101989',
                    'ext': 'mp3',
                    'title': 'Intro',
                }
            },
            {
                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                'info_dict': {
                    'id': '38097443',
                    'ext': 'mp3',
                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
            'id': 'jazz-format-mixtape-vol-1',
            'uploader_id': 'blazo',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits.'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
            'uploader_id': 'nightbringer',
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }, {
        # with escaped quote in title
        'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
        'info_dict': {
            'title': '"Entropy" EP',
            'uploader_id': 'jstrecords',
            'id': 'entropy-ep',
        },
        'playlist_mincount': 3,
    }, {
        # not all tracks have songs
        'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
        'info_dict': {
            'id': 'we-are-the-plague',
            'title': 'WE ARE THE PLAGUE',
            'uploader_id': 'insulters',
        },
        'playlist_count': 2,
    }]

    @classmethod
    def suitable(cls, url):
        # Yield to the more specific extractors first; the album URL
        # pattern would otherwise also swallow track/weekly URLs.
        if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url):
            return False
        return super(BandcampAlbumIE, cls).suitable(url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('subdomain')
        album_id = mobj.group('album_id')
        playlist_id = album_id or uploader_id
        webpage = self._download_webpage(url, playlist_id)

        json_tralbum = self._extract_json_from_html_data_attribute(webpage, "tralbum", playlist_id)
        json_embed = self._extract_json_from_html_data_attribute(webpage, "embed", playlist_id)

        json_tracks = json_tralbum.get('trackinfo')
        if not json_tracks:
            raise ExtractorError('Could not extract album tracks')

        album_title = json_embed.get('album_title')

        # Only tracks with duration info have songs
        entries = []
        for track_json in json_tracks:
            track = self._parse_json_track(track_json)
            if not track.get('duration'):
                continue
            entries.append(self.url_result(
                compat_urlparse.urljoin(url, track['title_link']),
                ie=BandcampIE.ie_key(), video_id=track['id'],
                video_title=track['title']))

        return {
            '_type': 'playlist',
            'uploader_id': uploader_id,
            'id': playlist_id,
            'title': album_title,
            'entries': entries
        }
|
2017-06-04 09:47:05 -04:00
|
|
|
|
|
|
|
|
|
|
|
class BandcampWeeklyIE(InfoExtractor):
    """Extract a Bandcamp Weekly radio-show episode."""
    IE_NAME = 'Bandcamp:weekly'
    _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://bandcamp.com/?show=224',
        'md5': 'b00df799c733cf7e0c567ed187dea0fd',
        'info_dict': {
            'id': '224',
            'ext': 'opus',
            'title': 'BC Weekly April 4th 2017 - Magic Moments',
            'description': 'md5:5d48150916e8e02d030623a48512c874',
            'duration': 5829.77,
            'release_date': '20170404',
            'series': 'Bandcamp Weekly',
            'episode': 'Magic Moments',
            'episode_number': 208,
            'episode_id': '224',
        }
    }, {
        'url': 'https://bandcamp.com/?blah/blah@&show=228',
        'only_matching': True
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # All show metadata lives in an HTML-escaped data-blob attribute
        blob = self._parse_json(
            self._search_regex(
                r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
                'blob', group='blob'),
            video_id, transform_source=unescapeHTML)

        show = blob['bcw_show']

        # This is desired because any invalid show id redirects to `bandcamp.com`
        # which happens to expose the latest Bandcamp Weekly episode.
        show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)

        formats = []
        for format_id, format_url in show['audio_stream'].items():
            if not url_or_none(format_url):
                continue
            # Derive the extension from the first known extension embedded
            # in the stream key, if any
            ext = next(
                (known_ext for known_ext in KNOWN_EXTENSIONS
                 if known_ext in format_id), None)
            formats.append({
                'format_id': format_id,
                'url': format_url,
                'ext': ext,
                'vcodec': 'none',
            })
        self._sort_formats(formats)

        title = show.get('audio_title') or 'Bandcamp Weekly'
        subtitle = show.get('subtitle')
        if subtitle:
            title = '%s - %s' % (title, subtitle)

        # Look up this episode's number in the show sequence, if present
        episode_number = None
        seq = blob.get('bcw_seq')
        if seq and isinstance(seq, list):
            for entry in seq:
                if isinstance(entry, dict) and int_or_none(entry.get('id')) == show_id:
                    episode_number = int_or_none(entry.get('episode_number'))
                    break

        return {
            'id': video_id,
            'title': title,
            'description': show.get('desc') or show.get('short_desc'),
            'duration': float_or_none(show.get('audio_duration')),
            'is_live': False,
            'release_date': unified_strdate(show.get('published_date')),
            'series': 'Bandcamp Weekly',
            'episode': show.get('subtitle'),
            'episode_number': episode_number,
            'episode_id': compat_str(video_id),
            'formats': formats
        }
|