Compare commits


No commits in common. "c282e5f8d723763ba88c521221e4535f46453949" and "1b1442887e67b63545453e10816904e2b4c561c1" have entirely different histories.

3 changed files with 79 additions and 111 deletions

View File

@@ -128,12 +128,6 @@ def expect_value(self, got, expected, field):
         self.assertTrue(
             contains_str in got,
             'field %s (value: %r) should contain %r' % (field, got, contains_str))
-    elif isinstance(expected, compat_str) and re.match(r'^lambda \w+:', expected):
-        fn = eval(expected)
-        suite = expected.split(':', 1)[1].strip()
-        self.assertTrue(
-            fn(got),
-            'Expected field %s to meet condition %s, but value %r failed ' % (field, suite, got))
     elif isinstance(expected, type):
         self.assertTrue(
             isinstance(got, expected),
@@ -143,7 +137,7 @@ def expect_value(self, got, expected, field):
     elif isinstance(expected, list) and isinstance(got, list):
         self.assertEqual(
             len(expected), len(got),
-            'Expected a list of length %d, but got a list of length %d for field %s' % (
+            'Expect a list of length %d, but got a list of length %d for field %s' % (
                 len(expected), len(got), field))
         for index, (item_got, item_expected) in enumerate(zip(got, expected)):
             type_got = type(item_got)

View File

@@ -1,4 +1,3 @@
-# coding: utf-8
 from __future__ import unicode_literals
 
 import datetime
@@ -72,7 +71,7 @@ class MotherlessIE(InfoExtractor):
             'title': 'a/ Hot Teens',
             'categories': list,
             'upload_date': '20210104',
-            'uploader_id': 'anonymous',
+            'uploader_id': 'yonbiw',
             'thumbnail': r're:https?://.*\.jpg',
             'age_limit': 18,
         },
@@ -128,7 +127,7 @@ class MotherlessIE(InfoExtractor):
         comment_count = webpage.count('class="media-comment-contents"')
         uploader_id = self._html_search_regex(
-            r'''(?s)['"](?:media-meta-member|thumb-member-username)\b[^>]+>\s*<a\b[^>]+\bhref\s*=\s*['"]/m/([^"']+)''',
+            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
             webpage, 'uploader_id')
 
         categories = self._html_search_meta('keywords', webpage, default=None)
@@ -170,7 +169,7 @@ class MotherlessGroupIE(InfoExtractor):
             'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                            'any kind!'
         },
-        'playlist_mincount': 0,
+        'playlist_mincount': 9,
     }]
 
     @classmethod
@@ -209,9 +208,9 @@ class MotherlessGroupIE(InfoExtractor):
             r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
         description = self._html_search_meta(
             'description', webpage, fatal=False)
-        page_count = str_to_int(self._search_regex(
-            r'(\d+)\s*</(?:a|span)>\s*<(?:a|span)[^>]+(?:>\s*NEXT|\brel\s*=\s*["\']?next)\b',
-            webpage, 'page_count', default='1'))
+        page_count = self._int(self._search_regex(
+            r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
+            webpage, 'page_count'), 'page_count')
         PAGE_SIZE = 80
 
         def _get_page(idx):

View File

@@ -8,14 +8,13 @@ from ..compat import compat_str
 from ..utils import (
     determine_ext,
     ExtractorError,
-    extract_attributes,
     float_or_none,
     int_or_none,
     merge_dicts,
     NO_DEFAULT,
+    orderedSet,
     parse_codecs,
     qualities,
-    str_or_none,
     try_get,
     unified_timestamp,
     update_url_query,
@@ -58,39 +57,28 @@ class ZDFBaseIE(InfoExtractor):
         format_urls.add(format_url)
         mime_type = meta.get('mimeType')
         ext = determine_ext(format_url)
-        join_nonempty = lambda s, l: s.join(filter(None, l))
-        meta_map = lambda t: map(lambda x: str_or_none(meta.get(x)), t)
         if mime_type == 'application/x-mpegURL' or ext == 'm3u8':
-            new_formats = self._extract_m3u8_formats(
+            formats.extend(self._extract_m3u8_formats(
                 format_url, video_id, 'mp4', m3u8_id='hls',
-                entry_protocol='m3u8_native', fatal=False)
+                entry_protocol='m3u8_native', fatal=False))
         elif mime_type == 'application/f4m+xml' or ext == 'f4m':
-            new_formats = self._extract_f4m_formats(
-                update_url_query(format_url, {'hdcore': '3.7.0'}), video_id, f4m_id='hds', fatal=False)
+            formats.extend(self._extract_f4m_formats(
+                update_url_query(format_url, {'hdcore': '3.7.0'}), video_id, f4m_id='hds', fatal=False))
         else:
             f = parse_codecs(meta.get('mimeCodec'))
-            if not f:
-                data = meta.get('type', '').split('_')
-                if try_get(data, lambda x: x[2]) == ext:
-                    f = dict(zip(('vcodec', 'acodec'), data[1]))
             format_id = ['http']
-            format_id.extend(join_nonempty('-', meta_map(('type', 'quality'))))
+            for p in (meta.get('type'), meta.get('quality')):
+                if p and isinstance(p, compat_str):
+                    format_id.append(p)
             f.update({
                 'url': format_url,
                 'format_id': '-'.join(format_id),
-                'tbr': int_or_none(self._search_regex(r'_(\d+)k_', format_url, 'tbr', default=None))
-            })
-            new_formats = [f]
-        formats.extend(merge_dicts(f, {
-            'format_note': join_nonempty(',', meta_map(('quality', 'class'))),
+                'format_note': meta.get('quality'),
                 'language': meta.get('language'),
-            'language_preference': 10 if meta.get('class') == 'main' else -10 if meta.get('class') == 'ad' else -1,
                 'quality': qualities(self._QUALITIES)(meta.get('quality')),
-        }) for f in new_formats)
+                'preference': -10,
+            })
+            formats.append(f)
 
     def _extract_ptmd(self, ptmd_url, video_id, api_token, referrer):
         ptmd = self._call_api(
@@ -119,7 +107,6 @@ class ZDFBaseIE(InfoExtractor):
                                 'type': f.get('type'),
                                 'mimeType': f.get('mimeType'),
                                 'quality': quality.get('quality'),
-                                'class': track.get('class'),
                                 'language': track.get('language'),
                             })
         self._sort_formats(formats)
@@ -184,20 +171,6 @@ class ZDFIE(ZDFBaseIE):
             'duration': 2615,
             'timestamp': 1465021200,
             'upload_date': '20160604',
-            'thumbnail': 'https://www.zdf.de/assets/mauve-im-labor-100~768x432?cb=1464909117806',
-        },
-    }, {
-        'url': 'https://www.zdf.de/funk/druck-11790/funk-alles-ist-verzaubert-102.html',
-        'md5': '1b93bdec7d02fc0b703c5e7687461628',
-        'info_dict': {
-            'ext': 'mp4',
-            'id': 'video_funk_1770473',
-            'duration': 1278,
-            'description': 'Die Neue an der Schule verdreht Ismail den Kopf.',
-            'title': 'Alles ist verzaubert',
-            'timestamp': 1635520560,
-            'upload_date': '20211029',
-            'thumbnail': 'https://www.zdf.de/assets/teaser-funk-alles-ist-verzaubert-100~1920x1080?cb=1636466431799',
         },
     }, {
         # Same as https://www.phoenix.de/sendungen/dokumentationen/gesten-der-maechtigen-i-a-89468.html?ref=suche
@@ -231,19 +204,6 @@
             'timestamp': 1641355200,
             'upload_date': '20220105',
         },
-        'skip': 'No longer available "Diese Seite wurde leider nicht gefunden"'
-    }, {
-        'url': 'https://www.zdf.de/serien/soko-stuttgart/das-geld-anderer-leute-100.html',
-        'info_dict': {
-            'id': '191205_1800_sendung_sok8',
-            'ext': 'mp4',
-            'title': 'Das Geld anderer Leute',
-            'description': 'md5:cb6f660850dc5eb7d1ab776ea094959d',
-            'duration': 2581.0,
-            'timestamp': 1654790700,
-            'upload_date': '20220609',
-            'thumbnail': 'https://epg-image.zdf.de/fotobase-webdelivery/images/e2d7e55a-09f0-424e-ac73-6cac4dd65f35?layout=2400x1350',
-        },
     }]
 
     def _extract_entry(self, url, player, content, video_id):
@@ -305,15 +265,14 @@
             'https://zdf-cdn.live.cellular.de/mediathekV2/document/%s' % video_id,
             video_id)
 
-        formats = []
-        formitaeten = try_get(video, lambda x: x['document']['formitaeten'], list)
-        document = formitaeten and video['document']
-        if formitaeten:
+        document = video['document']
         title = document['titel']
         content_id = document['basename']
+        formats = []
         format_urls = set()
-            for f in formitaeten or []:
+        for f in document['formitaeten']:
             self._extract_format(content_id, formats, format_urls, f)
         self._sort_formats(formats)
@@ -361,9 +320,9 @@ class ZDFChannelIE(ZDFBaseIE):
         'url': 'https://www.zdf.de/sport/das-aktuelle-sportstudio',
         'info_dict': {
             'id': 'das-aktuelle-sportstudio',
-            'title': 'das aktuelle sportstudio',
+            'title': 'das aktuelle sportstudio | ZDF',
         },
-        'playlist_mincount': 18,
+        'playlist_mincount': 23,
     }, {
         'url': 'https://www.zdf.de/dokumentation/planet-e',
         'info_dict': {
@@ -371,14 +330,6 @@
             'title': 'planet e.',
         },
         'playlist_mincount': 50,
-    }, {
-        'url': 'https://www.zdf.de/gesellschaft/aktenzeichen-xy-ungeloest',
-        'info_dict': {
-            'id': 'aktenzeichen-xy-ungeloest',
-            'title': 'Aktenzeichen XY... ungelöst',
-            'entries': "lambda x: not any('xy580-fall1-kindermoerder-gesucht-100' in e['url'] for e in x)",
-        },
-        'playlist_mincount': 2,
     }, {
         'url': 'https://www.zdf.de/filme/taunuskrimi/',
         'only_matching': True,
@@ -388,36 +339,60 @@
     def suitable(cls, url):
         return False if ZDFIE.suitable(url) else super(ZDFChannelIE, cls).suitable(url)
 
-    def _og_search_title(self, webpage, fatal=False):
-        title = super(ZDFChannelIE, self)._og_search_title(webpage, fatal=fatal)
-        return re.split(r'\s+[-|]\s+ZDF(?:mediathek)?$', title or '')[0] or None
-
     def _real_extract(self, url):
         channel_id = self._match_id(url)
 
         webpage = self._download_webpage(url, channel_id)
 
-        matches = re.finditer(
-            r'''<div\b[^>]*?\sdata-plusbar-id\s*=\s*(["'])(?P<p_id>[\w-]+)\1[^>]*?\sdata-plusbar-url=\1(?P<url>%s)\1''' % ZDFIE._VALID_URL,
-            webpage)
-
-        if self._downloader.params.get('noplaylist', False):
-            entry = next(
-                (self.url_result(m.group('url'), ie=ZDFIE.ie_key()) for m in matches),
-                None)
-            self.to_screen('Downloading just the main video because of --no-playlist')
-            if entry:
-                return entry
-        else:
-            self.to_screen('Downloading playlist %s - add --no-playlist to download just the main video' % (channel_id, ))
-
-            def check_video(m):
-                v_ref = self._search_regex(
-                    r'''(<a\b[^>]*?\shref\s*=[^>]+?\sdata-target-id\s*=\s*(["'])%s\2[^>]*>)''' % (m.group('p_id'), ),
-                    webpage, 'check id', default='')
-                v_ref = extract_attributes(v_ref)
-                return v_ref.get('data-target-video-type') != 'novideo'
-
-            return self.playlist_from_matches(
-                (m.group('url') for m in matches if check_video(m)),
-                channel_id, self._og_search_title(webpage, fatal=False))
+        entries = [
+            self.url_result(item_url, ie=ZDFIE.ie_key())
+            for item_url in orderedSet(re.findall(
+                r'data-plusbar-url=["\'](http.+?\.html)', webpage))]
+
+        return self.playlist_result(
+            entries, channel_id, self._og_search_title(webpage, fatal=False))
+
+        r"""
+        player = self._extract_player(webpage, channel_id)
+
+        channel_id = self._search_regex(
+            r'docId\s*:\s*(["\'])(?P<id>(?!\1).+?)\1', webpage,
+            'channel id', group='id')
+
+        channel = self._call_api(
+            'https://api.zdf.de/content/documents/%s.json' % channel_id,
+            player, url, channel_id)
+
+        items = []
+        for module in channel['module']:
+            for teaser in try_get(module, lambda x: x['teaser'], list) or []:
+                t = try_get(
+                    teaser, lambda x: x['http://zdf.de/rels/target'], dict)
+                if not t:
+                    continue
+                items.extend(try_get(
+                    t,
+                    lambda x: x['resultsWithVideo']['http://zdf.de/rels/search/results'],
+                    list) or [])
+            items.extend(try_get(
+                module,
+                lambda x: x['filterRef']['resultsWithVideo']['http://zdf.de/rels/search/results'],
+                list) or [])
+
+        entries = []
+        entry_urls = set()
+        for item in items:
+            t = try_get(item, lambda x: x['http://zdf.de/rels/target'], dict)
+            if not t:
+                continue
+            sharing_url = t.get('http://zdf.de/rels/sharing-url')
+            if not sharing_url or not isinstance(sharing_url, compat_str):
+                continue
+            if sharing_url in entry_urls:
+                continue
+            entry_urls.add(sharing_url)
+            entries.append(self.url_result(
+                sharing_url, ie=ZDFIE.ie_key(), video_id=t.get('id')))
+        return self.playlist_result(entries, channel_id, channel.get('title'))
+        """