[br] Simplify

Philipp Hagemeister 2014-02-22 20:17:26 +01:00
parent 1052d2bfec
commit 06aabfc422
1 changed file with 60 additions and 63 deletions


@@ -1,82 +1,79 @@
 # coding: utf-8
+from __future__ import unicode_literals
+
+import re
 
 from .common import InfoExtractor
 
 
 class BRIE(InfoExtractor):
-    IE_DESC = u"Bayerischer Rundfunk Mediathek"
-    _VALID_URL = r"^https?://(?:www\.)?br\.de/mediathek/video/(?:sendungen/)?(?:[a-z0-9\-]+\.html)$"
-    _BASE_URL = u"http://www.br.de"
+    IE_DESC = "Bayerischer Rundfunk Mediathek"
+    _VALID_URL = r"^https?://(?:www\.)?br\.de/mediathek/video/(?:sendungen/)?(?P<id>[a-z0-9\-]+)\.html$"
+    _BASE_URL = "http://www.br.de"
 
-    _TESTS = [
-        {
-            u"url": u"http://www.br.de/mediathek/video/anselm-gruen-114.html",
-            u"file": u"2c8d81c5-6fb7-4a74-88d4-e768e5856532.mp4",
-            u"md5": u"c4f83cf0f023ba5875aba0bf46860df2",
-            u"info_dict": {
-                u"title": u"Feiern und Verzichten",
-                u"description": u"Anselm Grün: Feiern und Verzichten",
-                u"uploader": u"BR/Birgit Baier",
-                u"upload_date": u"20140301"
-            }
-        }
-    ]
+    _TEST = {
+        "url": "http://www.br.de/mediathek/video/anselm-gruen-114.html",
+        "md5": "c4f83cf0f023ba5875aba0bf46860df2",
+        "info_dict": {
+            "id": "2c8d81c5-6fb7-4a74-88d4-e768e5856532",
+            "ext": "mp4",
+            "title": "Feiern und Verzichten",
+            "description": "Anselm Grün: Feiern und Verzichten",
+            "uploader": "BR/Birgit Baier",
+            "upload_date": "20140301"
+        }
+    }
 
     def _real_extract(self, url):
-        page = self._download_webpage(url, None)
-        xml_url = self._search_regex(r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/mediathek/video/[a-z0-9/~_.-]+)'}\)\);", page, "XMLURL")
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+        page = self._download_webpage(url, display_id)
+        xml_url = self._search_regex(
+            r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/mediathek/video/[a-z0-9/~_.-]+)'}\)\);", page, "XMLURL")
         xml = self._download_xml(self._BASE_URL + xml_url, None)
 
-        videos = []
-        for xml_video in xml.findall("video"):
-            video = {}
-            video["id"] = xml_video.get("externalId")
-            video["title"] = xml_video.find("title").text
-            video["formats"] = self._extract_formats(xml_video.find("assets"))
-            video["thumbnails"] = self._extract_thumbnails(xml_video.find("teaserImage/variants"))
-            video["thumbnail"] = video["thumbnails"][0]["url"]
-            video["description"] = " ".join(xml_video.find("shareTitle").text.splitlines())
-            video["uploader"] = xml_video.find("author").text
-            video["upload_date"] = "".join(reversed(xml_video.find("broadcastDate").text.split(".")))
-            video["webpage_url"] = xml_video.find("permalink").text
-            videos.append(video)
+        videos = [{
+            "id": xml_video.get("externalId"),
+            "title": xml_video.find("title").text,
+            "formats": self._extract_formats(xml_video.find("assets")),
+            "thumbnails": self._extract_thumbnails(xml_video.find("teaserImage/variants")),
+            "description": " ".join(xml_video.find("shareTitle").text.splitlines()),
+            "uploader": xml_video.find("author").text,
+            "upload_date": "".join(reversed(xml_video.find("broadcastDate").text.split("."))),
+            "webpage_url": xml_video.find("permalink").text,
+        } for xml_video in xml.findall("video")]
 
         if len(videos) > 1:
-            self._downloader.report_warning(u'found multiple videos; please'
-                u'report this with the video URL to http://yt-dl.org/bug')
+            self._downloader.report_warning(
+                'found multiple videos; please '
+                'report this with the video URL to http://yt-dl.org/bug')
+        if not videos:
+            raise ExtractorError('No video entries found')
         return videos[0]
 
     def _extract_formats(self, assets):
-        vformats = []
-        for asset in assets.findall("asset"):
-            if asset.find("downloadUrl") is None:
-                continue
-            vformat = {}
-            vformat["url"] = asset.find("downloadUrl").text
-            vformat["ext"] = asset.find("mediaType").text
-            vformat["format_id"] = asset.get("type")
-            vformat["width"] = int(asset.find("frameWidth").text)
-            vformat["height"] = int(asset.find("frameHeight").text)
-            vformat["resolution"] = "%ix%i" % (vformat["width"], vformat["height"])
-            vformat["tbr"] = int(asset.find("bitrateVideo").text)
-            vformat["abr"] = int(asset.find("bitrateAudio").text)
-            vformat["vcodec"] = asset.find("codecVideo").text
-            vformat["container"] = vformat["ext"]
-            vformat["filesize"] = int(asset.find("size").text)
-            vformat["preference"] = vformat["quality"] = -1
-            vformat["format"] = "%s container with %i Kbps %s" % (vformat["container"], vformat["tbr"], vformat["vcodec"])
-            vformats.append(vformat)
-        self._sort_formats(vformats)
-        return vformats
+        formats = [{
+            "url": asset.find("downloadUrl").text,
+            "ext": asset.find("mediaType").text,
+            "format_id": asset.get("type"),
+            "width": int(asset.find("frameWidth").text),
+            "height": int(asset.find("frameHeight").text),
+            "tbr": int(asset.find("bitrateVideo").text),
+            "abr": int(asset.find("bitrateAudio").text),
+            "vcodec": asset.find("codecVideo").text,
+            "container": asset.find("mediaType").text,
+            "filesize": int(asset.find("size").text),
+        } for asset in assets.findall("asset")
+            if asset.find("downloadUrl") is not None]
+
+        self._sort_formats(formats)
+        return formats
 
     def _extract_thumbnails(self, variants):
-        thumbnails = []
-        for variant in variants.findall("variant"):
-            thumbnail = {}
-            thumbnail["url"] = self._BASE_URL + variant.find("url").text
-            thumbnail["width"] = int(variant.find("width").text)
-            thumbnail["height"] = int(variant.find("height").text)
-            thumbnail["resolution"] = "%ix%i" % (thumbnail["width"], thumbnail["height"])
-            thumbnails.append(thumbnail)
-        thumbnails.sort(key = lambda x: x["width"] * x["height"], reverse=True)
+        thumbnails = [{
+            "url": self._BASE_URL + variant.find("url").text,
+            "width": int(variant.find("width").text),
+            "height": int(variant.find("height").text),
+        } for variant in variants.findall("variant")]
+        thumbnails.sort(key=lambda x: x["width"] * x["height"], reverse=True)
         return thumbnails
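
For reference, the pattern behind the simplification above — building one dict per XML element in a single list comprehension over findall(), including the broadcastDate ("DD.MM.YYYY") to upload_date ("YYYYMMDD") conversion — can be sketched standalone. The inline sample XML and the subset of fields below are illustrative only, not BR's actual feed format:

    # Standalone sketch of the loop-to-comprehension refactor used in the commit.
    # The sample XML is made up for illustration; only the field handling
    # (e.g. broadcastDate "DD.MM.YYYY" -> upload_date "YYYYMMDD") mirrors the extractor.
    import xml.etree.ElementTree as ET

    SAMPLE = """
    <videos>
      <video externalId="2c8d81c5-6fb7-4a74-88d4-e768e5856532">
        <title>Feiern und Verzichten</title>
        <author>BR/Birgit Baier</author>
        <broadcastDate>01.03.2014</broadcastDate>
      </video>
    </videos>
    """

    root = ET.fromstring(SAMPLE)

    # One dict per <video> element, built in a single expression instead of
    # an explicit loop with repeated video[...] = ... assignments.
    videos = [{
        "id": xml_video.get("externalId"),
        "title": xml_video.find("title").text,
        "uploader": xml_video.find("author").text,
        # "01.03.2014" -> ["01", "03", "2014"] -> reversed -> "20140301"
        "upload_date": "".join(reversed(xml_video.find("broadcastDate").text.split("."))),
    } for xml_video in root.findall("video")]

    print(videos[0]["upload_date"])  # 20140301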