Mirror of https://gitlab.com/ytdl-org/youtube-dl.git, synced 2026-01-24 00:00:10 -05:00

Compare commits: 2011.09.30 ... 2011.11.22 (41 commits)
0ae7abe57c
dc0a294a73
468c99257c
af8e8d63f9
e092418d8b
e33e3045c6
cb6568bf21
235b3ba479
5b3330e0cf
aab771fbdf
00f95a93f5
1724e7c461
3b98a5ddac
8b59cc93d5
c3e4e7c182
38348005b3
208c4b9128
ec574c2c41
871be928a8
b20d4f8626
073d7a5985
40306424b1
ecb3bfe543
abeac45abe
0fca93ac60
857e5f329a
053419cd24
99e207bab0
0067bbe7a7
45aa690868
beb245e92f
c424df0d2f
87929e4b35
d76736fc5e
0f9b77223e
9f47175a40
a1a8713aad
6501a06d46
8d89fbae5a
7a2cf5455c
7125a7ca8b
@@ -1 +1 @@
-2011.09.30
+2011.11.22

Makefile | 11
@@ -1,12 +1,12 @@
 default: update

-update: update-readme update-latest
+update: compile update-readme update-latest

 update-latest:
-	./youtube-dl --version > LATEST_VERSION
+	./youtube-dl.dev --version > LATEST_VERSION

 update-readme:
-	@options=$$(COLUMNS=80 ./youtube-dl --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/### \1/') && \
+	@options=$$(COLUMNS=80 ./youtube-dl.dev --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/### \1/') && \
 		header=$$(sed -e '/.*## OPTIONS/,$$ d' README.md) && \
 		footer=$$(sed -e '1,/.*## FAQ/ d' README.md) && \
 		echo "$${header}" > README.md && \
@@ -15,6 +15,7 @@ update-readme:
 		echo -e '\n## FAQ' >> README.md && \
 		echo "$${footer}" >> README.md

+compile:
+	cp youtube_dl/__init__.py youtube-dl
+
-.PHONY: default update update-latest update-readme
+.PHONY: default compile update update-latest update-readme
test/test_div.py | 29 (new file)
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+# Various small unit tests
+
+import os,sys
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+
+import youtube_dl
+
+def test_simplify_title():
+	assert youtube_dl._simplify_title(u'abc') == u'abc'
+	assert youtube_dl._simplify_title(u'abc_d-e') == u'abc_d-e'
+
+	assert youtube_dl._simplify_title(u'123') == u'123'
+
+	assert u'/' not in youtube_dl._simplify_title(u'abc/de')
+	assert u'abc' in youtube_dl._simplify_title(u'abc/de')
+	assert u'de' in youtube_dl._simplify_title(u'abc/de')
+	assert u'/' not in youtube_dl._simplify_title(u'abc/de///')
+
+	assert u'\\' not in youtube_dl._simplify_title(u'abc\\de')
+	assert u'abc' in youtube_dl._simplify_title(u'abc\\de')
+	assert u'de' in youtube_dl._simplify_title(u'abc\\de')
+
+	assert youtube_dl._simplify_title(u'ä') == u'ä'
+	assert youtube_dl._simplify_title(u'кириллица') == u'кириллица'
+
+	# Strip underlines
+	assert youtube_dl._simplify_title(u'\'a_') == u'a'
youtube-dl | 459
@@ -12,10 +12,12 @@ __author__ = (
 	'Rogério Brito',
 	'Philipp Hagemeister',
 	'Sören Schulze',
+	'Kevin Ngo',
+	'Ori Avtalion',
 	)

 __license__ = 'Public Domain'
-__version__ = '2011.09.30'
+__version__ = '2011.11.22'

 UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'

@@ -77,8 +79,6 @@ std_headers = {
 	'Accept-Language': 'en-us,en;q=0.5',
 }

-simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
-
 try:
 	import json
 except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
@@ -277,6 +277,9 @@ def timeconvert(timestr):
 		timestamp = email.utils.mktime_tz(timetuple)
 	return timestamp

+def _simplify_title(title):
+	expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE)
+	return expr.sub(u'_', title).strip(u'_')

 class DownloadError(Exception):
 	"""Download Error exception.
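A quick aside on the new helper (not part of the diff): _simplify_title collapses every run of characters outside \w, digits, '_' and '-' into a single underscore and strips underscores from both ends. The standalone Python 2 sketch below (Python 2 because of the u''/ur'' literals used throughout the script) reproduces the helper and checks a few of the cases from the new test/test_div.py:

# Standalone sketch of the _simplify_title() helper introduced above.
# The assertions mirror test/test_div.py.
import re

def _simplify_title(title):
	expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE)
	return expr.sub(u'_', title).strip(u'_')

assert _simplify_title(u'abc') == u'abc'
assert _simplify_title(u'abc_d-e') == u'abc_d-e'
assert _simplify_title(u'abc/de///') == u'abc_de'  # the slashes collapse to one '_', the trailing '_' is stripped
assert _simplify_title(u"'a_") == u'a'             # punctuation and underscores at the edges are stripped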
@@ -1236,7 +1239,7 @@ class YoutubeIE(InfoExtractor):

 		# Get video webpage
 		self.report_video_webpage_download(video_id)
-		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
+		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
 		try:
 			video_webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
@@ -1289,8 +1292,7 @@ class YoutubeIE(InfoExtractor):
 		video_title = sanitize_title(video_title)

 		# simplified title
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-		simple_title = simple_title.strip(ur'_')
+		simple_title = _simplify_title(video_title)

 		# thumbnail image
 		if 'thumbnail_url' not in video_info:
@@ -1560,9 +1562,6 @@ class DailymotionIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url):
 		# Extract id and simplified title from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -1651,9 +1650,6 @@ class GoogleIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url):
 		# Extract id from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -1697,7 +1693,7 @@ class GoogleIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = _simplify_title(video_title)

 		# Extract video description
 		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
@@ -1758,9 +1754,6 @@ class PhotobucketIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url):
 		# Extract id from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -1799,7 +1792,7 @@ class PhotobucketIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = _simplify_title(vide_title)

 		video_uploader = mobj.group(2).decode('utf-8')

@@ -1840,9 +1833,6 @@ class YahooIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url, new_video=True):
 		# Extract ID from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -1896,7 +1886,7 @@ class YahooIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: unable to extract video title')
 			return
 		video_title = mobj.group(1).decode('utf-8')
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = _simplify_title(video_title)

 		mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
 		if mobj is None:
@@ -1993,9 +1983,6 @@ class VimeoIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url, new_video=True):
 		# Extract ID from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -2027,7 +2014,7 @@ class VimeoIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: unable to extract video title')
 			return
 		video_title = mobj.group(1).decode('utf-8')
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = _simple_title(video_title)

 		# Extract uploader
 		mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
@@ -2059,6 +2046,18 @@ class VimeoIE(InfoExtractor):
 			return
 		sig = mobj.group(1).decode('utf-8')

+		# Vimeo specific: extract video quality information
+		mobj = re.search(r'<isHD>(\d+)</isHD>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video quality information')
+			return
+		quality = mobj.group(1).decode('utf-8')
+
+		if int(quality) == 1:
+			quality = 'hd'
+		else:
+			quality = 'sd'
+
 		# Vimeo specific: Extract request signature expiration
 		mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
 		if mobj is None:
@@ -2066,7 +2065,7 @@ class VimeoIE(InfoExtractor):
 			return
 		sig_exp = mobj.group(1).decode('utf-8')

-		video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)
+		video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=%s" % (video_id, sig, sig_exp, quality)

 		try:
 			# Process video information
@@ -2106,9 +2105,6 @@ class GenericIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url):
 		# At this point we have a new video
 		self._downloader.increment_downloads()
@@ -2162,7 +2158,7 @@ class GenericIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = _simplify_title(video_title)

 		# video uploader is domain name
 		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
@@ -2458,7 +2454,7 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
 	"""Information Extractor for YouTube playlists."""

-	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
 	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
@@ -2502,7 +2498,8 @@ class YoutubePlaylistIE(InfoExtractor):

 		while True:
 			self.report_download_page(playlist_id, pagenum)
-			request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
+			url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
+			request = urllib2.Request(url)
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
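For illustration only (not part of the diff): the widened _VALID_URL above now also accepts /course?list=... pages, and list IDs written with a leading "PL" prefix, which the new (?:PL)? group skips before the ID is captured. A minimal check, with made-up playlist IDs standing in for real ones:

# The playlist IDs below are invented placeholders; only the regex is taken from the diff.
import re

_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'

m = re.match(_VALID_URL, 'http://www.youtube.com/playlist?list=PL0123456789ABCDEF')
assert m.group(2) == '0123456789ABCDEF'    # the 'PL' prefix is no longer part of the captured ID

m = re.match(_VALID_URL, 'http://www.youtube.com/course?list=EC0123456789ABCDEF')
assert m.group(2) == 'EC0123456789ABCDEF'  # /course?list=... URLs now match as well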
@@ -2536,7 +2533,7 @@ class YoutubeUserIE(InfoExtractor):
 	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
 	_GDATA_PAGE_SIZE = 50
 	_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+	_VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
 	_youtube_ie = None
 	IE_NAME = u'youtube:user'

@@ -2634,9 +2631,6 @@ class DepositFilesIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self, url):
 		# At this point we have a new file
 		self._downloader.increment_downloads()
@@ -2697,11 +2691,12 @@ class DepositFilesIE(InfoExtractor):
 class FacebookIE(InfoExtractor):
 	"""Information Extractor for Facebook"""

-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/video/video\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
 	_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
 	_NETRC_MACHINE = 'facebook'
-	_available_formats = ['highqual', 'lowqual']
+	_available_formats = ['video', 'highqual', 'lowqual']
 	_video_extensions = {
+		'video': 'mp4',
 		'highqual': 'mp4',
 		'lowqual': 'mp4',
 	}
@@ -2729,10 +2724,9 @@ class FacebookIE(InfoExtractor):
 	def _parse_page(self, video_webpage):
 		"""Extract video information from page"""
 		# General data
-		data = {'title': r'class="video_title datawrap">(.*?)</',
+		data = {'title': r'\("video_title", "(.*?)"\)',
			'description': r'<div class="datawrap">(.*?)</div>',
			'owner': r'\("video_owner_name", "(.*?)"\)',
-			'upload_date': r'data-date="(.*?)"',
			'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
			}
 		video_info = {}
@@ -2834,9 +2828,7 @@ class FacebookIE(InfoExtractor):
 		video_title = video_title.decode('utf-8')
 		video_title = sanitize_title(video_title)

-		# simplified title
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-		simple_title = simple_title.strip(ur'_')
+		simple_title = _simplify_title(video_title)

 		# thumbnail image
 		if 'thumbnail' not in video_info:
@@ -2927,11 +2919,6 @@ class BlipTVIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))

-	def _simplify_title(self, title):
-		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-		res = res.strip(ur'_')
-		return res
-
 	def _real_extract(self, url):
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
@@ -2951,13 +2938,14 @@ class BlipTVIE(InfoExtractor):
 		if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
 			basename = url.split('/')[-1]
 			title,ext = os.path.splitext(basename)
+			title = title.decode('UTF-8')
 			ext = ext.replace('.', '')
 			self.report_direct_download(title)
 			info = {
 				'id': title,
 				'url': url,
 				'title': title,
-				'stitle': self._simplify_title(title),
+				'stitle': _simplify_title(title),
 				'ext': ext,
 				'urlhandle': urlh
 			}
@@ -2991,7 +2979,7 @@ class BlipTVIE(InfoExtractor):
 			'uploader': data['display_name'],
 			'upload_date': upload_date,
 			'title': data['title'],
-			'stitle': self._simplify_title(data['title']),
+			'stitle': _simplify_title(data['title']),
 			'ext': ext,
 			'format': data['media']['mimeType'],
 			'thumbnail': data['thumbnailUrl'],
@@ -3027,9 +3015,6 @@ class MyVideoIE(InfoExtractor):
 		"""Report information extraction."""
 		self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)

-	def _real_initialize(self):
-		return
-
 	def _real_extract(self,url):
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
@@ -3037,10 +3022,6 @@ class MyVideoIE(InfoExtractor):
 			return

 		video_id = mobj.group(1)
-		simple_title = mobj.group(2).decode('utf-8')
-		# should actually not be necessary
-		simple_title = sanitize_title(simple_title)
-		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', simple_title)

 		# Get video webpage
 		request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
@@ -3067,6 +3048,8 @@ class MyVideoIE(InfoExtractor):
 		video_title = mobj.group(1)
 		video_title = sanitize_title(video_title)

+		simple_title = _simplify_title(video_title)
+
 		try:
 			self._downloader.process_info({
 				'id': video_id,
@@ -3100,11 +3083,6 @@ class ComedyCentralIE(InfoExtractor):
 	def report_player_url(self, episode_id):
 		self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)

-	def _simplify_title(self, title):
-		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-		res = res.strip(ur'_')
-		return res
-
 	def _real_extract(self, url):
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
@@ -3113,9 +3091,9 @@ class ComedyCentralIE(InfoExtractor):

 		if mobj.group('shortname'):
 			if mobj.group('shortname') in ('tds', 'thedailyshow'):
-				url = 'http://www.thedailyshow.com/full-episodes/'
+				url = u'http://www.thedailyshow.com/full-episodes/'
 			else:
-				url = 'http://www.colbertnation.com/full-episodes/'
+				url = u'http://www.colbertnation.com/full-episodes/'
 			mobj = re.match(self._VALID_URL, url)
 			assert mobj is not None

@@ -3201,7 +3179,7 @@ class ComedyCentralIE(InfoExtractor):

 			self._downloader.increment_downloads()

-			effTitle = showId + '-' + epTitle
+			effTitle = showId + u'-' + epTitle
 			info = {
 				'id': shortMediaId,
 				'url': video_url,
@@ -3235,11 +3213,6 @@ class EscapistIE(InfoExtractor):
 	def report_config_download(self, showName):
 		self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)

-	def _simplify_title(self, title):
-		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-		res = res.strip(ur'_')
-		return res
-
 	def _real_extract(self, url):
 		htmlParser = HTMLParser.HTMLParser()

@@ -3292,7 +3265,7 @@ class EscapistIE(InfoExtractor):
 			'uploader': showName,
 			'upload_date': None,
 			'title': showName,
-			'stitle': self._simplify_title(showName),
+			'stitle': _simplify_title(showName),
 			'ext': 'flv',
 			'format': 'flv',
 			'thumbnail': imgUrl,
@@ -3306,6 +3279,336 @@ class EscapistIE(InfoExtractor):
 			self._downloader.trouble(u'\nERROR: unable to download ' + videoId)


+class CollegeHumorIE(InfoExtractor):
+	"""Information extractor for collegehumor.com"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
+	IE_NAME = u'collegehumor'
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group('videoid')
+
+		self.report_webpage(video_id)
+		request = urllib2.Request(url)
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
+		if m is None:
+			self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
+			return
+		internal_video_id = m.group('internalvideoid')
+
+		info = {
+			'id': video_id,
+			'internal_id': internal_video_id,
+		}
+
+		self.report_extraction(video_id)
+		xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
+		try:
+			metaXml = urllib2.urlopen(xmlUrl).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
+			return
+
+		mdoc = xml.etree.ElementTree.fromstring(metaXml)
+		try:
+			videoNode = mdoc.findall('./video')[0]
+			info['description'] = videoNode.findall('./description')[0].text
+			info['title'] = videoNode.findall('./caption')[0].text
+			info['stitle'] = _simplify_title(info['title'])
+			info['url'] = videoNode.findall('./file')[0].text
+			info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
+			info['ext'] = info['url'].rpartition('.')[2]
+			info['format'] = info['ext']
+		except IndexError:
+			self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+			return
+
+		self._downloader.increment_downloads()
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class XVideosIE(InfoExtractor):
+	"""Information extractor for xvideos.com"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+	IE_NAME = u'xvideos'
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group(1).decode('utf-8')
+
+		self.report_webpage(video_id)
+
+		request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		self.report_extraction(video_id)
+
+
+		# Extract video URL
+		mobj = re.search(r'flv_url=(.+?)&', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video url')
+			return
+		video_url = urllib2.unquote(mobj.group(1).decode('utf-8'))
+
+
+		# Extract title
+		mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+
+
+		# Extract video thumbnail
+		mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+			return
+		video_thumbnail = mobj.group(1).decode('utf-8')
+
+
+
+		self._downloader.increment_downloads()
+		info = {
+			'id': video_id,
+			'url': video_url,
+			'uploader': None,
+			'upload_date': None,
+			'title': video_title,
+			'stitle': _simplify_title(video_title),
+			'ext': 'flv',
+			'format': 'flv',
+			'thumbnail': video_thumbnail,
+			'description': None,
+			'player_url': None,
+		}
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download ' + video_id)
+
+
+class SoundcloudIE(InfoExtractor):
+	"""Information extractor for soundcloud.com
+	   To access the media, the uid of the song and a stream token
+	   must be extracted from the page source and the script must make
+	   a request to media.soundcloud.com/crossdomain.xml. Then
+	   the media can be grabbed by requesting from an url composed
+	   of the stream token and uid
+	"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
+	IE_NAME = u'soundcloud'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+
+		# extract uploader (which is in the url)
+		uploader = mobj.group(1).decode('utf-8')
+		# extract simple title (uploader + slug of song title)
+		slug_title = mobj.group(2).decode('utf-8')
+		simple_title = uploader + '-' + slug_title
+
+		self.report_webpage('%s/%s' % (uploader, slug_title))
+
+		request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		self.report_extraction('%s/%s' % (uploader, slug_title))
+
+		# extract uid and stream token that soundcloud hands out for access
+		mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
+		if mobj:
+			video_id = mobj.group(1)
+			stream_token = mobj.group(2)
+
+		# extract unsimplified title
+		mobj = re.search('"title":"(.*?)",', webpage)
+		if mobj:
+			title = mobj.group(1)
+
+		# construct media url (with uid/token)
+		mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
+		mediaURL = mediaURL % (video_id, stream_token)
+
+		# description
+		description = u'No description available'
+		mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
+		if mobj:
+			description = mobj.group(1)
+
+		# upload date
+		upload_date = None
+		mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
+		if mobj:
+			try:
+				upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
+			except Exception, e:
+				print str(e)
+
+		# for soundcloud, a request to a cross domain is required for cookies
+		request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
+
+		try:
+			self._downloader.process_info({
+				'id': video_id.decode('utf-8'),
+				'url': mediaURL,
+				'uploader': uploader.decode('utf-8'),
+				'upload_date': upload_date,
+				'title': simple_title.decode('utf-8'),
+				'stitle': simple_title.decode('utf-8'),
+				'ext': u'mp3',
+				'format': u'NA',
+				'player_url': None,
+				'description': description.decode('utf-8')
+			})
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class InfoQIE(InfoExtractor):
+	"""Information extractor for infoq.com"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
+	IE_NAME = u'infoq'
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+
+		self.report_webpage(url)
+
+		request = urllib2.Request(url)
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		self.report_extraction(url)
+
+
+		# Extract video URL
+		mobj = re.search(r"jsclassref='([^']*)'", webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video url')
+			return
+		video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64'))
+
+
+		# Extract title
+		mobj = re.search(r'contentTitle = "(.*?)";', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+
+		# Extract description
+		video_description = u'No description available.'
+		mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
+		if mobj is not None:
+			video_description = mobj.group(1).decode('utf-8')
+
+		video_filename = video_url.split('/')[-1]
+		video_id, extension = video_filename.split('.')
+
+		self._downloader.increment_downloads()
+		info = {
+			'id': video_id,
+			'url': video_url,
+			'uploader': None,
+			'upload_date': None,
+			'title': video_title,
+			'stitle': _simplify_title(video_title),
+			'ext': extension,
+			'format': extension, # Extension is always(?) mp4, but seems to be flv
+			'thumbnail': None,
+			'description': video_description,
+			'player_url': None,
+		}
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download ' + video_url)
+
+

 class PostProcessor(object):
 	"""Post Processor class.
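The SoundcloudIE docstring above summarises the access scheme: pull the track's uid and a stream_token out of the page source, hit media.soundcloud.com/crossdomain.xml for cookies, and then build the stream URL from the two values. A small illustration of that last step, using an invented page fragment in place of a real SoundCloud page (the regex and URL template are the ones used by the extractor above):

import re

# Made-up stand-in for the page source the extractor downloads.
webpage = '... "uid":"12345678" ... stream_token=abcdef ...'

mobj = re.search(r'"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
video_id, stream_token = mobj.group(1), mobj.group(2)
assert (video_id, stream_token) == ('12345678', 'abcdef')

# The stream URL is composed from the uid and the token, as in the extractor.
mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s" % (video_id, stream_token)
assert mediaURL == 'http://media.soundcloud.com/stream/12345678?stream_token=abcdef'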
@@ -3701,11 +4004,15 @@ def gen_extractors():
 		MyVideoIE(),
 		ComedyCentralIE(),
 		EscapistIE(),
+		CollegeHumorIE(),
+		XVideosIE(),
+		SoundcloudIE(),
+		InfoQIE(),

 		GenericIE()
 	]

-def main():
+def _real_main():
 	parser, opts, args = parseOpts()

 	# Open appropriate CookieJar
@@ -3865,10 +4172,9 @@ def main():

 	sys.exit(retcode)

-
-if __name__ == '__main__':
+def main():
 	try:
-		main()
+		_real_main()
 	except DownloadError:
 		sys.exit(1)
 	except SameFileError:
@@ -3876,4 +4182,7 @@ if __name__ == '__main__':
 	except KeyboardInterrupt:
 		sys.exit(u'\nERROR: Interrupted by user')

+if __name__ == '__main__':
+	main()
+
 # vim: set ts=4 sw=4 sts=4 noet ai si filetype=python:
youtube-dl.dev | 6 (new executable file)
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import youtube_dl
+
+youtube_dl.main()

youtube_dl/__init__.py | 4188 (executable file)
File diff suppressed because it is too large.