vidto extractor: code cleanup
parent a625e56543
commit 42fc93c709
2 changed files with 12 additions and 21 deletions
@@ -624,7 +624,6 @@
 - **VideoTt**: video.tt - Your True Tube
 - **videoweed**: VideoWeed
 - **Vidme**
-- **vidto**: VidTo.me
 - **Vidzi**
 - **vier**
 - **vier:videos**

@@ -1,24 +1,14 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re
-import sys
 from .common import InfoExtractor
+import re
 import time

-from ..utils import (
-    encode_dict,
-)
+from ..utils import encode_dict
 from ..compat import (
-    compat_chr,
-    compat_parse_qs,
-    compat_urllib_parse,
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-    compat_urllib_parse_urlparse,
     compat_urllib_request,
-    compat_urlparse,
-    compat_str,
+    compat_urllib_parse
 )

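Note: only `compat_urllib_request` and `compat_urllib_parse` survive the import cleanup. As a rough sketch of what those two aliases resolve to (an assumption about the compat layer, not a copy of compat.py):

```python
# Rough sketch of the two aliases kept above; an assumption about the
# compat layer, not a copy of youtube_dl/compat.py.
try:
    import urllib.request as compat_urllib_request  # Python 3
    import urllib.parse as compat_urllib_parse
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request
    import urllib as compat_urllib_parse

# urlencode() is reachable through compat_urllib_parse on both versions,
# which is what the POST-building code in this extractor relies on.
print(compat_urllib_parse.urlencode({'example_key': 'example_value'}))
```
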
@@ -37,8 +27,7 @@ class VidtoIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)

         page = self._download_webpage(
             'http://%s/%s.html' % (self._HOST, video_id), video_id, 'Downloading video page')

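`self._match_id(url)` is the InfoExtractor helper that applies `_VALID_URL` and returns the `id` group, so the manual `re.match` / `mobj.group('id')` pair collapses to one line. A minimal sketch of the equivalence, using a made-up URL pattern rather than the extractor's real `_VALID_URL`:

```python
import re

# Hypothetical pattern standing in for the extractor's real _VALID_URL.
_VALID_URL = r'https?://(?:www\.)?vidto\.me/(?P<id>[0-9a-zA-Z]+)\.html'


def match_id(url):
    # Roughly what self._match_id(url) does: match the URL against
    # _VALID_URL and return the named 'id' group.
    mobj = re.match(_VALID_URL, url)
    return mobj.group('id')


print(match_id('http://vidto.me/abcd1234efgh.html'))  # -> abcd1234efgh
```
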
@@ -63,16 +52,19 @@ class VidtoIE(InfoExtractor):
         post_data = compat_urllib_parse.urlencode(encode_dict(form_str)).encode('ascii')
         req = compat_urllib_request.Request(url, post_data)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
-        for key, morsel in cookies.iteritems():
-            req.add_header('Cookie', '%s=%s' % (morsel.key, morsel.value))
+        cookie_string = ""
+        for key in cookies.keys():
+            cookie_string += "%s=%s;" % (key, cookies[key].value)

-        print("Waiting for countdown...")
+        req.add_header('Cookie', '%s' % cookie_string)
+
+        self.to_screen("Waiting for countdown...")
         time.sleep(7)
         post_result = self._download_webpage(
-            req, None,
+            req, video_id,
             note='Proceed to video...', errnote='unable to proceed', fatal=True)

-        file_link_regex = r'file_link ?= ?\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'
+        file_link_regex = r'file_link\s*=\s*\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'
         file_link = self._search_regex(file_link_regex, post_result, 'file_link', fatal=True)

         return {
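The cookie handling now builds a single `Cookie` header instead of calling `add_header('Cookie', ...)` once per cookie; `Request.add_header()` keys headers by name, so repeated calls would keep only the last cookie. Dropping `iteritems()` also keeps the loop Python 3 compatible, and `self.to_screen()` routes the countdown message through the downloader's logging rather than a bare `print()`. A small sketch of the header-building loop, assuming `cookies` behaves like a `SimpleCookie` of Morsels (which the old `morsel.value` access suggests):

```python
# Sketch of the header-building loop above.  Assumes `cookies` behaves like
# http.cookies.SimpleCookie (a mapping of cookie name -> Morsel), which the
# old `morsel.key` / `morsel.value` access suggests; cookie values are made up.
try:
    from http.cookies import SimpleCookie  # Python 3
except ImportError:
    from Cookie import SimpleCookie  # Python 2

cookies = SimpleCookie()
cookies.load('file_id=abc123; hash=xyz789')

cookie_string = ""
for key in cookies.keys():
    cookie_string += "%s=%s;" % (key, cookies[key].value)

# One combined header is then attached with a single add_header() call.
print(cookie_string)  # e.g. file_id=abc123;hash=xyz789;
```

Passing `video_id` instead of `None` to `_download_webpage` also lets its progress and error messages name the video being processed.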