forked from luna/vim-rana-local
upgrade wakatime-cli to v6.0.3
This commit is contained in:
parent
082a711995
commit
d46c3e96d5
34 changed files with 1474 additions and 183 deletions
|
@ -1,7 +1,7 @@
|
||||||
__title__ = 'wakatime'
|
__title__ = 'wakatime'
|
||||||
__description__ = 'Common interface to the WakaTime api.'
|
__description__ = 'Common interface to the WakaTime api.'
|
||||||
__url__ = 'https://github.com/wakatime/wakatime'
|
__url__ = 'https://github.com/wakatime/wakatime'
|
||||||
__version_info__ = ('6', '0', '2')
|
__version_info__ = ('6', '0', '3')
|
||||||
__version__ = '.'.join(__version_info__)
|
__version__ = '.'.join(__version_info__)
|
||||||
__author__ = 'Alan Hamlett'
|
__author__ = 'Alan Hamlett'
|
||||||
__author_email__ = 'alan@wakatime.com'
|
__author_email__ = 'alan@wakatime.com'
|
||||||
|
|
|
@ -69,28 +69,6 @@ KEYWORDS = [
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class LassoJavascriptParser(TokenParser):
|
|
||||||
|
|
||||||
def parse(self):
|
|
||||||
for index, token, content in self.tokens:
|
|
||||||
self._process_token(token, content)
|
|
||||||
return self.dependencies
|
|
||||||
|
|
||||||
def _process_token(self, token, content):
|
|
||||||
if u(token) == 'Token.Name.Other':
|
|
||||||
self._process_name(token, content)
|
|
||||||
elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
|
|
||||||
self._process_literal_string(token, content)
|
|
||||||
|
|
||||||
def _process_name(self, token, content):
|
|
||||||
if content.lower() in KEYWORDS:
|
|
||||||
self.append(content.lower())
|
|
||||||
|
|
||||||
def _process_literal_string(self, token, content):
|
|
||||||
if 'famous/core/' in content.strip('"').strip("'"):
|
|
||||||
self.append('famous')
|
|
||||||
|
|
||||||
|
|
||||||
class HtmlDjangoParser(TokenParser):
|
class HtmlDjangoParser(TokenParser):
|
||||||
tags = []
|
tags = []
|
||||||
getting_attrs = False
|
getting_attrs = False
|
||||||
|
|
|
@ -127,8 +127,10 @@ def parseArguments():
|
||||||
help='entity type for this heartbeat. can be one of "file", '+
|
help='entity type for this heartbeat. can be one of "file", '+
|
||||||
'"domain", or "app"; defaults to file.')
|
'"domain", or "app"; defaults to file.')
|
||||||
parser.add_argument('--proxy', dest='proxy',
|
parser.add_argument('--proxy', dest='proxy',
|
||||||
help='optional https proxy url; for example: '+
|
help='optional proxy configuration. Supports HTTPS '+
|
||||||
'https://user:pass@localhost:8080')
|
'and SOCKS proxies. For example: '+
|
||||||
|
'https://user:pass@host:port or '+
|
||||||
|
'socks5://user:pass@host:port')
|
||||||
parser.add_argument('--project', dest='project',
|
parser.add_argument('--project', dest='project',
|
||||||
help='optional project name')
|
help='optional project name')
|
||||||
parser.add_argument('--alternate-project', dest='alternate_project',
|
parser.add_argument('--alternate-project', dest='alternate_project',
|
||||||
|
|
|
@ -36,14 +36,14 @@ usage:
|
||||||
The other HTTP methods are supported - see `requests.api`. Full documentation
|
The other HTTP methods are supported - see `requests.api`. Full documentation
|
||||||
is at <http://python-requests.org>.
|
is at <http://python-requests.org>.
|
||||||
|
|
||||||
:copyright: (c) 2015 by Kenneth Reitz.
|
:copyright: (c) 2016 by Kenneth Reitz.
|
||||||
:license: Apache 2.0, see LICENSE for more details.
|
:license: Apache 2.0, see LICENSE for more details.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__title__ = 'requests'
|
__title__ = 'requests'
|
||||||
__version__ = '2.9.1'
|
__version__ = '2.10.0'
|
||||||
__build__ = 0x020901
|
__build__ = 0x021000
|
||||||
__author__ = 'Kenneth Reitz'
|
__author__ = 'Kenneth Reitz'
|
||||||
__license__ = 'Apache 2.0'
|
__license__ = 'Apache 2.0'
|
||||||
__copyright__ = 'Copyright 2016 Kenneth Reitz'
|
__copyright__ = 'Copyright 2016 Kenneth Reitz'
|
||||||
|
@ -55,6 +55,12 @@ try:
|
||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
# urllib3's DependencyWarnings should be silenced.
|
||||||
|
from .packages.urllib3.exceptions import DependencyWarning
|
||||||
|
warnings.simplefilter('ignore', DependencyWarning)
|
||||||
|
|
||||||
from . import utils
|
from . import utils
|
||||||
from .models import Request, Response, PreparedRequest
|
from .models import Request, Response, PreparedRequest
|
||||||
from .api import request, get, head, post, patch, put, delete, options
|
from .api import request, get, head, post, patch, put, delete, options
|
||||||
|
@ -63,7 +69,7 @@ from .status_codes import codes
|
||||||
from .exceptions import (
|
from .exceptions import (
|
||||||
RequestException, Timeout, URLRequired,
|
RequestException, Timeout, URLRequired,
|
||||||
TooManyRedirects, HTTPError, ConnectionError,
|
TooManyRedirects, HTTPError, ConnectionError,
|
||||||
FileModeWarning,
|
FileModeWarning, ConnectTimeout, ReadTimeout
|
||||||
)
|
)
|
||||||
|
|
||||||
# Set default logging handler to avoid "No handler found" warnings.
|
# Set default logging handler to avoid "No handler found" warnings.
|
||||||
|
|
|
@ -19,7 +19,7 @@ from .packages.urllib3.util.retry import Retry
|
||||||
from .compat import urlparse, basestring
|
from .compat import urlparse, basestring
|
||||||
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
|
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
|
||||||
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
|
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
|
||||||
select_proxy)
|
select_proxy, to_native_string)
|
||||||
from .structures import CaseInsensitiveDict
|
from .structures import CaseInsensitiveDict
|
||||||
from .packages.urllib3.exceptions import ClosedPoolError
|
from .packages.urllib3.exceptions import ClosedPoolError
|
||||||
from .packages.urllib3.exceptions import ConnectTimeoutError
|
from .packages.urllib3.exceptions import ConnectTimeoutError
|
||||||
|
@ -33,9 +33,15 @@ from .packages.urllib3.exceptions import SSLError as _SSLError
|
||||||
from .packages.urllib3.exceptions import ResponseError
|
from .packages.urllib3.exceptions import ResponseError
|
||||||
from .cookies import extract_cookies_to_jar
|
from .cookies import extract_cookies_to_jar
|
||||||
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
|
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
|
||||||
ProxyError, RetryError)
|
ProxyError, RetryError, InvalidSchema)
|
||||||
from .auth import _basic_auth_str
|
from .auth import _basic_auth_str
|
||||||
|
|
||||||
|
try:
|
||||||
|
from .packages.urllib3.contrib.socks import SOCKSProxyManager
|
||||||
|
except ImportError:
|
||||||
|
def SOCKSProxyManager(*args, **kwargs):
|
||||||
|
raise InvalidSchema("Missing dependencies for SOCKS support.")
|
||||||
|
|
||||||
DEFAULT_POOLBLOCK = False
|
DEFAULT_POOLBLOCK = False
|
||||||
DEFAULT_POOLSIZE = 10
|
DEFAULT_POOLSIZE = 10
|
||||||
DEFAULT_RETRIES = 0
|
DEFAULT_RETRIES = 0
|
||||||
|
@ -149,9 +155,22 @@ class HTTPAdapter(BaseAdapter):
|
||||||
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
|
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
|
||||||
:returns: ProxyManager
|
:returns: ProxyManager
|
||||||
"""
|
"""
|
||||||
if not proxy in self.proxy_manager:
|
if proxy in self.proxy_manager:
|
||||||
|
manager = self.proxy_manager[proxy]
|
||||||
|
elif proxy.lower().startswith('socks'):
|
||||||
|
username, password = get_auth_from_url(proxy)
|
||||||
|
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
|
||||||
|
proxy,
|
||||||
|
username=username,
|
||||||
|
password=password,
|
||||||
|
num_pools=self._pool_connections,
|
||||||
|
maxsize=self._pool_maxsize,
|
||||||
|
block=self._pool_block,
|
||||||
|
**proxy_kwargs
|
||||||
|
)
|
||||||
|
else:
|
||||||
proxy_headers = self.proxy_headers(proxy)
|
proxy_headers = self.proxy_headers(proxy)
|
||||||
self.proxy_manager[proxy] = proxy_from_url(
|
manager = self.proxy_manager[proxy] = proxy_from_url(
|
||||||
proxy,
|
proxy,
|
||||||
proxy_headers=proxy_headers,
|
proxy_headers=proxy_headers,
|
||||||
num_pools=self._pool_connections,
|
num_pools=self._pool_connections,
|
||||||
|
@ -159,7 +178,7 @@ class HTTPAdapter(BaseAdapter):
|
||||||
block=self._pool_block,
|
block=self._pool_block,
|
||||||
**proxy_kwargs)
|
**proxy_kwargs)
|
||||||
|
|
||||||
return self.proxy_manager[proxy]
|
return manager
|
||||||
|
|
||||||
def cert_verify(self, conn, url, verify, cert):
|
def cert_verify(self, conn, url, verify, cert):
|
||||||
"""Verify a SSL certificate. This method should not be called from user
|
"""Verify a SSL certificate. This method should not be called from user
|
||||||
|
@ -264,10 +283,12 @@ class HTTPAdapter(BaseAdapter):
|
||||||
def close(self):
|
def close(self):
|
||||||
"""Disposes of any internal state.
|
"""Disposes of any internal state.
|
||||||
|
|
||||||
Currently, this just closes the PoolManager, which closes pooled
|
Currently, this closes the PoolManager and any active ProxyManager,
|
||||||
connections.
|
which closes any pooled connections.
|
||||||
"""
|
"""
|
||||||
self.poolmanager.clear()
|
self.poolmanager.clear()
|
||||||
|
for proxy in self.proxy_manager.values():
|
||||||
|
proxy.clear()
|
||||||
|
|
||||||
def request_url(self, request, proxies):
|
def request_url(self, request, proxies):
|
||||||
"""Obtain the url to use when making the final request.
|
"""Obtain the url to use when making the final request.
|
||||||
|
@ -284,10 +305,16 @@ class HTTPAdapter(BaseAdapter):
|
||||||
"""
|
"""
|
||||||
proxy = select_proxy(request.url, proxies)
|
proxy = select_proxy(request.url, proxies)
|
||||||
scheme = urlparse(request.url).scheme
|
scheme = urlparse(request.url).scheme
|
||||||
if proxy and scheme != 'https':
|
|
||||||
|
is_proxied_http_request = (proxy and scheme != 'https')
|
||||||
|
using_socks_proxy = False
|
||||||
|
if proxy:
|
||||||
|
proxy_scheme = urlparse(proxy).scheme.lower()
|
||||||
|
using_socks_proxy = proxy_scheme.startswith('socks')
|
||||||
|
|
||||||
|
url = request.path_url
|
||||||
|
if is_proxied_http_request and not using_socks_proxy:
|
||||||
url = urldefragauth(request.url)
|
url = urldefragauth(request.url)
|
||||||
else:
|
|
||||||
url = request.path_url
|
|
||||||
|
|
||||||
return url
|
return url
|
||||||
|
|
||||||
|
@ -434,6 +461,9 @@ class HTTPAdapter(BaseAdapter):
|
||||||
if isinstance(e.reason, ResponseError):
|
if isinstance(e.reason, ResponseError):
|
||||||
raise RetryError(e, request=request)
|
raise RetryError(e, request=request)
|
||||||
|
|
||||||
|
if isinstance(e.reason, _ProxyError):
|
||||||
|
raise ProxyError(e, request=request)
|
||||||
|
|
||||||
raise ConnectionError(e, request=request)
|
raise ConnectionError(e, request=request)
|
||||||
|
|
||||||
except ClosedPoolError as e:
|
except ClosedPoolError as e:
|
||||||
|
|
|
@ -24,7 +24,11 @@ def request(method, url, **kwargs):
|
||||||
:param json: (optional) json data to send in the body of the :class:`Request`.
|
:param json: (optional) json data to send in the body of the :class:`Request`.
|
||||||
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
|
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
|
||||||
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
|
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
|
||||||
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
|
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
|
||||||
|
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
|
||||||
|
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
|
||||||
|
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
|
||||||
|
to add for the file.
|
||||||
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
|
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
|
||||||
:param timeout: (optional) How long to wait for the server to send data
|
:param timeout: (optional) How long to wait for the server to send data
|
||||||
before giving up, as a float, or a :ref:`(connect timeout, read
|
before giving up, as a float, or a :ref:`(connect timeout, read
|
||||||
|
|
|
@ -93,6 +93,7 @@ class HTTPDigestAuth(AuthBase):
|
||||||
qop = self._thread_local.chal.get('qop')
|
qop = self._thread_local.chal.get('qop')
|
||||||
algorithm = self._thread_local.chal.get('algorithm')
|
algorithm = self._thread_local.chal.get('algorithm')
|
||||||
opaque = self._thread_local.chal.get('opaque')
|
opaque = self._thread_local.chal.get('opaque')
|
||||||
|
hash_utf8 = None
|
||||||
|
|
||||||
if algorithm is None:
|
if algorithm is None:
|
||||||
_algorithm = 'MD5'
|
_algorithm = 'MD5'
|
||||||
|
|
|
@ -103,8 +103,10 @@ class RequestEncodingMixin(object):
|
||||||
"""Build the body for a multipart/form-data request.
|
"""Build the body for a multipart/form-data request.
|
||||||
|
|
||||||
Will successfully encode files when passed as a dict or a list of
|
Will successfully encode files when passed as a dict or a list of
|
||||||
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
|
tuples. Order is retained if data is a list of tuples but arbitrary
|
||||||
if parameters are supplied as a dict.
|
if parameters are supplied as a dict.
|
||||||
|
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
|
||||||
|
or 4-tuples (filename, fileobj, contentype, custom_headers).
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if (not files):
|
if (not files):
|
||||||
|
@ -463,9 +465,11 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
|
||||||
|
|
||||||
def prepare_content_length(self, body):
|
def prepare_content_length(self, body):
|
||||||
if hasattr(body, 'seek') and hasattr(body, 'tell'):
|
if hasattr(body, 'seek') and hasattr(body, 'tell'):
|
||||||
|
curr_pos = body.tell()
|
||||||
body.seek(0, 2)
|
body.seek(0, 2)
|
||||||
self.headers['Content-Length'] = builtin_str(body.tell())
|
end_pos = body.tell()
|
||||||
body.seek(0, 0)
|
self.headers['Content-Length'] = builtin_str(max(0, end_pos - curr_pos))
|
||||||
|
body.seek(curr_pos, 0)
|
||||||
elif body is not None:
|
elif body is not None:
|
||||||
l = super_len(body)
|
l = super_len(body)
|
||||||
if l:
|
if l:
|
||||||
|
@ -788,7 +792,7 @@ class Response(object):
|
||||||
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
|
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if not self.encoding and len(self.content) > 3:
|
if not self.encoding and self.content and len(self.content) > 3:
|
||||||
# No encoding set. JSON RFC 4627 section 3 states we should expect
|
# No encoding set. JSON RFC 4627 section 3 states we should expect
|
||||||
# UTF-8, -16 or -32. Detect which one to use; If the detection or
|
# UTF-8, -16 or -32. Detect which one to use; If the detection or
|
||||||
# decoding fails, fall back to `self.text` (using chardet to make
|
# decoding fails, fall back to `self.text` (using chardet to make
|
||||||
|
|
|
@ -32,7 +32,7 @@ except ImportError:
|
||||||
|
|
||||||
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
|
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
|
||||||
__license__ = 'MIT'
|
__license__ = 'MIT'
|
||||||
__version__ = '1.13.1'
|
__version__ = '1.15.1'
|
||||||
|
|
||||||
__all__ = (
|
__all__ = (
|
||||||
'HTTPConnectionPool',
|
'HTTPConnectionPool',
|
||||||
|
@ -68,22 +68,25 @@ def add_stderr_logger(level=logging.DEBUG):
|
||||||
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
|
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
|
||||||
logger.addHandler(handler)
|
logger.addHandler(handler)
|
||||||
logger.setLevel(level)
|
logger.setLevel(level)
|
||||||
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
|
logger.debug('Added a stderr logging handler to logger: %s', __name__)
|
||||||
return handler
|
return handler
|
||||||
|
|
||||||
# ... Clean up.
|
# ... Clean up.
|
||||||
del NullHandler
|
del NullHandler
|
||||||
|
|
||||||
|
|
||||||
|
# All warning filters *must* be appended unless you're really certain that they
|
||||||
|
# shouldn't be: otherwise, it's very hard for users to use most Python
|
||||||
|
# mechanisms to silence them.
|
||||||
# SecurityWarning's always go off by default.
|
# SecurityWarning's always go off by default.
|
||||||
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
|
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
|
||||||
# SubjectAltNameWarning's should go off once per host
|
# SubjectAltNameWarning's should go off once per host
|
||||||
warnings.simplefilter('default', exceptions.SubjectAltNameWarning)
|
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
|
||||||
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
|
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
|
||||||
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
|
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
|
||||||
append=True)
|
append=True)
|
||||||
# SNIMissingWarnings should go off only once.
|
# SNIMissingWarnings should go off only once.
|
||||||
warnings.simplefilter('default', exceptions.SNIMissingWarning)
|
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
|
||||||
|
|
||||||
|
|
||||||
def disable_warnings(category=exceptions.HTTPWarning):
|
def disable_warnings(category=exceptions.HTTPWarning):
|
||||||
|
|
|
@ -134,7 +134,7 @@ class HTTPHeaderDict(MutableMapping):
|
||||||
|
|
||||||
def __init__(self, headers=None, **kwargs):
|
def __init__(self, headers=None, **kwargs):
|
||||||
super(HTTPHeaderDict, self).__init__()
|
super(HTTPHeaderDict, self).__init__()
|
||||||
self._container = {}
|
self._container = OrderedDict()
|
||||||
if headers is not None:
|
if headers is not None:
|
||||||
if isinstance(headers, HTTPHeaderDict):
|
if isinstance(headers, HTTPHeaderDict):
|
||||||
self._copy_from(headers)
|
self._copy_from(headers)
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import datetime
|
import datetime
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import socket
|
import socket
|
||||||
|
@ -38,7 +39,7 @@ from .exceptions import (
|
||||||
SubjectAltNameWarning,
|
SubjectAltNameWarning,
|
||||||
SystemTimeWarning,
|
SystemTimeWarning,
|
||||||
)
|
)
|
||||||
from .packages.ssl_match_hostname import match_hostname
|
from .packages.ssl_match_hostname import match_hostname, CertificateError
|
||||||
|
|
||||||
from .util.ssl_ import (
|
from .util.ssl_ import (
|
||||||
resolve_cert_reqs,
|
resolve_cert_reqs,
|
||||||
|
@ -50,6 +51,10 @@ from .util.ssl_ import (
|
||||||
|
|
||||||
from .util import connection
|
from .util import connection
|
||||||
|
|
||||||
|
from ._collections import HTTPHeaderDict
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
port_by_scheme = {
|
port_by_scheme = {
|
||||||
'http': 80,
|
'http': 80,
|
||||||
'https': 443,
|
'https': 443,
|
||||||
|
@ -162,6 +167,38 @@ class HTTPConnection(_HTTPConnection, object):
|
||||||
conn = self._new_conn()
|
conn = self._new_conn()
|
||||||
self._prepare_conn(conn)
|
self._prepare_conn(conn)
|
||||||
|
|
||||||
|
def request_chunked(self, method, url, body=None, headers=None):
|
||||||
|
"""
|
||||||
|
Alternative to the common request method, which sends the
|
||||||
|
body with chunked encoding and not as one block
|
||||||
|
"""
|
||||||
|
headers = HTTPHeaderDict(headers if headers is not None else {})
|
||||||
|
skip_accept_encoding = 'accept-encoding' in headers
|
||||||
|
self.putrequest(method, url, skip_accept_encoding=skip_accept_encoding)
|
||||||
|
for header, value in headers.items():
|
||||||
|
self.putheader(header, value)
|
||||||
|
if 'transfer-encoding' not in headers:
|
||||||
|
self.putheader('Transfer-Encoding', 'chunked')
|
||||||
|
self.endheaders()
|
||||||
|
|
||||||
|
if body is not None:
|
||||||
|
stringish_types = six.string_types + (six.binary_type,)
|
||||||
|
if isinstance(body, stringish_types):
|
||||||
|
body = (body,)
|
||||||
|
for chunk in body:
|
||||||
|
if not chunk:
|
||||||
|
continue
|
||||||
|
if not isinstance(chunk, six.binary_type):
|
||||||
|
chunk = chunk.encode('utf8')
|
||||||
|
len_str = hex(len(chunk))[2:]
|
||||||
|
self.send(len_str.encode('utf-8'))
|
||||||
|
self.send(b'\r\n')
|
||||||
|
self.send(chunk)
|
||||||
|
self.send(b'\r\n')
|
||||||
|
|
||||||
|
# After the if clause, to always have a closed body
|
||||||
|
self.send(b'0\r\n\r\n')
|
||||||
|
|
||||||
|
|
||||||
class HTTPSConnection(HTTPConnection):
|
class HTTPSConnection(HTTPConnection):
|
||||||
default_port = port_by_scheme['https']
|
default_port = port_by_scheme['https']
|
||||||
|
@ -265,21 +302,26 @@ class VerifiedHTTPSConnection(HTTPSConnection):
|
||||||
'for details.)'.format(hostname)),
|
'for details.)'.format(hostname)),
|
||||||
SubjectAltNameWarning
|
SubjectAltNameWarning
|
||||||
)
|
)
|
||||||
|
_match_hostname(cert, self.assert_hostname or hostname)
|
||||||
# In case the hostname is an IPv6 address, strip the square
|
|
||||||
# brackets from it before using it to validate. This is because
|
|
||||||
# a certificate with an IPv6 address in it won't have square
|
|
||||||
# brackets around that address. Sadly, match_hostname won't do this
|
|
||||||
# for us: it expects the plain host part without any extra work
|
|
||||||
# that might have been done to make it palatable to httplib.
|
|
||||||
asserted_hostname = self.assert_hostname or hostname
|
|
||||||
asserted_hostname = asserted_hostname.strip('[]')
|
|
||||||
match_hostname(cert, asserted_hostname)
|
|
||||||
|
|
||||||
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
|
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
|
||||||
self.assert_fingerprint is not None)
|
self.assert_fingerprint is not None)
|
||||||
|
|
||||||
|
|
||||||
|
def _match_hostname(cert, asserted_hostname):
|
||||||
|
try:
|
||||||
|
match_hostname(cert, asserted_hostname)
|
||||||
|
except CertificateError as e:
|
||||||
|
log.error(
|
||||||
|
'Certificate did not match expected hostname: %s. '
|
||||||
|
'Certificate: %s', asserted_hostname, cert
|
||||||
|
)
|
||||||
|
# Add cert to exception and reraise so client code can inspect
|
||||||
|
# the cert when catching the exception, if they want to
|
||||||
|
e._peer_cert = cert
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
if ssl:
|
if ssl:
|
||||||
# Make a copy for testing.
|
# Make a copy for testing.
|
||||||
UnverifiedHTTPSConnection = HTTPSConnection
|
UnverifiedHTTPSConnection = HTTPSConnection
|
||||||
|
|
|
@ -69,7 +69,13 @@ class ConnectionPool(object):
|
||||||
if not host:
|
if not host:
|
||||||
raise LocationValueError("No host specified.")
|
raise LocationValueError("No host specified.")
|
||||||
|
|
||||||
self.host = host
|
# httplib doesn't like it when we include brackets in ipv6 addresses
|
||||||
|
# Specifically, if we include brackets but also pass the port then
|
||||||
|
# httplib crazily doubles up the square brackets on the Host header.
|
||||||
|
# Instead, we need to make sure we never pass ``None`` as the port.
|
||||||
|
# However, for backward compatibility reasons we can't actually
|
||||||
|
# *assert* that.
|
||||||
|
self.host = host.strip('[]')
|
||||||
self.port = port
|
self.port = port
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
|
@ -203,8 +209,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
Return a fresh :class:`HTTPConnection`.
|
Return a fresh :class:`HTTPConnection`.
|
||||||
"""
|
"""
|
||||||
self.num_connections += 1
|
self.num_connections += 1
|
||||||
log.info("Starting new HTTP connection (%d): %s" %
|
log.info("Starting new HTTP connection (%d): %s",
|
||||||
(self.num_connections, self.host))
|
self.num_connections, self.host)
|
||||||
|
|
||||||
conn = self.ConnectionCls(host=self.host, port=self.port,
|
conn = self.ConnectionCls(host=self.host, port=self.port,
|
||||||
timeout=self.timeout.connect_timeout,
|
timeout=self.timeout.connect_timeout,
|
||||||
|
@ -239,7 +245,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
|
|
||||||
# If this is a persistent connection, check if it got disconnected
|
# If this is a persistent connection, check if it got disconnected
|
||||||
if conn and is_connection_dropped(conn):
|
if conn and is_connection_dropped(conn):
|
||||||
log.info("Resetting dropped connection: %s" % self.host)
|
log.info("Resetting dropped connection: %s", self.host)
|
||||||
conn.close()
|
conn.close()
|
||||||
if getattr(conn, 'auto_open', 1) == 0:
|
if getattr(conn, 'auto_open', 1) == 0:
|
||||||
# This is a proxied connection that has been mutated by
|
# This is a proxied connection that has been mutated by
|
||||||
|
@ -272,7 +278,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
except Full:
|
except Full:
|
||||||
# This should never happen if self.block == True
|
# This should never happen if self.block == True
|
||||||
log.warning(
|
log.warning(
|
||||||
"Connection pool is full, discarding connection: %s" %
|
"Connection pool is full, discarding connection: %s",
|
||||||
self.host)
|
self.host)
|
||||||
|
|
||||||
# Connection never got put back into the pool, close it.
|
# Connection never got put back into the pool, close it.
|
||||||
|
@ -318,7 +324,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
|
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
|
||||||
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
|
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
|
||||||
|
|
||||||
def _make_request(self, conn, method, url, timeout=_Default,
|
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
|
||||||
**httplib_request_kw):
|
**httplib_request_kw):
|
||||||
"""
|
"""
|
||||||
Perform a request on a given urllib connection object taken from our
|
Perform a request on a given urllib connection object taken from our
|
||||||
|
@ -350,7 +356,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
|
|
||||||
# conn.request() calls httplib.*.request, not the method in
|
# conn.request() calls httplib.*.request, not the method in
|
||||||
# urllib3.request. It also calls makefile (recv) on the socket.
|
# urllib3.request. It also calls makefile (recv) on the socket.
|
||||||
conn.request(method, url, **httplib_request_kw)
|
if chunked:
|
||||||
|
conn.request_chunked(method, url, **httplib_request_kw)
|
||||||
|
else:
|
||||||
|
conn.request(method, url, **httplib_request_kw)
|
||||||
|
|
||||||
# Reset the timeout for the recv() on the socket
|
# Reset the timeout for the recv() on the socket
|
||||||
read_timeout = timeout_obj.read_timeout
|
read_timeout = timeout_obj.read_timeout
|
||||||
|
@ -382,9 +391,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
|
|
||||||
# AppEngine doesn't have a version attr.
|
# AppEngine doesn't have a version attr.
|
||||||
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
|
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
|
||||||
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
|
log.debug("\"%s %s %s\" %s %s", method, url, http_version,
|
||||||
httplib_response.status,
|
httplib_response.status, httplib_response.length)
|
||||||
httplib_response.length))
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
assert_header_parsing(httplib_response.msg)
|
assert_header_parsing(httplib_response.msg)
|
||||||
|
@ -435,7 +443,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
|
|
||||||
def urlopen(self, method, url, body=None, headers=None, retries=None,
|
def urlopen(self, method, url, body=None, headers=None, retries=None,
|
||||||
redirect=True, assert_same_host=True, timeout=_Default,
|
redirect=True, assert_same_host=True, timeout=_Default,
|
||||||
pool_timeout=None, release_conn=None, **response_kw):
|
pool_timeout=None, release_conn=None, chunked=False,
|
||||||
|
**response_kw):
|
||||||
"""
|
"""
|
||||||
Get a connection from the pool and perform an HTTP request. This is the
|
Get a connection from the pool and perform an HTTP request. This is the
|
||||||
lowest level call for making a request, so you'll need to specify all
|
lowest level call for making a request, so you'll need to specify all
|
||||||
|
@ -512,6 +521,11 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
back into the pool. If None, it takes the value of
|
back into the pool. If None, it takes the value of
|
||||||
``response_kw.get('preload_content', True)``.
|
``response_kw.get('preload_content', True)``.
|
||||||
|
|
||||||
|
:param chunked:
|
||||||
|
If True, urllib3 will send the body using chunked transfer
|
||||||
|
encoding. Otherwise, urllib3 will send the body using the standard
|
||||||
|
content-length form. Defaults to False.
|
||||||
|
|
||||||
:param \**response_kw:
|
:param \**response_kw:
|
||||||
Additional parameters are passed to
|
Additional parameters are passed to
|
||||||
:meth:`urllib3.response.HTTPResponse.from_httplib`
|
:meth:`urllib3.response.HTTPResponse.from_httplib`
|
||||||
|
@ -542,6 +556,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
# complains about UnboundLocalError.
|
# complains about UnboundLocalError.
|
||||||
err = None
|
err = None
|
||||||
|
|
||||||
|
# Keep track of whether we cleanly exited the except block. This
|
||||||
|
# ensures we do proper cleanup in finally.
|
||||||
|
clean_exit = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Request a connection from the queue.
|
# Request a connection from the queue.
|
||||||
timeout_obj = self._get_timeout(timeout)
|
timeout_obj = self._get_timeout(timeout)
|
||||||
|
@ -556,13 +574,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
# Make the request on the httplib connection object.
|
# Make the request on the httplib connection object.
|
||||||
httplib_response = self._make_request(conn, method, url,
|
httplib_response = self._make_request(conn, method, url,
|
||||||
timeout=timeout_obj,
|
timeout=timeout_obj,
|
||||||
body=body, headers=headers)
|
body=body, headers=headers,
|
||||||
|
chunked=chunked)
|
||||||
|
|
||||||
# If we're going to release the connection in ``finally:``, then
|
# If we're going to release the connection in ``finally:``, then
|
||||||
# the request doesn't need to know about the connection. Otherwise
|
# the response doesn't need to know about the connection. Otherwise
|
||||||
# it will also try to release it and we'll have a double-release
|
# it will also try to release it and we'll have a double-release
|
||||||
# mess.
|
# mess.
|
||||||
response_conn = not release_conn and conn
|
response_conn = conn if not release_conn else None
|
||||||
|
|
||||||
# Import httplib's response into our own wrapper object
|
# Import httplib's response into our own wrapper object
|
||||||
response = HTTPResponse.from_httplib(httplib_response,
|
response = HTTPResponse.from_httplib(httplib_response,
|
||||||
|
@ -570,10 +589,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
connection=response_conn,
|
connection=response_conn,
|
||||||
**response_kw)
|
**response_kw)
|
||||||
|
|
||||||
# else:
|
# Everything went great!
|
||||||
# The connection will be put back into the pool when
|
clean_exit = True
|
||||||
# ``response.release_conn()`` is called (implicitly by
|
|
||||||
# ``response.read()``)
|
|
||||||
|
|
||||||
except Empty:
|
except Empty:
|
||||||
# Timed out by queue.
|
# Timed out by queue.
|
||||||
|
@ -583,22 +600,19 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
# Close the connection. If a connection is reused on which there
|
# Close the connection. If a connection is reused on which there
|
||||||
# was a Certificate error, the next request will certainly raise
|
# was a Certificate error, the next request will certainly raise
|
||||||
# another Certificate error.
|
# another Certificate error.
|
||||||
conn = conn and conn.close()
|
clean_exit = False
|
||||||
release_conn = True
|
|
||||||
raise SSLError(e)
|
raise SSLError(e)
|
||||||
|
|
||||||
except SSLError:
|
except SSLError:
|
||||||
# Treat SSLError separately from BaseSSLError to preserve
|
# Treat SSLError separately from BaseSSLError to preserve
|
||||||
# traceback.
|
# traceback.
|
||||||
conn = conn and conn.close()
|
clean_exit = False
|
||||||
release_conn = True
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
|
except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
|
||||||
# Discard the connection for these exceptions. It will be
|
# Discard the connection for these exceptions. It will be
|
||||||
# be replaced during the next _get_conn() call.
|
# be replaced during the next _get_conn() call.
|
||||||
conn = conn and conn.close()
|
clean_exit = False
|
||||||
release_conn = True
|
|
||||||
|
|
||||||
if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
|
if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
|
||||||
e = ProxyError('Cannot connect to proxy.', e)
|
e = ProxyError('Cannot connect to proxy.', e)
|
||||||
|
@ -613,6 +627,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
err = e
|
err = e
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
|
if not clean_exit:
|
||||||
|
# We hit some kind of exception, handled or otherwise. We need
|
||||||
|
# to throw the connection away unless explicitly told not to.
|
||||||
|
# Close the connection, set the variable to None, and make sure
|
||||||
|
# we put the None back in the pool to avoid leaking it.
|
||||||
|
conn = conn and conn.close()
|
||||||
|
release_conn = True
|
||||||
|
|
||||||
if release_conn:
|
if release_conn:
|
||||||
# Put the connection back to be reused. If the connection is
|
# Put the connection back to be reused. If the connection is
|
||||||
# expired then it will be None, which will get replaced with a
|
# expired then it will be None, which will get replaced with a
|
||||||
|
@ -622,7 +644,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
if not conn:
|
if not conn:
|
||||||
# Try again
|
# Try again
|
||||||
log.warning("Retrying (%r) after connection "
|
log.warning("Retrying (%r) after connection "
|
||||||
"broken by '%r': %s" % (retries, err, url))
|
"broken by '%r': %s", retries, err, url)
|
||||||
return self.urlopen(method, url, body, headers, retries,
|
return self.urlopen(method, url, body, headers, retries,
|
||||||
redirect, assert_same_host,
|
redirect, assert_same_host,
|
||||||
timeout=timeout, pool_timeout=pool_timeout,
|
timeout=timeout, pool_timeout=pool_timeout,
|
||||||
|
@ -644,7 +666,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
raise
|
raise
|
||||||
return response
|
return response
|
||||||
|
|
||||||
log.info("Redirecting %s -> %s" % (url, redirect_location))
|
log.info("Redirecting %s -> %s", url, redirect_location)
|
||||||
return self.urlopen(
|
return self.urlopen(
|
||||||
method, redirect_location, body, headers,
|
method, redirect_location, body, headers,
|
||||||
retries=retries, redirect=redirect,
|
retries=retries, redirect=redirect,
|
||||||
|
@ -654,9 +676,17 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
||||||
|
|
||||||
# Check if we should retry the HTTP response.
|
# Check if we should retry the HTTP response.
|
||||||
if retries.is_forced_retry(method, status_code=response.status):
|
if retries.is_forced_retry(method, status_code=response.status):
|
||||||
retries = retries.increment(method, url, response=response, _pool=self)
|
try:
|
||||||
|
retries = retries.increment(method, url, response=response, _pool=self)
|
||||||
|
except MaxRetryError:
|
||||||
|
if retries.raise_on_status:
|
||||||
|
# Release the connection for this response, since we're not
|
||||||
|
# returning it to be released manually.
|
||||||
|
response.release_conn()
|
||||||
|
raise
|
||||||
|
return response
|
||||||
retries.sleep()
|
retries.sleep()
|
||||||
log.info("Forced retry: %s" % url)
|
log.info("Forced retry: %s", url)
|
||||||
return self.urlopen(
|
return self.urlopen(
|
||||||
method, url, body, headers,
|
method, url, body, headers,
|
||||||
retries=retries, redirect=redirect,
|
retries=retries, redirect=redirect,
|
||||||
|
@ -742,7 +772,7 @@ class HTTPSConnectionPool(HTTPConnectionPool):
|
||||||
except AttributeError: # Platform-specific: Python 2.6
|
except AttributeError: # Platform-specific: Python 2.6
|
||||||
set_tunnel = conn._set_tunnel
|
set_tunnel = conn._set_tunnel
|
||||||
|
|
||||||
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
|
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
|
||||||
set_tunnel(self.host, self.port)
|
set_tunnel(self.host, self.port)
|
||||||
else:
|
else:
|
||||||
set_tunnel(self.host, self.port, self.proxy_headers)
|
set_tunnel(self.host, self.port, self.proxy_headers)
|
||||||
|
@ -754,8 +784,8 @@ class HTTPSConnectionPool(HTTPConnectionPool):
|
||||||
Return a fresh :class:`httplib.HTTPSConnection`.
|
Return a fresh :class:`httplib.HTTPSConnection`.
|
||||||
"""
|
"""
|
||||||
self.num_connections += 1
|
self.num_connections += 1
|
||||||
log.info("Starting new HTTPS connection (%d): %s"
|
log.info("Starting new HTTPS connection (%d): %s",
|
||||||
% (self.num_connections, self.host))
|
self.num_connections, self.host)
|
||||||
|
|
||||||
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
|
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
|
||||||
raise SSLError("Can't connect to HTTPS URL because the SSL "
|
raise SSLError("Can't connect to HTTPS URL because the SSL "
|
||||||
|
@ -812,6 +842,7 @@ def connection_from_url(url, **kw):
|
||||||
>>> r = conn.request('GET', '/')
|
>>> r = conn.request('GET', '/')
|
||||||
"""
|
"""
|
||||||
scheme, host, port = get_host(url)
|
scheme, host, port = get_host(url)
|
||||||
|
port = port or port_by_scheme.get(scheme, 80)
|
||||||
if scheme == 'https':
|
if scheme == 'https':
|
||||||
return HTTPSConnectionPool(host, port=port, **kw)
|
return HTTPSConnectionPool(host, port=port, **kw)
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -144,7 +144,7 @@ class AppEngineManager(RequestMethods):
|
||||||
if retries.is_forced_retry(method, status_code=http_response.status):
|
if retries.is_forced_retry(method, status_code=http_response.status):
|
||||||
retries = retries.increment(
|
retries = retries.increment(
|
||||||
method, url, response=http_response, _pool=self)
|
method, url, response=http_response, _pool=self)
|
||||||
log.info("Forced retry: %s" % url)
|
log.info("Forced retry: %s", url)
|
||||||
retries.sleep()
|
retries.sleep()
|
||||||
return self.urlopen(
|
return self.urlopen(
|
||||||
method, url,
|
method, url,
|
||||||
|
@ -164,6 +164,14 @@ class AppEngineManager(RequestMethods):
|
||||||
if content_encoding == 'deflate':
|
if content_encoding == 'deflate':
|
||||||
del urlfetch_resp.headers['content-encoding']
|
del urlfetch_resp.headers['content-encoding']
|
||||||
|
|
||||||
|
transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
|
||||||
|
# We have a full response's content,
|
||||||
|
# so let's make sure we don't report ourselves as chunked data.
|
||||||
|
if transfer_encoding == 'chunked':
|
||||||
|
encodings = transfer_encoding.split(",")
|
||||||
|
encodings.remove('chunked')
|
||||||
|
urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
|
||||||
|
|
||||||
return HTTPResponse(
|
return HTTPResponse(
|
||||||
# In order for decoding to work, we must present the content as
|
# In order for decoding to work, we must present the content as
|
||||||
# a file-like object.
|
# a file-like object.
|
||||||
|
@ -177,7 +185,7 @@ class AppEngineManager(RequestMethods):
|
||||||
if timeout is Timeout.DEFAULT_TIMEOUT:
|
if timeout is Timeout.DEFAULT_TIMEOUT:
|
||||||
return 5 # 5s is the default timeout for URLFetch.
|
return 5 # 5s is the default timeout for URLFetch.
|
||||||
if isinstance(timeout, Timeout):
|
if isinstance(timeout, Timeout):
|
||||||
if timeout.read is not timeout.connect:
|
if timeout._read is not timeout._connect:
|
||||||
warnings.warn(
|
warnings.warn(
|
||||||
"URLFetch does not support granular timeout settings, "
|
"URLFetch does not support granular timeout settings, "
|
||||||
"reverting to total timeout.", AppEnginePlatformWarning)
|
"reverting to total timeout.", AppEnginePlatformWarning)
|
||||||
|
|
|
@ -43,8 +43,8 @@ class NTLMConnectionPool(HTTPSConnectionPool):
|
||||||
# Performs the NTLM handshake that secures the connection. The socket
|
# Performs the NTLM handshake that secures the connection. The socket
|
||||||
# must be kept open while requests are performed.
|
# must be kept open while requests are performed.
|
||||||
self.num_connections += 1
|
self.num_connections += 1
|
||||||
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
|
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
|
||||||
(self.num_connections, self.host, self.authurl))
|
self.num_connections, self.host, self.authurl)
|
||||||
|
|
||||||
headers = {}
|
headers = {}
|
||||||
headers['Connection'] = 'Keep-Alive'
|
headers['Connection'] = 'Keep-Alive'
|
||||||
|
@ -56,13 +56,13 @@ class NTLMConnectionPool(HTTPSConnectionPool):
|
||||||
# Send negotiation message
|
# Send negotiation message
|
||||||
headers[req_header] = (
|
headers[req_header] = (
|
||||||
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
|
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
|
||||||
log.debug('Request headers: %s' % headers)
|
log.debug('Request headers: %s', headers)
|
||||||
conn.request('GET', self.authurl, None, headers)
|
conn.request('GET', self.authurl, None, headers)
|
||||||
res = conn.getresponse()
|
res = conn.getresponse()
|
||||||
reshdr = dict(res.getheaders())
|
reshdr = dict(res.getheaders())
|
||||||
log.debug('Response status: %s %s' % (res.status, res.reason))
|
log.debug('Response status: %s %s', res.status, res.reason)
|
||||||
log.debug('Response headers: %s' % reshdr)
|
log.debug('Response headers: %s', reshdr)
|
||||||
log.debug('Response data: %s [...]' % res.read(100))
|
log.debug('Response data: %s [...]', res.read(100))
|
||||||
|
|
||||||
# Remove the reference to the socket, so that it can not be closed by
|
# Remove the reference to the socket, so that it can not be closed by
|
||||||
# the response object (we want to keep the socket open)
|
# the response object (we want to keep the socket open)
|
||||||
|
@ -87,12 +87,12 @@ class NTLMConnectionPool(HTTPSConnectionPool):
|
||||||
self.pw,
|
self.pw,
|
||||||
NegotiateFlags)
|
NegotiateFlags)
|
||||||
headers[req_header] = 'NTLM %s' % auth_msg
|
headers[req_header] = 'NTLM %s' % auth_msg
|
||||||
log.debug('Request headers: %s' % headers)
|
log.debug('Request headers: %s', headers)
|
||||||
conn.request('GET', self.authurl, None, headers)
|
conn.request('GET', self.authurl, None, headers)
|
||||||
res = conn.getresponse()
|
res = conn.getresponse()
|
||||||
log.debug('Response status: %s %s' % (res.status, res.reason))
|
log.debug('Response status: %s %s', res.status, res.reason)
|
||||||
log.debug('Response headers: %s' % dict(res.getheaders()))
|
log.debug('Response headers: %s', dict(res.getheaders()))
|
||||||
log.debug('Response data: %s [...]' % res.read()[:100])
|
log.debug('Response data: %s [...]', res.read()[:100])
|
||||||
if res.status != 200:
|
if res.status != 200:
|
||||||
if res.status == 401:
|
if res.status == 401:
|
||||||
raise Exception('Server rejected request: wrong '
|
raise Exception('Server rejected request: wrong '
|
||||||
|
|
|
@ -54,9 +54,17 @@ except SyntaxError as e:
|
||||||
import OpenSSL.SSL
|
import OpenSSL.SSL
|
||||||
from pyasn1.codec.der import decoder as der_decoder
|
from pyasn1.codec.der import decoder as der_decoder
|
||||||
from pyasn1.type import univ, constraint
|
from pyasn1.type import univ, constraint
|
||||||
from socket import _fileobject, timeout, error as SocketError
|
from socket import timeout, error as SocketError
|
||||||
|
|
||||||
|
try: # Platform-specific: Python 2
|
||||||
|
from socket import _fileobject
|
||||||
|
except ImportError: # Platform-specific: Python 3
|
||||||
|
_fileobject = None
|
||||||
|
from urllib3.packages.backports.makefile import backport_makefile
|
||||||
|
|
||||||
import ssl
|
import ssl
|
||||||
import select
|
import select
|
||||||
|
import six
|
||||||
|
|
||||||
from .. import connection
|
from .. import connection
|
||||||
from .. import util
|
from .. import util
|
||||||
|
@ -90,7 +98,7 @@ _openssl_verify = {
|
||||||
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
|
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
|
||||||
}
|
}
|
||||||
|
|
||||||
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
|
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS.encode('ascii')
|
||||||
|
|
||||||
# OpenSSL will only write 16K at a time
|
# OpenSSL will only write 16K at a time
|
||||||
SSL_WRITE_BLOCKSIZE = 16384
|
SSL_WRITE_BLOCKSIZE = 16384
|
||||||
|
@ -104,6 +112,7 @@ def inject_into_urllib3():
|
||||||
|
|
||||||
connection.ssl_wrap_socket = ssl_wrap_socket
|
connection.ssl_wrap_socket = ssl_wrap_socket
|
||||||
util.HAS_SNI = HAS_SNI
|
util.HAS_SNI = HAS_SNI
|
||||||
|
util.IS_PYOPENSSL = True
|
||||||
|
|
||||||
|
|
||||||
def extract_from_urllib3():
|
def extract_from_urllib3():
|
||||||
|
@ -111,6 +120,7 @@ def extract_from_urllib3():
|
||||||
|
|
||||||
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
|
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
|
||||||
util.HAS_SNI = orig_util_HAS_SNI
|
util.HAS_SNI = orig_util_HAS_SNI
|
||||||
|
util.IS_PYOPENSSL = False
|
||||||
|
|
||||||
|
|
||||||
# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
|
# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
|
||||||
|
@ -135,7 +145,7 @@ def get_subj_alt_name(peer_cert):
|
||||||
for i in range(peer_cert.get_extension_count()):
|
for i in range(peer_cert.get_extension_count()):
|
||||||
ext = peer_cert.get_extension(i)
|
ext = peer_cert.get_extension(i)
|
||||||
ext_name = ext.get_short_name()
|
ext_name = ext.get_short_name()
|
||||||
if ext_name != 'subjectAltName':
|
if ext_name != b'subjectAltName':
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# PyOpenSSL returns extension data in ASN.1 encoded form
|
# PyOpenSSL returns extension data in ASN.1 encoded form
|
||||||
|
@ -167,13 +177,17 @@ class WrappedSocket(object):
|
||||||
self.socket = socket
|
self.socket = socket
|
||||||
self.suppress_ragged_eofs = suppress_ragged_eofs
|
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||||
self._makefile_refs = 0
|
self._makefile_refs = 0
|
||||||
|
self._closed = False
|
||||||
|
|
||||||
def fileno(self):
|
def fileno(self):
|
||||||
return self.socket.fileno()
|
return self.socket.fileno()
|
||||||
|
|
||||||
def makefile(self, mode, bufsize=-1):
|
# Copy-pasted from Python 3.5 source code
|
||||||
self._makefile_refs += 1
|
def _decref_socketios(self):
|
||||||
return _fileobject(self, mode, bufsize, close=True)
|
if self._makefile_refs > 0:
|
||||||
|
self._makefile_refs -= 1
|
||||||
|
if self._closed:
|
||||||
|
self.close()
|
||||||
|
|
||||||
def recv(self, *args, **kwargs):
|
def recv(self, *args, **kwargs):
|
||||||
try:
|
try:
|
||||||
|
@ -182,7 +196,7 @@ class WrappedSocket(object):
|
||||||
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
|
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
|
||||||
return b''
|
return b''
|
||||||
else:
|
else:
|
||||||
raise SocketError(e)
|
raise SocketError(str(e))
|
||||||
except OpenSSL.SSL.ZeroReturnError as e:
|
except OpenSSL.SSL.ZeroReturnError as e:
|
||||||
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
|
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
|
||||||
return b''
|
return b''
|
||||||
|
@ -198,6 +212,27 @@ class WrappedSocket(object):
|
||||||
else:
|
else:
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
def recv_into(self, *args, **kwargs):
|
||||||
|
try:
|
||||||
|
return self.connection.recv_into(*args, **kwargs)
|
||||||
|
except OpenSSL.SSL.SysCallError as e:
|
||||||
|
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
raise SocketError(str(e))
|
||||||
|
except OpenSSL.SSL.ZeroReturnError as e:
|
||||||
|
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
except OpenSSL.SSL.WantReadError:
|
||||||
|
rd, wd, ed = select.select(
|
||||||
|
[self.socket], [], [], self.socket.gettimeout())
|
||||||
|
if not rd:
|
||||||
|
raise timeout('The read operation timed out')
|
||||||
|
else:
|
||||||
|
return self.recv_into(*args, **kwargs)
|
||||||
|
|
||||||
def settimeout(self, timeout):
|
def settimeout(self, timeout):
|
||||||
return self.socket.settimeout(timeout)
|
return self.socket.settimeout(timeout)
|
||||||
|
|
||||||
|
@ -225,6 +260,7 @@ class WrappedSocket(object):
|
||||||
def close(self):
|
def close(self):
|
||||||
if self._makefile_refs < 1:
|
if self._makefile_refs < 1:
|
||||||
try:
|
try:
|
||||||
|
self._closed = True
|
||||||
return self.connection.close()
|
return self.connection.close()
|
||||||
except OpenSSL.SSL.Error:
|
except OpenSSL.SSL.Error:
|
||||||
return
|
return
|
||||||
|
@ -262,6 +298,16 @@ class WrappedSocket(object):
|
||||||
self._makefile_refs -= 1
|
self._makefile_refs -= 1
|
||||||
|
|
||||||
|
|
||||||
|
if _fileobject: # Platform-specific: Python 2
|
||||||
|
def makefile(self, mode, bufsize=-1):
|
||||||
|
self._makefile_refs += 1
|
||||||
|
return _fileobject(self, mode, bufsize, close=True)
|
||||||
|
else: # Platform-specific: Python 3
|
||||||
|
makefile = backport_makefile
|
||||||
|
|
||||||
|
WrappedSocket.makefile = makefile
|
||||||
|
|
||||||
|
|
||||||
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
|
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
|
||||||
return err_no == 0
|
return err_no == 0
|
||||||
|
|
||||||
|
@ -285,7 +331,7 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
|
||||||
else:
|
else:
|
||||||
ctx.set_default_verify_paths()
|
ctx.set_default_verify_paths()
|
||||||
|
|
||||||
# Disable TLS compression to migitate CRIME attack (issue #309)
|
# Disable TLS compression to mitigate CRIME attack (issue #309)
|
||||||
OP_NO_COMPRESSION = 0x20000
|
OP_NO_COMPRESSION = 0x20000
|
||||||
ctx.set_options(OP_NO_COMPRESSION)
|
ctx.set_options(OP_NO_COMPRESSION)
|
||||||
|
|
||||||
|
@ -293,6 +339,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
|
||||||
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
|
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
|
||||||
|
|
||||||
cnx = OpenSSL.SSL.Connection(ctx, sock)
|
cnx = OpenSSL.SSL.Connection(ctx, sock)
|
||||||
|
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
|
||||||
|
server_hostname = server_hostname.encode('utf-8')
|
||||||
cnx.set_tlsext_host_name(server_hostname)
|
cnx.set_tlsext_host_name(server_hostname)
|
||||||
cnx.set_connect_state()
|
cnx.set_connect_state()
|
||||||
while True:
|
while True:
|
||||||
|
|
|
@ -0,0 +1,172 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
SOCKS support for urllib3
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This contrib module contains provisional support for SOCKS proxies from within
|
||||||
|
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
|
||||||
|
SOCKS5. To enable its functionality, either install PySocks or install this
|
||||||
|
module with the ``socks`` extra.
|
||||||
|
|
||||||
|
Known Limitations:
|
||||||
|
|
||||||
|
- Currently PySocks does not support contacting remote websites via literal
|
||||||
|
IPv6 addresses. Any such connection attempt will fail.
|
||||||
|
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
|
||||||
|
such connection attempt will fail.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
try:
|
||||||
|
import socks
|
||||||
|
except ImportError:
|
||||||
|
import warnings
|
||||||
|
from ..exceptions import DependencyWarning
|
||||||
|
|
||||||
|
warnings.warn((
|
||||||
|
'SOCKS support in urllib3 requires the installation of optional '
|
||||||
|
'dependencies: specifically, PySocks. For more information, see '
|
||||||
|
'https://urllib3.readthedocs.org/en/latest/contrib.html#socks-proxies'
|
||||||
|
),
|
||||||
|
DependencyWarning
|
||||||
|
)
|
||||||
|
raise
|
||||||
|
|
||||||
|
from socket import error as SocketError, timeout as SocketTimeout
|
||||||
|
|
||||||
|
from ..connection import (
|
||||||
|
HTTPConnection, HTTPSConnection
|
||||||
|
)
|
||||||
|
from ..connectionpool import (
|
||||||
|
HTTPConnectionPool, HTTPSConnectionPool
|
||||||
|
)
|
||||||
|
from ..exceptions import ConnectTimeoutError, NewConnectionError
|
||||||
|
from ..poolmanager import PoolManager
|
||||||
|
from ..util.url import parse_url
|
||||||
|
|
||||||
|
try:
|
||||||
|
import ssl
|
||||||
|
except ImportError:
|
||||||
|
ssl = None
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKSConnection(HTTPConnection):
|
||||||
|
"""
|
||||||
|
A plain-text HTTP connection that connects via a SOCKS proxy.
|
||||||
|
"""
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self._socks_options = kwargs.pop('_socks_options')
|
||||||
|
super(SOCKSConnection, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def _new_conn(self):
|
||||||
|
"""
|
||||||
|
Establish a new connection via the SOCKS proxy.
|
||||||
|
"""
|
||||||
|
extra_kw = {}
|
||||||
|
if self.source_address:
|
||||||
|
extra_kw['source_address'] = self.source_address
|
||||||
|
|
||||||
|
if self.socket_options:
|
||||||
|
extra_kw['socket_options'] = self.socket_options
|
||||||
|
|
||||||
|
try:
|
||||||
|
conn = socks.create_connection(
|
||||||
|
(self.host, self.port),
|
||||||
|
proxy_type=self._socks_options['socks_version'],
|
||||||
|
proxy_addr=self._socks_options['proxy_host'],
|
||||||
|
proxy_port=self._socks_options['proxy_port'],
|
||||||
|
proxy_username=self._socks_options['username'],
|
||||||
|
proxy_password=self._socks_options['password'],
|
||||||
|
timeout=self.timeout,
|
||||||
|
**extra_kw
|
||||||
|
)
|
||||||
|
|
||||||
|
except SocketTimeout as e:
|
||||||
|
raise ConnectTimeoutError(
|
||||||
|
self, "Connection to %s timed out. (connect timeout=%s)" %
|
||||||
|
(self.host, self.timeout))
|
||||||
|
|
||||||
|
except socks.ProxyError as e:
|
||||||
|
# This is fragile as hell, but it seems to be the only way to raise
|
||||||
|
# useful errors here.
|
||||||
|
if e.socket_err:
|
||||||
|
error = e.socket_err
|
||||||
|
if isinstance(error, SocketTimeout):
|
||||||
|
raise ConnectTimeoutError(
|
||||||
|
self,
|
||||||
|
"Connection to %s timed out. (connect timeout=%s)" %
|
||||||
|
(self.host, self.timeout)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise NewConnectionError(
|
||||||
|
self,
|
||||||
|
"Failed to establish a new connection: %s" % error
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise NewConnectionError(
|
||||||
|
self,
|
||||||
|
"Failed to establish a new connection: %s" % e
|
||||||
|
)
|
||||||
|
|
||||||
|
except SocketError as e: # Defensive: PySocks should catch all these.
|
||||||
|
raise NewConnectionError(
|
||||||
|
self, "Failed to establish a new connection: %s" % e)
|
||||||
|
|
||||||
|
return conn
|
||||||
|
|
||||||
|
|
||||||
|
# We don't need to duplicate the Verified/Unverified distinction from
|
||||||
|
# urllib3/connection.py here because the HTTPSConnection will already have been
|
||||||
|
# correctly set to either the Verified or Unverified form by that module. This
|
||||||
|
# means the SOCKSHTTPSConnection will automatically be the correct type.
|
||||||
|
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
|
||||||
|
ConnectionCls = SOCKSConnection
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
|
||||||
|
ConnectionCls = SOCKSHTTPSConnection
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKSProxyManager(PoolManager):
|
||||||
|
"""
|
||||||
|
A version of the urllib3 ProxyManager that routes connections via the
|
||||||
|
defined SOCKS proxy.
|
||||||
|
"""
|
||||||
|
pool_classes_by_scheme = {
|
||||||
|
'http': SOCKSHTTPConnectionPool,
|
||||||
|
'https': SOCKSHTTPSConnectionPool,
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, proxy_url, username=None, password=None,
|
||||||
|
num_pools=10, headers=None, **connection_pool_kw):
|
||||||
|
parsed = parse_url(proxy_url)
|
||||||
|
|
||||||
|
if parsed.scheme == 'socks5':
|
||||||
|
socks_version = socks.PROXY_TYPE_SOCKS5
|
||||||
|
elif parsed.scheme == 'socks4':
|
||||||
|
socks_version = socks.PROXY_TYPE_SOCKS4
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
"Unable to determine SOCKS version from %s" % proxy_url
|
||||||
|
)
|
||||||
|
|
||||||
|
self.proxy_url = proxy_url
|
||||||
|
|
||||||
|
socks_options = {
|
||||||
|
'socks_version': socks_version,
|
||||||
|
'proxy_host': parsed.host,
|
||||||
|
'proxy_port': parsed.port,
|
||||||
|
'username': username,
|
||||||
|
'password': password,
|
||||||
|
}
|
||||||
|
connection_pool_kw['_socks_options'] = socks_options
|
||||||
|
|
||||||
|
super(SOCKSProxyManager, self).__init__(
|
||||||
|
num_pools, headers, **connection_pool_kw
|
||||||
|
)
|
||||||
|
|
||||||
|
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
|
|
@ -180,6 +180,14 @@ class SNIMissingWarning(HTTPWarning):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class DependencyWarning(HTTPWarning):
|
||||||
|
"""
|
||||||
|
Warned when an attempt is made to import a module with missing optional
|
||||||
|
dependencies.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class ResponseNotChunked(ProtocolError, ValueError):
|
class ResponseNotChunked(ProtocolError, ValueError):
|
||||||
"Response needs to be chunked in order to read it as chunks."
|
"Response needs to be chunked in order to read it as chunks."
|
||||||
pass
|
pass
|
||||||
|
|
|
@ -36,11 +36,11 @@ def format_header_param(name, value):
|
||||||
result = '%s="%s"' % (name, value)
|
result = '%s="%s"' % (name, value)
|
||||||
try:
|
try:
|
||||||
result.encode('ascii')
|
result.encode('ascii')
|
||||||
except UnicodeEncodeError:
|
except (UnicodeEncodeError, UnicodeDecodeError):
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
return result
|
return result
|
||||||
if not six.PY3: # Python 2:
|
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
|
||||||
value = value.encode('utf-8')
|
value = value.encode('utf-8')
|
||||||
value = email.utils.encode_rfc2231(value, 'utf-8')
|
value = email.utils.encode_rfc2231(value, 'utf-8')
|
||||||
value = '%s*=%s' % (name, value)
|
value = '%s*=%s' % (name, value)
|
||||||
|
|
|
@ -0,0 +1,53 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
backports.makefile
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Backports the Python 3 ``socket.makefile`` method for use with anything that
|
||||||
|
wants to create a "fake" socket object.
|
||||||
|
"""
|
||||||
|
import io
|
||||||
|
|
||||||
|
from socket import SocketIO
|
||||||
|
|
||||||
|
|
||||||
|
def backport_makefile(self, mode="r", buffering=None, encoding=None,
|
||||||
|
errors=None, newline=None):
|
||||||
|
"""
|
||||||
|
Backport of ``socket.makefile`` from Python 3.5.
|
||||||
|
"""
|
||||||
|
if not set(mode) <= set(["r", "w", "b"]):
|
||||||
|
raise ValueError(
|
||||||
|
"invalid mode %r (only r, w, b allowed)" % (mode,)
|
||||||
|
)
|
||||||
|
writing = "w" in mode
|
||||||
|
reading = "r" in mode or not writing
|
||||||
|
assert reading or writing
|
||||||
|
binary = "b" in mode
|
||||||
|
rawmode = ""
|
||||||
|
if reading:
|
||||||
|
rawmode += "r"
|
||||||
|
if writing:
|
||||||
|
rawmode += "w"
|
||||||
|
raw = SocketIO(self, rawmode)
|
||||||
|
self._makefile_refs += 1
|
||||||
|
if buffering is None:
|
||||||
|
buffering = -1
|
||||||
|
if buffering < 0:
|
||||||
|
buffering = io.DEFAULT_BUFFER_SIZE
|
||||||
|
if buffering == 0:
|
||||||
|
if not binary:
|
||||||
|
raise ValueError("unbuffered streams must be binary")
|
||||||
|
return raw
|
||||||
|
if reading and writing:
|
||||||
|
buffer = io.BufferedRWPair(raw, raw, buffering)
|
||||||
|
elif reading:
|
||||||
|
buffer = io.BufferedReader(raw, buffering)
|
||||||
|
else:
|
||||||
|
assert writing
|
||||||
|
buffer = io.BufferedWriter(raw, buffering)
|
||||||
|
if binary:
|
||||||
|
return buffer
|
||||||
|
text = io.TextIOWrapper(buffer, encoding, errors, newline)
|
||||||
|
text.mode = mode
|
||||||
|
return text
|
|
@ -1 +0,0 @@
|
||||||
env
|
|
|
@ -18,16 +18,16 @@ from .util.retry import Retry
|
||||||
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
|
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
|
||||||
|
|
||||||
|
|
||||||
pool_classes_by_scheme = {
|
|
||||||
'http': HTTPConnectionPool,
|
|
||||||
'https': HTTPSConnectionPool,
|
|
||||||
}
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
|
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
|
||||||
'ssl_version', 'ca_cert_dir')
|
'ssl_version', 'ca_cert_dir')
|
||||||
|
|
||||||
|
pool_classes_by_scheme = {
|
||||||
|
'http': HTTPConnectionPool,
|
||||||
|
'https': HTTPSConnectionPool,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class PoolManager(RequestMethods):
|
class PoolManager(RequestMethods):
|
||||||
"""
|
"""
|
||||||
|
@ -65,6 +65,9 @@ class PoolManager(RequestMethods):
|
||||||
self.pools = RecentlyUsedContainer(num_pools,
|
self.pools = RecentlyUsedContainer(num_pools,
|
||||||
dispose_func=lambda p: p.close())
|
dispose_func=lambda p: p.close())
|
||||||
|
|
||||||
|
# Locally set the pool classes so other PoolManagers can override them.
|
||||||
|
self.pool_classes_by_scheme = pool_classes_by_scheme
|
||||||
|
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
return self
|
return self
|
||||||
|
|
||||||
|
@ -81,7 +84,7 @@ class PoolManager(RequestMethods):
|
||||||
by :meth:`connection_from_url` and companion methods. It is intended
|
by :meth:`connection_from_url` and companion methods. It is intended
|
||||||
to be overridden for customization.
|
to be overridden for customization.
|
||||||
"""
|
"""
|
||||||
pool_cls = pool_classes_by_scheme[scheme]
|
pool_cls = self.pool_classes_by_scheme[scheme]
|
||||||
kwargs = self.connection_pool_kw
|
kwargs = self.connection_pool_kw
|
||||||
if scheme == 'http':
|
if scheme == 'http':
|
||||||
kwargs = self.connection_pool_kw.copy()
|
kwargs = self.connection_pool_kw.copy()
|
||||||
|
@ -186,7 +189,7 @@ class PoolManager(RequestMethods):
|
||||||
kw['retries'] = retries
|
kw['retries'] = retries
|
||||||
kw['redirect'] = redirect
|
kw['redirect'] = redirect
|
||||||
|
|
||||||
log.info("Redirecting %s -> %s" % (url, redirect_location))
|
log.info("Redirecting %s -> %s", url, redirect_location)
|
||||||
return self.urlopen(method, redirect_location, **kw)
|
return self.urlopen(method, redirect_location, **kw)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -221,6 +221,8 @@ class HTTPResponse(io.IOBase):
|
||||||
|
|
||||||
On exit, release the connection back to the pool.
|
On exit, release the connection back to the pool.
|
||||||
"""
|
"""
|
||||||
|
clean_exit = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
try:
|
try:
|
||||||
yield
|
yield
|
||||||
|
@ -243,20 +245,27 @@ class HTTPResponse(io.IOBase):
|
||||||
# This includes IncompleteRead.
|
# This includes IncompleteRead.
|
||||||
raise ProtocolError('Connection broken: %r' % e, e)
|
raise ProtocolError('Connection broken: %r' % e, e)
|
||||||
|
|
||||||
except Exception:
|
# If no exception is thrown, we should avoid cleaning up
|
||||||
# The response may not be closed but we're not going to use it anymore
|
# unnecessarily.
|
||||||
# so close it now to ensure that the connection is released back to the pool.
|
clean_exit = True
|
||||||
if self._original_response and not self._original_response.isclosed():
|
|
||||||
self._original_response.close()
|
|
||||||
|
|
||||||
# Closing the response may not actually be sufficient to close
|
|
||||||
# everything, so if we have a hold of the connection close that
|
|
||||||
# too.
|
|
||||||
if self._connection is not None:
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
raise
|
|
||||||
finally:
|
finally:
|
||||||
|
# If we didn't terminate cleanly, we need to throw away our
|
||||||
|
# connection.
|
||||||
|
if not clean_exit:
|
||||||
|
# The response may not be closed but we're not going to use it
|
||||||
|
# anymore so close it now to ensure that the connection is
|
||||||
|
# released back to the pool.
|
||||||
|
if self._original_response:
|
||||||
|
self._original_response.close()
|
||||||
|
|
||||||
|
# Closing the response may not actually be sufficient to close
|
||||||
|
# everything, so if we have a hold of the connection close that
|
||||||
|
# too.
|
||||||
|
if self._connection:
|
||||||
|
self._connection.close()
|
||||||
|
|
||||||
|
# If we hold the original response but it's closed now, we should
|
||||||
|
# return the connection back to the pool.
|
||||||
if self._original_response and self._original_response.isclosed():
|
if self._original_response and self._original_response.isclosed():
|
||||||
self.release_conn()
|
self.release_conn()
|
||||||
|
|
||||||
|
@ -387,6 +396,9 @@ class HTTPResponse(io.IOBase):
|
||||||
if not self.closed:
|
if not self.closed:
|
||||||
self._fp.close()
|
self._fp.close()
|
||||||
|
|
||||||
|
if self._connection:
|
||||||
|
self._connection.close()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def closed(self):
|
def closed(self):
|
||||||
if self._fp is None:
|
if self._fp is None:
|
||||||
|
|
|
@ -6,6 +6,7 @@ from .response import is_fp_closed
|
||||||
from .ssl_ import (
|
from .ssl_ import (
|
||||||
SSLContext,
|
SSLContext,
|
||||||
HAS_SNI,
|
HAS_SNI,
|
||||||
|
IS_PYOPENSSL,
|
||||||
assert_fingerprint,
|
assert_fingerprint,
|
||||||
resolve_cert_reqs,
|
resolve_cert_reqs,
|
||||||
resolve_ssl_version,
|
resolve_ssl_version,
|
||||||
|
@ -26,6 +27,7 @@ from .url import (
|
||||||
|
|
||||||
__all__ = (
|
__all__ = (
|
||||||
'HAS_SNI',
|
'HAS_SNI',
|
||||||
|
'IS_PYOPENSSL',
|
||||||
'SSLContext',
|
'SSLContext',
|
||||||
'Retry',
|
'Retry',
|
||||||
'Timeout',
|
'Timeout',
|
||||||
|
|
|
@ -61,7 +61,7 @@ def assert_header_parsing(headers):
|
||||||
|
|
||||||
def is_response_to_head(response):
|
def is_response_to_head(response):
|
||||||
"""
|
"""
|
||||||
Checks, wether a the request of a response has been a HEAD-request.
|
Checks whether the request of a response has been a HEAD-request.
|
||||||
Handles the quirks of AppEngine.
|
Handles the quirks of AppEngine.
|
||||||
|
|
||||||
:param conn:
|
:param conn:
|
||||||
|
|
|
@ -102,6 +102,11 @@ class Retry(object):
|
||||||
:param bool raise_on_redirect: Whether, if the number of redirects is
|
:param bool raise_on_redirect: Whether, if the number of redirects is
|
||||||
exhausted, to raise a MaxRetryError, or to return a response with a
|
exhausted, to raise a MaxRetryError, or to return a response with a
|
||||||
response code in the 3xx range.
|
response code in the 3xx range.
|
||||||
|
|
||||||
|
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
|
||||||
|
whether we should raise an exception, or return a response,
|
||||||
|
if status falls in ``status_forcelist`` range and retries have
|
||||||
|
been exhausted.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
DEFAULT_METHOD_WHITELIST = frozenset([
|
DEFAULT_METHOD_WHITELIST = frozenset([
|
||||||
|
@ -112,7 +117,8 @@ class Retry(object):
|
||||||
|
|
||||||
def __init__(self, total=10, connect=None, read=None, redirect=None,
|
def __init__(self, total=10, connect=None, read=None, redirect=None,
|
||||||
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
|
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
|
||||||
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
|
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
|
||||||
|
_observed_errors=0):
|
||||||
|
|
||||||
self.total = total
|
self.total = total
|
||||||
self.connect = connect
|
self.connect = connect
|
||||||
|
@ -127,6 +133,7 @@ class Retry(object):
|
||||||
self.method_whitelist = method_whitelist
|
self.method_whitelist = method_whitelist
|
||||||
self.backoff_factor = backoff_factor
|
self.backoff_factor = backoff_factor
|
||||||
self.raise_on_redirect = raise_on_redirect
|
self.raise_on_redirect = raise_on_redirect
|
||||||
|
self.raise_on_status = raise_on_status
|
||||||
self._observed_errors = _observed_errors # TODO: use .history instead?
|
self._observed_errors = _observed_errors # TODO: use .history instead?
|
||||||
|
|
||||||
def new(self, **kw):
|
def new(self, **kw):
|
||||||
|
@ -137,6 +144,7 @@ class Retry(object):
|
||||||
status_forcelist=self.status_forcelist,
|
status_forcelist=self.status_forcelist,
|
||||||
backoff_factor=self.backoff_factor,
|
backoff_factor=self.backoff_factor,
|
||||||
raise_on_redirect=self.raise_on_redirect,
|
raise_on_redirect=self.raise_on_redirect,
|
||||||
|
raise_on_status=self.raise_on_status,
|
||||||
_observed_errors=self._observed_errors,
|
_observed_errors=self._observed_errors,
|
||||||
)
|
)
|
||||||
params.update(kw)
|
params.update(kw)
|
||||||
|
@ -153,7 +161,7 @@ class Retry(object):
|
||||||
|
|
||||||
redirect = bool(redirect) and None
|
redirect = bool(redirect) and None
|
||||||
new_retries = cls(retries, redirect=redirect)
|
new_retries = cls(retries, redirect=redirect)
|
||||||
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
|
log.debug("Converted retries value: %r -> %r", retries, new_retries)
|
||||||
return new_retries
|
return new_retries
|
||||||
|
|
||||||
def get_backoff_time(self):
|
def get_backoff_time(self):
|
||||||
|
@ -272,7 +280,7 @@ class Retry(object):
|
||||||
if new_retry.is_exhausted():
|
if new_retry.is_exhausted():
|
||||||
raise MaxRetryError(_pool, url, error or ResponseError(cause))
|
raise MaxRetryError(_pool, url, error or ResponseError(cause))
|
||||||
|
|
||||||
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
|
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
|
||||||
|
|
||||||
return new_retry
|
return new_retry
|
||||||
|
|
||||||
|
|
|
@ -12,6 +12,7 @@ from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
|
||||||
SSLContext = None
|
SSLContext = None
|
||||||
HAS_SNI = False
|
HAS_SNI = False
|
||||||
create_default_context = None
|
create_default_context = None
|
||||||
|
IS_PYOPENSSL = False
|
||||||
|
|
||||||
# Maps the length of a digest to a possible hash function producing this digest
|
# Maps the length of a digest to a possible hash function producing this digest
|
||||||
HASHFUNC_MAP = {
|
HASHFUNC_MAP = {
|
||||||
|
@ -110,11 +111,12 @@ except ImportError:
|
||||||
)
|
)
|
||||||
self.ciphers = cipher_suite
|
self.ciphers = cipher_suite
|
||||||
|
|
||||||
def wrap_socket(self, socket, server_hostname=None):
|
def wrap_socket(self, socket, server_hostname=None, server_side=False):
|
||||||
warnings.warn(
|
warnings.warn(
|
||||||
'A true SSLContext object is not available. This prevents '
|
'A true SSLContext object is not available. This prevents '
|
||||||
'urllib3 from configuring SSL appropriately and may cause '
|
'urllib3 from configuring SSL appropriately and may cause '
|
||||||
'certain SSL connections to fail. For more information, see '
|
'certain SSL connections to fail. You can upgrade to a newer '
|
||||||
|
'version of Python to solve this. For more information, see '
|
||||||
'https://urllib3.readthedocs.org/en/latest/security.html'
|
'https://urllib3.readthedocs.org/en/latest/security.html'
|
||||||
'#insecureplatformwarning.',
|
'#insecureplatformwarning.',
|
||||||
InsecurePlatformWarning
|
InsecurePlatformWarning
|
||||||
|
@ -125,6 +127,7 @@ except ImportError:
|
||||||
'ca_certs': self.ca_certs,
|
'ca_certs': self.ca_certs,
|
||||||
'cert_reqs': self.verify_mode,
|
'cert_reqs': self.verify_mode,
|
||||||
'ssl_version': self.protocol,
|
'ssl_version': self.protocol,
|
||||||
|
'server_side': server_side,
|
||||||
}
|
}
|
||||||
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
|
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
|
||||||
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
|
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
|
||||||
|
@ -308,8 +311,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
|
||||||
'An HTTPS request has been made, but the SNI (Subject Name '
|
'An HTTPS request has been made, but the SNI (Subject Name '
|
||||||
'Indication) extension to TLS is not available on this platform. '
|
'Indication) extension to TLS is not available on this platform. '
|
||||||
'This may cause the server to present an incorrect TLS '
|
'This may cause the server to present an incorrect TLS '
|
||||||
'certificate, which can cause validation failures. For more '
|
'certificate, which can cause validation failures. You can upgrade to '
|
||||||
'information, see '
|
'a newer version of Python to solve this. For more information, see '
|
||||||
'https://urllib3.readthedocs.org/en/latest/security.html'
|
'https://urllib3.readthedocs.org/en/latest/security.html'
|
||||||
'#snimissingwarning.',
|
'#snimissingwarning.',
|
||||||
SNIMissingWarning
|
SNIMissingWarning
|
||||||
|
|
|
@ -116,7 +116,6 @@ class SessionRedirectMixin(object):
|
||||||
resp.close()
|
resp.close()
|
||||||
|
|
||||||
url = resp.headers['location']
|
url = resp.headers['location']
|
||||||
method = req.method
|
|
||||||
|
|
||||||
# Handle redirection without scheme (see: RFC 1808 Section 4)
|
# Handle redirection without scheme (see: RFC 1808 Section 4)
|
||||||
if url.startswith('//'):
|
if url.startswith('//'):
|
||||||
|
@ -140,22 +139,7 @@ class SessionRedirectMixin(object):
|
||||||
if resp.is_permanent_redirect and req.url != prepared_request.url:
|
if resp.is_permanent_redirect and req.url != prepared_request.url:
|
||||||
self.redirect_cache[req.url] = prepared_request.url
|
self.redirect_cache[req.url] = prepared_request.url
|
||||||
|
|
||||||
# http://tools.ietf.org/html/rfc7231#section-6.4.4
|
self.rebuild_method(prepared_request, resp)
|
||||||
if (resp.status_code == codes.see_other and
|
|
||||||
method != 'HEAD'):
|
|
||||||
method = 'GET'
|
|
||||||
|
|
||||||
# Do what the browsers do, despite standards...
|
|
||||||
# First, turn 302s into GETs.
|
|
||||||
if resp.status_code == codes.found and method != 'HEAD':
|
|
||||||
method = 'GET'
|
|
||||||
|
|
||||||
# Second, if a POST is responded to with a 301, turn it into a GET.
|
|
||||||
# This bizarre behaviour is explained in Issue 1704.
|
|
||||||
if resp.status_code == codes.moved and method == 'POST':
|
|
||||||
method = 'GET'
|
|
||||||
|
|
||||||
prepared_request.method = method
|
|
||||||
|
|
||||||
# https://github.com/kennethreitz/requests/issues/1084
|
# https://github.com/kennethreitz/requests/issues/1084
|
||||||
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
|
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
|
||||||
|
@ -244,10 +228,10 @@ class SessionRedirectMixin(object):
|
||||||
if self.trust_env and not should_bypass_proxies(url):
|
if self.trust_env and not should_bypass_proxies(url):
|
||||||
environ_proxies = get_environ_proxies(url)
|
environ_proxies = get_environ_proxies(url)
|
||||||
|
|
||||||
proxy = environ_proxies.get(scheme)
|
proxy = environ_proxies.get('all', environ_proxies.get(scheme))
|
||||||
|
|
||||||
if proxy:
|
if proxy:
|
||||||
new_proxies.setdefault(scheme, environ_proxies[scheme])
|
new_proxies.setdefault(scheme, proxy)
|
||||||
|
|
||||||
if 'Proxy-Authorization' in headers:
|
if 'Proxy-Authorization' in headers:
|
||||||
del headers['Proxy-Authorization']
|
del headers['Proxy-Authorization']
|
||||||
|
@ -262,6 +246,28 @@ class SessionRedirectMixin(object):
|
||||||
|
|
||||||
return new_proxies
|
return new_proxies
|
||||||
|
|
||||||
|
def rebuild_method(self, prepared_request, response):
|
||||||
|
"""When being redirected we may want to change the method of the request
|
||||||
|
based on certain specs or browser behavior.
|
||||||
|
"""
|
||||||
|
method = prepared_request.method
|
||||||
|
|
||||||
|
# http://tools.ietf.org/html/rfc7231#section-6.4.4
|
||||||
|
if response.status_code == codes.see_other and method != 'HEAD':
|
||||||
|
method = 'GET'
|
||||||
|
|
||||||
|
# Do what the browsers do, despite standards...
|
||||||
|
# First, turn 302s into GETs.
|
||||||
|
if response.status_code == codes.found and method != 'HEAD':
|
||||||
|
method = 'GET'
|
||||||
|
|
||||||
|
# Second, if a POST is responded to with a 301, turn it into a GET.
|
||||||
|
# This bizarre behaviour is explained in Issue 1704.
|
||||||
|
if response.status_code == codes.moved and method == 'POST':
|
||||||
|
method = 'GET'
|
||||||
|
|
||||||
|
prepared_request.method = method
|
||||||
|
|
||||||
|
|
||||||
class Session(SessionRedirectMixin):
|
class Session(SessionRedirectMixin):
|
||||||
"""A Requests session.
|
"""A Requests session.
|
||||||
|
@ -437,6 +443,7 @@ class Session(SessionRedirectMixin):
|
||||||
A CA_BUNDLE path can also be provided. Defaults to ``True``.
|
A CA_BUNDLE path can also be provided. Defaults to ``True``.
|
||||||
:param cert: (optional) if String, path to ssl client cert file (.pem).
|
:param cert: (optional) if String, path to ssl client cert file (.pem).
|
||||||
If Tuple, ('cert', 'key') pair.
|
If Tuple, ('cert', 'key') pair.
|
||||||
|
:rtype: requests.Response
|
||||||
"""
|
"""
|
||||||
# Create the Request.
|
# Create the Request.
|
||||||
req = Request(
|
req = Request(
|
||||||
|
@ -550,7 +557,7 @@ class Session(SessionRedirectMixin):
|
||||||
|
|
||||||
# It's possible that users might accidentally send a Request object.
|
# It's possible that users might accidentally send a Request object.
|
||||||
# Guard against that specific failure case.
|
# Guard against that specific failure case.
|
||||||
if not isinstance(request, PreparedRequest):
|
if isinstance(request, Request):
|
||||||
raise ValueError('You can only send PreparedRequests.')
|
raise ValueError('You can only send PreparedRequests.')
|
||||||
|
|
||||||
# Set up variables needed for resolve_redirects and dispatching of hooks
|
# Set up variables needed for resolve_redirects and dispatching of hooks
|
||||||
|
|
|
@ -53,6 +53,7 @@ _codes = {
|
||||||
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
|
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
|
||||||
417: ('expectation_failed',),
|
417: ('expectation_failed',),
|
||||||
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
|
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
|
||||||
|
421: ('misdirected_request',),
|
||||||
422: ('unprocessable_entity', 'unprocessable'),
|
422: ('unprocessable_entity', 'unprocessable'),
|
||||||
423: ('locked',),
|
423: ('locked',),
|
||||||
424: ('failed_dependency', 'dependency'),
|
424: ('failed_dependency', 'dependency'),
|
||||||
|
|
|
@ -10,6 +10,8 @@ Data structures that power Requests.
|
||||||
|
|
||||||
import collections
|
import collections
|
||||||
|
|
||||||
|
from .compat import OrderedDict
|
||||||
|
|
||||||
|
|
||||||
class CaseInsensitiveDict(collections.MutableMapping):
|
class CaseInsensitiveDict(collections.MutableMapping):
|
||||||
"""
|
"""
|
||||||
|
@ -40,7 +42,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
def __init__(self, data=None, **kwargs):
|
def __init__(self, data=None, **kwargs):
|
||||||
self._store = dict()
|
self._store = OrderedDict()
|
||||||
if data is None:
|
if data is None:
|
||||||
data = {}
|
data = {}
|
||||||
self.update(data, **kwargs)
|
self.update(data, **kwargs)
|
||||||
|
|
|
@ -14,9 +14,7 @@ import codecs
|
||||||
import collections
|
import collections
|
||||||
import io
|
import io
|
||||||
import os
|
import os
|
||||||
import platform
|
|
||||||
import re
|
import re
|
||||||
import sys
|
|
||||||
import socket
|
import socket
|
||||||
import struct
|
import struct
|
||||||
import warnings
|
import warnings
|
||||||
|
@ -83,7 +81,14 @@ def super_len(o):
|
||||||
)
|
)
|
||||||
|
|
||||||
if hasattr(o, 'tell'):
|
if hasattr(o, 'tell'):
|
||||||
current_position = o.tell()
|
try:
|
||||||
|
current_position = o.tell()
|
||||||
|
except (OSError, IOError):
|
||||||
|
# This can happen in some weird situations, such as when the file
|
||||||
|
# is actually a special file descriptor like stdin. In this
|
||||||
|
# instance, we don't know what the length is, so set it to zero and
|
||||||
|
# let requests chunk it instead.
|
||||||
|
current_position = total_length
|
||||||
|
|
||||||
return max(0, total_length - current_position)
|
return max(0, total_length - current_position)
|
||||||
|
|
||||||
|
@ -534,6 +539,10 @@ def should_bypass_proxies(url):
|
||||||
if is_valid_cidr(proxy_ip):
|
if is_valid_cidr(proxy_ip):
|
||||||
if address_in_network(ip, proxy_ip):
|
if address_in_network(ip, proxy_ip):
|
||||||
return True
|
return True
|
||||||
|
elif ip == proxy_ip:
|
||||||
|
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
|
||||||
|
# matches the IP of the index
|
||||||
|
return True
|
||||||
else:
|
else:
|
||||||
for host in no_proxy:
|
for host in no_proxy:
|
||||||
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
|
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
|
||||||
|
@ -557,6 +566,7 @@ def should_bypass_proxies(url):
|
||||||
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def get_environ_proxies(url):
|
def get_environ_proxies(url):
|
||||||
"""Return a dict of environment proxies."""
|
"""Return a dict of environment proxies."""
|
||||||
if should_bypass_proxies(url):
|
if should_bypass_proxies(url):
|
||||||
|
@ -564,6 +574,7 @@ def get_environ_proxies(url):
|
||||||
else:
|
else:
|
||||||
return getproxies()
|
return getproxies()
|
||||||
|
|
||||||
|
|
||||||
def select_proxy(url, proxies):
|
def select_proxy(url, proxies):
|
||||||
"""Select a proxy for the url, if applicable.
|
"""Select a proxy for the url, if applicable.
|
||||||
|
|
||||||
|
@ -572,11 +583,24 @@ def select_proxy(url, proxies):
|
||||||
"""
|
"""
|
||||||
proxies = proxies or {}
|
proxies = proxies or {}
|
||||||
urlparts = urlparse(url)
|
urlparts = urlparse(url)
|
||||||
proxy = proxies.get(urlparts.scheme+'://'+urlparts.hostname)
|
if urlparts.hostname is None:
|
||||||
if proxy is None:
|
return proxies.get('all', proxies.get(urlparts.scheme))
|
||||||
proxy = proxies.get(urlparts.scheme)
|
|
||||||
|
proxy_keys = [
|
||||||
|
'all://' + urlparts.hostname,
|
||||||
|
'all',
|
||||||
|
urlparts.scheme + '://' + urlparts.hostname,
|
||||||
|
urlparts.scheme,
|
||||||
|
]
|
||||||
|
proxy = None
|
||||||
|
for proxy_key in proxy_keys:
|
||||||
|
if proxy_key in proxies:
|
||||||
|
proxy = proxies[proxy_key]
|
||||||
|
break
|
||||||
|
|
||||||
return proxy
|
return proxy
|
||||||
|
|
||||||
|
|
||||||
def default_user_agent(name="python-requests"):
|
def default_user_agent(name="python-requests"):
|
||||||
"""Return a string representing the default user agent."""
|
"""Return a string representing the default user agent."""
|
||||||
return '%s/%s' % (name, __version__)
|
return '%s/%s' % (name, __version__)
|
||||||
|
@ -600,21 +624,19 @@ def parse_header_links(value):
|
||||||
|
|
||||||
links = []
|
links = []
|
||||||
|
|
||||||
replace_chars = " '\""
|
replace_chars = ' \'"'
|
||||||
|
|
||||||
for val in re.split(", *<", value):
|
for val in re.split(', *<', value):
|
||||||
try:
|
try:
|
||||||
url, params = val.split(";", 1)
|
url, params = val.split(';', 1)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
url, params = val, ''
|
url, params = val, ''
|
||||||
|
|
||||||
link = {}
|
link = {'url': url.strip('<> \'"')}
|
||||||
|
|
||||||
link["url"] = url.strip("<> '\"")
|
for param in params.split(';'):
|
||||||
|
|
||||||
for param in params.split(";"):
|
|
||||||
try:
|
try:
|
||||||
key, value = param.split("=")
|
key, value = param.split('=')
|
||||||
except ValueError:
|
except ValueError:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
@ -661,8 +683,8 @@ def guess_json_utf(data):
|
||||||
|
|
||||||
|
|
||||||
def prepend_scheme_if_needed(url, new_scheme):
|
def prepend_scheme_if_needed(url, new_scheme):
|
||||||
'''Given a URL that may or may not have a scheme, prepend the given scheme.
|
"""Given a URL that may or may not have a scheme, prepend the given scheme.
|
||||||
Does not replace a present scheme with the one provided as an argument.'''
|
Does not replace a present scheme with the one provided as an argument."""
|
||||||
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
|
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
|
||||||
|
|
||||||
# urlparse is a finicky beast, and sometimes decides that there isn't a
|
# urlparse is a finicky beast, and sometimes decides that there isn't a
|
||||||
|
@ -693,8 +715,6 @@ def to_native_string(string, encoding='ascii'):
|
||||||
string in the native string type, encoding and decoding where necessary.
|
string in the native string type, encoding and decoding where necessary.
|
||||||
This assumes ASCII unless told otherwise.
|
This assumes ASCII unless told otherwise.
|
||||||
"""
|
"""
|
||||||
out = None
|
|
||||||
|
|
||||||
if isinstance(string, builtin_str):
|
if isinstance(string, builtin_str):
|
||||||
out = string
|
out = string
|
||||||
else:
|
else:
|
||||||
|
|
765
plugin/packages/wakatime/packages/socks.py
Normal file
765
plugin/packages/wakatime/packages/socks.py
Normal file
|
@ -0,0 +1,765 @@
|
||||||
|
"""
|
||||||
|
SocksiPy - Python SOCKS module.
|
||||||
|
Version 1.5.6
|
||||||
|
|
||||||
|
Copyright 2006 Dan-Haim. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
3. Neither the name of Dan Haim nor the names of his contributors may be used
|
||||||
|
to endorse or promote products derived from this software without specific
|
||||||
|
prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||||
|
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||||
|
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||||
|
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||||
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
|
||||||
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||||
|
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
|
||||||
|
|
||||||
|
|
||||||
|
This module provides a standard socket-like interface for Python
|
||||||
|
for tunneling connections through SOCKS proxies.
|
||||||
|
|
||||||
|
===============================================================================
|
||||||
|
|
||||||
|
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
|
||||||
|
for use in PyLoris (http://pyloris.sourceforge.net/)
|
||||||
|
|
||||||
|
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
|
||||||
|
mainly to merge bug fixes found in Sourceforge
|
||||||
|
|
||||||
|
Modifications made by Anorov (https://github.com/Anorov)
|
||||||
|
-Forked and renamed to PySocks
|
||||||
|
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
|
||||||
|
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
|
||||||
|
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
|
||||||
|
-Re-styled code to make it readable
|
||||||
|
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
|
||||||
|
-Improved exception handling and output
|
||||||
|
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
|
||||||
|
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
|
||||||
|
-Other general fixes
|
||||||
|
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
|
||||||
|
-Various small bug fixes
|
||||||
|
"""
|
||||||
|
|
||||||
|
__version__ = "1.5.6"
|
||||||
|
|
||||||
|
import socket
|
||||||
|
import struct
|
||||||
|
from errno import EOPNOTSUPP, EINVAL, EAGAIN
|
||||||
|
from io import BytesIO
|
||||||
|
from os import SEEK_CUR
|
||||||
|
from collections import Callable
|
||||||
|
from base64 import b64encode
|
||||||
|
|
||||||
|
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
|
||||||
|
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
|
||||||
|
PROXY_TYPE_HTTP = HTTP = 3
|
||||||
|
|
||||||
|
PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
|
||||||
|
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
|
||||||
|
|
||||||
|
_orgsocket = _orig_socket = socket.socket
|
||||||
|
|
||||||
|
class ProxyError(IOError):
|
||||||
|
"""
|
||||||
|
socket_err contains original socket.error exception.
|
||||||
|
"""
|
||||||
|
def __init__(self, msg, socket_err=None):
|
||||||
|
self.msg = msg
|
||||||
|
self.socket_err = socket_err
|
||||||
|
|
||||||
|
if socket_err:
|
||||||
|
self.msg += ": {0}".format(socket_err)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.msg
|
||||||
|
|
||||||
|
class GeneralProxyError(ProxyError): pass
|
||||||
|
class ProxyConnectionError(ProxyError): pass
|
||||||
|
class SOCKS5AuthError(ProxyError): pass
|
||||||
|
class SOCKS5Error(ProxyError): pass
|
||||||
|
class SOCKS4Error(ProxyError): pass
|
||||||
|
class HTTPError(ProxyError): pass
|
||||||
|
|
||||||
|
SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
|
||||||
|
0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
|
||||||
|
0x5D: "Request rejected because the client program and identd report different user-ids"
|
||||||
|
}
|
||||||
|
|
||||||
|
SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
|
||||||
|
0x02: "Connection not allowed by ruleset",
|
||||||
|
0x03: "Network unreachable",
|
||||||
|
0x04: "Host unreachable",
|
||||||
|
0x05: "Connection refused",
|
||||||
|
0x06: "TTL expired",
|
||||||
|
0x07: "Command not supported, or protocol error",
|
||||||
|
0x08: "Address type not supported"
|
||||||
|
}
|
||||||
|
|
||||||
|
DEFAULT_PORTS = { SOCKS4: 1080,
|
||||||
|
SOCKS5: 1080,
|
||||||
|
HTTP: 8080
|
||||||
|
}
|
||||||
|
|
||||||
|
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
|
||||||
|
"""
|
||||||
|
set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])
|
||||||
|
|
||||||
|
Sets a default proxy which all further socksocket objects will use,
|
||||||
|
unless explicitly changed. All parameters are as for socket.set_proxy().
|
||||||
|
"""
|
||||||
|
socksocket.default_proxy = (proxy_type, addr, port, rdns,
|
||||||
|
username.encode() if username else None,
|
||||||
|
password.encode() if password else None)
|
||||||
|
|
||||||
|
setdefaultproxy = set_default_proxy
|
||||||
|
|
||||||
|
def get_default_proxy():
|
||||||
|
"""
|
||||||
|
Returns the default proxy, set by set_default_proxy.
|
||||||
|
"""
|
||||||
|
return socksocket.default_proxy
|
||||||
|
|
||||||
|
getdefaultproxy = get_default_proxy
|
||||||
|
|
||||||
|
def wrap_module(module):
|
||||||
|
"""
|
||||||
|
Attempts to replace a module's socket library with a SOCKS socket. Must set
|
||||||
|
a default proxy using set_default_proxy(...) first.
|
||||||
|
This will only work on modules that import socket directly into the namespace;
|
||||||
|
most of the Python Standard Library falls into this category.
|
||||||
|
"""
|
||||||
|
if socksocket.default_proxy:
|
||||||
|
module.socket.socket = socksocket
|
||||||
|
else:
|
||||||
|
raise GeneralProxyError("No default proxy specified")
|
||||||
|
|
||||||
|
wrapmodule = wrap_module
|
||||||
|
|
||||||
|
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
|
||||||
|
proxy_port=None, proxy_rdns=True,
|
||||||
|
proxy_username=None, proxy_password=None,
|
||||||
|
timeout=None, source_address=None,
|
||||||
|
socket_options=None):
|
||||||
|
"""create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
|
||||||
|
|
||||||
|
Like socket.create_connection(), but connects to proxy
|
||||||
|
before returning the socket object.
|
||||||
|
|
||||||
|
dest_pair - 2-tuple of (IP/hostname, port).
|
||||||
|
**proxy_args - Same args passed to socksocket.set_proxy() if present.
|
||||||
|
timeout - Optional socket timeout value, in seconds.
|
||||||
|
source_address - tuple (host, port) for the socket to bind to as its source
|
||||||
|
address before connecting (only for compatibility)
|
||||||
|
"""
|
||||||
|
# Remove IPv6 brackets on the remote address and proxy address.
|
||||||
|
remote_host, remote_port = dest_pair
|
||||||
|
if remote_host.startswith('['):
|
||||||
|
remote_host = remote_host.strip('[]')
|
||||||
|
if proxy_addr and proxy_addr.startswith('['):
|
||||||
|
proxy_addr = proxy_addr.strip('[]')
|
||||||
|
|
||||||
|
err = None
|
||||||
|
|
||||||
|
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
|
||||||
|
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
|
||||||
|
family, socket_type, proto, canonname, sa = r
|
||||||
|
sock = None
|
||||||
|
try:
|
||||||
|
sock = socksocket(family, socket_type, proto)
|
||||||
|
|
||||||
|
if socket_options is not None:
|
||||||
|
for opt in socket_options:
|
||||||
|
sock.setsockopt(*opt)
|
||||||
|
|
||||||
|
if isinstance(timeout, (int, float)):
|
||||||
|
sock.settimeout(timeout)
|
||||||
|
|
||||||
|
if proxy_type is not None:
|
||||||
|
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
|
||||||
|
proxy_username, proxy_password)
|
||||||
|
if source_address is not None:
|
||||||
|
sock.bind(source_address)
|
||||||
|
|
||||||
|
sock.connect((remote_host, remote_port))
|
||||||
|
return sock
|
||||||
|
|
||||||
|
except socket.error as e:
|
||||||
|
err = e
|
||||||
|
if sock is not None:
|
||||||
|
sock.close()
|
||||||
|
sock = None
|
||||||
|
|
||||||
|
if err is not None:
|
||||||
|
raise err
|
||||||
|
|
||||||
|
raise socket.error("gai returned empty list.")
|
||||||
|
|
||||||
|
class _BaseSocket(socket.socket):
    """Allows Python 2's "delegated" methods such as send() to be overridden
    """
    def __init__(self, *pos, **kw):
        _orig_socket.__init__(self, *pos, **kw)

        # Save the bound built-in methods (listed in _savenames, populated by
        # the module-level patch loop below this class) before deleting the
        # instance-level shadows, so the lambda wrappers installed on the class
        # can still reach the real implementations via _savedmethods.
        self._savedmethods = dict()
        for name in self._savenames:
            self._savedmethods[name] = getattr(self, name)
            delattr(self, name)  # Allows normal overriding mechanism to work

    # Names of methods that had to be saved per-instance; empty on Python 3,
    # filled in at import time on Python 2 (see the patch loop below).
    _savenames = list()
|
||||||
|
|
||||||
|
# Import-time patching: on Python 2, socket methods such as send() are
# non-function descriptors (because socket uses __slots__-style delegation),
# so subclass overrides are bypassed. For each such method we record its name
# in _BaseSocket._savenames and install a plain-function forwarder on the
# class so that normal Python method resolution (and overriding) works.
def _makemethod(name):
    # Forward the call to the real implementation saved per-instance
    # in _BaseSocket.__init__.
    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
    method = getattr(_BaseSocket, name, None)

    # Determine if the method is not defined the usual way
    # as a function in the class.
    # Python 2 uses __slots__, so there are descriptors for each method,
    # but they are not functions.
    if not isinstance(method, Callable):
        _BaseSocket._savenames.append(name)
        setattr(_BaseSocket, name, _makemethod(name))
|
||||||
|
|
||||||
|
class socksocket(_BaseSocket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET and proto=0.
    The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
    """

    # Class-wide default proxy tuple; when set (via the module-level
    # set_default_proxy helper), every new socksocket starts with it.
    default_proxy = None

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
        if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
            msg = "Socket type must be stream or datagram, not {!r}"
            raise ValueError(msg.format(type))

        _BaseSocket.__init__(self, family, type, proto, *args, **kwargs)
        self._proxyconn = None  # TCP connection to keep UDP relay alive

        if self.default_proxy:
            self.proxy = self.default_proxy
        else:
            # (proxy_type, addr, port, rdns, username, password)
            self.proxy = (None, None, None, None, None, None)
        self.proxy_sockname = None
        self.proxy_peername = None

    def _readall(self, file, count):
        """
        Receive EXACTLY the number of bytes requested from the file object.
        Blocks until the required number of bytes have been received.
        """
        data = b""
        while len(data) < count:
            d = file.read(count - len(data))
            if not d:
                raise GeneralProxyError("Connection closed unexpectedly")
            data += d
        return data

    def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
        """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.

        proxy_type - The type of the proxy to be used. Three types
                        are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                        PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -       The address of the server (IP or DNS).
        port -       The port of the server. Defaults to 1080 for SOCKS
                        servers and 8080 for HTTP proxy servers.
        rdns -       Should DNS queries be performed on the remote side
                        (rather than the local side). The default is True.
                        Note: This has no effect with SOCKS4 servers.
        username -   Username to authenticate with to the server.
                        The default is no authentication.
        password -   Password to authenticate with to the server.
                        Only relevant when username is also provided.
        """
        # Credentials are stored encoded so the negotiation code can write
        # them on the wire directly.
        self.proxy = (proxy_type, addr, port, rdns,
                      username.encode() if username else None,
                      password.encode() if password else None)

    # Backwards-compatible alias for older SocksiPy callers.
    setproxy = set_proxy

    def bind(self, *pos, **kw):
        """
        Implements proxy connection for UDP sockets,
        which happens during the bind() phase.
        """
        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
        if not proxy_type or self.type != socket.SOCK_DGRAM:
            return _orig_socket.bind(self, *pos, **kw)

        if self._proxyconn:
            raise socket.error(EINVAL, "Socket already bound to an address")
        if proxy_type != SOCKS5:
            msg = "UDP only supported by SOCKS5 proxy type"
            raise socket.error(EOPNOTSUPP, msg)
        _BaseSocket.bind(self, *pos, **kw)

        # Need to specify actual local port because
        # some relays drop packets if a port of zero is specified.
        # Avoid specifying host address in case of NAT though.
        _, port = self.getsockname()
        dst = ("0", port)

        self._proxyconn = _orig_socket()
        proxy = self._proxy_addr()
        self._proxyconn.connect(proxy)

        UDP_ASSOCIATE = b"\x03"
        _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)

        # The relay is most likely on the same host as the SOCKS proxy,
        # but some proxies return a private IP address (10.x.y.z)
        host, _ = proxy
        _, port = relay
        _BaseSocket.connect(self, (host, port))
        self.proxy_sockname = ("0.0.0.0", 0)  # Unknown

    def sendto(self, bytes, *args, **kwargs):
        # For non-UDP sockets this is plain sendto; for UDP, wrap the payload
        # in a SOCKS5 UDP request header and send it through the relay.
        if self.type != socket.SOCK_DGRAM:
            return _BaseSocket.sendto(self, bytes, *args, **kwargs)
        if not self._proxyconn:
            self.bind(("", 0))

        address = args[-1]
        flags = args[:-1]

        # Build the SOCKS5 UDP header: RSV (2 bytes), FRAG (standalone),
        # then the destination address.
        header = BytesIO()
        RSV = b"\x00\x00"
        header.write(RSV)
        STANDALONE = b"\x00"
        header.write(STANDALONE)
        self._write_SOCKS5_address(address, header)

        sent = _BaseSocket.send(self, header.getvalue() + bytes, *flags, **kwargs)
        # Report only the payload bytes sent, excluding the SOCKS header.
        return sent - header.tell()

    def send(self, bytes, flags=0, **kwargs):
        if self.type == socket.SOCK_DGRAM:
            return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
        else:
            return _BaseSocket.send(self, bytes, flags, **kwargs)

    def recvfrom(self, bufsize, flags=0):
        if self.type != socket.SOCK_DGRAM:
            return _BaseSocket.recvfrom(self, bufsize, flags)
        if not self._proxyconn:
            self.bind(("", 0))

        # Strip the SOCKS5 UDP header: skip RSV, check FRAG, read source addr.
        buf = BytesIO(_BaseSocket.recv(self, bufsize, flags))
        buf.seek(+2, SEEK_CUR)
        frag = buf.read(1)
        if ord(frag):
            raise NotImplementedError("Received UDP packet fragment")
        fromhost, fromport = self._read_SOCKS5_address(buf)

        # Emulate connected-UDP filtering: drop packets not from the peer
        # we connected to (port 0 means "any port").
        if self.proxy_peername:
            peerhost, peerport = self.proxy_peername
            if fromhost != peerhost or peerport not in (0, fromport):
                raise socket.error(EAGAIN, "Packet filtered")

        return (buf.read(), (fromhost, fromport))

    def recv(self, *pos, **kw):
        bytes, _ = self.recvfrom(*pos, **kw)
        return bytes

    def close(self):
        # Also close the TCP control connection that keeps the UDP relay open.
        if self._proxyconn:
            self._proxyconn.close()
        return _BaseSocket.close(self)

    def get_proxy_sockname(self):
        """
        Returns the bound IP address and port number at the proxy.
        """
        return self.proxy_sockname

    getproxysockname = get_proxy_sockname

    def get_proxy_peername(self):
        """
        Returns the IP and port number of the proxy.
        """
        return _BaseSocket.getpeername(self)

    getproxypeername = get_proxy_peername

    def get_peername(self):
        """
        Returns the IP address and port number of the destination
        machine (note: get_proxy_peername returns the proxy)
        """
        return self.proxy_peername

    getpeername = get_peername

    def _negotiate_SOCKS5(self, *dest_addr):
        """
        Negotiates a stream connection through a SOCKS5 server.
        """
        CONNECT = b"\x01"
        self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
            CONNECT, dest_addr)

    def _SOCKS5_request(self, conn, cmd, dst):
        """
        Send SOCKS5 request with given command (CMD field) and
        address (DST field). Returns resolved DST address that was used.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        writer = conn.makefile("wb")
        reader = conn.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # First we'll send the authentication packages we support.
            if username and password:
                # The username/password details were supplied to the
                # set_proxy method so we support the USERNAME/PASSWORD
                # authentication (in addition to the standard none).
                writer.write(b"\x05\x02\x00\x02")
            else:
                # No username/password were entered, therefore we
                # only support connections with no authentication.
                writer.write(b"\x05\x01\x00")

            # We'll receive the server's response to determine which
            # method was selected
            writer.flush()
            chosen_auth = self._readall(reader, 2)

            if chosen_auth[0:1] != b"\x05":
                # Note: string[i:i+1] is used because indexing of a bytestring
                # via bytestring[i] yields an integer in Python 3
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Check the chosen authentication method

            if chosen_auth[1:2] == b"\x02":
                # Okay, we need to perform a basic username/password
                # authentication.
                writer.write(b"\x01" + chr(len(username)).encode()
                             + username
                             + chr(len(password)).encode()
                             + password)
                writer.flush()
                auth_status = self._readall(reader, 2)
                if auth_status[0:1] != b"\x01":
                    # Bad response
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
                if auth_status[1:2] != b"\x00":
                    # Authentication failed
                    raise SOCKS5AuthError("SOCKS5 authentication failed")

                # Otherwise, authentication succeeded

            # No authentication is required if 0x00
            elif chosen_auth[1:2] != b"\x00":
                # Reaching here is always bad
                if chosen_auth[1:2] == b"\xFF":
                    raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
                else:
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Now we can request the actual connection
            writer.write(b"\x05" + cmd + b"\x00")
            resolved = self._write_SOCKS5_address(dst, writer)
            writer.flush()

            # Get the response
            resp = self._readall(reader, 3)
            if resp[0:1] != b"\x05":
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x00:
                # Connection failed: server returned an error
                error = SOCKS5_ERRORS.get(status, "Unknown error")
                raise SOCKS5Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            bnd = self._read_SOCKS5_address(reader)
            return (resolved, bnd)
        finally:
            reader.close()
            writer.close()

    def _write_SOCKS5_address(self, addr, file):
        """
        Return the host and port packed for the SOCKS5 protocol,
        and the resolved address as a tuple object.
        """
        host, port = addr
        proxy_type, _, _, rdns, username, password = self.proxy
        # SOCKS5 ATYP bytes: 0x01 = IPv4, 0x04 = IPv6 (0x03 = domain name).
        family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}

        # If the given destination address is an IP address, we'll
        # use the IP address request even if remote resolving was specified.
        # Detect whether the address is IPv4/6 directly.
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                addr_bytes = socket.inet_pton(family, host)
                file.write(family_to_byte[family] + addr_bytes)
                host = socket.inet_ntop(family, addr_bytes)
                file.write(struct.pack(">H", port))
                return host, port
            except socket.error:
                continue

        # Well it's not an IP number, so it's probably a DNS name.
        if rdns:
            # Resolve remotely
            host_bytes = host.encode('idna')
            file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
        else:
            # Resolve locally
            addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
            # We can't really work out what IP is reachable, so just pick the
            # first.
            target_addr = addresses[0]
            family = target_addr[0]
            host = target_addr[4][0]

            addr_bytes = socket.inet_pton(family, host)
            file.write(family_to_byte[family] + addr_bytes)
            host = socket.inet_ntop(family, addr_bytes)
        file.write(struct.pack(">H", port))
        return host, port

    def _read_SOCKS5_address(self, file):
        # Parse a SOCKS5 address field: ATYP byte, then the address in the
        # corresponding encoding, then a 2-byte big-endian port.
        atyp = self._readall(file, 1)
        if atyp == b"\x01":
            addr = socket.inet_ntoa(self._readall(file, 4))
        elif atyp == b"\x03":
            length = self._readall(file, 1)
            addr = self._readall(file, ord(length))
        elif atyp == b"\x04":
            addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
        else:
            raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

        port = struct.unpack(">H", self._readall(file, 2))[0]
        return addr, port

    def _negotiate_SOCKS4(self, dest_addr, dest_port):
        """
        Negotiates a connection through a SOCKS4 server.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        writer = self.makefile("wb")
        reader = self.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # Check if the destination address provided is an IP address
            remote_resolve = False
            try:
                addr_bytes = socket.inet_aton(dest_addr)
            except socket.error:
                # It's a DNS name. Check where it should be resolved.
                if rdns:
                    # SOCKS4a: send the invalid IP 0.0.0.1 and append the
                    # hostname after the userid so the proxy resolves it.
                    addr_bytes = b"\x00\x00\x00\x01"
                    remote_resolve = True
                else:
                    addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))

            # Construct the request packet
            writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
            writer.write(addr_bytes)

            # The username parameter is considered userid for SOCKS4
            if username:
                writer.write(username)
            writer.write(b"\x00")

            # DNS name if remote resolving is required
            # NOTE: This is actually an extension to the SOCKS4 protocol
            # called SOCKS4A and may not be supported in all cases.
            if remote_resolve:
                writer.write(dest_addr.encode('idna') + b"\x00")
            writer.flush()

            # Get the response from the server
            resp = self._readall(reader, 8)
            if resp[0:1] != b"\x00":
                # Bad data
                raise GeneralProxyError("SOCKS4 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x5A:
                # Connection failed: server returned an error
                error = SOCKS4_ERRORS.get(status, "Unknown error")
                raise SOCKS4Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
            if remote_resolve:
                self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
            else:
                self.proxy_peername = dest_addr, dest_port
        finally:
            reader.close()
            writer.close()

    def _negotiate_HTTP(self, dest_addr, dest_port):
        """
        Negotiates a connection through an HTTP server.
        NOTE: This currently only supports HTTP CONNECT-style proxies.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        # If we need to resolve locally, we do this now
        addr = dest_addr if rdns else socket.gethostbyname(dest_addr)

        http_headers = [
            b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
            b"Host: " + dest_addr.encode('idna')
        ]

        if username and password:
            http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))

        # Trailing blank element yields the header-terminating CRLF CRLF.
        http_headers.append(b"\r\n")

        self.sendall(b"\r\n".join(http_headers))

        # We just need the first line to check if the connection was successful
        fobj = self.makefile()
        status_line = fobj.readline()
        fobj.close()

        if not status_line:
            raise GeneralProxyError("Connection closed unexpectedly")

        try:
            proto, status_code, status_msg = status_line.split(" ", 2)
        except ValueError:
            raise GeneralProxyError("HTTP proxy server sent invalid response")

        if not proto.startswith("HTTP/"):
            raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")

        try:
            status_code = int(status_code)
        except ValueError:
            raise HTTPError("HTTP proxy server did not return a valid HTTP status")

        if status_code != 200:
            error = "{0}: {1}".format(status_code, status_msg)
            if status_code in (400, 403, 405):
                # It's likely that the HTTP proxy server does not support the CONNECT tunneling method
                error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
                          " (must be a CONNECT tunnel proxy)")
            raise HTTPError(error)

        self.proxy_sockname = (b"0.0.0.0", 0)
        self.proxy_peername = addr, dest_port

    # Dispatch table from proxy type constant to negotiation method.
    _proxy_negotiators = {
        SOCKS4: _negotiate_SOCKS4,
        SOCKS5: _negotiate_SOCKS5,
        HTTP: _negotiate_HTTP
    }

    def connect(self, dest_pair):
        """
        Connects to the specified destination through a proxy.
        Uses the same API as socket's connect().
        To select the proxy server, use set_proxy().

        dest_pair - 2-tuple of (IP/hostname, port).
        """
        if len(dest_pair) != 2 or dest_pair[0].startswith("["):
            # Probably IPv6, not supported -- raise an error, and hope
            # Happy Eyeballs (RFC6555) makes sure at least the IPv4
            # connection works...
            raise socket.error("PySocks doesn't support IPv6")

        dest_addr, dest_port = dest_pair

        if self.type == socket.SOCK_DGRAM:
            if not self._proxyconn:
                self.bind(("", 0))
            dest_addr = socket.gethostbyname(dest_addr)

            # If the host address is INADDR_ANY or similar, reset the peer
            # address so that packets are received from any peer
            if dest_addr == "0.0.0.0" and not dest_port:
                self.proxy_peername = None
            else:
                self.proxy_peername = (dest_addr, dest_port)
            return

        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy

        # Do a minimal input check first
        if (not isinstance(dest_pair, (list, tuple))
                or len(dest_pair) != 2
                or not dest_addr
                or not isinstance(dest_port, int)):
            raise GeneralProxyError("Invalid destination-connection (host, port) pair")


        if proxy_type is None:
            # Treat like regular socket object
            self.proxy_peername = dest_pair
            _BaseSocket.connect(self, (dest_addr, dest_port))
            return

        proxy_addr = self._proxy_addr()

        try:
            # Initial connection to proxy server
            _BaseSocket.connect(self, proxy_addr)

        except socket.error as error:
            # Error while connecting to proxy
            self.close()
            proxy_addr, proxy_port = proxy_addr
            proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
            printable_type = PRINTABLE_PROXY_TYPES[proxy_type]

            msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                             proxy_server)
            raise ProxyConnectionError(msg, error)

        else:
            # Connected to proxy server, now negotiate
            try:
                # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
                negotiate = self._proxy_negotiators[proxy_type]
                negotiate(self, dest_addr, dest_port)
            except socket.error as error:
                # Wrap socket errors
                self.close()
                raise GeneralProxyError("Socket error", error)
            except ProxyError:
                # Protocol error while negotiating with proxy
                self.close()
                raise

    def _proxy_addr(self):
        """
        Return proxy address to connect to as tuple object
        """
        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
        proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
        if not proxy_port:
            raise GeneralProxyError("Invalid proxy type")
        return proxy_addr, proxy_port
|
79
plugin/packages/wakatime/packages/sockshandler.py
Normal file
79
plugin/packages/wakatime/packages/sockshandler.py
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
"""
|
||||||
|
SocksiPy + urllib2 handler
|
||||||
|
|
||||||
|
version: 0.3
|
||||||
|
author: e<e@tr0ll.in>
|
||||||
|
|
||||||
|
This module provides a Handler which you can use with urllib2 to allow it to tunnel your connection through a socks.sockssocket socket, with out monkey patching the original socket...
|
||||||
|
"""
|
||||||
|
import ssl
|
||||||
|
|
||||||
|
try:
|
||||||
|
import urllib2
|
||||||
|
import httplib
|
||||||
|
except ImportError: # Python 3
|
||||||
|
import urllib.request as urllib2
|
||||||
|
import http.client as httplib
|
||||||
|
|
||||||
|
import socks # $ pip install PySocks
|
||||||
|
|
||||||
|
def merge_dict(a, b):
    """Return a new dict with the items of *a*, overridden by those of *b*.

    Neither input dict is modified.
    """
    merged = dict(a)
    merged.update(b)
    return merged
|
||||||
|
|
||||||
|
class SocksiPyConnection(httplib.HTTPConnection):
    """HTTPConnection that tunnels its TCP connection through a SOCKS proxy.

    Extra leading positional arguments are the proxy settings (same meaning
    as socksocket.set_proxy); the remaining args/kwargs are passed through to
    httplib.HTTPConnection unchanged.
    """
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        # Stash proxy settings for connect(); order matches setproxy()'s
        # positional signature.
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPConnection.__init__(self, *args, **kwargs)

    def connect(self):
        # Replace the plain socket with a SOCKS-enabled one; socksocket
        # performs the proxy handshake inside connect().
        self.sock = socks.socksocket()
        self.sock.setproxy(*self.proxyargs)
        if type(self.timeout) in (int, float):
            self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))
|
||||||
|
|
||||||
|
class SocksiPyConnectionS(httplib.HTTPSConnection):
    """HTTPSConnection that tunnels through a SOCKS proxy, then wraps in TLS.

    Extra leading positional arguments are the proxy settings (same meaning
    as socksocket.set_proxy); the remaining args/kwargs are passed through to
    httplib.HTTPSConnection unchanged.
    """
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        # Stash proxy settings for connect(); order matches setproxy()'s
        # positional signature.
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        sock = socks.socksocket()
        sock.setproxy(*self.proxyargs)
        if type(self.timeout) in (int, float):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        # TLS is layered on top of the already-established proxy tunnel.
        # NOTE(review): ssl.wrap_socket performs no hostname verification and
        # is deprecated in modern Python; consider ssl.SSLContext here.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
|
||||||
|
|
||||||
|
class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
    """urllib2/urllib.request handler that routes HTTP(S) through a SOCKS proxy.

    The constructor arguments are forwarded to SocksiPyConnection /
    SocksiPyConnectionS (i.e. they are the proxy settings).
    """
    def __init__(self, *args, **kwargs):
        # Saved and re-used for every connection built by http_open/https_open.
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)

    def http_open(self, req):
        # do_open expects a connection factory; build one that injects our
        # proxy settings alongside the host/port urllib supplies.
        def build(host, port=None, timeout=0, **kwargs):
            kw = merge_dict(self.kw, kwargs)
            conn = SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **kw)
            return conn
        return self.do_open(build, req)

    def https_open(self, req):
        # Same as http_open but produces TLS-wrapped connections.
        def build(host, port=None, timeout=0, **kwargs):
            kw = merge_dict(self.kw, kwargs)
            conn = SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **kw)
            return conn
        return self.do_open(build, req)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Demo: fetch httpbin.org/ip over HTTP and HTTPS through a local SOCKS5
    # proxy. Port comes from argv[1], defaulting to 9050 (Tor's default).
    import sys
    try:
        port = int(sys.argv[1])
    except (ValueError, IndexError):
        port = 9050
    opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port))
    print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode())
    print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode())
|
|
@ -47,12 +47,12 @@ class Git(BaseProject):
|
||||||
log.traceback('warn')
|
log.traceback('warn')
|
||||||
except IOError: # pragma: nocover
|
except IOError: # pragma: nocover
|
||||||
log.traceback('warn')
|
log.traceback('warn')
|
||||||
return None
|
return u('master')
|
||||||
|
|
||||||
def _project_base(self):
|
def _project_base(self):
|
||||||
if self.configFile:
|
if self.configFile:
|
||||||
return os.path.dirname(os.path.dirname(self.configFile))
|
return os.path.dirname(os.path.dirname(self.configFile))
|
||||||
return None
|
return None # pragma: nocover
|
||||||
|
|
||||||
def _find_git_config_file(self, path):
|
def _find_git_config_file(self, path):
|
||||||
path = os.path.realpath(path)
|
path = os.path.realpath(path)
|
||||||
|
|
Loading…
Reference in a new issue