Move from pip.compat.* to six.moves.* for urllib-related stuff

Donald Stufft 2014-09-11 18:40:45 -04:00
parent 8b1affd007
commit ec91d8e421
13 changed files with 80 additions and 66 deletions
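For context, the pattern this commit converges on: instead of gathering Python 2/3 urllib aliases in pip.compat, each module imports the vendored six.moves names directly. A minimal sketch of the new-style imports (plain six.moves behaves the same way if pip's vendored copy is not at hand):

# New-style imports used throughout this commit; identical on Python 2 and 3.
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request

parts = urllib_parse.urlsplit("https://pypi.python.org/simple/pip/")
print(parts.scheme, parts.netloc)                     # https pypi.python.org
print(urllib_parse.quote("in dex"))                   # in%20dex
print(urllib_request.url2pathname("/tmp/in%20dex"))   # /tmp/in dex (on POSIX)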

View File

@@ -22,11 +22,6 @@ except ImportError:
if sys.version_info >= (3,):
from urllib.request import url2pathname, urlretrieve, pathname2url
import urllib.parse as urllib
import urllib.request as urllib2
import urllib.parse as urlparse
def cmp(a, b):
return (a > b) - (a < b)
@@ -45,11 +40,6 @@ if sys.version_info >= (3,):
return http_message.get_param(param, default_value)
else:
from urllib import url2pathname, urlretrieve, pathname2url
import urllib
import urllib2
import urlparse
def console_to_str(s):
return s

View File

@@ -14,9 +14,10 @@ import shutil
import sys
import tempfile
from pip._vendor.six.moves.urllib import parse as urllib_parse
import pip
from pip.compat import urllib, urlparse
from pip.exceptions import InstallationError, HashMismatch
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file)
@@ -109,13 +110,13 @@ class MultiDomainBasicAuth(AuthBase):
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
@@ -146,7 +147,7 @@ class MultiDomainBasicAuth(AuthBase):
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
@@ -306,7 +307,7 @@ def get_file_content(url, comes_from=None, session=None):
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
@@ -350,7 +351,7 @@ def url_to_path(url):
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
path = urllib_parse.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
@@ -370,7 +371,7 @@ def path_to_url(path):
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
url = '/'.join([urllib_parse.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
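(Aside: with urllib_parse.quote the segment-by-segment conversion above keeps spaces and other unsafe characters escaped. A simplified POSIX-only sketch with a hypothetical path:)

from urllib.parse import quote   # urllib_parse.quote via six.moves on Python 2

path = "/tmp/in dex/pip"                                           # hypothetical
url = "file:///" + "/".join(quote(part) for part in path.split("/")).lstrip("/")
print(url)   # file:///tmp/in%20dex/pip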
@@ -651,13 +652,13 @@ class PipXmlrpcTransport(xmlrpc_client.Transport):
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urlparse.urlparse(index_url)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urlparse.urlunparse(parts)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,

View File

@@ -9,6 +9,9 @@ import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import Inf, normalize_name, splitext, is_prerelease
from pip.utils.deprecation import RemovedInPip17Warning
from pip.utils.logging import indent_log
@@ -16,7 +19,6 @@ from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.compat import urlparse, url2pathname
from pip.download import url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
@@ -192,7 +194,7 @@ class PackageFinder(object):
def _warn_about_insecure_transport_scheme(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
parsed = urllib_parse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
@@ -640,7 +642,7 @@ class PackageFinder(object):
and not platform == 'cli'
)
and comes_from is not None
and urlparse.urlparse(
and urllib_parse.urlparse(
comes_from.url
).netloc.endswith("pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
@@ -782,13 +784,14 @@ class HTMLPage(object):
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
urllib_parse.urlparse(url)
if (scheme == 'file'
and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
@@ -847,7 +850,7 @@ class HTMLPage(object):
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https', 'ftp', 'ftps'):
# FIXME: some warning or something?
# assertion error?
@@ -894,7 +897,9 @@ class HTMLPage(object):
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
@@ -928,7 +933,7 @@ class HTMLPage(object):
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urlparse.urljoin(self.base_url, href)
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
@@ -948,7 +953,7 @@ class HTMLPage(object):
)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
url = self.clean_link(urllib_parse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@@ -1003,18 +1008,18 @@ class Link(object):
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
return urllib_parse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
return urllib_parse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@@ -1025,8 +1030,8 @@
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
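(Aside: the url_without_fragment property above is the usual urlsplit/urlunsplit round trip. A standalone sketch with the stdlib urllib.parse, which six.moves aliases on Python 3, and a hypothetical URL:)

from urllib.parse import urlsplit, urlunsplit

url = "https://example.com/simple/pip/#egg=pip"           # hypothetical
scheme, netloc, path, query, fragment = urlsplit(url)
print(urlunsplit((scheme, netloc, path, query, None)))    # https://example.com/simple/pip/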

View File

@@ -3,7 +3,8 @@ from __future__ import absolute_import
import os
import re
from pip.compat import urlparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.utils import normalize_name
@@ -46,7 +47,7 @@ def parse_requirements(filename, finder=None, comes_from=None, options=None,
req_url = line[len('--requirement'):].strip().strip('=')
if _scheme_re.search(filename):
# Relative to a URL
req_url = urlparse.urljoin(filename, req_url)
req_url = urllib_parse.urljoin(filename, req_url)
elif not _scheme_re.search(req_url):
req_url = os.path.join(os.path.dirname(filename), req_url)
for item in parse_requirements(

View File

@@ -12,11 +12,13 @@ from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
import pip.wheel
from pip._vendor import pkg_resources, six
from pip._vendor.six.moves import configparser
from pip.compat import urllib, native_str, WINDOWS
from pip._vendor.six.moves.urllib import parse as urllib_parse
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
@@ -254,7 +256,7 @@ class InstallRequirement(object):
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
return urllib_parse.quote(self.req.unsafe_name)
@property
def setup_py(self):

View File

@@ -5,7 +5,8 @@ import logging
import os
import shutil
from pip.compat import urlparse, urllib
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.exceptions import BadCommand
from pip.utils import (display_path, backup_dir, find_command,
rmtree, ask_path_exists)
@@ -24,10 +25,10 @@ class VcsSupport(object):
def __init__(self):
# Register more schemes with urlparse for various version control
# systems
urlparse.uses_netloc.extend(self.schemes)
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(self.schemes)
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
@@ -130,11 +131,11 @@ class VersionControl(object):
)
assert '+' in self.url, error_message % self.url
url = self.url.split('+', 1)[1]
scheme, netloc, path, query, frag = urlparse.urlsplit(url)
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def get_info(self, location):
@@ -150,7 +151,7 @@ class VersionControl(object):
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib.unquote(url).rstrip('/')
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
"""

View File

@@ -5,7 +5,12 @@ import os
import tempfile
import re
from pip.compat import urlparse
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
@@ -27,9 +32,9 @@ class Bazaar(VersionControl):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""

View File

@@ -4,13 +4,16 @@ import logging
import tempfile
import os.path
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import call_subprocess
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.compat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
@@ -34,7 +37,8 @@ class Git(VersionControl):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
url2pathname(path).replace('\\', '/').lstrip('/')
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1

View File

@@ -4,7 +4,8 @@ import logging
import os
import re
from pip.compat import urlparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.index import Link
from pip.utils import rmtree, display_path, call_subprocess
from pip.utils.logging import indent_log
@@ -270,7 +271,7 @@ def get_rev_options(url, rev):
else:
rev_options = []
r = urlparse.urlsplit(url)
r = urllib_parse.urlsplit(url)
if hasattr(r, 'username'):
# >= Python-2.5
username, password = r.username, r.password
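(Aside: the hasattr(r, 'username') guard above only matters for very old Pythons; any current urlsplit result exposes the credentials as attributes. A quick sketch with a hypothetical URL:)

from urllib.parse import urlsplit   # urllib_parse.urlsplit via six.moves on Python 2

r = urlsplit("https://user:secret@svn.example.com/repo/trunk")    # hypothetical
print(r.username, r.password)   # user secret
print(r.hostname, r.path)       # svn.example.com /repo/trunk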

View File

@@ -1,6 +1,6 @@
import textwrap
from pip.compat import urllib
from pip._vendor.six.moves.urllib import parse as urllib_parse
from tests.lib import pyversion
@@ -60,7 +60,7 @@ def test_file_index_url_quoting(script, data):
"""
Test url quoting of file index url with a space
"""
index_url = data.index_url(urllib.quote("in dex"))
index_url = data.index_url(urllib_parse.quote("in dex"))
result = script.pip(
'install', '-vvv', '--index-url', index_url, 'simple',
expect_error=False,

View File

@@ -2,8 +2,10 @@ from __future__ import absolute_import
import os
import subprocess
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.vcs import subversion, git, bazaar, mercurial
from pip.compat import urlretrieve
from tests.lib import path_to_url
@@ -18,7 +20,7 @@ def _create_initools_repository(directory):
def _dump_initools_repository(directory):
filename, _ = urlretrieve(
filename, _ = urllib_request.urlretrieve(
'http://bitbucket.org/hltbra/pip-initools-dump/raw/8b55c908a320/'
'INITools_modified.dump'
)

View File

@@ -4,7 +4,8 @@ import sys
import subprocess
from os.path import dirname, abspath
from pip.compat import urllib
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import rmtree
@@ -17,7 +18,7 @@ else:
def all_projects():
data = urllib.urlopen('http://pypi.python.org/simple/').read()
data = urllib_request.urlopen('http://pypi.python.org/simple/').read()
projects = [m.group(1) for m in re.finditer(r'<a.*?>(.+)</a>', data)]
return projects

View File

@@ -4,11 +4,12 @@ from io import BytesIO
from shutil import rmtree, copy
from tempfile import mkdtemp
from pip._vendor.six.moves.urllib import request as urllib_request
from mock import Mock, patch
import pytest
import pip
from pip.compat import pathname2url
from pip.exceptions import HashMismatch
from pip.download import (
PipSession, SafeFileCache, path_to_url, unpack_http_url, url_to_path,
@@ -124,7 +125,7 @@ def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):
def test_path_to_url_unix():
assert path_to_url('/tmp/file') == 'file:///tmp/file'
path = os.path.join(os.getcwd(), 'file')
assert path_to_url('file') == 'file://' + pathname2url(path)
assert path_to_url('file') == 'file://' + urllib_request.pathname2url(path)
@pytest.mark.skipif("sys.platform == 'win32'")
@@ -137,7 +138,7 @@ def test_path_to_url_win():
assert path_to_url('c:/tmp/file') == 'file:///c:/tmp/file'
assert path_to_url('c:\\tmp\\file') == 'file:///c:/tmp/file'
path = os.path.join(os.getcwd(), 'file')
assert path_to_url('file') == 'file:' + pathname2url(path)
assert path_to_url('file') == 'file:' + urllib_request.pathname2url(path)
@pytest.mark.skipif("sys.platform != 'win32'")