2009-11-21 00:39:44 +01:00
|
|
|
"""Routines related to PyPI, indexes"""
|
2014-08-31 01:52:28 +02:00
|
|
|
from __future__ import absolute_import
|
2009-11-21 00:39:44 +01:00
|
|
|
|
2014-08-31 01:52:28 +02:00
|
|
|
import logging
|
2009-11-21 00:39:44 +01:00
|
|
|
import sys
|
|
|
|
import os
|
|
|
|
import re
|
2010-08-10 12:04:40 +02:00
|
|
|
import mimetypes
|
2009-11-21 00:39:44 +01:00
|
|
|
import posixpath
|
2014-08-31 01:52:28 +02:00
|
|
|
import warnings
|
2013-02-16 19:02:41 +01:00
|
|
|
|
2014-09-12 00:40:45 +02:00
|
|
|
from pip._vendor.six.moves.urllib import parse as urllib_parse
|
|
|
|
from pip._vendor.six.moves.urllib import request as urllib_request
|
|
|
|
|
2014-08-31 01:52:28 +02:00
|
|
|
from pip.utils import Inf, normalize_name, splitext, is_prerelease
|
2014-09-18 04:04:42 +02:00
|
|
|
from pip.utils.deprecation import RemovedInPip7Warning
|
2014-08-31 01:52:28 +02:00
|
|
|
from pip.utils.logging import indent_log
|
2014-01-27 15:07:10 +01:00
|
|
|
from pip.exceptions import (
|
|
|
|
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
|
|
|
|
UnsupportedWheel,
|
|
|
|
)
|
2014-05-07 01:25:44 +02:00
|
|
|
from pip.download import url_to_path, path_to_url
|
2014-01-07 12:43:10 +01:00
|
|
|
from pip.wheel import Wheel, wheel_ext
|
2013-06-30 20:15:03 +02:00
|
|
|
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
|
2014-01-07 10:47:04 +01:00
|
|
|
from pip._vendor import html5lib, requests, pkg_resources
|
2013-10-23 03:32:25 +02:00
|
|
|
from pip._vendor.requests.exceptions import SSLError
|
2013-08-18 05:50:32 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
# Public API of this module.
__all__ = ['PackageFinder']
|
|
|
|
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2014-08-11 06:21:02 +02:00
|
|
|
# Hostnames for which a plain-http transport is not reported as a risk.
LOCAL_HOSTNAMES = ('localhost', '127.0.0.1')
# Maps an insecure transport scheme to the secure scheme(s) that should be
# suggested in its place when warning about a location.
INSECURE_SCHEMES = {
    "http": ["https"],
}
|
|
|
|
|
2010-08-16 01:46:23 +02:00
|
|
|
|
2014-08-31 01:52:28 +02:00
|
|
|
# Module-level logger; output is routed through pip's logging configuration.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
class PackageFinder(object):
    """This finds packages.

    This is meant to match easy_install's technique for looking for
    packages, by reading index pages and looking for appropriate links
    to downloadable archives and wheels.
    """
|
|
|
|
|
2010-08-16 01:46:23 +02:00
|
|
|
def __init__(self, find_links, index_urls,
|
2014-01-27 15:07:10 +01:00
|
|
|
use_wheel=True, allow_external=[], allow_unverified=[],
|
|
|
|
allow_all_external=False, allow_all_prereleases=False,
|
2014-08-01 22:20:23 +02:00
|
|
|
process_dependency_links=False, session=None):
|
2014-05-07 01:25:44 +02:00
|
|
|
if session is None:
|
|
|
|
raise TypeError(
|
|
|
|
"PackageFinder() missing 1 required keyword argument: "
|
|
|
|
"'session'"
|
|
|
|
)
|
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
self.find_links = find_links
|
|
|
|
self.index_urls = index_urls
|
2014-08-01 22:20:23 +02:00
|
|
|
self.dependency_links = []
|
2014-04-24 13:29:57 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
# These are boring links that have already been logged somehow:
|
|
|
|
self.logged_links = set()
|
2013-07-25 10:43:26 +02:00
|
|
|
|
2012-10-02 07:50:24 +02:00
|
|
|
self.use_wheel = use_wheel
|
2013-04-05 23:21:11 +02:00
|
|
|
|
2013-06-03 00:28:51 +02:00
|
|
|
# Do we allow (safe and verifiable) externally hosted files?
|
2013-06-07 15:48:34 +02:00
|
|
|
self.allow_external = set(normalize_name(n) for n in allow_external)
|
2013-06-02 19:03:56 +02:00
|
|
|
|
2013-06-06 02:26:14 +02:00
|
|
|
# Which names are allowed to install insecure and unverifiable files?
|
2013-10-27 04:47:57 +01:00
|
|
|
self.allow_unverified = set(
|
|
|
|
normalize_name(n) for n in allow_unverified
|
|
|
|
)
|
2013-06-03 00:28:51 +02:00
|
|
|
|
2014-01-10 16:35:55 +01:00
|
|
|
# Anything that is allowed unverified is also allowed external
|
|
|
|
self.allow_external |= self.allow_unverified
|
|
|
|
|
2013-06-07 15:48:34 +02:00
|
|
|
# Do we allow all (safe and verifiable) externally hosted files?
|
|
|
|
self.allow_all_external = allow_all_external
|
|
|
|
|
2013-06-02 19:03:56 +02:00
|
|
|
# Stores if we ignored any external links so that we can instruct
|
|
|
|
# end users how to install them if no distributions are available
|
|
|
|
self.need_warn_external = False
|
|
|
|
|
2013-06-03 00:28:51 +02:00
|
|
|
# Stores if we ignored any unsafe links so that we can instruct
|
|
|
|
# end users how to install them if no distributions are available
|
2013-10-27 04:53:21 +01:00
|
|
|
self.need_warn_unverified = False
|
2013-06-03 00:28:51 +02:00
|
|
|
|
2013-07-11 18:21:30 +02:00
|
|
|
# Do we want to allow _all_ pre-releases?
|
|
|
|
self.allow_all_prereleases = allow_all_prereleases
|
|
|
|
|
2014-08-01 22:20:23 +02:00
|
|
|
# Do we process dependency links?
|
|
|
|
self.process_dependency_links = process_dependency_links
|
|
|
|
|
2013-08-16 14:04:27 +02:00
|
|
|
# The Session we'll use to make requests
|
2014-05-07 01:25:44 +02:00
|
|
|
self.session = session
|
2013-08-16 14:04:27 +02:00
|
|
|
|
2014-08-01 22:20:23 +02:00
|
|
|
def add_dependency_links(self, links):
|
|
|
|
# # FIXME: this shouldn't be global list this, it should only
|
|
|
|
# # apply to requirements of the package that specifies the
|
|
|
|
# # dependency_links value
|
|
|
|
# # FIXME: also, we should track comes_from (i.e., use Link)
|
|
|
|
if self.process_dependency_links:
|
2014-08-31 01:52:28 +02:00
|
|
|
warnings.warn(
|
|
|
|
"Dependency Links processing has been deprecated and will be "
|
|
|
|
"removed in a future release.",
|
2014-09-18 04:04:42 +02:00
|
|
|
RemovedInPip7Warning,
|
2014-08-31 01:52:28 +02:00
|
|
|
)
|
2014-08-01 22:20:23 +02:00
|
|
|
self.dependency_links.extend(links)
|
|
|
|
|
2012-09-17 02:41:27 +02:00
|
|
|
def _sort_locations(self, locations):
|
2010-05-18 02:33:13 +02:00
|
|
|
"""
|
|
|
|
Sort locations into "files" (archives) and "urls", and return
|
|
|
|
a pair of lists (files,urls)
|
|
|
|
"""
|
|
|
|
files = []
|
|
|
|
urls = []
|
|
|
|
|
2012-09-17 02:41:27 +02:00
|
|
|
# puts the url for the given file path into the appropriate list
|
2010-05-18 02:33:13 +02:00
|
|
|
def sort_path(path):
|
2013-11-19 07:12:41 +01:00
|
|
|
url = path_to_url(path)
|
2010-08-10 12:04:40 +02:00
|
|
|
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
|
2010-05-18 02:33:13 +02:00
|
|
|
urls.append(url)
|
2010-05-25 17:54:05 +02:00
|
|
|
else:
|
2010-05-18 02:33:13 +02:00
|
|
|
files.append(url)
|
|
|
|
|
|
|
|
for url in locations:
|
2012-09-17 02:41:27 +02:00
|
|
|
|
|
|
|
is_local_path = os.path.exists(url)
|
|
|
|
is_file_url = url.startswith('file:')
|
|
|
|
is_find_link = url in self.find_links
|
|
|
|
|
|
|
|
if is_local_path or is_file_url:
|
|
|
|
if is_local_path:
|
|
|
|
path = url
|
|
|
|
else:
|
|
|
|
path = url_to_path(url)
|
|
|
|
if is_find_link and os.path.isdir(path):
|
|
|
|
path = os.path.realpath(path)
|
|
|
|
for item in os.listdir(path):
|
|
|
|
sort_path(os.path.join(path, item))
|
2012-09-17 07:41:51 +02:00
|
|
|
elif is_file_url and os.path.isdir(path):
|
2012-09-17 02:41:27 +02:00
|
|
|
urls.append(url)
|
|
|
|
elif os.path.isfile(path):
|
|
|
|
sort_path(path)
|
2010-05-18 02:33:13 +02:00
|
|
|
else:
|
|
|
|
urls.append(url)
|
2012-09-17 02:41:27 +02:00
|
|
|
|
2010-05-18 02:33:13 +02:00
|
|
|
return files, urls
|
|
|
|
|
2012-11-04 05:32:54 +01:00
|
|
|
def _link_sort_key(self, link_tuple):
|
|
|
|
"""
|
|
|
|
Function used to generate link sort key for link tuples.
|
2013-04-02 07:44:46 +02:00
|
|
|
The greater the return value, the more preferred it is.
|
|
|
|
If not finding wheels, then sorted by version only.
|
|
|
|
If finding wheels, then the sort order is by version, then:
|
|
|
|
1. existing installs
|
|
|
|
2. wheels ordered via Wheel.support_index_min()
|
|
|
|
3. source archives
|
|
|
|
Note: it was considered to embed this logic into the Link
|
|
|
|
comparison operators, but then different sdist links
|
|
|
|
with the same version, would have to be considered equal
|
2012-11-04 05:32:54 +01:00
|
|
|
"""
|
2013-04-02 07:44:46 +02:00
|
|
|
parsed_version, link, _ = link_tuple
|
2012-11-04 06:10:58 +01:00
|
|
|
if self.use_wheel:
|
2013-04-02 07:44:46 +02:00
|
|
|
support_num = len(supported_tags)
|
2013-11-06 18:35:57 +01:00
|
|
|
if link == INSTALLED_VERSION:
|
2012-11-04 06:10:58 +01:00
|
|
|
pri = 1
|
2013-11-15 01:35:24 +01:00
|
|
|
elif link.ext == wheel_ext:
|
2014-01-27 15:07:10 +01:00
|
|
|
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
|
2013-11-15 01:35:24 +01:00
|
|
|
if not wheel.supported():
|
2014-01-27 15:07:10 +01:00
|
|
|
raise UnsupportedWheel(
|
|
|
|
"%s is not a supported wheel for this platform. It "
|
|
|
|
"can't be sorted." % wheel.filename
|
|
|
|
)
|
2013-11-15 01:35:24 +01:00
|
|
|
pri = -(wheel.support_index_min())
|
2014-01-27 15:07:10 +01:00
|
|
|
else: # sdist
|
2013-04-02 07:44:46 +02:00
|
|
|
pri = -(support_num)
|
2012-11-04 05:32:54 +01:00
|
|
|
return (parsed_version, pri)
|
|
|
|
else:
|
|
|
|
return parsed_version
|
|
|
|
|
2013-04-02 07:44:46 +02:00
|
|
|
def _sort_versions(self, applicable_versions):
|
|
|
|
"""
|
2014-01-27 15:07:10 +01:00
|
|
|
Bring the latest version (and wheels) to the front, but maintain the
|
|
|
|
existing ordering as secondary. See the docstring for `_link_sort_key`
|
|
|
|
for details. This function is isolated for easier unit testing.
|
2013-04-02 07:44:46 +02:00
|
|
|
"""
|
2014-01-27 15:07:10 +01:00
|
|
|
return sorted(
|
|
|
|
applicable_versions,
|
|
|
|
key=self._link_sort_key,
|
|
|
|
reverse=True
|
|
|
|
)
|
2013-04-02 07:44:46 +02:00
|
|
|
|
2014-08-11 06:21:02 +02:00
|
|
|
def _warn_about_insecure_transport_scheme(self, logger, location):
|
|
|
|
# Determine if this url used a secure transport mechanism
|
2014-09-12 00:40:45 +02:00
|
|
|
parsed = urllib_parse.urlparse(str(location))
|
2014-08-11 06:21:02 +02:00
|
|
|
if parsed.scheme in INSECURE_SCHEMES:
|
|
|
|
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
|
|
|
|
|
|
|
|
if parsed.hostname in LOCAL_HOSTNAMES:
|
|
|
|
# localhost is not a security risk
|
|
|
|
pass
|
|
|
|
elif len(secure_schemes) == 1:
|
|
|
|
ctx = (location, parsed.scheme, secure_schemes[0],
|
|
|
|
parsed.netloc)
|
|
|
|
logger.warn("%s uses an insecure transport scheme (%s). "
|
|
|
|
"Consider using %s if %s has it available" %
|
|
|
|
ctx)
|
|
|
|
elif len(secure_schemes) > 1:
|
|
|
|
ctx = (
|
|
|
|
location,
|
|
|
|
parsed.scheme,
|
|
|
|
", ".join(secure_schemes),
|
|
|
|
parsed.netloc,
|
|
|
|
)
|
|
|
|
logger.warn("%s uses an insecure transport scheme (%s). "
|
|
|
|
"Consider using one of %s if %s has any of "
|
|
|
|
"them available" % ctx)
|
|
|
|
else:
|
|
|
|
ctx = (location, parsed.scheme)
|
|
|
|
logger.warn("%s uses an insecure transport scheme (%s)." %
|
|
|
|
ctx)
|
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
def find_requirement(self, req, upgrade):
|
2012-10-11 06:47:27 +02:00
|
|
|
|
2012-10-02 20:19:56 +02:00
|
|
|
def mkurl_pypi_url(url):
|
|
|
|
loc = posixpath.join(url, url_name)
|
|
|
|
# For maximum compatibility with easy_install, ensure the path
|
|
|
|
# ends in a trailing slash. Although this isn't in the spec
|
|
|
|
# (and PyPI can handle it without the slash) some other index
|
2014-01-27 15:07:10 +01:00
|
|
|
# implementations might break if they relied on easy_install's
|
|
|
|
# behavior.
|
2012-10-02 20:19:56 +02:00
|
|
|
if not loc.endswith('/'):
|
|
|
|
loc = loc + '/'
|
|
|
|
return loc
|
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
url_name = req.url_name
|
|
|
|
# Only check main index if index URL is given:
|
|
|
|
main_index_url = None
|
|
|
|
if self.index_urls:
|
|
|
|
# Check that we have the url_name correctly spelled:
|
2014-01-27 15:07:10 +01:00
|
|
|
main_index_url = Link(
|
|
|
|
mkurl_pypi_url(self.index_urls[0]),
|
|
|
|
trusted=True,
|
|
|
|
)
|
2014-04-24 13:29:57 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
page = self._get_page(main_index_url, req)
|
|
|
|
if page is None:
|
2014-01-27 15:07:10 +01:00
|
|
|
url_name = self._find_url_name(
|
|
|
|
Link(self.index_urls[0], trusted=True),
|
|
|
|
url_name, req
|
|
|
|
) or req.url_name
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
if url_name is not None:
|
|
|
|
locations = [
|
|
|
|
mkurl_pypi_url(url)
|
2013-07-25 10:43:26 +02:00
|
|
|
for url in self.index_urls] + self.find_links
|
2009-11-21 00:39:44 +01:00
|
|
|
else:
|
|
|
|
locations = list(self.find_links)
|
|
|
|
for version in req.absolute_versions:
|
|
|
|
if url_name is not None and main_index_url is not None:
|
|
|
|
locations = [
|
|
|
|
posixpath.join(main_index_url.url, version)] + locations
|
2010-05-18 02:33:13 +02:00
|
|
|
|
|
|
|
file_locations, url_locations = self._sort_locations(locations)
|
2014-08-01 22:20:23 +02:00
|
|
|
_flocations, _ulocations = self._sort_locations(self.dependency_links)
|
|
|
|
file_locations.extend(_flocations)
|
2013-06-03 00:28:51 +02:00
|
|
|
|
|
|
|
# We trust every url that the user has given us whether it was given
|
2013-07-25 10:43:26 +02:00
|
|
|
# via --index-url or --find-links
|
2013-06-03 00:28:51 +02:00
|
|
|
locations = [Link(url, trusted=True) for url in url_locations]
|
|
|
|
|
2014-08-01 22:20:23 +02:00
|
|
|
# We explicitly do not trust links that came from dependency_links
|
|
|
|
locations.extend([Link(url) for url in _ulocations])
|
|
|
|
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug('URLs to search for versions for %s:', req)
|
2009-11-21 00:39:44 +01:00
|
|
|
for location in locations:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug('* %s', location)
|
2014-08-11 06:21:02 +02:00
|
|
|
self._warn_about_insecure_transport_scheme(logger, location)
|
2013-08-08 07:18:41 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
found_versions = []
|
|
|
|
found_versions.extend(
|
|
|
|
self._package_versions(
|
2013-06-03 00:28:51 +02:00
|
|
|
# We trust every directly linked archive in find_links
|
2014-01-27 15:07:10 +01:00
|
|
|
[Link(url, '-f', trusted=True) for url in self.find_links],
|
|
|
|
req.name.lower()
|
|
|
|
)
|
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
page_versions = []
|
|
|
|
for page in self._get_pages(locations, req):
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug('Analyzing links from page %s', page.url)
|
|
|
|
with indent_log():
|
2014-01-27 15:07:10 +01:00
|
|
|
page_versions.extend(
|
|
|
|
self._package_versions(page.links, req.name.lower())
|
|
|
|
)
|
2014-08-01 22:20:23 +02:00
|
|
|
dependency_versions = list(self._package_versions(
|
|
|
|
[Link(url) for url in self.dependency_links], req.name.lower()))
|
|
|
|
if dependency_versions:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
'dependency_links found: %s',
|
2014-08-01 22:20:23 +02:00
|
|
|
', '.join([
|
|
|
|
link.url for p, link, version in dependency_versions
|
|
|
|
])
|
|
|
|
)
|
2014-01-27 15:07:10 +01:00
|
|
|
file_versions = list(
|
|
|
|
self._package_versions(
|
|
|
|
[Link(url) for url in file_locations],
|
|
|
|
req.name.lower()
|
|
|
|
)
|
|
|
|
)
|
|
|
|
if (not found_versions
|
|
|
|
and not page_versions
|
2014-08-01 22:20:23 +02:00
|
|
|
and not dependency_versions
|
2014-01-27 15:07:10 +01:00
|
|
|
and not file_versions):
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.critical(
|
|
|
|
'Could not find any downloads that satisfy the requirement %s',
|
|
|
|
req,
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2013-06-02 19:03:56 +02:00
|
|
|
|
|
|
|
if self.need_warn_external:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.warning(
|
2014-05-08 18:13:48 +02:00
|
|
|
"Some externally hosted files were ignored as access to "
|
|
|
|
"them may be unreliable (use --allow-external %s to "
|
2014-08-31 01:52:28 +02:00
|
|
|
"allow).",
|
|
|
|
req.name,
|
2014-05-08 18:13:48 +02:00
|
|
|
)
|
2013-06-02 19:03:56 +02:00
|
|
|
|
2013-10-27 04:53:21 +01:00
|
|
|
if self.need_warn_unverified:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.warning(
|
|
|
|
"Some insecure and unverifiable files were ignored"
|
|
|
|
" (use --allow-unverified %s to allow).",
|
|
|
|
req.name,
|
|
|
|
)
|
2013-06-03 00:28:51 +02:00
|
|
|
|
2014-01-27 15:07:10 +01:00
|
|
|
raise DistributionNotFound(
|
|
|
|
'No distributions at all found for %s' % req
|
|
|
|
)
|
2012-10-14 08:03:09 +02:00
|
|
|
installed_version = []
|
2009-11-21 00:39:44 +01:00
|
|
|
if req.satisfied_by is not None:
|
2014-01-27 15:07:10 +01:00
|
|
|
installed_version = [(
|
|
|
|
req.satisfied_by.parsed_version,
|
|
|
|
INSTALLED_VERSION,
|
|
|
|
req.satisfied_by.version,
|
|
|
|
)]
|
2009-11-21 00:39:44 +01:00
|
|
|
if file_versions:
|
|
|
|
file_versions.sort(reverse=True)
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
'Local files found: %s',
|
2014-01-27 15:07:10 +01:00
|
|
|
', '.join([
|
|
|
|
url_to_path(link.url)
|
2014-04-03 04:39:43 +02:00
|
|
|
for _, link, _ in file_versions
|
2014-01-27 15:07:10 +01:00
|
|
|
])
|
|
|
|
)
|
2014-03-26 23:24:19 +01:00
|
|
|
# this is an intentional priority ordering
|
2014-01-27 15:07:10 +01:00
|
|
|
all_versions = installed_version + file_versions + found_versions \
|
2014-08-01 22:20:23 +02:00
|
|
|
+ page_versions + dependency_versions
|
2009-11-21 00:39:44 +01:00
|
|
|
applicable_versions = []
|
|
|
|
for (parsed_version, link, version) in all_versions:
|
|
|
|
if version not in req.req:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
"Ignoring link %s, version %s doesn't match %s",
|
|
|
|
link,
|
|
|
|
version,
|
|
|
|
','.join([''.join(s) for s in req.req.specs]),
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
continue
|
2014-01-27 15:07:10 +01:00
|
|
|
elif (is_prerelease(version)
|
|
|
|
and not (self.allow_all_prereleases or req.prereleases)):
|
2013-07-24 01:19:19 +02:00
|
|
|
# If this version isn't the already installed one, then
|
|
|
|
# ignore it if it's a pre-release.
|
2013-11-06 18:35:57 +01:00
|
|
|
if link is not INSTALLED_VERSION:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
2014-01-27 15:07:10 +01:00
|
|
|
"Ignoring link %s, version %s is a pre-release (use "
|
2014-08-31 01:52:28 +02:00
|
|
|
"--pre to allow).",
|
|
|
|
link,
|
|
|
|
version,
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2013-07-24 01:19:19 +02:00
|
|
|
continue
|
2012-09-23 23:58:14 +02:00
|
|
|
applicable_versions.append((parsed_version, link, version))
|
2013-04-02 07:44:46 +02:00
|
|
|
applicable_versions = self._sort_versions(applicable_versions)
|
2014-01-27 15:07:10 +01:00
|
|
|
existing_applicable = bool([
|
|
|
|
link
|
|
|
|
for parsed_version, link, version in applicable_versions
|
|
|
|
if link is INSTALLED_VERSION
|
|
|
|
])
|
2009-11-21 00:39:44 +01:00
|
|
|
if not upgrade and existing_applicable:
|
2013-11-06 18:35:57 +01:00
|
|
|
if applicable_versions[0][1] is INSTALLED_VERSION:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
2014-01-27 15:07:10 +01:00
|
|
|
'Existing installed version (%s) is most up-to-date and '
|
2014-08-31 01:52:28 +02:00
|
|
|
'satisfies requirement',
|
|
|
|
req.satisfied_by.version,
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
else:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
2014-01-27 15:07:10 +01:00
|
|
|
'Existing installed version (%s) satisfies requirement '
|
2014-08-31 01:52:28 +02:00
|
|
|
'(most up-to-date version is %s)',
|
|
|
|
req.satisfied_by.version,
|
|
|
|
applicable_versions[0][2],
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
return None
|
|
|
|
if not applicable_versions:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.critical(
|
2014-01-27 15:07:10 +01:00
|
|
|
'Could not find a version that satisfies the requirement %s '
|
2014-08-31 01:52:28 +02:00
|
|
|
'(from versions: %s)',
|
|
|
|
req,
|
|
|
|
', '.join(
|
|
|
|
sorted(set([
|
|
|
|
version
|
|
|
|
for parsed_version, link, version in all_versions
|
|
|
|
]))),
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2013-06-02 19:03:56 +02:00
|
|
|
|
|
|
|
if self.need_warn_external:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.warning(
|
2014-05-08 18:13:48 +02:00
|
|
|
"Some externally hosted files were ignored as access to "
|
|
|
|
"them may be unreliable (use --allow-external to allow)."
|
|
|
|
)
|
2013-06-02 19:03:56 +02:00
|
|
|
|
2013-10-27 04:53:21 +01:00
|
|
|
if self.need_warn_unverified:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.warning(
|
|
|
|
"Some insecure and unverifiable files were ignored"
|
|
|
|
" (use --allow-unverified %s to allow).",
|
|
|
|
req.name,
|
|
|
|
)
|
2013-06-03 00:28:51 +02:00
|
|
|
|
2014-01-27 15:07:10 +01:00
|
|
|
raise DistributionNotFound(
|
|
|
|
'No distributions matching the version for %s' % req
|
|
|
|
)
|
2013-11-06 18:35:57 +01:00
|
|
|
if applicable_versions[0][1] is INSTALLED_VERSION:
|
2009-11-21 00:39:44 +01:00
|
|
|
# We have an existing version, and its the best version
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
2014-01-27 15:07:10 +01:00
|
|
|
'Installed version (%s) is most up-to-date (past versions: '
|
2014-08-31 01:52:28 +02:00
|
|
|
'%s)',
|
|
|
|
req.satisfied_by.version,
|
|
|
|
', '.join([
|
|
|
|
version for parsed_version, link, version
|
|
|
|
in applicable_versions[1:]
|
|
|
|
]) or 'none'),
|
2011-08-24 20:07:16 +02:00
|
|
|
raise BestVersionAlreadyInstalled
|
2009-11-21 00:39:44 +01:00
|
|
|
if len(applicable_versions) > 1:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
'Using version %s (newest of versions: %s)',
|
|
|
|
applicable_versions[0][2],
|
|
|
|
', '.join([
|
|
|
|
version for parsed_version, link, version
|
|
|
|
in applicable_versions
|
|
|
|
])
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2013-06-02 20:31:43 +02:00
|
|
|
|
|
|
|
selected_version = applicable_versions[0][1]
|
|
|
|
|
2013-06-03 00:28:51 +02:00
|
|
|
if (selected_version.verifiable is not None
|
|
|
|
and not selected_version.verifiable):
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.warning(
|
|
|
|
"%s is potentially insecure and unverifiable.", req.name,
|
|
|
|
)
|
2013-06-03 00:28:51 +02:00
|
|
|
|
2013-09-28 18:02:16 +02:00
|
|
|
if selected_version._deprecated_regex:
|
2014-08-31 01:52:28 +02:00
|
|
|
warnings.warn(
|
|
|
|
"%s discovered using a deprecated method of parsing, in the "
|
|
|
|
"future it will no longer be discovered." % req.name,
|
2014-09-18 04:04:42 +02:00
|
|
|
RemovedInPip7Warning,
|
2013-09-28 18:02:16 +02:00
|
|
|
)
|
|
|
|
|
2013-06-02 20:31:43 +02:00
|
|
|
return selected_version
|
2012-09-23 23:58:14 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
def _find_url_name(self, index_url, url_name, req):
|
2014-01-27 15:07:10 +01:00
|
|
|
"""
|
|
|
|
Finds the true URL name of a package, when the given name isn't quite
|
|
|
|
correct.
|
|
|
|
This is usually used to implement case-insensitivity.
|
|
|
|
"""
|
2009-11-21 00:39:44 +01:00
|
|
|
if not index_url.url.endswith('/'):
|
|
|
|
# Vaguely part of the PyPI API... weird but true.
|
2014-03-26 23:24:19 +01:00
|
|
|
# FIXME: bad to modify this?
|
2009-11-21 00:39:44 +01:00
|
|
|
index_url.url += '/'
|
|
|
|
page = self._get_page(index_url, req)
|
|
|
|
if page is None:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.critical('Cannot fetch index base URL %s', index_url)
|
2009-11-21 00:39:44 +01:00
|
|
|
return
|
|
|
|
norm_name = normalize_name(req.url_name)
|
|
|
|
for link in page.links:
|
|
|
|
base = posixpath.basename(link.path.rstrip('/'))
|
|
|
|
if norm_name == normalize_name(base):
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
'Real name of requirement %s is %s', url_name, base,
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
return base
|
|
|
|
return None
|
|
|
|
|
|
|
|
def _get_pages(self, locations, req):
|
2013-08-17 05:07:42 +02:00
|
|
|
"""
|
|
|
|
Yields (page, page_url) from the given locations, skipping
|
|
|
|
locations that have errors, and adding download/homepage links
|
|
|
|
"""
|
|
|
|
all_locations = list(locations)
|
2009-11-21 00:39:44 +01:00
|
|
|
seen = set()
|
2013-08-17 05:07:42 +02:00
|
|
|
|
|
|
|
while all_locations:
|
|
|
|
location = all_locations.pop(0)
|
2009-11-21 00:39:44 +01:00
|
|
|
if location in seen:
|
|
|
|
continue
|
|
|
|
seen.add(location)
|
2013-08-17 05:07:42 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
page = self._get_page(location, req)
|
|
|
|
if page is None:
|
|
|
|
continue
|
2013-08-17 05:07:42 +02:00
|
|
|
|
|
|
|
yield page
|
|
|
|
|
2013-03-08 00:08:51 +01:00
|
|
|
for link in page.rel_links():
|
2013-06-07 15:48:34 +02:00
|
|
|
normalized = normalize_name(req.name).lower()
|
|
|
|
|
2014-03-26 23:24:19 +01:00
|
|
|
if (normalized not in self.allow_external
|
2013-06-07 15:48:34 +02:00
|
|
|
and not self.allow_all_external):
|
2013-06-03 00:28:51 +02:00
|
|
|
self.need_warn_external = True
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug(
|
|
|
|
"Not searching %s for files because external "
|
|
|
|
"urls are disallowed.",
|
|
|
|
link,
|
|
|
|
)
|
2013-06-03 00:28:51 +02:00
|
|
|
continue
|
|
|
|
|
|
|
|
if (link.trusted is not None
|
|
|
|
and not link.trusted
|
2014-03-26 23:24:19 +01:00
|
|
|
and normalized not in self.allow_unverified):
|
2014-01-27 15:07:10 +01:00
|
|
|
logger.debug(
|
|
|
|
"Not searching %s for urls, it is an "
|
|
|
|
"untrusted link and cannot produce safe or "
|
2014-08-31 01:52:28 +02:00
|
|
|
"verifiable files.",
|
|
|
|
link,
|
2014-01-27 15:07:10 +01:00
|
|
|
)
|
2013-10-27 04:53:21 +01:00
|
|
|
self.need_warn_unverified = True
|
2013-06-03 00:28:51 +02:00
|
|
|
continue
|
|
|
|
|
2013-08-17 05:07:42 +02:00
|
|
|
all_locations.append(link)
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
    # Matches "#egg=<name>" fragments used to name direct download links.
    _egg_fragment_re = re.compile(r'#egg=([^&]*)')
    # Splits "<name>-<version>" out of an archive/egg basename.
    _egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
    # Trailing "-pyX.Y" marker naming the Python version a file targets.
    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
def _sort_links(self, links):
|
2014-01-27 15:07:10 +01:00
|
|
|
"""
|
|
|
|
Returns elements of links in order, non-egg links first, egg links
|
|
|
|
second, while eliminating duplicates
|
|
|
|
"""
|
2009-11-21 00:39:44 +01:00
|
|
|
eggs, no_eggs = [], []
|
2010-05-28 01:27:13 +02:00
|
|
|
seen = set()
|
2009-11-21 00:39:44 +01:00
|
|
|
for link in links:
|
2010-05-28 01:27:13 +02:00
|
|
|
if link not in seen:
|
|
|
|
seen.add(link)
|
|
|
|
if link.egg_fragment:
|
|
|
|
eggs.append(link)
|
|
|
|
else:
|
|
|
|
no_eggs.append(link)
|
2009-11-21 00:39:44 +01:00
|
|
|
return no_eggs + eggs
|
|
|
|
|
|
|
|
def _package_versions(self, links, search_name):
|
|
|
|
for link in self._sort_links(links):
|
2010-05-28 01:27:13 +02:00
|
|
|
for v in self._link_package_versions(link, search_name):
|
|
|
|
yield v
|
2012-11-04 05:32:54 +01:00
|
|
|
|
2012-10-02 07:50:24 +02:00
|
|
|
def _known_extensions(self):
|
2012-11-04 05:32:54 +01:00
|
|
|
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
|
2012-10-02 07:50:24 +02:00
|
|
|
if self.use_wheel:
|
2013-04-02 07:44:46 +02:00
|
|
|
return extensions + (wheel_ext,)
|
2012-10-02 07:50:24 +02:00
|
|
|
return extensions
|
2010-05-28 01:27:13 +02:00
|
|
|
|
|
|
|
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Returns an empty list when the link is not a usable candidate for
        *search_name*: wrong project name, unknown archive extension,
        unsupported wheel, disallowed external/unverified host, or a
        "-pyX.Y" marker for a different Python version.

        Meant to be overridden by subclasses, not called by clients.
        """
        platform = get_platform()

        version = None
        if link.egg_fragment:
            # An explicit "#egg=<name>-<version>" fragment names the file.
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                # No file extension at all -> not a downloadable archive.
                # logged_links keeps each skip reason logged only once.
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file', link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in self._known_extensions():
                if link not in self.logged_links:
                    logger.debug(
                        'Skipping link %s; unknown archive format: %s',
                        link,
                        ext,
                    )
                    self.logged_links.add(link)
                return []
            if "macosx10" in link.path and ext == '.zip':
                # Legacy OS X "macosx10*.zip" binary eggs are not usable.
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; macosx10 one', link)
                    self.logged_links.add(link)
                return []
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    logger.debug(
                        'Skipping %s because the wheel filename is invalid',
                        link
                    )
                    return []
                if wheel.name.lower() != search_name.lower():
                    logger.debug(
                        'Skipping link %s; wrong project name (not %s)',
                        link,
                        search_name,
                    )
                    return []
                if not wheel.supported():
                    logger.debug(
                        'Skipping %s because it is not compatible with this '
                        'Python',
                        link,
                    )
                    return []
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for
                # binary wheels on linux that deals with the inherent problems
                # of binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if (
                        (
                            not platform.startswith('win')
                            and not platform.startswith('macosx')
                            and not platform == 'cli'
                        )
                        and comes_from is not None
                        and urllib_parse.urlparse(
                            comes_from.url
                        ).netloc.endswith("pypi.python.org")):
                    if not wheel.supported(tags=supported_tags_noarch):
                        logger.debug(
                            "Skipping %s because it is a pypi-hosted binary "
                            "Wheel on an unsupported platform",
                            link,
                        )
                        return []
                version = wheel.version

        if not version:
            # Fall back to parsing "<name>-<version>" out of the basename.
            version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug(
                'Skipping link %s; wrong project name (not %s)',
                link,
                search_name,
            )
            return []

        if (link.internal is not None
                and not link.internal
                and not normalize_name(search_name).lower()
                in self.allow_external
                and not self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            # it unless we are allowing externals
            logger.debug("Skipping %s because it is externally hosted.", link)
            self.need_warn_external = True
            return []

        if (link.verifiable is not None
                and not link.verifiable
                and not (normalize_name(search_name).lower()
                         in self.allow_unverified)):
            # We have a link that we are sure we cannot verify its integrity,
            # so we should skip it unless we are allowing unsafe installs
            # for this requirement.
            logger.debug(
                "Skipping %s because it is an insecure and unverifiable file.",
                link,
            )
            self.need_warn_unverified = True
            return []

        match = self._py_version_re.search(version)
        if match:
            # Strip a trailing "-pyX.Y" marker and require it to match the
            # running interpreter's major.minor version string.
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug(
                    'Skipping %s because Python version is incorrect', link
                )
                return []
        logger.debug('Found link %s, version: %s', link, version)
        return [(
            pkg_resources.parse_version(version),
            link,
            version,
        )]
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
def _egg_info_matches(self, egg_info, search_name, link):
|
|
|
|
match = self._egg_info_re.search(egg_info)
|
|
|
|
if not match:
|
2014-08-31 01:52:28 +02:00
|
|
|
logger.debug('Could not parse version from link: %s', link)
|
2009-11-21 00:39:44 +01:00
|
|
|
return None
|
|
|
|
name = match.group(0).lower()
|
|
|
|
# To match the "safe" name that pkg_resources creates:
|
|
|
|
name = name.replace('_', '-')
|
2012-04-17 01:27:47 +02:00
|
|
|
# project name and version must be separated by a dash
|
|
|
|
look_for = search_name.lower() + "-"
|
|
|
|
if name.startswith(look_for):
|
|
|
|
return match.group(0)[len(look_for):]
|
2009-11-21 00:39:44 +01:00
|
|
|
else:
|
|
|
|
return None
|
|
|
|
|
|
|
|
def _get_page(self, link, req):
|
2014-04-24 13:29:57 +02:00
|
|
|
return HTMLPage.get_page(link, req, session=self.session)
|
2009-11-21 00:39:44 +01:00
|
|
|
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
class HTMLPage(object):
    """Represents one page, along with its URL"""

    # FIXME: these regexes are horrible hacks:
    _homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
    _download_re = re.compile(r'<th>\s*download\s+url', re.I)
    # Matches one href attribute, double-quoted, single-quoted or bare.
    _href_re = re.compile(
        'href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))',
        re.I | re.S
    )

    def __init__(self, content, url, headers=None, trusted=None):
        """Parse *content* (raw HTML text) that was retrieved from *url*.

        :param content: the raw HTML text of the page.
        :param url: the URL the page came from; relative links are
            resolved against it (via ``base_url``).
        :param headers: the HTTP response headers, if any.
        :param trusted: whether links found on this page may be trusted
            for verification purposes; None when unknown.
        """
        self.content = content
        # Parse once up front; the link-extraction properties below all
        # walk this tree.
        self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
        self.url = url
        self.headers = headers
        self.trusted = trusted

    def __str__(self):
        return self.url

    @classmethod
    def get_page(cls, link, req, skip_archives=True, session=None):
        """Fetch *link* and return an ``HTMLPage`` for it.

        Returns None (after logging) when the URL uses a VCS scheme,
        when *skip_archives* is set and the URL looks like (and serves)
        an archive, when the response is not HTML, or when the request
        fails. *session* is required (keyword-only by convention).
        """
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )

        url = link.url
        # Drop any fragment; it is irrelevant for fetching.
        url = url.split('#', 1)[0]

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %s URL %s', scheme, link)
                return None

        try:
            if skip_archives:
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        # Looks like an archive: issue a HEAD request to
                        # find out whether it is actually HTML.
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s',
                                link,
                                content_type,
                            )
                            return

            logger.debug('Getting page %s', url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urllib_parse.urlparse(url)
            if (scheme == 'file'
                    and os.path.isdir(urllib_request.url2pathname(path))):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urllib_parse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s', url)

            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement of an url. Unless we issue a HEAD request on every
            # url we cannot know ahead of time for sure if something is HTML
            # or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s',
                    link,
                    content_type,
                )
                return

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            # 404s are common enough to be logged at a higher (quieter)
            # verbosity level than other HTTP errors.
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req, link, reason, url,
                level=2,
                meth=logger.info,
            )
        else:
            return inst

    @staticmethod
    def _handle_fail(req, link, reason, url, level=1, meth=None):
        """Log (via *meth*, default ``logger.debug``) that *link* could
        not be fetched and will be skipped for requirement *req*."""
        if meth is None:
            meth = logger.debug

        meth("Could not fetch URL %s: %s", link, reason)
        meth("Will skip URL %s when looking for download links for %s" %
             (link.url, req))

    @staticmethod
    def _get_content_type(url, session):
        """Get the Content-Type of the given url, using a HEAD request"""
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
        if scheme not in ('http', 'https', 'ftp', 'ftps'):
            # FIXME: some warning or something?
            # assertion error?
            return ''

        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()

        return resp.headers.get("Content-Type", "")

    @property
    def api_version(self):
        # Index API version advertised via a <meta name="api-version">
        # tag, or None when absent/unparseable. Computed once, cached on
        # the instance as _api_version.
        if not hasattr(self, "_api_version"):
            _api_version = None

            metas = [
                x for x in self.parsed.findall(".//meta")
                if x.get("name", "").lower() == "api-version"
            ]
            if metas:
                try:
                    _api_version = int(metas[0].get("value", None))
                except (TypeError, ValueError):
                    _api_version = None
            self._api_version = _api_version
        return self._api_version

    @property
    def base_url(self):
        # URL that relative hrefs are resolved against: the first
        # <base href> on the page, or the page URL itself. Cached.
        if not hasattr(self, "_base_url"):
            bases = [
                x for x in self.parsed.findall(".//base")
                if x.get("href") is not None
            ]
            if bases and bases[0].get("href"):
                self._base_url = bases[0].get("href")
            else:
                self._base_url = self.url
        return self._base_url

    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(
                    urllib_parse.urljoin(self.base_url, href)
                )

                # Determine if this link is internal. If that distinction
                # doesn't make sense in this context, then we don't make
                # any distinction.
                internal = None
                if self.api_version and self.api_version >= 2:
                    # Only api_versions >= 2 have a distinction between
                    # external and internal links
                    internal = bool(
                        anchor.get("rel")
                        and "internal" in anchor.get("rel").split()
                    )

                yield Link(url, self, internal=internal)

    def rel_links(self):
        # Yield explicitly declared rel links first, then the ones
        # scraped with the legacy regexes.
        for url in self.explicit_rel_links():
            yield url
        for url in self.scraped_rel_links():
            yield url

    def explicit_rel_links(self, rels=('homepage', 'download')):
        """Yields all links with the given relations"""
        rels = set(rels)

        for anchor in self.parsed.findall(".//a"):
            if anchor.get("rel") and anchor.get("href"):
                found_rels = set(anchor.get("rel").split())
                # Determine the intersection between what rels were found and
                # what rels were being looked for
                if found_rels & rels:
                    href = anchor.get("href")
                    url = self.clean_link(
                        urllib_parse.urljoin(self.base_url, href)
                    )
                    # rel links are never trusted for verification.
                    yield Link(url, self, trusted=False)

    def scraped_rel_links(self):
        # Can we get rid of this horrible horrible method?
        # Scrapes home-page / download URLs out of the raw HTML with the
        # legacy regexes instead of the parsed tree; the Links it yields
        # are marked _deprecated_regex so callers can warn about them.
        for regex in (self._homepage_re, self._download_re):
            match = regex.search(self.content)
            if not match:
                continue
            href_match = self._href_re.search(self.content, pos=match.end())
            if not href_match:
                continue
            url = (
                href_match.group(1)
                or href_match.group(2)
                or href_match.group(3)
            )
            if not url:
                continue
            url = self.clean_link(urllib_parse.urljoin(self.base_url, url))
            yield Link(url, self, trusted=False, _deprecated_regex=True)

    # Any character NOT in this allowed set gets percent-encoded by
    # clean_link below.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
|
|
|
|
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
class Link(object):
|
|
|
|
|
2013-09-28 18:02:16 +02:00
|
|
|
def __init__(self, url, comes_from=None, internal=None, trusted=None,
|
2014-01-27 15:07:10 +01:00
|
|
|
_deprecated_regex=False):
|
2009-11-21 00:39:44 +01:00
|
|
|
self.url = url
|
|
|
|
self.comes_from = comes_from
|
2013-06-02 19:03:56 +02:00
|
|
|
self.internal = internal
|
2013-06-03 00:28:51 +02:00
|
|
|
self.trusted = trusted
|
2013-09-28 18:02:16 +02:00
|
|
|
self._deprecated_regex = _deprecated_regex
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
def __str__(self):
|
|
|
|
if self.comes_from:
|
|
|
|
return '%s (from %s)' % (self.url, self.comes_from)
|
|
|
|
else:
|
2012-09-23 23:58:14 +02:00
|
|
|
return str(self.url)
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
def __repr__(self):
|
|
|
|
return '<Link %s>' % self
|
|
|
|
|
|
|
|
def __eq__(self, other):
|
|
|
|
return self.url == other.url
|
|
|
|
|
2012-09-10 22:04:00 +02:00
|
|
|
def __ne__(self, other):
|
|
|
|
return self.url != other.url
|
|
|
|
|
|
|
|
def __lt__(self, other):
|
|
|
|
return self.url < other.url
|
|
|
|
|
|
|
|
def __le__(self, other):
|
|
|
|
return self.url <= other.url
|
|
|
|
|
|
|
|
def __gt__(self, other):
|
|
|
|
return self.url > other.url
|
|
|
|
|
|
|
|
def __ge__(self, other):
|
|
|
|
return self.url >= other.url
|
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
def __hash__(self):
|
|
|
|
return hash(self.url)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def filename(self):
|
2014-09-12 00:40:45 +02:00
|
|
|
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
|
2012-04-17 00:05:21 +02:00
|
|
|
name = posixpath.basename(path.rstrip('/')) or netloc
|
|
|
|
assert name, ('URL %r produced no filename' % self.url)
|
2009-11-21 00:39:44 +01:00
|
|
|
return name
|
|
|
|
|
|
|
|
@property
|
|
|
|
def scheme(self):
|
2014-09-12 00:40:45 +02:00
|
|
|
return urllib_parse.urlsplit(self.url)[0]
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
@property
|
|
|
|
def path(self):
|
2014-09-12 00:40:45 +02:00
|
|
|
return urllib_parse.urlsplit(self.url)[2]
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
def splitext(self):
|
|
|
|
return splitext(posixpath.basename(self.path.rstrip('/')))
|
|
|
|
|
2013-11-15 01:35:24 +01:00
|
|
|
@property
|
|
|
|
def ext(self):
|
|
|
|
return self.splitext()[1]
|
|
|
|
|
2012-04-05 22:55:26 +02:00
|
|
|
@property
|
|
|
|
def url_without_fragment(self):
|
2014-09-12 00:40:45 +02:00
|
|
|
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
|
|
|
|
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
|
2012-04-05 22:55:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
|
|
|
|
|
|
|
|
@property
|
|
|
|
def egg_fragment(self):
|
|
|
|
match = self._egg_fragment_re.search(self.url)
|
|
|
|
if not match:
|
|
|
|
return None
|
|
|
|
return match.group(1)
|
|
|
|
|
2014-01-27 15:07:10 +01:00
|
|
|
_hash_re = re.compile(
|
|
|
|
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
|
|
|
|
)
|
2009-11-21 00:39:44 +01:00
|
|
|
|
|
|
|
@property
|
2012-03-02 01:33:44 +01:00
|
|
|
def hash(self):
|
|
|
|
match = self._hash_re.search(self.url)
|
|
|
|
if match:
|
|
|
|
return match.group(2)
|
|
|
|
return None
|
|
|
|
|
|
|
|
@property
|
|
|
|
def hash_name(self):
|
|
|
|
match = self._hash_re.search(self.url)
|
2009-11-21 00:39:44 +01:00
|
|
|
if match:
|
|
|
|
return match.group(1)
|
|
|
|
return None
|
|
|
|
|
|
|
|
@property
|
|
|
|
def show_url(self):
|
|
|
|
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
|
|
|
|
|
2013-06-03 00:28:51 +02:00
|
|
|
@property
|
|
|
|
def verifiable(self):
|
|
|
|
"""
|
|
|
|
Returns True if this link can be verified after download, False if it
|
|
|
|
cannot, and None if we cannot determine.
|
|
|
|
"""
|
|
|
|
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
|
|
|
|
if trusted is not None and trusted:
|
|
|
|
# This link came from a trusted source. It *may* be verifiable but
|
|
|
|
# first we need to see if this page is operating under the new
|
|
|
|
# API version.
|
|
|
|
try:
|
|
|
|
api_version = getattr(self.comes_from, "api_version", None)
|
|
|
|
api_version = int(api_version)
|
|
|
|
except (ValueError, TypeError):
|
|
|
|
api_version = None
|
|
|
|
|
|
|
|
if api_version is None or api_version <= 1:
|
|
|
|
# This link is either trusted, or it came from a trusted,
|
|
|
|
# however it is not operating under the API version 2 so
|
|
|
|
# we can't make any claims about if it's safe or not
|
|
|
|
return
|
|
|
|
|
|
|
|
if self.hash:
|
|
|
|
# This link came from a trusted source and it has a hash, so we
|
|
|
|
# can consider it safe.
|
|
|
|
return True
|
|
|
|
else:
|
|
|
|
# This link came from a trusted source, using the new API
|
|
|
|
# version, and it does not have a hash. It is NOT verifiable
|
|
|
|
return False
|
|
|
|
elif trusted is not None:
|
|
|
|
# This link came from an untrusted source and we cannot trust it
|
|
|
|
return False
|
|
|
|
|
2013-11-06 18:35:57 +01:00
|
|
|
|
|
|
|
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
# NOTE(review): this relies on Inf comparing greater than any real URL
# string when Links are ordered by url.
INSTALLED_VERSION = Link(Inf)
|
2012-09-23 23:58:14 +02:00
|
|
|
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
def get_requirement_from_url(url):
    """Derive a requirement string from *url*, if possible.

    Prefers an explicit ``#egg=`` fragment; otherwise falls back to the
    URL's filename with its extension stripped.
    """
    link = Link(url)
    egg_info = link.egg_fragment or splitext(link.filename)[0]
    return package_to_requirement(egg_info)
|
|
|
|
|
2010-06-03 04:25:26 +02:00
|
|
|
|
2009-11-21 00:39:44 +01:00
|
|
|
def package_to_requirement(package_name):
    """Translate a name like ``Foo-1.2`` into the requirement ``Foo==1.2``.

    The version part must start with a digit or be the literal ``dev``;
    a name with no recognizable version is returned unchanged.
    """
    m = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if m:
        name, version = m.group(1), m.group(2)
    else:
        name, version = package_name, ''
    return '%s==%s' % (name, version) if version else name
|