Add filter_unallowed_hashes().

Chris Jerdonek 2019-07-10 22:53:16 -07:00
parent e85a848ec8
commit e80fc233ff
2 changed files with 88 additions and 18 deletions

src/pip/_internal/index.py

@@ -50,6 +50,7 @@ if MYPY_CHECK_RUNNING:
     from pip._internal.req import InstallRequirement
     from pip._internal.download import PipSession
     from pip._internal.pep425tags import Pep425Tag
+    from pip._internal.utils.hashes import Hashes
 
     BuildTag = Tuple[Any, ...]  # either empty tuple or Tuple[int, str]
     CandidateSortingKey = (
@@ -440,6 +441,45 @@ class LinkEvaluator(object):
         return (True, version)
 
 
+def filter_unallowed_hashes(
+    candidates,  # type: List[InstallationCandidate]
+    hashes,      # type: Hashes
+):
+    # type: (...) -> List[InstallationCandidate]
+    """
+    Filter out candidates whose hashes aren't allowed, and return a new
+    list of candidates.
+
+    If at least one candidate has an allowed hash, then all candidates with
+    either an allowed hash or no hash specified are returned. Otherwise,
+    the given candidates are returned.
+
+    Including the candidates with no hash specified when there is a match
+    allows a warning to be logged if there is a more preferred candidate
+    with no hash specified. Returning all candidates in the case of no
+    matches lets pip report the hash of the candidate that would otherwise
+    have been installed (e.g. permitting the user to more easily update
+    their requirements file with the desired hash).
+    """
+    applicable = []
+    found_allowed_hash = False
+    for candidate in candidates:
+        link = candidate.location
+        if not link.has_hash:
+            applicable.append(candidate)
+            continue
+
+        if link.is_hash_allowed(hashes=hashes):
+            found_allowed_hash = True
+            applicable.append(candidate)
+
+    if found_allowed_hash:
+        return applicable
+
+    # Make sure we're not returning back the given value.
+    return list(candidates)
+
+
 class CandidatePreferences(object):
 
     """

tests/unit/test_index.py

@@ -11,16 +11,29 @@ from pip._internal.index import (
     CandidateEvaluator, CandidatePreferences, FormatControl, HTMLPage, Link,
     LinkEvaluator, PackageFinder, _check_link_requires_python, _clean_link,
     _determine_base_url, _extract_version_from_fragment,
-    _find_name_version_sep, _get_html_page,
+    _find_name_version_sep, _get_html_page, filter_unallowed_hashes,
 )
 from pip._internal.models.candidate import InstallationCandidate
 from pip._internal.models.search_scope import SearchScope
 from pip._internal.models.selection_prefs import SelectionPreferences
 from pip._internal.models.target_python import TargetPython
 from pip._internal.pep425tags import get_supported
+from pip._internal.utils.hashes import Hashes
 from tests.lib import CURRENT_PY_VERSION_INFO, make_test_finder
 
 
+def make_mock_candidate(version, yanked_reason=None, hex_digest=None):
+    url = 'https://example.com/pkg-{}.tar.gz'.format(version)
+    if hex_digest is not None:
+        assert len(hex_digest) == 64
+        url += '#sha256={}'.format(hex_digest)
+    link = Link(url, yanked_reason=yanked_reason)
+    candidate = InstallationCandidate('mypackage', version, link)
+
+    return candidate
+
+
 @pytest.mark.parametrize('requires_python, expected', [
     ('== 3.6.4', False),
     ('== 3.6.5', True),
@@ -170,6 +183,30 @@ class TestLinkEvaluator:
         assert actual == expected
 
 
+@pytest.mark.parametrize('hex_digest, expected_versions', [
+    (None, ['1.0', '1.1', '1.2']),
+    (64 * 'a', ['1.0', '1.1']),
+    (64 * 'b', ['1.0', '1.2']),
+    (64 * 'c', ['1.0', '1.1', '1.2']),
+])
+def test_filter_unallowed_hashes(hex_digest, expected_versions):
+    candidates = [
+        make_mock_candidate('1.0'),
+        make_mock_candidate('1.1', hex_digest=(64 * 'a')),
+        make_mock_candidate('1.2', hex_digest=(64 * 'b')),
+    ]
+    hashes_data = {
+        'sha256': [hex_digest],
+    }
+    hashes = Hashes(hashes_data)
+    actual = filter_unallowed_hashes(candidates, hashes=hashes)
+
+    actual_versions = [str(candidate.version) for candidate in actual]
+    assert actual_versions == expected_versions
+    # Check that the return value is always different from the given value.
+    assert actual is not candidates
+
+
 class TestCandidateEvaluator:
 
     @pytest.mark.parametrize('allow_all_prereleases, prefer_binary', [
@@ -198,18 +235,11 @@ class TestCandidateEvaluator:
         expected_tags = get_supported()
         assert evaluator._supported_tags == expected_tags
 
-    def make_mock_candidate(self, version, yanked_reason=None):
-        url = 'https://example.com/pkg-{}.tar.gz'.format(version)
-        link = Link(url, yanked_reason=yanked_reason)
-        candidate = InstallationCandidate('mypackage', version, link)
-
-        return candidate
-
     def test_get_applicable_candidates(self):
         specifier = SpecifierSet('<= 1.11')
         versions = ['1.10', '1.11', '1.12']
         candidates = [
-            self.make_mock_candidate(version) for version in versions
+            make_mock_candidate(version) for version in versions
         ]
         evaluator = CandidateEvaluator.create()
         actual = evaluator.get_applicable_candidates(
@@ -226,7 +256,7 @@ class TestCandidateEvaluator:
         specifier = SpecifierSet('<= 1.11')
         versions = ['1.10', '1.11', '1.12']
         candidates = [
-            self.make_mock_candidate(version) for version in versions
+            make_mock_candidate(version) for version in versions
         ]
         evaluator = CandidateEvaluator.create()
         found_candidates = evaluator.make_found_candidates(
@@ -275,10 +305,10 @@ class TestCandidateEvaluator:
         Test all candidates yanked.
         """
         candidates = [
-            self.make_mock_candidate('1.0', yanked_reason='bad metadata #1'),
+            make_mock_candidate('1.0', yanked_reason='bad metadata #1'),
             # Put the best candidate in the middle, to test sorting.
-            self.make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
-            self.make_mock_candidate('2.0', yanked_reason='bad metadata #2'),
+            make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
+            make_mock_candidate('2.0', yanked_reason='bad metadata #2'),
         ]
         expected_best = candidates[1]
         evaluator = CandidateEvaluator.create()
@@ -310,7 +340,7 @@ class TestCandidateEvaluator:
         Test the log message with various reason strings.
         """
         candidates = [
-            self.make_mock_candidate('1.0', yanked_reason=yanked_reason),
+            make_mock_candidate('1.0', yanked_reason=yanked_reason),
         ]
         evaluator = CandidateEvaluator.create()
         actual = evaluator.get_best_candidate(candidates)
@@ -332,11 +362,11 @@ class TestCandidateEvaluator:
         Test the best candidates being yanked, but not all.
         """
         candidates = [
-            self.make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
+            make_mock_candidate('4.0', yanked_reason='bad metadata #4'),
             # Put the best candidate in the middle, to test sorting.
-            self.make_mock_candidate('2.0'),
-            self.make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
-            self.make_mock_candidate('1.0'),
+            make_mock_candidate('2.0'),
+            make_mock_candidate('3.0', yanked_reason='bad metadata #3'),
+            make_mock_candidate('1.0'),
         ]
         expected_best = candidates[1]
         evaluator = CandidateEvaluator.create()