mirror of https://github.com/pypa/pip
PEP 470: Remove support for searching rel links
This commit is contained in:
parent
a38da832c4
commit
e26d930b0a
|
@@ -1,5 +1,8 @@
|
|||
**8.0.0 (unreleased)**
|
||||
|
||||
* **BACKWARD INCOMPATIBLE** Remove the ability to find any files other than the
|
||||
ones directly linked from the index or find-links pages.
|
||||
|
||||
* Deprecate and no-op the ``--allow-external``, ``--allow-all-external``, and
|
||||
``--allow-unverified`` functionality that was added as part of PEP 438. With
|
||||
changes made to the repository protocol made in PEP 470, these options are no
|
||||
|
|
26
pip/index.py
26
pip/index.py
|
@@ -578,13 +578,10 @@ class PackageFinder(object):
|
|||
def _get_pages(self, locations, project_name):
|
||||
"""
|
||||
Yields (page, page_url) from the given locations, skipping
|
||||
locations that have errors, and adding download/homepage links
|
||||
locations that have errors.
|
||||
"""
|
||||
all_locations = list(locations)
|
||||
seen = set()
|
||||
|
||||
while all_locations:
|
||||
location = all_locations.pop(0)
|
||||
for location in locations:
|
||||
if location in seen:
|
||||
continue
|
||||
seen.add(location)
|
||||
|
@@ -595,9 +592,6 @@ class PackageFinder(object):
|
|||
|
||||
yield page
|
||||
|
||||
for link in page.rel_links():
|
||||
all_locations.append(link)
|
||||
|
||||
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
|
||||
|
||||
def _sort_links(self, links):
|
||||
|
@@ -906,22 +900,6 @@ class HTMLPage(object):
|
|||
)
|
||||
yield Link(url, self)
|
||||
|
||||
def rel_links(self, rels=('homepage', 'download')):
|
||||
"""Yields all links with the given relations"""
|
||||
rels = set(rels)
|
||||
|
||||
for anchor in self.parsed.findall(".//a"):
|
||||
if anchor.get("rel") and anchor.get("href"):
|
||||
found_rels = set(anchor.get("rel").split())
|
||||
# Determine the intersection between what rels were found and
|
||||
# what rels were being looked for
|
||||
if found_rels & rels:
|
||||
href = anchor.get("href")
|
||||
url = self.clean_link(
|
||||
urllib_parse.urljoin(self.base_url, href)
|
||||
)
|
||||
yield Link(url, self)
|
||||
|
||||
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
|
||||
|
||||
def clean_link(self, url):
|
||||
|
|
Loading…
Reference in New Issue