Use pre-defined chunksizes instead of hardcoded ones.

The requests one currently sits at 10KB, the io buffer one at 8KB.
Both are reasonable values and slightly above our current values.

If those values are adjusted upstream, we will benefit as well.
This commit is contained in:
Stephan Erb 2016-02-28 18:44:35 +01:00
parent f5d27846c2
commit 23f0618576
2 changed files with 9 additions and 4 deletions

View File

@ -38,7 +38,7 @@ from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Response
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
@ -588,8 +588,12 @@ def _download_url(resp, link, content_file, hashes):
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(progress_indicator(resp_read(4096),
4096))
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:

View File

@ -3,6 +3,7 @@ from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
@ -199,7 +200,7 @@ def file_contents(filename):
return fp.read().decode('utf-8')
def read_chunks(file, size=4096):
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)