Coverage for requests.packages.urllib3.response : 50%

try:
    import http.client as httplib
except ImportError:
    import httplib
import zlib
import io
from socket import timeout as SocketTimeout

from ._collections import HTTPHeaderDict
from .exceptions import (
    ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):

    def __init__(self):
        self._first_try = True
        self._data = binary_type()
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data

        if not self._first_try:
            return self._obj.decompress(data)

        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):

    def __init__(self):
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data
        return self._obj.decompress(data)
def _get_decoder(mode):
    if mode == 'gzip':
        return GzipDecoder()

    return DeflateDecoder()
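# --- Standalone sketch (not part of response.py) ------------------------------
# Why DeflateDecoder retries with -zlib.MAX_WBITS: servers answering with
# "content-encoding: deflate" may send either a zlib-wrapped (RFC 1950) or a
# raw DEFLATE (RFC 1951) stream. The first attempt assumes the zlib wrapper;
# on zlib.error the decoder is rebuilt for the raw form and fed the buffered
# bytes again. GzipDecoder's 16 + zlib.MAX_WBITS selects gzip framing instead.
import zlib

payload = b"hello world" * 10
zlib_wrapped = zlib.compress(payload)                                    # RFC 1950
raw_compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
raw_deflate = raw_compressor.compress(payload) + raw_compressor.flush()  # RFC 1951

def inflate(data):
    try:
        return zlib.decompressobj().decompress(data)                  # expect zlib header
    except zlib.error:
        return zlib.decompressobj(-zlib.MAX_WBITS).decompress(data)   # raw fallback

assert inflate(zlib_wrapped) == payload
assert inflate(raw_deflate) == payload
# -------------------------------------------------------------------------------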
""" HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content: If True, the response's body will be preloaded during construction.
:param decode_content: If True, attempts to decode specific content-encoding's based on headers (like 'gzip' and 'deflate') will be skipped and raw data will be used instead.
:param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused. """
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):

        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content

        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self._pool = pool
        self._connection = connection

        if body and isinstance(body, (basestring, binary_type)):
            self._body = body
        if hasattr(body, 'read'):
            self._fp = body

        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        if tr_enc == 'chunked':
            self.chunked = True

        # We certainly don't want to preload content when the response is chunked.
        if not self.chunked and preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
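# --- Usage sketch (not part of response.py) ------------------------------------
# Illustrates the preload_content / decode_content parameters documented in the
# class docstring. It assumes this vendored module is importable under the path
# below; adjust the import to wherever your copy of urllib3 lives.
import io
from requests.packages.urllib3.response import HTTPResponse

resp = HTTPResponse(body=io.BytesIO(b"hello world"),
                    headers={'content-type': 'text/plain'},
                    status=200, preload_content=False)
# preload_content=False: nothing has been read yet; the body is pulled on demand.
assert resp.read() == b"hello world"
# -------------------------------------------------------------------------------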
""" Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get('location')
return False
return
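# --- Usage sketch (not part of response.py) ------------------------------------
# get_redirect_location() as documented above: a location string for redirect
# statuses, False otherwise. Assumes the same import path as the sketch above.
from requests.packages.urllib3.response import HTTPResponse

moved = HTTPResponse(status=302, headers={'location': '/new'}, preload_content=False)
assert moved.get_redirect_location() == '/new'

ok = HTTPResponse(status=200, preload_content=False)
assert ok.get_redirect_location() is False
# -------------------------------------------------------------------------------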
    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
        if self._body:
            return self._body

        if self._fp:
            return self.read(cache_content=True)
""" Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read
""" Set-up the _decoder attribute if necessar. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 self._decoder = _get_decoder(content_encoding)
""" Decode the data passed in and potentially flush the decoder. """ data = self._decoder.decompress(data) except (IOError, zlib.error) as e: content_encoding = self.headers.get('content-encoding', '').lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e)
buf = self._decoder.decompress(binary_type()) data += buf + self._decoder.flush()
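# --- Usage sketch (not part of response.py) ------------------------------------
# The failure path handled by _decode(): a body that advertises gzip but cannot
# be decompressed surfaces as DecodeError. Assumes the import paths below.
import io
from requests.packages.urllib3.response import HTTPResponse
from requests.packages.urllib3.exceptions import DecodeError

bogus = HTTPResponse(body=io.BytesIO(b"definitely not gzip"),
                     headers={'content-encoding': 'gzip'},
                     status=200, preload_content=False)
try:
    bogus.read()
except DecodeError as exc:
    print("decoding failed:", exc)
# -------------------------------------------------------------------------------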
""" Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``.
:param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header.
:param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """
return
# cStringIO doesn't like amt=None data = self._fp.read() flush_decoder = True else: # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ # already do. However, versions of python released before # December 15, 2012 (http://bugs.python.org/issue16298) do # not properly close the connection in all cases. There is # no harm in redundantly calling close.
except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? if 'read operation timed out' not in str(e): # Defensive: # This shouldn't happen but just in case we're missing an edge # case, let's avoid swallowing SSL errors. raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e: # This includes IncompleteRead. raise ProtocolError('Connection broken: %r' % e, e)
self._body = data
finally:
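# --- Usage sketch (not part of response.py) ------------------------------------
# cache_content as documented above: reading with cache_content=True stores the
# body so the .data property keeps working after the stream is exhausted.
# Assumes the import path below.
import io
from requests.packages.urllib3.response import HTTPResponse

resp = HTTPResponse(body=io.BytesIO(b"payload"), status=200, preload_content=False)
assert resp.read(cache_content=True) == b"payload"
assert resp.data == b"payload"       # served from the cached copy
# -------------------------------------------------------------------------------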
""" A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed.
:param amt: How much of the content to read. The generator will return up to much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ yield self._decode(line, decode_content, True) else:
yield data
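# --- Usage sketch (not part of response.py) ------------------------------------
# stream() yields the body in bounded reads instead of loading it all at once;
# empty reads are skipped, so no empty bytestring is ever yielded. Assumes the
# import path below.
import io
from requests.packages.urllib3.response import HTTPResponse

resp = HTTPResponse(body=io.BytesIO(b"a" * 10), status=200, preload_content=False)
chunks = list(resp.stream(4))
assert b"".join(chunks) == b"a" * 10
assert all(0 < len(c) <= 4 for c in chunks)
# -------------------------------------------------------------------------------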
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
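# --- Usage sketch (not part of response.py) ------------------------------------
# from_httplib() wraps an already-parsed httplib/http.client response. The fake
# socket below is a test-style stand-in (http.client only needs makefile()); it
# is an assumption of this sketch, not part of urllib3. Assumes the import path
# below.
import io
import http.client as httplib
from requests.packages.urllib3.response import HTTPResponse

class FakeSocket(object):
    def __init__(self, raw):
        self._raw = raw
    def makefile(self, *args, **kwargs):
        return io.BytesIO(self._raw)

raw = (b"HTTP/1.1 200 OK\r\n"
       b"Content-Length: 5\r\n"
       b"\r\n"
       b"hello")
r = httplib.HTTPResponse(FakeSocket(raw))
r.begin()                                  # parse status line and headers
resp = HTTPResponse.from_httplib(r, preload_content=False)
assert resp.status == 200 and resp.read() == b"hello"
# -------------------------------------------------------------------------------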
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)
    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()
    @property
    def closed(self):
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True
    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        return True
    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
    def read_chunked(self, amt=None):
        # FIXME: Rewrite this method and make it a class with
        #        a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked("Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")

        while True:
            # First, we'll figure out length of a chunk and then
            # we'll try to read it from socket.
            if self.chunk_left is None:
                line = self._fp.fp.readline()
                line = line.decode()
                # See RFC 7230: Chunked Transfer Coding.
                i = line.find(';')
                if i >= 0:
                    line = line[:i]  # Strip chunk-extensions.
                try:
                    self.chunk_left = int(line, 16)
                except ValueError:
                    # Invalid chunked protocol response, abort.
                    self.close()
                    raise httplib.IncompleteRead(''.join(line))
                if self.chunk_left == 0:
                    break

            if amt is None:
                chunk = self._fp._safe_read(self.chunk_left)
                yield chunk
                self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
                self.chunk_left = None
            elif amt < self.chunk_left:
                value = self._fp._safe_read(amt)
                self.chunk_left = self.chunk_left - amt
                yield value
            elif amt == self.chunk_left:
                value = self._fp._safe_read(amt)
                self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
                self.chunk_left = None
                yield value
            else:  # amt > self.chunk_left
                yield self._fp._safe_read(self.chunk_left)
                self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
                self.chunk_left = None

        # Chunk content ends with \r\n: discard it.
        while True:
            line = self._fp.fp.readline()
            if not line:
                # Some sites may not end with '\r\n'.
                break
            if line == b'\r\n':
                break

        # We read everything; close the "file".
        self.release_conn()
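# --- Standalone sketch (not part of response.py) -------------------------------
# The chunk-size parsing step above in isolation: each chunk starts with a
# hexadecimal length, optionally followed by ";"-separated chunk extensions,
# then CRLF (RFC 7230, section 4.1).
line = b"1a;name=value\r\n".decode()
i = line.find(';')
if i >= 0:
    line = line[:i]              # Strip chunk-extensions.
chunk_left = int(line, 16)
assert chunk_left == 26          # the next 26 bytes are chunk data, then CRLF
# -------------------------------------------------------------------------------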