mitmproxy/netlib/http.py

import string, urlparse, binascii
import sys
import odict, utils


class HttpError(Exception):
    def __init__(self, code, message):
        super(HttpError, self).__init__(message)
        self.code = code


class HttpErrorConnClosed(HttpError):
    pass


def _is_valid_port(port):
    if not 0 <= port <= 65535:
        return False
    return True


def _is_valid_host(host):
    try:
        host.decode("idna")
    except ValueError:
        return False
    if "\0" in host:
        return None
    return True


def parse_url(url):
    """
        Returns a (scheme, host, port, path) tuple, or None on error.

        Checks that:
            port is an integer 0-65535
            host is a valid IDNA-encoded hostname with no null-bytes
            path is valid ASCII
    """
    try:
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    except ValueError:
        return None
    if not scheme:
        return None
    if ':' in netloc:
        host, port = string.rsplit(netloc, ':', maxsplit=1)
        try:
            port = int(port)
        except ValueError:
            return None
    else:
        host = netloc
        if scheme == "https":
            port = 443
        else:
            port = 80
    path = urlparse.urlunparse(('', '', path, params, query, fragment))
    if not path.startswith("/"):
        path = "/" + path
    if not _is_valid_host(host):
        return None
    if not utils.isascii(path):
        return None
    if not _is_valid_port(port):
        return None
    return scheme, host, port, path
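
# Illustrative example (not part of the original module): parse_url on an
# absolute URL with an explicit port; a URL without a scheme is rejected.
#
#   >>> parse_url("http://foo.example.com:8080/test?x=1")
#   ('http', 'foo.example.com', 8080, '/test?x=1')
#   >>> parse_url("foo.example.com/test") is None
#   True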


def read_headers(fp):
    """
        Read a set of headers from a file pointer. Stop once a blank line is
        reached. Return an ODictCaseless object, or None if headers are invalid.
    """
    ret = []
    name = ''
    while 1:
        line = fp.readline()
        if not line or line == '\r\n' or line == '\n':
            break
        if line[0] in ' \t':
            if not ret:
                return None
            # continued header
            ret[-1][1] = ret[-1][1] + '\r\n ' + line.strip()
        else:
            i = line.find(':')
            # We're being liberal in what we accept, here.
            if i > 0:
                name = line[:i]
                value = line[i + 1:].strip()
                ret.append([name, value])
            else:
                return None
    return odict.ODictCaseless(ret)
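
# Illustrative example (not part of the original module), assuming cStringIO
# as the file-like object:
#
#   >>> from cStringIO import StringIO
#   >>> f = StringIO("Host: example.com\r\nAccept: */*\r\n\r\n")
#   >>> read_headers(f)["host"]
#   ['example.com']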


def read_chunked(fp, limit, is_request):
    """
        Read a chunked HTTP body.

        May raise HttpError.
    """
    # FIXME: Should check if chunked is the final encoding in the headers
    # http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-16#section-3.3 3.3 2.
    total = 0
    code = 400 if is_request else 502
    while True:
        line = fp.readline(128)
        if line == "":
            raise HttpErrorConnClosed(code, "Connection closed prematurely")
        if line != '\r\n' and line != '\n':
            try:
                length = int(line, 16)
            except ValueError:
                raise HttpError(code, "Invalid chunked encoding length: %s" % line)
            total += length
            if limit is not None and total > limit:
                msg = "HTTP Body too large." \
                      " Limit is %s, chunked content length was at least %s" % (limit, total)
                raise HttpError(code, msg)
            chunk = fp.read(length)
            suffix = fp.readline(5)
            if suffix != '\r\n':
                raise HttpError(code, "Malformed chunked body")
            yield line, chunk, '\r\n'
            if length == 0:
                return
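
# Illustrative example (not part of the original module): each chunk is
# yielded as a (size-line, data, trailer) triple, and the zero-length chunk
# ends the body.
#
#   >>> from cStringIO import StringIO
#   >>> f = StringIO("4\r\nWiki\r\n0\r\n\r\n")
#   >>> [data for _, data, _ in read_chunked(f, limit=None, is_request=False)]
#   ['Wiki', '']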


def get_header_tokens(headers, key):
    """
        Retrieve all tokens for a header key. A number of different headers
        follow a pattern where each header line can contain comma-separated
        tokens, and headers can be set multiple times.
    """
    toks = []
    for i in headers[key]:
        for j in i.split(","):
            toks.append(j.strip())
    return toks


def has_chunked_encoding(headers):
    return "chunked" in [i.lower() for i in get_header_tokens(headers, "transfer-encoding")]


def parse_http_protocol(s):
    """
        Parse an HTTP protocol declaration. Returns a (major, minor) tuple, or
        None.
    """
    if not s.startswith("HTTP/"):
        return None
    _, version = s.split('/', 1)
    if "." not in version:
        return None
    major, minor = version.split('.', 1)
    try:
        major = int(major)
        minor = int(minor)
    except ValueError:
        return None
    return major, minor
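
# Illustrative example (not part of the original module):
#
#   >>> parse_http_protocol("HTTP/1.1")
#   (1, 1)
#   >>> parse_http_protocol("SPDY/3") is None
#   True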


def parse_http_basic_auth(s):
    words = s.split()
    if len(words) != 2:
        return None
    scheme = words[0]
    try:
        user = binascii.a2b_base64(words[1])
    except binascii.Error:
        return None
    parts = user.split(':')
    if len(parts) != 2:
        return None
    return scheme, parts[0], parts[1]


def assemble_http_basic_auth(scheme, username, password):
    v = binascii.b2a_base64(username + ":" + password)
    return scheme + " " + v
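
# Illustrative example (not part of the original module). Note that
# binascii.b2a_base64 appends a trailing newline, which parse_http_basic_auth
# tolerates because it splits on whitespace.
#
#   >>> assemble_http_basic_auth("Basic", "user", "pass")
#   'Basic dXNlcjpwYXNz\n'
#   >>> parse_http_basic_auth("Basic dXNlcjpwYXNz")
#   ('Basic', 'user', 'pass')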


def parse_init(line):
    try:
        method, url, protocol = string.split(line)
    except ValueError:
        return None
    httpversion = parse_http_protocol(protocol)
    if not httpversion:
        return None
    if not utils.isascii(method):
        return None
    return method, url, httpversion


def parse_init_connect(line):
    """
        Returns (host, port, httpversion) if line is a valid CONNECT line.

        http://tools.ietf.org/html/draft-luotonen-web-proxy-tunneling-01 section 3.1
    """
    v = parse_init(line)
    if not v:
        return None
    method, url, httpversion = v

    if method.upper() != 'CONNECT':
        return None
    try:
        host, port = url.split(":")
    except ValueError:
        return None
    try:
        port = int(port)
    except ValueError:
        return None
    if not _is_valid_port(port):
        return None
    if not _is_valid_host(host):
        return None
    return host, port, httpversion
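
# Illustrative example (not part of the original module): a CONNECT request
# line as sent to an HTTP proxy.
#
#   >>> parse_init_connect("CONNECT example.com:443 HTTP/1.1")
#   ('example.com', 443, (1, 1))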


def parse_init_proxy(line):
    v = parse_init(line)
    if not v:
        return None
    method, url, httpversion = v

    parts = parse_url(url)
    if not parts:
        return None
    scheme, host, port, path = parts
    return method, scheme, host, port, path, httpversion


def parse_init_http(line):
    """
        Returns (method, url, httpversion)
    """
    v = parse_init(line)
    if not v:
        return None
    method, url, httpversion = v
    if not utils.isascii(url):
        return None
    if not (url.startswith("/") or url == "*"):
        return None
    return method, url, httpversion
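
# Illustrative example (not part of the original module): only origin-form
# ("/path") and asterisk-form ("*") request targets are accepted here;
# absolute-form URLs belong to parse_init_proxy.
#
#   >>> parse_init_http("GET /index.html HTTP/1.1")
#   ('GET', '/index.html', (1, 1))
#   >>> parse_init_http("GET http://example.com/ HTTP/1.1") is None
#   True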


def connection_close(httpversion, headers):
    """
        Checks the message to see if the client connection should be closed
        according to RFC 2616 Section 8.1.
        Note that the connection should also be closed if the response has
        been read until the end of the stream.
    """
    # At first, check if we have an explicit Connection header.
    if "connection" in headers:
        toks = get_header_tokens(headers, "connection")
        if "close" in toks:
            return True
        elif "keep-alive" in toks:
            return False
    # If we don't have a Connection header, HTTP 1.1 connections are assumed
    # to be persistent.
    if httpversion == (1, 1):
        return False
    return True
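
# Illustrative example (not part of the original module): HTTP/1.1 defaults to
# keep-alive unless the Connection header says otherwise; HTTP/1.0 defaults to
# close.
#
#   >>> connection_close((1, 1), odict.ODictCaseless([["Connection", "close"]]))
#   True
#   >>> connection_close((1, 1), odict.ODictCaseless())
#   False
#   >>> connection_close((1, 0), odict.ODictCaseless())
#   True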


def parse_response_line(line):
    parts = line.strip().split(" ", 2)
    if len(parts) == 2:  # handle missing message gracefully
        parts.append("")
    if len(parts) != 3:
        return None
    proto, code, msg = parts
    try:
        code = int(code)
    except ValueError:
        return None
    return (proto, code, msg)
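
# Illustrative example (not part of the original module): the status code is
# converted to an int, and a missing reason phrase is tolerated.
#
#   >>> parse_response_line("HTTP/1.1 200 OK")
#   ('HTTP/1.1', 200, 'OK')
#   >>> parse_response_line("HTTP/1.1 200")
#   ('HTTP/1.1', 200, '')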


def read_response(rfile, request_method, body_size_limit, include_body=True):
    """
        Return an (httpversion, code, msg, headers, content) tuple.
    """
    line = rfile.readline()
    # Possible leftover from previous message
    if line == "\r\n" or line == "\n":
        line = rfile.readline()
    if not line:
        raise HttpErrorConnClosed(502, "Server disconnect.")
    parts = parse_response_line(line)
    if not parts:
        raise HttpError(502, "Invalid server response: %s" % repr(line))
    proto, code, msg = parts
    httpversion = parse_http_protocol(proto)
    if httpversion is None:
        raise HttpError(502, "Invalid HTTP version in line: %s" % repr(proto))
    headers = read_headers(rfile)
    if headers is None:
        raise HttpError(502, "Invalid headers.")

    if include_body:
        content = read_http_body(rfile, headers, body_size_limit, request_method, code, False)
    else:
        # if include_body==False, a None content means the body should be
        # read separately
        content = None
    return httpversion, code, msg, headers, content
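
# Illustrative sketch (not part of the original module), assuming cStringIO as
# the file-like object:
#
#   >>> from cStringIO import StringIO
#   >>> raw = "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
#   >>> read_response(StringIO(raw), "GET", body_size_limit=None)[1:3]
#   (200, 'OK')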


def read_http_body(*args, **kwargs):
    return "".join(content for _, content, _ in read_http_body_chunked(*args, **kwargs))


def read_http_body_chunked(rfile, headers, limit, request_method, response_code, is_request, max_chunk_size=None):
    """
        Read an HTTP message body:

            rfile: A file descriptor to read from
            headers: An ODictCaseless object
            limit: Size limit.
            request_method: Method of the request this body belongs to (may be None)
            response_code: Status code of the response this body belongs to (ignored for requests)
            is_request: True if the body to read belongs to a request, False otherwise
            max_chunk_size: Maximum size of the yielded chunks (defaults to limit or sys.maxint)
    """
    if max_chunk_size is None:
        max_chunk_size = limit or sys.maxint

    expected_size = expected_http_body_size(headers, is_request, request_method, response_code)

    if expected_size is None:
        if has_chunked_encoding(headers):
            # Python 3: yield from
            for x in read_chunked(rfile, limit, is_request):
                yield x
        else:  # pragma: nocover
            raise HttpError(400 if is_request else 502, "Content-Length unknown but no chunked encoding")
    elif expected_size >= 0:
        if limit is not None and expected_size > limit:
            raise HttpError(400 if is_request else 509,
                            "HTTP Body too large. Limit is %s, content-length was %s" % (limit, expected_size))
        bytes_left = expected_size
        while bytes_left:
            chunk_size = min(bytes_left, max_chunk_size)
            yield "", rfile.read(chunk_size), ""
            bytes_left -= chunk_size
    else:
        bytes_left = limit or -1
        while bytes_left:
            chunk_size = min(bytes_left, max_chunk_size)
            content = rfile.read(chunk_size)
            if not content:
                return
            yield "", content, ""
            bytes_left -= chunk_size
        not_done = rfile.read(1)
        if not_done:
            raise HttpError(400 if is_request else 509, "HTTP Body too large. Limit is %s." % limit)
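
# Illustrative sketch (not part of the original module): with a Content-Length
# header the body is read in bounded chunks, and read_http_body joins them.
#
#   >>> from cStringIO import StringIO
#   >>> h = odict.ODictCaseless([["Content-Length", "5"]])
#   >>> read_http_body(StringIO("hello"), h, None, "GET", 200, False)
#   'hello'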


def expected_http_body_size(headers, is_request, request_method, response_code):
    """
        Returns the expected body length:
         - a non-negative integer, if the size is known in advance
         - None, if the size is unknown in advance (chunked encoding)
         - -1, if all data should be read until end of stream.
    """
    # Determine response size according to
    # http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-16#section-3.3
    if request_method:
        request_method = request_method.upper()

    if (not is_request and (
            request_method == "HEAD" or
            (request_method == "CONNECT" and response_code == 200) or
            response_code in [204, 304] or
            100 <= response_code <= 199)):
        return 0
    if has_chunked_encoding(headers):
        return None
    if "content-length" in headers:
        try:
            size = int(headers["content-length"][0])
            if size < 0:
                raise ValueError()
            return size
        except ValueError:
            raise HttpError(
                400 if is_request else 502,
                "Invalid content-length header: %s" % headers["content-length"]
            )
    if is_request:
        return 0
    return -1
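
# Illustrative example (not part of the original module):
#
#   >>> expected_http_body_size(odict.ODictCaseless([["Content-Length", "42"]]), False, "GET", 200)
#   42
#   >>> expected_http_body_size(odict.ODictCaseless(), False, "HEAD", 200)
#   0
#   >>> expected_http_body_size(odict.ODictCaseless([["Transfer-Encoding", "chunked"]]), False, "GET", 200) is None
#   True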