Mirror of https://github.com/Grasscutters/mitmproxy.git (synced 2025-01-30 23:09:44 +00:00)

Merge pull request #883 from mitmproxy/hyper-h2

HTTP/2: Implementation using hyper-h2

Commit 023026e032
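For context: hyper-h2 is a sans-I/O HTTP/2 library, and the new Http2Layer in this diff is built around its event loop — raw bytes are fed into H2Connection.receive_data(), the returned event objects are handled, and whatever data_to_send() has queued is written back out. A minimal sketch of that loop outside mitmproxy (the `sock` argument is a hypothetical connected socket; error handling is omitted):

# Minimal sketch of the hyper-h2 event loop that the new Http2Layer relies on.
# Illustration only; `sock` is a hypothetical connected socket object.
import h2.connection
import h2.events


def serve_once(sock):
    conn = h2.connection.H2Connection(client_side=False)
    conn.initiate_connection()
    sock.sendall(conn.data_to_send())            # server connection preface

    while True:
        data = sock.recv(65535)
        if not data:
            break
        # h2 never touches the network itself: bytes in, events out.
        for event in conn.receive_data(data):
            if isinstance(event, h2.events.RequestReceived):
                conn.send_headers(event.stream_id, [(':status', '200')])
                conn.send_data(event.stream_id, b'hello', end_stream=True)
            elif isinstance(event, h2.events.ConnectionTerminated):
                return
        sock.sendall(conn.data_to_send())        # flush queued frames

The diff below applies the same receive_data()/data_to_send() pattern to both the client and the server connection, guarded by a lock so that per-stream threads can share one connection.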
@@ -1,16 +1,19 @@
version: '{build}'
shallow_clone: true
build: off # Not a C# project
environment:
  matrix:
    - PYTHON: "C:\\Python27"
      PATH: "C:\\Python27;C:\\Python27\\Scripts;%PATH%"
      PATH: "%APPDATA%\\Python\\Scripts;C:\\Python27;C:\\Python27\\Scripts;%PATH%"
      PYINSTALLER_VERSION: "git+https://github.com/pyinstaller/pyinstaller.git"
install:
  - "pip install --src .. -r requirements.txt"
  - "pip install --user -U pip setuptools"
  - "pip install --user --src .. -r requirements.txt"
  - "python -c \"from OpenSSL import SSL; print(SSL.SSLeay_version(SSL.SSLEAY_VERSION))\""
build: off # Not a C# project
test_script:
  - "py.test -n 4"
  - "py.test -s --cov libmproxy --timeout 30"
cache:
  - C:\Users\appveyor\AppData\Local\pip\cache
after_test:
  - |
    git clone https://github.com/mitmproxy/release.git ..\release
.travis.yml (72 lines changed)
@@ -5,36 +5,34 @@ matrix:
  fast_finish: true
  include:
    - python: 2.7
    - python: 2.7
      env: OPENSSL=1.0.2
      addons:
        apt:
          sources:
            # Debian sid currently holds OpenSSL 1.0.2
            # change this with future releases!
            - debian-sid
          packages:
            - libssl-dev
    - python: pypy
    - python: pypy
      env: OPENSSL=1.0.2
      addons:
        apt:
          sources:
            # Debian sid currently holds OpenSSL 1.0.2
            # change this with future releases!
            - debian-sid
          packages:
            - libssl-dev
    - language: generic
      os: osx
      osx_image: xcode7.1
    - python: 2.7
      env: OPENSSL=1.0.2
      addons:
        apt:
          sources:
            # Debian sid currently holds OpenSSL 1.0.2
            # change this with future releases!
            - debian-sid
          packages:
            - libssl-dev
    - python: 2.7
      env: DOCS=1
      script: 'cd docs && make html'
    - python: pypy
    - python: pypy
      env: OPENSSL=1.0.2
      addons:
        apt:
          sources:
            # Debian sid currently holds OpenSSL 1.0.2
            # change this with future releases!
            - debian-sid
          packages:
            - libssl-dev
  allow_failures:
    # We allow pypy to fail until Travis fixes their infrastructure to a pypy
    # with a recent enought CFFI library to run cryptography 1.0+.
    - python: pypy

install:
@@ -45,13 +43,27 @@ install:
      brew outdated openssl || brew upgrade openssl
      brew install python
    fi
  - |
    if [ "$TRAVIS_PYTHON_VERSION" = "pypy" ]; then
      export PYENV_ROOT="$HOME/.pyenv"
      if [ -f "$PYENV_ROOT/bin/pyenv" ]; then
        pushd "$PYENV_ROOT" && git pull && popd
      else
        rm -rf "$PYENV_ROOT" && git clone --depth 1 https://github.com/yyuu/pyenv.git "$PYENV_ROOT"
      fi
      export PYPY_VERSION="4.0.1"
      "$PYENV_ROOT/bin/pyenv" install --skip-existing "pypy-$PYPY_VERSION"
      virtualenv --python="$PYENV_ROOT/versions/pypy-$PYPY_VERSION/bin/python" "$HOME/virtualenvs/pypy-$PYPY_VERSION"
      source "$HOME/virtualenvs/pypy-$PYPY_VERSION/bin/activate"
    fi
  - "pip install -U pip setuptools"
  - "pip install --src .. -r requirements.txt"

before_script:
  - "openssl version -a"

script:
  - "py.test -n 4 --cov libmproxy"
  - "py.test -s --cov libmproxy --timeout 30"

after_success:
  - coveralls
@@ -80,16 +92,8 @@ notifications:
    on_success: always
    on_failure: always

# exclude cryptography from cache
# it depends on libssl-dev version
# which needs to be compiled specifically to each version
before_cache:
  - pip uninstall -y cryptography

cache:
  directories:
    - $HOME/.cache/pip
    - /home/travis/virtualenv/python2.7.9/lib/python2.7/site-packages
    - /home/travis/virtualenv/python2.7.9/bin
    - /home/travis/virtualenv/pypy-2.5.0/site-packages
    - /home/travis/virtualenv/pypy-2.5.0/bin
    - $HOME/.pyenv
    - $HOME/Library/Caches/pip
@@ -35,12 +35,12 @@ from .contrib.wbxml.ASCommandResponse import ASCommandResponse
try:
    import pyamf
    from pyamf import remoting, flex
except ImportError: # pragma nocover
except ImportError: # pragma no cover
    pyamf = None

try:
    import cssutils
except ImportError: # pragma nocover
except ImportError: # pragma no cover
    cssutils = None
else:
    cssutils.log.setLevel(logging.CRITICAL)
@@ -56,7 +56,7 @@ class Channel:
            try:
                # The timeout is here so we can handle a should_exit event.
                g = m.reply.q.get(timeout=0.5)
            except Queue.Empty: # pragma: nocover
            except Queue.Empty: # pragma: no cover
                continue
            return g
@@ -37,7 +37,7 @@ def get_server(dummy_server, options):
        sys.exit(1)


def mitmproxy(args=None): # pragma: nocover
def mitmproxy(args=None): # pragma: no cover
    from . import console

    check_pyopenssl_version()
@@ -68,7 +68,7 @@ def mitmproxy(args=None): # pragma: nocover
        pass


def mitmdump(args=None): # pragma: nocover
def mitmdump(args=None): # pragma: no cover
    from . import dump

    check_pyopenssl_version()
@@ -103,7 +103,7 @@ def mitmdump(args=None): # pragma: nocover
        pass


def mitmweb(args=None): # pragma: nocover
def mitmweb(args=None): # pragma: no cover
    from . import web

    check_pyopenssl_version()
@@ -11,9 +11,14 @@ from netlib.tcp import Address
from .. import version, stateobject
from .flow import Flow

from collections import OrderedDict


class MessageMixin(stateobject.StateObject):
    _stateobject_attributes = dict(
    # The restoration order is important currently, e.g. because
    # of .content setting .headers["content-length"] automatically.
    # Using OrderedDict is the short term fix, restoring state should
    # be implemented without side-effects again.
    _stateobject_attributes = OrderedDict(
        http_version=bytes,
        headers=Headers,
        timestamp_start=float,
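The comment above is the whole rationale for switching to OrderedDict: setting .content rewrites the Content-Length header as a side effect, so the order in which attributes are restored changes the result. A small illustrative sketch of that hazard, using a hypothetical message class rather than mitmproxy's own (illustration only, not part of the diff):

# Why restoration order matters when one attribute's setter mutates another.
from collections import OrderedDict


class HypotheticalMessage(object):
    def __init__(self):
        self.headers = {}
        self._content = b""

    @property
    def content(self):
        return self._content

    @content.setter
    def content(self, value):
        self._content = value
        # side effect, mirroring the comment above
        self.headers["content-length"] = str(len(value))


state = OrderedDict([("headers", {"content-length": "999"}), ("content", b"abc")])
m = HypotheticalMessage()
for attr, value in state.items():
    setattr(m, attr, value)
# Restoring content after headers overwrites the stored header value;
# with a plain dict the order (and thus the outcome) would be arbitrary.
print(m.headers["content-length"])  # -> "3"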
@@ -241,8 +246,6 @@ class HTTPRequest(MessageMixin, Request):
            timestamp_end=request.timestamp_end,
            form_out=(request.form_out if hasattr(request, 'form_out') else None),
        )
        if hasattr(request, 'stream_id'):
            req.stream_id = request.stream_id
        return req

    def __hash__(self):
@@ -347,8 +350,6 @@ class HTTPResponse(MessageMixin, Response):
            timestamp_start=response.timestamp_start,
            timestamp_end=response.timestamp_end,
        )
        if hasattr(response, 'stream_id'):
            resp.stream_id = response.stream_id
        return resp

    def _refresh_cookie(self, c, delta):
@@ -27,15 +27,19 @@ as late as possible; this makes server replay without any outgoing connections p

from __future__ import (absolute_import, print_function, division)
from .base import Layer, ServerConnectionMixin, Kill
from .http import Http1Layer, UpstreamConnectLayer, Http2Layer
from .tls import TlsLayer
from .tls import is_tls_record_magic
from .tls import TlsClientHello
from .http import UpstreamConnectLayer
from .http1 import Http1Layer
from .http2 import Http2Layer
from .rawtcp import RawTCPLayer

__all__ = [
    "Layer", "ServerConnectionMixin", "Kill",
    "Http1Layer", "UpstreamConnectLayer", "Http2Layer",
    "TlsLayer", "is_tls_record_magic", "TlsClientHello",
    "UpstreamConnectLayer",
    "Http1Layer",
    "Http2Layer",
    "RawTCPLayer",
]
@@ -14,7 +14,7 @@ class _LayerCodeCompletion(object):
    Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
    """

    def __init__(self, **mixin_args): # pragma: nocover
    def __init__(self, **mixin_args): # pragma: no cover
        super(_LayerCodeCompletion, self).__init__(**mixin_args)
        if True:
            return
@@ -1,26 +1,30 @@
from __future__ import (absolute_import, print_function, division)

import sys
import traceback

import six

from netlib import tcp
from netlib.exceptions import HttpException, HttpReadDisconnect, NetlibException
from netlib.http import http1, Headers
from netlib.http import CONTENT_MISSING
from netlib.tcp import Address
from netlib.http.http2.connections import HTTP2Protocol
from netlib.http.http2.frame import GoAwayFrame, PriorityFrame, WindowUpdateFrame
from netlib.http import Headers, CONTENT_MISSING

from h2.exceptions import H2Error

from .. import utils
from ..exceptions import HttpProtocolException, ProtocolException
from ..models import (
    HTTPFlow, HTTPRequest, HTTPResponse, make_error_response, make_connect_response, Error, expect_continue_response
    HTTPFlow,
    HTTPResponse,
    make_error_response,
    make_connect_response,
    Error,
    expect_continue_response
)

from .base import Layer, Kill


class _HttpLayer(Layer):
    supports_streaming = False
class _HttpTransmissionLayer(Layer):

    def read_request(self):
        raise NotImplementedError()
@@ -31,26 +35,6 @@ class _HttpLayer(Layer):
    def send_request(self, request):
        raise NotImplementedError()

    def read_response(self, request):
        raise NotImplementedError()

    def send_response(self, response):
        raise NotImplementedError()

    def check_close_connection(self, flow):
        raise NotImplementedError()


class _StreamingHttpLayer(_HttpLayer):
    supports_streaming = True

    def read_response_headers(self):
        raise NotImplementedError

    def read_response_body(self, request, response):
        raise NotImplementedError()
        yield "this is a generator" # pragma: no cover

    def read_response(self, request):
        response = self.read_response_headers()
        response.data.content = b"".join(
@@ -58,176 +42,27 @@ class _StreamingHttpLayer(_HttpLayer):
        )
        return response

    def send_response_headers(self, response):
        raise NotImplementedError

    def send_response_body(self, response, chunks):
    def read_response_headers(self):
        raise NotImplementedError()

    def read_response_body(self, request, response):
        raise NotImplementedError()
        yield "this is a generator" # pragma: no cover

    def send_response(self, response):
        if response.content == CONTENT_MISSING:
            raise HttpException("Cannot assemble flow with CONTENT_MISSING")
        self.send_response_headers(response)
        self.send_response_body(response, [response.content])


class Http1Layer(_StreamingHttpLayer):

    def __init__(self, ctx, mode):
        super(Http1Layer, self).__init__(ctx)
        self.mode = mode

    def read_request(self):
        req = http1.read_request(self.client_conn.rfile, body_size_limit=self.config.body_size_limit)
        return HTTPRequest.wrap(req)

    def read_request_body(self, request):
        expected_size = http1.expected_http_body_size(request)
        return http1.read_body(self.client_conn.rfile, expected_size, self.config.body_size_limit)

    def send_request(self, request):
        self.server_conn.wfile.write(http1.assemble_request(request))
        self.server_conn.wfile.flush()

    def read_response_headers(self):
        resp = http1.read_response_head(self.server_conn.rfile)
        return HTTPResponse.wrap(resp)

    def read_response_body(self, request, response):
        expected_size = http1.expected_http_body_size(request, response)
        return http1.read_body(self.server_conn.rfile, expected_size, self.config.body_size_limit)

    def send_response_headers(self, response):
        raw = http1.assemble_response_head(response)
        self.client_conn.wfile.write(raw)
        self.client_conn.wfile.flush()
        raise NotImplementedError()

    def send_response_body(self, response, chunks):
        for chunk in http1.assemble_body(response.headers, chunks):
            self.client_conn.wfile.write(chunk)
            self.client_conn.wfile.flush()
        raise NotImplementedError()

    def check_close_connection(self, flow):
        request_close = http1.connection_close(
            flow.request.http_version,
            flow.request.headers
        )
        response_close = http1.connection_close(
            flow.response.http_version,
            flow.response.headers
        )
        read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
        close_connection = request_close or response_close or read_until_eof
        if flow.request.form_in == "authority" and flow.response.status_code == 200:
            # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
            # Charles Proxy sends a CONNECT response with HTTP/1.0
            # and no Content-Length header

            return False
        return close_connection

    def __call__(self):
        layer = HttpLayer(self, self.mode)
        layer()


# TODO: The HTTP2 layer is missing multiplexing, which requires a major rewrite.
class Http2Layer(_HttpLayer):

    def __init__(self, ctx, mode):
        super(Http2Layer, self).__init__(ctx)
        self.mode = mode
        self.client_protocol = HTTP2Protocol(self.client_conn, is_server=True,
                                             unhandled_frame_cb=self.handle_unexpected_frame_from_client)
        self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
                                             unhandled_frame_cb=self.handle_unexpected_frame_from_server)

    def read_request(self):
        request = HTTPRequest.from_protocol(
            self.client_protocol,
            body_size_limit=self.config.body_size_limit
        )
        self._stream_id = request.stream_id
        return request

    def send_request(self, message):
        # TODO: implement flow control and WINDOW_UPDATE frames
        self.server_conn.send(self.server_protocol.assemble(message))

    def read_response(self, request):
        return HTTPResponse.from_protocol(
            self.server_protocol,
            request_method=request.method,
            body_size_limit=self.config.body_size_limit,
            include_body=True,
            stream_id=self._stream_id
        )

    def send_response(self, message):
        # TODO: implement flow control to prevent client buffer filling up
        # maintain a send buffer size, and read WindowUpdateFrames from client to increase the send buffer
        self.client_conn.send(self.client_protocol.assemble(message))

    def check_close_connection(self, flow):
        # TODO: add a timer to disconnect after a 10 second timeout
        return False

    def connect(self):
        self.ctx.connect()
        self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
                                             unhandled_frame_cb=self.handle_unexpected_frame_from_server)
        self.server_protocol.perform_connection_preface()

    def set_server(self, *args, **kwargs):
        self.ctx.set_server(*args, **kwargs)
        self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
                                             unhandled_frame_cb=self.handle_unexpected_frame_from_server)
        self.server_protocol.perform_connection_preface()

    def __call__(self):
        self.server_protocol.perform_connection_preface()
        layer = HttpLayer(self, self.mode)
        layer()

        # terminate the connection
        self.client_conn.send(GoAwayFrame().to_bytes())

    def handle_unexpected_frame_from_client(self, frame):
        if isinstance(frame, WindowUpdateFrame):
            # Clients are sending WindowUpdate frames depending on their flow control algorithm.
            # Since we cannot predict these frames, and we do not need to respond to them,
            # simply accept them, and hide them from the log.
            # Ideally we should keep track of our own flow control window and
            # stall transmission if the outgoing flow control buffer is full.
            return
        if isinstance(frame, PriorityFrame):
            # Clients are sending Priority frames depending on their implementation.
            # The RFC does not clearly state when or which priority preferences should be set.
            # Since we cannot predict these frames, and we do not need to respond to them,
            # simply accept them, and hide them from the log.
            # Ideally we should forward them to the server.
            return
        if isinstance(frame, GoAwayFrame):
            # Client wants to terminate the connection,
            # relay it to the server.
            self.server_conn.send(frame.to_bytes())
            return
        self.log("Unexpected HTTP2 frame from client: %s" % frame.human_readable(), "info")

    def handle_unexpected_frame_from_server(self, frame):
        if isinstance(frame, WindowUpdateFrame):
            # Servers are sending WindowUpdate frames depending on their flow control algorithm.
            # Since we cannot predict these frames, and we do not need to respond to them,
            # simply accept them, and hide them from the log.
            # Ideally we should keep track of our own flow control window and
            # stall transmission if the outgoing flow control buffer is full.
            return
        if isinstance(frame, GoAwayFrame):
            # Server wants to terminate the connection,
            # relay it to the client.
            self.client_conn.send(frame.to_bytes())
            return
        self.log("Unexpected HTTP2 frame from server: %s" % frame.human_readable(), "info")
        raise NotImplementedError()


class ConnectServerConnection(object):
@@ -285,7 +120,7 @@ class UpstreamConnectLayer(Layer):
    def set_server(self, address, server_tls=None, sni=None):
        if self.ctx.server_conn:
            self.ctx.disconnect()
        address = Address.wrap(address)
        address = tcp.Address.wrap(address)
        self.connect_request.host = address.host
        self.connect_request.port = address.port
        self.server_conn.address = address
@@ -400,7 +235,8 @@ class HttpLayer(Layer):
        try:
            response = make_error_response(code, message)
            self.send_response(response)
        except NetlibException:
        except (NetlibException, H2Error):
            self.log(traceback.format_exc(), "debug")
            pass

    def change_upstream_proxy_server(self, address):
@@ -420,7 +256,7 @@ class HttpLayer(Layer):
            layer()

    def send_response_to_client(self, flow):
        if not (self.supports_streaming and flow.response.stream):
        if not flow.response.stream:
            # no streaming:
            # we already received the full response from the server and can
            # send it to the client straight away.
@@ -441,10 +277,7 @@ class HttpLayer(Layer):
    def get_response_from_server(self, flow):
        def get_response():
            self.send_request(flow.request)
            if self.supports_streaming:
                flow.response = self.read_response_headers()
            else:
                flow.response = self.read_response(flow.request)
            flow.response = self.read_response_headers()

        try:
            get_response()
@@ -474,15 +307,14 @@ class HttpLayer(Layer):
        if flow == Kill:
            raise Kill()

        if self.supports_streaming:
            if flow.response.stream:
                flow.response.data.content = CONTENT_MISSING
            else:
                flow.response.data.content = b"".join(self.read_response_body(
                    flow.request,
                    flow.response
                ))
            flow.response.timestamp_end = utils.timestamp()
        if flow.response.stream:
            flow.response.data.content = CONTENT_MISSING
        else:
            flow.response.data.content = b"".join(self.read_response_body(
                flow.request,
                flow.response
            ))
        flow.response.timestamp_end = utils.timestamp()

        # no further manipulation of self.server_conn beyond this point
        # we can safely set it as the final attribute value here.
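To summarize the refactor above: the old _HttpLayer/_StreamingHttpLayer split is collapsed into a single _HttpTransmissionLayer contract, and HttpLayer now always drives the transport through read_response_headers()/read_response_body(), whether the wire is HTTP/1 or HTTP/2. A condensed sketch of that contract as HttpLayer consumes it (names mirror the diff; `transport` is a hypothetical stand-in for the concrete layer, and CONTENT_MISSING is stubbed):

# Sketch of the unified streaming contract assumed by HttpLayer after this PR.
CONTENT_MISSING = 0  # stand-in for netlib.http.CONTENT_MISSING


def fetch_response(transport, flow):
    transport.send_request(flow.request)
    flow.response = transport.read_response_headers()
    if flow.response.stream:
        # body will be forwarded chunk by chunk later; do not buffer it here
        flow.response.data.content = CONTENT_MISSING
    else:
        flow.response.data.content = b"".join(
            transport.read_response_body(flow.request, flow.response)
        )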
libmproxy/protocol/http1.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from __future__ import (absolute_import, print_function, division)

import six

from netlib import tcp
from netlib.http import http1

from .http import _HttpTransmissionLayer, HttpLayer
from .. import utils
from ..models import HTTPRequest, HTTPResponse


class Http1Layer(_HttpTransmissionLayer):

    def __init__(self, ctx, mode):
        super(Http1Layer, self).__init__(ctx)
        self.mode = mode

    def read_request(self):
        req = http1.read_request(self.client_conn.rfile, body_size_limit=self.config.body_size_limit)
        return HTTPRequest.wrap(req)

    def read_request_body(self, request):
        expected_size = http1.expected_http_body_size(request)
        return http1.read_body(self.client_conn.rfile, expected_size, self.config.body_size_limit)

    def send_request(self, request):
        self.server_conn.wfile.write(http1.assemble_request(request))
        self.server_conn.wfile.flush()

    def read_response_headers(self):
        resp = http1.read_response_head(self.server_conn.rfile)
        return HTTPResponse.wrap(resp)

    def read_response_body(self, request, response):
        expected_size = http1.expected_http_body_size(request, response)
        return http1.read_body(self.server_conn.rfile, expected_size, self.config.body_size_limit)

    def send_response_headers(self, response):
        raw = http1.assemble_response_head(response)
        self.client_conn.wfile.write(raw)
        self.client_conn.wfile.flush()

    def send_response_body(self, response, chunks):
        for chunk in http1.assemble_body(response.headers, chunks):
            self.client_conn.wfile.write(chunk)
            self.client_conn.wfile.flush()

    def check_close_connection(self, flow):
        request_close = http1.connection_close(
            flow.request.http_version,
            flow.request.headers
        )
        response_close = http1.connection_close(
            flow.response.http_version,
            flow.response.headers
        )
        read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
        close_connection = request_close or response_close or read_until_eof
        if flow.request.form_in == "authority" and flow.response.status_code == 200:
            # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
            # Charles Proxy sends a CONNECT response with HTTP/1.0
            # and no Content-Length header

            return False
        return close_connection

    def __call__(self):
        layer = HttpLayer(self, self.mode)
        layer()
libmproxy/protocol/http2.py (new file, 414 lines)
@@ -0,0 +1,414 @@
from __future__ import (absolute_import, print_function, division)

import threading
import time
import Queue

from netlib.tcp import ssl_read_select
from netlib.exceptions import HttpException
from netlib.http import Headers
from netlib.utils import http2_read_raw_frame

import h2
from h2.connection import H2Connection
from h2.events import *

from .base import Layer
from .http import _HttpTransmissionLayer, HttpLayer
from .. import utils
from ..models import HTTPRequest, HTTPResponse
from ..exceptions import HttpProtocolException
from ..exceptions import ProtocolException


class SafeH2Connection(H2Connection):

    def __init__(self, conn, *args, **kwargs):
        super(SafeH2Connection, self).__init__(*args, **kwargs)
        self.conn = conn
        self.lock = threading.RLock()

    def safe_close_connection(self, error_code):
        with self.lock:
            self.close_connection(error_code)
            self.conn.send(self.data_to_send())

    def safe_increment_flow_control(self, stream_id, length):
        if length == 0:
            return

        with self.lock:
            self.increment_flow_control_window(length)
            self.conn.send(self.data_to_send())
        with self.lock:
            if stream_id in self.streams and not self.streams[stream_id].closed:
                self.increment_flow_control_window(length, stream_id=stream_id)
                self.conn.send(self.data_to_send())

    def safe_reset_stream(self, stream_id, error_code):
        with self.lock:
            try:
                self.reset_stream(stream_id, error_code)
            except h2.exceptions.StreamClosedError:
                # stream is already closed - good
                pass
            self.conn.send(self.data_to_send())

    def safe_update_settings(self, new_settings):
        with self.lock:
            self.update_settings(new_settings)
            self.conn.send(self.data_to_send())

    def safe_send_headers(self, is_zombie, stream_id, headers):
        with self.lock:
            if is_zombie():
                return
            self.send_headers(stream_id, headers)
            self.conn.send(self.data_to_send())

    def safe_send_body(self, is_zombie, stream_id, chunks):
        for chunk in chunks:
            position = 0
            while position < len(chunk):
                self.lock.acquire()
                if is_zombie():
                    self.lock.release()
                    return
                max_outbound_frame_size = self.max_outbound_frame_size
                frame_chunk = chunk[position:position + max_outbound_frame_size]
                if self.local_flow_control_window(stream_id) < len(frame_chunk):
                    self.lock.release()
                    time.sleep(0)
                    continue
                self.send_data(stream_id, frame_chunk)
                self.conn.send(self.data_to_send())
                self.lock.release()
                position += max_outbound_frame_size
        with self.lock:
            if is_zombie():
                return
            self.end_stream(stream_id)
            self.conn.send(self.data_to_send())
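SafeH2Connection's job is to serialize access to the shared h2 state machine: every stream below runs in its own thread, but the H2Connection and the underlying socket are not thread-safe, so each mutation happens under one RLock and is flushed immediately with data_to_send(). The same lock-and-flush pattern in isolation, with a hypothetical raw connection object (illustration only, not part of the diff):

# Minimal sketch of the lock-and-flush pattern used by SafeH2Connection.
# `raw_conn` is a hypothetical object with a send(bytes) method.
import threading

import h2.connection


class LockedH2(h2.connection.H2Connection):
    def __init__(self, raw_conn, *args, **kwargs):
        super(LockedH2, self).__init__(*args, **kwargs)
        self.raw_conn = raw_conn
        self.lock = threading.RLock()  # reentrant: helpers may nest

    def locked_send_headers(self, stream_id, headers):
        with self.lock:
            self.send_headers(stream_id, headers)
            # flush while still holding the lock, so frames never interleave
            self.raw_conn.send(self.data_to_send())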
class Http2Layer(Layer):

    def __init__(self, ctx, mode):
        super(Http2Layer, self).__init__(ctx)
        self.mode = mode
        self.streams = dict()
        self.server_to_client_stream_ids = dict([(0, 0)])
        self.client_conn.h2 = SafeH2Connection(self.client_conn, client_side=False)

        # make sure that we only pass actual SSL.Connection objects in here,
        # because otherwise ssl_read_select fails!
        self.active_conns = [self.client_conn.connection]

    def _initiate_server_conn(self):
        self.server_conn.h2 = SafeH2Connection(self.server_conn, client_side=True)
        self.server_conn.h2.initiate_connection()
        self.server_conn.send(self.server_conn.h2.data_to_send())
        self.active_conns.append(self.server_conn.connection)

    def connect(self): # pragma: no cover
        raise ValueError("CONNECT inside an HTTP2 stream is not supported.")
        # self.ctx.connect()
        # self.server_conn.connect()
        # self._initiate_server_conn()

    def set_server(self): # pragma: no cover
        raise NotImplementedError("Cannot change server for HTTP2 connections.")

    def disconnect(self): # pragma: no cover
        raise NotImplementedError("Cannot dis- or reconnect in HTTP2 connections.")

    def next_layer(self): # pragma: no cover
        # WebSockets over HTTP/2?
        # CONNECT for proxying?
        raise NotImplementedError()

    def _handle_event(self, event, source_conn, other_conn, is_server):
        if hasattr(event, 'stream_id'):
            if is_server and event.stream_id % 2 == 1:
                eid = self.server_to_client_stream_ids[event.stream_id]
            else:
                eid = event.stream_id

        if isinstance(event, RequestReceived):
            headers = Headers([[str(k), str(v)] for k, v in event.headers])
            self.streams[eid] = Http2SingleStreamLayer(self, eid, headers)
            self.streams[eid].timestamp_start = time.time()
            self.streams[eid].start()
        elif isinstance(event, ResponseReceived):
            headers = Headers([[str(k), str(v)] for k, v in event.headers])
            self.streams[eid].queued_data_length = 0
            self.streams[eid].timestamp_start = time.time()
            self.streams[eid].response_headers = headers
            self.streams[eid].response_arrived.set()
        elif isinstance(event, DataReceived):
            if self.config.body_size_limit and self.streams[eid].queued_data_length > self.config.body_size_limit:
                raise HttpException("HTTP body too large. Limit is {}.".format(self.config.body_size_limit))
            self.streams[eid].data_queue.put(event.data)
            self.streams[eid].queued_data_length += len(event.data)
            source_conn.h2.safe_increment_flow_control(event.stream_id, event.flow_controlled_length)
        elif isinstance(event, StreamEnded):
            self.streams[eid].timestamp_end = time.time()
            self.streams[eid].data_finished.set()
        elif isinstance(event, StreamReset):
            self.streams[eid].zombie = time.time()
            if eid in self.streams and event.error_code == 0x8:
                if is_server:
                    other_stream_id = self.streams[eid].client_stream_id
                else:
                    other_stream_id = self.streams[eid].server_stream_id
                if other_stream_id is not None:
                    other_conn.h2.safe_reset_stream(other_stream_id, event.error_code)
        elif isinstance(event, RemoteSettingsChanged):
            new_settings = dict([(id, cs.new_value) for (id, cs) in event.changed_settings.iteritems()])
            other_conn.h2.safe_update_settings(new_settings)
        elif isinstance(event, ConnectionTerminated):
            # Do not immediately terminate the other connection.
            # Some streams might be still sending data to the client.
            return False
        elif isinstance(event, PushedStreamReceived):
            # pushed stream ids should be uniq and not dependent on race conditions
            # only the parent stream id must be looked up first
            parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
            with self.client_conn.h2.lock:
                self.client_conn.h2.push_stream(parent_eid, event.pushed_stream_id, event.headers)

            headers = Headers([[str(k), str(v)] for k, v in event.headers])
            headers['x-mitmproxy-pushed'] = 'true'
            self.streams[event.pushed_stream_id] = Http2SingleStreamLayer(self, event.pushed_stream_id, headers)
            self.streams[event.pushed_stream_id].timestamp_start = time.time()
            self.streams[event.pushed_stream_id].pushed = True
            self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
            self.streams[event.pushed_stream_id].timestamp_end = time.time()
            self.streams[event.pushed_stream_id].request_data_finished.set()
            self.streams[event.pushed_stream_id].start()
        elif isinstance(event, TrailersReceived):
            raise NotImplementedError()

        return True

    def _cleanup_streams(self):
        death_time = time.time() - 10
        for stream_id in self.streams.keys():
            zombie = self.streams[stream_id].zombie
            if zombie and zombie <= death_time:
                self.streams.pop(stream_id, None)

    def __call__(self):
        if self.server_conn:
            self._initiate_server_conn()

        preamble = self.client_conn.rfile.read(24)
        self.client_conn.h2.initiate_connection()
        self.client_conn.h2.receive_data(preamble)
        self.client_conn.send(self.client_conn.h2.data_to_send())

        while True:
            r = ssl_read_select(self.active_conns, 1)
            for conn in r:
                source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
                other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
                is_server = (conn == self.server_conn.connection)

                with source_conn.h2.lock:
                    try:
                        raw_frame = b''.join(http2_read_raw_frame(source_conn.rfile))
                    except:
                        for stream in self.streams.values():
                            stream.zombie = time.time()
                        return

                    events = source_conn.h2.receive_data(raw_frame)
                    source_conn.send(source_conn.h2.data_to_send())

                    for event in events:
                        if not self._handle_event(event, source_conn, other_conn, is_server):
                            return

            self._cleanup_streams()


class Http2SingleStreamLayer(_HttpTransmissionLayer, threading.Thread):

    def __init__(self, ctx, stream_id, request_headers):
        super(Http2SingleStreamLayer, self).__init__(ctx)
        self.zombie = None
        self.client_stream_id = stream_id
        self.server_stream_id = None
        self.request_headers = request_headers
        self.response_headers = None
        self.pushed = False

        self.request_data_queue = Queue.Queue()
        self.request_queued_data_length = 0
        self.request_data_finished = threading.Event()

        self.response_arrived = threading.Event()
        self.response_data_queue = Queue.Queue()
        self.response_queued_data_length = 0
        self.response_data_finished = threading.Event()

    @property
    def data_queue(self):
        if self.response_arrived.is_set():
            return self.response_data_queue
        else:
            return self.request_data_queue

    @property
    def queued_data_length(self):
        if self.response_arrived.is_set():
            return self.response_queued_data_length
        else:
            return self.request_queued_data_length

    @property
    def data_finished(self):
        if self.response_arrived.is_set():
            return self.response_data_finished
        else:
            return self.request_data_finished

    @queued_data_length.setter
    def queued_data_length(self, v):
        if self.response_arrived.is_set():
            return self.response_queued_data_length
        else:
            return self.request_queued_data_length

    def is_zombie(self):
        return self.zombie is not None

    def read_request(self):
        self.request_data_finished.wait()

        authority = self.request_headers.get(':authority', '')
        method = self.request_headers.get(':method', 'GET')
        scheme = self.request_headers.get(':scheme', 'https')
        path = self.request_headers.get(':path', '/')
        host = None
        port = None

        if path == '*' or path.startswith("/"):
            form_in = "relative"
        elif method == 'CONNECT': # pragma: no cover
            raise NotImplementedError("CONNECT over HTTP/2 is not implemented.")
        else: # pragma: no cover
            form_in = "absolute"
            # FIXME: verify if path or :host contains what we need
            scheme, host, port, _ = utils.parse_url(path)

        if authority:
            host, _, port = authority.partition(':')

        if not host:
            host = 'localhost'
        if not port:
            port = 443 if scheme == 'https' else 80
        port = int(port)

        data = []
        while self.request_data_queue.qsize() > 0:
            data.append(self.request_data_queue.get())
        data = b"".join(data)

        return HTTPRequest(
            form_in,
            method,
            scheme,
            host,
            port,
            path,
            (2, 0),
            self.request_headers,
            data,
            timestamp_start=self.timestamp_start,
            timestamp_end=self.timestamp_end,
        )

    def send_request(self, message):
        if self.pushed:
            # nothing to do here
            return

        with self.server_conn.h2.lock:
            # We must not assign a stream id if we are already a zombie.
            if self.zombie:
                return

            self.server_stream_id = self.server_conn.h2.get_next_available_stream_id()
            self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id

        self.server_conn.h2.safe_send_headers(
            self.is_zombie,
            self.server_stream_id,
            message.headers
        )
        self.server_conn.h2.safe_send_body(
            self.is_zombie,
            self.server_stream_id,
            message.body
        )

    def read_response_headers(self):
        self.response_arrived.wait()

        status_code = int(self.response_headers.get(':status', 502))

        return HTTPResponse(
            http_version=(2, 0),
            status_code=status_code,
            reason='',
            headers=self.response_headers,
            content=None,
            timestamp_start=self.timestamp_start,
            timestamp_end=self.timestamp_end,
        )

    def read_response_body(self, request, response):
        while True:
            try:
                yield self.response_data_queue.get(timeout=1)
            except Queue.Empty:
                pass
            if self.response_data_finished.is_set():
                while self.response_data_queue.qsize() > 0:
                    yield self.response_data_queue.get()
                return
            if self.zombie:
                return

    def send_response_headers(self, response):
        self.client_conn.h2.safe_send_headers(
            self.is_zombie,
            self.client_stream_id,
            response.headers
        )

    def send_response_body(self, _response, chunks):
        self.client_conn.h2.safe_send_body(
            self.is_zombie,
            self.client_stream_id,
            chunks
        )

    def check_close_connection(self, flow):
        # This layer only handles a single stream.
        # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
        return True

    def connect(self): # pragma: no cover
        raise ValueError("CONNECT inside an HTTP2 stream is not supported.")

    def set_server(self, *args, **kwargs): # pragma: no cover
        # do not mess with the server connection - all streams share it.
        pass

    def run(self):
        layer = HttpLayer(self, self.mode)
        layer()
        self.zombie = time.time()
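Http2Layer multiplexes by giving every HTTP/2 stream its own Http2SingleStreamLayer thread: the connection loop above only parses frames and feeds per-stream queues and events, while each stream thread blocks on its own request_data_finished/response_arrived events. A stripped-down sketch of that producer/consumer split, with hypothetical names and no h2 involved (handle_request is an assumed placeholder):

# Sketch of the per-stream threading model: one reader loop dispatches into
# per-stream queues, and one worker thread per stream consumes them.
import threading
import Queue  # Python 2, matching the code above


class StreamWorker(threading.Thread):
    def __init__(self, stream_id, handle_request):
        super(StreamWorker, self).__init__()
        self.stream_id = stream_id
        self.handle_request = handle_request   # hypothetical per-request handler
        self.data_queue = Queue.Queue()
        self.data_finished = threading.Event()

    def run(self):
        self.data_finished.wait()               # like request_data_finished.wait()
        chunks = []
        while not self.data_queue.empty():
            chunks.append(self.data_queue.get())
        self.handle_request(self.stream_id, b"".join(chunks))


def dispatch(event, streams, handle_request):
    # called from the single connection loop, mirroring _handle_event()
    if event.type == "headers":
        streams[event.stream_id] = StreamWorker(event.stream_id, handle_request)
        streams[event.stream_id].start()
    elif event.type == "data":
        streams[event.stream_id].data_queue.put(event.data)
    elif event.type == "end":
        streams[event.stream_id].data_finished.set()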
@@ -349,7 +349,7 @@ class TlsLayer(Layer):
            layer = self.ctx.next_layer(self)
            layer()

    def __repr__(self):
    def __repr__(self): # pragma: no cover
        if self._client_tls and self._server_tls:
            return "TlsLayer(client and server)"
        elif self._client_tls:
@@ -103,9 +103,9 @@ class ConnectionHandler(object):
            return Socks5Proxy(root_context)
        elif mode == "regular":
            return HttpProxy(root_context)
        elif callable(mode): # pragma: nocover
        elif callable(mode): # pragma: no cover
            return mode(root_context)
        else: # pragma: nocover
        else: # pragma: no cover
            raise ValueError("Unknown proxy mode: %s" % mode)

    def handle(self):
@@ -1,3 +1,3 @@
-e git+https://github.com/mitmproxy/netlib.git#egg=netlib
-e git+https://github.com/mitmproxy/pathod.git#egg=pathod
-e .[dev,examples,contentviews]
-e .[dev,examples,contentviews]
setup.py (2 lines changed)
@@ -17,6 +17,7 @@ with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
# This will break `pip install` on systems with old setuptools versions.
deps = {
    "netlib>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION),
    "h2>=2.1.0, <3.0",
    "tornado>=4.3.0, <4.4",
    "configargparse>=0.10.0, <0.11",
    "pyperclip>=1.5.22, <1.6",
@@ -45,6 +46,7 @@ dev_deps = {
    "pytest>=2.8.0",
    "pytest-xdist>=1.13.1",
    "pytest-cov>=2.1.0",
    "pytest-timeout>=1.0.0",
    "coveralls>=0.4.1",
    "pathod>=%s, <%s" % (version.MINORVERSION, version.NEXT_MINORVERSION),
    "sphinx>=1.3.1",
@@ -1,7 +1,7 @@
import cStringIO
from libmproxy import filt
from libmproxy.protocol import http
from libmproxy.models import Error
from libmproxy.models import http
from netlib.http import Headers
from . import tutils
@@ -1,4 +1,3 @@
from io import BytesIO
from netlib.exceptions import HttpSyntaxException
from netlib.http import http1
from netlib.tcp import TCPClient
@@ -6,33 +5,6 @@ from netlib.tutils import treq, raises
from . import tutils, tservers


class TestHTTPResponse:

    def test_read_from_stringio(self):
        s = (
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Length: 7\r\n"
            b"\r\n"
            b"content\r\n"
            b"HTTP/1.1 204 OK\r\n"
            b"\r\n"
        )
        rfile = BytesIO(s)
        r = http1.read_response(rfile, treq())
        assert r.status_code == 200
        assert r.content == b"content"
        assert http1.read_response(rfile, treq()).status_code == 204

        rfile = BytesIO(s)
        # HEAD must not have content by spec. We should leave it on the pipe.
        r = http1.read_response(rfile, treq(method=b"HEAD"))
        assert r.status_code == 200
        assert r.content == b""

        with raises(HttpSyntaxException):
            http1.read_response(rfile, treq())


class TestHTTPFlow(object):

    def test_repr(self):
test/test_protocol_http2.py (new file, 431 lines)
@@ -0,0 +1,431 @@
from __future__ import (absolute_import, print_function, division)

import OpenSSL
import pytest
import traceback
import os
import tempfile
import sys

from libmproxy.proxy.config import ProxyConfig
from libmproxy.proxy.server import ProxyServer
from libmproxy.cmdline import APP_HOST, APP_PORT

import logging
logging.getLogger("hyper.packages.hpack.hpack").setLevel(logging.WARNING)
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(logging.WARNING)
logging.getLogger("passlib.utils.compat").setLevel(logging.WARNING)
logging.getLogger("passlib.registry").setLevel(logging.WARNING)
logging.getLogger("PIL.Image").setLevel(logging.WARNING)
logging.getLogger("PIL.PngImagePlugin").setLevel(logging.WARNING)

import netlib
from netlib import tservers as netlib_tservers
from netlib.utils import http2_read_raw_frame

import h2
from hyperframe.frame import Frame

from libmproxy import utils
from . import tservers

requires_alpn = pytest.mark.skipif(
    not OpenSSL._util.lib.Cryptography_HAS_ALPN,
    reason="requires OpenSSL with ALPN support")


class _Http2ServerBase(netlib_tservers.ServerTestBase):
    ssl = dict(alpn_select=b'h2')

    class handler(netlib.tcp.BaseHandler):

        def handle(self):
            h2_conn = h2.connection.H2Connection(client_side=False)

            preamble = self.rfile.read(24)
            h2_conn.initiate_connection()
            h2_conn.receive_data(preamble)
            self.wfile.write(h2_conn.data_to_send())
            self.wfile.flush()

            done = False
            while not done:
                try:
                    raw = b''.join(http2_read_raw_frame(self.rfile))
                    events = h2_conn.receive_data(raw)
                except:
                    break
                self.wfile.write(h2_conn.data_to_send())
                self.wfile.flush()

                for event in events:
                    try:
                        if not self.server.handle_server_event(event, h2_conn, self.rfile, self.wfile):
                            done = True
                            break
                    except Exception as e:
                        print(repr(e))
                        print(traceback.format_exc())
                        done = True
                        break

    def handle_server_event(self, h2_conn, rfile, wfile):
        raise NotImplementedError()


class _Http2TestBase(object):

    @classmethod
    def setup_class(self):
        self.config = ProxyConfig(**self.get_proxy_config())

        tmaster = tservers.TestMaster(self.config)
        tmaster.start_app(APP_HOST, APP_PORT)
        self.proxy = tservers.ProxyThread(tmaster)
        self.proxy.start()

    @classmethod
    def teardown_class(cls):
        cls.proxy.shutdown()

    @property
    def master(self):
        return self.proxy.tmaster

    @classmethod
    def get_proxy_config(cls):
        cls.cadir = os.path.join(tempfile.gettempdir(), "mitmproxy")
        return dict(
            no_upstream_cert = False,
            cadir = cls.cadir,
            authenticator = None,
        )

    def setup(self):
        self.master.clear_log()
        self.master.state.clear()
        self.server.server.handle_server_event = self.handle_server_event

    def _setup_connection(self):
        self.config.http2 = True

        client = netlib.tcp.TCPClient(("127.0.0.1", self.proxy.port))
        client.connect()

        # send CONNECT request
        client.wfile.write(
            b"CONNECT localhost:%d HTTP/1.1\r\n"
            b"Host: localhost:%d\r\n"
            b"\r\n" % (self.server.server.address.port, self.server.server.address.port)
        )
        client.wfile.flush()

        # read CONNECT response
        while client.rfile.readline() != "\r\n":
            pass

        client.convert_to_ssl(alpn_protos=[b'h2'])

        h2_conn = h2.connection.H2Connection(client_side=True)
        h2_conn.initiate_connection()
        client.wfile.write(h2_conn.data_to_send())
        client.wfile.flush()

        return client, h2_conn

    def _send_request(self, wfile, h2_conn, stream_id=1, headers=[], body=b''):
        h2_conn.send_headers(
            stream_id=stream_id,
            headers=headers,
            end_stream=(len(body) == 0),
        )
        if body:
            h2_conn.send_data(stream_id, body)
            h2_conn.end_stream(stream_id)
        wfile.write(h2_conn.data_to_send())
        wfile.flush()


@requires_alpn
class TestSimple(_Http2TestBase, _Http2ServerBase):

    @classmethod
    def setup_class(self):
        _Http2TestBase.setup_class()
        _Http2ServerBase.setup_class()

    @classmethod
    def teardown_class(self):
        _Http2TestBase.teardown_class()
        _Http2ServerBase.teardown_class()

    @classmethod
    def handle_server_event(self, event, h2_conn, rfile, wfile):
        if isinstance(event, h2.events.ConnectionTerminated):
            return False
        elif isinstance(event, h2.events.RequestReceived):
            h2_conn.send_headers(1, [
                (':status', '200'),
                ('foo', 'bar'),
            ])
            h2_conn.send_data(1, b'foobar')
            h2_conn.end_stream(1)
            wfile.write(h2_conn.data_to_send())
            wfile.flush()

        return True

    def test_simple(self):
        client, h2_conn = self._setup_connection()

        self._send_request(client.wfile, h2_conn, headers=[
            (':authority', "127.0.0.1:%s" % self.server.server.address.port),
            (':method', 'GET'),
            (':scheme', 'https'),
            (':path', '/'),
        ], body='my request body echoed back to me')

        done = False
        while not done:
            try:
                events = h2_conn.receive_data(b''.join(http2_read_raw_frame(client.rfile)))
            except:
                break
            client.wfile.write(h2_conn.data_to_send())
            client.wfile.flush()

            for event in events:
                if isinstance(event, h2.events.StreamEnded):
                    done = True

        h2_conn.close_connection()
        client.wfile.write(h2_conn.data_to_send())
        client.wfile.flush()

        assert len(self.master.state.flows) == 1
        assert self.master.state.flows[0].response.status_code == 200
        assert self.master.state.flows[0].response.headers['foo'] == 'bar'
        assert self.master.state.flows[0].response.body == b'foobar'


@requires_alpn
class TestWithBodies(_Http2TestBase, _Http2ServerBase):
    tmp_data_buffer_foobar = b''

    @classmethod
    def setup_class(self):
        _Http2TestBase.setup_class()
        _Http2ServerBase.setup_class()

    @classmethod
    def teardown_class(self):
        _Http2TestBase.teardown_class()
        _Http2ServerBase.teardown_class()

    @classmethod
    def handle_server_event(self, event, h2_conn, rfile, wfile):
        if isinstance(event, h2.events.ConnectionTerminated):
            return False
        if isinstance(event, h2.events.DataReceived):
            self.tmp_data_buffer_foobar += event.data
        elif isinstance(event, h2.events.StreamEnded):
            h2_conn.send_headers(1, [
                (':status', '200'),
            ])
            h2_conn.send_data(1, self.tmp_data_buffer_foobar)
            h2_conn.end_stream(1)
            wfile.write(h2_conn.data_to_send())
            wfile.flush()

        return True

    def test_with_bodies(self):
        client, h2_conn = self._setup_connection()

        self._send_request(
            client.wfile,
            h2_conn,
            headers=[
                (':authority', "127.0.0.1:%s" % self.server.server.address.port),
                (':method', 'GET'),
                (':scheme', 'https'),
                (':path', '/'),
            ],
            body='foobar with request body',
        )

        done = False
        while not done:
            try:
                events = h2_conn.receive_data(b''.join(http2_read_raw_frame(client.rfile)))
            except:
                break
            client.wfile.write(h2_conn.data_to_send())
            client.wfile.flush()

            for event in events:
                if isinstance(event, h2.events.StreamEnded):
                    done = True

        h2_conn.close_connection()
        client.wfile.write(h2_conn.data_to_send())
        client.wfile.flush()

        assert self.master.state.flows[0].response.body == b'foobar with request body'


@requires_alpn
class TestPushPromise(_Http2TestBase, _Http2ServerBase):

    @classmethod
    def setup_class(self):
        _Http2TestBase.setup_class()
        _Http2ServerBase.setup_class()

    @classmethod
    def teardown_class(self):
        _Http2TestBase.teardown_class()
        _Http2ServerBase.teardown_class()

    @classmethod
    def handle_server_event(self, event, h2_conn, rfile, wfile):
        if isinstance(event, h2.events.ConnectionTerminated):
            return False
        elif isinstance(event, h2.events.RequestReceived):
            if event.stream_id != 1:
                # ignore requests initiated by push promises
                return True

            h2_conn.send_headers(1, [(':status', '200')])
            h2_conn.push_stream(1, 2, [
                (':authority', "127.0.0.1:%s" % self.port),
                (':method', 'GET'),
                (':scheme', 'https'),
                (':path', '/pushed_stream_foo'),
                ('foo', 'bar')
            ])
            h2_conn.push_stream(1, 4, [
                (':authority', "127.0.0.1:%s" % self.port),
                (':method', 'GET'),
                (':scheme', 'https'),
                (':path', '/pushed_stream_bar'),
                ('foo', 'bar')
            ])
            wfile.write(h2_conn.data_to_send())
            wfile.flush()

            h2_conn.send_headers(2, [(':status', '200')])
            h2_conn.send_headers(4, [(':status', '200')])
            wfile.write(h2_conn.data_to_send())
            wfile.flush()

            h2_conn.send_data(1, b'regular_stream')
            h2_conn.send_data(2, b'pushed_stream_foo')
            h2_conn.send_data(4, b'pushed_stream_bar')
            wfile.write(h2_conn.data_to_send())
            wfile.flush()
            h2_conn.end_stream(1)
            h2_conn.end_stream(2)
            h2_conn.end_stream(4)
            wfile.write(h2_conn.data_to_send())
            wfile.flush()

        return True

    def test_push_promise(self):
        client, h2_conn = self._setup_connection()

        self._send_request(client.wfile, h2_conn, stream_id=1, headers=[
            (':authority', "127.0.0.1:%s" % self.server.server.address.port),
            (':method', 'GET'),
            (':scheme', 'https'),
            (':path', '/'),
            ('foo', 'bar')
        ])

        done = False
        ended_streams = 0
        pushed_streams = 0
        responses = 0
        while not done:
            try:
                raw = b''.join(http2_read_raw_frame(client.rfile))
                events = h2_conn.receive_data(raw)
            except:
                break
            client.wfile.write(h2_conn.data_to_send())
            client.wfile.flush()

            for event in events:
                if isinstance(event, h2.events.StreamEnded):
                    ended_streams += 1
                elif isinstance(event, h2.events.PushedStreamReceived):
                    pushed_streams += 1
                elif isinstance(event, h2.events.ResponseReceived):
                    responses += 1
                if isinstance(event, h2.events.ConnectionTerminated):
                    done = True

            if responses == 3 and ended_streams == 3 and pushed_streams == 2:
                done = True

        h2_conn.close_connection()
        client.wfile.write(h2_conn.data_to_send())
        client.wfile.flush()

        assert ended_streams == 3
        assert pushed_streams == 2

        bodies = [flow.response.body for flow in self.master.state.flows]
        assert len(bodies) == 3
        assert b'regular_stream' in bodies
        assert b'pushed_stream_foo' in bodies
        assert b'pushed_stream_bar' in bodies

    def test_push_promise_reset(self):
        client, h2_conn = self._setup_connection()

        self._send_request(client.wfile, h2_conn, stream_id=1, headers=[
            (':authority', "127.0.0.1:%s" % self.server.server.address.port),
            (':method', 'GET'),
            (':scheme', 'https'),
            (':path', '/'),
            ('foo', 'bar')
        ])

        done = False
        ended_streams = 0
        pushed_streams = 0
        responses = 0
        while not done:
            try:
                events = h2_conn.receive_data(b''.join(http2_read_raw_frame(client.rfile)))
            except:
                break
            client.wfile.write(h2_conn.data_to_send())
            client.wfile.flush()

            for event in events:
                if isinstance(event, h2.events.StreamEnded) and event.stream_id == 1:
                    ended_streams += 1
                elif isinstance(event, h2.events.PushedStreamReceived):
                    pushed_streams += 1
                    h2_conn.reset_stream(event.pushed_stream_id, error_code=0x8)
                    client.wfile.write(h2_conn.data_to_send())
                    client.wfile.flush()
                elif isinstance(event, h2.events.ResponseReceived):
                    responses += 1
                if isinstance(event, h2.events.ConnectionTerminated):
                    done = True

            if responses >= 1 and ended_streams >= 1 and pushed_streams == 2:
                done = True

        h2_conn.close_connection()
        client.wfile.write(h2_conn.data_to_send())
        client.wfile.flush()

        bodies = [flow.response.body for flow in self.master.state.flows if flow.response]
        assert len(bodies) >= 1
        assert b'regular_stream' in bodies
        # the other two bodies might not be transmitted before the reset