nuke legacy protocol stack

This commit is contained in:
Thomas Kriechbaumer 2020-12-14 20:00:35 +01:00
parent e6445af2cd
commit d159897d98
107 changed files with 1411 additions and 6499 deletions

View File

@ -25,11 +25,6 @@ from mitmproxy.addons import streambodies
from mitmproxy.addons import save
from mitmproxy.addons import tlsconfig
from mitmproxy.addons import upstream_auth
from mitmproxy.utils import compat
if compat.new_proxy_core: # pragma: no cover
if True: # noqa
from mitmproxy.addons import clientplayback_sansio as clientplayback # type: ignore # noqa
def default_addons():

View File

@ -1,133 +1,140 @@
import queue
import threading
import asyncio
import time
import traceback
import typing
import mitmproxy.types
from mitmproxy import command
from mitmproxy import connections
from mitmproxy import controller
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import options
from mitmproxy.coretypes import basethread
from mitmproxy.net import server_spec, tls
from mitmproxy.net.http import http1
from mitmproxy.net.http.url import hostport
from mitmproxy.utils import human
from mitmproxy.addons.proxyserver import AsyncReply
from mitmproxy.net import server_spec
from mitmproxy.options import Options
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy import commands, events, layers, server
from mitmproxy.proxy.context import ConnectionState, Context, Server
from mitmproxy.proxy.layer import CommandGenerator
from mitmproxy.utils import asyncio_utils
class RequestReplayThread(basethread.BaseThread):
daemon = True
class MockServer(layers.http.HttpConnection):
"""
A mock HTTP "server" that just pretends it received a full HTTP request,
which is then processed by the proxy core.
"""
flow: http.HTTPFlow
def __init__(
self,
opts: options.Options,
channel: controller.Channel,
queue: queue.Queue,
) -> None:
self.options = opts
self.channel = channel
self.queue = queue
self.inflight = threading.Event()
super().__init__("RequestReplayThread")
def __init__(self, flow: http.HTTPFlow, context: Context):
super().__init__(context, context.client)
self.flow = flow
def run(self):
while True:
f = self.queue.get()
self.inflight.set()
self.replay(f)
self.inflight.clear()
def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
if isinstance(event, events.Start):
content = self.flow.request.raw_content
self.flow.request.timestamp_start = self.flow.request.timestamp_end = time.time()
yield layers.http.ReceiveHttp(layers.http.RequestHeaders(
1,
self.flow.request,
end_stream=not content,
replay_flow=self.flow,
))
if content:
yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))
yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))
elif isinstance(event, (
layers.http.ResponseHeaders,
layers.http.ResponseData,
layers.http.ResponseEndOfMessage,
layers.http.ResponseProtocolError,
)):
pass
else: # pragma: no cover
ctx.log(f"Unexpected event during replay: {events}")
def replay(self, f): # pragma: no cover
f.live = True
r = f.request
bsl = human.parse_size(self.options.body_size_limit)
authority_backup = r.authority
server = None
try:
f.response = None
# If we have a channel, run script hooks.
request_reply = self.channel.ask("request", f)
if isinstance(request_reply, http.HTTPResponse):
f.response = request_reply
class ReplayHandler(server.ConnectionHandler):
layer: layers.HttpLayer
if not f.response:
# In all modes, we directly connect to the server displayed
if self.options.mode.startswith("upstream:"):
server_address = server_spec.parse_with_mode(self.options.mode)[1].address
server = connections.ServerConnection(server_address)
server.connect()
if r.scheme == "https":
connect_request = http.make_connect_request((r.data.host, r.port))
server.wfile.write(http1.assemble_request(connect_request))
server.wfile.flush()
resp = http1.read_response(
server.rfile,
connect_request,
body_size_limit=bsl
)
if resp.status_code != 200:
raise exceptions.ReplayException(
"Upstream server refuses CONNECT request"
)
server.establish_tls(
sni=f.server_conn.sni,
**tls.client_arguments_from_options(self.options)
)
r.authority = b""
else:
r.authority = hostport(r.scheme, r.host, r.port)
else:
server_address = (r.host, r.port)
server = connections.ServerConnection(server_address)
server.connect()
if r.scheme == "https":
server.establish_tls(
sni=f.server_conn.sni,
**tls.client_arguments_from_options(self.options)
)
r.authority = ""
def __init__(self, flow: http.HTTPFlow, options: Options) -> None:
client = flow.client_conn.copy()
client.state = ConnectionState.OPEN
server.wfile.write(http1.assemble_request(r))
server.wfile.flush()
r.timestamp_start = r.timestamp_end = time.time()
context = Context(client, options)
context.server = Server(
(flow.request.host, flow.request.port)
)
context.server.tls = flow.request.scheme == "https"
if options.mode.startswith("upstream:"):
context.server.via = server_spec.parse_with_mode(options.mode)[1]
if f.server_conn:
f.server_conn.close()
f.server_conn = server
super().__init__(context)
f.response = http1.read_response(server.rfile, r, body_size_limit=bsl)
response_reply = self.channel.ask("response", f)
if response_reply == exceptions.Kill:
raise exceptions.Kill()
except (exceptions.ReplayException, exceptions.NetlibException) as e:
f.error = flow.Error(str(e))
self.channel.ask("error", f)
except exceptions.Kill:
self.channel.tell("log", log.LogEntry(flow.Error.KILLED_MESSAGE, "info"))
except Exception as e:
self.channel.tell("log", log.LogEntry(repr(e), "error"))
finally:
r.authority = authority_backup
f.live = False
if server and server.connected():
server.finish()
server.close()
self.layer = layers.HttpLayer(context, HTTPMode.transparent)
self.layer.connections[client] = MockServer(flow, context.fork())
self.flow = flow
self.done = asyncio.Event()
async def replay(self) -> None:
self.server_event(events.Start())
await self.done.wait()
def log(self, message: str, level: str = "info") -> None:
ctx.log(f"[replay] {message}", level)
async def handle_hook(self, hook: commands.Hook) -> None:
data, = hook.args()
data.reply = AsyncReply(data)
await ctx.master.addons.handle_lifecycle(hook.name, data)
await data.reply.done.wait()
if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):
if self.transports:
# close server connections
for x in self.transports.values():
if x.handler:
x.handler.cancel()
await asyncio.wait([x.handler for x in self.transports.values() if x.handler])
# signal completion
self.done.set()
class ClientPlayback:
def __init__(self):
self.q = queue.Queue()
self.thread: RequestReplayThread = None
playback_task: typing.Optional[asyncio.Task] = None
inflight: typing.Optional[http.HTTPFlow]
queue: asyncio.Queue
options: Options
def check(self, f: flow.Flow):
if f.live:
def __init__(self):
self.queue = asyncio.Queue()
self.inflight = None
self.task = None
def running(self):
self.playback_task = asyncio_utils.create_task(
self.playback(),
name="client playback"
)
self.options = ctx.options
def done(self):
if self.playback_task:
self.playback_task.cancel()
async def playback(self):
while True:
self.inflight = await self.queue.get()
try:
h = ReplayHandler(self.inflight, self.options)
await h.replay()
except Exception:
ctx.log(f"Client replay has crashed!\n{traceback.format_exc()}", "error")
self.queue.task_done()
self.inflight = None
def check(self, f: flow.Flow) -> typing.Optional[str]:
if f.live or f == self.inflight:
return "Can't replay live flow."
if f.intercepted:
return "Can't replay intercepted flow."
@ -138,6 +145,7 @@ class ClientPlayback:
return "Can't replay flow with missing content."
else:
return "Can only replay HTTP flows."
return None
def load(self, loader):
loader.add_option(
@ -145,14 +153,6 @@ class ClientPlayback:
"Replay client requests from a saved file."
)
def running(self):
self.thread = RequestReplayThread(
ctx.options,
ctx.master.channel,
self.q,
)
self.thread.start()
def configure(self, updated):
if "client_replay" in updated and ctx.options.client_replay:
try:
@ -166,20 +166,25 @@ class ClientPlayback:
"""
Approximate number of flows queued for replay.
"""
inflight = 1 if self.thread and self.thread.inflight.is_set() else 0
return self.q.qsize() + inflight
return self.queue.qsize() + int(bool(self.inflight))
@command.command("replay.client.stop")
def stop_replay(self) -> None:
"""
Clear the replay queue.
"""
with self.q.mutex:
lst = list(self.q.queue)
self.q.queue.clear()
for f in lst:
updated = []
while True:
try:
f = self.queue.get_nowait()
except asyncio.QueueEmpty:
break
else:
self.queue.task_done()
f.revert()
ctx.master.addons.trigger("update", lst)
updated.append(f)
ctx.master.addons.trigger("update", updated)
ctx.log.alert("Client replay queue cleared.")
@command.command("replay.client")
@ -187,30 +192,23 @@ class ClientPlayback:
"""
Add flows to the replay queue, skipping flows that can't be replayed.
"""
lst = []
updated: typing.List[http.HTTPFlow] = []
for f in flows:
hf = typing.cast(http.HTTPFlow, f)
err = self.check(hf)
err = self.check(f)
if err:
ctx.log.warn(err)
continue
lst.append(hf)
http_flow = typing.cast(http.HTTPFlow, f)
# Prepare the flow for replay
hf.backup()
hf.is_replay = "request"
hf.response = None
hf.error = None
# https://github.com/mitmproxy/mitmproxy/issues/2197
if hf.request.http_version == "HTTP/2.0":
hf.request.http_version = "HTTP/1.1"
hf.request.headers.pop(":authority", None)
host = hf.request.host
if host is not None:
hf.request.headers.insert(0, "host", host)
self.q.put(hf)
ctx.master.addons.trigger("update", lst)
http_flow.backup()
http_flow.is_replay = "request"
http_flow.response = None
http_flow.error = None
self.queue.put_nowait(http_flow)
updated.append(http_flow)
ctx.master.addons.trigger("update", updated)
@command.command("replay.client.file")
def load_file(self, path: mitmproxy.types.Path) -> None:

View File

@ -1,222 +0,0 @@
import asyncio
import time
import traceback
import typing
import mitmproxy.types
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import io
from mitmproxy.addons.proxyserver import AsyncReply
from mitmproxy.net import server_spec
from mitmproxy.options import Options
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import commands, events, layers, server
from mitmproxy.proxy2.context import ConnectionState, Context, Server
from mitmproxy.proxy2.layer import CommandGenerator
from mitmproxy.utils import asyncio_utils
class MockServer(layers.http.HttpConnection):
    """
    A mock HTTP "server" that just pretends it received a full HTTP request,
    which is then processed by the proxy core.

    On Start it synthesizes the event sequence a real client connection would
    produce for `self.flow.request` (headers, optional body, end-of-message),
    all on stream id 1.
    """
    flow: http.HTTPFlow

    def __init__(self, flow: http.HTTPFlow, context: Context):
        super().__init__(context, context.client)
        self.flow = flow

    def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
        if isinstance(event, events.Start):
            content = self.flow.request.raw_content
            # Stamp the request with the replay time, not the original capture time.
            self.flow.request.timestamp_start = self.flow.request.timestamp_end = time.time()
            yield layers.http.ReceiveHttp(layers.http.RequestHeaders(
                1,
                self.flow.request,
                end_stream=not content,
                replay_flow=self.flow,
            ))
            if content:
                yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))
            yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))
        elif isinstance(event, (
            layers.http.ResponseHeaders,
            layers.http.ResponseData,
            layers.http.ResponseEndOfMessage,
            layers.http.ResponseProtocolError,
        )):
            # Responses are recorded on the flow by the HTTP layer itself;
            # the mock server has nothing to do with them.
            pass
        else:  # pragma: no cover
            # BUG FIX: previously interpolated `events` (the module) instead of
            # the actual offending `event`, making this log line useless.
            ctx.log(f"Unexpected event during replay: {event}")
class ReplayHandler(server.ConnectionHandler):
    """
    Drive a single replayed flow through the sans-io proxy core.

    A copy of the original client connection is paired with a MockServer,
    so the HTTP layer processes the stored request as if a real client had
    sent it, while real server connections are opened as needed.
    """
    layer: layers.HttpLayer

    def __init__(self, flow: http.HTTPFlow, options: Options) -> None:
        client = flow.client_conn.copy()
        # The copied connection must look alive for the proxy core to use it.
        client.state = ConnectionState.OPEN
        context = Context(client, options)
        context.server = Server(
            (flow.request.host, flow.request.port)
        )
        context.server.tls = flow.request.scheme == "https"
        if options.mode.startswith("upstream:"):
            # Route the replayed request through the configured upstream proxy.
            context.server.via = server_spec.parse_with_mode(options.mode)[1]
        super().__init__(context)
        self.layer = layers.HttpLayer(context, HTTPMode.transparent)
        # Substitute the mock "client" feeding the stored request into the layer.
        self.layer.connections[client] = MockServer(flow, context.fork())
        self.flow = flow
        self.done = asyncio.Event()

    async def replay(self) -> None:
        """Kick off the replay and block until the flow has completed."""
        self.server_event(events.Start())
        await self.done.wait()

    def log(self, message: str, level: str = "info") -> None:
        # Prefix all proxy-core log output so replay activity is identifiable.
        ctx.log(f"[replay] {message}", level)

    async def handle_hook(self, hook: commands.Hook) -> None:
        """Run addon hooks; finish the replay once a response or error is seen."""
        data, = hook.args()
        data.reply = AsyncReply(data)
        await ctx.master.addons.handle_lifecycle(hook.name, data)
        await data.reply.done.wait()
        if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):
            if self.transports:
                # close server connections
                for x in self.transports.values():
                    if x.handler:
                        x.handler.cancel()
                await asyncio.wait([x.handler for x in self.transports.values() if x.handler])
            # signal completion
            self.done.set()
class ClientPlayback:
    """Addon that replays previously recorded client requests."""

    playback_task: typing.Optional[asyncio.Task] = None
    inflight: typing.Optional[http.HTTPFlow]
    queue: asyncio.Queue
    options: Options

    def __init__(self):
        self.queue = asyncio.Queue()
        self.inflight = None
        self.task = None

    def running(self):
        # Spawn the long-lived worker coroutine that drains the replay queue.
        self.playback_task = asyncio_utils.create_task(
            self.playback(),
            name="client playback"
        )
        self.options = ctx.options

    def done(self):
        if self.playback_task:
            self.playback_task.cancel()

    async def playback(self):
        # Worker loop: replay one queued flow at a time, to completion.
        while True:
            self.inflight = await self.queue.get()
            try:
                handler = ReplayHandler(self.inflight, self.options)
                await handler.replay()
            except Exception:
                ctx.log(f"Client replay has crashed!\n{traceback.format_exc()}", "error")
            self.queue.task_done()
            self.inflight = None

    def check(self, f: flow.Flow) -> typing.Optional[str]:
        """Return a reason why *f* cannot be replayed, or None if it can."""
        if f.live or f == self.inflight:
            return "Can't replay live flow."
        if f.intercepted:
            return "Can't replay intercepted flow."
        if not isinstance(f, http.HTTPFlow):
            return "Can only replay HTTP flows."
        if not f.request:
            return "Can't replay flow with missing request."
        if f.request.raw_content is None:
            return "Can't replay flow with missing content."
        return None

    def load(self, loader):
        loader.add_option(
            "client_replay", typing.Sequence[str], [],
            "Replay client requests from a saved file."
        )

    def configure(self, updated):
        if "client_replay" in updated and ctx.options.client_replay:
            try:
                flows = io.read_flows_from_paths(ctx.options.client_replay)
            except exceptions.FlowReadException as e:
                raise exceptions.OptionsError(str(e))
            self.start_replay(flows)

    @command.command("replay.client.count")
    def count(self) -> int:
        """
        Approximate number of flows queued for replay.
        """
        # The flow currently being replayed is no longer in the queue.
        return self.queue.qsize() + (1 if self.inflight else 0)

    @command.command("replay.client.stop")
    def stop_replay(self) -> None:
        """
        Clear the replay queue.
        """
        cleared = []
        while not self.queue.empty():
            f = self.queue.get_nowait()
            self.queue.task_done()
            f.revert()
            cleared.append(f)
        ctx.master.addons.trigger("update", cleared)
        ctx.log.alert("Client replay queue cleared.")

    @command.command("replay.client")
    def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:
        """
        Add flows to the replay queue, skipping flows that can't be replayed.
        """
        enqueued: typing.List[http.HTTPFlow] = []
        for candidate in flows:
            problem = self.check(candidate)
            if problem:
                ctx.log.warn(problem)
                continue
            hf = typing.cast(http.HTTPFlow, candidate)
            # Reset any previous exchange so the flow replays from scratch.
            hf.backup()
            hf.is_replay = "request"
            hf.response = None
            hf.error = None
            self.queue.put_nowait(hf)
            enqueued.append(hf)
        ctx.master.addons.trigger("update", enqueued)

    @command.command("replay.client.file")
    def load_file(self, path: mitmproxy.types.Path) -> None:
        """
        Load flows from file, and add them to the replay queue.
        """
        try:
            flows = io.read_flows_from_paths([path])
        except exceptions.FlowReadException as e:
            raise exceptions.CommandError(str(e))
        self.start_replay(flows)

View File

@ -3,11 +3,10 @@ from typing import Type, Sequence, Union, Tuple, Any, Iterable, Optional, List
from mitmproxy import ctx, exceptions
from mitmproxy.net.tls import is_tls_record_magic
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import context, layer, layers
from mitmproxy.proxy2.layers import modes
from mitmproxy.proxy2.layers.tls import HTTP_ALPNS, parse_client_hello
from mitmproxy.proxy import context, layer, layers
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers.tls import HTTP_ALPNS, parse_client_hello
LayerCls = Type[layer.Layer]
@ -88,8 +87,6 @@ class NextLayer:
raise AssertionError()
def next_layer(self, nextlayer: layer.NextLayer):
if isinstance(nextlayer, base.Layer): # pragma: no cover
return # skip the old proxy core's next_layer event.
nextlayer.layer = self._next_layer(
nextlayer.context,
nextlayer.data_client(),

View File

@ -12,7 +12,7 @@ from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy.net.http import status_codes
from mitmproxy.utils import compat
from mitmproxy.proxy import context
REALM = "mitmproxy"
@ -49,7 +49,7 @@ class ProxyAuth:
self.singleuser = None
self.ldapconn = None
self.ldapserver = None
self.authenticated: MutableMapping[compat.Client, Tuple[str, str]] = weakref.WeakKeyDictionary()
self.authenticated: MutableMapping[context.Client, Tuple[str, str]] = weakref.WeakKeyDictionary()
"""Contains all connections that are permanently authenticated after an HTTP CONNECT"""
def load(self, loader):

View File

@ -4,8 +4,8 @@ from typing import Optional
from mitmproxy import controller, ctx, eventsequence, flow, log, master, options, platform
from mitmproxy.flow import Error
from mitmproxy.proxy2 import commands
from mitmproxy.proxy2 import server
from mitmproxy.proxy import commands
from mitmproxy.proxy import server
from mitmproxy.utils import asyncio_utils, human

View File

@ -5,9 +5,19 @@ from OpenSSL import SSL, crypto
from mitmproxy import certs, ctx, exceptions
from mitmproxy.net import tls as net_tls
from mitmproxy.options import CONF_BASENAME
from mitmproxy.proxy.protocol.tls import DEFAULT_CLIENT_CIPHERS
from mitmproxy.proxy2 import context
from mitmproxy.proxy2.layers import tls
from mitmproxy.proxy import context
from mitmproxy.proxy.layers import tls
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://ssl-config.mozilla.org/#config=old
DEFAULT_CLIENT_CIPHERS = (
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:"
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:"
"DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:"
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:"
"ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:"
"AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
)
class AppData(TypedDict):

View File

@ -18,12 +18,12 @@ import mitmproxy.flow
from mitmproxy import flowfilter
from mitmproxy import exceptions
from mitmproxy import command
from mitmproxy import connections
from mitmproxy import ctx
from mitmproxy import io
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy.utils import compat, human
from mitmproxy.proxy import context
from mitmproxy.utils import human
# The underlying sorted list implementation expects the sort key to be stable
@ -460,12 +460,10 @@ class View(collections.abc.Sequence):
req = http.HTTPRequest.make(method.upper(), url)
except ValueError as e:
raise exceptions.CommandError("Invalid URL: %s" % e)
if compat.new_proxy_core: # pragma: no cover
c = compat.Client(("", 0), ("", 0), req.timestamp_start - 0.0001)
s = compat.Server((req.host, req.port))
else: # pragma: no cover
c = connections.ClientConnection.make_dummy(("", 0))
s = connections.ServerConnection.make_dummy((req.host, req.port))
c = context.Client(("", 0), ("", 0), req.timestamp_start - 0.0001)
s = context.Server((req.host, req.port))
f = http.HTTPFlow(c, s)
f.request = req
f.request.headers["Host"] = req.host

View File

@ -1,380 +0,0 @@
import os
import time
import typing
import uuid
import warnings
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import stateobject
from mitmproxy.net import tcp
from mitmproxy.net import tls
from mitmproxy.utils import human
from mitmproxy.utils import strutils
class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
    """
    A client connection

    Attributes:
        address: Remote address
        tls_established: True if TLS is established, False otherwise
        mitmcert: The MITM'ed TLS server certificate presented to the client
        timestamp_start: Connection start timestamp
        timestamp_tls_setup: TLS established timestamp
        timestamp_end: Connection end timestamp
        sni: Server Name Indication sent by client during the TLS handshake
        cipher_name: The current used cipher
        alpn_proto_negotiated: The negotiated application protocol
        tls_version: TLS version
        tls_extensions: TLS ClientHello extensions
    """

    def __init__(self, client_connection, address, server):
        # Eventually, this object is restored from state. We don't have a
        # connection then.
        if client_connection:
            super().__init__(client_connection, address, server)
        else:
            self.connection = None
            self.server = None
            self.wfile = None
            self.rfile = None
            self.address = None
            self.tls_established = None

        # Random id gives every connection a stable identity for __eq__/__hash__.
        self.id = str(uuid.uuid4())
        self.mitmcert = None
        self.timestamp_start = time.time()
        self.timestamp_end = None
        self.timestamp_tls_setup = None
        self.sni = None
        self.cipher_name = None
        self.alpn_proto_negotiated = None
        self.tls_version = None
        self.tls_extensions = None

    def connected(self):
        # Live only while a socket exists and finish() has not yet run.
        return bool(self.connection) and not self.finished

    def __repr__(self):
        if self.tls_established:
            tls = f"[{self.tls_version}] "
        else:
            tls = ""
        if self.alpn_proto_negotiated:
            alpn = "[ALPN: {}] ".format(
                strutils.bytes_to_escaped_str(self.alpn_proto_negotiated)
            )
        else:
            alpn = ""
        return "<ClientConnection: {tls}{alpn}{address}>".format(
            tls=tls,
            alpn=alpn,
            address=human.format_address(self.address),
        )

    def __eq__(self, other):
        # Equality is by generated id, not by address or socket.
        if isinstance(other, ClientConnection):
            return self.id == other.id
        return False

    def __hash__(self):
        return hash(self.id)

    # Sans-io attributes.
    # Class-level defaults kept for state compatibility with the sans-io
    # connection objects; they are serialized but unused by this legacy class.
    state = 0
    sockname = ("", 0)
    error = None
    tls = None
    certificate_list = ()
    alpn_offers = None
    cipher_list = None

    # Declares which attributes (and their types) are (de)serialized by
    # stateobject.StateObject.
    _stateobject_attributes = dict(
        id=str,
        address=tuple,
        tls_established=bool,
        mitmcert=certs.Cert,
        timestamp_start=float,
        timestamp_tls_setup=float,
        timestamp_end=float,
        sni=str,
        cipher_name=str,
        alpn_proto_negotiated=bytes,
        tls_version=str,
        tls_extensions=typing.List[typing.Tuple[int, bytes]],
        # sans-io exclusives
        state=int,
        sockname=tuple,
        error=str,
        tls=bool,
        certificate_list=typing.List[certs.Cert],
        alpn_offers=typing.List[bytes],
        cipher_list=typing.List[str],
    )

    @property
    def clientcert(self) -> typing.Optional[certs.Cert]:  # pragma: no cover
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(".clientcert is deprecated, use .certificate_list instead.", PendingDeprecationWarning)
        if self.certificate_list:
            return self.certificate_list[0]
        else:
            return None

    @clientcert.setter
    def clientcert(self, val):  # pragma: no cover
        warnings.warn(".clientcert is deprecated, use .certificate_list instead.", PendingDeprecationWarning)
        if val:
            self.certificate_list = [val]
        else:
            self.certificate_list = []

    def send(self, message):
        # Accepts either bytes or a list of byte chunks.
        if isinstance(message, list):
            message = b''.join(message)
        self.wfile.write(message)
        self.wfile.flush()

    @classmethod
    def from_state(cls, state):
        """Restore a connection object (without a live socket) from state."""
        f = cls(None, tuple(), None)
        f.set_state(state)
        return f

    @classmethod
    def make_dummy(cls, address):
        """Create a placeholder connection for the given address (no socket)."""
        return cls.from_state(dict(
            id=str(uuid.uuid4()),
            address=address,
            mitmcert=None,
            tls_established=False,
            timestamp_start=None,
            timestamp_end=None,
            timestamp_tls_setup=None,
            sni=None,
            cipher_name=None,
            alpn_proto_negotiated=None,
            tls_version=None,
            tls_extensions=None,
            state=0,
            sockname=("", 0),
            error=None,
            tls=False,
            certificate_list=[],
            alpn_offers=[],
            cipher_list=[],
        ))

    def convert_to_tls(self, cert, *args, **kwargs):
        """Upgrade the connection to TLS and record handshake metadata."""
        # Unfortunately OpenSSL provides no way to expose all TLS extensions, so we do this dance
        # here and use our Kaitai parser.
        try:
            client_hello = tls.ClientHello.from_file(self.rfile)
        except exceptions.TlsProtocolException:  # pragma: no cover
            pass  # if this fails, we don't want everything to go down.
        else:
            self.tls_extensions = client_hello.extensions

        super().convert_to_tls(cert, *args, **kwargs)
        self.timestamp_tls_setup = time.time()
        self.mitmcert = cert
        sni = self.connection.get_servername()
        if sni:
            self.sni = sni.decode("idna")
        else:
            self.sni = None
        self.cipher_name = self.connection.get_cipher_name()
        self.alpn_proto_negotiated = self.get_alpn_proto_negotiated()
        self.tls_version = self.connection.get_protocol_version_name()

    def finish(self):
        super().finish()
        self.timestamp_end = time.time()
class ServerConnection(tcp.TCPClient, stateobject.StateObject):
    """
    A server connection

    Attributes:
        address: Remote address. Can be both a domain or an IP address.
        ip_address: Resolved remote IP address.
        source_address: Local IP address or client's source IP address.
        tls_established: True if TLS is established, False otherwise
        sni: Server Name Indication sent by the proxy during the TLS handshake
        alpn_proto_negotiated: The negotiated application protocol
        tls_version: TLS version
        via: The underlying server connection (e.g. the connection to the upstream proxy in upstream proxy mode)
        timestamp_start: Connection start timestamp
        timestamp_tcp_setup: TCP ACK received timestamp
        timestamp_tls_setup: TLS established timestamp
        timestamp_end: Connection end timestamp
    """

    def __init__(self, address, source_address=None, spoof_source_address=None):
        tcp.TCPClient.__init__(self, address, source_address, spoof_source_address)
        # Random id gives every connection a stable identity for __eq__/__hash__.
        self.id = str(uuid.uuid4())
        self.alpn_proto_negotiated = None
        self.tls_version = None
        self.via = None
        self.timestamp_start = None
        self.timestamp_end = None
        self.timestamp_tcp_setup = None
        self.timestamp_tls_setup = None

    def connected(self):
        # Live only while a socket exists and finish() has not yet run.
        return bool(self.connection) and not self.finished

    def __repr__(self):
        if self.tls_established and self.sni:
            tls = "[{}: {}] ".format(self.tls_version or "TLS", self.sni)
        elif self.tls_established:
            tls = "[{}] ".format(self.tls_version or "TLS")
        else:
            tls = ""
        if self.alpn_proto_negotiated:
            alpn = "[ALPN: {}] ".format(
                strutils.bytes_to_escaped_str(self.alpn_proto_negotiated)
            )
        else:
            alpn = ""
        return "<ServerConnection: {tls}{alpn}{address}>".format(
            tls=tls,
            alpn=alpn,
            address=human.format_address(self.address),
        )

    def __eq__(self, other):
        # Equality is by generated id, not by address or socket.
        if isinstance(other, ServerConnection):
            return self.id == other.id
        return False

    def __hash__(self):
        return hash(self.id)

    # Sans-io attributes.
    # Class-level defaults kept for state compatibility with the sans-io
    # connection objects; they are serialized but unused by this legacy class.
    state = 0
    error = None
    tls = None
    certificate_list = ()
    alpn_offers = None
    cipher_name = None
    cipher_list = None
    via2 = None

    # Declares which attributes (and their types) are (de)serialized by
    # stateobject.StateObject. Note: "via" is added after the class body,
    # because its type is ServerConnection itself.
    _stateobject_attributes = dict(
        id=str,
        address=tuple,
        ip_address=tuple,
        source_address=tuple,
        tls_established=bool,
        sni=str,
        alpn_proto_negotiated=bytes,
        tls_version=str,
        timestamp_start=float,
        timestamp_tcp_setup=float,
        timestamp_tls_setup=float,
        timestamp_end=float,
        # sans-io exclusives
        state=int,
        error=str,
        tls=bool,
        certificate_list=typing.List[certs.Cert],
        alpn_offers=typing.List[bytes],
        cipher_name=str,
        cipher_list=typing.List[str],
        via2=None,
    )

    @property
    def cert(self) -> typing.Optional[certs.Cert]:  # pragma: no cover
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(".cert is deprecated, use .certificate_list instead.", PendingDeprecationWarning)
        if self.certificate_list:
            return self.certificate_list[0]
        else:
            return None

    @cert.setter
    def cert(self, val):  # pragma: no cover
        warnings.warn(".cert is deprecated, use .certificate_list instead.", PendingDeprecationWarning)
        if val:
            self.certificate_list = [val]
        else:
            self.certificate_list = []

    @classmethod
    def from_state(cls, state):
        """Restore a connection object (without a live socket) from state."""
        f = cls(tuple())
        f.set_state(state)
        return f

    @classmethod
    def make_dummy(cls, address):
        """Create a placeholder connection for the given address (no socket)."""
        return cls.from_state(dict(
            id=str(uuid.uuid4()),
            address=address,
            ip_address=address,
            sni=address[0],
            alpn_proto_negotiated=None,
            tls_version=None,
            source_address=('', 0),
            tls_established=False,
            timestamp_start=None,
            timestamp_tcp_setup=None,
            timestamp_tls_setup=None,
            timestamp_end=None,
            via=None,
            state=0,
            error=None,
            tls=False,
            certificate_list=[],
            alpn_offers=[],
            cipher_name=None,
            cipher_list=[],
            via2=None,
        ))

    def connect(self):
        # Record timestamps around the TCP handshake.
        self.timestamp_start = time.time()
        tcp.TCPClient.connect(self)
        self.timestamp_tcp_setup = time.time()

    def send(self, message):
        # Accepts either bytes or a list of byte chunks.
        if isinstance(message, list):
            message = b''.join(message)
        self.wfile.write(message)
        self.wfile.flush()

    def establish_tls(self, *, sni=None, client_certs=None, **kwargs):
        """Upgrade the connection to TLS, optionally with a client certificate.

        client_certs may be a single PEM file, or a directory containing
        per-host certificates named "<host>.pem".
        """
        if sni and not isinstance(sni, str):
            raise ValueError("sni must be str, not " + type(sni).__name__)
        client_cert = None
        if client_certs:
            client_certs = os.path.expanduser(client_certs)
            if os.path.isfile(client_certs):
                client_cert = client_certs
            else:
                # Directory of certs: look up by SNI (or IDNA-encoded host).
                path = os.path.join(
                    client_certs,
                    (sni or self.address[0].encode("idna").decode()) + ".pem"
                )
                if os.path.exists(path):
                    client_cert = path

        self.convert_to_tls(cert=client_cert, sni=sni, **kwargs)
        self.sni = sni
        self.alpn_proto_negotiated = self.get_alpn_proto_negotiated()
        self.tls_version = self.connection.get_protocol_version_name()
        self.timestamp_tls_setup = time.time()

    def finish(self):
        tcp.TCPClient.finish(self)
        self.timestamp_end = time.time()
# "via" refers to ServerConnection itself, so it can only be registered
# after the class body has been evaluated.
ServerConnection._stateobject_attributes["via"] = ServerConnection

View File

@ -6,7 +6,7 @@ from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import stateobject
from mitmproxy import version
from mitmproxy.utils import compat
from mitmproxy.proxy import context
class Error(stateobject.StateObject):
@ -64,8 +64,8 @@ class Flow(stateobject.StateObject):
def __init__(
self,
type: str,
client_conn: compat.Client,
server_conn: compat.Server,
client_conn: context.Client,
server_conn: context.Server,
live: bool=None
) -> None:
self.type = type
@ -85,8 +85,8 @@ class Flow(stateobject.StateObject):
_stateobject_attributes = dict(
id=str,
error=Error,
client_conn=compat.Client,
server_conn=compat.Server,
client_conn=context.Client,
server_conn=context.Server,
type=str,
intercepted=bool,
is_replay=str,

View File

@ -4,7 +4,7 @@ from typing import Optional, Tuple
from mitmproxy import flow
from mitmproxy import version
from mitmproxy.net import http
from mitmproxy.utils import compat
from mitmproxy.proxy import context
HTTPRequest = http.Request
HTTPResponse = http.Response
@ -23,8 +23,8 @@ class HTTPFlow(flow.Flow):
object. This might happen, for instance, when a response was received
from the server, but there was an error sending it back to the client.
"""
server_conn: compat.Server
client_conn: compat.Client
server_conn: context.Server
client_conn: context.Client
intercepted: bool = False
""" Is this flow currently being intercepted? """
mode: str

View File

@ -1,7 +1,6 @@
from .io import FlowWriter, FlowReader, FilteredFlowWriter, read_flows_from_paths
from .db import DBHandler
__all__ = [
"FlowWriter", "FlowReader", "FilteredFlowWriter", "read_flows_from_paths", "DBHandler"
"FlowWriter", "FlowReader", "FilteredFlowWriter", "read_flows_from_paths"
]

View File

@ -1,40 +0,0 @@
import sqlite3
import os
from mitmproxy.io import protobuf
class DBHandler:
    """
    This class is wrapping up connection to SQLITE DB.
    """

    def __init__(self, db_path, mode='load'):
        # In write mode, start from a fresh database file.
        if mode == 'write' and os.path.isfile(db_path):
            os.remove(db_path)
        self.db_path = db_path
        self._con = sqlite3.connect(self.db_path)
        self._c = self._con.cursor()
        self._create_db()

    def _create_db(self):
        # Single-table schema: one protobuf blob per flow.
        with self._con:
            self._con.execute(
                'CREATE TABLE IF NOT EXISTS FLOWS('
                'id INTEGER PRIMARY KEY,'
                'pbuf_blob BLOB)'
            )

    def store(self, flows):
        # Serialize each flow to a protobuf blob and insert them in one batch.
        blobs = [(protobuf.dumps(f),) for f in flows]
        with self._con:
            self._con.executemany('INSERT INTO FLOWS (pbuf_blob) values (?)', blobs)

    def load(self):
        # Deserialize every stored blob back into a flow object.
        self._c.execute('SELECT pbuf_blob FROM FLOWS')
        return [protobuf.loads(row[0]) for row in self._c.fetchall()]

View File

@ -1,93 +0,0 @@
syntax='proto2';
message HTTPFlow {
optional HTTPRequest request = 1;
optional HTTPResponse response = 2;
optional HTTPError error = 3;
optional ClientConnection client_conn = 4;
optional ServerConnection server_conn = 5;
optional bool intercepted = 6;
optional bool marked = 7;
optional string mode = 8;
optional string id = 9;
}
message HTTPRequest {
optional string first_line_format = 1;
optional string method = 2;
optional string scheme = 3;
optional string host = 4;
optional int32 port = 5;
optional string path = 6;
optional string http_version = 7;
repeated HTTPHeader headers = 8;
optional bytes content = 9;
optional double timestamp_start = 10;
optional double timestamp_end = 11;
optional bool is_replay = 12;
}
message HTTPResponse {
optional string http_version = 1;
optional int32 status_code = 2;
optional string reason = 3;
repeated HTTPHeader headers = 4;
optional bytes content = 5;
optional double timestamp_start = 6;
optional double timestamp_end = 7;
optional bool is_replay = 8;
}
message HTTPError {
optional string msg = 1;
optional double timestamp = 2;
}
message HTTPHeader {
optional string name = 1;
optional string value = 2;
}
message Address {
optional string host = 1;
optional int32 port = 2;
}
message ClientConnection {
optional string id = 1;
optional Address address = 2;
optional bool tls_established = 3;
optional string clientcert = 4;
optional string mitmcert = 5;
optional double timestamp_start = 6;
optional double timestamp_tls_setup = 7;
optional double timestamp_end = 8;
optional string sni = 9;
optional string cipher_name = 10;
optional bytes alpn_proto_negotiated = 11;
optional string tls_version = 12;
repeated TLSExtension tls_extensions = 13;
}
message ServerConnection {
optional string id = 1;
optional Address address = 2;
optional Address ip_address = 3;
optional Address source_address = 4;
optional bool tls_established = 5;
optional string cert = 6;
optional string sni = 7;
optional bytes alpn_proto_negotiated = 8;
optional string tls_version = 9;
optional double timestamp_start = 10;
optional double timestamp_tcp_setup = 11;
optional double timestamp_tls_setup = 12;
optional double timestamp_end = 13;
optional ServerConnection via = 14;
}
message TLSExtension {
optional int64 int = 1;
optional bytes bytes = 2;
}

View File

@ -1,787 +0,0 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: http.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='http.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\nhttp.proto\"\xf4\x01\n\x08HTTPFlow\x12\x1d\n\x07request\x18\x01 \x01(\x0b\x32\x0c.HTTPRequest\x12\x1f\n\x08response\x18\x02 \x01(\x0b\x32\r.HTTPResponse\x12\x19\n\x05\x65rror\x18\x03 \x01(\x0b\x32\n.HTTPError\x12&\n\x0b\x63lient_conn\x18\x04 \x01(\x0b\x32\x11.ClientConnection\x12&\n\x0bserver_conn\x18\x05 \x01(\x0b\x32\x11.ServerConnection\x12\x13\n\x0bintercepted\x18\x06 \x01(\x08\x12\x0e\n\x06marked\x18\x07 \x01(\x08\x12\x0c\n\x04mode\x18\x08 \x01(\t\x12\n\n\x02id\x18\t \x01(\t\"\xfa\x01\n\x0bHTTPRequest\x12\x19\n\x11\x66irst_line_format\x18\x01 \x01(\t\x12\x0e\n\x06method\x18\x02 \x01(\t\x12\x0e\n\x06scheme\x18\x03 \x01(\t\x12\x0c\n\x04host\x18\x04 \x01(\t\x12\x0c\n\x04port\x18\x05 \x01(\x05\x12\x0c\n\x04path\x18\x06 \x01(\t\x12\x14\n\x0chttp_version\x18\x07 \x01(\t\x12\x1c\n\x07headers\x18\x08 \x03(\x0b\x32\x0b.HTTPHeader\x12\x0f\n\x07\x63ontent\x18\t \x01(\x0c\x12\x17\n\x0ftimestamp_start\x18\n \x01(\x01\x12\x15\n\rtimestamp_end\x18\x0b \x01(\x01\x12\x11\n\tis_replay\x18\x0c \x01(\x08\"\xbb\x01\n\x0cHTTPResponse\x12\x14\n\x0chttp_version\x18\x01 \x01(\t\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0e\n\x06reason\x18\x03 \x01(\t\x12\x1c\n\x07headers\x18\x04 \x03(\x0b\x32\x0b.HTTPHeader\x12\x0f\n\x07\x63ontent\x18\x05 \x01(\x0c\x12\x17\n\x0ftimestamp_start\x18\x06 \x01(\x01\x12\x15\n\rtimestamp_end\x18\x07 \x01(\x01\x12\x11\n\tis_replay\x18\x08 \x01(\x08\"+\n\tHTTPError\x12\x0b\n\x03msg\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x01\")\n\nHTTPHeader\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"%\n\x07\x41\x64\x64ress\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"\xc2\x02\n\x10\x43lientConnection\x12\n\n\x02id\x18\x01 \x01(\t\x12\x19\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x08.Address\x12\x17\n\x0ftls_established\x18\x03 \x01(\x08\x12\x12\n\nclientcert\x18\x04 \x01(\t\x12\x10\n\x08mitmcert\x18\x05 \x01(\t\x12\x17\n\x0ftimestamp_start\x18\x06 
\x01(\x01\x12\x1b\n\x13timestamp_tls_setup\x18\x07 \x01(\x01\x12\x15\n\rtimestamp_end\x18\x08 \x01(\x01\x12\x0b\n\x03sni\x18\t \x01(\t\x12\x13\n\x0b\x63ipher_name\x18\n \x01(\t\x12\x1d\n\x15\x61lpn_proto_negotiated\x18\x0b \x01(\x0c\x12\x13\n\x0btls_version\x18\x0c \x01(\t\x12%\n\x0etls_extensions\x18\r \x03(\x0b\x32\r.TLSExtension\"\xeb\x02\n\x10ServerConnection\x12\n\n\x02id\x18\x01 \x01(\t\x12\x19\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x08.Address\x12\x1c\n\nip_address\x18\x03 \x01(\x0b\x32\x08.Address\x12 \n\x0esource_address\x18\x04 \x01(\x0b\x32\x08.Address\x12\x17\n\x0ftls_established\x18\x05 \x01(\x08\x12\x0c\n\x04\x63\x65rt\x18\x06 \x01(\t\x12\x0b\n\x03sni\x18\x07 \x01(\t\x12\x1d\n\x15\x61lpn_proto_negotiated\x18\x08 \x01(\x0c\x12\x13\n\x0btls_version\x18\t \x01(\t\x12\x17\n\x0ftimestamp_start\x18\n \x01(\x01\x12\x1b\n\x13timestamp_tcp_setup\x18\x0b \x01(\x01\x12\x1b\n\x13timestamp_tls_setup\x18\x0c \x01(\x01\x12\x15\n\rtimestamp_end\x18\r \x01(\x01\x12\x1e\n\x03via\x18\x0e \x01(\x0b\x32\x11.ServerConnection\"*\n\x0cTLSExtension\x12\x0b\n\x03int\x18\x01 \x01(\x03\x12\r\n\x05\x62ytes\x18\x02 \x01(\x0c'
)
_HTTPFLOW = _descriptor.Descriptor(
name='HTTPFlow',
full_name='HTTPFlow',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='HTTPFlow.request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='response', full_name='HTTPFlow.response', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='HTTPFlow.error', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_conn', full_name='HTTPFlow.client_conn', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='server_conn', full_name='HTTPFlow.server_conn', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='intercepted', full_name='HTTPFlow.intercepted', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='marked', full_name='HTTPFlow.marked', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mode', full_name='HTTPFlow.mode', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='HTTPFlow.id', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=259,
)
_HTTPREQUEST = _descriptor.Descriptor(
name='HTTPRequest',
full_name='HTTPRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='first_line_format', full_name='HTTPRequest.first_line_format', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='method', full_name='HTTPRequest.method', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scheme', full_name='HTTPRequest.scheme', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='HTTPRequest.host', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='port', full_name='HTTPRequest.port', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='path', full_name='HTTPRequest.path', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='http_version', full_name='HTTPRequest.http_version', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='headers', full_name='HTTPRequest.headers', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='content', full_name='HTTPRequest.content', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_start', full_name='HTTPRequest.timestamp_start', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_end', full_name='HTTPRequest.timestamp_end', index=10,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_replay', full_name='HTTPRequest.is_replay', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=262,
serialized_end=512,
)
_HTTPRESPONSE = _descriptor.Descriptor(
name='HTTPResponse',
full_name='HTTPResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='http_version', full_name='HTTPResponse.http_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status_code', full_name='HTTPResponse.status_code', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reason', full_name='HTTPResponse.reason', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='headers', full_name='HTTPResponse.headers', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='content', full_name='HTTPResponse.content', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_start', full_name='HTTPResponse.timestamp_start', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_end', full_name='HTTPResponse.timestamp_end', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_replay', full_name='HTTPResponse.is_replay', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=515,
serialized_end=702,
)
_HTTPERROR = _descriptor.Descriptor(
name='HTTPError',
full_name='HTTPError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='HTTPError.msg', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='HTTPError.timestamp', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=704,
serialized_end=747,
)
_HTTPHEADER = _descriptor.Descriptor(
name='HTTPHeader',
full_name='HTTPHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='HTTPHeader.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='HTTPHeader.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=749,
serialized_end=790,
)
_ADDRESS = _descriptor.Descriptor(
name='Address',
full_name='Address',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='Address.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='port', full_name='Address.port', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=792,
serialized_end=829,
)
_CLIENTCONNECTION = _descriptor.Descriptor(
name='ClientConnection',
full_name='ClientConnection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ClientConnection.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='address', full_name='ClientConnection.address', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls_established', full_name='ClientConnection.tls_established', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clientcert', full_name='ClientConnection.clientcert', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mitmcert', full_name='ClientConnection.mitmcert', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_start', full_name='ClientConnection.timestamp_start', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_tls_setup', full_name='ClientConnection.timestamp_tls_setup', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_end', full_name='ClientConnection.timestamp_end', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sni', full_name='ClientConnection.sni', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cipher_name', full_name='ClientConnection.cipher_name', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='alpn_proto_negotiated', full_name='ClientConnection.alpn_proto_negotiated', index=10,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls_version', full_name='ClientConnection.tls_version', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls_extensions', full_name='ClientConnection.tls_extensions', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=832,
serialized_end=1154,
)
_SERVERCONNECTION = _descriptor.Descriptor(
name='ServerConnection',
full_name='ServerConnection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ServerConnection.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='address', full_name='ServerConnection.address', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ip_address', full_name='ServerConnection.ip_address', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_address', full_name='ServerConnection.source_address', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls_established', full_name='ServerConnection.tls_established', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cert', full_name='ServerConnection.cert', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sni', full_name='ServerConnection.sni', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='alpn_proto_negotiated', full_name='ServerConnection.alpn_proto_negotiated', index=7,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls_version', full_name='ServerConnection.tls_version', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_start', full_name='ServerConnection.timestamp_start', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_tcp_setup', full_name='ServerConnection.timestamp_tcp_setup', index=10,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_tls_setup', full_name='ServerConnection.timestamp_tls_setup', index=11,
number=12, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp_end', full_name='ServerConnection.timestamp_end', index=12,
number=13, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='via', full_name='ServerConnection.via', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1157,
serialized_end=1520,
)
# Generated protobuf descriptor for the TLSExtension message: one raw TLS
# extension captured as an (int id, bytes payload) pair.
# NOTE: machine-generated protoc output -- do not edit by hand.
_TLSEXTENSION = _descriptor.Descriptor(
  name='TLSExtension',
  full_name='TLSExtension',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='int', full_name='TLSExtension.int', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='bytes', full_name='TLSExtension.bytes', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1522,
  serialized_end=1564,
)
# --- Wire up cross-message field references (generated protoc output; do not edit) ---
_HTTPFLOW.fields_by_name['request'].message_type = _HTTPREQUEST
_HTTPFLOW.fields_by_name['response'].message_type = _HTTPRESPONSE
_HTTPFLOW.fields_by_name['error'].message_type = _HTTPERROR
_HTTPFLOW.fields_by_name['client_conn'].message_type = _CLIENTCONNECTION
_HTTPFLOW.fields_by_name['server_conn'].message_type = _SERVERCONNECTION
_HTTPREQUEST.fields_by_name['headers'].message_type = _HTTPHEADER
_HTTPRESPONSE.fields_by_name['headers'].message_type = _HTTPHEADER
_CLIENTCONNECTION.fields_by_name['address'].message_type = _ADDRESS
_CLIENTCONNECTION.fields_by_name['tls_extensions'].message_type = _TLSEXTENSION
_SERVERCONNECTION.fields_by_name['address'].message_type = _ADDRESS
_SERVERCONNECTION.fields_by_name['ip_address'].message_type = _ADDRESS
_SERVERCONNECTION.fields_by_name['source_address'].message_type = _ADDRESS
# ServerConnection.via is self-referential (a chained upstream connection).
_SERVERCONNECTION.fields_by_name['via'].message_type = _SERVERCONNECTION
# --- Register every message descriptor with the file descriptor ---
DESCRIPTOR.message_types_by_name['HTTPFlow'] = _HTTPFLOW
DESCRIPTOR.message_types_by_name['HTTPRequest'] = _HTTPREQUEST
DESCRIPTOR.message_types_by_name['HTTPResponse'] = _HTTPRESPONSE
DESCRIPTOR.message_types_by_name['HTTPError'] = _HTTPERROR
DESCRIPTOR.message_types_by_name['HTTPHeader'] = _HTTPHEADER
DESCRIPTOR.message_types_by_name['Address'] = _ADDRESS
DESCRIPTOR.message_types_by_name['ClientConnection'] = _CLIENTCONNECTION
DESCRIPTOR.message_types_by_name['ServerConnection'] = _SERVERCONNECTION
DESCRIPTOR.message_types_by_name['TLSExtension'] = _TLSEXTENSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Build the concrete message classes from the descriptors via reflection ---
HTTPFlow = _reflection.GeneratedProtocolMessageType('HTTPFlow', (_message.Message,), {
  'DESCRIPTOR' : _HTTPFLOW,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:HTTPFlow)
  })
_sym_db.RegisterMessage(HTTPFlow)
HTTPRequest = _reflection.GeneratedProtocolMessageType('HTTPRequest', (_message.Message,), {
  'DESCRIPTOR' : _HTTPREQUEST,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:HTTPRequest)
  })
_sym_db.RegisterMessage(HTTPRequest)
HTTPResponse = _reflection.GeneratedProtocolMessageType('HTTPResponse', (_message.Message,), {
  'DESCRIPTOR' : _HTTPRESPONSE,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:HTTPResponse)
  })
_sym_db.RegisterMessage(HTTPResponse)
HTTPError = _reflection.GeneratedProtocolMessageType('HTTPError', (_message.Message,), {
  'DESCRIPTOR' : _HTTPERROR,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:HTTPError)
  })
_sym_db.RegisterMessage(HTTPError)
HTTPHeader = _reflection.GeneratedProtocolMessageType('HTTPHeader', (_message.Message,), {
  'DESCRIPTOR' : _HTTPHEADER,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:HTTPHeader)
  })
_sym_db.RegisterMessage(HTTPHeader)
Address = _reflection.GeneratedProtocolMessageType('Address', (_message.Message,), {
  'DESCRIPTOR' : _ADDRESS,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:Address)
  })
_sym_db.RegisterMessage(Address)
ClientConnection = _reflection.GeneratedProtocolMessageType('ClientConnection', (_message.Message,), {
  'DESCRIPTOR' : _CLIENTCONNECTION,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:ClientConnection)
  })
_sym_db.RegisterMessage(ClientConnection)
ServerConnection = _reflection.GeneratedProtocolMessageType('ServerConnection', (_message.Message,), {
  'DESCRIPTOR' : _SERVERCONNECTION,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:ServerConnection)
  })
_sym_db.RegisterMessage(ServerConnection)
TLSExtension = _reflection.GeneratedProtocolMessageType('TLSExtension', (_message.Message,), {
  'DESCRIPTOR' : _TLSEXTENSION,
  '__module__' : 'http_pb2'
  # @@protoc_insertion_point(class_scope:TLSExtension)
  })
_sym_db.RegisterMessage(TLSExtension)
# @@protoc_insertion_point(module_scope)

View File

@ -1,202 +0,0 @@
"""
Note: This module is currently unmaintained as we don't use it internally.
We're happy to accept PRs that fix bugs, but we won't provide any continuous maintenance.
"""
import typing
from mitmproxy import flow
from mitmproxy import exceptions
from mitmproxy.http import HTTPFlow, HTTPResponse, HTTPRequest
from mitmproxy.certs import Cert
from mitmproxy.connections import ClientConnection, ServerConnection
from mitmproxy.io.proto import http_pb2
def _move_attrs(s_obj, d_obj, attrs):
for attr in attrs:
if not isinstance(d_obj, dict):
if hasattr(s_obj, attr) and getattr(s_obj, attr) is not None:
setattr(d_obj, attr, getattr(s_obj, attr))
else:
if hasattr(s_obj, attr) and getattr(s_obj, attr) is not None:
# ugly fix to set None in empty str or bytes fields
if getattr(s_obj, attr) == "" or getattr(s_obj, attr) == b"":
d_obj[attr] = None
else:
d_obj[attr] = getattr(s_obj, attr)
def _dump_http_response(res: HTTPResponse) -> http_pb2.HTTPResponse:
    """Serialize an HTTPResponse into its protobuf message."""
    pres = http_pb2.HTTPResponse()
    _move_attrs(res, pres, ['http_version', 'status_code', 'reason',
                            'content', 'timestamp_start', 'timestamp_end', 'is_replay'])
    if res.headers:
        for name, value in res.headers.fields:
            entry = pres.headers.add()
            entry.name = name
            entry.value = value
    return pres
def _dump_http_request(req: HTTPRequest) -> http_pb2.HTTPRequest:
    """Serialize an HTTPRequest into its protobuf message."""
    preq = http_pb2.HTTPRequest()
    _move_attrs(req, preq, ['first_line_format', 'method', 'scheme', 'host', 'port', 'path', 'http_version', 'content',
                            'timestamp_start', 'timestamp_end', 'is_replay'])
    if req.headers:
        for name, value in req.headers.fields:
            entry = preq.headers.add()
            entry.name = name
            entry.value = value
    return preq
def _dump_http_client_conn(cc: ClientConnection) -> http_pb2.ClientConnection:
    """Serialize a ClientConnection into its protobuf message."""
    pcc = http_pb2.ClientConnection()
    _move_attrs(cc, pcc, ['id', 'tls_established', 'timestamp_start', 'timestamp_tls_setup', 'timestamp_end', 'sni',
                          'cipher_name', 'alpn_proto_negotiated', 'tls_version'])
    # Certificates are stored in PEM form.
    for cert_attr in ('clientcert', 'mitmcert'):
        cert = getattr(cc, cert_attr, None)
        if cert is not None:
            setattr(pcc, cert_attr, cert.to_pem())
    if cc.tls_extensions:
        for ext_type, ext_data in cc.tls_extensions:
            ext = pcc.tls_extensions.add()
            ext.int = ext_type
            ext.bytes = ext_data
    if cc.address:
        pcc.address.host = cc.address[0]
        pcc.address.port = cc.address[1]
    return pcc
def _dump_http_server_conn(sc: ServerConnection) -> http_pb2.ServerConnection:
    """Serialize a ServerConnection (recursively, via `via`) into protobuf."""
    psc = http_pb2.ServerConnection()
    _move_attrs(sc, psc, ['id', 'tls_established', 'sni', 'alpn_proto_negotiated', 'tls_version',
                          'timestamp_start', 'timestamp_tcp_setup', 'timestamp_tls_setup', 'timestamp_end'])
    for addr_attr in ('address', 'ip_address', 'source_address'):
        addr = getattr(sc, addr_attr, None)
        if addr is not None:
            target = getattr(psc, addr_attr)
            target.host = addr[0]
            target.port = addr[1]
    if sc.cert:
        psc.cert = sc.cert.to_pem()
    if sc.via:
        # Chained upstream connection: recurse.
        psc.via.MergeFrom(_dump_http_server_conn(sc.via))
    return psc
def _dump_http_error(e: flow.Error) -> http_pb2.HTTPError:
    """Serialize a flow.Error into its protobuf message."""
    pe = http_pb2.HTTPError()
    for attr in ('msg', 'timestamp'):
        value = getattr(e, attr, None)
        if value is not None:
            setattr(pe, attr, value)
    return pe
def dump_http(f: flow.Flow) -> http_pb2.HTTPFlow:
    """Convert a flow into its protobuf representation.

    Each populated part (request, response, connections, error) is serialized
    by its dedicated ``_dump_http_*`` helper and merged into the message.
    """
    # Explicit dispatch table instead of eval() on a constructed function
    # name: safer, greppable, and visible to static analysis.
    dumpers = {
        'request': _dump_http_request,
        'response': _dump_http_response,
        'client_conn': _dump_http_client_conn,
        'server_conn': _dump_http_server_conn,
        'error': _dump_http_error,
    }
    pf = http_pb2.HTTPFlow()
    for part, dump in dumpers.items():
        value = getattr(f, part, None)
        if value:
            getattr(pf, part).MergeFrom(dump(value))
    _move_attrs(f, pf, ['intercepted', 'marked', 'mode', 'id'])
    return pf
def dumps(f: flow.Flow) -> bytes:
    """Serialize *f* to protobuf wire format; only HTTP flows are supported.

    Raises:
        exceptions.TypeError: if the flow is not an HTTP flow.
    """
    if f.type != "http":
        raise exceptions.TypeError("Flow types different than HTTP not supported yet!")
    return dump_http(f).SerializeToString()
def _load_http_request(o: http_pb2.HTTPRequest) -> HTTPRequest:
    """Reconstruct an HTTPRequest from its protobuf message."""
    d: dict = {}
    _move_attrs(o, d, ['host', 'port', 'method', 'scheme', 'authority', 'path', 'http_version', 'content',
                       'timestamp_start', 'timestamp_end'])
    # An empty content field was normalized to None by _move_attrs; restore b"".
    if d['content'] is None:
        d['content'] = b""
    d["headers"] = [(header.name.encode("utf-8"), header.value.encode("utf-8"))
                    for header in o.headers]
    return HTTPRequest(**d)
def _load_http_response(o: http_pb2.HTTPResponse) -> HTTPResponse:
    """Reconstruct an HTTPResponse from its protobuf message."""
    d: dict = {}
    _move_attrs(o, d, ['http_version', 'status_code', 'reason',
                       'content', 'timestamp_start', 'timestamp_end'])
    # An empty content field was normalized to None by _move_attrs; restore b"".
    if d['content'] is None:
        d['content'] = b""
    d["headers"] = [(header.name.encode("utf-8"), header.value.encode("utf-8"))
                    for header in o.headers]
    return HTTPResponse(**d)
def _load_http_client_conn(o: http_pb2.ClientConnection) -> ClientConnection:
    """Reconstruct a ClientConnection from its protobuf message."""
    d: dict = {}
    _move_attrs(o, d, ['id', 'tls_established', 'sni', 'cipher_name', 'alpn_proto_negotiated', 'tls_version',
                       'timestamp_start', 'timestamp_tcp_setup', 'timestamp_tls_setup', 'timestamp_end'])
    for cert_attr in ('clientcert', 'mitmcert'):
        pem = getattr(o, cert_attr, None)
        if pem:
            d[cert_attr] = Cert.from_pem(pem)
    if o.tls_extensions:
        d['tls_extensions'] = [(ext.int, ext.bytes) for ext in o.tls_extensions]
    if o.address:
        d['address'] = (o.address.host, o.address.port)
    # Build an empty connection and attach the restored attributes directly.
    cc = ClientConnection(None, tuple(), None)
    for key, value in d.items():
        setattr(cc, key, value)
    return cc
def _load_http_server_conn(o: http_pb2.ServerConnection) -> ServerConnection:
    """Reconstruct a ServerConnection (recursively, via `via`) from protobuf."""
    d: dict = {}
    _move_attrs(o, d, ['id', 'tls_established', 'sni', 'alpn_proto_negotiated', 'tls_version',
                       'timestamp_start', 'timestamp_tcp_setup', 'timestamp_tls_setup', 'timestamp_end'])
    for addr_attr in ('address', 'ip_address', 'source_address'):
        if hasattr(o, addr_attr):
            pb_addr = getattr(o, addr_attr)
            d[addr_attr] = (pb_addr.host, pb_addr.port)
    if o.cert:
        d['cert'] = Cert.from_pem(o.cert)
    if o.HasField('via'):
        d['via'] = _load_http_server_conn(o.via)
    # Build an empty connection and attach the restored attributes directly.
    sc = ServerConnection(tuple())
    for key, value in d.items():
        setattr(sc, key, value)
    return sc
def _load_http_error(o: http_pb2.HTTPError) -> typing.Optional[flow.Error]:
    """Reconstruct a flow.Error, or return None if the message is empty."""
    d = {}
    for attr in ('msg', 'timestamp'):
        value = getattr(o, attr, None)
        if value:
            d[attr] = value
    return flow.Error(**d) if d else None
def load_http(hf: http_pb2.HTTPFlow) -> HTTPFlow:
    """Reconstruct an HTTPFlow from its protobuf representation.

    Parts that are absent in the message are set to None on the flow.
    """
    # Explicit dispatch table instead of eval() on a constructed function
    # name: safer, greppable, and visible to static analysis.
    loaders = {
        'request': _load_http_request,
        'response': _load_http_response,
        'client_conn': _load_http_client_conn,
        'server_conn': _load_http_server_conn,
        'error': _load_http_error,
    }
    parts = {}
    for part, load in loaders.items():
        parts[part] = load(getattr(hf, part)) if hf.HasField(part) else None
    _move_attrs(hf, parts, ['intercepted', 'marked', 'mode', 'id'])
    # Start from a flow with placeholder connections, then attach the parts.
    f = HTTPFlow(ClientConnection(None, tuple(), None), ServerConnection(tuple()))
    for k, v in parts.items():
        setattr(f, k, v)
    return f
def loads(b: bytes, typ="http") -> HTTPFlow:
    """Deserialize protobuf wire-format bytes into an HTTPFlow.

    Args:
        b: bytes as produced by :func:`dumps`.
        typ: flow type; only "http" is currently supported.

    Raises:
        exceptions.TypeError: if *typ* is not "http".
    """
    # The original return annotation was typing.Union[HTTPFlow] -- a
    # one-member union; plain HTTPFlow is equivalent and clearer.
    if typ != 'http':
        raise exceptions.TypeError("Flow types different than HTTP not supported yet!")
    p = http_pb2.HTTPFlow()
    p.ParseFromString(b)
    return load_http(p)

View File

@ -1,22 +0,0 @@
PRAGMA foreign_keys = ON;

-- One row per captured flow; `content` is a blob (presumably the serialized
-- flow record -- confirm against the writer).
CREATE TABLE flow (
    id VARCHAR(36) PRIMARY KEY,
    content BLOB
);

-- Message bodies, stored separately and linked back to their flow.
CREATE TABLE body (
    id INTEGER PRIMARY KEY,
    flow_id VARCHAR(36),
    type_id INTEGER,
    content BLOB,
    FOREIGN KEY(flow_id) REFERENCES flow(id)
);

-- Free-form typed annotations attached to a flow.
CREATE TABLE annotation (
    id INTEGER PRIMARY KEY,
    flow_id VARCHAR(36),
    type VARCHAR(16),
    content BLOB,
    FOREIGN KEY(flow_id) REFERENCES flow(id)
);

View File

@ -1,9 +1,45 @@
"""
This module contains mitmproxy's core network proxy.
The most important primitives are:
- Layers: represent protocol layers, e.g. one for TCP, TLS, and so on. Layers are nested, so
a typical configuration might be ReverseProxy/TLS/TCP.
Most importantly, layers are implemented using the sans-io pattern (https://sans-io.readthedocs.io/).
This means that calls return immediately; there is no blocking sync or async code.
- Server: the proxy server handles all I/O. This is implemented using asyncio, but could be done any other way.
The ConnectionHandler is subclassed in the Proxyserver addon, which handles the communication with the
rest of mitmproxy.
- Events: When I/O actions occur at the proxy server, they are passed to the outermost layer as events,
e.g. "DataReceived" or "ConnectionClosed".
- Commands: In the other direction, layers can emit commands to higher layers or the proxy server.
This is used to e.g. send data, request for new connections to be opened, or to call mitmproxy's
event hooks.
- Context: The context is the connection context each layer is provided with, which is always a client connection
and sometimes also a server connection.
"""
from .config import ProxyConfig
from .root_context import RootContext
from .server import ProxyServer, DummyServer
class DummyServer:
    """A no-op stand-in for a real proxy server.

    Exposes the same surface (config, address, set_channel, serve_forever,
    shutdown) but performs no I/O.
    """

    # A dummy server is never bound to a socket.
    bound = False

    def __init__(self, config=None):
        self.config = config
        self.address = "dummy"

    def set_channel(self, channel):
        """No-op: a dummy server has no channel."""

    def serve_forever(self):
        """No-op: nothing to serve."""

    def shutdown(self):
        """No-op: nothing to shut down."""
__all__ = [
"ProxyServer", "DummyServer",
"DummyServer",
"ProxyConfig",
"RootContext"
]

View File

@ -10,10 +10,10 @@ import dataclasses
import re
from typing import Any, ClassVar, Dict, List, Literal, Type, Union, TYPE_CHECKING
from mitmproxy.proxy2.context import Connection, Server
from mitmproxy.proxy.context import Connection, Server
if TYPE_CHECKING:
import mitmproxy.proxy2.layer
import mitmproxy.proxy.layer
class Command:
@ -21,7 +21,7 @@ class Command:
Base class for all commands
"""
blocking: Union[bool, "mitmproxy.proxy2.layer.Layer"] = False
blocking: Union[bool, "mitmproxy.proxy.layer.Layer"] = False
"""
Determines if the command blocks until it has been completed.

View File

@ -10,7 +10,7 @@ from mitmproxy.net import server_spec
from mitmproxy.options import Options
if TYPE_CHECKING:
import mitmproxy.proxy2.layer
import mitmproxy.proxy.layer
class ConnectionState(Flag):
@ -306,7 +306,7 @@ class Context:
client: Client
server: Server
options: Options
layers: List["mitmproxy.proxy2.layer.Layer"]
layers: List["mitmproxy.proxy.layer.Layer"]
def __init__(
self,

View File

@ -7,8 +7,8 @@ import socket
import typing
from dataclasses import dataclass, is_dataclass
from mitmproxy.proxy2 import commands
from mitmproxy.proxy2.context import Connection
from mitmproxy.proxy import commands
from mitmproxy.proxy.context import Connection
class Event:

View File

@ -6,9 +6,9 @@ import textwrap
from abc import abstractmethod
from typing import Optional, List, ClassVar, Deque, NamedTuple, Generator, Any, TypeVar
from mitmproxy.proxy2 import commands, events
from mitmproxy.proxy2.commands import Command, Hook
from mitmproxy.proxy2.context import Connection, Context
from mitmproxy.proxy import commands, events
from mitmproxy.proxy.commands import Command, Hook
from mitmproxy.proxy.context import Connection, Context
T = TypeVar('T')
CommandGenerator = Generator[Command, Any, T]

View File

@ -7,11 +7,11 @@ from mitmproxy import flow, http
from mitmproxy.net import server_spec
from mitmproxy.net.http import url
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import commands, events, layer, tunnel
from mitmproxy.proxy2.context import Connection, ConnectionState, Context, Server
from mitmproxy.proxy2.layers import tls, websocket, tcp
from mitmproxy.proxy2.layers.http import _upstream_proxy
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy import commands, events, layer, tunnel
from mitmproxy.proxy.context import Connection, ConnectionState, Context, Server
from mitmproxy.proxy.layers import tls, websocket, tcp
from mitmproxy.proxy.layers.http import _upstream_proxy
from mitmproxy.proxy.utils import expect
from mitmproxy.utils import human
from ._base import HttpCommand, ReceiveHttp, StreamId, HttpConnection
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \

View File

@ -1,7 +1,7 @@
from dataclasses import dataclass
from mitmproxy.proxy2 import events, layer, commands
from mitmproxy.proxy2.context import Connection, Context
from mitmproxy.proxy import events, layer, commands
from mitmproxy.proxy.context import Connection, Context
StreamId = int

View File

@ -1,5 +1,5 @@
from mitmproxy import http
from mitmproxy.proxy2 import commands
from mitmproxy.proxy import commands
class HttpRequestHeadersHook(commands.Hook):

View File

@ -9,10 +9,10 @@ from mitmproxy import exceptions, http
from mitmproxy.net import http as net_http
from mitmproxy.net.http import http1, status_codes
from mitmproxy.net.http.http1 import read_sansio as http1_sansio
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.context import Connection, ConnectionState, Context
from mitmproxy.proxy2.layers.http._base import ReceiveHttp, StreamId
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy import commands, events, layer
from mitmproxy.proxy.context import Connection, ConnectionState, Context
from mitmproxy.proxy.layers.http._base import ReceiveHttp, StreamId
from mitmproxy.proxy.utils import expect
from mitmproxy.utils import human
from ._base import HttpConnection
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \

View File

@ -6,7 +6,7 @@ from mitmproxy import http
from mitmproxy.net import server_spec
from mitmproxy.net.http import http1
from mitmproxy.net.http.http1 import read_sansio as http1_sansio
from mitmproxy.proxy2 import commands, context, layer, tunnel
from mitmproxy.proxy import commands, context, layer, tunnel
from mitmproxy.utils import human

View File

@ -2,9 +2,9 @@ from abc import ABCMeta
from mitmproxy import platform
from mitmproxy.net import server_spec
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.layers import tls
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy import commands, events, layer
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.utils import expect
class HttpProxy(layer.Layer):

View File

@ -1,10 +1,10 @@
from typing import Optional
from mitmproxy import flow, tcp
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.commands import Hook
from mitmproxy.proxy2.context import ConnectionState, Context, Connection
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy import commands, events, layer
from mitmproxy.proxy.commands import Hook
from mitmproxy.proxy.context import ConnectionState, Context, Connection
from mitmproxy.proxy.utils import expect
class TcpStartHook(Hook):

View File

@ -7,9 +7,9 @@ from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy.net import tls as net_tls
from mitmproxy.proxy2 import commands, events, layer, tunnel
from mitmproxy.proxy2 import context
from mitmproxy.proxy2.commands import Hook
from mitmproxy.proxy import commands, events, layer, tunnel
from mitmproxy.proxy import context
from mitmproxy.proxy.commands import Hook
from mitmproxy.utils import human

View File

@ -5,10 +5,10 @@ import wsproto.extensions
import wsproto.frame_protocol
import wsproto.utilities
from mitmproxy import flow, websocket, http
from mitmproxy.proxy2 import commands, events, layer, context
from mitmproxy.proxy2.commands import Hook
from mitmproxy.proxy2.context import Context
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy import commands, events, layer, context
from mitmproxy.proxy.commands import Hook
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.utils import expect
from wsproto import ConnectionState
from wsproto.frame_protocol import CloseReason, Opcode

View File

@ -1,11 +0,0 @@
from .http_proxy import HttpProxy, HttpUpstreamProxy
from .reverse_proxy import ReverseProxy
from .socks_proxy import Socks5Proxy
from .transparent_proxy import TransparentProxy
__all__ = [
"HttpProxy", "HttpUpstreamProxy",
"ReverseProxy",
"Socks5Proxy",
"TransparentProxy"
]

View File

@ -1,26 +0,0 @@
from mitmproxy.proxy import protocol
class HttpProxy(protocol.Layer, protocol.ServerConnectionMixin):
    """Regular (explicit) HTTP proxy mode layer."""

    def __call__(self):
        next_layer = self.ctx.next_layer(self)
        try:
            next_layer()
        finally:
            # Always tear down the upstream connection when the inner layer returns.
            if self.server_conn.connected():
                self.disconnect()
class HttpUpstreamProxy(protocol.Layer, protocol.ServerConnectionMixin):
    """HTTP proxy mode that forwards traffic to a fixed upstream proxy server."""

    def __init__(self, ctx, server_address):
        super().__init__(ctx, server_address=server_address)

    def __call__(self):
        next_layer = self.ctx.next_layer(self)
        try:
            next_layer()
        finally:
            # Always tear down the upstream connection when the inner layer returns.
            if self.server_conn.connected():
                self.disconnect()

View File

@ -1,16 +0,0 @@
from mitmproxy.proxy import protocol
class ReverseProxy(protocol.Layer, protocol.ServerConnectionMixin):
    """Reverse proxy mode: all traffic is forwarded to one fixed server."""

    def __init__(self, ctx, server_address, server_tls):
        super().__init__(ctx, server_address=server_address)
        # Stored for consumers further down the stack; the name suggests it
        # controls TLS to the upstream server -- confirm against callers.
        self.server_tls = server_tls

    def __call__(self):
        next_layer = self.ctx.next_layer(self)
        try:
            next_layer()
        finally:
            # Always tear down the upstream connection when the inner layer returns.
            if self.server_conn.connected():
                self.disconnect()

View File

@ -1,57 +0,0 @@
from mitmproxy import exceptions
from mitmproxy.proxy import protocol
from mitmproxy.net import socks
class Socks5Proxy(protocol.Layer, protocol.ServerConnectionMixin):
    """
    SOCKS5 proxy mode: performs the SOCKS5 handshake with the client
    (greeting + CONNECT), then hands off to the next protocol layer.
    Only unauthenticated CONNECT is supported.
    """

    def __call__(self):
        try:
            # Parse Client Greeting
            client_greet = socks.ClientGreeting.from_file(self.client_conn.rfile, fail_early=True)
            client_greet.assert_socks5()
            if socks.METHOD.NO_AUTHENTICATION_REQUIRED not in client_greet.methods:
                raise socks.SocksError(
                    socks.METHOD.NO_ACCEPTABLE_METHODS,
                    "mitmproxy only supports SOCKS without authentication"
                )
            # Send Server Greeting
            server_greet = socks.ServerGreeting(
                socks.VERSION.SOCKS5,
                socks.METHOD.NO_AUTHENTICATION_REQUIRED
            )
            server_greet.to_file(self.client_conn.wfile)
            self.client_conn.wfile.flush()
            # Parse Connect Request
            connect_request = socks.Message.from_file(self.client_conn.rfile)
            connect_request.assert_socks5()
            if connect_request.msg != socks.CMD.CONNECT:
                raise socks.SocksError(
                    socks.REP.COMMAND_NOT_SUPPORTED,
                    "mitmproxy only supports SOCKS5 CONNECT"
                )
            # We always connect lazily, but we need to pretend to the client that we connected.
            connect_reply = socks.Message(
                socks.VERSION.SOCKS5,
                socks.REP.SUCCEEDED,
                connect_request.atyp,
                # dummy value, we don't have an upstream connection yet.
                connect_request.addr
            )
            connect_reply.to_file(self.client_conn.wfile)
            self.client_conn.wfile.flush()
        except (socks.SocksError, exceptions.TcpException) as e:
            # Wrap any handshake failure (protocol or transport) uniformly.
            raise exceptions.Socks5ProtocolException("SOCKS5 mode failure: %s" % repr(e))

        # The requested destination becomes the (lazily connected) server address.
        self.server_conn.address = connect_request.addr
        layer = self.ctx.next_layer(self)
        try:
            layer()
        finally:
            if self.server_conn.connected():
                self.disconnect()

View File

@ -1,22 +0,0 @@
from mitmproxy import exceptions
from mitmproxy import platform
from mitmproxy.proxy import protocol
class TransparentProxy(protocol.Layer, protocol.ServerConnectionMixin):
    """Transparent proxy mode: the original destination is recovered from the
    platform's redirection mechanism (platform.original_addr)."""

    def __init__(self, ctx):
        super().__init__(ctx)

    def __call__(self):
        try:
            self.set_server(platform.original_addr(self.client_conn.connection))
        except Exception as e:
            raise exceptions.ProtocolException("Transparent mode failure: %s" % repr(e))
        next_layer = self.ctx.next_layer(self)
        try:
            next_layer()
        finally:
            # Always tear down the upstream connection when the inner layer returns.
            if self.server_conn.connected():
                self.disconnect()

View File

@ -1,50 +0,0 @@
"""
In mitmproxy, protocols are implemented as a set of layers, which are composed
on top of each other. The first layer is usually the proxy mode, e.g. transparent
proxy or normal HTTP proxy. Next, various protocol layers are stacked on top of
each other - imagine WebSocket on top of an HTTP Upgrade request. An actual
mitmproxy connection may look as follows (outermost layer first):
Transparent HTTP proxy, no TLS:
- TransparentProxy
- Http1Layer
- HttpLayer
Regular proxy, CONNECT request with WebSocket over SSL:
- ReverseProxy
- Http1Layer
- HttpLayer
- TLSLayer
- WebSocketLayer (or TCPLayer)
Every layer acts as a read-only context for its inner layers (see
:py:class:`Layer`). To communicate with an outer layer, a layer can use
functions provided in the context. The next layer is always determined by a
call to :py:meth:`.next_layer() <mitmproxy.proxy.RootContext.next_layer>`,
which is provided by the root context.
Another subtle design goal of this architecture is that upstream connections
should be established as late as possible; this makes server replay without any
outgoing connections possible.
"""
from .base import Layer, ServerConnectionMixin
from .http import UpstreamConnectLayer
from .http import HttpLayer
from .http1 import Http1Layer
from .http2 import Http2Layer
from .websocket import WebSocketLayer
from .rawtcp import RawTCPLayer
from .tls import TlsLayer
__all__ = [
"Layer", "ServerConnectionMixin",
"TlsLayer",
"UpstreamConnectLayer",
"HttpLayer",
"Http1Layer",
"Http2Layer",
"WebSocketLayer",
"RawTCPLayer",
]

View File

@ -1,177 +0,0 @@
from mitmproxy import exceptions
from mitmproxy import connections
from mitmproxy import controller # noqa
from mitmproxy.proxy import config # noqa
class _LayerCodeCompletion:
    """
    Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
    """

    def __init__(self, **mixin_args):  # pragma: no cover
        super().__init__(**mixin_args)
        # The early return keeps everything below unreachable at runtime:
        # the assignments exist only so IDEs can infer attribute types on
        # Layer subclasses (attributes are actually provided via the ctx chain).
        if True:
            return
        self.config: config.ProxyConfig = None
        self.client_conn: connections.ClientConnection = None
        self.server_conn: connections.ServerConnection = None
        self.channel: controller.Channel = None
        self.ctx = None
        """@type: mitmproxy.proxy.protocol.Layer"""
class Layer(_LayerCodeCompletion):
    """
    Base class for all layers. All other protocol layers should inherit from this class.
    """

    def __init__(self, ctx, **mixin_args):
        """
        Store the parent layer/context.

        Each layer usually passes itself to its child layers as a context.
        Properties of the context are transparently mapped to the layer via
        ``__getattr__``, so that e.g. ``sub_layer.client_conn`` resolves
        through the chain of parent layers up to the root context:

        .. code-block:: python

            root_layer = Layer(None)
            root_layer.client_conn = 42
            sub_layer = Layer(root_layer)
            print(sub_layer.client_conn)  # 42

        The root layer is passed a :py:class:`mitmproxy.proxy.RootContext` object,
        which provides access to :py:attr:`.client_conn <mitmproxy.proxy.RootContext.client_conn>`,
        :py:attr:`.next_layer <mitmproxy.proxy.RootContext.next_layer>` and other basic attributes.

        Args:
            ctx: The (read-only) parent layer / context.
        """
        # The parent layer (or the RootContext for the outermost layer).
        self.ctx = ctx
        super().__init__(**mixin_args)

    def __call__(self):
        """Run the layer's logic; subclasses must implement this.

        Returns:
            Once the protocol has finished without exceptions.

        Raises:
            ~mitmproxy.exceptions.ProtocolException: if an exception occurs. No other exceptions must be raised.
        """
        raise NotImplementedError()

    def __getattr__(self, name):
        # Anything not found on this layer is looked up on the parent context.
        return getattr(self.ctx, name)
class ServerConnectionMixin:
    """
    Mixin that provides a layer with the capabilities to manage a server connection.
    The server address can be passed in the constructor or set by calling :py:meth:`set_server`.
    Subclasses are responsible for calling :py:meth:`disconnect` before returning.

    Recommended Usage:

    .. code-block:: python

        class MyLayer(Layer, ServerConnectionMixin):
            def __call__(self):
                try:
                    # Do something.
                finally:
                    if self.server_conn.connected():
                        self.disconnect()
    """

    def __init__(self, server_address=None):
        super().__init__()
        self.server_conn = self.__make_server_conn(server_address)
        self.__check_self_connect()

    def __check_self_connect(self):
        """
        We try to protect the proxy from _accidentally_ connecting to itself,
        e.g. because of a failed transparent lookup or an invalid configuration.
        """
        address = self.server_conn.address
        if address:
            forbidden_hosts = ["localhost", "127.0.0.1", "::1"]
            if self.config.options.listen_host:
                forbidden_hosts.append(self.config.options.listen_host)
            # Self-connect = same port as our listener AND a loopback/own host.
            self_connect = (
                address[1] == self.config.options.listen_port and
                address[0] in forbidden_hosts
            )
            if self_connect:
                raise exceptions.ProtocolException(
                    "Invalid server address: {}\r\n"
                    "The proxy shall not connect to itself.".format(repr(address))
                )

    def __make_server_conn(self, server_address):
        # When source-address spoofing is on and no explicit upstream bind
        # address is configured, bind to the client's own IP (ephemeral port).
        if self.config.options.spoof_source_address and self.config.options.upstream_bind_address == '':
            return connections.ServerConnection(
                server_address, (self.ctx.client_conn.address[0], 0), True)
        else:
            return connections.ServerConnection(
                server_address, (self.config.options.upstream_bind_address, 0),
                self.config.options.spoof_source_address
            )

    def set_server(self, address):
        """
        Sets a new server address. If there is an existing connection, it will be closed.
        """
        if self.server_conn.connected():
            self.disconnect()
        self.log("Set new server address: {}:{}".format(address[0], address[1]), "debug")
        self.server_conn.address = address
        self.__check_self_connect()

    def disconnect(self):
        """
        Deletes (and closes) an existing server connection.
        Must not be called if there is no existing connection.
        """
        self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
        address = self.server_conn.address
        self.server_conn.finish()
        self.server_conn.close()
        self.channel.tell("serverdisconnect", self.server_conn)
        # Prepare a fresh, unconnected connection object for the same address.
        self.server_conn = self.__make_server_conn(address)

    def connect(self):
        """
        Establishes a server connection.
        Must not be called if there is an existing connection.

        Raises:
            ~mitmproxy.exceptions.ProtocolException: if the connection could not be established.
        """
        if not self.server_conn.address:
            raise exceptions.ProtocolException("Cannot connect to server, no server address given.")
        try:
            self.server_conn.connect()
            self.log("serverconnect", "debug", [repr(self.server_conn.address)])
            self.channel.ask("serverconnect", self.server_conn)
        except exceptions.TcpException as e:
            raise exceptions.ProtocolException(
                "Server connection to {} failed: {}".format(
                    repr(self.server_conn.address), str(e)
                )
            )

View File

@ -1,533 +1,7 @@
import textwrap
import h2.exceptions
import time
import enum
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.net.http import url
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.websocket import WebSocketLayer
from mitmproxy.net import websocket
class _HttpTransmissionLayer(base.Layer):
    """
    Abstract base for HTTP transmission layers: declares the read/send
    primitives subclasses must implement, plus read_response/send_response
    helpers composed from them.
    """

    def read_request_headers(self, flow):
        raise NotImplementedError()

    def read_request_body(self, request):
        raise NotImplementedError()

    def read_request_trailers(self, request):
        raise NotImplementedError()

    def send_request(self, request):
        raise NotImplementedError()

    def read_response_headers(self):
        raise NotImplementedError()

    def read_response_body(self, request, response):
        raise NotImplementedError()
        # The unreachable yield makes this a generator function, so callers can
        # always iterate the return value; the raise fires on first iteration.
        yield "this is a generator"  # pragma: no cover

    def read_response_trailers(self, request, response):
        raise NotImplementedError()

    def read_response(self, request):
        # Assemble a full response: headers, then all body chunks, then trailers.
        response = self.read_response_headers()
        response.data.content = b"".join(
            self.read_response_body(request, response)
        )
        response.data.trailers = self.read_response_trailers(request, response)
        return response

    def send_response(self, response):
        if response.data.content is None:
            raise exceptions.HttpException("Cannot assemble flow with missing content")
        self.send_response_headers(response)
        self.send_response_body(response, [response.data.content])
        self.send_response_trailers(response)

    def send_response_headers(self, response):
        raise NotImplementedError()

    def send_response_body(self, response, chunks):
        raise NotImplementedError()

    def send_response_trailers(self, response, chunks):
        raise NotImplementedError()

    def check_close_connection(self, f):
        raise NotImplementedError()
class ConnectServerConnection:
    """
    "Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.

    All attribute access other than ``address`` is delegated to the real
    upstream connection (``via``, i.e. the context's server_conn).
    """

    def __init__(self, address, ctx):
        self.address = address
        self._ctx = ctx

    @property
    def via(self):
        # The actual upstream proxy connection carrying this tunnel.
        return self._ctx.server_conn

    def __getattr__(self, item):
        # Delegate everything we don't define ourselves to the real connection.
        return getattr(self.via, item)

    def connected(self):
        return self.via.connected()
class UpstreamConnectLayer(base.Layer):
    """
    Layer that tunnels traffic through an upstream proxy after a client
    CONNECT request: it re-issues the CONNECT towards the upstream proxy
    and then hands control to the next layer.
    """

    def __init__(self, ctx, connect_request):
        super().__init__(ctx)
        self.connect_request = connect_request
        # server_conn becomes a fake connection pointing at the CONNECT
        # target; real I/O still goes through ctx.server_conn (the proxy).
        self.server_conn = ConnectServerConnection(
            (connect_request.host, connect_request.port),
            self.ctx
        )

    def __call__(self):
        layer = self.ctx.next_layer(self)
        layer()

    def _send_connect_request(self):
        # Forward the CONNECT to the upstream proxy and require a 200 reply
        # before treating the tunnel as established.
        self.log("Sending CONNECT request", "debug", [
            f"Proxy Server: {self.ctx.server_conn.address}",
            f"Connect to: {self.connect_request.host}:{self.connect_request.port}"
        ])
        self.send_request(self.connect_request)
        resp = self.read_response(self.connect_request)
        if resp.status_code != 200:
            raise exceptions.ProtocolException("Reconnect: Upstream server refuses CONNECT request")

    def connect(self):
        # Only (re)connect and re-CONNECT when the tunnel is actually down.
        if not self.server_conn.connected():
            self.ctx.connect()
            self._send_connect_request()
        else:
            pass  # swallow the message

    def change_upstream_proxy_server(self, address):
        self.log("Changing upstream proxy to {} (CONNECTed)".format(repr(address)), "debug")
        if address != self.server_conn.via.address:
            self.ctx.set_server(address)

    def set_server(self, address):
        # Changing the CONNECT target: drop the current upstream connection
        # and rewrite the stored CONNECT request for the next connect().
        if self.ctx.server_conn.connected():
            self.ctx.disconnect()
        self.connect_request.host = address[0]
        self.connect_request.port = address[1]
        self.server_conn.address = address
def is_ok(status):
    """Return True iff *status* is a 2xx (success) HTTP status code."""
    return not (status < 200 or status >= 300)
class HTTPMode(enum.Enum):
    """Proxy operation modes relevant to HTTP handling (a subset of all proxy modes)."""
    regular = 1
    transparent = 2
    upstream = 3
# At this point, we see only a subset of the proxy modes
# Maps each HTTP proxy mode to the request-target forms (RFC 7230 §5.3)
# that are acceptable for it.
MODE_REQUEST_FORMS = {
    HTTPMode.regular: ("authority", "absolute"),
    HTTPMode.transparent: ("relative",),
    HTTPMode.upstream: ("authority", "absolute"),
}
def validate_request_form(mode, request):
    """
    Raise exceptions.HttpException if the request's first-line form
    (or its scheme, for absolute-form requests) is not allowed in *mode*.
    """
    form = request.first_line_format
    if form == "absolute" and request.scheme not in ("http", "https"):
        raise exceptions.HttpException(
            "Invalid request scheme: %s" % request.scheme
        )
    if form in MODE_REQUEST_FORMS[mode]:
        return
    if request.is_http2 and mode is HTTPMode.transparent and form == "absolute":
        # dirty hack: h2 may have authority info. will be fixed properly with sans-io.
        return
    if mode == HTTPMode.transparent:
        what = "HTTP CONNECT" if form == "authority" else "absolute-form"
        err_message = textwrap.dedent(
            """
            Mitmproxy received an {} request even though it is not running
            in regular mode. This usually indicates a misconfiguration,
            please see the mitmproxy mode documentation for details.
            """
        ).strip().format(what)
    else:
        err_message = "Invalid HTTP request form (expected: {}, got: {})".format(
            " or ".join(MODE_REQUEST_FORMS[mode]), request.first_line_format
        )
    raise exceptions.HttpException(err_message)
class HttpLayer(base.Layer):
    """
    Protocol-agnostic HTTP proxy core: drives the request/response cycle
    for one client connection, firing the addon hooks (http_connect,
    requestheaders, request, responseheaders, response, error) in order.
    Transmission primitives come from the enclosing _HttpTransmissionLayer
    subclass (HTTP/1 or HTTP/2).
    """

    if False:
        # mypy type hints
        server_conn: connections.ServerConnection = None

    def __init__(self, ctx, mode):
        super().__init__(ctx)
        self.mode = mode
        self.__initial_server_address: tuple = None
        "Contains the original destination in transparent mode, which needs to be restored"
        "if an inline script modified the target server for a single http request"
        # We cannot rely on server_conn.tls_established,
        # see https://github.com/mitmproxy/mitmproxy/issues/925
        self.__initial_server_tls = None
        # Requests happening after CONNECT do not need Proxy-Authorization headers.
        self.connect_request = False

    def __call__(self):
        # Remember the original destination so transparent-mode requests can
        # be re-targeted even if a script changed the server for one flow.
        if self.mode == HTTPMode.transparent:
            self.__initial_server_tls = self.server_tls
            self.__initial_server_address = self.server_conn.address
        # Keep-alive loop: one flow per iteration until _process_flow says stop.
        while True:
            flow = http.HTTPFlow(
                self.client_conn,
                self.server_conn,
                live=self,
                mode=self.mode.name
            )
            if not self._process_flow(flow):
                return

    def handle_regular_connect(self, f):
        """Answer a CONNECT in regular mode and hand off to the next layer."""
        self.connect_request = True
        try:
            self.set_server((f.request.host, f.request.port))
            if f.response:
                resp = f.response
            else:
                resp = http.make_connect_response(f.request.data.http_version)
            self.send_response(resp)
            if is_ok(resp.status_code):
                layer = self.ctx.next_layer(self)
                layer()
        except (
                exceptions.ProtocolException, exceptions.NetlibException
        ) as e:
            # HTTPS tasting means that ordinary errors like resolution
            # and connection errors can happen here.
            self.send_error_response(502, repr(e))
            f.error = flow.Error(str(e))
            self.channel.ask("error", f)
            return False
        # Always terminate the flow loop after a CONNECT.
        return False

    def handle_upstream_connect(self, f):
        """Forward a CONNECT to the upstream proxy; tunnel on success."""
        # if the user specifies a response in the http_connect hook, we do not connect upstream here.
        # https://github.com/mitmproxy/mitmproxy/pull/2473
        if not f.response:
            self.establish_server_connection(
                f.request.host,
                f.request.port,
                f.request.scheme
            )
            self.send_request(f.request)
            f.response = self.read_response_headers()
            f.response.data.content = b"".join(
                self.read_response_body(f.request, f.response)
            )
        self.send_response(f.response)
        if is_ok(f.response.status_code):
            layer = UpstreamConnectLayer(self, f.request)
            return layer()
        return False

    def _process_flow(self, f):
        """
        Handle one request/response exchange.

        Returns True to keep the connection (and the __call__ loop) alive,
        False to terminate it.
        """
        try:
            try:
                request: http.HTTPRequest = self.read_request_headers(f)
            except exceptions.HttpReadDisconnect:
                # don't throw an error for disconnects that happen
                # before/between requests.
                return False
            f.request = request

            if request.first_line_format == "authority":
                # The standards are silent on what we should do with a CONNECT
                # request body, so although it's not common, it's allowed.
                f.request.data.content = b"".join(
                    self.read_request_body(f.request)
                )
                f.request.data.trailers = self.read_request_trailers(f.request)
                f.request.timestamp_end = time.time()
                self.channel.ask("http_connect", f)

                if self.mode is HTTPMode.regular:
                    return self.handle_regular_connect(f)
                elif self.mode is HTTPMode.upstream:
                    return self.handle_upstream_connect(f)
                else:
                    msg = "Unexpected CONNECT request."
                    self.send_error_response(400, msg)
                    return False

            if not self.config.options.relax_http_form_validation:
                validate_request_form(self.mode, request)
            self.channel.ask("requestheaders", f)
            # Re-validate request form in case the user has changed something.
            if not self.config.options.relax_http_form_validation:
                validate_request_form(self.mode, request)

            if request.headers.get("expect", "").lower() == "100-continue":
                # TODO: We may have to use send_response_headers for HTTP2
                # here.
                self.send_response(http.make_expect_continue_response())
                request.headers.pop("expect")

            # Streaming requests keep content=None; otherwise read it in full.
            if f.request.stream:
                f.request.data.content = None
            else:
                f.request.data.content = b"".join(self.read_request_body(request))

            f.request.data.trailers = self.read_request_trailers(f.request)

            request.timestamp_end = time.time()
        except exceptions.HttpException as e:
            # We optimistically guess there might be an HTTP client on the
            # other end
            self.send_error_response(400, repr(e))
            # Request may be malformed at this point, so we unset it.
            f.request = None
            f.error = flow.Error(str(e))
            self.channel.ask("error", f)
            self.log(
                "request",
                "warn",
                [f"HTTP protocol error in client request: {e}"]
            )
            return False

        self.log("request", "debug", [repr(request)])

        # set first line format to relative in regular mode,
        # see https://github.com/mitmproxy/mitmproxy/issues/1759
        if self.mode is HTTPMode.regular and request.first_line_format == "absolute":
            request.authority = ""

        # update host header in reverse proxy mode
        if self.config.options.mode.startswith("reverse:") and not self.config.options.keep_host_header:
            f.request.host_header = url.hostport(
                self.config.upstream_server.scheme,
                *self.config.upstream_server.address
            )

        # Determine .scheme, .host and .port attributes for inline scripts. For
        # absolute-form requests, they are directly given in the request. For
        # authority-form requests, we only need to determine the request
        # scheme. For relative-form requests, we need to determine host and
        # port as well.
        if self.mode is HTTPMode.transparent:
            # Setting request.host also updates the host header, which we want
            # to preserve
            f.request.data.host = self.__initial_server_address[0]
            f.request.data.port = self.__initial_server_address[1]
            f.request.data.scheme = b"https" if self.__initial_server_tls else b"http"
        self.channel.ask("request", f)

        try:
            valid = (
                websocket.check_handshake(request.headers) and
                websocket.check_client_version(request.headers)
            )
            if valid:
                f.metadata['websocket'] = True
                # We only support RFC6455 with WebSocket version 13
                # allow inline scripts to manipulate the client handshake
                self.channel.ask("websocket_handshake", f)

            # A script may have set f.response already; only talk to the
            # server when it has not.
            if not f.response:
                self.establish_server_connection(
                    f.request.host,
                    f.request.port,
                    f.request.scheme
                )

                def get_response():
                    self.send_request_headers(f.request)

                    if f.request.stream:
                        chunks = self.read_request_body(f.request)
                        if callable(f.request.stream):
                            chunks = f.request.stream(chunks)
                        self.send_request_body(f.request, chunks)
                    else:
                        self.send_request_body(f.request, [f.request.data.content])

                    self.send_request_trailers(f.request)

                    f.response = self.read_response_headers()

                try:
                    get_response()
                except exceptions.NetlibException as e:
                    self.log(
                        "server communication error: %s" % repr(e),
                        level="debug"
                    )
                    # In any case, we try to reconnect at least once. This is
                    # necessary because it might be possible that we already
                    # initiated an upstream connection after clientconnect that
                    # has already been expired, e.g consider the following event
                    # log:
                    # > clientconnect (transparent mode destination known)
                    # > serverconnect (required for client tls handshake)
                    # > read n% of large request
                    # > server detects timeout, disconnects
                    # > read (100-n)% of large request
                    # > send large request upstream

                    if isinstance(e, exceptions.Http2ProtocolException):
                        # do not try to reconnect for HTTP2
                        raise exceptions.ProtocolException(
                            "First and only attempt to get response via HTTP2 failed."
                        )
                    elif f.request.stream:
                        # We may have already consumed some request chunks already,
                        # so all we can do is signal downstream that upstream closed the connection.
                        self.send_error_response(408, "Request Timeout")
                        f.error = flow.Error(repr(e))
                        self.channel.ask("error", f)
                        return False

                    self.disconnect()
                    self.connect()
                    get_response()

                # call the appropriate script hook - this is an opportunity for
                # an inline script to set f.stream = True
                self.channel.ask("responseheaders", f)

                if f.response.stream:
                    f.response.data.content = None
                else:
                    f.response.data.content = b"".join(
                        self.read_response_body(f.request, f.response)
                    )
                f.response.timestamp_end = time.time()

                # no further manipulation of self.server_conn beyond this point
                # we can safely set it as the final attribute value here.
                f.server_conn = self.server_conn
            else:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.channel.ask("responseheaders", f)

            f.response.data.trailers = self.read_response_trailers(f.request, f.response)

            self.log("response", "debug", [repr(f.response)])
            self.channel.ask("response", f)

            if not f.response.stream:
                # no streaming:
                # we already received the full response from the server and can
                # send it to the client straight away.
                self.send_response(f.response)
            else:
                # streaming:
                # First send the headers and then transfer the response incrementally
                self.send_response_headers(f.response)
                chunks = self.read_response_body(
                    f.request,
                    f.response
                )
                if callable(f.response.stream):
                    chunks = f.response.stream(chunks)
                self.send_response_body(f.response, chunks)
                f.response.timestamp_end = time.time()

            if self.check_close_connection(f):
                return False

            # Handle 101 Switching Protocols
            if f.response.status_code == 101:
                # Handle a successful HTTP 101 Switching Protocols Response,
                # received after e.g. a WebSocket upgrade request.
                # Check for WebSocket handshake
                is_websocket = (
                    websocket.check_handshake(f.request.headers) and
                    websocket.check_handshake(f.response.headers)
                )
                if is_websocket and not self.config.options.websocket:
                    self.log(
                        "Client requested WebSocket connection, but the protocol is disabled.",
                        "info"
                    )

                if is_websocket and self.config.options.websocket:
                    layer = WebSocketLayer(self, f)
                else:
                    layer = self.ctx.next_layer(self)
                layer()
                return False  # should never be reached

        except (exceptions.ProtocolException, exceptions.NetlibException) as e:
            if not f.response:
                self.send_error_response(502, repr(e))
                f.error = flow.Error(str(e))
                self.channel.ask("error", f)
                return False
            else:
                raise exceptions.ProtocolException(
                    "Error in HTTP connection: %s" % repr(e)
                )
        finally:
            if f:
                f.live = False

        return True

    def send_error_response(self, code, message, headers=None) -> None:
        """Best-effort error reply to the client; failures are only logged."""
        try:
            response = http.make_error_response(code, message, headers)
            self.send_response(response)
        except (exceptions.NetlibException, h2.exceptions.H2Error, exceptions.Http2ProtocolException):
            self.log(f"Failed to send error response to client: {message}", "debug")

    def change_upstream_proxy_server(self, address):
        # Make set_upstream_proxy_server always available,
        # even if there's no UpstreamConnectLayer
        if hasattr(self.ctx, "change_upstream_proxy_server"):
            self.ctx.change_upstream_proxy_server(address)
        elif address != self.server_conn.address:
            self.log("Changing upstream proxy to {} (not CONNECTed)".format(repr(address)), "debug")
            self.set_server(address)

    def establish_server_connection(self, host: str, port: int, scheme: str):
        """Ensure the server connection matches (host, port, scheme), reconnecting if needed."""
        tls = (scheme == "https")

        if self.mode is HTTPMode.regular or self.mode is HTTPMode.transparent:
            # If there's an existing connection that doesn't match our expectations, kill it.
            address = (host, port)
            if address != self.server_conn.address or tls != self.server_tls:
                self.set_server(address)
                self.set_server_tls(tls, address[0])
            # Establish connection is necessary.
            if not self.server_conn.connected():
                self.connect()
        else:
            if not self.server_conn.connected():
                self.connect()
            if tls:
                raise exceptions.HttpProtocolException("Cannot change scheme in upstream proxy mode.")

View File

@ -1,100 +0,0 @@
from mitmproxy.net.http import http1
from mitmproxy.proxy.protocol import http as httpbase
from mitmproxy.utils import human
class Http1Layer(httpbase._HttpTransmissionLayer):
    """
    HTTP/1.x transmission layer: implements the read/send primitives of
    _HttpTransmissionLayer on top of the blocking rfile/wfile socket pair,
    delegating (de)serialization to mitmproxy.net.http.http1.
    """

    def __init__(self, ctx, mode):
        super().__init__(ctx)
        self.mode = mode

    def read_request_headers(self, flow):
        return http1.read_request_head(self.client_conn.rfile)

    def read_request_body(self, request):
        expected_size = http1.expected_http_body_size(request)
        return http1.read_body(
            self.client_conn.rfile,
            expected_size,
            human.parse_size(self.config.options.body_size_limit)
        )

    def read_request_trailers(self, request):
        if "Trailer" in request.headers:
            # TODO: not implemented yet
            self.log("HTTP/1.1 request trailer headers are not implemented yet!", "warn")
        return None

    def send_request_headers(self, request):
        headers = http1.assemble_request_head(request)
        self.server_conn.wfile.write(headers)
        self.server_conn.wfile.flush()

    def send_request_body(self, request, chunks):
        # Flush per chunk so streamed uploads make progress immediately.
        for chunk in http1.assemble_body(request.headers, chunks, request.trailers):
            self.server_conn.wfile.write(chunk)
            self.server_conn.wfile.flush()

    def send_request_trailers(self, request):
        # HTTP/1.1 request trailer headers are sent in the body
        pass

    def send_request(self, request):
        self.server_conn.wfile.write(http1.assemble_request(request))
        self.server_conn.wfile.flush()

    def read_response_headers(self):
        return http1.read_response_head(self.server_conn.rfile)

    def read_response_body(self, request, response):
        expected_size = http1.expected_http_body_size(request, response)
        return http1.read_body(
            self.server_conn.rfile,
            expected_size,
            human.parse_size(self.config.options.body_size_limit)
        )

    def read_response_trailers(self, request, response):
        # Trailers should actually be parsed unconditionally, the "Trailer" header is optional
        if "Trailer" in response.headers:
            # TODO: not implemented yet
            self.log("HTTP/1.1 trailer headers are not implemented yet!", "warn")
        return None

    def send_response_headers(self, response):
        raw = http1.assemble_response_head(response)
        self.client_conn.wfile.write(raw)
        self.client_conn.wfile.flush()

    def send_response_body(self, response, chunks):
        # Flush per chunk so streamed responses make progress immediately.
        for chunk in http1.assemble_body(response.headers, chunks, response.trailers):
            self.client_conn.wfile.write(chunk)
            self.client_conn.wfile.flush()

    def send_response_trailers(self, response):
        # HTTP/1.1 response trailer headers are sent in the body
        pass

    def check_close_connection(self, flow):
        """Return True if the connection must be closed after this exchange."""
        request_close = http1.connection_close(
            flow.request.http_version,
            flow.request.headers
        )
        response_close = http1.connection_close(
            flow.response.http_version,
            flow.response.headers
        )
        # -1 means "read until EOF", which implies the connection cannot be reused.
        read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
        close_connection = request_close or response_close or read_until_eof
        if flow.request.first_line_format == "authority" and flow.response.status_code == 200:
            # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
            # Charles Proxy sends a CONNECT response with HTTP/1.0
            # and no Content-Length header
            return False
        return close_connection

    def __call__(self):
        # Hand the actual HTTP semantics to the protocol-agnostic core.
        layer = httpbase.HttpLayer(self, self.mode)
        layer()

View File

@ -1,733 +0,0 @@
import threading
import time
import functools
import types
from typing import Dict, Callable, Any, List, Optional # noqa
import h2.exceptions
from h2 import connection
from h2 import events
import queue
from mitmproxy import connections, flow # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol import http as httpbase
import mitmproxy.net.http
from mitmproxy.net import tcp
from mitmproxy.coretypes import basethread
from mitmproxy.net.http import http2, headers, url
from mitmproxy.utils import human
class SafeH2Connection(connection.H2Connection):
    """
    Thread-safe wrapper around h2's H2Connection: every safe_* method takes
    an RLock around the state-machine mutation AND the socket send, so that
    multiple per-stream threads can share one HTTP/2 connection.
    """

    def __init__(self, conn, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conn = conn
        self.lock = threading.RLock()

    def safe_acknowledge_received_data(self, acknowledged_size: int, stream_id: int):
        if acknowledged_size == 0:
            return
        with self.lock:
            self.acknowledge_received_data(acknowledged_size, stream_id)
            self.conn.send(self.data_to_send())

    def safe_reset_stream(self, stream_id: int, error_code: int):
        with self.lock:
            try:
                self.reset_stream(stream_id, error_code)
            except h2.exceptions.StreamClosedError:  # pragma: no cover
                # stream is already closed - good
                pass
            self.conn.send(self.data_to_send())

    def safe_update_settings(self, new_settings: Dict[int, Any]):
        with self.lock:
            self.update_settings(new_settings)
            self.conn.send(self.data_to_send())

    def safe_send_headers(self, raise_zombie: Callable, stream_id: int, headers: headers.Headers, **kwargs):
        with self.lock:
            raise_zombie()
            self.send_headers(stream_id, headers.fields, **kwargs)
            self.conn.send(self.data_to_send())

    def safe_send_body(self, raise_zombie: Callable, stream_id: int, chunks: List[bytes], end_stream=True):
        """Send body chunks, honouring flow control and max frame size; zombie-checked."""
        for chunk in chunks:
            position = 0
            while position < len(chunk):
                # Manual acquire/release: the lock must be droppable both by
                # raise_zombie (on zombie detection) and while waiting for
                # flow-control window to open up.
                self.lock.acquire()
                raise_zombie(self.lock.release)
                max_outbound_frame_size = self.max_outbound_frame_size
                frame_chunk = chunk[position:position + max_outbound_frame_size]
                if self.local_flow_control_window(stream_id) < len(frame_chunk):  # pragma: no cover
                    # No window available: back off briefly and retry.
                    self.lock.release()
                    time.sleep(0.1)
                    continue
                self.send_data(stream_id, frame_chunk)
                try:
                    self.conn.send(self.data_to_send())
                except Exception as e:  # pragma: no cover
                    raise e
                finally:
                    self.lock.release()
                position += max_outbound_frame_size
        if end_stream:
            with self.lock:
                raise_zombie()
                self.end_stream(stream_id)
                self.conn.send(self.data_to_send())
class Http2Layer(base.Layer):
    """
    Connection-level HTTP/2 layer: multiplexes h2 events from the client and
    server connections onto per-stream Http2SingleStreamLayer threads, and
    owns the select/receive loop in __call__.
    """

    if False:
        # mypy type hints
        client_conn: connections.ClientConnection = None

    class H2ConnLogger:
        """Adapter exposing h2's logger interface on top of the layer's log()."""

        def __init__(self, name, log):
            self.name = name
            self.log = log

        def debug(self, fmtstr, *args):
            msg = "H2Conn {}: {}".format(self.name, fmtstr % args)
            self.log(msg, "debug")

        def trace(self, fmtstr, *args):
            pass

    def __init__(self, ctx, mode: str) -> None:
        super().__init__(ctx)
        self.mode = mode
        # client stream id -> per-stream layer/thread
        self.streams: Dict[int, Http2SingleStreamLayer] = dict()
        # server-side stream id -> client-side stream id (ids differ per side)
        self.server_to_client_stream_ids: Dict[int, int] = dict([(0, 0)])
        self.connections: Dict[object, SafeH2Connection] = {}

        config = h2.config.H2Configuration(
            client_side=False,
            header_encoding=False,
            validate_outbound_headers=False,
            validate_inbound_headers=False,
            logger=self.H2ConnLogger("client", self.log))
        self.connections[self.client_conn] = SafeH2Connection(self.client_conn, config=config)

    def _initiate_server_conn(self):
        if self.server_conn.connected():
            config = h2.config.H2Configuration(
                client_side=True,
                header_encoding=False,
                validate_outbound_headers=False,
                validate_inbound_headers=False,
                logger=self.H2ConnLogger("server", self.log))
            self.connections[self.server_conn] = SafeH2Connection(self.server_conn, config=config)
            self.connections[self.server_conn].initiate_connection()
            self.server_conn.send(self.connections[self.server_conn].data_to_send())

    def _complete_handshake(self):
        # 24 bytes = the fixed HTTP/2 client connection preface.
        preamble = self.client_conn.rfile.read(24)
        self.connections[self.client_conn].initiate_connection()
        self.connections[self.client_conn].receive_data(preamble)
        self.client_conn.send(self.connections[self.client_conn].data_to_send())

    def next_layer(self):  # pragma: no cover
        # WebSocket over HTTP/2?
        # CONNECT for proxying?
        raise NotImplementedError()

    def _handle_event(self, event, source_conn, other_conn, is_server):
        """Dispatch one h2 event; returns False when the connection must terminate."""
        self.log(
            "HTTP2 Event from {}".format("server" if is_server else "client"),
            "debug",
            [repr(event)]
        )

        eid = None
        if hasattr(event, 'stream_id'):
            # Odd server-side stream ids are push streams we mapped earlier.
            if is_server and event.stream_id % 2 == 1:
                eid = self.server_to_client_stream_ids[event.stream_id]
            else:
                eid = event.stream_id

        if isinstance(event, events.RequestReceived):
            return self._handle_request_received(eid, event)
        elif isinstance(event, events.ResponseReceived):
            return self._handle_response_received(eid, event)
        elif isinstance(event, events.DataReceived):
            return self._handle_data_received(eid, event, source_conn)
        elif isinstance(event, events.StreamEnded):
            return self._handle_stream_ended(eid)
        elif isinstance(event, events.StreamReset):
            return self._handle_stream_reset(eid, event, is_server, other_conn)
        elif isinstance(event, events.RemoteSettingsChanged):
            return self._handle_remote_settings_changed(event, other_conn)
        elif isinstance(event, events.ConnectionTerminated):
            return self._handle_connection_terminated(event, is_server)
        elif isinstance(event, events.PushedStreamReceived):
            return self._handle_pushed_stream_received(event)
        elif isinstance(event, events.PriorityUpdated):
            return self._handle_priority_updated(eid, event)
        elif isinstance(event, events.TrailersReceived):
            return self._handle_trailers(eid, event, is_server, other_conn)

        # fail-safe for unhandled events
        return True

    def _handle_request_received(self, eid, event):
        headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
        self.streams[eid] = Http2SingleStreamLayer(self, self.connections[self.client_conn], eid, headers)
        self.streams[eid].timestamp_start = time.time()
        if event.priority_updated is not None:
            self.streams[eid].priority_exclusive = event.priority_updated.exclusive
            self.streams[eid].priority_depends_on = event.priority_updated.depends_on
            self.streams[eid].priority_weight = event.priority_updated.weight
            self.streams[eid].handled_priority_event = event.priority_updated
        # Spawn the per-stream thread, then signal that headers arrived.
        self.streams[eid].start()
        self.streams[eid].request_message.arrived.set()
        return True

    def _handle_response_received(self, eid, event):
        headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
        self.streams[eid].queued_data_length = 0
        self.streams[eid].timestamp_start = time.time()
        self.streams[eid].response_message.headers = headers
        self.streams[eid].response_message.arrived.set()
        return True

    def _handle_data_received(self, eid, event, source_conn):
        bsl = human.parse_size(self.config.options.body_size_limit)
        if bsl and self.streams[eid].queued_data_length > bsl:
            # Body exceeds the configured limit: kill the stream and refuse it.
            self.streams[eid].kill()
            self.connections[source_conn].safe_reset_stream(
                event.stream_id,
                h2.errors.ErrorCodes.REFUSED_STREAM
            )
            self.log(f"HTTP body too large. Limit is {bsl}.", "info")
        else:
            self.streams[eid].data_queue.put(event.data)
            self.streams[eid].queued_data_length += len(event.data)

        # always acknowledge received data with a WINDOW_UPDATE frame
        self.connections[source_conn].safe_acknowledge_received_data(
            event.flow_controlled_length,
            event.stream_id
        )
        return True

    def _handle_stream_ended(self, eid):
        self.streams[eid].timestamp_end = time.time()
        self.streams[eid].stream_ended.set()
        return True

    def _handle_stream_reset(self, eid, event, is_server, other_conn):
        # Mirror the reset to the peer connection's corresponding stream.
        if eid in self.streams:
            self.streams[eid].kill()
            if is_server:
                other_stream_id = self.streams[eid].client_stream_id
            else:
                other_stream_id = self.streams[eid].server_stream_id
            if other_stream_id is not None:
                self.connections[other_conn].safe_reset_stream(other_stream_id, event.error_code)
        return True

    def _handle_trailers(self, eid, event, is_server, other_conn):
        trailers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
        self.streams[eid].trailers = trailers
        return True

    def _handle_remote_settings_changed(self, event, other_conn):
        # Forward the peer's settings change to the other side.
        new_settings = {key: cs.new_value for (key, cs) in event.changed_settings.items()}
        self.connections[other_conn].safe_update_settings(new_settings)
        return True

    def _handle_connection_terminated(self, event, is_server):
        self.log("HTTP/2 connection terminated by {}: error code: {}, last stream id: {}, additional data: {}".format(
            "server" if is_server else "client",
            event.error_code,
            event.last_stream_id,
            event.additional_data), "info")

        if event.error_code != h2.errors.ErrorCodes.NO_ERROR:
            # Something terrible has happened - kill everything!
            self.connections[self.client_conn].close_connection(
                error_code=event.error_code,
                last_stream_id=event.last_stream_id,
                additional_data=event.additional_data
            )
            self.client_conn.send(self.connections[self.client_conn].data_to_send())
            self._kill_all_streams()
        else:
            """
            Do not immediately terminate the other connection.
            Some streams might be still sending data to the client.
            """
        return False

    def _handle_pushed_stream_received(self, event):
        # pushed stream ids should be unique and not dependent on race conditions
        # only the parent stream id must be looked up first
        parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
        with self.connections[self.client_conn].lock:
            self.connections[self.client_conn].push_stream(parent_eid, event.pushed_stream_id, event.headers)
            self.client_conn.send(self.connections[self.client_conn].data_to_send())

        headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
        layer = Http2SingleStreamLayer(self, self.connections[self.client_conn], event.pushed_stream_id, headers)
        self.streams[event.pushed_stream_id] = layer
        self.streams[event.pushed_stream_id].timestamp_start = time.time()
        self.streams[event.pushed_stream_id].pushed = True
        self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
        self.streams[event.pushed_stream_id].timestamp_end = time.time()
        # Pushed streams carry no request body: mark the request complete.
        self.streams[event.pushed_stream_id].request_message.arrived.set()
        self.streams[event.pushed_stream_id].request_message.stream_ended.set()
        self.streams[event.pushed_stream_id].start()
        return True

    def _handle_priority_updated(self, eid, event):
        if not self.config.options.http2_priority:
            self.log("HTTP/2 PRIORITY frame suppressed. Use --http2-priority to enable forwarding.", "debug")
            return True

        if eid in self.streams and self.streams[eid].handled_priority_event is event:
            # this event was already handled during stream creation
            # HeadersFrame + Priority information as RequestReceived
            return True

        with self.connections[self.server_conn].lock:
            mapped_stream_id = event.stream_id
            if mapped_stream_id in self.streams and self.streams[mapped_stream_id].server_stream_id:
                # if the stream is already up and running and was sent to the server,
                # use the mapped server stream id to update priority information
                mapped_stream_id = self.streams[mapped_stream_id].server_stream_id

            if eid in self.streams:
                self.streams[eid].priority_exclusive = event.exclusive
                self.streams[eid].priority_depends_on = event.depends_on
                self.streams[eid].priority_weight = event.weight

            self.connections[self.server_conn].prioritize(
                mapped_stream_id,
                weight=event.weight,
                depends_on=self._map_depends_on_stream_id(mapped_stream_id, event.depends_on),
                exclusive=event.exclusive
            )
        self.server_conn.send(self.connections[self.server_conn].data_to_send())
        return True

    def _map_depends_on_stream_id(self, stream_id, depends_on):
        mapped_depends_on = depends_on
        if mapped_depends_on in self.streams and self.streams[mapped_depends_on].server_stream_id:
            # if the depends-on-stream is already up and running and was sent to the server
            # use the mapped server stream id to update priority information
            mapped_depends_on = self.streams[mapped_depends_on].server_stream_id
        if stream_id == mapped_depends_on:
            # looks like one of the streams wasn't opened yet
            # prevent self-dependent streams which result in ProtocolError
            mapped_depends_on += 2
        return mapped_depends_on

    def _cleanup_streams(self):
        # Drop streams that have been zombies for more than 10 seconds.
        death_time = time.time() - 10

        zombie_streams = [(stream_id, stream) for stream_id, stream in list(self.streams.items()) if stream.zombie]
        outdated_streams = [stream_id for stream_id, stream in zombie_streams if stream.zombie <= death_time]

        for stream_id in outdated_streams:  # pragma: no cover
            self.streams.pop(stream_id, None)

    def _kill_all_streams(self):
        for stream in self.streams.values():
            stream.kill()

    def __call__(self):
        self._initiate_server_conn()
        self._complete_handshake()

        conns = [c.connection for c in self.connections.keys()]

        try:
            while True:
                # Poll both raw sockets; 0.1s timeout keeps _cleanup_streams running.
                r = tcp.ssl_read_select(conns, 0.1)
                for conn in r:
                    source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
                    other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
                    is_server = (source_conn == self.server_conn)

                    with self.connections[source_conn].lock:
                        try:
                            _, consumed_bytes = http2.read_frame(source_conn.rfile)
                        except:
                            # read frame failed: connection closed
                            self._kill_all_streams()
                            return

                        if self.connections[source_conn].state_machine.state == h2.connection.ConnectionState.CLOSED:
                            self.log("HTTP/2 connection entered closed state already", "debug")
                            return

                        incoming_events = self.connections[source_conn].receive_data(consumed_bytes)
                        source_conn.send(self.connections[source_conn].data_to_send())

                        for event in incoming_events:
                            if not self._handle_event(event, source_conn, other_conn, is_server):
                                # connection terminated: GoAway
                                self._kill_all_streams()
                                return

                self._cleanup_streams()
        except Exception as e:  # pragma: no cover
            self.log(repr(e), "info")
            self._kill_all_streams()
def detect_zombie_stream(func):  # pragma: no cover
    """
    Decorator for Http2SingleStreamLayer methods: invoke self.raise_zombie()
    immediately before and after the wrapped call, so a dead stream aborts
    instead of operating on stale state.
    """
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        self.raise_zombie()
        value = func(self, *args, **kwargs)
        self.raise_zombie()
        return value

    return guarded
class Http2SingleStreamLayer(httpbase._HttpTransmissionLayer, basethread.BaseThread):
    """
    HTTP transmission layer for a single HTTP/2 stream.

    Each instance runs as its own thread (``run``; calling the layer
    directly raises - see ``__call__``). Frame payloads are handed over
    from the connection-handling code via the per-direction ``Message``
    objects; ``threading.Event`` flags signal when headers have arrived
    and when END_STREAM has been seen. A stream whose connection died is
    a "zombie"; ``raise_zombie`` aborts processing once that is detected.
    """

    class Message:
        """Per-direction (request or response) message state, filled in
        asynchronously by the HTTP/2 connection thread."""

        def __init__(self, headers=None):
            self.headers: Optional[mitmproxy.net.http.Headers] = headers  # headers are the first thing to be received on a new stream
            self.data_queue: queue.Queue[bytes] = queue.Queue()  # contains raw contents of DATA frames
            self.queued_data_length = 0  # used to enforce mitmproxy's config.options.body_size_limit
            self.trailers: Optional[mitmproxy.net.http.Headers] = None  # trailers are received after stream_ended is set
            self.arrived = threading.Event()  # indicates the HEADERS+CONTINUATION frames have been received
            self.stream_ended = threading.Event()  # indicates that a frame with the END_STREAM flag has been received

    def __init__(self, ctx, h2_connection, stream_id: int, request_headers: mitmproxy.net.http.Headers) -> None:
        super().__init__(
            ctx, name=f"Http2SingleStreamLayer-{stream_id}"
        )
        self.h2_connection = h2_connection
        self.zombie: Optional[float] = None  # time of death, None while the stream is alive
        self.client_stream_id: int = stream_id
        self.server_stream_id: Optional[int] = None  # assigned lazily in send_request_headers()
        self.pushed = False  # True for server-pushed streams
        self.timestamp_start: Optional[float] = None
        self.timestamp_end: Optional[float] = None
        self.request_message = self.Message(request_headers)
        self.response_message = self.Message()
        # PRIORITY frame data captured from the original HEADERS frame (if any):
        self.priority_exclusive: bool
        self.priority_depends_on: Optional[int] = None
        self.priority_weight: Optional[int] = None
        self.handled_priority_event: Any = None

    def kill(self):
        """Mark this stream as a zombie and wake up all blocked waiters."""
        if not self.zombie:
            self.zombie = time.time()
            # Set every event so threads blocked in .wait() / body reads exit.
            self.request_message.stream_ended.set()
            self.request_message.arrived.set()
            self.response_message.arrived.set()
            self.response_message.stream_ended.set()

    def connect(self):  # pragma: no cover
        raise exceptions.Http2ProtocolException("HTTP2 layer should already have a connection.")

    def disconnect(self):  # pragma: no cover
        raise exceptions.Http2ProtocolException("Cannot dis- or reconnect in HTTP2 connections.")

    def set_server(self, address):  # pragma: no cover
        raise exceptions.SetServerNotAllowedException(repr(address))

    def check_close_connection(self, flow):
        # This layer only handles a single stream.
        # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
        return True

    @property
    def data_queue(self):
        # Once response headers have arrived, reads refer to the response body.
        if self.response_message.arrived.is_set():
            return self.response_message.data_queue
        else:
            return self.request_message.data_queue

    @property
    def queued_data_length(self):
        if self.response_message.arrived.is_set():
            return self.response_message.queued_data_length
        else:
            return self.request_message.queued_data_length

    @queued_data_length.setter
    def queued_data_length(self, v):
        # NOTE(review): the setter always targets the request message, while the
        # getter switches to the response once its headers arrived - confirm intended.
        self.request_message.queued_data_length = v

    @property
    def stream_ended(self):
        # This indicates that all message headers, the full message body, and all trailers have been received
        # https://tools.ietf.org/html/rfc7540#section-8.1
        if self.response_message.arrived.is_set():
            return self.response_message.stream_ended
        else:
            return self.request_message.stream_ended

    @property
    def trailers(self):
        if self.response_message.arrived.is_set():
            return self.response_message.trailers
        else:
            return self.request_message.trailers

    @trailers.setter
    def trailers(self, v):
        if self.response_message.arrived.is_set():
            self.response_message.trailers = v
        else:
            self.request_message.trailers = v

    def raise_zombie(self, pre_command=None):  # pragma: no cover
        """Raise Http2ZombieException if this stream or its connection is dead.

        ``pre_command`` (e.g. releasing a held lock) runs just before raising.
        """
        connection_closed = self.h2_connection.state_machine.state == h2.connection.ConnectionState.CLOSED
        if self.zombie is not None or connection_closed:
            if pre_command is not None:
                pre_command()
            raise exceptions.Http2ZombieException(f"Connection or stream already dead: {self.zombie}, {connection_closed}")

    @detect_zombie_stream
    def read_request_headers(self, flow):
        """Block until request headers arrived, then build the HTTPRequest."""
        self.request_message.arrived.wait()
        self.raise_zombie()
        if self.pushed:
            flow.metadata['h2-pushed-stream'] = True
        # pseudo header must be present, see https://http2.github.io/http2-spec/#rfc.section.8.1.2.3
        authority = self.request_message.headers.pop(':authority', "")
        method = self.request_message.headers.pop(':method')
        scheme = self.request_message.headers.pop(':scheme')
        path = self.request_message.headers.pop(':path')
        host, port = url.parse_authority(authority, check=True)
        port = port or url.default_port(scheme) or 0
        return http.HTTPRequest(
            host,
            port,
            method.encode(),
            scheme.encode(),
            authority.encode(),
            path.encode(),
            b"HTTP/2.0",
            self.request_message.headers,
            None,
            None,
            self.timestamp_start,
            self.timestamp_end,
        )

    @detect_zombie_stream
    def read_request_body(self, request):
        """Yield request DATA chunks; drain the queue after END_STREAM."""
        if not request.stream:
            self.request_message.stream_ended.wait()
        while True:
            try:
                # Poll with a timeout so the zombie check below runs regularly.
                yield self.request_message.data_queue.get(timeout=0.1)
            except queue.Empty:  # pragma: no cover
                pass
            if self.request_message.stream_ended.is_set():
                self.raise_zombie()
                # Flush whatever is still queued, then stop.
                while self.request_message.data_queue.qsize() > 0:
                    yield self.request_message.data_queue.get()
                break
            self.raise_zombie()

    @detect_zombie_stream
    def read_request_trailers(self, request):
        return self.request_message.trailers

    @detect_zombie_stream
    def send_request_headers(self, request):
        """Open a server-side stream and forward the request headers."""
        if self.pushed:
            # nothing to do here
            return
        while True:
            self.raise_zombie()
            self.connections[self.server_conn].lock.acquire()
            max_streams = self.connections[self.server_conn].remote_settings.max_concurrent_streams
            if self.connections[self.server_conn].open_outbound_streams + 1 >= max_streams:
                # wait until we get a free slot for a new outgoing stream
                self.connections[self.server_conn].lock.release()
                time.sleep(0.1)
                continue
            # keep the lock
            break
        # We must not assign a stream id if we are already a zombie.
        self.raise_zombie()
        self.server_stream_id = self.connections[self.server_conn].get_next_available_stream_id()
        self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
        headers = request.headers.copy()
        if request.authority:
            headers.insert(0, ":authority", request.authority)
        headers.insert(0, ":path", request.path)
        headers.insert(0, ":method", request.method)
        headers.insert(0, ":scheme", request.scheme)
        priority_exclusive = None
        priority_depends_on = None
        priority_weight = None
        if self.handled_priority_event:
            # only send priority information if they actually came with the original HeadersFrame
            # and not if they got updated before/after with a PriorityFrame
            if not self.config.options.http2_priority:
                self.log("HTTP/2 PRIORITY information in HEADERS frame suppressed. Use --http2-priority to enable forwarding.", "debug")
            else:
                priority_exclusive = self.priority_exclusive
                priority_depends_on = self._map_depends_on_stream_id(self.server_stream_id, self.priority_depends_on)
                priority_weight = self.priority_weight
        try:
            self.connections[self.server_conn].safe_send_headers(
                self.raise_zombie,
                self.server_stream_id,
                headers,
                end_stream=(False if request.content or request.trailers or request.stream else True),
                priority_exclusive=priority_exclusive,
                priority_depends_on=priority_depends_on,
                priority_weight=priority_weight,
            )
        except Exception as e:  # pragma: no cover
            raise e
        finally:
            # Lock acquired in the loop above is always released here.
            self.raise_zombie()
            self.connections[self.server_conn].lock.release()

    @detect_zombie_stream
    def send_request_body(self, request, chunks):
        if self.pushed:
            # nothing to do here
            return
        # Only send a body if there is one (generator, or non-empty first chunk).
        if isinstance(chunks, types.GeneratorType) or (chunks and chunks[0]):
            self.connections[self.server_conn].safe_send_body(
                self.raise_zombie,
                self.server_stream_id,
                chunks,
                end_stream=(request.trailers is None),
            )

    @detect_zombie_stream
    def send_request_trailers(self, request):
        self._send_trailers(self.server_conn, request.trailers)

    @detect_zombie_stream
    def send_request(self, request):
        """Convenience: send headers, body and trailers in order."""
        self.send_request_headers(request)
        self.send_request_body(request, [request.content])
        self.send_request_trailers(request)

    @detect_zombie_stream
    def read_response_headers(self):
        """Block until response headers arrived, then build the HTTPResponse."""
        self.response_message.arrived.wait()
        self.raise_zombie()
        # Missing :status pseudo-header is mapped to 502.
        status_code = int(self.response_message.headers.get(':status', 502))
        headers = self.response_message.headers.copy()
        headers.pop(":status", None)
        return http.HTTPResponse(
            http_version=b"HTTP/2.0",
            status_code=status_code,
            reason=b'',
            headers=headers,
            content=None,
            trailers=None,
            timestamp_start=self.timestamp_start,
            timestamp_end=self.timestamp_end,
        )

    @detect_zombie_stream
    def read_response_body(self, request, response):
        """Yield response DATA chunks; drain the queue after END_STREAM."""
        while True:
            try:
                yield self.response_message.data_queue.get(timeout=0.1)
            except queue.Empty:  # pragma: no cover
                pass
            if self.response_message.stream_ended.is_set():
                self.raise_zombie()
                while self.response_message.data_queue.qsize() > 0:
                    yield self.response_message.data_queue.get()
                break
            self.raise_zombie()

    @detect_zombie_stream
    def read_response_trailers(self, request, response):
        return self.response_message.trailers

    @detect_zombie_stream
    def send_response_headers(self, response):
        headers = response.headers.copy()
        headers.insert(0, ":status", str(response.status_code))
        with self.connections[self.client_conn].lock:
            self.connections[self.client_conn].safe_send_headers(
                self.raise_zombie,
                self.client_stream_id,
                headers
            )

    @detect_zombie_stream
    def send_response_body(self, response, chunks):
        self.connections[self.client_conn].safe_send_body(
            self.raise_zombie,
            self.client_stream_id,
            chunks,
            end_stream=(response.trailers is None),
        )

    @detect_zombie_stream
    def send_response_trailers(self, response):
        self._send_trailers(self.client_conn, response.trailers)

    def _send_trailers(self, conn, trailers):
        """Send trailers (if any) on *conn* as a final HEADERS frame.

        NOTE(review): this always uses ``self.client_stream_id`` even when
        *conn* is the server connection - confirm against callers.
        """
        if not trailers:
            return
        with self.connections[conn].lock:
            self.connections[conn].safe_send_headers(
                self.raise_zombie,
                self.client_stream_id,
                trailers,
                end_stream=True
            )

    def __call__(self):  # pragma: no cover
        raise OSError('Http2SingleStreamLayer must be run as thread')

    def run(self):
        """Thread entry point: drive the generic HTTP layer for this stream."""
        layer = httpbase.HttpLayer(self, self.mode)
        try:
            layer()
        except exceptions.Http2ZombieException:  # pragma: no cover
            # zombies can be safely terminated - no need to kill them twice
            return
        except exceptions.ProtocolException as e:  # pragma: no cover
            self.log(repr(e), "info")
        except exceptions.SetServerNotAllowedException as e:  # pragma: no cover
            self.log(f"Changing the Host server for HTTP/2 connections not allowed: {e}", "info")
        except exceptions.Kill:  # pragma: no cover
            self.log(flow.Error.KILLED_MESSAGE, "info")
        self.kill()

View File

@ -1,72 +0,0 @@
import socket
from OpenSSL import SSL
import mitmproxy.net.tcp
from mitmproxy import tcp
from mitmproxy import flow
from mitmproxy import exceptions
from mitmproxy.proxy.protocol import base
class RawTCPLayer(base.Layer):
    """
    Transparent bidirectional forwarding of raw TCP bytes between the
    client and server connections.

    Unless ``ignore`` is set, traffic is recorded in a TCPFlow and the
    ``tcp_start``/``tcp_message``/``tcp_error``/``tcp_end`` events are
    dispatched over the channel.
    """
    chunk_size = 4096  # max bytes read per recv_into() call

    def __init__(self, ctx, ignore=False):
        # ignore: forward bytes only, without flow recording or events.
        self.ignore = ignore
        super().__init__(ctx)

    def __call__(self):
        self.connect()
        if not self.ignore:
            f = tcp.TCPFlow(self.client_conn, self.server_conn, self)
            self.channel.ask("tcp_start", f)
        # Reusable read buffer; memoryview avoids copies on recv_into/slicing.
        buf = memoryview(bytearray(self.chunk_size))
        client = self.client_conn.connection
        server = self.server_conn.connection
        conns = [client, server]
        # https://github.com/openssl/openssl/issues/6234
        for conn in conns:
            if isinstance(conn, SSL.Connection) and hasattr(SSL._lib, "SSL_clear_mode"):
                SSL._lib.SSL_clear_mode(conn._ssl, SSL._lib.SSL_MODE_AUTO_RETRY)
        try:
            while not self.channel.should_exit.is_set():
                # Wait (up to 10s) for either side to become readable.
                r = mitmproxy.net.tcp.ssl_read_select(conns, 10)
                for conn in r:
                    # Bytes read from one side are forwarded to the other.
                    dst = server if conn == client else client
                    try:
                        size = conn.recv_into(buf, self.chunk_size)
                    except (SSL.WantReadError, SSL.WantWriteError):
                        # TLS renegotiation in progress - retry on next select.
                        continue
                    if not size:
                        # EOF from this peer.
                        conns.remove(conn)
                        # Shutdown connection to the other peer
                        if isinstance(conn, SSL.Connection):
                            # We can't half-close a connection, so we just close everything here.
                            # Sockets will be cleaned up on a higher level.
                            return
                        else:
                            dst.shutdown(socket.SHUT_WR)
                        if len(conns) == 0:
                            return
                        continue
                    tcp_message = tcp.TCPMessage(dst == server, buf[:size].tobytes())
                    if not self.ignore:
                        f.messages.append(tcp_message)
                        # Addons may modify tcp_message.content before forwarding.
                        self.channel.ask("tcp_message", f)
                    dst.sendall(tcp_message.content)
        except (OSError, exceptions.TcpException, SSL.Error) as e:
            if not self.ignore:
                f.error = flow.Error("TCP connection closed unexpectedly: {}".format(repr(e)))
                self.channel.tell("tcp_error", f)
        finally:
            if not self.ignore:
                self.channel.tell("tcp_end", f)

View File

@ -1,508 +0,0 @@
from typing import Optional # noqa
from typing import Union
from mitmproxy import exceptions
from mitmproxy.net import tls as net_tls
from mitmproxy.proxy.protocol import base
# Maps TLS cipher-suite IDs (as seen in a ClientHello) to OpenSSL cipher names,
# used to mirror the client's cipher list towards the server.
# taken from https://testssl.sh/openssl-rfc.mapping.html
CIPHER_ID_NAME_MAP = {
    0x00: 'NULL-MD5',
    0x01: 'NULL-MD5',
    0x02: 'NULL-SHA',
    0x03: 'EXP-RC4-MD5',
    0x04: 'RC4-MD5',
    0x05: 'RC4-SHA',
    0x06: 'EXP-RC2-CBC-MD5',
    0x07: 'IDEA-CBC-SHA',
    0x08: 'EXP-DES-CBC-SHA',
    0x09: 'DES-CBC-SHA',
    0x0a: 'DES-CBC3-SHA',
    0x0b: 'EXP-DH-DSS-DES-CBC-SHA',
    0x0c: 'DH-DSS-DES-CBC-SHA',
    0x0d: 'DH-DSS-DES-CBC3-SHA',
    0x0e: 'EXP-DH-RSA-DES-CBC-SHA',
    0x0f: 'DH-RSA-DES-CBC-SHA',
    0x10: 'DH-RSA-DES-CBC3-SHA',
    0x11: 'EXP-EDH-DSS-DES-CBC-SHA',
    0x12: 'EDH-DSS-DES-CBC-SHA',
    0x13: 'EDH-DSS-DES-CBC3-SHA',
    0x14: 'EXP-EDH-RSA-DES-CBC-SHA',
    0x15: 'EDH-RSA-DES-CBC-SHA',
    0x16: 'EDH-RSA-DES-CBC3-SHA',
    0x17: 'EXP-ADH-RC4-MD5',
    0x18: 'ADH-RC4-MD5',
    0x19: 'EXP-ADH-DES-CBC-SHA',
    0x1a: 'ADH-DES-CBC-SHA',
    0x1b: 'ADH-DES-CBC3-SHA',
    # 0x1c: ,
    # 0x1d: ,
    0x1e: 'KRB5-DES-CBC-SHA',
    0x1f: 'KRB5-DES-CBC3-SHA',
    0x20: 'KRB5-RC4-SHA',
    0x21: 'KRB5-IDEA-CBC-SHA',
    0x22: 'KRB5-DES-CBC-MD5',
    0x23: 'KRB5-DES-CBC3-MD5',
    0x24: 'KRB5-RC4-MD5',
    0x25: 'KRB5-IDEA-CBC-MD5',
    0x26: 'EXP-KRB5-DES-CBC-SHA',
    0x27: 'EXP-KRB5-RC2-CBC-SHA',
    0x28: 'EXP-KRB5-RC4-SHA',
    0x29: 'EXP-KRB5-DES-CBC-MD5',
    0x2a: 'EXP-KRB5-RC2-CBC-MD5',
    0x2b: 'EXP-KRB5-RC4-MD5',
    0x2f: 'AES128-SHA',
    0x30: 'DH-DSS-AES128-SHA',
    0x31: 'DH-RSA-AES128-SHA',
    0x32: 'DHE-DSS-AES128-SHA',
    0x33: 'DHE-RSA-AES128-SHA',
    0x34: 'ADH-AES128-SHA',
    0x35: 'AES256-SHA',
    0x36: 'DH-DSS-AES256-SHA',
    0x37: 'DH-RSA-AES256-SHA',
    0x38: 'DHE-DSS-AES256-SHA',
    0x39: 'DHE-RSA-AES256-SHA',
    0x3a: 'ADH-AES256-SHA',
    0x3b: 'NULL-SHA256',
    0x3c: 'AES128-SHA256',
    0x3d: 'AES256-SHA256',
    0x3e: 'DH-DSS-AES128-SHA256',
    0x3f: 'DH-RSA-AES128-SHA256',
    0x40: 'DHE-DSS-AES128-SHA256',
    0x41: 'CAMELLIA128-SHA',
    0x42: 'DH-DSS-CAMELLIA128-SHA',
    0x43: 'DH-RSA-CAMELLIA128-SHA',
    0x44: 'DHE-DSS-CAMELLIA128-SHA',
    0x45: 'DHE-RSA-CAMELLIA128-SHA',
    0x46: 'ADH-CAMELLIA128-SHA',
    0x62: 'EXP1024-DES-CBC-SHA',
    0x63: 'EXP1024-DHE-DSS-DES-CBC-SHA',
    0x64: 'EXP1024-RC4-SHA',
    0x65: 'EXP1024-DHE-DSS-RC4-SHA',
    0x66: 'DHE-DSS-RC4-SHA',
    0x67: 'DHE-RSA-AES128-SHA256',
    0x68: 'DH-DSS-AES256-SHA256',
    0x69: 'DH-RSA-AES256-SHA256',
    0x6a: 'DHE-DSS-AES256-SHA256',
    0x6b: 'DHE-RSA-AES256-SHA256',
    0x6c: 'ADH-AES128-SHA256',
    0x6d: 'ADH-AES256-SHA256',
    0x80: 'GOST94-GOST89-GOST89',
    0x81: 'GOST2001-GOST89-GOST89',
    0x82: 'GOST94-NULL-GOST94',
    0x83: 'GOST2001-GOST89-GOST89',
    0x84: 'CAMELLIA256-SHA',
    0x85: 'DH-DSS-CAMELLIA256-SHA',
    0x86: 'DH-RSA-CAMELLIA256-SHA',
    0x87: 'DHE-DSS-CAMELLIA256-SHA',
    0x88: 'DHE-RSA-CAMELLIA256-SHA',
    0x89: 'ADH-CAMELLIA256-SHA',
    0x8a: 'PSK-RC4-SHA',
    0x8b: 'PSK-3DES-EDE-CBC-SHA',
    0x8c: 'PSK-AES128-CBC-SHA',
    0x8d: 'PSK-AES256-CBC-SHA',
    # 0x8e: ,
    # 0x8f: ,
    # 0x90: ,
    # 0x91: ,
    # 0x92: ,
    # 0x93: ,
    # 0x94: ,
    # 0x95: ,
    0x96: 'SEED-SHA',
    0x97: 'DH-DSS-SEED-SHA',
    0x98: 'DH-RSA-SEED-SHA',
    0x99: 'DHE-DSS-SEED-SHA',
    0x9a: 'DHE-RSA-SEED-SHA',
    0x9b: 'ADH-SEED-SHA',
    0x9c: 'AES128-GCM-SHA256',
    0x9d: 'AES256-GCM-SHA384',
    0x9e: 'DHE-RSA-AES128-GCM-SHA256',
    0x9f: 'DHE-RSA-AES256-GCM-SHA384',
    0xa0: 'DH-RSA-AES128-GCM-SHA256',
    0xa1: 'DH-RSA-AES256-GCM-SHA384',
    0xa2: 'DHE-DSS-AES128-GCM-SHA256',
    0xa3: 'DHE-DSS-AES256-GCM-SHA384',
    0xa4: 'DH-DSS-AES128-GCM-SHA256',
    0xa5: 'DH-DSS-AES256-GCM-SHA384',
    0xa6: 'ADH-AES128-GCM-SHA256',
    0xa7: 'ADH-AES256-GCM-SHA384',
    0x5600: 'TLS_FALLBACK_SCSV',
    0xc001: 'ECDH-ECDSA-NULL-SHA',
    0xc002: 'ECDH-ECDSA-RC4-SHA',
    0xc003: 'ECDH-ECDSA-DES-CBC3-SHA',
    0xc004: 'ECDH-ECDSA-AES128-SHA',
    0xc005: 'ECDH-ECDSA-AES256-SHA',
    0xc006: 'ECDHE-ECDSA-NULL-SHA',
    0xc007: 'ECDHE-ECDSA-RC4-SHA',
    0xc008: 'ECDHE-ECDSA-DES-CBC3-SHA',
    0xc009: 'ECDHE-ECDSA-AES128-SHA',
    0xc00a: 'ECDHE-ECDSA-AES256-SHA',
    0xc00b: 'ECDH-RSA-NULL-SHA',
    0xc00c: 'ECDH-RSA-RC4-SHA',
    0xc00d: 'ECDH-RSA-DES-CBC3-SHA',
    0xc00e: 'ECDH-RSA-AES128-SHA',
    0xc00f: 'ECDH-RSA-AES256-SHA',
    0xc010: 'ECDHE-RSA-NULL-SHA',
    0xc011: 'ECDHE-RSA-RC4-SHA',
    0xc012: 'ECDHE-RSA-DES-CBC3-SHA',
    0xc013: 'ECDHE-RSA-AES128-SHA',
    0xc014: 'ECDHE-RSA-AES256-SHA',
    0xc015: 'AECDH-NULL-SHA',
    0xc016: 'AECDH-RC4-SHA',
    0xc017: 'AECDH-DES-CBC3-SHA',
    0xc018: 'AECDH-AES128-SHA',
    0xc019: 'AECDH-AES256-SHA',
    0xc01a: 'SRP-3DES-EDE-CBC-SHA',
    0xc01b: 'SRP-RSA-3DES-EDE-CBC-SHA',
    0xc01c: 'SRP-DSS-3DES-EDE-CBC-SHA',
    0xc01d: 'SRP-AES-128-CBC-SHA',
    0xc01e: 'SRP-RSA-AES-128-CBC-SHA',
    0xc01f: 'SRP-DSS-AES-128-CBC-SHA',
    0xc020: 'SRP-AES-256-CBC-SHA',
    0xc021: 'SRP-RSA-AES-256-CBC-SHA',
    0xc022: 'SRP-DSS-AES-256-CBC-SHA',
    0xc023: 'ECDHE-ECDSA-AES128-SHA256',
    0xc024: 'ECDHE-ECDSA-AES256-SHA384',
    0xc025: 'ECDH-ECDSA-AES128-SHA256',
    0xc026: 'ECDH-ECDSA-AES256-SHA384',
    0xc027: 'ECDHE-RSA-AES128-SHA256',
    0xc028: 'ECDHE-RSA-AES256-SHA384',
    0xc029: 'ECDH-RSA-AES128-SHA256',
    0xc02a: 'ECDH-RSA-AES256-SHA384',
    0xc02b: 'ECDHE-ECDSA-AES128-GCM-SHA256',
    0xc02c: 'ECDHE-ECDSA-AES256-GCM-SHA384',
    0xc02d: 'ECDH-ECDSA-AES128-GCM-SHA256',
    0xc02e: 'ECDH-ECDSA-AES256-GCM-SHA384',
    0xc02f: 'ECDHE-RSA-AES128-GCM-SHA256',
    0xc030: 'ECDHE-RSA-AES256-GCM-SHA384',
    0xc031: 'ECDH-RSA-AES128-GCM-SHA256',
    0xc032: 'ECDH-RSA-AES256-GCM-SHA384',
    0xcc13: 'ECDHE-RSA-CHACHA20-POLY1305',
    0xcc14: 'ECDHE-ECDSA-CHACHA20-POLY1305',
    0xcc15: 'DHE-RSA-CHACHA20-POLY1305',
    0xff00: 'GOST-MD5',
    0xff01: 'GOST-GOST94',
    0xff02: 'GOST-GOST89MAC',
    0xff03: 'GOST-GOST89STREAM',
    0x010080: 'RC4-MD5',
    0x020080: 'EXP-RC4-MD5',
    0x030080: 'RC2-CBC-MD5',
    0x040080: 'EXP-RC2-CBC-MD5',
    0x050080: 'IDEA-CBC-MD5',
    0x060040: 'DES-CBC-MD5',
    0x0700c0: 'DES-CBC3-MD5',
    0x080080: 'RC4-64-MD5',
}
# Default cipher list offered to the client when the user does not configure one.
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://ssl-config.mozilla.org/#config=old
DEFAULT_CLIENT_CIPHERS = (
    "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:"
    "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:"
    "DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:"
    "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:"
    "ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:"
    "AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
)
class TlsLayer(base.Layer):
    """
    The TLS layer implements transparent TLS connections.

    It exposes the following API to child layers:

      - :py:meth:`set_server_tls` to modify TLS settings for the server connection.
      - :py:attr:`server_tls`, :py:attr:`server_sni` as read-only attributes describing the current TLS settings for
        the server connection.
    """

    def __init__(self, ctx, client_tls, server_tls, custom_server_sni=None):
        # client_tls / server_tls: whether each side should be TLS-wrapped.
        # custom_server_sni: str override, None (mirror client SNI) or False (send no SNI).
        super().__init__(ctx)
        self._client_tls = client_tls
        self._server_tls = server_tls
        self._custom_server_sni = custom_server_sni
        self._client_hello: Optional[net_tls.ClientHello] = None

    def __call__(self):
        """
        The strategy for establishing TLS is as follows:
            First, we determine whether we need the server cert to establish ssl with the client.
            If so, we first connect to the server and then to the client.
            If not, we only connect to the client and do the server handshake lazily.

        An additional complexity is that we need to mirror SNI and ALPN from the client when connecting to the server.
        We manually peek into the connection and parse the ClientHello message to obtain these values.
        """
        if self._client_tls:
            # Peek into the connection, read the initial client hello and parse it to obtain SNI and ALPN values.
            try:
                self._client_hello = net_tls.ClientHello.from_file(self.client_conn.rfile)
            except exceptions.TlsProtocolException as e:
                self.log("Cannot parse Client Hello: %s" % repr(e), "error")
                # Without knowing the ClientHello we cannot proceed in this connection.
                return

        # Do we need to do a server handshake now?
        # There are two reasons why we would want to establish TLS with the server now:
        #  1. If we already have an existing server connection and server_tls is True,
        #     we need to establish TLS now because .connect() will not be called anymore.
        #  2. We may need information from the server connection for the client handshake.
        #
        # A couple of factors influence (2):
        #  2.1 There actually is (or will be) a TLS-enabled upstream connection
        #  2.2 An upstream connection is not wanted by the user if --no-upstream-cert is passed.
        #  2.3 An upstream connection is implied by add_upstream_certs_to_client_chain
        #  2.4 The client wants to negotiate an alternative protocol in its handshake, we need to find out
        #      what is supported by the server
        #  2.5 The client did not sent a SNI value, we don't know the certificate subject.
        client_tls_requires_server_connection = (
            self._server_tls and
            self.config.options.upstream_cert and
            (
                self.config.options.add_upstream_certs_to_client_chain or
                self._client_tls and (
                    self._client_hello.alpn_protocols or
                    not self._client_hello.sni
                )
            )
        )
        establish_server_tls_now = (
            (self.server_conn.connected() and self._server_tls) or
            client_tls_requires_server_connection
        )

        if self._client_tls and establish_server_tls_now:
            self._establish_tls_with_client_and_server()
        elif self._client_tls:
            self._establish_tls_with_client()
        elif establish_server_tls_now:
            self._establish_tls_with_server()

        # Hand off to the next protocol layer.
        layer = self.ctx.next_layer(self)
        layer()

    def __repr__(self):  # pragma: no cover
        if self._client_tls and self._server_tls:
            return "TlsLayer(client and server)"
        elif self._client_tls:
            return "TlsLayer(client)"
        elif self._server_tls:
            return "TlsLayer(server)"
        else:
            return "TlsLayer(inactive)"

    def connect(self):
        # Lazy server handshake: connect and (if requested) upgrade to TLS.
        if not self.server_conn.connected():
            self.ctx.connect()
        if self._server_tls and not self.server_conn.tls_established:
            self._establish_tls_with_server()

    def set_server_tls(self, server_tls: bool, sni: Union[str, None, bool] = None) -> None:
        """
        Set the TLS settings for the next server connection that will be established.
        This function will not alter an existing connection.

        Args:
            server_tls: Shall we establish TLS with the server?
            sni: ``str`` for a custom SNI value,
                ``None`` for the client SNI value,
                ``False`` if no SNI value should be sent.
        """
        self._server_tls = server_tls
        self._custom_server_sni = sni

    @property
    def server_tls(self):
        """
        ``True``, if the next server connection that will be established should be upgraded to TLS.
        """
        return self._server_tls

    @property
    def server_sni(self) -> Optional[str]:
        """
        The Server Name Indication we want to send with the next server TLS handshake.
        """
        if self._custom_server_sni is False:
            return None
        elif self._custom_server_sni:
            return self._custom_server_sni
        elif self._client_hello and self._client_hello.sni:
            return self._client_hello.sni.decode("idna")
        else:
            return None

    @property
    def alpn_for_client_connection(self):
        # ALPN protocol negotiated with the server, mirrored to the client.
        return self.server_conn.get_alpn_proto_negotiated()

    def __alpn_select_callback(self, conn_, options):
        # This gets triggered if we haven't established an upstream connection yet.
        default_alpn = b'http/1.1'
        # Prefer the server-negotiated protocol, then http/1.1, then first offer.
        if self.alpn_for_client_connection in options:
            choice = bytes(self.alpn_for_client_connection)
        elif default_alpn in options:
            choice = bytes(default_alpn)
        else:
            choice = options[0]
        self.log("ALPN for client: %s" % choice, "debug")
        return choice

    def _establish_tls_with_client_and_server(self):
        try:
            self.ctx.connect()
            self._establish_tls_with_server()
        except Exception:
            # If establishing TLS with the server fails, we try to establish TLS with the client nonetheless
            # to send an error message over TLS.
            try:
                self._establish_tls_with_client()
            except:
                pass
            raise

        self._establish_tls_with_client()

    def _establish_tls_with_client(self):
        """Perform the client-side TLS handshake with a freshly-generated cert."""
        self.log("Establish TLS with client", "debug")
        cert, key, chain_file = self._find_cert()

        if self.config.options.add_upstream_certs_to_client_chain:
            extra_certs = self.server_conn.server_certs
        else:
            extra_certs = None

        try:
            tls_method, tls_options = net_tls.VERSION_CHOICES[self.config.options.ssl_version_client]
            self.client_conn.convert_to_tls(
                cert, key,
                method=tls_method,
                options=tls_options,
                cipher_list=self.config.options.ciphers_client or DEFAULT_CLIENT_CIPHERS,
                dhparams=self.config.certstore.dhparams,
                chain_file=chain_file,
                alpn_select_callback=self.__alpn_select_callback,
                extra_chain_certs=extra_certs,
            )
            # Some TLS clients will not fail the handshake,
            # but will immediately throw an "unexpected eof" error on the first read.
            # The reason for this might be difficult to find, so we try to peek here to see if it
            # raises an error.
            self.client_conn.rfile.peek(1)
        except exceptions.TlsException as e:
            sni_str = self._client_hello.sni and self._client_hello.sni.decode("idna")
            raise exceptions.ClientHandshakeException(
                "Cannot establish TLS with client (sni: {sni}): {e}".format(
                    sni=sni_str, e=repr(e)
                ),
                sni_str or repr(self.server_conn.address)
            )

    def _establish_tls_with_server(self):
        """Perform the server-side TLS handshake, mirroring client SNI/ALPN/ciphers."""
        self.log("Establish TLS with server", "debug")
        try:
            alpn = None
            if self._client_tls:
                if self._client_hello.alpn_protocols:
                    # We only support http/1.1 and h2.
                    # If the server only supports spdy (next to http/1.1), it may select that
                    # and mitmproxy would enter TCP passthrough mode, which we want to avoid.
                    alpn = [
                        x for x in self._client_hello.alpn_protocols if
                        not (x.startswith(b"h2-") or x.startswith(b"spdy"))
                    ]
                if alpn and b"h2" in alpn and not self.config.options.http2:
                    alpn.remove(b"h2")

            if self.client_conn.tls_established and self.client_conn.get_alpn_proto_negotiated():
                # If the client has already negotiated an ALP, then force the
                # server to use the same. This can only happen if the host gets
                # changed after the initial connection was established. E.g.:
                #  * the client offers http/1.1 and h2,
                #  * the initial host is only capable of http/1.1,
                #  * then the first server connection negotiates http/1.1,
                #  * but after the server_conn change, the new host offers h2
                #  * which results in garbage because the layers don't match.
                alpn = [self.client_conn.get_alpn_proto_negotiated()]

            # We pass through the list of ciphers send by the client, because some HTTP/2 servers
            # will select a non-HTTP/2 compatible cipher from our default list and then hang up
            # because it's incompatible with h2. :-)
            ciphers_server = self.config.options.ciphers_server
            if not ciphers_server and self._client_tls:
                ciphers_server = []
                for id in self._client_hello.cipher_suites:
                    if id in CIPHER_ID_NAME_MAP.keys():
                        ciphers_server.append(CIPHER_ID_NAME_MAP[id])
                ciphers_server = ':'.join(ciphers_server)

            args = net_tls.client_arguments_from_options(self.config.options)
            args["cipher_list"] = ciphers_server
            self.server_conn.establish_tls(
                sni=self.server_sni,
                alpn_protos=alpn,
                **args
            )
            tls_cert_err = self.server_conn.ssl_verification_error
            if tls_cert_err is not None:
                # Verification errors are logged but do not abort the connection here.
                self.log(str(tls_cert_err), "warn")
                self.log("Ignoring server verification error, continuing with connection", "warn")
        except exceptions.InvalidCertificateException as e:
            raise exceptions.InvalidServerCertificate(str(e))
        except exceptions.TlsException as e:
            raise exceptions.TlsProtocolException(
                "Cannot establish TLS with {host}:{port} (sni: {sni}): {e}".format(
                    host=self.server_conn.address[0],
                    port=self.server_conn.address[1],
                    sni=self.server_sni,
                    e=repr(e)
                )
            )

        proto = self.alpn_for_client_connection.decode() if self.alpn_for_client_connection else '-'
        self.log(f"ALPN selected by server: {proto}", "debug")

    def _find_cert(self):
        """
        This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name
        our certificate should have and then fetches a matching cert from the certstore.
        """
        host = None
        sans = set()
        organization = None

        # In normal operation, the server address should always be known at this point.
        # However, we may just want to establish TLS so that we can send an error message to the client,
        # in which case the address can be None.
        if self.server_conn.address:
            host = self.server_conn.address[0].encode("idna")

        # Should we incorporate information from the server certificate?
        use_upstream_cert = (
            self.server_conn and
            self.server_conn.tls_established and
            self.config.options.upstream_cert
        )
        if use_upstream_cert:
            upstream_cert = self.server_conn.cert
            sans.update(upstream_cert.altnames)
            if upstream_cert.cn:
                sans.add(host)
                host = upstream_cert.cn.decode("utf8").encode("idna")
            if upstream_cert.organization:
                organization = upstream_cert.organization

        # Also add SNI values.
        if self._client_hello.sni:
            sans.add(self._client_hello.sni)
        if self._custom_server_sni:
            sans.add(self._custom_server_sni.encode("idna"))

        # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.
        # In other words, the Common Name is irrelevant then.
        if host:
            sans.add(host)
        return self.config.certstore.get_cert(host, list(sans), organization)

View File

@ -1,228 +0,0 @@
import queue
from OpenSSL import SSL
import wsproto
from wsproto import events, WSConnection
from wsproto.connection import ConnectionType
from wsproto.events import AcceptConnection, CloseConnection, Message, Ping, Request
from wsproto.extensions import PerMessageDeflate
from mitmproxy import exceptions, flow
from mitmproxy.proxy.protocol import base
from mitmproxy.net import tcp, websocket
from mitmproxy.websocket import WebSocketFlow, WebSocketMessage
from mitmproxy.utils import strutils
class WebSocketLayer(base.Layer):
"""
WebSocket layer to intercept, modify, and forward WebSocket messages.
Only version 13 is supported (as specified in RFC6455).
Only HTTP/1.1-initiated connections are supported.
The client starts by sending an Upgrade-request.
In order to determine the handshake and negotiate the correct protocol
and extensions, the Upgrade-request is forwarded to the server.
The response from the server is then parsed and negotiated settings are extracted.
Finally the handshake is completed by forwarding the server-response to the client.
After that, only WebSocket frames are exchanged.
PING/PONG frames pass through and must be answered by the other endpoint.
CLOSE frames are forwarded before this WebSocketLayer terminates.
This layer is transparent to any negotiated extensions.
This layer is transparent to any negotiated subprotocols.
Only raw frames are forwarded to the other endpoint.
WebSocket messages are stored in a WebSocketFlow.
"""
def __init__(self, ctx, handshake_flow):
super().__init__(ctx)
self.handshake_flow = handshake_flow
self.flow: WebSocketFlow = None
self.client_frame_buffer = []
self.server_frame_buffer = []
self.connections: dict[object, WSConnection] = {}
client_extensions = []
server_extensions = []
if 'Sec-WebSocket-Extensions' in handshake_flow.response.headers:
if PerMessageDeflate.name in handshake_flow.response.headers['Sec-WebSocket-Extensions']:
client_extensions = [PerMessageDeflate()]
server_extensions = [PerMessageDeflate()]
self.connections[self.client_conn] = WSConnection(ConnectionType.SERVER)
self.connections[self.server_conn] = WSConnection(ConnectionType.CLIENT)
if client_extensions:
client_extensions[0].finalize(handshake_flow.response.headers['Sec-WebSocket-Extensions'])
if server_extensions:
server_extensions[0].finalize(handshake_flow.response.headers['Sec-WebSocket-Extensions'])
request = Request(extensions=client_extensions, host=handshake_flow.request.host, target=handshake_flow.request.path)
data = self.connections[self.server_conn].send(request)
self.connections[self.client_conn].receive_data(data)
event = next(self.connections[self.client_conn].events())
assert isinstance(event, events.Request)
data = self.connections[self.client_conn].send(AcceptConnection(extensions=server_extensions))
self.connections[self.server_conn].receive_data(data)
assert isinstance(next(self.connections[self.server_conn].events()), events.AcceptConnection)
def _handle_event(self, event, source_conn, other_conn, is_server):
self.log(
"WebSocket Event from {}: {}".format("server" if is_server else "client", event),
"debug"
)
if isinstance(event, events.Message):
return self._handle_message(event, source_conn, other_conn, is_server)
elif isinstance(event, events.Ping):
return self._handle_ping(event, source_conn, other_conn, is_server)
elif isinstance(event, events.Pong):
return self._handle_pong(event, source_conn, other_conn, is_server)
elif isinstance(event, events.CloseConnection):
return self._handle_close_connection(event, source_conn, other_conn, is_server)
# fail-safe for unhandled events
return True # pragma: no cover
def _handle_message(self, event, source_conn, other_conn, is_server):
fb = self.server_frame_buffer if is_server else self.client_frame_buffer
fb.append(event.data)
if event.message_finished:
original_chunk_sizes = [len(f) for f in fb]
if isinstance(event, events.TextMessage):
message_type = wsproto.frame_protocol.Opcode.TEXT
payload = ''.join(fb)
else:
message_type = wsproto.frame_protocol.Opcode.BINARY
payload = b''.join(fb)
fb.clear()
websocket_message = WebSocketMessage(message_type, not is_server, payload)
length = len(websocket_message.content)
self.flow.messages.append(websocket_message)
self.channel.ask("websocket_message", self.flow)
if not self.flow.stream and not websocket_message.killed:
def get_chunk(payload):
if len(payload) == length:
# message has the same length, we can reuse the same sizes
pos = 0
for s in original_chunk_sizes:
yield (payload[pos:pos + s], True if pos + s == length else False)
pos += s
else:
# just re-chunk everything into 4kB frames
# header len = 4 bytes without masking key and 8 bytes with masking key
chunk_size = 4092 if is_server else 4088
chunks = range(0, len(payload), chunk_size)
for i in chunks:
yield (payload[i:i + chunk_size], True if i + chunk_size >= len(payload) else False)
for chunk, final in get_chunk(websocket_message.content):
data = self.connections[other_conn].send(Message(data=chunk, message_finished=final))
other_conn.send(data)
if self.flow.stream:
data = self.connections[other_conn].send(Message(data=event.data, message_finished=event.message_finished))
other_conn.send(data)
return True
def _handle_ping(self, event, source_conn, other_conn, is_server):
    """Forward a fresh Ping to the peer and answer the sender with a Pong.

    Returns True so the relay loop keeps running.
    """
    # Emit a new Ping towards the other endpoint so keepalives still
    # traverse the proxy end-to-end.
    data = self.connections[other_conn].send(Ping())
    other_conn.send(data)
    # Use event.response() to create the appropriate Pong response for the sender.
    data = self.connections[source_conn].send(event.response())
    source_conn.send(data)
    self.log(
        "Ping Received from {}".format("server" if is_server else "client"),
        "info",
        [strutils.bytes_to_escaped_str(bytes(event.payload))]
    )
    return True
def _handle_pong(self, event, source_conn, other_conn, is_server):
    """Log a received Pong frame; returns True so the relay loop continues."""
    sender = "server" if is_server else "client"
    escaped_payload = strutils.bytes_to_escaped_str(bytes(event.payload))
    self.log("Pong Received from {}".format(sender), "info", [escaped_payload])
    return True
def _handle_close_connection(self, event, source_conn, other_conn, is_server):
    """Record close metadata on the flow, propagate the close frame to the
    peer, and acknowledge it to the sender.

    Returns False to signal the relay loop that the connection is closing.
    """
    f = self.flow
    f.close_sender = "server" if is_server else "client"
    f.close_code = event.code
    f.close_reason = event.reason

    # Mirror the close to the other endpoint...
    close_frame = self.connections[other_conn].send(
        CloseConnection(code=event.code, reason=event.reason)
    )
    other_conn.send(close_frame)
    # ...and send the corresponding close response back to the sender.
    response_frame = self.connections[source_conn].send(event.response())
    source_conn.send(response_frame)
    return False
def _inject_messages(self, endpoint, message_queue):
    """Drain all pending injected payloads and send each one to *endpoint*
    as a single complete message."""
    while True:
        try:
            payload = message_queue.get_nowait()
        except queue.Empty:
            return
        frame_data = self.connections[endpoint].send(
            Message(data=payload, message_finished=True)
        )
        endpoint.send(frame_data)
def __call__(self):
    """Relay WebSocket traffic between client and server until either side
    closes, a transport error occurs, or the master signals shutdown."""
    self.flow = WebSocketFlow(self.client_conn, self.server_conn, self.handshake_flow)
    # Cross-link the websocket flow with its HTTP handshake flow.
    self.flow.metadata['websocket_handshake'] = self.handshake_flow.id
    self.handshake_flow.metadata['websocket_flow'] = self.flow.id
    self.channel.ask("websocket_start", self.flow)
    conns = [c.connection for c in self.connections.keys()]
    close_received = False
    try:
        while not self.channel.should_exit.is_set():
            # Flush any messages addons asked to inject, in both directions.
            self._inject_messages(self.client_conn, self.flow._inject_messages_client)
            self._inject_messages(self.server_conn, self.flow._inject_messages_server)
            # Wait (with a short timeout) for readable sockets.
            r = tcp.ssl_read_select(conns, 0.1)
            for conn in r:
                source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
                other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
                is_server = (source_conn == self.server_conn)
                header, frame, consumed_bytes = websocket.read_frame(source_conn.rfile)
                self.log(
                    "WebSocket Frame from {}: {}, {}".format(
                        "server" if is_server else "client",
                        header,
                        frame,
                    ),
                    "debug"
                )
                # Feed the raw bytes into wsproto; any protocol-level output
                # is echoed back to the sender.
                data = self.connections[source_conn].receive_data(consumed_bytes)
                source_conn.send(data)
                if close_received:
                    return
                for event in self.connections[source_conn].events():
                    # Handlers return False once a close has been processed.
                    if not self._handle_event(event, source_conn, other_conn, is_server):
                        if not close_received:
                            close_received = True
    except (OSError, exceptions.TcpException, SSL.Error) as e:
        s = 'server' if is_server else 'client'
        self.flow.error = flow.Error("WebSocket connection closed unexpectedly by {}: {}".format(s, repr(e)))
        self.channel.tell("websocket_error", self.flow)
    finally:
        self.flow.ended = True
        self.channel.tell("websocket_end", self.flow)

View File

@ -1,131 +0,0 @@
from mitmproxy import log
from mitmproxy import exceptions
from mitmproxy.net import tls
from mitmproxy.proxy import protocol
from mitmproxy.proxy import modes
from mitmproxy.proxy.protocol import http
class RootContext:

    """
    The outermost context provided to the root layer.
    As a consequence, every layer has access to methods and attributes defined here.

    Attributes:
        client_conn:
            The :py:class:`client connection <mitmproxy.connections.ClientConnection>`.
        channel:
            A :py:class:`~mitmproxy.controller.Channel` to communicate with the FlowMaster.
            Provides :py:meth:`.ask() <mitmproxy.controller.Channel.ask>` and
            :py:meth:`.tell() <mitmproxy.controller.Channel.tell>` methods.
        config:
            The :py:class:`proxy server's configuration <mitmproxy.proxy.ProxyConfig>`
    """

    def __init__(self, client_conn, config, channel):
        self.client_conn = client_conn
        self.channel = channel
        self.config = config

    def next_layer(self, top_layer):
        """
        This function determines the next layer in the protocol stack.

        Arguments:
            top_layer: the current innermost layer.

        Returns:
            The next layer
        """
        layer = self._next_layer(top_layer)
        # Addons may inspect or replace the layer chosen by the heuristic below.
        return self.channel.ask("next_layer", layer)

    def _next_layer(self, top_layer):
        """Pick the next protocol layer by peeking at the first client bytes
        and inspecting the current layer stack."""
        # Peek at the first three bytes without consuming them.
        try:
            d = top_layer.client_conn.rfile.peek(3)
        except exceptions.TcpException as e:
            raise exceptions.ProtocolException(str(e))
        client_tls = tls.is_tls_record_magic(d)

        # 1. check for filter
        if self.config.check_filter:
            is_filtered = self.config.check_filter(top_layer.server_conn.address)
            if not is_filtered and client_tls:
                try:
                    client_hello = tls.ClientHello.from_file(self.client_conn.rfile)
                except exceptions.TlsProtocolException as e:
                    self.log("Cannot parse Client Hello: %s" % repr(e), "error")
                else:
                    # Re-check the filter against the SNI hostname.
                    sni_str = client_hello.sni and client_hello.sni.decode("idna")
                    is_filtered = self.config.check_filter((sni_str, 443))
            if is_filtered:
                return protocol.RawTCPLayer(top_layer, ignore=True)

        # 2. Always insert a TLS layer, even if there's neither client nor server tls.
        # An inline script may upgrade from http to https,
        # in which case we need some form of TLS layer.
        if isinstance(top_layer, modes.ReverseProxy):
            return protocol.TlsLayer(
                top_layer,
                client_tls,
                top_layer.server_tls,
                top_layer.server_conn.address[0]
            )
        if isinstance(top_layer, protocol.ServerConnectionMixin):
            return protocol.TlsLayer(top_layer, client_tls, client_tls)
        if isinstance(top_layer, protocol.UpstreamConnectLayer):
            # if the user manually sets a scheme for connect requests, we use this to decide if we
            # want TLS or not.
            if top_layer.connect_request.scheme:
                server_tls = top_layer.connect_request.scheme == "https"
            else:
                server_tls = client_tls
            return protocol.TlsLayer(top_layer, client_tls, server_tls)

        # 3. In Http Proxy mode and Upstream Proxy mode, the next layer is fixed.
        if isinstance(top_layer, protocol.TlsLayer):
            if isinstance(top_layer.ctx, modes.HttpProxy):
                return protocol.Http1Layer(top_layer, http.HTTPMode.regular)
            if isinstance(top_layer.ctx, modes.HttpUpstreamProxy):
                return protocol.Http1Layer(top_layer, http.HTTPMode.upstream)

        # 4. Check for other TLS cases (e.g. after CONNECT).
        if client_tls:
            return protocol.TlsLayer(top_layer, True, True)

        # 5. Check for --tcp
        if self.config.check_tcp(top_layer.server_conn.address):
            return protocol.RawTCPLayer(top_layer)

        # 6. Check for TLS ALPN (HTTP1/HTTP2)
        if isinstance(top_layer, protocol.TlsLayer):
            alpn = top_layer.client_conn.get_alpn_proto_negotiated()
            if alpn == b'h2':
                return protocol.Http2Layer(top_layer, http.HTTPMode.transparent)
            if alpn == b'http/1.1':
                return protocol.Http1Layer(top_layer, http.HTTPMode.transparent)

        # 7. Check for raw tcp mode
        is_ascii = (
            len(d) == 3 and
            # expect A-Za-z
            all(65 <= x <= 90 or 97 <= x <= 122 for x in d)
        )
        if self.config.options.rawtcp and not is_ascii:
            return protocol.RawTCPLayer(top_layer)

        # 8. Assume HTTP1 by default
        return protocol.Http1Layer(top_layer, http.HTTPMode.transparent)

    def log(self, msg, level, subs=()):
        """
        Send a log message to the master.

        Arguments:
            msg: the message text; prefixed with the client's address.
            level: log level string, e.g. "info" or "debug".
            subs: optional sub-messages, each rendered on its own indented line.
        """
        full_msg = [
            "{}:{}: {}".format(self.client_conn.address[0], self.client_conn.address[1], msg)
        ]
        for i in subs:
            full_msg.append(" -> " + i)
        full_msg = "\n".join(full_msg)
        self.channel.tell("log", log.LogEntry(full_msg, level))

View File

@ -1,160 +1,452 @@
import sys
"""
Proxy Server Implementation using asyncio.
The very high level overview is as follows:
- Spawn one coroutine per client connection and create a reverse proxy layer to example.com
- Process any commands from layer (such as opening a server connection)
- Wait for any IO and send it as events to top layer.
"""
import abc
import asyncio
import collections
import time
import traceback
import typing
from contextlib import contextmanager
from dataclasses import dataclass
from mitmproxy import exceptions, flow
from mitmproxy import connections
from mitmproxy import controller # noqa
from mitmproxy import http
from mitmproxy import log
from mitmproxy import platform
from mitmproxy.proxy import config
from mitmproxy.proxy import modes
from mitmproxy.proxy import root_context
from mitmproxy.net import tcp
from mitmproxy.net.http import http1
from OpenSSL import SSL
from mitmproxy import http, options as moptions
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy import commands, events, layer, layers, server_hooks
from mitmproxy.proxy.context import Address, Client, Connection, ConnectionState, Context
from mitmproxy.proxy.layers import tls
from mitmproxy.utils import asyncio_utils
from mitmproxy.utils import human
from mitmproxy.utils.data import pkg_data
class DummyServer:
bound = False
class TimeoutWatchdog:
last_activity: float
CONNECTION_TIMEOUT = 10 * 60
can_timeout: asyncio.Event
blocker: int
def __init__(self, config=None):
self.config = config
self.address = "dummy"
def __init__(self, callback: typing.Callable[[], typing.Any]):
self.callback = callback
self.last_activity = time.time()
self.can_timeout = asyncio.Event()
self.can_timeout.set()
self.blocker = 0
def set_channel(self, channel):
pass
def register_activity(self):
self.last_activity = time.time()
def serve_forever(self):
pass
async def watch(self):
while True:
await self.can_timeout.wait()
await asyncio.sleep(self.CONNECTION_TIMEOUT - (time.time() - self.last_activity))
if self.last_activity + self.CONNECTION_TIMEOUT < time.time():
await self.callback()
return
def shutdown(self):
pass
class ProxyServer(tcp.TCPServer):
allow_reuse_address = True
bound = True
channel: controller.Channel
def __init__(self, config: config.ProxyConfig) -> None:
"""
Raises ServerException if there's a startup problem.
"""
self.config = config
@contextmanager
def disarm(self):
self.can_timeout.clear()
self.blocker += 1
try:
super().__init__(
(config.options.listen_host, config.options.listen_port)
)
if config.options.mode == "transparent":
platform.init_transparent_mode()
except Exception as e:
if self.socket:
self.socket.close()
raise exceptions.ServerException(
'Error starting proxy server: ' + repr(e)
) from e
yield
finally:
self.blocker -= 1
if self.blocker == 0:
self.register_activity()
self.can_timeout.set()
def set_channel(self, channel):
self.channel = channel
def handle_client_connection(self, conn, client_address):
h = ConnectionHandler(
conn,
client_address,
self.config,
self.channel
@dataclass
class ConnectionIO:
handler: typing.Optional[asyncio.Task] = None
reader: typing.Optional[asyncio.StreamReader] = None
writer: typing.Optional[asyncio.StreamWriter] = None
class ConnectionHandler(metaclass=abc.ABCMeta):
transports: typing.MutableMapping[Connection, ConnectionIO]
timeout_watchdog: TimeoutWatchdog
client: Client
max_conns: typing.DefaultDict[Address, asyncio.Semaphore]
layer: layer.Layer
def __init__(self, context: Context) -> None:
self.client = context.client
self.transports = {}
self.max_conns = collections.defaultdict(lambda: asyncio.Semaphore(5))
# Ask for the first layer right away.
# In a reverse proxy scenario, this is necessary as we would otherwise hang
# on protocols that start with a server greeting.
self.layer = layer.NextLayer(context, ask_on_start=True)
self.timeout_watchdog = TimeoutWatchdog(self.on_timeout)
async def handle_client(self) -> None:
watch = asyncio_utils.create_task(
self.timeout_watchdog.watch(),
name="timeout watchdog",
client=self.client.peername,
)
h.handle()
if not watch:
return # this should not be needed, see asyncio_utils.create_task
self.log("client connect")
await self.handle_hook(server_hooks.ClientConnectedHook(self.client))
if self.client.error:
self.log("client kill connection")
writer = self.transports.pop(self.client).writer
assert writer
writer.close()
else:
handler = asyncio_utils.create_task(
self.handle_connection(self.client),
name=f"client connection handler",
client=self.client.peername,
)
if not handler:
return # this should not be needed, see asyncio_utils.create_task
self.transports[self.client].handler = handler
self.server_event(events.Start())
await asyncio.wait([handler])
class ConnectionHandler:
watch.cancel()
def __init__(self, client_conn, client_address, config, channel):
self.config: config.ProxyConfig = config
self.client_conn = connections.ClientConnection(
client_conn,
client_address,
None)
"""@type: mitmproxy.proxy.connection.ClientConnection"""
self.channel = channel
"""@type: mitmproxy.controller.Channel"""
self.log("client disconnect")
self.client.timestamp_end = time.time()
await self.handle_hook(server_hooks.ClientClosedHook(self.client))
def _create_root_layer(self):
root_ctx = root_context.RootContext(
self.client_conn,
self.config,
self.channel
if self.transports:
self.log("closing transports...", "debug")
for io in self.transports.values():
if io.handler:
asyncio_utils.cancel_task(io.handler, "client disconnected")
await asyncio.wait([x.handler for x in self.transports.values() if x.handler])
self.log("transports closed!", "debug")
async def open_connection(self, command: commands.OpenConnection) -> None:
if not command.connection.address:
self.log(f"Cannot open connection, no hostname given.")
self.server_event(events.OpenConnectionReply(command, f"Cannot open connection, no hostname given."))
return
hook_data = server_hooks.ServerConnectionHookData(
client=self.client,
server=command.connection
)
await self.handle_hook(server_hooks.ServerConnectHook(hook_data))
if command.connection.error:
self.log(f"server connection to {human.format_address(command.connection.address)} killed before connect.")
self.server_event(events.OpenConnectionReply(command, "Connection killed."))
return
mode = self.config.options.mode
if mode.startswith("upstream:"):
return modes.HttpUpstreamProxy(
root_ctx,
self.config.upstream_server.address
)
elif mode == "transparent":
return modes.TransparentProxy(root_ctx)
elif mode.startswith("reverse:"):
server_tls = self.config.upstream_server.scheme == "https"
return modes.ReverseProxy(
root_ctx,
self.config.upstream_server.address,
server_tls
)
elif mode == "socks5":
return modes.Socks5Proxy(root_ctx)
elif mode == "regular":
return modes.HttpProxy(root_ctx)
elif callable(mode): # pragma: no cover
return mode(root_ctx)
else: # pragma: no cover
raise ValueError("Unknown proxy mode: %s" % mode)
def handle(self):
self.log("clientconnect", "info")
root_layer = None
try:
root_layer = self._create_root_layer()
root_layer = self.channel.ask("clientconnect", root_layer)
root_layer()
except exceptions.Kill:
self.log(flow.Error.KILLED_MESSAGE, "info")
except exceptions.ProtocolException as e:
if isinstance(e, exceptions.ClientHandshakeException):
self.log(
"Client Handshake failed. "
"The client may not trust the proxy's certificate for {}.".format(e.server),
"warn"
)
self.log(repr(e), "debug")
elif isinstance(e, exceptions.InvalidServerCertificate):
self.log(str(e), "warn")
self.log("Invalid certificate, closing connection. Pass --ssl-insecure to disable validation.", "warn")
else:
self.log(str(e), "warn")
self.log(repr(e), "debug")
# If an error propagates to the topmost level,
# we send an HTTP error response, which is both
# understandable by HTTP clients and humans.
async with self.max_conns[command.connection.address]:
try:
error_response = http.make_error_response(502, repr(e))
self.client_conn.send(http1.assemble_response(error_response))
except exceptions.TcpException:
pass
command.connection.timestamp_start = time.time()
reader, writer = await asyncio.open_connection(*command.connection.address)
except (IOError, asyncio.CancelledError) as e:
err = str(e)
if not err: # str(CancelledError()) returns empty string.
err = "connection cancelled"
self.log(f"error establishing server connection: {err}")
command.connection.error = err
self.server_event(events.OpenConnectionReply(command, err))
if isinstance(e, asyncio.CancelledError):
# From https://docs.python.org/3/library/asyncio-exceptions.html#asyncio.CancelledError:
# > In almost all situations the exception must be re-raised.
# It is not really defined what almost means here, but we play safe.
raise
else:
command.connection.timestamp_tcp_setup = time.time()
command.connection.state = ConnectionState.OPEN
command.connection.peername = writer.get_extra_info('peername')
command.connection.sockname = writer.get_extra_info('sockname')
self.transports[command.connection].reader = reader
self.transports[command.connection].writer = writer
assert command.connection.peername
if command.connection.address[0] != command.connection.peername[0]:
addr = f"{command.connection.address[0]} ({human.format_address(command.connection.peername)})"
else:
addr = human.format_address(command.connection.address)
self.log(f"server connect {addr}")
connected_hook = asyncio_utils.create_task(
self.handle_hook(server_hooks.ServerConnectedHook(hook_data)),
name=f"handle_hook(server_connected) {addr}",
client=self.client.peername,
)
if not connected_hook:
return # this should not be needed, see asyncio_utils.create_task
self.server_event(events.OpenConnectionReply(command, None))
# during connection opening, this function is the designated handler that can be cancelled.
# once we have a connection, we do want the teardown here to happen in any case, so we
# reassign the handler to .handle_connection and then clean up here once that is done.
new_handler = asyncio_utils.create_task(
self.handle_connection(command.connection),
name=f"server connection handler for {addr}",
client=self.client.peername,
)
if not new_handler:
return # this should not be needed, see asyncio_utils.create_task
self.transports[command.connection].handler = new_handler
await asyncio.wait([new_handler])
self.log(f"server disconnect {addr}")
command.connection.timestamp_end = time.time()
await connected_hook # wait here for this so that closed always comes after connected.
await self.handle_hook(server_hooks.ServerClosedHook(hook_data))
async def handle_connection(self, connection: Connection) -> None:
    """
    Handle a connection for its entire lifetime.
    This means we read until EOF,
    but then possibly also keep on waiting for our side of the connection to be closed.
    """
    cancelled = None
    reader = self.transports[connection].reader
    assert reader
    while True:
        try:
            data = await reader.read(65535)
            if not data:
                # EOF is funneled through the same OSError path as real errors.
                raise OSError("Connection closed by peer.")
        except OSError:
            break
        except asyncio.CancelledError as e:
            # Remember the cancellation so it can be re-raised after cleanup.
            cancelled = e
            break
        else:
            self.server_event(events.DataReceived(connection, data))

    if cancelled is None:
        # Peer half-closed: we can no longer read, but may still write.
        connection.state &= ~ConnectionState.CAN_READ
    else:
        connection.state = ConnectionState.CLOSED

    self.server_event(events.ConnectionClosed(connection))

    if cancelled is None and connection.state is ConnectionState.CAN_WRITE:
        # we may still use this connection to *send* stuff,
        # even though the remote has closed their side of the connection.
        # to make this work we keep this task running and wait for cancellation.
        await asyncio.Event().wait()

    try:
        writer = self.transports[connection].writer
        assert writer
        writer.close()
    except OSError:
        pass
    self.transports.pop(connection)

    if cancelled:
        raise cancelled
async def on_timeout(self) -> None:
    """Tear down the client connection after the inactivity timeout fires."""
    self.log(f"Closing connection due to inactivity: {self.client}")
    client_io = self.transports[self.client]
    assert client_io.handler
    asyncio_utils.cancel_task(client_io.handler, "timeout")
async def hook_task(self, hook: commands.Hook) -> None:
    """Run a hook; for blocking hooks, feed the reply back into the layer stack."""
    await self.handle_hook(hook)
    if not hook.blocking:
        return
    self.server_event(events.HookReply(hook))
@abc.abstractmethod
async def handle_hook(self, hook: commands.Hook) -> None:
    """Dispatch an event hook; subclasses wire this into mitmproxy's addon machinery."""
    pass
def log(self, message: str, level: str = "info") -> None:
    # Fallback logger; subclasses are expected to override this.
    # NOTE(review): `level` is ignored here — presumably handled by overrides; confirm.
    print(message)
def server_event(self, event: events.Event) -> None:
self.timeout_watchdog.register_activity()
try:
layer_commands = self.layer.handle_event(event)
for command in layer_commands:
if isinstance(command, commands.OpenConnection):
assert command.connection not in self.transports
handler = asyncio_utils.create_task(
self.open_connection(command),
name=f"server connection manager {command.connection.address}",
client=self.client.peername,
)
self.transports[command.connection] = ConnectionIO(handler=handler)
elif isinstance(command, commands.ConnectionCommand) and command.connection not in self.transports:
return # The connection has already been closed.
elif isinstance(command, commands.SendData):
writer = self.transports[command.connection].writer
assert writer
writer.write(command.data)
elif isinstance(command, commands.CloseConnection):
self.close_connection(command.connection, command.half_close)
elif isinstance(command, commands.GetSocket):
writer = self.transports[command.connection].writer
assert writer
socket = writer.get_extra_info("socket")
self.server_event(events.GetSocketReply(command, socket))
elif isinstance(command, commands.Hook):
asyncio_utils.create_task(
self.hook_task(command),
name=f"handle_hook({command.name})",
client=self.client.peername,
)
elif isinstance(command, commands.Log):
self.log(command.message, command.level)
else:
raise RuntimeError(f"Unexpected command: {command}")
except Exception:
self.log(traceback.format_exc(), "error")
print(traceback.format_exc(), file=sys.stderr)
print("mitmproxy has crashed!", file=sys.stderr)
print("Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy", file=sys.stderr)
self.log(f"mitmproxy has crashed!\n{traceback.format_exc()}", level="error")
self.log("clientdisconnect", "info")
if root_layer is not None:
self.channel.tell("clientdisconnect", root_layer)
self.client_conn.finish()
def close_connection(self, connection: Connection, half_close: bool = False) -> None:
if half_close:
if not connection.state & ConnectionState.CAN_WRITE:
return
self.log(f"half-closing {connection}", "debug")
try:
writer = self.transports[connection].writer
assert writer
writer.write_eof()
except OSError:
# if we can't write to the socket anymore we presume it completely dead.
connection.state = ConnectionState.CLOSED
else:
connection.state &= ~ConnectionState.CAN_WRITE
else:
connection.state = ConnectionState.CLOSED
def log(self, msg, level):
msg = "{}: {}".format(human.format_address(self.client_conn.address), msg)
self.channel.tell("log", log.LogEntry(msg, level))
if connection.state is ConnectionState.CLOSED:
handler = self.transports[connection].handler
assert handler
asyncio_utils.cancel_task(handler, "closed by command")
class StreamConnectionHandler(ConnectionHandler, metaclass=abc.ABCMeta):
    """ConnectionHandler bootstrapped from an asyncio stream reader/writer pair."""

    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, options: moptions.Options) -> None:
        peername = writer.get_extra_info('peername')
        sockname = writer.get_extra_info('sockname')
        client = Client(peername, sockname, time.time())
        super().__init__(Context(client, options))
        # Register the client transport; no handler task exists yet.
        self.transports[client] = ConnectionIO(handler=None, reader=reader, writer=writer)
class SimpleConnectionHandler(StreamConnectionHandler):  # pragma: no cover
    """Simple handler that does not really process any hooks."""

    # Maps hook names to plain callables supplied by the caller.
    hook_handlers: typing.Dict[str, typing.Callable]

    def __init__(self, reader, writer, options, hooks):
        super().__init__(reader, writer, options)
        self.hook_handlers = hooks

    async def handle_hook(self, hook: commands.Hook) -> None:
        """Invoke the registered callable for this hook, if any."""
        if hook.name in self.hook_handlers:
            self.hook_handlers[hook.name](*hook.args())

    def log(self, message: str, level: str = "info"):
        # Logging is intentionally suppressed; uncomment below for debugging.
        if "Hook" not in message:
            pass  # print(message, file=sys.stderr if level in ("error", "warn") else sys.stdout)
if __name__ == "__main__":  # pragma: no cover
    # simple standalone implementation for testing.
    loop = asyncio.get_event_loop()
    opts = moptions.Options()
    # options duplicated here to simplify testing setup
    opts.add_option(
        "connection_strategy", str, "lazy",
        "Determine when server connections should be established.",
        choices=("eager", "lazy")
    )
    opts.add_option(
        "keep_host_header", bool, False,
        """
        Reverse Proxy: Keep the original host header instead of rewriting it
        to the reverse proxy target.
        """
    )
    opts.mode = "reverse:http://127.0.0.1:3000/"

    async def handle(reader, writer):
        # Fixed layer stack popped one-by-one by the next_layer hook below.
        layer_stack = [
            # lambda ctx: layers.ServerTLSLayer(ctx),
            # lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
            # lambda ctx: setattr(ctx.server, "tls", True) or layers.ServerTLSLayer(ctx),
            # lambda ctx: layers.ClientTLSLayer(ctx),
            lambda ctx: layers.modes.ReverseProxy(ctx),
            lambda ctx: layers.HttpLayer(ctx, HTTPMode.transparent)
        ]

        def next_layer(nl: layer.NextLayer):
            # Pop the next pre-configured layer and indent its debug output
            # by the current nesting depth.
            l = layer_stack.pop(0)(nl.context)
            l.debug = " " * len(nl.context.layers)
            nl.layer = l

        def request(flow: http.HTTPFlow):
            # Demo request hook: canned response, TLS toggling, redirecting.
            if "cached" in flow.request.path:
                flow.response = http.HTTPResponse.make(418, f"(cached) {flow.request.text}")
            if "toggle-tls" in flow.request.path:
                if flow.request.url.startswith("https://"):
                    flow.request.url = flow.request.url.replace("https://", "http://")
                else:
                    flow.request.url = flow.request.url.replace("http://", "https://")
            if "redirect" in flow.request.path:
                flow.request.host = "httpbin.org"

        def tls_start(tls_start: tls.TlsStartData):
            # INSECURE
            ssl_context = SSL.Context(SSL.SSLv23_METHOD)
            if tls_start.conn == tls_start.context.client:
                ssl_context.use_privatekey_file(
                    pkg_data.path("../test/mitmproxy/data/verificationcerts/trusted-leaf.key")
                )
                ssl_context.use_certificate_chain_file(
                    pkg_data.path("../test/mitmproxy/data/verificationcerts/trusted-leaf.crt")
                )
            tls_start.ssl_conn = SSL.Connection(ssl_context)
            if tls_start.conn == tls_start.context.client:
                tls_start.ssl_conn.set_accept_state()
            else:
                tls_start.ssl_conn.set_connect_state()
                tls_start.ssl_conn.set_tlsext_host_name(tls_start.context.client.sni)

        await SimpleConnectionHandler(reader, writer, opts, {
            "next_layer": next_layer,
            "request": request,
            "tls_start": tls_start,
        }).handle_client()

    coro = asyncio.start_server(handle, '127.0.0.1', 8080, loop=loop)
    server = loop.run_until_complete(coro)

    # Serve requests until Ctrl+C is pressed
    assert server.sockets
    print(f"Serving on {human.format_address(server.sockets[0].getsockname())}")
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()

View File

@ -1,8 +1,8 @@
from enum import Enum, auto
from typing import List, Optional, Tuple
from mitmproxy.proxy2 import commands, context, events, layer
from mitmproxy.proxy2.layer import Layer
from mitmproxy.proxy import commands, context, events, layer
from mitmproxy.proxy.layer import Layer
class TunnelState(Enum):

View File

@ -3,7 +3,7 @@ Utility decorators that help build state machines
"""
import functools
from mitmproxy.proxy2 import events
from mitmproxy.proxy import events
def expect(*event_types):

View File

@ -1,20 +0,0 @@
"""
This module contains mitmproxy's core network proxy.
The most important primitives are:
- Layers: represent protocol layers, e.g. one for TCP, TLS, and so on. Layers are nested, so
a typical configuration might be ReverseProxy/TLS/TCP.
Most importantly, layers are implemented using the sans-io pattern (https://sans-io.readthedocs.io/).
This means that calls return immediately, their is no blocking sync or async code.
- Server: the proxy server handles all I/O. This is implemented using asyncio, but could be done any other way.
The ConnectionHandler is subclassed in the Proxyserver addon, which handles the communication with the
rest of mitmproxy.
- Events: When I/O actions occur at the proxy server, they are passed to the outermost layer as events,
e.g. "DataReceived" or "ConnectionClosed".
- Commands: In the other direction, layers can emit commands to higher layers or the proxy server.
This is used to e.g. send data, request for new connections to be opened, or to call mitmproxy's
event hooks.
- Context: The context is the connection context each layer is provided with, which is always a client connection
and sometimes also a server connection.
"""

View File

@ -1,452 +0,0 @@
"""
Proxy Server Implementation using asyncio.
The very high level overview is as follows:
- Spawn one coroutine per client connection and create a reverse proxy layer to example.com
- Process any commands from layer (such as opening a server connection)
- Wait for any IO and send it as events to top layer.
"""
import abc
import asyncio
import collections
import time
import traceback
import typing
from contextlib import contextmanager
from dataclasses import dataclass
from OpenSSL import SSL
from mitmproxy import http, options as moptions
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import commands, events, layer, layers, server_hooks
from mitmproxy.proxy2.context import Address, Client, Connection, ConnectionState, Context
from mitmproxy.proxy2.layers import tls
from mitmproxy.utils import asyncio_utils
from mitmproxy.utils import human
from mitmproxy.utils.data import pkg_data
class TimeoutWatchdog:
    """Fires a callback once a connection has been inactive for too long.

    Activity is recorded via :meth:`register_activity`; :meth:`disarm`
    temporarily suspends timeout handling (e.g. while a blocking hook runs).
    """
    CONNECTION_TIMEOUT = 10 * 60

    last_activity: float
    can_timeout: asyncio.Event
    blocker: int

    def __init__(self, callback: typing.Callable[[], typing.Any]):
        self.callback = callback
        self.last_activity = time.time()
        self.can_timeout = asyncio.Event()
        self.can_timeout.set()
        self.blocker = 0

    def register_activity(self):
        """Mark the connection as active right now."""
        self.last_activity = time.time()

    async def watch(self):
        """Sleep until the timeout elapses without activity, then fire the
        callback exactly once and return."""
        while True:
            await self.can_timeout.wait()
            remaining = self.CONNECTION_TIMEOUT - (time.time() - self.last_activity)
            await asyncio.sleep(remaining)
            # Activity may have occurred while we slept; only time out if not.
            if self.last_activity + self.CONNECTION_TIMEOUT < time.time():
                await self.callback()
                return

    @contextmanager
    def disarm(self):
        """Suspend timeouts for the duration of the with-block; re-arm on exit
        once the last concurrent disarm releases."""
        self.can_timeout.clear()
        self.blocker += 1
        try:
            yield
        finally:
            self.blocker -= 1
            if self.blocker == 0:
                self.register_activity()
                self.can_timeout.set()
@dataclass
class ConnectionIO:
    """Bundles the asyncio primitives associated with one proxied connection."""
    # Task that reads from / tears down the connection, if one is running.
    handler: typing.Optional[asyncio.Task] = None
    reader: typing.Optional[asyncio.StreamReader] = None
    writer: typing.Optional[asyncio.StreamWriter] = None
class ConnectionHandler(metaclass=abc.ABCMeta):
transports: typing.MutableMapping[Connection, ConnectionIO]
timeout_watchdog: TimeoutWatchdog
client: Client
max_conns: typing.DefaultDict[Address, asyncio.Semaphore]
layer: layer.Layer
def __init__(self, context: Context) -> None:
    """Set up per-client bookkeeping and the initial NextLayer for *context*."""
    self.client = context.client
    self.transports = {}
    # Cap concurrent server connections per upstream address at 5.
    self.max_conns = collections.defaultdict(lambda: asyncio.Semaphore(5))

    # Ask for the first layer right away.
    # In a reverse proxy scenario, this is necessary as we would otherwise hang
    # on protocols that start with a server greeting.
    self.layer = layer.NextLayer(context, ask_on_start=True)

    self.timeout_watchdog = TimeoutWatchdog(self.on_timeout)
async def handle_client(self) -> None:
    """Drive the full lifecycle of one client connection: connect hook,
    data relay, disconnect hook, and teardown of all transports."""
    watch = asyncio_utils.create_task(
        self.timeout_watchdog.watch(),
        name="timeout watchdog",
        client=self.client.peername,
    )
    if not watch:
        return  # this should not be needed, see asyncio_utils.create_task
    self.log("client connect")
    await self.handle_hook(server_hooks.ClientConnectedHook(self.client))
    if self.client.error:
        # An addon vetoed the connection in the hook above.
        self.log("client kill connection")
        writer = self.transports.pop(self.client).writer
        assert writer
        writer.close()
    else:
        handler = asyncio_utils.create_task(
            self.handle_connection(self.client),
            name=f"client connection handler",
            client=self.client.peername,
        )
        if not handler:
            return  # this should not be needed, see asyncio_utils.create_task
        self.transports[self.client].handler = handler
        # Kick off the layer stack, then wait for the relay to finish.
        self.server_event(events.Start())
        await asyncio.wait([handler])

    watch.cancel()

    self.log("client disconnect")
    self.client.timestamp_end = time.time()
    await self.handle_hook(server_hooks.ClientClosedHook(self.client))

    if self.transports:
        # Cancel and await any remaining server-side transports.
        self.log("closing transports...", "debug")
        for io in self.transports.values():
            if io.handler:
                asyncio_utils.cancel_task(io.handler, "client disconnected")
        await asyncio.wait([x.handler for x in self.transports.values() if x.handler])
        self.log("transports closed!", "debug")
async def open_connection(self, command: commands.OpenConnection) -> None:
if not command.connection.address:
self.log(f"Cannot open connection, no hostname given.")
self.server_event(events.OpenConnectionReply(command, f"Cannot open connection, no hostname given."))
return
hook_data = server_hooks.ServerConnectionHookData(
client=self.client,
server=command.connection
)
await self.handle_hook(server_hooks.ServerConnectHook(hook_data))
if command.connection.error:
self.log(f"server connection to {human.format_address(command.connection.address)} killed before connect.")
self.server_event(events.OpenConnectionReply(command, "Connection killed."))
return
async with self.max_conns[command.connection.address]:
try:
command.connection.timestamp_start = time.time()
reader, writer = await asyncio.open_connection(*command.connection.address)
except (IOError, asyncio.CancelledError) as e:
err = str(e)
if not err: # str(CancelledError()) returns empty string.
err = "connection cancelled"
self.log(f"error establishing server connection: {err}")
command.connection.error = err
self.server_event(events.OpenConnectionReply(command, err))
if isinstance(e, asyncio.CancelledError):
# From https://docs.python.org/3/library/asyncio-exceptions.html#asyncio.CancelledError:
# > In almost all situations the exception must be re-raised.
# It is not really defined what almost means here, but we play safe.
raise
else:
command.connection.timestamp_tcp_setup = time.time()
command.connection.state = ConnectionState.OPEN
command.connection.peername = writer.get_extra_info('peername')
command.connection.sockname = writer.get_extra_info('sockname')
self.transports[command.connection].reader = reader
self.transports[command.connection].writer = writer
assert command.connection.peername
if command.connection.address[0] != command.connection.peername[0]:
addr = f"{command.connection.address[0]} ({human.format_address(command.connection.peername)})"
else:
addr = human.format_address(command.connection.address)
self.log(f"server connect {addr}")
connected_hook = asyncio_utils.create_task(
self.handle_hook(server_hooks.ServerConnectedHook(hook_data)),
name=f"handle_hook(server_connected) {addr}",
client=self.client.peername,
)
if not connected_hook:
return # this should not be needed, see asyncio_utils.create_task
self.server_event(events.OpenConnectionReply(command, None))
# during connection opening, this function is the designated handler that can be cancelled.
# once we have a connection, we do want the teardown here to happen in any case, so we
# reassign the handler to .handle_connection and then clean up here once that is done.
new_handler = asyncio_utils.create_task(
self.handle_connection(command.connection),
name=f"server connection handler for {addr}",
client=self.client.peername,
)
if not new_handler:
return # this should not be needed, see asyncio_utils.create_task
self.transports[command.connection].handler = new_handler
await asyncio.wait([new_handler])
self.log(f"server disconnect {addr}")
command.connection.timestamp_end = time.time()
await connected_hook # wait here for this so that closed always comes after connected.
await self.handle_hook(server_hooks.ServerClosedHook(hook_data))
    async def handle_connection(self, connection: Connection) -> None:
        """
        Handle a connection for its entire lifetime.
        This means we read until EOF,
        but then possibly also keep on waiting for our side of the connection to be closed.
        """
        # set if this task is cancelled; re-raised at the very end after cleanup.
        cancelled = None
        reader = self.transports[connection].reader
        assert reader
        while True:
            try:
                data = await reader.read(65535)
                if not data:
                    # EOF from the peer is funneled through the OSError path below.
                    raise OSError("Connection closed by peer.")
            except OSError:
                break
            except asyncio.CancelledError as e:
                cancelled = e
                break
            else:
                # feed received data into the layer stack.
                self.server_event(events.DataReceived(connection, data))

        if cancelled is None:
            # peer closed its sending side: we can no longer read, but may still write.
            connection.state &= ~ConnectionState.CAN_READ
        else:
            # cancellation means a full teardown was requested.
            connection.state = ConnectionState.CLOSED

        self.server_event(events.ConnectionClosed(connection))

        if cancelled is None and connection.state is ConnectionState.CAN_WRITE:
            # we may still use this connection to *send* stuff,
            # even though the remote has closed their side of the connection.
            # to make this work we keep this task running and wait for cancellation.
            await asyncio.Event().wait()

        try:
            writer = self.transports[connection].writer
            assert writer
            writer.close()
        except OSError:
            # best-effort close: the socket may already be gone.
            pass
        self.transports.pop(connection)

        if cancelled:
            raise cancelled
async def on_timeout(self) -> None:
self.log(f"Closing connection due to inactivity: {self.client}")
handler = self.transports[self.client].handler
assert handler
asyncio_utils.cancel_task(handler, "timeout")
async def hook_task(self, hook: commands.Hook) -> None:
await self.handle_hook(hook)
if hook.blocking:
self.server_event(events.HookReply(hook))
    @abc.abstractmethod
    async def handle_hook(self, hook: commands.Hook) -> None:
        """Process a hook event. Subclasses decide how (e.g. dispatch to addons)."""
        pass
def log(self, message: str, level: str = "info") -> None:
print(message)
    def server_event(self, event: events.Event) -> None:
        """
        Feed an event into the layer stack and execute the commands it yields.
        This is the central dispatch point between the sans-io core and real I/O.
        """
        self.timeout_watchdog.register_activity()
        try:
            layer_commands = self.layer.handle_event(event)
            for command in layer_commands:
                if isinstance(command, commands.OpenConnection):
                    assert command.connection not in self.transports
                    # connecting happens in its own task so it can be cancelled independently.
                    handler = asyncio_utils.create_task(
                        self.open_connection(command),
                        name=f"server connection manager {command.connection.address}",
                        client=self.client.peername,
                    )
                    self.transports[command.connection] = ConnectionIO(handler=handler)
                elif isinstance(command, commands.ConnectionCommand) and command.connection not in self.transports:
                    return  # The connection has already been closed.
                elif isinstance(command, commands.SendData):
                    writer = self.transports[command.connection].writer
                    assert writer
                    writer.write(command.data)
                elif isinstance(command, commands.CloseConnection):
                    self.close_connection(command.connection, command.half_close)
                elif isinstance(command, commands.GetSocket):
                    writer = self.transports[command.connection].writer
                    assert writer
                    socket = writer.get_extra_info("socket")
                    # reply synchronously by recursing into the dispatch loop.
                    self.server_event(events.GetSocketReply(command, socket))
                elif isinstance(command, commands.Hook):
                    # hooks run in a separate task; blocking hooks reply via hook_task.
                    asyncio_utils.create_task(
                        self.hook_task(command),
                        name=f"handle_hook({command.name})",
                        client=self.client.peername,
                    )
                elif isinstance(command, commands.Log):
                    self.log(command.message, command.level)
                else:
                    raise RuntimeError(f"Unexpected command: {command}")
        except Exception:
            # a crash in the layer stack must not take down the event loop.
            self.log(f"mitmproxy has crashed!\n{traceback.format_exc()}", level="error")
    def close_connection(self, connection: Connection, half_close: bool = False) -> None:
        """
        Close (or half-close) the given connection. Once the connection is
        fully closed, its handler task is cancelled so that the teardown in
        handle_connection runs.
        """
        if half_close:
            if not connection.state & ConnectionState.CAN_WRITE:
                # already shut down our sending side; nothing to do.
                return
            self.log(f"half-closing {connection}", "debug")
            try:
                writer = self.transports[connection].writer
                assert writer
                writer.write_eof()
            except OSError:
                # if we can't write to the socket anymore we presume it completely dead.
                connection.state = ConnectionState.CLOSED
            else:
                connection.state &= ~ConnectionState.CAN_WRITE
        else:
            connection.state = ConnectionState.CLOSED

        if connection.state is ConnectionState.CLOSED:
            handler = self.transports[connection].handler
            assert handler
            asyncio_utils.cancel_task(handler, "closed by command")
class StreamConnectionHandler(ConnectionHandler, metaclass=abc.ABCMeta):
    """ConnectionHandler backed by an asyncio StreamReader/StreamWriter pair."""

    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, options: moptions.Options) -> None:
        peername = writer.get_extra_info('peername')
        sockname = writer.get_extra_info('sockname')
        client_conn = Client(peername, sockname, time.time())
        super().__init__(Context(client_conn, options))
        # register the already-established client transport; no handler task yet.
        self.transports[client_conn] = ConnectionIO(handler=None, reader=reader, writer=writer)
class SimpleConnectionHandler(StreamConnectionHandler):  # pragma: no cover
    """Simple handler that does not really process any hooks."""

    # maps hook names to plain callables, bypassing the addon machinery.
    hook_handlers: typing.Dict[str, typing.Callable]

    def __init__(self, reader, writer, options, hooks):
        super().__init__(reader, writer, options)
        self.hook_handlers = hooks

    async def handle_hook(
        self,
        hook: commands.Hook
    ) -> None:
        handler = self.hook_handlers.get(hook.name)
        if handler is not None:
            handler(*hook.args())

    def log(self, message: str, level: str = "info"):
        if "Hook" not in message:
            pass  # print(message, file=sys.stderr if level in ("error", "warn") else sys.stdout)
if __name__ == "__main__":  # pragma: no cover
    # simple standalone implementation for testing.
    loop = asyncio.get_event_loop()
    opts = moptions.Options()
    # options duplicated here to simplify testing setup
    opts.add_option(
        "connection_strategy", str, "lazy",
        "Determine when server connections should be established.",
        choices=("eager", "lazy")
    )
    opts.add_option(
        "keep_host_header", bool, False,
        """
        Reverse Proxy: Keep the original host header instead of rewriting it
        to the reverse proxy target.
        """
    )
    opts.mode = "reverse:http://127.0.0.1:3000/"
    async def handle(reader, writer):
        # layers are consumed one by one as the next_layer hook is asked for decisions.
        layer_stack = [
            # lambda ctx: layers.ServerTLSLayer(ctx),
            # lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
            # lambda ctx: setattr(ctx.server, "tls", True) or layers.ServerTLSLayer(ctx),
            # lambda ctx: layers.ClientTLSLayer(ctx),
            lambda ctx: layers.modes.ReverseProxy(ctx),
            lambda ctx: layers.HttpLayer(ctx, HTTPMode.transparent)
        ]
        def next_layer(nl: layer.NextLayer):
            # pop the next layer from the stack and indent its debug output by nesting depth.
            l = layer_stack.pop(0)(nl.context)
            l.debug = " " * len(nl.context.layers)
            nl.layer = l
        def request(flow: http.HTTPFlow):
            # demo request hook: canned responses and request rewriting based on the path.
            if "cached" in flow.request.path:
                flow.response = http.HTTPResponse.make(418, f"(cached) {flow.request.text}")
            if "toggle-tls" in flow.request.path:
                if flow.request.url.startswith("https://"):
                    flow.request.url = flow.request.url.replace("https://", "http://")
                else:
                    flow.request.url = flow.request.url.replace("http://", "https://")
            if "redirect" in flow.request.path:
                flow.request.host = "httpbin.org"
        def tls_start(tls_start: tls.TlsStartData):
            # INSECURE
            ssl_context = SSL.Context(SSL.SSLv23_METHOD)
            if tls_start.conn == tls_start.context.client:
                # test certificates for the client-facing side only.
                ssl_context.use_privatekey_file(
                    pkg_data.path("../test/mitmproxy/data/verificationcerts/trusted-leaf.key")
                )
                ssl_context.use_certificate_chain_file(
                    pkg_data.path("../test/mitmproxy/data/verificationcerts/trusted-leaf.crt")
                )
            tls_start.ssl_conn = SSL.Connection(ssl_context)
            if tls_start.conn == tls_start.context.client:
                tls_start.ssl_conn.set_accept_state()
            else:
                tls_start.ssl_conn.set_connect_state()
                # NOTE(review): assumes the client sent an SNI — confirm for non-SNI clients.
                tls_start.ssl_conn.set_tlsext_host_name(tls_start.context.client.sni)
        await SimpleConnectionHandler(reader, writer, opts, {
            "next_layer": next_layer,
            "request": request,
            "tls_start": tls_start,
        }).handle_client()
    coro = asyncio.start_server(handle, '127.0.0.1', 8080, loop=loop)
    server = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    assert server.sockets
    print(f"Serving on {human.format_address(server.sockets[0].getsockname())}")
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()

View File

@ -8,7 +8,7 @@ from mitmproxy import controller
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.net import http as net_http
from mitmproxy.utils import compat
from mitmproxy.proxy import context
from wsproto.frame_protocol import Opcode
@ -147,8 +147,8 @@ def tdummyflow(client_conn=True, server_conn=True, err=None):
return f
def tclient_conn() -> compat.Client:
c = compat.Client.from_state(dict(
def tclient_conn() -> context.Client:
c = context.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
@ -170,14 +170,11 @@ def tclient_conn() -> compat.Client:
cipher_list=[],
))
c.reply = controller.DummyReply()
if not compat.new_proxy_core:
c.rfile = io.BytesIO()
c.wfile = io.BytesIO()
return c
def tserver_conn() -> compat.Server:
c = compat.Server.from_state(dict(
def tserver_conn() -> context.Server:
c = context.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),

View File

@ -10,7 +10,7 @@ from mitmproxy import exceptions, master
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import proxy
from mitmproxy.utils import compat, debug, arg_check
from mitmproxy.utils import debug, arg_check
def assert_utf8_env():
@ -86,17 +86,10 @@ def run(
os.path.join(opts.confdir, "config.yml"),
)
pconf = process_options(parser, opts, args)
server: typing.Any = None
if pconf.options.server and not compat.new_proxy_core: # new core initializes itself as an addon
try:
server = proxy.server.ProxyServer(pconf)
except exceptions.ServerException as v:
print(str(v), file=sys.stderr)
sys.exit(1)
else:
server = proxy.server.DummyServer(pconf)
master.server = server
# new core initializes itself as an addon
master.server = proxy.DummyServer(pconf)
if args.options:
print(optmanager.dump_defaults(opts))
sys.exit(0)

View File

@ -1,13 +0,0 @@
new_proxy_core = True
"""If true, use mitmproxy's new sans-io proxy core."""
if new_proxy_core: # pragma: no cover
from mitmproxy.proxy2 import context
Client = context.Client # type: ignore
Server = context.Server # type: ignore
else: # pragma: no cover
from mitmproxy import connections
Client = connections.ClientConnection # type: ignore
Server = connections.ServerConnection # type: ignore

View File

@ -9,7 +9,7 @@ from wsproto.frame_protocol import Opcode
from mitmproxy import flow
from mitmproxy.net import websocket
from mitmproxy.coretypes import serializable
from mitmproxy.utils import strutils, human, compat
from mitmproxy.utils import strutils, human
class WebSocketMessage(serializable.Serializable):
@ -54,13 +54,12 @@ class WebSocketMessage(serializable.Serializable):
It will not be sent to the other endpoint. This has no effect in streaming mode.
"""
if compat.new_proxy_core: # pragma: no cover
warnings.warn("WebSocketMessage.kill is deprecated, set an empty content instead.",
PendingDeprecationWarning)
# empty str or empty bytes.
self.content = type(self.content)()
else: # pragma: no cover
self.killed = True
warnings.warn(
"WebSocketMessage.kill is deprecated, set an empty content instead.",
PendingDeprecationWarning
)
# empty str or empty bytes.
self.content = type(self.content)()
class WebSocketFlow(flow.Flow):

View File

@ -37,15 +37,6 @@ ignore_errors = True
[tool:full_coverage]
exclude =
mitmproxy/proxy/protocol/base.py
mitmproxy/proxy/protocol/http.py
mitmproxy/proxy/protocol/http1.py
mitmproxy/proxy/protocol/http2.py
mitmproxy/proxy/protocol/http_replay.py
mitmproxy/proxy/protocol/rawtcp.py
mitmproxy/proxy/protocol/tls.py
mitmproxy/proxy/root_context.py
mitmproxy/proxy/server.py
mitmproxy/tools/
release/hooks
@ -77,20 +68,7 @@ exclude =
mitmproxy/net/tls.py
mitmproxy/options.py
mitmproxy/proxy/config.py
mitmproxy/proxy/modes/http_proxy.py
mitmproxy/proxy/modes/reverse_proxy.py
mitmproxy/proxy/modes/socks_proxy.py
mitmproxy/proxy/modes/transparent_proxy.py
mitmproxy/proxy/protocol/base.py
mitmproxy/proxy/protocol/http.py
mitmproxy/proxy/protocol/http1.py
mitmproxy/proxy/protocol/http2.py
mitmproxy/proxy/protocol/http_replay.py
mitmproxy/proxy/protocol/rawtcp.py
mitmproxy/proxy/protocol/tls.py
mitmproxy/proxy/root_context.py
mitmproxy/proxy/server.py
mitmproxy/proxy2/server.py
mitmproxy/proxy2/layers/tls.py
mitmproxy/proxy/layers/tls.py
mitmproxy/utils/bits.py
release/hooks

View File

@ -1,116 +0,0 @@
import tempfile
import asyncio
import typing
import time
from statistics import mean
from mitmproxy import ctx
from mitmproxy.io import db
from mitmproxy.test import tflow
class StreamTester:
"""
Generates a constant stream of flows and
measure protobuf dumping throughput.
"""
def __init__(self):
self.dbh = None
self.streaming = False
self.tf = None
self.out = None
self.hot_flows = []
self.results = []
self._flushes = 0
self._stream_period = 0.001
self._flush_period = 3.0
self._flush_rate = 150
self._target = 2000
self.loop = asyncio.get_event_loop()
self.queue = asyncio.Queue(maxsize=self._flush_rate * 3, loop=self.loop)
self.temp = tempfile.NamedTemporaryFile()
def load(self, loader):
loader.add_option(
"testflow_size",
int,
1000,
"Length in bytes of test flow content"
)
loader.add_option(
"benchmark_save_path",
typing.Optional[str],
None,
"Destination for the stats result file"
)
def _log(self, msg):
if self.out:
self.out.write(msg + '\n')
else:
ctx.log(msg)
def running(self):
if not self.streaming:
ctx.log("<== Serialization Benchmark Enabled ==>")
self.tf = tflow.tflow()
self.tf.request.content = b'A' * ctx.options.testflow_size
ctx.log(f"With content size: {len(self.tf.request.content)} B")
if ctx.options.benchmark_save_path:
ctx.log(f"Storing results to {ctx.options.benchmark_save_path}")
self.out = open(ctx.options.benchmark_save_path, "w")
self.dbh = db.DBHandler(self.temp.name, mode='write')
self.streaming = True
tasks = (self.stream, self.writer, self.stats)
self.loop.create_task(asyncio.gather(*(t() for t in tasks)))
async def stream(self):
while True:
await self.queue.put(self.tf)
await asyncio.sleep(self._stream_period)
async def writer(self):
while True:
await asyncio.sleep(self._flush_period)
count = 1
f = await self.queue.get()
self.hot_flows.append(f)
while count < self._flush_rate:
try:
self.hot_flows.append(self.queue.get_nowait())
count += 1
except asyncio.QueueEmpty:
pass
start = time.perf_counter()
n = self._fflush()
end = time.perf_counter()
self._log(f"dumps/time ratio: {n} / {end-start} -> {n/(end-start)}")
self.results.append(n / (end - start))
self._flushes += n
self._log(f"Flows dumped: {self._flushes}")
ctx.log(f"Progress: {min(100.0, 100.0 * (self._flushes / self._target))}%")
async def stats(self):
while True:
await asyncio.sleep(1.0)
if self._flushes >= self._target:
self._log(f"AVG : {mean(self.results)}")
ctx.log(f"<== Benchmark Ended. Shutting down... ==>")
if self.out:
self.out.close()
self.temp.close()
ctx.master.shutdown()
def _fflush(self):
self.dbh.store(self.hot_flows)
n = len(self.hot_flows)
self.hot_flows = []
return n
addons = [
StreamTester()
]

View File

@ -9,8 +9,14 @@ import sys
def check_src_files_have_test():
missing_test_files = []
excluded = ['mitmproxy/contrib/', 'mitmproxy/io/proto/', 'mitmproxy/proxy2/layers/http',
'mitmproxy/test/', 'mitmproxy/tools/', 'mitmproxy/platform/']
excluded = [
'mitmproxy/contrib/',
'mitmproxy/io/proto/',
'mitmproxy/proxy/layers/http',
'mitmproxy/test/',
'mitmproxy/tools/',
'mitmproxy/platform/',
]
src_files = glob.glob('mitmproxy/**/*.py', recursive=True)
src_files = [f for f in src_files if os.path.basename(f) != '__init__.py']
src_files = [f for f in src_files if not any(os.path.normpath(p) in f for p in excluded)]
@ -25,7 +31,12 @@ def check_src_files_have_test():
def check_test_files_have_src():
unknown_test_files = []
excluded = ['test/mitmproxy/data/', 'test/mitmproxy/net/data/', '/tservers.py', '/conftest.py']
excluded = [
'test/mitmproxy/data/',
'test/mitmproxy/net/data/',
'/tservers.py',
'/conftest.py',
]
test_files = glob.glob('test/mitmproxy/**/*.py', recursive=True)
test_files = [f for f in test_files if os.path.basename(f) != '__init__.py']
test_files = [f for f in test_files if not any(os.path.normpath(p) in f for p in excluded)]

View File

@ -1,9 +1,8 @@
from unittest import mock
import pytest
from mitmproxy.addons import block
from mitmproxy.proxy import context
from mitmproxy.test import taddons
from mitmproxy.utils import compat
@pytest.mark.parametrize("block_global, block_private, should_be_killed, address", [
@ -56,22 +55,7 @@ from mitmproxy.utils import compat
async def test_block_global(block_global, block_private, should_be_killed, address):
ar = block.Block()
with taddons.context(ar) as tctx:
if compat.new_proxy_core:
from mitmproxy.proxy2 import context
tctx.configure(ar, block_global=block_global, block_private=block_private)
client = context.Client(address, ("127.0.0.1", 8080), 1607699500)
ar.client_connected(client)
assert bool(client.error) == should_be_killed
return
tctx.options.block_global = block_global
tctx.options.block_private = block_private
with mock.patch('mitmproxy.proxy.protocol.base.Layer') as layer:
layer.client_conn.address = address
ar.clientconnect(layer)
if should_be_killed:
assert layer.reply.kill.called
assert await tctx.master.await_log("killed", "warn")
else:
assert not layer.reply.kill.called
tctx.configure(ar, block_global=block_global, block_private=block_private)
client = context.Client(address, ("127.0.0.1", 8080), 1607699500)
ar.client_connected(client)
assert bool(client.error) == should_be_killed

View File

@ -1,186 +1,139 @@
import time
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy.test import tflow, tutils
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.net import http as net_http
from mitmproxy.addons import clientplayback
from mitmproxy.test import taddons
from .. import tservers
from ...conftest import skip_new_proxy_core
from mitmproxy.addons.clientplayback import ClientPlayback, ReplayHandler
from mitmproxy.exceptions import CommandError, OptionsError
from mitmproxy.proxy.context import Address
from mitmproxy.test import taddons, tflow
def tdump(path, flows):
with open(path, "wb") as f:
w = io.FlowWriter(f)
for i in flows:
w.add(i)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
class MockThread():
def is_alive(self):
return False
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["regular", "upstream", "err"])
async def test_playback(mode):
handler_ok = asyncio.Event()
async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
if mode == "err":
writer.close()
handler_ok.set()
return
if mode == "upstream":
conn_req = await reader.readuntil(b"\r\n\r\n")
assert conn_req == b'CONNECT address:22 HTTP/1.1\r\n\r\n'
writer.write(b"HTTP/1.1 200 Connection Established\r\n\r\n")
req = await reader.readuntil(b"data")
assert req == (
b'GET /path HTTP/1.1\r\n'
b'header: qvalue\r\n'
b'content-length: 4\r\n'
b'\r\n'
b'data'
)
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
assert not await reader.read()
handler_ok.set()
class TBase(tservers.HTTPProxyTest):
@staticmethod
def wait_response(flow):
"""
Race condition: We don't want to replay the flow while it is still live.
"""
s = time.time()
while True:
if flow.response or flow.error:
flow.server_conn.close()
break
time.sleep(0.001)
if time.time() - s > 5:
raise RuntimeError("Flow is live for too long.")
cp = ClientPlayback()
with taddons.context(cp) as tctx:
async with tcp_server(handler) as addr:
@staticmethod
def reset(f):
f.live = False
f.repsonse = False
f.error = False
def addons(self):
return [clientplayback.ClientPlayback()]
@skip_new_proxy_core
def test_replay(self):
cr = self.master.addons.get("clientplayback")
assert self.pathod("304").status_code == 304
assert len(self.master.state.flows) == 1
l = self.master.state.flows[-1]
assert l.response.status_code == 304
l.request.path = "/p/305"
l.response = None
l.live = False
l.intercepted = False
cr.start_replay([l])
self.wait_response(l)
assert l.response.status_code == 305
# Disconnect error
cr.stop_replay()
self.reset(l)
l.request.path = "/p/305:d0"
cr.start_replay([l])
self.wait_response(l)
if isinstance(self, tservers.HTTPUpstreamProxyTest):
assert l.response.status_code == 502
else:
assert l.error
# # Port error
cr.stop_replay()
self.reset(l)
l.request.port = 1
# In upstream mode, we get a 502 response from the upstream proxy server.
# In upstream mode with ssl, the replay will fail as we cannot establish
# SSL with the upstream proxy.
cr.start_replay([l])
self.wait_response(l)
if isinstance(self, tservers.HTTPUpstreamProxyTest):
assert l.response.status_code == 502
else:
assert l.error
class TestHTTPProxy(TBase, tservers.HTTPProxyTest):
pass
class TestHTTPSProxy(TBase, tservers.HTTPProxyTest):
ssl = True
class TestUpstreamProxy(TBase, tservers.HTTPUpstreamProxyTest):
pass
class TestClientPlayback:
def test_load_file(self, tmpdir):
cp = clientplayback.ClientPlayback()
with taddons.context(cp):
fpath = str(tmpdir.join("flows"))
tdump(fpath, [tflow.tflow(resp=True)])
cp.load_file(fpath)
cp.running()
flow = tflow.tflow()
flow.request.content = b"data"
if mode == "upstream":
tctx.options.mode = f"upstream:http://{addr[0]}:{addr[1]}"
else:
flow.request.host, flow.request.port = addr
cp.start_replay([flow])
assert cp.count() == 1
with pytest.raises(exceptions.CommandError):
cp.load_file("/nonexistent")
await asyncio.wait_for(cp.queue.join(), 5)
await asyncio.wait_for(handler_ok.wait(), 5)
cp.done()
if mode != "err":
assert flow.response.status_code == 204
def test_configure(self, tmpdir):
cp = clientplayback.ClientPlayback()
with taddons.context(cp) as tctx:
path = str(tmpdir.join("flows"))
tdump(path, [tflow.tflow()])
assert cp.count() == 0
tctx.configure(cp, client_replay=[path])
assert cp.count() == 1
tctx.configure(cp, client_replay=[])
with pytest.raises(exceptions.OptionsError):
tctx.configure(cp, client_replay=["nonexistent"])
def test_check(self):
cp = clientplayback.ClientPlayback()
with taddons.context(cp):
f = tflow.tflow(resp=True)
f.live = True
assert "live flow" in cp.check(f)
@pytest.mark.asyncio
async def test_playback_crash(monkeypatch):
async def raise_err():
raise ValueError("oops")
f = tflow.tflow(resp=True)
f.intercepted = True
assert "intercepted flow" in cp.check(f)
monkeypatch.setattr(ReplayHandler, "replay", raise_err)
cp = ClientPlayback()
with taddons.context(cp) as tctx:
cp.running()
cp.start_replay([tflow.tflow()])
assert await tctx.master.await_log("Client replay has crashed!", level="error")
assert cp.count() == 0
f = tflow.tflow(resp=True)
f.request = None
assert "missing request" in cp.check(f)
f = tflow.tflow(resp=True)
f.request.raw_content = None
assert "missing content" in cp.check(f)
def test_check():
cp = ClientPlayback()
f = tflow.tflow(resp=True)
f.live = True
assert "live flow" in cp.check(f)
f = tflow.ttcpflow()
assert "Can only replay HTTP" in cp.check(f)
f = tflow.tflow(resp=True)
f.intercepted = True
assert "intercepted flow" in cp.check(f)
@pytest.mark.asyncio
async def test_playback(self):
cp = clientplayback.ClientPlayback()
with taddons.context(cp) as ctx:
assert cp.count() == 0
f = tflow.tflow(resp=True)
cp.start_replay([f])
assert cp.count() == 1
f = tflow.tflow(resp=True)
f.request = None
assert "missing request" in cp.check(f)
cp.stop_replay()
assert cp.count() == 0
f = tflow.tflow(resp=True)
f.request.raw_content = None
assert "missing content" in cp.check(f)
f.live = True
cp.start_replay([f])
assert cp.count() == 0
await ctx.master.await_log("live")
f = tflow.ttcpflow()
assert "Can only replay HTTP" in cp.check(f)
@skip_new_proxy_core
def test_http2(self):
cp = clientplayback.ClientPlayback()
with taddons.context(cp):
req = tutils.treq(
headers = net_http.Headers(
(
(b":authority", b"foo"),
(b"header", b"qvalue"),
(b"content-length", b"7")
)
)
)
f = tflow.tflow(req=req)
f.request.http_version = "HTTP/2.0"
cp.start_replay([f])
assert f.request.http_version == "HTTP/1.1"
assert ":authority" not in f.request.headers
@pytest.mark.asyncio
async def test_start_stop(tdata):
cp = ClientPlayback()
with taddons.context(cp) as tctx:
cp.start_replay([tflow.tflow()])
assert cp.count() == 1
cp.start_replay([tflow.twebsocketflow()])
assert await tctx.master.await_log("Can only replay HTTP flows.", level="warn")
assert cp.count() == 1
cp.stop_replay()
assert cp.count() == 0
def test_load(tdata):
cp = ClientPlayback()
with taddons.context(cp):
cp.load_file(tdata.path("mitmproxy/data/dumpfile-018.bin"))
assert cp.count() == 1
with pytest.raises(CommandError):
cp.load_file("/nonexistent")
assert cp.count() == 1
def test_configure(tdata):
cp = ClientPlayback()
with taddons.context(cp) as tctx:
assert cp.count() == 0
tctx.configure(cp, client_replay=[tdata.path("mitmproxy/data/dumpfile-018.bin")])
assert cp.count() == 1
tctx.configure(cp, client_replay=[])
with pytest.raises(OptionsError):
tctx.configure(cp, client_replay=["nonexistent"])

View File

@ -1,139 +0,0 @@
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy.addons.clientplayback_sansio import ClientPlayback, ReplayHandler
from mitmproxy.exceptions import CommandError, OptionsError
from mitmproxy.proxy2.context import Address
from mitmproxy.test import taddons, tflow
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["regular", "upstream", "err"])
async def test_playback(mode):
handler_ok = asyncio.Event()
async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
if mode == "err":
writer.close()
handler_ok.set()
return
if mode == "upstream":
conn_req = await reader.readuntil(b"\r\n\r\n")
assert conn_req == b'CONNECT address:22 HTTP/1.1\r\n\r\n'
writer.write(b"HTTP/1.1 200 Connection Established\r\n\r\n")
req = await reader.readuntil(b"data")
assert req == (
b'GET /path HTTP/1.1\r\n'
b'header: qvalue\r\n'
b'content-length: 4\r\n'
b'\r\n'
b'data'
)
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
assert not await reader.read()
handler_ok.set()
cp = ClientPlayback()
with taddons.context(cp) as tctx:
async with tcp_server(handler) as addr:
cp.running()
flow = tflow.tflow()
flow.request.content = b"data"
if mode == "upstream":
tctx.options.mode = f"upstream:http://{addr[0]}:{addr[1]}"
else:
flow.request.host, flow.request.port = addr
cp.start_replay([flow])
assert cp.count() == 1
await asyncio.wait_for(cp.queue.join(), 5)
await asyncio.wait_for(handler_ok.wait(), 5)
cp.done()
if mode != "err":
assert flow.response.status_code == 204
@pytest.mark.asyncio
async def test_playback_crash(monkeypatch):
async def raise_err():
raise ValueError("oops")
monkeypatch.setattr(ReplayHandler, "replay", raise_err)
cp = ClientPlayback()
with taddons.context(cp) as tctx:
cp.running()
cp.start_replay([tflow.tflow()])
assert await tctx.master.await_log("Client replay has crashed!", level="error")
assert cp.count() == 0
def test_check():
cp = ClientPlayback()
f = tflow.tflow(resp=True)
f.live = True
assert "live flow" in cp.check(f)
f = tflow.tflow(resp=True)
f.intercepted = True
assert "intercepted flow" in cp.check(f)
f = tflow.tflow(resp=True)
f.request = None
assert "missing request" in cp.check(f)
f = tflow.tflow(resp=True)
f.request.raw_content = None
assert "missing content" in cp.check(f)
f = tflow.ttcpflow()
assert "Can only replay HTTP" in cp.check(f)
@pytest.mark.asyncio
async def test_start_stop(tdata):
cp = ClientPlayback()
with taddons.context(cp) as tctx:
cp.start_replay([tflow.tflow()])
assert cp.count() == 1
cp.start_replay([tflow.twebsocketflow()])
assert await tctx.master.await_log("Can only replay HTTP flows.", level="warn")
assert cp.count() == 1
cp.stop_replay()
assert cp.count() == 0
def test_load(tdata):
cp = ClientPlayback()
with taddons.context(cp):
cp.load_file(tdata.path("mitmproxy/data/dumpfile-018.bin"))
assert cp.count() == 1
with pytest.raises(CommandError):
cp.load_file("/nonexistent")
assert cp.count() == 1
def test_configure(tdata):
cp = ClientPlayback()
with taddons.context(cp) as tctx:
assert cp.count() == 0
tctx.configure(cp, client_replay=[tdata.path("mitmproxy/data/dumpfile-018.bin")])
assert cp.count() == 1
tctx.configure(cp, client_replay=[])
with pytest.raises(OptionsError):
tctx.configure(cp, client_replay=["nonexistent"])

View File

@ -4,7 +4,7 @@ import pytest
from mitmproxy.addons.next_layer import NextLayer
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import context, layers
from mitmproxy.proxy import context, layers
from mitmproxy.test import taddons

View File

@ -5,8 +5,8 @@ import pytest
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import layers
from mitmproxy.proxy2.context import Address
from mitmproxy.proxy import layers
from mitmproxy.proxy.context import Address
from mitmproxy.test import taddons
@ -57,7 +57,6 @@ async def test_start_stop():
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
print(f"{req=}")
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"

View File

@ -8,10 +8,10 @@ import pytest
from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy.addons import tlsconfig
from mitmproxy.proxy2 import context
from mitmproxy.proxy2.layers import tls
from mitmproxy.proxy import context
from mitmproxy.proxy.layers import tls
from mitmproxy.test import taddons
from test.mitmproxy.proxy2.layers import test_tls
from test.mitmproxy.proxy.layers import test_tls
def test_alpn_select_callback():

View File

@ -1,29 +0,0 @@
import pytest
from mitmproxy.io import db
from mitmproxy.test import tflow
@pytest.mark.skip
class TestDB:
def test_create(self, tdata):
dh = db.DBHandler(db_path=tdata.path("mitmproxy/data") + "/tmp.sqlite")
with dh._con as c:
cur = c.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='FLOWS';")
assert cur.fetchall() == [('FLOWS',)]
def test_roundtrip(self, tdata):
dh = db.DBHandler(db_path=tdata.path("mitmproxy/data") + "/tmp.sqlite", mode='write')
flows = []
for i in range(10):
flows.append(tflow.tflow())
dh.store(flows)
dh = db.DBHandler(db_path=tdata.path("mitmproxy/data") + "/tmp.sqlite")
with dh._con as c:
cur = c.cursor()
cur.execute("SELECT count(*) FROM FLOWS;")
assert cur.fetchall()[0][0] == 10
loaded_flows = dh.load()
assert len(loaded_flows) == len(flows)

View File

@ -1,120 +0,0 @@
import pytest
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy.io import protobuf
from mitmproxy.test import tflow, tutils
@pytest.mark.skip
class TestProtobuf:
def test_roundtrip_client(self):
c = tflow.tclient_conn()
del c.reply
c.rfile = None
c.wfile = None
pc = protobuf._dump_http_client_conn(c)
lc = protobuf._load_http_client_conn(pc)
assert c.__dict__ == lc.__dict__
def test_roundtrip_client_cert(self, tdata):
c = tflow.tclient_conn()
c.rfile = None
c.wfile = None
del c.reply
with open(tdata.path("mitmproxy/net/data/clientcert/client.pem"), "rb") as f:
d = f.read()
c.clientcert = certs.Cert.from_pem(d)
pc = protobuf._dump_http_client_conn(c)
lc = protobuf._load_http_client_conn(pc)
assert c.__dict__ == lc.__dict__
def test_roundtrip_server(self):
s = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
def test_roundtrip_server_cert(self, tdata):
s = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
s.cert = certs.Cert.from_pem(d)
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
def test_roundtrip_server_via(self):
s = tflow.tserver_conn()
s.via = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
del s.via.reply
s.via.wfile = None
s.via.rfile = None
assert s.via.__dict__ == ls.via.__dict__
def test_roundtrip_http_request(self):
req = tutils.treq()
preq = protobuf._dump_http_request(req)
lreq = protobuf._load_http_request(preq)
assert req.__dict__ == lreq.__dict__
def test_roundtrip_http_request_empty_content(self):
req = tutils.treq(content=b"")
preq = protobuf._dump_http_request(req)
lreq = protobuf._load_http_request(preq)
assert req.__dict__ == lreq.__dict__
def test_roundtrip_http_response(self):
res = tutils.tresp()
pres = protobuf._dump_http_response(res)
lres = protobuf._load_http_response(pres)
assert res.__dict__ == lres.__dict__
def test_roundtrip_http_response_empty_content(self):
res = tutils.tresp(content=b"")
pres = protobuf._dump_http_response(res)
lres = protobuf._load_http_response(pres)
assert res.__dict__ == lres.__dict__
def test_roundtrip_http_error(self):
err = tflow.terr()
perr = protobuf._dump_http_error(err)
lerr = protobuf._load_http_error(perr)
assert err.__dict__ == lerr.__dict__
def test_roundtrip_http_flow_only_req(self):
f = tflow.tflow()
f.reply = None
pf = protobuf.dumps(f)
lf = protobuf.loads(pf, "http")
assert f.__dict__ == lf.__dict__
def test_roundtrip_http_flow_res(self):
f = tflow.tflow(resp=True)
f.reply = None
pf = protobuf.dumps(f)
lf = protobuf.loads(pf, "http")
assert f.__dict__ == lf.__dict__
def test_unsupported_dumps(self):
w = tflow.twebsocketflow()
with pytest.raises(exceptions.TypeError):
protobuf.dumps(w)
def test_unsupported_loads(self):
b = b"blobs"
with pytest.raises(exceptions.TypeError):
protobuf.loads(b, 'not-http')

View File

@ -7,7 +7,7 @@ from mitmproxy import options
from mitmproxy.addons.core import Core
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.addons.termlog import TermLog
from mitmproxy.proxy2 import context
from mitmproxy.proxy import context
@pytest.fixture
@ -28,4 +28,4 @@ def tctx() -> context.Context:
settings.register_profile("fast", max_examples=10)
settings.register_profile("deep", max_examples=100_000, deadline=None)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "fast"))
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "fast"))

View File

@ -4,12 +4,12 @@ from mitmproxy.flow import Error
from mitmproxy.http import HTTPFlow, HTTPResponse
from mitmproxy.net.server_spec import ServerSpec
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import layer
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy2.context import ConnectionState, Server
from mitmproxy.proxy2.events import ConnectionClosed, DataReceived
from mitmproxy.proxy2.layers import TCPLayer, http, tls
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply, reply_next_layer
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy.context import ConnectionState, Server
from mitmproxy.proxy.events import ConnectionClosed, DataReceived
from mitmproxy.proxy.layers import TCPLayer, http, tls
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply, reply_next_layer
def test_http_proxy(tctx):

View File

@ -1,201 +1,201 @@
import pytest
from mitmproxy.net import http
from mitmproxy.proxy2.commands import SendData
from mitmproxy.proxy2.events import DataReceived
from mitmproxy.proxy2.layers.http import Http1Server, ReceiveHttp, RequestHeaders, RequestEndOfMessage, \
ResponseHeaders, ResponseEndOfMessage, RequestData, Http1Client, ResponseData
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook
class TestServer:
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_simple(self, tctx, pipeline):
hdrs1 = Placeholder(RequestHeaders)
hdrs2 = Placeholder(RequestHeaders)
req2 = (
b"GET http://example.com/two HTTP/1.1\r\n"
b"Host: example.com\r\n"
b"\r\n"
)
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"POST http://example.com/one HTTP/1.1\r\n"
b"Content-Length: 3\r\n"
b"\r\n"
b"abc"
+ (req2 if pipeline else b""))
<< ReceiveHttp(hdrs1)
<< ReceiveHttp(RequestData(1, b"abc"))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, req2)
assert (
playbook
<< ReceiveHttp(hdrs2)
<< ReceiveHttp(RequestEndOfMessage(3))
)
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_connect(self, tctx, pipeline):
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"CONNECT example.com:443 HTTP/1.1\r\n"
b"content-length: 0\r\n"
b"\r\n"
+ (b"some plain tcp" if pipeline else b""))
<< ReceiveHttp(Placeholder(RequestHeaders))
# << ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, b"some plain tcp")
assert (playbook
<< ReceiveHttp(RequestData(1, b"some plain tcp"))
)
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_upgrade(self, tctx, pipeline):
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"POST http://example.com/one HTTP/1.1\r\n"
b"Connection: Upgrade\r\n"
b"Upgrade: websocket\r\n"
b"\r\n"
+ (b"some websockets" if pipeline else b""))
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(101))
<< SendData(tctx.client, b'HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, b"some websockets")
assert (playbook
<< ReceiveHttp(RequestData(1, b"some websockets"))
)
def test_upgrade_denied(self, tctx):
assert (
Playbook(Http1Server(tctx))
>> DataReceived(tctx.client,
b"GET http://example.com/ HTTP/1.1\r\n"
b"Connection: Upgrade\r\n"
b"Upgrade: websocket\r\n"
b"\r\n")
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
>> DataReceived(tctx.client, b"GET / HTTP/1.1\r\n\r\n")
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(3))
)
class TestClient:
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_simple(self, tctx, pipeline):
req = http.Request.make("GET", "http://example.com/")
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
(
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
)
if pipeline:
with pytest.raises(AssertionError, match="assert self.stream_id == event.stream_id"):
assert (playbook
>> RequestHeaders(3, req, True)
)
return
assert (
playbook
>> DataReceived(tctx.server, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
# no we can send the next request
>> RequestHeaders(3, req, True)
<< SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
)
assert resp().response.status_code == 200
def test_connect(self, tctx):
req = http.Request.make("CONNECT", "http://example.com:443")
req.authority = "example.com:443"
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server, b"CONNECT example.com:443 HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\nsome plain tcp")
<< ReceiveHttp(resp)
# << ReceiveHttp(ResponseEndOfMessage(1))
<< ReceiveHttp(ResponseData(1, b"some plain tcp"))
# no we can send plain data
>> RequestData(1, b"some more tcp")
<< SendData(tctx.server, b"some more tcp")
)
def test_upgrade(self, tctx):
req = http.Request.make("GET", "http://example.com/ws", headers={
"Connection": "Upgrade",
"Upgrade": "websocket",
})
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server,
b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\nhello")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
<< ReceiveHttp(ResponseData(1, b"hello"))
# no we can send plain data
>> RequestData(1, b"some more websockets")
<< SendData(tctx.server, b"some more websockets")
)
def test_upgrade_denied(self, tctx):
req = http.Request.make("GET", "http://example.com/ws", headers={
"Connection": "Upgrade",
"Upgrade": "websocket",
})
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server,
b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 200 Ok\r\ncontent-length: 0\r\n\r\n")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
>> RequestHeaders(3, req, True)
<< SendData(tctx.server, Placeholder(bytes))
)
import pytest
from mitmproxy.net import http
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers.http import Http1Server, ReceiveHttp, RequestHeaders, RequestEndOfMessage, \
ResponseHeaders, ResponseEndOfMessage, RequestData, Http1Client, ResponseData
from test.mitmproxy.proxy.tutils import Placeholder, Playbook
class TestServer:
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_simple(self, tctx, pipeline):
hdrs1 = Placeholder(RequestHeaders)
hdrs2 = Placeholder(RequestHeaders)
req2 = (
b"GET http://example.com/two HTTP/1.1\r\n"
b"Host: example.com\r\n"
b"\r\n"
)
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"POST http://example.com/one HTTP/1.1\r\n"
b"Content-Length: 3\r\n"
b"\r\n"
b"abc"
+ (req2 if pipeline else b""))
<< ReceiveHttp(hdrs1)
<< ReceiveHttp(RequestData(1, b"abc"))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, req2)
assert (
playbook
<< ReceiveHttp(hdrs2)
<< ReceiveHttp(RequestEndOfMessage(3))
)
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_connect(self, tctx, pipeline):
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"CONNECT example.com:443 HTTP/1.1\r\n"
b"content-length: 0\r\n"
b"\r\n"
+ (b"some plain tcp" if pipeline else b""))
<< ReceiveHttp(Placeholder(RequestHeaders))
# << ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, b"some plain tcp")
assert (playbook
<< ReceiveHttp(RequestData(1, b"some plain tcp"))
)
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_upgrade(self, tctx, pipeline):
playbook = Playbook(Http1Server(tctx))
(
playbook
>> DataReceived(tctx.client,
b"POST http://example.com/one HTTP/1.1\r\n"
b"Connection: Upgrade\r\n"
b"Upgrade: websocket\r\n"
b"\r\n"
+ (b"some websockets" if pipeline else b""))
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(101))
<< SendData(tctx.client, b'HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
)
if not pipeline:
playbook >> DataReceived(tctx.client, b"some websockets")
assert (playbook
<< ReceiveHttp(RequestData(1, b"some websockets"))
)
def test_upgrade_denied(self, tctx):
assert (
Playbook(Http1Server(tctx))
>> DataReceived(tctx.client,
b"GET http://example.com/ HTTP/1.1\r\n"
b"Connection: Upgrade\r\n"
b"Upgrade: websocket\r\n"
b"\r\n")
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(1))
>> ResponseHeaders(1, http.Response.make(200))
<< SendData(tctx.client, b'HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n')
>> ResponseEndOfMessage(1)
>> DataReceived(tctx.client, b"GET / HTTP/1.1\r\n\r\n")
<< ReceiveHttp(Placeholder(RequestHeaders))
<< ReceiveHttp(RequestEndOfMessage(3))
)
class TestClient:
@pytest.mark.parametrize("pipeline", ["pipeline", None])
def test_simple(self, tctx, pipeline):
req = http.Request.make("GET", "http://example.com/")
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
(
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
)
if pipeline:
with pytest.raises(AssertionError, match="assert self.stream_id == event.stream_id"):
assert (playbook
>> RequestHeaders(3, req, True)
)
return
assert (
playbook
>> DataReceived(tctx.server, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
# no we can send the next request
>> RequestHeaders(3, req, True)
<< SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
)
assert resp().response.status_code == 200
def test_connect(self, tctx):
req = http.Request.make("CONNECT", "http://example.com:443")
req.authority = "example.com:443"
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server, b"CONNECT example.com:443 HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\nsome plain tcp")
<< ReceiveHttp(resp)
# << ReceiveHttp(ResponseEndOfMessage(1))
<< ReceiveHttp(ResponseData(1, b"some plain tcp"))
# no we can send plain data
>> RequestData(1, b"some more tcp")
<< SendData(tctx.server, b"some more tcp")
)
def test_upgrade(self, tctx):
req = http.Request.make("GET", "http://example.com/ws", headers={
"Connection": "Upgrade",
"Upgrade": "websocket",
})
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server,
b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\nhello")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
<< ReceiveHttp(ResponseData(1, b"hello"))
# no we can send plain data
>> RequestData(1, b"some more websockets")
<< SendData(tctx.server, b"some more websockets")
)
def test_upgrade_denied(self, tctx):
req = http.Request.make("GET", "http://example.com/ws", headers={
"Connection": "Upgrade",
"Upgrade": "websocket",
})
resp = Placeholder(ResponseHeaders)
playbook = Playbook(Http1Client(tctx))
assert (
playbook
>> RequestHeaders(1, req, True)
<< SendData(tctx.server,
b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n")
>> RequestEndOfMessage(1)
>> DataReceived(tctx.server, b"HTTP/1.1 200 Ok\r\ncontent-length: 0\r\n\r\n")
<< ReceiveHttp(resp)
<< ReceiveHttp(ResponseEndOfMessage(1))
>> RequestHeaders(3, req, True)
<< SendData(tctx.server, Placeholder(bytes))
)

View File

@ -10,13 +10,13 @@ from mitmproxy.flow import Error
from mitmproxy.http import HTTPFlow
from mitmproxy.net.http import Headers, Request, status_codes
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy2.context import Context, Server
from mitmproxy.proxy2.events import ConnectionClosed, DataReceived
from mitmproxy.proxy2.layers import http
from mitmproxy.proxy2.layers.http._http2 import split_pseudo_headers, Http2Client
from test.mitmproxy.proxy2.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy.context import Context, Server
from mitmproxy.proxy.events import ConnectionClosed, DataReceived
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers.http._http2 import split_pseudo_headers, Http2Client
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply
example_request_headers = (
(b':method', b'GET'),

View File

@ -10,15 +10,15 @@ from mitmproxy import options
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2 import context, events
from mitmproxy.proxy2.commands import OpenConnection, SendData
from mitmproxy.proxy2.context import Server
from mitmproxy.proxy2.events import DataReceived, Start, ConnectionClosed
from mitmproxy.proxy2.layers import http
from test.mitmproxy.proxy2.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy2.layers.http.test_http2 import make_h2, example_response_headers, example_request_headers, \
from mitmproxy.proxy import context, events
from mitmproxy.proxy.commands import OpenConnection, SendData
from mitmproxy.proxy.context import Server
from mitmproxy.proxy.events import DataReceived, Start, ConnectionClosed
from mitmproxy.proxy.layers import http
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.layers.http.test_http2 import make_h2, example_response_headers, example_request_headers, \
start_h2_client
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply, _TracebackInPlaybook, _eq
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply, _TracebackInPlaybook, _eq
opts = options.Options()
Proxyserver().load(opts)

View File

@ -6,13 +6,13 @@ import h2.events
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy2.context import Context, Server
from mitmproxy.proxy2.events import DataReceived
from mitmproxy.proxy2.layers import http
from test.mitmproxy.proxy2.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy2.layers.http.test_http2 import example_request_headers, example_response_headers, make_h2
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy.context import Context, Server
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.layers.http.test_http2 import example_request_headers, example_response_headers, make_h2
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply
h2f = FrameFactory()

View File

@ -4,16 +4,16 @@ import pytest
from mitmproxy import platform
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData, GetSocket, Log
from mitmproxy.proxy2.context import Client, Context, Server
from mitmproxy.proxy2.events import DataReceived, ConnectionClosed
from mitmproxy.proxy2.layer import NextLayer, NextLayerHook
from mitmproxy.proxy2.layers import http, modes, tcp, tls
from mitmproxy.proxy2.layers.tcp import TcpStartHook, TcpMessageHook
from mitmproxy.proxy2.layers.tls import ClientTLSLayer, TlsStartHook
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData, GetSocket, Log
from mitmproxy.proxy.context import Client, Context, Server
from mitmproxy.proxy.events import DataReceived, ConnectionClosed
from mitmproxy.proxy.layer import NextLayer, NextLayerHook
from mitmproxy.proxy.layers import http, modes, tcp, tls
from mitmproxy.proxy.layers.tcp import TcpStartHook, TcpMessageHook
from mitmproxy.proxy.layers.tls import ClientTLSLayer, TlsStartHook
from mitmproxy.tcp import TCPFlow
from test.mitmproxy.proxy2.layers.test_tls import reply_tls_start
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply, reply_next_layer
from test.mitmproxy.proxy.layers.test_tls import reply_tls_start
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply, reply_next_layer
def test_upstream_https(tctx):

View File

@ -1,9 +1,9 @@
import pytest
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy2.context import ConnectionState
from mitmproxy.proxy2.events import ConnectionClosed, DataReceived
from mitmproxy.proxy2.layers import tcp
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy.context import ConnectionState
from mitmproxy.proxy.events import ConnectionClosed, DataReceived
from mitmproxy.proxy.layers import tcp
from mitmproxy.tcp import TCPFlow
from ..tutils import Placeholder, Playbook, reply

View File

@ -4,11 +4,11 @@ import typing
import pytest
from OpenSSL import SSL
from mitmproxy.proxy2 import commands, context, events, layer
from mitmproxy.proxy2.context import ConnectionState
from mitmproxy.proxy2.layers import tls
from mitmproxy.proxy import commands, context, events, layer
from mitmproxy.proxy.context import ConnectionState
from mitmproxy.proxy.layers import tls
from mitmproxy.utils import data
from test.mitmproxy.proxy2 import tutils
from test.mitmproxy.proxy import tutils
tlsdata = data.Data(__name__)

View File

@ -1,26 +1,26 @@
from hypothesis import given, example
from hypothesis.strategies import binary, integers
from mitmproxy.net.tls import ClientHello
from mitmproxy.proxy2.layers.tls import parse_client_hello
client_hello_with_extensions = bytes.fromhex(
"16030300bb" # record layer
"010000b7" # handshake layer
"03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
"cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
"78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
"00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
"170018"
)
@given(i=integers(0, len(client_hello_with_extensions)), data=binary())
@example(i=183, data=b'\x00\x00\x00\x00\x00\x00\x00\x00\x00')
def test_fuzz_h2_request_chunks(i, data):
try:
ch = parse_client_hello(client_hello_with_extensions[:i] + data)
except ValueError:
pass
else:
assert ch is None or isinstance(ch, ClientHello)
from hypothesis import given, example
from hypothesis.strategies import binary, integers
from mitmproxy.net.tls import ClientHello
from mitmproxy.proxy.layers.tls import parse_client_hello
client_hello_with_extensions = bytes.fromhex(
"16030300bb" # record layer
"010000b7" # handshake layer
"03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
"cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
"78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
"00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
"170018"
)
@given(i=integers(0, len(client_hello_with_extensions)), data=binary())
@example(i=183, data=b'\x00\x00\x00\x00\x00\x00\x00\x00\x00')
def test_fuzz_h2_request_chunks(i, data):
try:
ch = parse_client_hello(client_hello_with_extensions[:i] + data)
except ValueError:
pass
else:
assert ch is None or isinstance(ch, ClientHello)

View File

@ -8,12 +8,12 @@ import wsproto.events
from mitmproxy.http import HTTPFlow
from mitmproxy.net.http import Request, Response
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2.commands import SendData, CloseConnection, Log
from mitmproxy.proxy2.context import ConnectionState
from mitmproxy.proxy2.events import DataReceived, ConnectionClosed
from mitmproxy.proxy2.layers import http, websocket
from mitmproxy.proxy.commands import SendData, CloseConnection, Log
from mitmproxy.proxy.context import ConnectionState
from mitmproxy.proxy.events import DataReceived, ConnectionClosed
from mitmproxy.proxy.layers import http, websocket
from mitmproxy.websocket import WebSocketFlow
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply
@dataclass

View File

@ -1,6 +1,6 @@
import pytest
from mitmproxy.proxy2 import commands, context
from mitmproxy.proxy import commands, context
@pytest.fixture

View File

@ -0,0 +1,28 @@
import pytest
from mitmproxy import options
from mitmproxy import exceptions
from mitmproxy.proxy.config import ProxyConfig
class TestProxyConfig:
def test_invalid_confdir(self):
opts = options.Options()
opts.confdir = "foo"
with pytest.raises(exceptions.OptionsError, match="parent directory does not exist"):
ProxyConfig(opts)
def test_invalid_certificate(self, tdata):
opts = options.Options()
opts.certs = [tdata.path("mitmproxy/data/dumpfile-011.bin")]
with pytest.raises(exceptions.OptionsError, match="Invalid certificate format"):
ProxyConfig(opts)
def test_cannot_set_both_allow_and_filter_options(self):
opts = options.Options()
opts.ignore_hosts = ["foo"]
opts.allow_hosts = ["bar"]
with pytest.raises(exceptions.OptionsError, match="--ignore-hosts and --allow-hosts are "
"mutually exclusive; please choose "
"one."):
ProxyConfig(opts)

View File

@ -1,4 +1,4 @@
from mitmproxy.proxy2 import context
from mitmproxy.proxy import context
from mitmproxy.test import tflow, taddons

View File

@ -2,7 +2,7 @@ from unittest.mock import Mock
import pytest
from mitmproxy.proxy2 import events, context, commands
from mitmproxy.proxy import events, context, commands
@pytest.fixture

View File

@ -1,7 +1,7 @@
import pytest
from mitmproxy.proxy2 import commands, events, layer
from test.mitmproxy.proxy2 import tutils
from mitmproxy.proxy import commands, events, layer
from test.mitmproxy.proxy import tutils
class TestLayer:

View File

@ -1,4 +1,4 @@
from mitmproxy.proxy2 import server_hooks
from mitmproxy.proxy import server_hooks
def test_noop():

View File

@ -2,11 +2,11 @@ from typing import Tuple, Optional
import pytest
from mitmproxy.proxy2 import tunnel, layer
from mitmproxy.proxy2.commands import SendData, Log, CloseConnection, OpenConnection
from mitmproxy.proxy2.context import Context, Server, ConnectionState
from mitmproxy.proxy2.events import Event, DataReceived, Start, ConnectionClosed
from test.mitmproxy.proxy2.tutils import Playbook, reply
from mitmproxy.proxy import tunnel, layer
from mitmproxy.proxy.commands import SendData, Log, CloseConnection, OpenConnection
from mitmproxy.proxy.context import Context, Server, ConnectionState
from mitmproxy.proxy.events import Event, DataReceived, Start, ConnectionClosed
from test.mitmproxy.proxy.tutils import Playbook, reply
class TChildLayer(layer.Layer):

View File

@ -3,7 +3,7 @@ from dataclasses import dataclass
import pytest
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy import commands, events, layer
from . import tutils

View File

@ -1,6 +1,6 @@
import pytest
from mitmproxy.proxy2.utils import expect
from mitmproxy.proxy.utils import expect
def test_expect():

View File

@ -5,11 +5,11 @@ import re
import traceback
import typing
from mitmproxy.proxy2 import commands, context, layer
from mitmproxy.proxy2 import events
from mitmproxy.proxy2.context import ConnectionState
from mitmproxy.proxy2.events import command_reply_subclasses
from mitmproxy.proxy2.layer import Layer
from mitmproxy.proxy import commands, context, layer
from mitmproxy.proxy import events
from mitmproxy.proxy.context import ConnectionState
from mitmproxy.proxy.events import command_reply_subclasses
from mitmproxy.proxy.layer import Layer
PlaybookEntry = typing.Union[commands.Command, events.Event]
PlaybookEntryList = typing.List[PlaybookEntry]

View File

@ -1,13 +1,12 @@
import time
import pytest
from mitmproxy import controller
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy import controller
import time
from .. import tservers
class Thing:
def __init__(self):
@ -15,7 +14,7 @@ class Thing:
self.live = True
class TestConcurrent(tservers.MasterTest):
class TestConcurrent:
def test_concurrent(self, tdata):
with taddons.context() as tctx:
sc = tctx.script(

View File

@ -1,225 +0,0 @@
import socket
import threading
import ssl
import OpenSSL
import pytest
from unittest import mock
from mitmproxy import connections
from mitmproxy import exceptions
from mitmproxy.net import tcp
from mitmproxy.net.http import http1
from mitmproxy.test import tflow
from .net import tservers
from ..conftest import skip_new_proxy_core
class TestClientConnection:
@skip_new_proxy_core
def test_send(self):
c = tflow.tclient_conn()
c.send(b'foobar')
c.send([b'foo', b'bar'])
with pytest.raises(TypeError):
c.send('string')
with pytest.raises(TypeError):
c.send(['string', 'not'])
assert c.wfile.getvalue() == b'foobarfoobar'
@skip_new_proxy_core
def test_repr(self):
c = tflow.tclient_conn()
assert '127.0.0.1:22' in repr(c)
assert 'ALPN' in repr(c)
assert 'TLS' in repr(c)
c.alpn_proto_negotiated = None
c.tls_established = False
assert 'ALPN' not in repr(c)
assert 'TLS' not in repr(c)
c.address = None
assert repr(c)
@skip_new_proxy_core
def test_tls_established_property(self):
c = tflow.tclient_conn()
c.tls_established = True
assert c.tls_established
assert c.tls_established
c.tls_established = False
assert not c.tls_established
assert not c.tls_established
def test_make_dummy(self):
c = connections.ClientConnection.make_dummy(('foobar', 1234))
assert c.address == ('foobar', 1234)
def test_state(self):
c = tflow.tclient_conn()
assert connections.ClientConnection.from_state(c.get_state()).get_state() == \
c.get_state()
c2 = tflow.tclient_conn()
c2.address = (c2.address[0], 4242)
assert not c == c2
c2.timestamp_start = 42
c.set_state(c2.get_state())
assert c.timestamp_start == 42
c3 = c.copy()
assert c3.get_state() != c.get_state()
c.id = c3.id = "foo"
assert c3.get_state() == c.get_state()
def test_eq(self):
c = tflow.tclient_conn()
c2 = c.copy()
assert c == c
assert c != c2
assert c != 42
assert hash(c) != hash(c2)
class TestServerConnection:
@skip_new_proxy_core
def test_send(self):
c = tflow.tserver_conn()
c.send(b'foobar')
c.send([b'foo', b'bar'])
with pytest.raises(TypeError):
c.send('string')
with pytest.raises(TypeError):
c.send(['string', 'not'])
assert c.wfile.getvalue() == b'foobarfoobar'
@skip_new_proxy_core
def test_repr(self):
c = tflow.tserver_conn()
c.sni = 'foobar'
c.tls_established = True
c.alpn_proto_negotiated = b'h2'
assert 'address:22' in repr(c)
assert 'ALPN' in repr(c)
assert 'TLSv1.2: foobar' in repr(c)
c.sni = None
c.tls_established = True
c.alpn_proto_negotiated = None
assert 'ALPN' not in repr(c)
assert 'TLS' in repr(c)
c.sni = None
c.tls_established = False
assert 'TLS' not in repr(c)
c.address = None
assert repr(c)
@skip_new_proxy_core
def test_tls_established_property(self):
c = tflow.tserver_conn()
c.tls_established = True
assert c.tls_established
assert c.tls_established
c.tls_established = False
assert not c.tls_established
assert not c.tls_established
def test_make_dummy(self):
c = connections.ServerConnection.make_dummy(('foobar', 1234))
assert c.address == ('foobar', 1234)
def test_sni(self):
c = connections.ServerConnection(('', 1234))
with pytest.raises(ValueError, match='sni must be str, not '):
c.establish_tls(sni=b'foobar')
def test_state(self):
c = tflow.tserver_conn()
c2 = c.copy()
assert c2.get_state() != c.get_state()
c.id = c2.id = "foo"
assert c2.get_state() == c.get_state()
def test_eq(self):
c = tflow.tserver_conn()
c2 = c.copy()
assert c == c
assert c != c2
assert c != 42
assert hash(c) != hash(c2)
class TestClientConnectionTLS:
@pytest.mark.parametrize("sni", [
None,
"example.com"
])
def test_tls_with_sni(self, sni, tdata):
address = ('127.0.0.1', 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen()
address = sock.getsockname()
def client_run():
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
s = socket.create_connection(address)
s = ctx.wrap_socket(s, server_hostname=sni)
s.send(b'foobar')
# we need to wait for the test to finish successfully before calling .close() on Windows.
# The workaround here is to signal completion by sending data the other way around.
s.recv(3)
s.close()
threading.Thread(target=client_run).start()
connection, client_address = sock.accept()
c = connections.ClientConnection(connection, client_address, None)
cert = tdata.path("mitmproxy/net/data/server.crt")
with open(tdata.path("mitmproxy/net/data/server.key")) as f:
raw_key = f.read()
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
raw_key)
c.convert_to_tls(cert, key)
assert c.connected()
assert c.sni == sni
assert c.tls_established
assert c.rfile.read(6) == b'foobar'
c.wfile.send(b"foo")
c.finish()
sock.close()
class TestServerConnectionTLS(tservers.ServerTestBase):
ssl = True
class handler(tcp.BaseHandler):
def handle(self):
self.finish()
@pytest.mark.parametrize("client_certs", [
None,
"mitmproxy/data/clientcert",
"mitmproxy/data/clientcert/client.pem",
])
def test_tls(self, client_certs, tdata):
if client_certs:
client_certs = tdata.path(client_certs)
c = connections.ServerConnection(("127.0.0.1", self.port))
c.connect()
c.establish_tls(client_certs=client_certs)
assert c.connected()
assert c.tls_established
c.close()
c.finish()

View File

@ -9,7 +9,23 @@ from mitmproxy import options
from mitmproxy.exceptions import FlowReadException
from mitmproxy.io import tnetstring
from mitmproxy.test import taddons, tflow
from . import tservers
class State:
def __init__(self):
self.flows = []
def request(self, f):
if f not in self.flows:
self.flows.append(f)
def response(self, f):
if f not in self.flows:
self.flows.append(f)
def websocket_start(self, f):
if f not in self.flows:
self.flows.append(f)
class TestSerialize:
@ -99,7 +115,7 @@ class TestFlowMaster:
opts = options.Options(
mode="reverse:https://use-this-domain"
)
s = tservers.TestState()
s = State()
with taddons.context(s, options=opts) as ctx:
f = tflow.tflow(resp=True)
await ctx.master.load_flow(f)
@ -110,7 +126,7 @@ class TestFlowMaster:
opts = options.Options(
mode="reverse:https://use-this-domain"
)
s = tservers.TestState()
s = State()
with taddons.context(s, options=opts) as ctx:
f = tflow.twebsocketflow()
await ctx.master.load_flow(f.handshake_flow)
@ -124,7 +140,7 @@ class TestFlowMaster:
opts = options.Options(
mode="reverse:https://use-this-domain"
)
s = tservers.TestState()
s = State()
with taddons.context(s, options=opts) as ctx:
f = tflow.tflow(req=None)
await ctx.master.addons.handle_lifecycle("clientconnect", f.client_conn)

View File

@ -1,16 +1,11 @@
import argparse
import platform
from unittest import mock
import pytest
from mitmproxy import options
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy import config
from mitmproxy.proxy.server import ConnectionHandler, DummyServer, ProxyServer
from mitmproxy.proxy import DummyServer
from mitmproxy.tools import cmdline
from mitmproxy.tools import main
from ..conftest import skip_windows, skip_new_proxy_core
class MockParser(argparse.ArgumentParser):
@ -48,50 +43,9 @@ class TestProcessProxyOptions:
tdata.path("mitmproxy/data/testkey.pem"))
class TestProxyServer:
@skip_windows
@pytest.mark.skipif(platform.system() != "Linux", reason="Linux-only")
def test_err(self):
# binding to 0.0.0.0:1 works without special permissions on Windows and macOS Mojave+
conf = ProxyConfig(options.Options(listen_port=1))
with pytest.raises(Exception, match="Error starting proxy server"):
ProxyServer(conf)
def test_err_2(self):
conf = ProxyConfig(options.Options(listen_host="256.256.256.256"))
with pytest.raises(Exception, match="Error starting proxy server"):
ProxyServer(conf)
class TestDummyServer:
def test_simple(self):
d = DummyServer(None)
d.set_channel(None)
d.shutdown()
class TestConnectionHandler:
@skip_new_proxy_core
def test_fatal_error(self, capsys):
opts = options.Options()
pconf = config.ProxyConfig(opts)
channel = mock.Mock()
def ask(_, x):
raise RuntimeError
channel._ask = ask
c = ConnectionHandler(
mock.MagicMock(),
("127.0.0.1", 8080),
pconf,
channel
)
c.handle()
_, err = capsys.readouterr()
assert "mitmproxy has crashed" in err

Some files were not shown because too many files have changed in this diff Show More