Merge pull request #2152 from cortesi/readfile

Extract flow reading into addons
This commit is contained in:
Aldo Cortesi 2017-03-16 15:51:21 +13:00 committed by GitHub
commit 1e6c51c698
35 changed files with 354 additions and 212 deletions

View File

@ -1,5 +1,6 @@
from mitmproxy import exceptions
from mitmproxy import eventsequence
from . import ctx
import pprint
@ -31,31 +32,27 @@ class AddonManager:
return i
def configure_all(self, options, updated):
self.invoke_all_with_context("configure", options, updated)
def startup(self, s):
"""
Run startup events on addon.
"""
self.invoke_with_context(s, "start", self.master.options)
self.trigger("configure", options, updated)
def add(self, *addons):
"""
Add addons to the end of the chain, and run their startup events.
"""
self.chain.extend(addons)
for i in addons:
self.startup(i)
with self.master.handlecontext():
for i in addons:
self.invoke_addon(i, "start", self.master.options)
def remove(self, addon):
"""
Remove an addon from the chain, and run its done events.
"""
self.chain = [i for i in self.chain if i is not addon]
self.invoke_with_context(addon, "done")
with self.master.handlecontext():
self.invoke_addon(addon, "done")
def done(self):
self.invoke_all_with_context("done")
self.trigger("done")
def __len__(self):
return len(self.chain)
@ -63,18 +60,17 @@ class AddonManager:
def __str__(self):
return pprint.pformat([str(i) for i in self.chain])
def invoke_with_context(self, addon, name, *args, **kwargs):
with self.master.handlecontext():
self.invoke(addon, name, *args, **kwargs)
def invoke_all_with_context(self, name, *args, **kwargs):
with self.master.handlecontext():
for i in self.chain:
self.invoke(i, name, *args, **kwargs)
def invoke(self, addon, name, *args, **kwargs):
if name not in eventsequence.Events: # prama: no cover
raise NotImplementedError("Unknown event")
def invoke_addon(self, addon, name, *args, **kwargs):
"""
Invoke an event on an addon. This method must run within an
established handler context.
"""
if not ctx.master:
raise exceptions.AddonError(
"invoke_addon called without a handler context."
)
if name not in eventsequence.Events:
name = "event_" + name
func = getattr(addon, name, None)
if func:
if not callable(func):
@ -83,9 +79,13 @@ class AddonManager:
)
func(*args, **kwargs)
def __call__(self, name, *args, **kwargs):
for i in self.chain:
try:
self.invoke(i, name, *args, **kwargs)
except exceptions.AddonHalt:
return
def trigger(self, name, *args, **kwargs):
"""
Establish a handler context and trigger an event across all addons
"""
with self.master.handlecontext():
for i in self.chain:
try:
self.invoke_addon(i, name, *args, **kwargs)
except exceptions.AddonHalt:
return

View File

@ -8,6 +8,7 @@ from mitmproxy.addons import disable_h2c
from mitmproxy.addons import onboarding
from mitmproxy.addons import proxyauth
from mitmproxy.addons import replace
from mitmproxy.addons import readfile
from mitmproxy.addons import script
from mitmproxy.addons import serverplayback
from mitmproxy.addons import setheaders
@ -37,5 +38,6 @@ def default_addons():
stickycookie.StickyCookie(),
streambodies.StreamBodies(),
streamfile.StreamFile(),
readfile.ReadFile(),
upstream_auth.UpstreamAuth(),
]

View File

@ -10,7 +10,6 @@ class ClientPlayback:
def __init__(self):
self.flows = None
self.current_thread = None
self.keepserving = False
self.has_replayed = False
def count(self) -> int:
@ -32,7 +31,6 @@ class ClientPlayback:
self.load(flows)
else:
self.flows = None
self.keepserving = options.keepserving
def tick(self):
if self.current_thread and not self.current_thread.is_alive():
@ -41,5 +39,5 @@ class ClientPlayback:
self.current_thread = ctx.master.replay_request(self.flows.pop(0))
self.has_replayed = True
if self.has_replayed:
if not self.flows and not self.current_thread and not self.keepserving:
ctx.master.shutdown()
if not self.flows and not self.current_thread:
ctx.master.addons.trigger("processing_complete")

View File

@ -0,0 +1,7 @@
from mitmproxy import ctx
class KeepServing:
    """
    Shuts the proxy master down once flow processing is complete,
    unless the keepserving option asks us to stay up.
    """

    def event_processing_complete(self):
        # "processing_complete" is a custom event fired by addons such as
        # ReadFile, ReadStdin, clientplayback and serverplayback.
        keepserving = ctx.master.options.keepserving
        if not keepserving:
            ctx.master.shutdown()

View File

@ -0,0 +1,46 @@
import os.path
from mitmproxy import ctx
from mitmproxy import io
from mitmproxy import exceptions
class ReadFile:
    """
    An addon that handles reading from file on startup.
    """
    def __init__(self):
        # Path of the flow file to load, recorded by configure() when the
        # rfile option is set; cleared again once running() has consumed it.
        self.path = None

    def load_flows_file(self, path: str) -> int:
        """
        Read serialized flows from *path* and feed each one to the master.

        Returns the number of flows loaded. Raises FlowReadException on an
        unreadable or corrupted file; flows decoded before the corruption
        point have already been loaded when the exception is raised.
        """
        path = os.path.expanduser(path)
        cnt = 0
        try:
            with open(path, "rb") as f:
                freader = io.FlowReader(f)
                for i in freader.stream():
                    cnt += 1
                    ctx.master.load_flow(i)
                return cnt
        except (IOError, exceptions.FlowReadException) as v:
            # cnt > 0 means the file was partially readable: warn but keep
            # what we got; otherwise treat it as a hard error.
            if cnt:
                ctx.log.warn(
                    "Flow file corrupted - loaded %i flows." % cnt,
                )
            else:
                ctx.log.error("Flow file corrupted.")
            # Re-wrap so callers only have to handle FlowReadException.
            raise exceptions.FlowReadException(v)

    def configure(self, options, updated):
        # Only remember the path here; the actual read is deferred to
        # running(), when the master is fully up.
        if "rfile" in updated and options.rfile:
            self.path = options.rfile

    def running(self):
        if self.path:
            try:
                self.load_flows_file(self.path)
            except exceptions.FlowReadException as v:
                # Surface read failures as a startup options error.
                raise exceptions.OptionsError(v)
            finally:
                # One-shot: never re-read the same path on a later running().
                self.path = None
            # Fires only after a file was actually read, so that addons like
            # KeepServing can decide whether to shut the master down.
            # NOTE(review): indentation reconstructed from a flattened diff —
            # confirm the trigger is inside the `if self.path` branch.
            ctx.master.addons.trigger("processing_complete")

View File

@ -0,0 +1,26 @@
from mitmproxy import ctx
from mitmproxy import io
from mitmproxy import exceptions
import sys
class ReadStdin:
    """
    An addon that reads from stdin if we're not attached to (something like)
    a tty.
    """
    def running(self, stdin = sys.stdin):
        # *stdin* is parameterized so tests can substitute a fake stream.
        if not stdin.isatty():
            ctx.log.info("Reading from stdin")
            try:
                # Zero-byte probe read: verifies the underlying binary
                # buffer is actually readable before we start streaming.
                stdin.buffer.read(0)
            except Exception as e:
                ctx.log.warn("Cannot read from stdin: {}".format(e))
                return
            freader = io.FlowReader(stdin.buffer)
            try:
                for i in freader.stream():
                    ctx.master.load_flow(i)
            except exceptions.FlowReadException as e:
                # Corrupt input: keep the flows decoded so far, log the rest.
                ctx.log.error("Error reading from stdin: %s" % e)
            # Let addons like KeepServing react once the input is consumed.
            # NOTE(review): indentation reconstructed from a flattened diff —
            # confirm the trigger is inside the isatty() branch.
            ctx.master.addons.trigger("processing_complete")

View File

@ -273,11 +273,11 @@ class ScriptLoader:
ctx.master.addons.chain = ochain[:pos + 1] + ordered + ochain[pos + 1:]
for s in newscripts:
ctx.master.addons.startup(s)
ctx.master.addons.invoke_addon(s, "start", options)
if self.is_running:
# If we're already running, we configure and tell the addon
# we're up and running.
ctx.master.addons.invoke_with_context(
ctx.master.addons.invoke_addon(
s, "configure", options, options.keys()
)
ctx.master.addons.invoke_with_context(s, "running")
ctx.master.addons.invoke_addon(s, "running")

View File

@ -104,7 +104,7 @@ class ServerPlayback:
def tick(self):
if self.stop and not self.final_flow.live:
ctx.master.shutdown()
ctx.master.addons.trigger("processing_complete")
def request(self, f):
if self.flowmap:
@ -115,7 +115,7 @@ class ServerPlayback:
if self.options.refresh_server_playback:
response.refresh()
f.response = response
if not self.flowmap and not self.options.keepserving:
if not self.flowmap:
self.final_flow = f
self.stop = True
elif self.options.replay_kill_extra:

View File

@ -66,8 +66,7 @@ def handler(f):
with master.handlecontext():
ret = f(master, message)
if handling:
master.addons(f.__name__, message)
master.addons.trigger(f.__name__, message)
# Reset the handled flag - it's common for us to feed the same object
# through handlers repeatedly, so we don't want this to persist across

View File

@ -102,6 +102,9 @@ class AddonError(MitmproxyException):
class AddonHalt(MitmproxyException):
"""
Raised by addons to signal that no further handlers should handle this event.
"""
pass

View File

@ -1,8 +1,6 @@
import os
import threading
import contextlib
import queue
import sys
from mitmproxy import addonmanager
from mitmproxy import options
@ -12,7 +10,6 @@ from mitmproxy import exceptions
from mitmproxy import connections
from mitmproxy import http
from mitmproxy import log
from mitmproxy import io
from mitmproxy.proxy.protocol import http_replay
from mitmproxy.types import basethread
@ -67,8 +64,7 @@ class Master:
"""
level: debug, info, warn, error
"""
with self.handlecontext():
self.addons("log", log.LogEntry(e, level))
self.addons.trigger("log", log.LogEntry(e, level))
def start(self):
self.should_exit.clear()
@ -88,9 +84,8 @@ class Master:
def tick(self, timeout):
if self.first_tick:
self.first_tick = False
self.addons.invoke_all_with_context("running")
with self.handlecontext():
self.addons("tick")
self.addons.trigger("running")
self.addons.trigger("tick")
changed = False
try:
mtype, obj = self.event_queue.get(timeout=timeout)
@ -150,33 +145,6 @@ class Master:
for e, o in eventsequence.iterate(f):
getattr(self, e)(o)
def load_flows(self, fr: io.FlowReader) -> int:
"""
Load flows from a FlowReader object.
"""
cnt = 0
for i in fr.stream():
cnt += 1
self.load_flow(i)
return cnt
def load_flows_file(self, path: str) -> int:
path = os.path.expanduser(path)
try:
if path == "-":
try:
sys.stdin.buffer.read(0)
except Exception as e:
raise IOError("Cannot read from stdin: {}".format(e))
freader = io.FlowReader(sys.stdin.buffer)
return self.load_flows(freader)
else:
with open(path, "rb") as f:
freader = io.FlowReader(f)
return self.load_flows(freader)
except IOError as v:
raise exceptions.FlowReadException(v.strerror)
def replay_request(
self,
f: http.HTTPFlow,

View File

@ -79,7 +79,10 @@ class Options(optmanager.OptManager):
)
self.add_option(
"keepserving", bool, False,
"Continue serving after client playback or file read."
"""
Instructs mitmdump to continue serving after client playback,
server playback or file read. This option is ignored by interactive tools, which always keep serving.
"""
)
self.add_option(
"server", bool, True,

View File

@ -6,16 +6,36 @@ from mitmproxy import proxy
from mitmproxy import eventsequence
class _AddonWrapper:
    """
    Wraps an AddonManager so that every triggered event is recorded on the
    owning RecordingMaster (in master.events) before being forwarded.
    """
    def __init__(self, master, addons):
        self.master = master
        self.addons = addons

    def trigger(self, event, *args, **kwargs):
        # Record first, then delegate to the real addon manager.
        self.master.events.append((event, args, kwargs))
        return self.addons.trigger(event, *args, **kwargs)

    def __getattr__(self, attr):
        # Any attribute we don't define falls through to the wrapped manager.
        return getattr(self.addons, attr)
class RecordingMaster(mitmproxy.master.Master):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.event_log = []
self.addons = _AddonWrapper(self, self.addons)
self.events = []
self.logs = []
def has_event(self, name):
for i in self.events:
if i[0] == name:
return True
def add_log(self, e, level):
self.event_log.append((level, e))
self.logs.append((level, e))
def clear(self):
self.event_log = []
self.logs = []
class context:

View File

@ -256,19 +256,6 @@ class ConsoleMaster(master.Master):
)
self.ab = statusbar.ActionBar()
if self.options.rfile:
ret = self.load_flows_path(self.options.rfile)
if ret and self.view.store_count():
signals.add_log(
"File truncated or corrupted. "
"Loaded as many flows as possible.",
"error"
)
elif ret and not self.view.store_count():
self.shutdown()
print("Could not load file: {}".format(ret), file=sys.stderr)
sys.exit(1)
self.loop.set_alarm_in(0.01, self.ticker)
self.loop.set_alarm_in(
@ -289,7 +276,10 @@ class ConsoleMaster(master.Master):
print("Shutting down...", file=sys.stderr)
finally:
sys.stderr.flush()
self.shutdown()
super().shutdown()
def shutdown(self):
raise urwid.ExitMainLoop
def view_help(self, helpctx):
signals.push_view_state.send(
@ -402,7 +392,7 @@ class ConsoleMaster(master.Master):
def quit(self, a):
if a != "n":
raise urwid.ExitMainLoop
self.shutdown()
def clear_events(self):
self.logbuffer[:] = []

View File

@ -1,9 +1,8 @@
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import addons
from mitmproxy import options
from mitmproxy import master
from mitmproxy.addons import dumper, termlog, termstatus
from mitmproxy.addons import dumper, termlog, termstatus, readstdin, keepserving
class DumpMaster(master.Master):
@ -22,21 +21,9 @@ class DumpMaster(master.Master):
self.addons.add(*addons.default_addons())
if with_dumper:
self.addons.add(dumper.Dumper())
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log("Flow file corrupted.", "error")
raise exceptions.OptionsError(v)
self.addons.add(readstdin.ReadStdin(), keepserving.KeepServing())
@controller.handler
def log(self, e):
if e.level == "error":
self.has_errored = True
def run(self): # pragma: no cover
if self.options.rfile and not self.options.keepserving:
self.addons.done()
return
super().run()

View File

@ -230,7 +230,8 @@ class DumpFlows(RequestHandler):
def post(self):
self.view.clear()
bio = BytesIO(self.filecontents)
self.master.load_flows(io.FlowReader(bio))
for i in io.FlowReader(bio).stream():
self.master.load_flow(i)
bio.close()

View File

@ -3,7 +3,6 @@ import webbrowser
import tornado.httpserver
import tornado.ioloop
from mitmproxy import addons
from mitmproxy import exceptions
from mitmproxy import log
from mitmproxy import master
from mitmproxy.addons import eventstore
@ -42,14 +41,6 @@ class WebMaster(master.Master):
)
# This line is just for type hinting
self.options = self.options # type: Options
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log(
"Could not read flow file: %s" % v,
"error"
)
def _sig_view_add(self, view, flow):
app.ClientConnection.broadcast(

View File

@ -12,7 +12,7 @@ class TestCheckALPN:
with taddons.context() as tctx:
a = check_alpn.CheckALPN()
tctx.configure(a)
assert not any(msg in m for l, m in tctx.master.event_log)
assert not any(msg in m for l, m in tctx.master.logs)
def test_check_no_alpn(self, disable_alpn):
msg = 'ALPN support missing'
@ -20,4 +20,4 @@ class TestCheckALPN:
with taddons.context() as tctx:
a = check_alpn.CheckALPN()
tctx.configure(a)
assert any(msg in m for l, m in tctx.master.event_log)
assert any(msg in m for l, m in tctx.master.logs)

View File

@ -16,4 +16,4 @@ class TestCheckCA:
tctx.master.server.config.certstore.default_ca.has_expired = mock.MagicMock(return_value=expired)
a = check_ca.CheckCA()
tctx.configure(a)
assert any(msg in m for l, m in tctx.master.event_log) is expired
assert any(msg in m for l, m in tctx.master.logs) is expired

View File

@ -23,7 +23,7 @@ class MockThread():
class TestClientPlayback:
def test_playback(self):
cp = clientplayback.ClientPlayback()
with taddons.context():
with taddons.context() as tctx:
assert cp.count() == 0
f = tflow.tflow(resp=True)
cp.load([f])
@ -35,17 +35,14 @@ class TestClientPlayback:
assert rp.called
assert cp.current_thread
cp.keepserving = False
cp.flows = None
cp.current_thread = None
with mock.patch("mitmproxy.master.Master.shutdown") as sd:
cp.tick()
assert sd.called
cp.tick()
assert tctx.master.has_event("processing_complete")
cp.current_thread = MockThread()
with mock.patch("mitmproxy.master.Master.shutdown") as sd:
cp.tick()
assert cp.current_thread is None
cp.tick()
assert cp.current_thread is None
def test_configure(self, tmpdir):
cp = clientplayback.ClientPlayback()

View File

@ -151,7 +151,7 @@ class TestContentView:
with taddons.context(options=options.Options()) as ctx:
ctx.configure(d, flow_detail=4, verbosity=3)
d.response(tflow.tflow())
assert "Content viewer failed" in ctx.master.event_log[0][1]
assert "Content viewer failed" in ctx.master.logs[0][1]
def test_tcp():

View File

@ -0,0 +1,10 @@
from mitmproxy.addons import keepserving
from mitmproxy.test import taddons
def test_keepserving():
    """KeepServing shuts the master down on processing_complete when the
    keepserving option is off (the default)."""
    addon = keepserving.KeepServing()
    with taddons.context() as tctx:
        addon.event_processing_complete()
        assert tctx.master.should_exit.is_set()

View File

@ -0,0 +1,62 @@
from mitmproxy.addons import readfile
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from unittest import mock
import pytest
def write_data(path, corrupt=False):
    """Write a fixture flow file to *path*: three response flows, three
    error flows, and two tcp flows; append junk bytes when *corrupt*."""
    with open(path, "wb") as handle:
        writer = io.FlowWriter(handle)
        for _ in range(3):
            writer.add(tflow.tflow(resp=True))
        for _ in range(3):
            writer.add(tflow.tflow(err=True))
        writer.add(tflow.ttcpflow())
        writer.add(tflow.ttcpflow(err=True))
        if corrupt:
            # Trailing garbage makes the reader fail partway through.
            handle.write(b"flibble")
@mock.patch('mitmproxy.master.Master.load_flow')
def test_configure(mck, tmpdir):
    """ReadFile records the path at configure time and only reads at
    running time; a corrupt file surfaces as an OptionsError."""
    rf = readfile.ReadFile()
    with taddons.context() as tctx:
        tf = str(tmpdir.join("tfile"))
        write_data(tf)
        tctx.configure(rf, rfile=str(tf))
        # configure() must not read the file yet.
        assert not mck.called
        rf.running()
        assert mck.called

        write_data(tf, corrupt=True)
        tctx.configure(rf, rfile=str(tf))
        # Startup read of a corrupt file is reported as an options error.
        with pytest.raises(exceptions.OptionsError):
            rf.running()
@mock.patch('mitmproxy.master.Master.load_flow')
def test_corruption(mck, tmpdir):
    """load_flows_file raises FlowReadException and logs once per failure."""
    rf = readfile.ReadFile()
    with taddons.context() as tctx:
        # Nonexistent file: nothing loaded, one error log entry.
        with pytest.raises(exceptions.FlowReadException):
            rf.load_flows_file("nonexistent")
        assert not mck.called
        assert len(tctx.master.logs) == 1

        # Corrupt file: flows before the corruption point are still loaded
        # (so load_flow was called), and a second log entry is produced.
        tfc = str(tmpdir.join("tfile"))
        write_data(tfc, corrupt=True)
        with pytest.raises(exceptions.FlowReadException):
            rf.load_flows_file(tfc)
        assert mck.called
        assert len(tctx.master.logs) == 2

View File

@ -0,0 +1,53 @@
import io
from mitmproxy.addons import readstdin
from mitmproxy.test import taddons
from mitmproxy.test import tflow
import mitmproxy.io
from unittest import mock
def gen_data(corrupt=False):
    """Return a rewound BytesIO of serialized fixture flows; append junk
    bytes when *corrupt* so a reader fails partway through."""
    buf = io.BytesIO()
    writer = mitmproxy.io.FlowWriter(buf)
    for _ in range(3):
        writer.add(tflow.tflow(resp=True))
    for _ in range(3):
        writer.add(tflow.tflow(err=True))
    writer.add(tflow.ttcpflow())
    writer.add(tflow.ttcpflow(err=True))
    if corrupt:
        buf.write(b"flibble")
    buf.seek(0)
    return buf
class mStdin:
    """Minimal stand-in for sys.stdin: exposes a .buffer and never
    reports being attached to a tty."""

    def __init__(self, d):
        # Mirror the text-stream API, where binary data lives on .buffer.
        self.buffer = d

    def isatty(self):
        # Always claim piped input so ReadStdin attempts to read.
        return False
@mock.patch('mitmproxy.master.Master.load_flow')
def test_read(m, tmpdir):
    """ReadStdin loads flows from a non-tty stream and logs on failure."""
    rf = readstdin.ReadStdin()
    with taddons.context() as tctx:
        assert not m.called
        rf.running(stdin=mStdin(gen_data()))
        assert m.called

        # buffer=None makes the probe read raise: a warning is logged and
        # no flows are read.
        rf.running(stdin=mStdin(None))
        assert tctx.master.logs
        tctx.master.clear()

        m.reset_mock()
        assert not m.called
        # Corrupt stream: leading flows still load, then an error is logged.
        rf.running(stdin=mStdin(gen_data(corrupt=True)))
        assert m.called
        assert tctx.master.logs

View File

@ -97,6 +97,6 @@ class TestReplaceFile:
tmpfile.remove()
f = tflow.tflow()
f.request.content = b"foo"
assert not tctx.master.event_log
assert not tctx.master.logs
r.request(f)
assert tctx.master.event_log
assert tctx.master.logs

View File

@ -22,14 +22,14 @@ def test_scriptenv():
with taddons.context() as tctx:
with script.scriptenv("path", []):
raise SystemExit
assert tctx.master.event_log[0][0] == "error"
assert "exited" in tctx.master.event_log[0][1]
assert tctx.master.logs[0][0] == "error"
assert "exited" in tctx.master.logs[0][1]
tctx.master.clear()
with script.scriptenv("path", []):
raise ValueError("fooo")
assert tctx.master.event_log[0][0] == "error"
assert "foo" in tctx.master.event_log[0][1]
assert tctx.master.logs[0][0] == "error"
assert "foo" in tctx.master.logs[0][1]
class Called:
@ -135,7 +135,7 @@ class TestScript:
f.write(".")
sc.tick()
time.sleep(0.1)
if tctx.master.event_log:
if tctx.master.logs:
return
raise AssertionError("Change event not detected.")
@ -147,11 +147,11 @@ class TestScript:
sc.start(tctx.options)
f = tflow.tflow(resp=True)
sc.request(f)
assert tctx.master.event_log[0][0] == "error"
assert len(tctx.master.event_log[0][1].splitlines()) == 6
assert re.search(r'addonscripts[\\/]error.py", line \d+, in request', tctx.master.event_log[0][1])
assert re.search(r'addonscripts[\\/]error.py", line \d+, in mkerr', tctx.master.event_log[0][1])
assert tctx.master.event_log[0][1].endswith("ValueError: Error!\n")
assert tctx.master.logs[0][0] == "error"
assert len(tctx.master.logs[0][1].splitlines()) == 6
assert re.search(r'addonscripts[\\/]error.py", line \d+, in request', tctx.master.logs[0][1])
assert re.search(r'addonscripts[\\/]error.py", line \d+, in mkerr', tctx.master.logs[0][1])
assert tctx.master.logs[0][1].endswith("ValueError: Error!\n")
def test_addon(self):
with taddons.context() as tctx:
@ -256,7 +256,7 @@ class TestScriptLoader:
"%s %s" % (rec, "c"),
]
)
debug = [(i[0], i[1]) for i in tctx.master.event_log if i[0] == "debug"]
debug = [(i[0], i[1]) for i in tctx.master.logs if i[0] == "debug"]
assert debug == [
('debug', 'a start'),
('debug', 'a configure'),
@ -270,7 +270,7 @@ class TestScriptLoader:
('debug', 'c configure'),
('debug', 'c running'),
]
tctx.master.event_log = []
tctx.master.logs = []
tctx.configure(
sc,
scripts = [
@ -279,11 +279,11 @@ class TestScriptLoader:
"%s %s" % (rec, "b"),
]
)
debug = [(i[0], i[1]) for i in tctx.master.event_log if i[0] == "debug"]
debug = [(i[0], i[1]) for i in tctx.master.logs if i[0] == "debug"]
# No events, only order has changed
assert debug == []
tctx.master.event_log = []
tctx.master.logs = []
tctx.configure(
sc,
scripts = [
@ -291,7 +291,7 @@ class TestScriptLoader:
"%s %s" % (rec, "a"),
]
)
debug = [(i[0], i[1]) for i in tctx.master.event_log if i[0] == "debug"]
debug = [(i[0], i[1]) for i in tctx.master.logs if i[0] == "debug"]
assert debug == [
('debug', 'c done'),
('debug', 'b done'),

View File

@ -34,7 +34,7 @@ def test_tick():
s.final_flow = tflow.tflow()
s.final_flow.live = False
s.tick()
assert tctx.master.should_exit.is_set()
assert tctx.master.has_event("processing_complete")
def test_server_playback():
@ -315,7 +315,6 @@ def test_server_playback_full():
tctx.configure(
s,
refresh_server_playback = True,
keepserving=False
)
f = tflow.tflow()

View File

@ -6,7 +6,7 @@ def test_configure():
ts = termstatus.TermStatus()
with taddons.context() as ctx:
ts.running()
assert not ctx.master.event_log
assert not ctx.master.logs
ctx.configure(ts, server=True)
ts.running()
assert ctx.master.event_log
assert ctx.master.logs

View File

@ -43,7 +43,7 @@ class TestConcurrent(tservers.MasterTest):
)
)
sc.start(tctx.options)
assert "decorator not supported" in tctx.master.event_log[0][1]
assert "decorator not supported" in tctx.master.logs[0][1]
def test_concurrent_class(self):
with taddons.context() as tctx:

View File

@ -11,6 +11,7 @@ class TAddon:
def __init__(self, name):
self.name = name
self.tick = True
self.custom_called = False
def __repr__(self):
return "Addon(%s)" % self.name
@ -18,11 +19,17 @@ class TAddon:
def done(self):
pass
def event_custom(self):
self.custom_called = True
def test_simple():
o = options.Options()
m = master.Master(o, proxy.DummyServer(o))
a = addonmanager.AddonManager(m)
with pytest.raises(exceptions.AddonError):
a.invoke_addon(TAddon("one"), "done")
a.add(TAddon("one"))
assert a.get("one")
assert not a.get("two")
@ -30,6 +37,11 @@ def test_simple():
assert not a.chain
a.add(TAddon("one"))
a("done")
a.trigger("done")
with pytest.raises(exceptions.AddonError):
a("tick")
a.trigger("tick")
ta = TAddon("one")
a.add(ta)
a.trigger("custom")
assert ta.custom_called

View File

@ -145,7 +145,7 @@ class TestHARDump:
path = str(tmpdir.join("somefile"))
m, sc = tscript("complex/har_dump.py", shlex.quote(path))
m.addons.invoke(m, "response", self.flow())
m.addons.trigger("response", self.flow())
m.addons.remove(sc)
with open(path, "r") as inp:
@ -156,7 +156,9 @@ class TestHARDump:
path = str(tmpdir.join("somefile"))
m, sc = tscript("complex/har_dump.py", shlex.quote(path))
m.addons.invoke(m, "response", self.flow(resp_content=b"foo" + b"\xFF" * 10))
m.addons.trigger(
"response", self.flow(resp_content=b"foo" + b"\xFF" * 10)
)
m.addons.remove(sc)
with open(path, "r") as inp:
@ -194,7 +196,7 @@ class TestHARDump:
path = str(tmpdir.join("somefile"))
m, sc = tscript("complex/har_dump.py", shlex.quote(path))
m.addons.invoke(m, "response", f)
m.addons.trigger("response", f)
m.addons.remove(sc)
with open(path, "r") as inp:

View File

@ -3,12 +3,13 @@ import pytest
from mitmproxy.test import tflow
import mitmproxy.io
from mitmproxy import flowfilter, options
from mitmproxy import flowfilter
from mitmproxy import options
from mitmproxy.proxy import config
from mitmproxy.contrib import tnetstring
from mitmproxy.exceptions import FlowReadException
from mitmproxy import flow
from mitmproxy import http
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy.server import DummyServer
from mitmproxy import master
from . import tservers
@ -16,23 +17,6 @@ from . import tservers
class TestSerialize:
def _treader(self):
sio = io.BytesIO()
w = mitmproxy.io.FlowWriter(sio)
for i in range(3):
f = tflow.tflow(resp=True)
w.add(f)
for i in range(3):
f = tflow.tflow(err=True)
w.add(f)
f = tflow.ttcpflow()
w.add(f)
f = tflow.ttcpflow(err=True)
w.add(f)
sio.seek(0)
return mitmproxy.io.FlowReader(sio)
def test_roundtrip(self):
sio = io.BytesIO()
f = tflow.tflow()
@ -51,26 +35,6 @@ class TestSerialize:
assert f2.request == f.request
assert f2.marked
def test_load_flows(self):
r = self._treader()
s = tservers.TestState()
fm = master.Master(None, DummyServer())
fm.addons.add(s)
fm.load_flows(r)
assert len(s.flows) == 6
def test_load_flows_reverse(self):
r = self._treader()
s = tservers.TestState()
opts = options.Options(
mode="reverse:https://use-this-domain"
)
conf = ProxyConfig(opts)
fm = master.Master(opts, DummyServer(conf))
fm.addons.add(s)
fm.load_flows(r)
assert s.flows[0].request.host == "use-this-domain"
def test_filter(self):
sio = io.BytesIO()
flt = flowfilter.parse("~c 200")
@ -122,6 +86,17 @@ class TestSerialize:
class TestFlowMaster:
def test_load_flow_reverse(self):
s = tservers.TestState()
opts = options.Options(
mode="reverse:https://use-this-domain"
)
conf = config.ProxyConfig(opts)
fm = master.Master(opts, DummyServer(conf))
fm.addons.add(s)
f = tflow.tflow(resp=True)
fm.load_flow(f)
assert s.flows[0].request.host == "use-this-domain"
def test_replay(self):
fm = master.Master(None, DummyServer())

View File

@ -5,6 +5,7 @@ from mitmproxy import proxy
from mitmproxy import options
from mitmproxy.tools.console import common
from ... import tservers
import urwid
def test_format_keyvals():
@ -35,7 +36,10 @@ class TestMaster(tservers.MasterTest):
def test_basic(self):
m = self.mkmaster()
for i in (1, 2, 3):
self.dummy_cycle(m, 1, b"")
try:
self.dummy_cycle(m, 1, b"")
except urwid.ExitMainLoop:
pass
assert len(m.view) == i
def test_run_script_once(self):

View File

@ -2,7 +2,6 @@ import pytest
from unittest import mock
from mitmproxy import proxy
from mitmproxy import exceptions
from mitmproxy import log
from mitmproxy import controller
from mitmproxy import options
@ -17,18 +16,6 @@ class TestDumpMaster(tservers.MasterTest):
m = dump.DumpMaster(o, proxy.DummyServer(), with_termlog=False, with_dumper=False)
return m
def test_read(self, tmpdir):
p = str(tmpdir.join("read"))
self.flowfile(p)
self.dummy_cycle(
self.mkmaster(None, rfile=p),
1, b"",
)
with pytest.raises(exceptions.OptionsError):
self.mkmaster(None, rfile="/nonexistent")
with pytest.raises(exceptions.OptionsError):
self.mkmaster(None, rfile="test_dump.py")
def test_has_error(self):
m = self.mkmaster(None)
ent = log.LogEntry("foo", "error")

View File

@ -80,7 +80,7 @@ class TestMaster(master.Master):
self.addons.add(self.state)
self.addons.add(*addons)
self.addons.configure_all(self.options, self.options.keys())
self.addons.invoke_all_with_context("running")
self.addons.trigger("running")
def clear_log(self):
self.tlog = []