Add a new flow loading mechanism.
We now simulate the normal connection flow when we load flows. That means that we can run scripts, hooks, sticky cookies, etc.
parent 6175d92583
commit 613e9a298e
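For orientation, here is a rough sketch of how the new loading path could be driven from code, based only on the APIs visible in the diff below. The package name libmproxy and the dump file name "flows.dump" are assumptions for illustration, not part of this change.

    # Sketch only: feed a saved flow dump through FlowMaster.load_flows so that
    # each flow passes through the normal processing hooks (scripts, sticky
    # cookies) instead of being copied straight into the state.
    # "flows.dump" is a hypothetical path; libmproxy is assumed importable.
    from libmproxy import flow

    s = flow.State()
    fm = flow.FlowMaster(None, s)

    f = file("flows.dump", "r")   # Python 2 file(), as used in the console code
    try:
        fm.load_flows(flow.FlowReader(f))
    finally:
        f.close()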
@@ -1127,11 +1127,10 @@ class ConsoleMaster(flow.FlowMaster):
         try:
             f = file(path, "r")
             fr = flow.FlowReader(f)
-            data = list(fr.stream())
-            f.close()
         except IOError, v:
             return v.strerror
-        self.state.load_flows(data)
+        flow.FlowMaster.load_flows(self, fr)
+        f.close()
         if self.conn_list_view:
             self.sync_list_view()
             self.focus_current()
@@ -542,6 +542,20 @@ class FlowMaster(controller.Master):

         controller.Master.tick(self, q)

+    def load_flows(self, fr):
+        """
+            Load flows from a FlowReader object.
+        """
+        for i in fr.stream():
+            if i.request:
+                f = self.state.add_request(i.request)
+                self.process_new_request(f)
+            if i.response:
+                f = self.state.add_response(i.response)
+                self.process_new_response(f)
+            if i.error:
+                f = self.state.add_error(i.error)
+
     def process_new_request(self, f):
         if self.stickycookie_state:
             self.stickycookie_state.handle_request(f)
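Because load_flows routes every stored flow through process_new_request and process_new_response, a FlowMaster subclass sees loaded flows the same way it sees live ones. A minimal sketch, assuming the FlowMaster(server, state) constructor used by the tests; the CountingMaster class is hypothetical:

    # Hypothetical subclass: these hooks fire for flows read from a dump file
    # just as they do for flows captured from a live connection.
    class CountingMaster(flow.FlowMaster):
        def __init__(self, server, state):
            flow.FlowMaster.__init__(self, server, state)
            self.seen_requests = 0
            self.seen_responses = 0

        def process_new_request(self, f):
            self.seen_requests += 1
            flow.FlowMaster.process_new_request(self, f)

        def process_new_response(self, f):
            self.seen_responses += 1
            flow.FlowMaster.process_new_response(self, f)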
@@ -560,6 +574,12 @@ class FlowMaster(controller.Master):
         else:
             f.request.ack()

+    def process_new_response(self, f):
+        if self.stickycookie_state:
+            self.stickycookie_state.handle_response(f)
+        if "response" in self.scripts:
+            self._runscript(f, self.scripts["response"])
+
     def replay_request(self, f):
         """
             Returns None if successful, or error message if not.
@@ -605,10 +625,7 @@ class FlowMaster(controller.Master):
                 self.client_playback.clear(f)
             if not f:
                 r.ack()
-            if self.stickycookie_state:
-                self.stickycookie_state.handle_response(f)
-            if "response" in self.scripts:
-                self._runscript(f, self.scripts["response"])
+            self.process_new_response(f)
         return f

@@ -369,6 +369,19 @@ class uState(libpry.AutoTree):


 class uSerialize(libpry.AutoTree):
+    def _treader(self):
+        sio = StringIO()
+        w = flow.FlowWriter(sio)
+        for i in range(3):
+            f = tutils.tflow_full()
+            w.add(f)
+        for i in range(3):
+            f = tutils.tflow_err()
+            w.add(f)
+
+        sio.seek(0)
+        return flow.FlowReader(sio)
+
     def test_roundtrip(self):
         sio = StringIO()
         f = tutils.tflow()
@@ -381,6 +394,14 @@ class uSerialize(libpry.AutoTree):
         assert len(l) == 1
         assert l[0] == f

+    def test_load_flows(self):
+        r = self._treader()
+        s = flow.State()
+        fm = flow.FlowMaster(None, s)
+        fm.load_flows(r)
+        assert len(s.flow_list) == 6
+
+
     def test_error(self):
         sio = StringIO()
         sio.write("bogus")
@@ -32,6 +32,13 @@ def tflow_full():
     return f


+def tflow_err():
+    r = treq()
+    f = flow.Flow(r)
+    f.error = proxy.Error(r, "error")
+    return f
+
+
 # Yes, the random ports are horrible. During development, sockets are often not
 # properly closed during error conditions, which means you have to wait until
 # you can re-bind to the same port. This is a pain in the ass, so we just pick