Improve benchmarking

- The benchmark addon now manages setting up and tearing down the backend and
traffic-generation processes itself.
- Use wrk instead of hey. I get more consistent results with this tool, and hey
shows a strange tail-latency bump that seems artificial.
- Simplify termination behaviour. The benchmark revealed a bug where .done
events were not called if the proxy was shut down by an addon (see the sketch
below).
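
For context on the termination fix, the failing pattern is an addon that shuts
the proxy down itself and still expects its done hook to run. A minimal sketch
of such an addon (hypothetical, not part of this commit):

    from mitmproxy import ctx

    class SelfTerminating:
        def running(self):
            # Shut the proxy down from inside an addon. Before this commit,
            # this path skipped the "done" lifecycle event.
            ctx.master.shutdown()

        def done(self):
            # Cleanup here must run even on addon-initiated shutdown.
            ctx.log.info("done hook fired")

    addons = [SelfTerminating()]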
Aldo Cortesi 2018-04-07 07:48:58 +12:00
parent 5f74adc2df
commit b663a224a3
9 changed files with 88 additions and 93 deletions

mitmproxy/controller.py

@@ -21,26 +21,28 @@ class Channel:
         Raises:
             exceptions.Kill: All connections should be closed immediately.
         """
-        m.reply = Reply(m)
-        asyncio.run_coroutine_threadsafe(
-            self.master.addons.handle_lifecycle(mtype, m),
-            self.loop,
-        )
-        g = m.reply.q.get()
-        if g == exceptions.Kill:
-            raise exceptions.Kill()
-        return g
+        if not self.should_exit.is_set():
+            m.reply = Reply(m)
+            asyncio.run_coroutine_threadsafe(
+                self.master.addons.handle_lifecycle(mtype, m),
+                self.loop,
+            )
+            g = m.reply.q.get()
+            if g == exceptions.Kill:
+                raise exceptions.Kill()
+            return g
 
     def tell(self, mtype, m):
         """
         Decorate a message with a dummy reply attribute, send it to the master,
         then return immediately.
         """
-        m.reply = DummyReply()
-        asyncio.run_coroutine_threadsafe(
-            self.master.addons.handle_lifecycle(mtype, m),
-            self.loop,
-        )
+        if not self.should_exit.is_set():
+            m.reply = DummyReply()
+            asyncio.run_coroutine_threadsafe(
+                self.master.addons.handle_lifecycle(mtype, m),
+                self.loop,
+            )
 
 
 NO_REPLY = object()  # special object we can distinguish from a valid "None" reply.
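
The new should_exit guard in both methods matters because
run_coroutine_threadsafe against a loop that is already stopping returns a
future that may never complete, leaving the calling thread blocked on
m.reply.q.get(). A standalone illustration of the mechanism (plain asyncio,
not mitmproxy code):

    import asyncio
    import threading

    async def noop():
        return "ok"

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    # While the loop runs, cross-thread scheduling completes normally.
    fut = asyncio.run_coroutine_threadsafe(noop(), loop)
    print(fut.result(timeout=1))  # "ok"

    loop.call_soon_threadsafe(loop.stop)
    # Once the loop has stopped, the same call would queue a callback that
    # never runs, and fut.result() would block forever -- hence the guard.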

mitmproxy/master.py

@@ -81,7 +81,6 @@ class Master:
         self.addons.trigger("running")
         while True:
             if self.should_exit.is_set():
-                asyncio.get_event_loop().stop()
                 return
             self.addons.trigger("tick")
             await asyncio.sleep(0.1)
@@ -94,13 +93,18 @@
             loop.run_forever()
         finally:
             self.shutdown()
+            pending = asyncio.Task.all_tasks()
+            loop.run_until_complete(asyncio.gather(*pending))
+            loop.close()
         self.addons.trigger("done")
 
     def shutdown(self):
-        if self.server:
-            self.server.shutdown()
-        self.should_exit.set()
+        if not self.should_exit.is_set():
+            if self.server:
+                self.server.shutdown()
+            self.should_exit.set()
+            loop = asyncio.get_event_loop()
+            loop.stop()
 
     def _change_reverse_host(self, f):
         """
test/bench/README.md

@@ -1,7 +1,7 @@
-This directory contains a set of tools for benchmarking and profiling mitmproxy.
-At the moment, this is simply to give developers a quick way to see the impact
-of their work. Eventually, this might grow into a performance dashboard with
+This directory contains an addon for benchmarking and profiling mitmproxy. At
+the moment, this is simply to give developers a quick way to see the impact of
+their work. Eventually, this might grow into a performance dashboard with
 historical data, so we can track performance over time.
@@ -9,48 +9,18 @@ historical data, so we can track performance over time.
 Install the following tools:
 
-    go get -u github.com/rakyll/hey
+    https://github.com/wg/wrk
     go get github.com/cortesi/devd/cmd/devd
 
 You may also want to install snakeviz to make viewing profiles easier:
 
     pip install snakeviz
 
-In one window, run the devd server:
-
-    ./backend
-
-# Running tests
-
-Each run consists of two files - a mitmproxy invocation, and a traffic generator.
-Make sure the backend is started, then run the proxy:
-
-    ./simple.mitmproxy
-
-Now run the traffic generator:
-
-    ./simple.traffic
-
-After the run is done, quit the proxy with ctrl-c.
-
-# Reading results
-
-Results are placed in the ./results directory. You should see two files - a
-performance log from **hey**, and a profile. You can view the profile like so:
-
-    snakeviz ./results/simple.prof
+Now run the benchmark by loading the addon. A typical invocation is as follows:
+
+    mitmdump -p0 -q --set benchmark_save_path=/tmp/foo -s ./benchmark.py
+
+This will start up the backend server, run the benchmark, save the results to
+/tmp/foo.bench and /tmp/foo.prof, and exit.
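
As before, the saved profile can be opened with the snakeviz viewer installed
above; only the output path has changed (assuming the invocation shown here):

    snakeviz /tmp/foo.prof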

test/bench/backend (deleted)

@@ -1,3 +0,0 @@
-#!/bin/sh
-devd -p 10001 .

test/bench/benchmark.py Normal file

@@ -0,0 +1,51 @@
+import asyncio
+import cProfile
+
+from mitmproxy import ctx
+
+
+class Benchmark:
+    """
+    A simple benchmarking and profiling addon.
+    """
+    def __init__(self):
+        self.pr = cProfile.Profile()
+        self.started = False
+
+    async def procs(self):
+        ctx.log.error("starting benchmark")
+        backend = await asyncio.create_subprocess_exec("devd", "-q", "-p", "10001", ".")
+        traf = await asyncio.create_subprocess_exec(
+            "wrk",
+            "-c50",
+            "-d5s",
+            "http://localhost:%s/benchmark.py" % ctx.master.server.address[1],
+            stdout=asyncio.subprocess.PIPE
+        )
+        stdout, _ = await traf.communicate()
+        with open(ctx.options.benchmark_save_path + ".bench", mode="wb") as f:
+            f.write(stdout)
+        ctx.log.error(stdout.decode("ascii"))
+        backend.kill()
+        ctx.master.shutdown()
+
+    def load(self, loader):
+        loader.add_option(
+            "benchmark_save_path",
+            str,
+            "/tmp/profile",
+            "Destination for the .prof and .bench result files"
+        )
+        ctx.options.update(
+            mode="reverse:http://devd.io:10001",
+        )
+        self.pr.enable()
+
+    def running(self):
+        if not self.started:
+            self.started = True
+            asyncio.get_event_loop().create_task(self.procs())
+
+    def done(self):
+        self.pr.dump_stats(ctx.options.benchmark_save_path + ".prof")
+
+
+addons = [Benchmark()]
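
The profiling half of the addon is plain cProfile: enabled in load, dumped in
done. Besides snakeviz, the resulting .prof file can be read with the stdlib
pstats module; a minimal sketch, assuming the default benchmark_save_path:

    import pstats

    # Load the stats written by Benchmark.done() (default save path + ".prof").
    stats = pstats.Stats("/tmp/profile.prof")
    stats.sort_stats("cumulative").print_stats(10)  # ten hottest call paths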

test/bench/profiler.py (deleted)

@@ -1,25 +0,0 @@
-import cProfile
-
-from mitmproxy import ctx
-
-
-class Profile:
-    """
-    A simple profiler addon.
-    """
-    def __init__(self):
-        self.pr = cProfile.Profile()
-
-    def load(self, loader):
-        loader.add_option(
-            "profile_path",
-            str,
-            "/tmp/profile",
-            "Destination for the run profile, saved at exit"
-        )
-        self.pr.enable()
-
-    def done(self):
-        self.pr.dump_stats(ctx.options.profile_path)
-
-
-addons = [Profile()]

test/bench/run Executable file

@@ -0,0 +1,4 @@
+#!/bin/sh
+mkdir -p results
+mitmdump -p0 -q --set benchmark_save_path=./results/mitmdump -s ./benchmark.py

test/bench/simple.mitmproxy (deleted)

@@ -1,5 +0,0 @@
-#!/bin/sh
-mkdir -p results
-mitmdump -p 10002 --mode reverse:http://devd.io:10001 \
-    -s ./profiler.py --set profile_path=./results/simple.prof

test/bench/simple.traffic (deleted)

@@ -1,3 +0,0 @@
-#!/bin/sh
-hey -disable-keepalive http://localhost:10002/profiler.py | tee ./results/simple.perf