Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@
"build": "node scripts/build.js",
"build_python": "node scripts/build_python.js",
"bench": "node scripts/bench.js",
"bench_python": "python3 python/perspective/bench/perspective_benchmark.py",
"setup": "node scripts/setup.js",
"docs": "node scripts/docs.js",
"test": "node scripts/test.js",
Expand Down
11 changes: 11 additions & 0 deletions python/perspective/bench/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#

# Use an explicit relative import: a bare `from bench import ...` only
# resolves when this directory itself is on `sys.path` (e.g. when run as a
# script) and fails when `bench` is imported as a package under Python 3.
from .bench import Benchmark, Suite, Runner

__all__ = ["Benchmark", "Suite", "Runner"]
200 changes: 200 additions & 0 deletions python/perspective/bench/bench.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,200 @@
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import logging
import os
import signal
import sys
from timeit import timeit

import tornado
# Import the submodules explicitly: `import tornado` alone does not load
# `tornado.web` / `tornado.ioloop`, which this module references at class
# definition time.
import tornado.ioloop
import tornado.web

sys.path.insert(1, os.path.join(os.path.dirname(__file__), '..'))
from perspective import Table, PerspectiveManager, PerspectiveTornadoHandler  # noqa: E402

logging.basicConfig(level=logging.INFO)


class BenchmarkTornadoHandler(tornado.web.RequestHandler):
    """Host the results of the benchmark suite over a websocket."""

    # Permissive CORS headers so the results page can be fetched from any
    # origin (e.g. a local asset dev server running on a different port).
    _CORS_HEADERS = (
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Allow-Headers", "x-requested-with"),
        ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
    )

    def set_default_headers(self):
        """Apply the CORS headers to every response."""
        for name, value in self._CORS_HEADERS:
            self.set_header(name, value)

    def get(self):
        """Render the static viewer page that connects to the websocket."""
        self.render("benchmark.html")


class Benchmark(object):
    """A single benchmark function.

    Wraps a stateless, parameterless callable together with a metadata
    dictionary.  Note: despite appearances this is used as a plain wrapper,
    not as a decorator.

    Example:
        >>> func = Benchmark(lambda: self._view.to_records(), meta={
        ...     "name": "to_records",
        ...     "group": "view"
        ... })
    """

    def __init__(self, func, meta=None):
        """Wrap `func` and attach each item of `meta` as an attribute.

        Args:
            func (callable): a zero-argument callable with no reference to
                a `self` object.
            meta (dict): a metadata dictionary, whose keys and values
                become attributes on this wrapper (prefixed with
                `__BENCH__` so `Runner` can recover them later).  The
                metadata dictionary should be consistent within each
                suite, i.e. there should be no additional values in
                between different benchmarks.
        """
        self._func = func
        # `meta=None` default avoids the shared mutable-default-argument
        # pitfall; the original `meta={}` would be shared across calls.
        self._meta = meta if meta is not None else {}
        # Marker attribute used by `Runner` to discover benchmarks.
        self.benchmark = True

        for k, v in self._meta.items():
            marked_key = "__BENCH__{0}".format(k)
            setattr(self, marked_key, v)

    def __call__(self):
        """Call the wrapped parameterless callable."""
        self._func()


class Suite(object):
    """A benchmark suite stub with `register_benchmarks` and generic
    lifecycle hooks.

    Inherit from this class and implement `register_benchmarks`, which
    should set all benchmark methods as attributes on the class.  The
    `before_*`/`after_*` hooks are no-ops by default and may be overridden.
    """

    def register_benchmarks(self):
        """Registers all callbacks with `Runner`.

        This function must be implemented in all child classes of `Suite.`
        """
        raise NotImplementedError(
            "Must implement `register_benchmarks` to run benchmark suite.")

    def before_all(self):
        """Hook run once before the whole suite; no-op by default."""

    def after_all(self):
        """Hook run once after the whole suite; no-op by default."""

    def before_each(self):
        """Hook run before each benchmark; no-op by default."""

    def after_each(self):
        """Hook run after each benchmark; no-op by default."""


class Runner(object):

ITERATIONS = 10

def __init__(self, suite):
"""Initializes a benchmark runner for the `Suite`.

Args:
suite (Suite) : A class that inherits from `Suite`, with any number
of instance methods decorated with `@benchmark`.
"""
self._suite = suite
self._benchmarks = []
self._table = None
self._WROTE_RESULTS = False
self._HOSTING = False

self._suite.register_benchmarks()

class_attrs = self._suite.__class__.__dict__.items()
instance_attrs = self._suite.__dict__.items()

for (k, v) in class_attrs:
if hasattr(v, "benchmark") and getattr(v, "benchmark") is True:
logging.info("Registering {0}".format(k))
self._benchmarks.append(v)

for (k, v) in instance_attrs:
if hasattr(v, "benchmark") and getattr(v, "benchmark") is True:
logging.info("Registering {0}".format(k))
self._benchmarks.append(v)

# Write results on SIGINT
signal.signal(signal.SIGINT, self.sigint_handler)

def sigint_handler(self, signum, frame):
"""On SIGINT, host the results over a websocket."""
if not self._WROTE_RESULTS:
self.write_results()
if not self._HOSTING:
self.host_results()
else:
sys.exit(0)

def host_results(self):
"""Create a tornado application that hosts the results table over a
websocket."""
if self._table is None:
return
MANAGER = PerspectiveManager()
MANAGER.host_table("benchmark_results", self._table)
application = tornado.web.Application([
(r"/", BenchmarkTornadoHandler),
# create a websocket endpoint that the client Javascript can access
(r"/websocket", PerspectiveTornadoHandler, {
"manager": MANAGER,
"check_origin": True
})
])
self._HOSTING = True
application.listen(8888)
logging.critical("Displaying results at http://localhost:8888")
loop = tornado.ioloop.IOLoop.current()
loop.start()

def write_results(self):
if self._table is None:
return
logging.info("Writing results to `benchmark.arrow`")
arrow_path = os.path.join(os.path.dirname(__file__), "benchmark.arrow")
with open(arrow_path, "wb") as file:
arrow = self._table.view().to_arrow()
file.write(arrow)
self._WROTE_RESULTS = True
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be nice if this method could incrementally write to an existing arrow so we can locally archive multiple versions, by checking to see if a `benchmark.arrow` file exists on load. It would also enable cross-language benchmarks in the same archive.


def run_method(self, func, *args, **kwargs):
"""Wrap the benchmark `func` with timing code and run for n
`ITERATIONS`, returning a result row that can be fed into Perspective.
"""
overall_result = {
k.replace("__BENCH__", ""):
v for (k, v) in func.__dict__.items() if "__BENCH__" in k
}

result = timeit(func, number=Runner.ITERATIONS) / Runner.ITERATIONS
overall_result["__TIME__"] = result
return overall_result

def run(self):
"""Runs each benchmark function from the suite for n `ITERATIONS`,
timing each function and writing the results to a `perspective.Table`.
"""
logging.info("Running benchmark suite...")
for benchmark in self._benchmarks:
result = self.run_method(benchmark)
print(result)
if self._table is None:
self._table = Table([result])
else:
self._table.update([result])
self.write_results()
self.host_results()
54 changes: 54 additions & 0 deletions python/perspective/bench/benchmark.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
<!--

Copyright (c) 2019, the Perspective Authors.

This file is part of the Perspective library, distributed under the terms of
the Apache License 2.0. The full license can be found in the LICENSE file.

-->

<!DOCTYPE html>
<html>

<head>

<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no">

<!-- Perspective assets are served from a local dev server on port 8080. -->
<script src="http://localhost:8080/perspective-viewer"></script>
<script src="http://localhost:8080/perspective-viewer-hypergrid"></script>
<script src="http://localhost:8080/perspective-viewer-d3fc"></script>
<script src="http://localhost:8080/perspective"></script>

<link rel='stylesheet' href="http://localhost:8080/material.dark.css">

<style>
perspective-viewer{position:absolute;top:0;left:0;right:0;bottom:0;}
</style>

</head>

<body>

<!-- The `aggregates` attribute must be valid JSON (double-quoted keys and
     values), matching the quoting style of `columns`/`row-pivots` below;
     the previous single-quoted object was not parseable as JSON. -->
<perspective-viewer
id="viewer"
editable
plugin="d3_y_bar"
columns='["__TIME__"]'
row-pivots='["group", "name"]'
column-pivots='["group", "name"]'
aggregates='{"__TIME__": "avg"}'>
</perspective-viewer>

<script>

window.addEventListener('WebComponentsReady', async function() {
    const websocket = perspective.websocket("ws://localhost:8888/websocket");
    const table = websocket.open_table('benchmark_results');
    document.getElementById('viewer').load(table);
});

</script>

</body>

</html>
Loading