Skip to content

Commit e67d19e

Browse files
authored
Add benchmarks suite (#15)
1 parent 2cf2b0c commit e67d19e

File tree

9 files changed

+668
-0
lines changed

9 files changed

+668
-0
lines changed

.github/workflows/benchmarks.yml

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
# Manually-triggered benchmark suite: builds the rloop wheel(s), runs the
# benchmarks, and opens a PR with the rendered results.
name: benchmarks

on: workflow_dispatch

jobs:
  # Baseline run on a single Python version (3.12).
  benchmark-base:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      # Build a release wheel with maturin (Rust -> Python extension).
      - uses: pyo3/maturin-action@v1
        with:
          command: build
          args: --release --interpreter python3.12
          target: x64
          manylinux: auto
          container: off
      - run: |
          export _whl=$(ls target/wheels/rloop-*.whl)
          pip install $_whl numpy uvloop
      - name: benchmark
        working-directory: ./benchmarks
        run: |
          python benchmarks.py raw
      - name: upload results
        uses: actions/upload-artifact@v4
        with:
          name: results-base
          path: benchmarks/results/*

  # Same raw benchmark repeated across CPython 3.10-3.13, one venv each.
  benchmark-pyver:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: |
            3.10
            3.11
            3.12
            3.13
      - uses: pyo3/maturin-action@v1
        with:
          command: build
          args: --release --interpreter python3.10 python3.11 python3.12 python3.13
          target: x64
          manylinux: auto
          container: off
      - name: setup venvs
        run: |
          python3.10 -m venv .venv310
          python3.11 -m venv .venv311
          python3.12 -m venv .venv312
          python3.13 -m venv .venv313
          .venv310/bin/pip install $(ls target/wheels/rloop-*-cp310-*.whl) numpy uvloop
          .venv311/bin/pip install $(ls target/wheels/rloop-*-cp311-*.whl) numpy uvloop
          .venv312/bin/pip install $(ls target/wheels/rloop-*-cp312-*.whl) numpy uvloop
          .venv313/bin/pip install $(ls target/wheels/rloop-*-cp313-*.whl) numpy uvloop
      # BENCHMARK_EXC_PREFIX points benchmarks.py at each venv's interpreter.
      - name: benchmark
        working-directory: ./benchmarks
        run: |
          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv310/bin ${{ github.workspace }}/.venv310/bin/python benchmarks.py raw
          mv results/data.json results/py310.json
          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv311/bin ${{ github.workspace }}/.venv311/bin/python benchmarks.py raw
          mv results/data.json results/py311.json
          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv312/bin ${{ github.workspace }}/.venv312/bin/python benchmarks.py raw
          mv results/data.json results/py312.json
          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv313/bin ${{ github.workspace }}/.venv313/bin/python benchmarks.py raw
          mv results/data.json results/py313.json
      - name: upload results
        uses: actions/upload-artifact@v4
        with:
          name: results-pyver
          path: benchmarks/results/*

  # Render the markdown reports with noir and open a PR with the updates.
  results:
    runs-on: ubuntu-latest
    needs: [benchmark-base, benchmark-pyver]

    steps:
      - uses: actions/checkout@v4
      - uses: gi0baro/setup-noir@v1
      - uses: actions/download-artifact@v4
        with:
          name: results-base
          path: benchmarks/results
      # Rename so the pyver artifact's data files don't collide with it.
      - run: |
          mv benchmarks/results/data.json benchmarks/results/base.json
      - uses: actions/download-artifact@v4
        with:
          name: results-pyver
          path: benchmarks/results
      - name: render
        working-directory: ./benchmarks
        run: |
          noir -c data:results/base.json -v 'benv=GHA Linux x86_64' templates/main.md > README.md
          noir \
            -c data310:results/py310.json \
            -c data311:results/py311.json \
            -c data312:results/py312.json \
            -c data313:results/py313.json \
            -v pyvb=310 -v 'benv=GHA Linux x86_64' \
            templates/pyver.md > pyver.md
      - name: open PR
        uses: peter-evans/create-pull-request@v6
        with:
          branch: benchmarks-update
          branch-suffix: timestamp
          title: Update benchmark results
          body: SSIA
          commit-message: |
            Update benchmark results
          add-paths: |
            benchmarks/README.md
            benchmarks/pyver.md

benchmarks/benchmarks.py

Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,158 @@
1+
import datetime
2+
import json
3+
import multiprocessing
4+
import os
5+
import signal
6+
import subprocess
7+
import sys
8+
import time
9+
from contextlib import contextmanager
10+
from pathlib import Path
11+
12+
13+
# Directory containing this script (server.py / client.py live beside it).
WD = Path(__file__).resolve().parent
CPU = multiprocessing.cpu_count()
# Event loop implementations to compare.
LOOPS = ['asyncio', 'rloop', 'uvloop']
# Message payload sizes in bytes: 1 KiB, 10 KiB, 100 KiB.
MSGS = [1024, 1024 * 10, 1024 * 100]
# Client concurrency levels: 1, half the cores, all-but-one core.
# Integer division (//) keeps every level an int; `CPU / 2` yielded a
# float (e.g. 4.0) that was then passed as a `--concurrency` CLI argument.
CONCURRENCIES = sorted({1, max(CPU // 2, 1), max(CPU - 1, 1)})
18+
19+
20+
@contextmanager
def server(loop, streams=False, proto=False):
    """Run server.py with the given event loop as a background subprocess.

    Args:
        loop: event loop name forwarded as `--loop` (e.g. 'asyncio').
        streams: add the `--streams` flag to the server command line.
        proto: add the `--proto` flag to the server command line.

    Yields the `subprocess.Popen` handle after a 2-second warm-up pause,
    and always kills the server's whole process group on exit.
    """
    exc_prefix = os.environ.get('BENCHMARK_EXC_PREFIX')
    py = 'python'
    if exc_prefix:
        py = f'{exc_prefix}/{py}'
    target = WD / 'server.py'
    proc_cmd = f'{py} {target} --loop {loop}'
    if streams:
        proc_cmd += ' --streams'
    if proto:
        proc_cmd += ' --proto'

    # setsid puts the shell and the server in their own process group so
    # the whole tree can be killed at once below.
    proc = subprocess.Popen(proc_cmd, shell=True, preexec_fn=os.setsid)  # noqa: S602
    time.sleep(2)  # give the server time to bind before clients connect
    try:
        yield proc
    finally:
        # try/finally guarantees cleanup; without it, an exception raised
        # inside the with-body would leak the server process group.
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
37+
38+
39+
def client(duration, concurrency, msgsize):
    """Run client.py against the running server and return its JSON output.

    Args:
        duration: benchmark duration in seconds (forwarded as `--duration`).
        concurrency: number of concurrent connections (`--concurrency`).
        msgsize: message size in bytes (`--msize`).

    Returns the parsed JSON results dict, or {} (with a warning printed)
    on any failure, so one broken run does not abort the whole suite.
    """
    exc_prefix = os.environ.get('BENCHMARK_EXC_PREFIX')
    py = 'python'
    if exc_prefix:
        py = f'{exc_prefix}/{py}'
    target = WD / 'client.py'
    # Argument-list invocation (no shell) is robust against spaces in
    # BENCHMARK_EXC_PREFIX and avoids shell-quoting/injection pitfalls
    # of the previous `shell=True` string command.
    cmd = [
        py,
        str(target),
        '--concurrency', str(concurrency),
        '--duration', str(duration),
        '--msize', str(msgsize),
        '--output', 'json',
    ]
    try:
        proc = subprocess.run(cmd, check=True, capture_output=True)
        return json.loads(proc.stdout.decode('utf8'))
    except Exception as e:  # deliberate best-effort: report and carry on
        print(f'WARN: got exception {e} while loading client data')
        return {}
65+
66+
67+
def benchmark(msgs=None, concurrencies=None):
    """Run the client across a (concurrency x message size) grid.

    Args:
        msgs: message sizes in bytes; defaults to MSGS. A single int is
            accepted and treated as a one-element list.
        concurrencies: concurrency levels; defaults to CONCURRENCIES.
            A single int is accepted and treated as a one-element list.

    Returns a nested dict: {concurrency: {msgsize: client_results}}.
    """
    concurrencies = concurrencies or CONCURRENCIES
    msgs = msgs or MSGS
    # Accept scalars so callers may pass e.g. CONCURRENCIES[0] directly;
    # previously an int argument crashed on max() and on iteration.
    if isinstance(concurrencies, int):
        concurrencies = [concurrencies]
    if isinstance(msgs, int):
        msgs = [msgs]
    results = {}
    # primer: one throwaway run so interpreter and server are warm
    client(1, 1, 1024)
    time.sleep(1)
    # warm up at the heaviest configuration
    client(1, max(concurrencies), 1024 * 100)
    time.sleep(2)
    # bench
    for concurrency in concurrencies:
        cres = results[concurrency] = {}
        for msg in msgs:
            cres[msg] = client(10, concurrency, msg)
            time.sleep(3)  # let the server drain between runs
        time.sleep(1)
    return results
86+
87+
88+
def raw():
    """Benchmark raw sockets at the lowest concurrency for every loop."""
    results = {}
    for loop in LOOPS:
        with server(loop):
            # Pass a one-element slice, not CONCURRENCIES[0] (an int):
            # benchmark() iterates over this argument.
            results[loop] = benchmark(concurrencies=CONCURRENCIES[:1])
    return results
94+
95+
96+
def stream():
    """Benchmark the streams API at the lowest concurrency for every loop."""
    results = {}
    for loop in LOOPS:
        with server(loop, streams=True):
            # One-element slice, not CONCURRENCIES[0] (an int):
            # benchmark() iterates over this argument.
            results[loop] = benchmark(concurrencies=CONCURRENCIES[:1])
    return results
102+
103+
104+
def proto():
    """Benchmark the protocol API at the lowest concurrency for every loop."""
    results = {}
    for loop in LOOPS:
        with server(loop, proto=True):
            # One-element slice, not CONCURRENCIES[0] (an int):
            # benchmark() iterates over this argument.
            results[loop] = benchmark(concurrencies=CONCURRENCIES[:1])
    return results
110+
111+
112+
def concurrency():
    """Benchmark every loop at 1 KiB messages across the higher concurrency levels."""

    def _measure(loop_name):
        # Boot the server for this loop, run the grid, tear it down.
        with server(loop_name):
            return benchmark(msgs=[1024], concurrencies=CONCURRENCIES[1:])

    return {loop_name: _measure(loop_name) for loop_name in LOOPS}
118+
119+
120+
def _rloop_version():
    """Return the version string of the installed rloop package."""
    # Imported lazily so merely loading this module never requires rloop.
    import rloop as _rloop

    return _rloop.__version__
124+
125+
126+
def run():
    """CLI entry point: run the suites named in argv and dump JSON results.

    Suite names come from sys.argv (default: just 'raw'); unknown names
    are ignored. Results plus run metadata (CPU count, UTC timestamp,
    Python and rloop versions) are written to results/data.json.
    """
    all_benchmarks = {
        'raw': raw,
        'stream': stream,
        'proto': proto,
        'concurrency': concurrency,
    }
    inp_benchmarks = sys.argv[1:] or ['raw']
    run_benchmarks = set(inp_benchmarks) & set(all_benchmarks.keys())

    # Timezone-aware "now": naive utcnow().timestamp() is interpreted as
    # *local* time by CPython, which skewed the recorded epoch off-UTC hosts.
    now = datetime.datetime.now(datetime.timezone.utc)
    results = {}
    for benchmark_key in run_benchmarks:
        results[benchmark_key] = all_benchmarks[benchmark_key]()

    # Make sure the output directory exists before opening the file.
    os.makedirs('results', exist_ok=True)
    pyver = sys.version_info
    with open('results/data.json', 'w') as f:
        json.dump(
            {
                'cpu': CPU,
                'run_at': int(now.timestamp()),
                'pyver': f'{pyver.major}.{pyver.minor}',
                'results': results,
                'rloop': _rloop_version(),
            },
            f,
        )
155+
156+
157+
# Script entry point: run the benchmark suites named on the command line.
if __name__ == '__main__':
    run()

0 commit comments

Comments
 (0)