summaryrefslogtreecommitdiff
path: root/nemesis/html_gen.py
diff options
context:
space:
mode:
author: bd <bdunahu@operationnull.com> 2025-10-15 00:16:38 -0400
committer: bd <bdunahu@operationnull.com> 2025-10-15 00:16:38 -0400
commit: 1a439653264adc3d9648645d2bb4006f331c1722 (patch)
tree: 27a4d50f3fb1cd333b15ec372500dd78cfb296b9 /nemesis/html_gen.py
parent: 16aa392a3c08c8769cc30bdbc1830a31f9b0808e (diff)
Fix a lot of bugs--core callbacks being delayed, reused handles
Diffstat (limited to 'nemesis/html_gen.py')
-rw-r--r--  nemesis/html_gen.py  218
1 file changed, 132 insertions, 86 deletions
diff --git a/nemesis/html_gen.py b/nemesis/html_gen.py
index 60d0c06..bffa748 100644
--- a/nemesis/html_gen.py
+++ b/nemesis/html_gen.py
@@ -1,6 +1,9 @@
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import hashlib
+import math
+
+TRIM_PERCENT = 0.05
def get_color(name):
hash_object = hashlib.md5(name.encode())
@@ -8,92 +11,135 @@ def get_color(name):
return f'hsl({color_index}, 100%, 50%)'
def plot_results(results, output_file, input_file):
- fig = make_subplots(rows=4, cols=1)
-
- for i, (coro_name, x_values) in enumerate(results.items(), start=1):
- x_list = []
- y_latency_list = []
- y_throughput_list = []
- y_max_latency_list = []
- y_num_callbacks_list = []
- latency_hover_text = []
- max_latency_hover_text = []
-
- for speedup, experiments in x_values.items():
- for experiment in experiments:
-
- completed_callbacks = experiment["latency"]
- virtual_run_time = experiment["virtual_run_time"][0]
-
- x_list.append(speedup * 100)
-
- num_callbacks = len(completed_callbacks)
- y_num_callbacks_list.append(num_callbacks)
-
- # handle average latency graph
-
- if num_callbacks > 0:
-
- breakdown = "<br>".join([f" {cb[0]}: {round(cb[1], 4)}" for cb in completed_callbacks])
- total_wait = sum([cb[1] for cb in completed_callbacks])
- max_cb = max(completed_callbacks, key=lambda cb: cb[1])
-
- latency = total_wait / num_callbacks
-
- y_max_latency_list.append(max_cb[1])
- y_latency_list.append(latency)
-
- else:
- latency = 0
- y_latency_list.append(latency)
-
- latency_hover_text.append(f"{coro_name}<br>Speedup: {speedup}<br>Average Wait: {round(latency, 4)}<br>Breakdown:<br>{breakdown}")
- max_latency_hover_text.append(f"{coro_name}<br>Speedup: {speedup}<br>Max Wait: {round(max_cb[1], 4)}<br>Handle: {max_cb[0]}")
-
- # handle throughput graph
- throughput = num_callbacks / virtual_run_time
- y_throughput_list.append(throughput)
-
- fig.add_trace(go.Scatter(
- x=x_list,
- y=y_latency_list,
- mode='markers',
- name=coro_name,
- marker=dict(color=get_color(coro_name)),
- hovertext=latency_hover_text,
- showlegend=True,
- ), row=1, col=1)
-
- fig.add_trace(go.Scatter(
- x=x_list,
- y=y_throughput_list,
- mode='markers',
- name=coro_name,
- marker=dict(color=get_color(coro_name)),
- showlegend=False,
- ), row=2, col=1)
-
- fig.add_trace(go.Scatter(
- x=x_list,
- y=y_max_latency_list,
- mode='markers',
- name=coro_name,
- marker=dict(color=get_color(coro_name)),
- hovertext=max_latency_hover_text,
- showlegend=False,
- ), row=3, col=1)
-
- fig.add_trace(go.Scatter(
- x=x_list,
- y=y_num_callbacks_list,
- mode='markers',
- name=coro_name,
- marker=dict(color=get_color(coro_name)),
- showlegend=False,
- ), row=4, col=1)
-
- fig.update_layout(title=input_file)
- fig.update_xaxes(title_text="speedup (% optimized away)", row=4, col=1)
+ # determine the number of loops we have data for
+ total_loops = set()
+ for x_values in results.values():
+ for xx_values in x_values.values():
+ total_loops.update(xx_values.keys())
+ total_loops = sorted(total_loops)
+ num_loops = len(total_loops)
+ loop_to_col = {loop: idx + 1 for idx, loop in enumerate(total_loops)}
+
+ fig = make_subplots(
+ rows=4,
+ cols=num_loops,
+ subplot_titles=[f"{loop}" for loop in total_loops] * 4,
+ vertical_spacing=0.1,
+ horizontal_spacing=0.05,
+ shared_xaxes=True,
+ shared_yaxes=False,
+ )
+
+ for coro_name, x_values in results.items():
+ for speedup, xx_values in x_values.items():
+ for loop, experiments in xx_values.items():
+ col = loop_to_col[loop]
+
+ x_list = []
+ y_latency_list = []
+ y_throughput_list = []
+ y_max_latency_list = []
+ y_num_callbacks_list = []
+ latency_hover_text = []
+ max_latency_hover_text = []
+
+ for experiment in experiments:
+ completed_callbacks = experiment[0]
+ virtual_run_time = experiment[1]
+
+ x_val = speedup * 100
+ x_list.append(x_val)
+
+ num_callbacks = len(completed_callbacks)
+ y_num_callbacks_list.append(num_callbacks)
+
+ # handle average latency graph
+ if num_callbacks > 0:
+
+ trim_count = math.floor(num_callbacks * TRIM_PERCENT / 2)
+ sorted_callbacks = sorted(completed_callbacks, key=lambda cb: cb[1])
+
+ trimmed_callbacks = (
+ sorted_callbacks[trim_count: len(sorted_callbacks) - trim_count]
+ if trim_count > 0 else sorted_callbacks
+ )
+
+ trimmed_latencies = [cb[1] for cb in trimmed_callbacks]
+ latency = sum(trimmed_latencies) / len(trimmed_latencies)
+
+ y_latency_list.append(latency)
+
+ max_cb = max(completed_callbacks, key=lambda cb: cb[1])
+ y_max_latency_list.append(max_cb[1])
+
+ breakdown = "<br>".join([f" {cb[0]}: {round(cb[1], 4)}" for cb in trimmed_callbacks])
+ latency_hover_text.append(
+ f"{coro_name}<br>Speedup: {speedup}<br>Trimmed Average Wait ({int(TRIM_PERCENT*100)}%): {round(latency, 4)}<br>Breakdown:<br>{breakdown}"
+ )
+ max_latency_hover_text.append(
+ f"{coro_name}<br>Speedup: {speedup}<br>Max Wait: {round(max_cb[1], 4)}<br>Handle: {max_cb[0]}"
+ )
+
+ else:
+ latency = 0
+ y_latency_list.append(latency)
+
+ latency_hover_text.append(f"{coro_name}<br>Speedup: {speedup}<br>No callbacks")
+ max_latency_hover_text.append(f"{coro_name}<br>Speedup: {speedup}<br>No callbacks")
+
+ # handle throughput graph
+ throughput = num_callbacks / virtual_run_time if virtual_run_time else 0
+ y_throughput_list.append(throughput)
+
+ fig.add_trace(go.Scatter(
+ x=x_list,
+ y=y_latency_list,
+ mode='markers',
+ name=coro_name,
+ marker=dict(color=get_color(coro_name)),
+ # hovertext=latency_hover_text,
+ showlegend=False,
+ ), row=1, col=col)
+
+ fig.add_trace(go.Scatter(
+ x=x_list,
+ y=y_throughput_list,
+ mode='markers',
+ name=coro_name,
+ marker=dict(color=get_color(coro_name)),
+ showlegend=False,
+ ), row=2, col=col)
+
+ fig.add_trace(go.Scatter(
+ x=x_list,
+ y=y_max_latency_list,
+ mode='markers',
+ name=coro_name,
+ marker=dict(color=get_color(coro_name)),
+ # hovertext=max_latency_hover_text,
+ showlegend=False,
+ ), row=3, col=col)
+
+ fig.add_trace(go.Scatter(
+ x=x_list,
+ y=y_num_callbacks_list,
+ mode='markers',
+ name=coro_name,
+ marker=dict(color=get_color(coro_name)),
+ showlegend=False,
+ ), row=4, col=col)
+
+ fig.update_layout(
+ height=1080,
+ width=1920 * num_loops,
+ title_text=f"Coroutine Performance Metrics: {input_file}",
+ showlegend=False,
+ )
+ for col in range(1, num_loops + 1):
+ fig.update_xaxes(title_text="speedup (% optimized away)", row=4, col=col)
+ fig.update_xaxes(showticklabels=True, col=col)
+ fig.update_xaxes(showticklabels=True, row=2, col=col)
+ fig.update_xaxes(showticklabels=True, row=3, col=col)
fig.update_yaxes(title_text="average latency (seconds)", row=1, col=1)
fig.update_yaxes(title_text="throughput (handles per second)", row=2, col=1)
fig.update_yaxes(title_text="maximum latency (seconds)", row=3, col=1)