diff options
 ex/starlette_web.py       | 63 +++
 t/block_longtime.py       | 30 ++
 t/block_shell_longtime.py | 30 ++
3 files changed, 123 insertions, 0 deletions
# Patch contents reconstructed from a cgit diff listing. The patch adds
# three NEW files; they are shown here as three sections. In the repo each
# section is a separate file (so the repeated definitions below do not
# actually shadow each other).

# ========================================================================
# ex/starlette_web.py  (new file)
# ========================================================================
# Minimal Starlette application used to benchmark uvicorn running on the
# nemesis causal event loop.

# Port the server listens on. (Was a triple-quoted "4567\n" literal;
# int() accepts either form, so this is behavior-identical.)
PORT = "4567"

import sys
import os

# Use a development checkout of starlette rather than an installed copy.
sys.path.append(os.path.expanduser('~/git/starlette'))
from starlette.applications import Starlette
from starlette.responses import JSONResponse, PlainTextResponse
from starlette.routing import Route
import uvicorn

# Assigned in __main__ below; the /kill endpoint uses it to request
# a graceful shutdown.
uvicorn_server: uvicorn.Server | None = None


async def homepage(request):
    """Trivial JSON endpoint — baseline per-request cost."""
    return JSONResponse({'hello': 'world'})


async def large_json(request):
    """This endpoint exercises starlette's JSON response class."""
    # 5000 items of ~2000 chars each: a deliberately large payload.
    data = {"items": [{"x": i, "text": "A" * 2000} for i in range(5000)]}
    return JSONResponse(data)


async def upload(request):
    """
    This endpoint exercises starlette's ability to parse very
    large requests.
    """
    data = await request.body()
    return PlainTextResponse(str(len(data)))


async def crash(request):
    """This endpoint tests the overhead imposed by crash handlers."""
    raise RuntimeError("You've killed me!")


async def kill(request):
    """Shuts down the server."""
    global uvicorn_server
    # uvicorn polls should_exit and drains connections gracefully.
    uvicorn_server.should_exit = True
    return PlainTextResponse("Shutting down...\n")


routes = [
    Route("/", endpoint=homepage, methods=["GET"]),
    Route("/json", endpoint=large_json, methods=["GET"]),
    Route("/upload", endpoint=upload, methods=["POST"]),
    Route("/crash", endpoint=crash, methods=["GET"]),
    Route("/kill", endpoint=kill, methods=["GET"]),
]

app = Starlette(debug=False, routes=routes)

if __name__ == "__main__":
    conf = uvicorn.Config(
        app,
        host="127.0.0.1",
        port=int(PORT),
        # uvicorn resolves this dotted string to the event-loop factory.
        loop="nemesis.causal_event_loop:causal_loop_factory",
    )
    uvicorn_server = uvicorn.Server(conf)
    uvicorn_server.run()


# ========================================================================
# t/block_longtime.py  (new file)
# ========================================================================
# Stress test: repeatedly runs a CPU-burning coroutine alongside
# well-behaved sleeps, so the causal event loop's blocking detection
# has something to flag over a long (~90 s) run.

import asyncio
import time
import sys
import os

sys.path.append(os.path.expanduser('~/nemesis/'))
from nemesis.causal_event_loop import causal_loop_factory


async def burn_cpu(sec):
    """Busy-loop for roughly `sec` seconds WITHOUT yielding.

    Intentionally blocks the event loop — it never awaits. The inner
    for-loop keeps the perf_counter polling coarse-grained.
    """
    t0 = time.perf_counter()
    elapsed = 0
    while elapsed < sec:
        for _ in range(1000):
            pass
        elapsed = time.perf_counter() - t0


async def sleep_task(duration):
    """Cooperative sleep — the well-behaved counterpart to burn_cpu."""
    await asyncio.sleep(duration)


async def main():
    total_duration = 90
    sleep_durations = [1.0, 0.7, 0.7, 0.4]

    # Each iteration races four sleeps against one second of CPU burn.
    for _ in range(total_duration):
        tasks = [
            asyncio.create_task(sleep_task(duration))
            for duration in sleep_durations
        ]
        burn_task = asyncio.create_task(burn_cpu(1))

        await asyncio.gather(burn_task, *tasks)


if __name__ == "__main__":
    # asyncio.run(loop_factory=...) requires Python 3.12+.
    asyncio.run(main(), loop_factory=causal_loop_factory)


# ========================================================================
# t/block_shell_longtime.py  (new file)
# ========================================================================
# Same stress test as block_longtime.py, but the "sleeps" are external
# `sleep` shell subprocesses instead of asyncio.sleep.

import asyncio
import sys
import os

sys.path.append(os.path.expanduser('~/nemesis/'))
from nemesis.causal_event_loop import causal_loop_factory
import time


async def burn_cpu(sec):
    """Busy-loop for roughly `sec` seconds WITHOUT yielding.

    Intentionally blocks the event loop — it never awaits.
    """
    t0 = time.perf_counter()
    elapsed = 0
    while elapsed < sec:
        for _ in range(1000):
            pass
        elapsed = time.perf_counter() - t0


async def sleep_task(duration):
    """Spawn a `sleep <duration>` shell subprocess.

    NOTE(review): the returned Process is never waited on
    (`proc.wait()`), so this coroutine completes as soon as the shell
    is launched, not after `duration` elapses — confirm that only the
    spawn, not the sleep, is meant to be measured.
    """
    await asyncio.create_subprocess_shell(f'sleep {duration}')


async def main():
    total_duration = 90
    sleep_durations = [1.0, 0.4, 0.7, 0.7]

    # Each iteration races four subprocess spawns against one second
    # of CPU burn.
    for _ in range(total_duration):
        tasks = [
            asyncio.create_task(sleep_task(duration))
            for duration in sleep_durations
        ]
        burn_task = asyncio.create_task(burn_cpu(1))

        await asyncio.gather(burn_task, *tasks)


if __name__ == "__main__":
    # asyncio.run(loop_factory=...) requires Python 3.12+.
    asyncio.run(main(), loop_factory=causal_loop_factory)
