forked from NousResearch/hermes-agent
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_protocol.py
More file actions
806 lines (591 loc) · 26.4 KB
/
test_protocol.py
File metadata and controls
806 lines (591 loc) · 26.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
"""Tests for tui_gateway JSON-RPC protocol plumbing."""
import io
import json
import sys
import threading
import time
import types
from unittest.mock import MagicMock, patch
import pytest
# Captured at import time so the autouse fixture below can restore the
# process-wide stdout after tests that reassign sys.stdout.
_original_stdout = sys.stdout
@pytest.fixture(autouse=True)
def _restore_stdout():
    """Guarantee every test ends with the process-wide stdout put back."""
    try:
        yield
    finally:
        sys.stdout = _original_stdout
@pytest.fixture()
def server():
    """Import `tui_gateway.server` with its heavy dependencies stubbed.

    Yields the imported module, then clears its registries and reloads it
    so state never leaks between tests.
    """
    stubs = {
        "hermes_constants": MagicMock(get_hermes_home=MagicMock(return_value="/tmp/hermes_test")),
        "hermes_cli.env_loader": MagicMock(),
        "hermes_cli.banner": MagicMock(),
        "hermes_state": MagicMock(),
    }
    with patch.dict("sys.modules", stubs):
        import importlib
        mod = importlib.import_module("tui_gateway.server")
        yield mod
        for registry in (mod._sessions, mod._pending, mod._answers, mod._methods):
            registry.clear()
        importlib.reload(mod)
@pytest.fixture()
def capture(server):
    """Redirect server's real stdout to a StringIO and return (server, buf)."""
    sink = io.StringIO()
    server._real_stdout = sink
    return server, sink
# ── JSON-RPC envelope ────────────────────────────────────────────────
def test_unknown_method(server):
    """Unrecognized methods yield the JSON-RPC 'method not found' code."""
    reply = server.handle_request({"id": "1", "method": "bogus"})
    assert reply["error"]["code"] == -32601
def test_ok_envelope(server):
    """_ok wraps a result in a proper JSON-RPC 2.0 success envelope."""
    expected = {"jsonrpc": "2.0", "id": "r1", "result": {"x": 1}}
    assert server._ok("r1", {"x": 1}) == expected
def test_err_envelope(server):
    """_err wraps a code/message pair in a JSON-RPC 2.0 error envelope."""
    expected = {"jsonrpc": "2.0", "id": "r2", "error": {"code": 4001, "message": "nope"}}
    assert server._err("r2", 4001, "nope") == expected
# ── write_json ───────────────────────────────────────────────────────
def test_write_json(capture):
    """write_json serializes the payload onto the captured stdout."""
    server, sink = capture
    assert server.write_json({"test": True})
    assert json.loads(sink.getvalue()) == {"test": True}
def test_write_json_broken_pipe(server):
    """A broken pipe on write/flush is reported as False, not raised."""
    class _DeadPipe:
        def write(self, _data):
            raise BrokenPipeError

        def flush(self):
            raise BrokenPipeError

    server._real_stdout = _DeadPipe()
    assert server.write_json({"x": 1}) is False
def test_write_json_closed_stream_returns_false(server):
    """ValueError ('I/O on closed file') used to bubble up; treat as gone."""
    class _ClosedStream:
        def write(self, _data):
            raise ValueError("I/O operation on closed file")

        def flush(self):
            raise ValueError("I/O operation on closed file")

    server._real_stdout = _ClosedStream()
    assert server.write_json({"x": 1}) is False
def test_write_json_unicode_encode_error_re_raises(server):
    """A non-UTF-8 stdout encoding raises UnicodeEncodeError (a ValueError
    subclass). It must NOT be swallowed as 'peer gone' — that would let
    `entry.py` exit cleanly via the False path and hide the real config
    bug. We re-raise so the existing crash-log infrastructure records it."""
    class _AsciiStream:
        def write(self, line):
            # Any non-ascii character triggers UnicodeEncodeError here.
            line.encode("ascii")

        def flush(self):
            pass

    server._real_stdout = _AsciiStream()
    with pytest.raises(UnicodeEncodeError):
        server.write_json({"msg": "héllo"})
def test_write_json_unrelated_value_error_re_raises(server):
    """Only ValueError('...closed file...') means peer gone. Other
    ValueErrors are programming errors and must surface."""
    class _MiscValueError:
        def write(self, _data):
            raise ValueError("something else entirely")

        def flush(self):
            pass

    server._real_stdout = _MiscValueError()
    with pytest.raises(ValueError, match="something else entirely"):
        server.write_json({"x": 1})
def test_write_json_non_serializable_payload_re_raises(server):
    """Non-JSON-safe payloads are programming errors — they must NOT be
    silently dropped via the False path (which would trigger a clean exit
    in entry.py and mask the real bug)."""
    # `io` is imported at module scope; no local re-import needed.
    server._real_stdout = io.StringIO()
    with pytest.raises(TypeError):
        server.write_json({"obj": object()})
def test_write_json_peer_gone_oserror_on_flush_returns_false(server):
    """A flush that raises a peer-gone OSError (EPIPE) must not strand
    the lock or crash; it returns False so the dispatcher exits cleanly."""
    import errno

    lines_out = []

    class _EpipeOnFlush:
        def write(self, line):
            lines_out.append(line)

        def flush(self):
            raise OSError(errno.EPIPE, "broken pipe")

    server._real_stdout = _EpipeOnFlush()
    assert server.write_json({"x": 1}) is False
    # The payload made it to write() before the flush failed.
    assert lines_out and json.loads(lines_out[0]) == {"x": 1}
def test_write_json_non_peer_gone_oserror_re_raises(server):
    """Host I/O failures (ENOSPC, EACCES, EIO …) are NOT peer-gone — they
    must re-raise so the crash log records them instead of looking like
    a clean disconnect via the False path."""
    import errno

    class _NoSpaceStream:
        def write(self, _data):
            raise OSError(errno.ENOSPC, "no space left")

        def flush(self):
            pass

    server._real_stdout = _NoSpaceStream()
    with pytest.raises(OSError, match="no space"):
        server.write_json({"x": 1})
def test_write_json_skips_flush_when_disable_flush_true(monkeypatch):
    """`StdioTransport` skips flush when `_DISABLE_FLUSH` is true.

    Tests the runtime *behaviour* via direct module-attr patch. The env
    var → module constant wiring is covered by the dedicated env test
    below; reloading server.py here would re-register atexit hooks and
    recreate the worker pool.
    """
    import importlib
    transport_mod = importlib.import_module("tui_gateway.transport")
    monkeypatch.setattr(transport_mod, "_DISABLE_FLUSH", True)
    flush_calls = []
    lines_out = []

    class _CountingStream:
        def write(self, line):
            lines_out.append(line)

        def flush(self):
            flush_calls.append(1)

    sink = _CountingStream()
    transport = transport_mod.StdioTransport(lambda: sink, threading.Lock())
    assert transport.write({"x": 1}) is True
    assert not flush_calls
def test_disable_flush_env_var_actually_wires_to_module_constant(monkeypatch):
    """End-to-end: setting `HERMES_TUI_GATEWAY_NO_FLUSH=1` and importing
    `tui_gateway.transport` fresh actually flips `_DISABLE_FLUSH` true.
    Reloads only the transport module — server.py is untouched so its
    atexit hooks/worker pool stay intact."""
    import importlib
    monkeypatch.setenv("HERMES_TUI_GATEWAY_NO_FLUSH", "1")
    transport_mod = importlib.import_module("tui_gateway.transport")
    transport_mod = importlib.reload(transport_mod)
    try:
        assert transport_mod._DISABLE_FLUSH is True
    finally:
        # Restore the env-disabled state so other tests see the default.
        monkeypatch.delenv("HERMES_TUI_GATEWAY_NO_FLUSH", raising=False)
        importlib.reload(transport_mod)
# ── _emit ────────────────────────────────────────────────────────────
def test_emit_with_payload(capture):
    """_emit wraps type/session/payload inside an 'event' notification."""
    server, sink = capture
    server._emit("test.event", "s1", {"key": "val"})
    envelope = json.loads(sink.getvalue())
    assert envelope["method"] == "event"
    params = envelope["params"]
    assert params["type"] == "test.event"
    assert params["session_id"] == "s1"
    assert params["payload"]["key"] == "val"
def test_emit_without_payload(capture):
    """Omitting the payload argument leaves 'payload' out of params."""
    server, sink = capture
    server._emit("ping", "s2")
    params = json.loads(sink.getvalue())["params"]
    assert "payload" not in params
# ── Blocking prompt round-trip ───────────────────────────────────────
def test_block_and_respond(capture):
    """_block parks on an Event until an answer arrives, then returns it.

    Fixes flakiness in the original: the worker thread is now joined with a
    timeout (instead of a fixed 0.1s sleep before asserting), the poll loop
    fails loudly if _block never registers, and time.sleep replaces the
    throwaway threading.Event().wait() idiom.
    """
    server, _ = capture
    result = [None]
    worker = threading.Thread(
        target=lambda: result.__setitem__(0, server._block("test.prompt", "s1", {"q": "?"}, timeout=5)),
    )
    worker.start()
    # Bounded wait for the blocking call to register itself in _pending.
    for _ in range(100):
        if server._pending:
            break
        time.sleep(0.01)
    assert server._pending, "_block never registered a pending request"
    rid = next(iter(server._pending))
    server._answers[rid] = "my_answer"
    # _pending values are (sid, Event) tuples — unpack to set the Event
    _, ev = server._pending[rid]
    ev.set()
    worker.join(timeout=5)
    assert not worker.is_alive(), "_block did not return after the Event was set"
    assert result[0] == "my_answer"
def test_clear_pending(server):
    """_clear_pending releases every waiter with an empty-string answer."""
    waiter = threading.Event()
    # _pending values are (sid, Event) tuples
    server._pending["r1"] = ("sid-x", waiter)
    server._clear_pending()
    assert waiter.is_set()
    assert server._answers["r1"] == ""
# ── Session lookup ───────────────────────────────────────────────────
def test_sess_missing(server):
    """Looking up an unknown session id returns a 4001 error."""
    _session, err = server._sess({"session_id": "nope"}, "r1")
    assert err["error"]["code"] == 4001
def test_sess_found(server):
    """A registered session id resolves to a session object, no error."""
    server._sessions["abc"] = {"agent": MagicMock()}
    session, err = server._sess({"session_id": "abc"}, "r1")
    assert session is not None
    assert err is None
# ── session.resume payload ────────────────────────────────────────────
def test_session_resume_returns_hydrated_messages(server, monkeypatch):
    """session.resume replays persisted history as TUI-ready messages.

    Per the assertions below: blank/None assistant turns and narrator rows
    are dropped, and tool rows collapse into a name/context stub.
    """
    class _FakeDB:
        def get_session(self, _sid):
            return {"id": "20260409_010101_abc123"}

        def get_session_by_title(self, _title):
            return None

        def reopen_session(self, _sid):
            return None

        def get_messages_as_conversation(self, _sid, include_ancestors=False):
            return [
                {"role": "user", "content": "hello"},
                {"role": "assistant", "content": "yo"},
                {"role": "tool", "content": "searched"},
                {"role": "assistant", "content": " "},
                {"role": "assistant", "content": None},
                {"role": "narrator", "content": "skip"},
            ]

    monkeypatch.setattr(server, "_get_db", lambda: _FakeDB())
    monkeypatch.setattr(server, "_make_agent", lambda sid, key, session_id=None: object())
    monkeypatch.setattr(server, "_init_session", lambda sid, key, agent, history, cols=80: None)
    monkeypatch.setattr(server, "_session_info", lambda _agent: {"model": "test/model"})
    request = {
        "id": "r1",
        "method": "session.resume",
        "params": {"session_id": "20260409_010101_abc123", "cols": 100},
    }
    resp = server.handle_request(request)
    assert "error" not in resp
    assert resp["result"]["message_count"] == 3
    hydrated = [
        {"role": "user", "text": "hello"},
        {"role": "assistant", "text": "yo"},
        {"role": "tool", "name": "tool", "context": ""},
    ]
    assert resp["result"]["messages"] == hydrated
# ── Config I/O ───────────────────────────────────────────────────────
def test_config_load_missing(server, tmp_path):
    """Loading config from an empty home directory yields {}."""
    server._hermes_home = tmp_path
    assert server._load_cfg() == {}
def test_config_roundtrip(server, tmp_path):
    """A saved config value survives a save → load round-trip."""
    server._hermes_home = tmp_path
    server._save_cfg({"model": "test/model"})
    loaded = server._load_cfg()
    assert loaded["model"] == "test/model"
# ── _cli_exec_blocked ────────────────────────────────────────────────
@pytest.mark.parametrize("argv", [
    [],
    ["setup"],
    ["gateway"],
    ["sessions", "browse"],
    ["config", "edit"],
])
def test_cli_exec_blocked(server, argv):
    """These CLI invocations must be blocked (non-None block reason)."""
    assert server._cli_exec_blocked(argv) is not None
@pytest.mark.parametrize("argv", [
    ["version"],
    ["sessions", "list"],
])
def test_cli_exec_allowed(server, argv):
    """These CLI invocations pass the block check (None means allowed)."""
    assert server._cli_exec_blocked(argv) is None
# ── slash.exec skill command interception ────────────────────────────
def test_slash_exec_rejects_skill_commands(server):
    """slash.exec must reject skill commands so the TUI falls through to command.dispatch."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid, "agent": None}
    known_skills = {"/hermes-agent-dev": {"name": "hermes-agent-dev", "description": "Dev workflow"}}
    with patch("agent.skill_commands.get_skill_commands", return_value=known_skills):
        resp = server.handle_request({
            "id": "r1",
            "method": "slash.exec",
            "params": {"command": "hermes-agent-dev", "session_id": sid},
        })
    # An error response makes the TUI's .catch() fire command.dispatch.
    assert "error" in resp
    assert resp["error"]["code"] == 4018
    assert "skill command" in resp["error"]["message"]
def test_slash_exec_handles_plugin_commands_in_live_gateway(server):
    """Plugin slash commands return normal slash.exec output without using the worker."""
    sid = "test-session"

    class _RecordingWorker:
        def __init__(self):
            self.calls = []

        def run(self, cmd):
            self.calls.append(cmd)
            return f"worker:{cmd}"

    worker = _RecordingWorker()
    server._sessions[sid] = {"session_key": sid, "agent": None, "slash_worker": worker}

    def lookup(name):
        return (lambda arg: f"plugin:{arg}") if name == "plugin-cmd" else None

    with patch("hermes_cli.plugins.get_plugin_command_handler", lookup):
        resp = server.handle_request({
            "id": "r-plugin-slash",
            "method": "slash.exec",
            "params": {"command": "plugin-cmd hello", "session_id": sid},
        })
    assert "error" not in resp
    assert resp["result"] == {"output": "plugin:hello"}
    assert worker.calls == []
def test_slash_exec_plugin_lookup_failure_falls_back_to_worker(server):
    """Plugin discovery failures must not break ordinary slash-worker commands."""
    sid = "test-session"

    class _RecordingWorker:
        def __init__(self):
            self.calls = []

        def run(self, cmd):
            self.calls.append(cmd)
            return f"worker:{cmd}"

    worker = _RecordingWorker()
    server._sessions[sid] = {"session_key": sid, "agent": None, "slash_worker": worker}
    with patch(
        "hermes_cli.plugins.get_plugin_command_handler",
        side_effect=RuntimeError("discovery boom"),
    ):
        reply = server.handle_request({
            "id": "r-plugin-lookup-failure",
            "method": "slash.exec",
            "params": {"command": "help", "session_id": sid},
        })
    assert "error" not in reply
    assert reply["result"] == {"output": "worker:help"}
    assert worker.calls == ["help"]
def test_slash_exec_plugin_handler_error_returns_output(server):
    """Plugin handler failures return slash output so the TUI does not redispatch."""
    sid = "test-session"

    class _RecordingWorker:
        def __init__(self):
            self.calls = []

        def run(self, cmd):
            self.calls.append(cmd)
            return f"worker:{cmd}"

    def exploding_handler(arg):
        raise RuntimeError(f"handler boom: {arg}")

    worker = _RecordingWorker()
    server._sessions[sid] = {"session_key": sid, "agent": None, "slash_worker": worker}
    with patch(
        "hermes_cli.plugins.get_plugin_command_handler",
        lambda name: exploding_handler if name == "plugin-cmd" else None,
    ):
        reply = server.handle_request({
            "id": "r-plugin-handler-error",
            "method": "slash.exec",
            "params": {"command": "plugin-cmd hello", "session_id": sid},
        })
    assert "error" not in reply
    assert reply["result"] == {"output": "Plugin command error: handler boom: hello"}
    assert worker.calls == []
@pytest.mark.parametrize("cmd", ["retry", "queue hello", "q hello", "steer fix the test", "plan"])
def test_slash_exec_rejects_pending_input_commands(server, cmd):
    """slash.exec must reject commands that use _pending_input in the CLI."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid, "agent": None}
    request = {
        "id": "r1",
        "method": "slash.exec",
        "params": {"command": cmd, "session_id": sid},
    }
    reply = server.handle_request(request)
    assert "error" in reply
    assert reply["error"]["code"] == 4018
    assert "pending-input command" in reply["error"]["message"]
def test_command_dispatch_queue_sends_message(server):
    """command.dispatch /queue returns {type: 'send', message: ...} for the TUI."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid}
    reply = server.handle_request({
        "id": "r1",
        "method": "command.dispatch",
        "params": {"name": "queue", "arg": "tell me about quantum computing", "session_id": sid},
    })
    assert "error" not in reply
    payload = reply["result"]
    assert payload["type"] == "send"
    assert payload["message"] == "tell me about quantum computing"
def test_command_dispatch_queue_requires_arg(server):
    """command.dispatch /queue without an argument returns an error."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid}
    reply = server.handle_request({
        "id": "r2",
        "method": "command.dispatch",
        "params": {"name": "queue", "arg": "", "session_id": sid},
    })
    assert "error" in reply
    assert reply["error"]["code"] == 4004
def test_skills_manage_search_uses_tools_hub_sources(server):
    """skills.manage search threads auth → router → unified_search and flattens hits."""
    hit = type("Result", (), {
        "description": "Build better terminal demos",
        "name": "showroom",
    })()
    auth = MagicMock(return_value="auth")
    router = MagicMock(return_value=["source"])
    search = MagicMock(return_value=[hit])
    stub_hub = types.SimpleNamespace(
        GitHubAuth=auth,
        create_source_router=router,
        unified_search=search,
    )
    with patch.dict(sys.modules, {"tools.skills_hub": stub_hub}):
        reply = server.handle_request({
            "id": "skills-search",
            "method": "skills.manage",
            "params": {"action": "search", "query": "showroom"},
        })
    assert "error" not in reply
    assert reply["result"] == {
        "results": [{"description": "Build better terminal demos", "name": "showroom"}]
    }
    auth.assert_called_once_with()
    router.assert_called_once_with("auth")
    search.assert_called_once_with("showroom", ["source"], source_filter="all", limit=20)
def test_command_dispatch_steer_fallback_sends_message(server):
    """command.dispatch /steer with no active agent falls back to send."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid, "agent": None}
    reply = server.handle_request({
        "id": "r3",
        "method": "command.dispatch",
        "params": {"name": "steer", "arg": "focus on testing", "session_id": sid},
    })
    assert "error" not in reply
    outcome = reply["result"]
    assert outcome["type"] == "send"
    assert outcome["message"] == "focus on testing"
def test_command_dispatch_retry_finds_last_user_message(server):
    """command.dispatch /retry walks session['history'] to find the last user message."""
    sid = "test-session"
    convo = [
        {"role": "user", "content": "first question"},
        {"role": "assistant", "content": "first answer"},
        {"role": "user", "content": "second question"},
        {"role": "assistant", "content": "second answer"},
    ]
    server._sessions[sid] = {
        "session_key": sid,
        "agent": None,
        "history": convo,
        "history_lock": threading.Lock(),
        "history_version": 0,
    }
    reply = server.handle_request({
        "id": "r4",
        "method": "command.dispatch",
        "params": {"name": "retry", "session_id": sid},
    })
    assert "error" not in reply
    outcome = reply["result"]
    assert outcome["type"] == "send"
    assert outcome["message"] == "second question"
    # History must be truncated from the last user message onward.
    session = server._sessions[sid]
    assert len(session["history"]) == 2
    assert session["history"][-1]["role"] == "assistant"
    assert session["history_version"] == 1
def test_command_dispatch_retry_empty_history(server):
    """command.dispatch /retry with empty history returns error."""
    sid = "test-session"
    server._sessions[sid] = {
        "session_key": sid,
        "agent": None,
        "history": [],
        "history_lock": threading.Lock(),
        "history_version": 0,
    }
    reply = server.handle_request({
        "id": "r5",
        "method": "command.dispatch",
        "params": {"name": "retry", "session_id": sid},
    })
    assert "error" in reply
    assert reply["error"]["code"] == 4018
def test_command_dispatch_retry_handles_multipart_content(server):
    """command.dispatch /retry extracts text from multipart content lists."""
    sid = "test-session"
    multipart_user_turn = {
        "role": "user",
        "content": [
            {"type": "text", "text": "analyze this"},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}
        ],
    }
    server._sessions[sid] = {
        "session_key": sid,
        "agent": None,
        "history": [multipart_user_turn, {"role": "assistant", "content": "I see the image."}],
        "history_lock": threading.Lock(),
        "history_version": 0,
    }
    reply = server.handle_request({
        "id": "r6",
        "method": "command.dispatch",
        "params": {"name": "retry", "session_id": sid},
    })
    assert "error" not in reply
    outcome = reply["result"]
    assert outcome["type"] == "send"
    assert outcome["message"] == "analyze this"
def test_command_dispatch_returns_skill_payload(server):
    """command.dispatch returns structured skill payload for the TUI to send()."""
    sid = "test-session"
    server._sessions[sid] = {"session_key": sid}
    known_skills = {"/hermes-agent-dev": {"name": "hermes-agent-dev", "description": "Dev workflow"}}
    skill_message = "Loaded skill content here"
    with patch("agent.skill_commands.scan_skill_commands", return_value=known_skills), \
         patch("agent.skill_commands.build_skill_invocation_message", return_value=skill_message):
        reply = server.handle_request({
            "id": "r2",
            "method": "command.dispatch",
            "params": {"name": "hermes-agent-dev", "session_id": sid},
        })
    assert "error" not in reply
    payload = reply["result"]
    assert payload["type"] == "skill"
    assert payload["message"] == skill_message
    assert payload["name"] == "hermes-agent-dev"
def test_command_dispatch_awaits_async_plugin_handler(server):
    """Coroutine plugin handlers are awaited before the result is returned."""
    async def _handler(arg):
        return f"async:{arg}"

    def lookup(name):
        return _handler if name == "async-cmd" else None

    with patch("hermes_cli.plugins.get_plugin_command_handler", lookup):
        reply = server.handle_request({
            "id": "r-plugin",
            "method": "command.dispatch",
            "params": {"name": "async-cmd", "arg": "hello"},
        })
    assert "error" not in reply
    assert reply["result"] == {"type": "plugin", "output": "async:hello"}
# ── dispatch(): pool routing for long handlers (#12546) ──────────────
def test_dispatch_runs_short_handlers_inline(server):
    """Non-long handlers return their response synchronously from dispatch()."""
    server._methods["fast.ping"] = lambda rid, params: server._ok(rid, {"pong": True})
    reply = server.dispatch({"id": "r1", "method": "fast.ping", "params": {}})
    assert reply == {"jsonrpc": "2.0", "id": "r1", "result": {"pong": True}}
def test_dispatch_offloads_long_handlers_and_emits_via_stdout(capture):
    """Long handlers run on the pool and write their response via write_json.

    Improvement over the original: the poll loop now asserts the worker
    actually wrote something, so a pool timeout fails with a clear message
    instead of a confusing json.loads('') ValueError.
    """
    server, buf = capture
    server._methods["slash.exec"] = lambda rid, params: server._ok(rid, {"output": "hi"})
    resp = server.dispatch({"id": "r2", "method": "slash.exec", "params": {}})
    assert resp is None
    deadline = time.monotonic() + 1.0
    while not buf.getvalue() and time.monotonic() < deadline:
        time.sleep(0.01)
    raw = buf.getvalue()
    assert raw, "pool worker never wrote a response to stdout"
    written = json.loads(raw)
    assert written == {"jsonrpc": "2.0", "id": "r2", "result": {"output": "hi"}}
def test_dispatch_long_handler_does_not_block_fast_handler(server):
    """A slow long handler must not prevent a concurrent fast handler from completing."""
    released = threading.Event()

    def slow_slash(rid, params):
        released.wait(timeout=5)
        return server._ok(rid, {"done": True})

    server._methods["slash.exec"] = slow_slash
    server._methods["fast.ping"] = lambda rid, params: server._ok(rid, {"pong": True})
    started = time.monotonic()
    assert server.dispatch({"id": "slow", "method": "slash.exec", "params": {}}) is None
    fast_resp = server.dispatch({"id": "fast", "method": "fast.ping", "params": {}})
    fast_elapsed = time.monotonic() - started
    assert fast_resp["result"] == {"pong": True}
    assert fast_elapsed < 0.5, f"fast handler blocked for {fast_elapsed:.2f}s behind slow handler"
    released.set()
def test_dispatch_session_compress_does_not_block_fast_handler(server):
    """Manual TUI compaction can take minutes, so it must not block the RPC loop."""
    released = threading.Event()

    def slow_compress(rid, params):
        released.wait(timeout=5)
        return server._ok(rid, {"done": True})

    server._methods["session.compress"] = slow_compress
    server._methods["fast.ping"] = lambda rid, params: server._ok(rid, {"pong": True})
    started = time.monotonic()
    assert server.dispatch({"id": "slow", "method": "session.compress", "params": {}}) is None
    fast_resp = server.dispatch({"id": "fast", "method": "fast.ping", "params": {}})
    fast_elapsed = time.monotonic() - started
    assert fast_resp["result"] == {"pong": True}
    assert fast_elapsed < 0.5, f"fast handler blocked for {fast_elapsed:.2f}s behind session.compress"
    released.set()
def test_dispatch_long_handler_exception_produces_error_response(capture):
    """An exception inside a pool-dispatched handler still yields a JSON-RPC error."""
    server, buf = capture

    def exploding(rid, params):
        raise RuntimeError("kaboom")

    server._methods["slash.exec"] = exploding
    server.dispatch({"id": "r3", "method": "slash.exec", "params": {}})
    for _ in range(50):
        if buf.getvalue():
            break
        time.sleep(0.01)
    reply = json.loads(buf.getvalue())
    assert reply["id"] == "r3"
    assert reply["error"]["code"] == -32000
    assert "kaboom" in reply["error"]["message"]
def test_dispatch_unknown_long_method_still_goes_inline(server):
    """Method name not in _LONG_HANDLERS takes the sync path even if handler is slow."""
    server._methods["some.method"] = lambda rid, params: server._ok(rid, {"ok": True})
    reply = server.dispatch({"id": "r4", "method": "some.method", "params": {}})
    assert reply["result"] == {"ok": True}