Skip to content

Commit dc3dd12

Browse files
committed
Improve doc (lm-sys#2371)
1 parent 94f4dd6 commit dc3dd12

File tree

4 files changed

+11
-9
lines changed

4 files changed

+11
-9
lines changed

docs/commands/test_process.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
+## Unit tests for FastChat
+The scripts are under [FastChat/tests](../../tests).
+
 ### Test CLI Inference

 ```

tests/launch_openai_api_test_server.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ def launch_process(cmd):
 launch_process("python3 -m fastchat.serve.openai_api_server")

 models = [
-    "lmsys/vicuna-7b-v1.3",
+    "lmsys/vicuna-7b-v1.5",
     "lmsys/fastchat-t5-3b-v1.0",
     "THUDM/chatglm-6b",
     "mosaicml/mpt-7b-chat",

tests/test_cli.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,13 @@
 

 def test_single_gpu():
     models = [
-        "lmsys/vicuna-7b-v1.3",
+        "lmsys/vicuna-7b-v1.5",
         "lmsys/longchat-7b-16k",
         "lmsys/fastchat-t5-3b-v1.0",
+        "meta-llama/Llama-2-7b-chat-hf",
         "THUDM/chatglm-6b",
         "THUDM/chatglm2-6b",
         "mosaicml/mpt-7b-chat",
-        "project-baize/baize-v2-7b",
-        "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b",
         "tiiuae/falcon-7b-instruct",
         "~/model_weights/alpaca-7b",
         "~/model_weights/RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192.pth",

tests/test_openai_api.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -59,15 +59,15 @@ def test_chat_completion_stream(model):
     print()


-def test_openai_curl(model):
+def test_openai_curl():
     run_cmd("curl http://localhost:8000/v1/models")

     run_cmd(
         """
 curl http://localhost:8000/v1/chat/completions \
   -H "Content-Type: application/json" \
   -d '{
-    "model": "vicuna-7b-v1.3",
+    "model": "vicuna-7b-v1.5",
     "messages": [{"role": "user", "content": "Hello! What is your name?"}]
   }'
 """
@@ -78,7 +78,7 @@ def test_openai_curl(model):
 curl http://localhost:8000/v1/completions \
   -H "Content-Type: application/json" \
   -d '{
-    "model": "vicuna-7b-v1.3",
+    "model": "vicuna-7b-v1.5",
     "prompt": "Once upon a time",
     "max_tokens": 41,
     "temperature": 0.5
@@ -91,7 +91,7 @@ def test_openai_curl(model):
 curl http://localhost:8000/v1/embeddings \
   -H "Content-Type: application/json" \
   -d '{
-    "model": "vicuna-7b-v1.3",
+    "model": "vicuna-7b-v1.5",
     "input": "Hello world!"
   }'
 """
@@ -111,4 +111,4 @@ def test_openai_curl(model):
     test_chat_completion_stream(model)

     print("===== Test curl =====")
-    test_openai_curl("vicuna-7b-v1.3")
+    test_openai_curl()

0 commit comments

Comments
 (0)