2 changes: 1 addition & 1 deletion fastchat/__init__.py
@@ -1 +1 @@
-__version__ = "0.2.35"
+__version__ = "0.2.36"
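
Not part of the diff: a one-line check of which release is installed, assuming the package is importable in the current environment.

import fastchat

print(fastchat.__version__)  # prints "0.2.36" once this bump is installed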
11 changes: 8 additions & 3 deletions fastchat/model/model_registry.py
@@ -291,7 +291,12 @@ def get_model_info(name: str) -> ModelInfo:
)

register_model_info(
-    ["codellama-34b-instruct", "codellama-13b-instruct", "codellama-7b-instruct"],
+    [
+        "codellama-70b-instruct",
+        "codellama-34b-instruct",
+        "codellama-13b-instruct",
+        "codellama-7b-instruct",
+    ],
    "Code Llama",
    "https://ai.meta.com/blog/code-llama-large-language-model-coding/",
    "Open foundation models for code by Meta",
@@ -640,10 +645,10 @@ def get_model_info(name: str) -> ModelInfo:

register_model_info(
    [
-        "llava-v1.6-vicuna-7b",
        "llava-v1.6-vicuna-13b",
-        "llava-v1.5-7b",
+        "llava-v1.6-vicuna-7b",
        "llava-v1.5-13b",
+        "llava-v1.5-7b",
    ],
    "LLaVA",
    "https://github.com/haotian-liu/LLaVA",
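
Not part of the diff: a minimal sketch of how these registrations are looked up, assuming ModelInfo keeps its simple_name, link, and description fields.

from fastchat.model.model_registry import get_model_info

# Any of the names registered above resolves to the shared entry.
info = get_model_info("codellama-70b-instruct")
print(info.simple_name)  # "Code Llama"
print(info.link)         # the Meta blog post registered above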
2 changes: 1 addition & 1 deletion fastchat/serve/gradio_block_arena_vision.py
@@ -66,7 +66,7 @@ def build_single_vision_language_model_ui(models, add_promotion_links=False):
with gr.Column(scale=3):
    textbox = gr.Textbox(
        show_label=False,
-        placeholder="Enter your prompt here and press ENTER",
+        placeholder="👉 Enter your prompt and press ENTER",
        container=False,
        render=False,
        elem_id="input_box",
2 changes: 2 additions & 0 deletions fastchat/serve/register_worker.py
@@ -14,13 +14,15 @@
parser.add_argument("--controller-address", type=str)
parser.add_argument("--worker-name", type=str)
parser.add_argument("--check-heart-beat", action="store_true")
+parser.add_argument("--multimodal", action="store_true")
args = parser.parse_args()

url = args.controller_address + "/register_worker"
data = {
    "worker_name": args.worker_name,
    "check_heart_beat": args.check_heart_beat,
    "worker_status": None,
+    "multimodal": args.multimodal,
}
r = requests.post(url, json=data)
assert r.status_code == 200
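
Not part of the diff: a minimal sketch of what manually registering a multimodal worker looks like with the new flag; the controller and worker addresses below are illustrative placeholders.

import requests

controller_address = "http://localhost:21001"  # hypothetical controller
data = {
    "worker_name": "http://localhost:21002",   # hypothetical vision worker
    "check_heart_beat": True,
    "worker_status": None,
    "multimodal": True,  # defaults to False when --multimodal is not passed
}
r = requests.post(controller_address + "/register_worker", json=data)
assert r.status_code == 200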
3 changes: 2 additions & 1 deletion fastchat/serve/sglang_worker.py
@@ -1,5 +1,5 @@
"""
-A model worker that executes the model based on SGLANG.
+A model worker that executes the model based on SGLang.

Usage:
python3 -m fastchat.serve.sglang_worker --model-path liuhaotian/llava-v1.5-7b --tokenizer-path llava-hf/llava-1.5-7b-hf --port 30000 --worker-address http://localhost:30000
@@ -121,6 +121,7 @@ async def generate_stream(self, params):
for i in range(len(split_prompt)):
    prompt.append(split_prompt[i])
    if i < len(images):
+        prompt[-1] = prompt[-1].strip()
        prompt.append(load_image(images[i]))

state = pipeline.run(
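
Not part of the diff: a toy, self-contained illustration of the interleaving built in generate_stream above; load_image is stubbed with a string so the snippet runs standalone, and the URL is a placeholder. The new strip() drops the whitespace left around the image token after the prompt is split.

def load_image(url):
    # stand-in for the worker's real image loader
    return f"<image:{url}>"

split_prompt = ["Describe this image:\n", "\nAnswer briefly."]  # prompt split on the image token
images = ["https://example.com/cat.png"]  # placeholder URL

prompt = []
for i in range(len(split_prompt)):
    prompt.append(split_prompt[i])
    if i < len(images):
        prompt[-1] = prompt[-1].strip()  # remove trailing whitespace before the image slot
        prompt.append(load_image(images[i]))

print(prompt)
# ['Describe this image:', '<image:https://example.com/cat.png>', '\nAnswer briefly.']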
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "fschat"
-version = "0.2.35"
+version = "0.2.36"
description = "An open platform for training, serving, and evaluating large language model based chatbots."
readme = "README.md"
requires-python = ">=3.8"