From 6f5926ef80e31a3c6d178a4af1d94a553d0668cb Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Fri, 30 May 2025 12:01:59 -0700
Subject: [PATCH 1/8] feat: use prompt-template instead of hardcoding prompt

---
 examples/llm/configs/disagg.yaml                 | 3 ++-
 examples/multimodal/components/decode_worker.py  | 2 +-
 examples/multimodal/components/prefill_worker.py | 4 ++--
 examples/multimodal/components/processor.py      | 7 ++++---
 examples/multimodal/configs/agg.yaml             | 1 +
 5 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/examples/llm/configs/disagg.yaml b/examples/llm/configs/disagg.yaml
index 0bfbeff142..a7c5ff443c 100644
--- a/examples/llm/configs/disagg.yaml
+++ b/examples/llm/configs/disagg.yaml
@@ -26,6 +26,7 @@ Frontend:
 Processor:
   router: round-robin
   common-configs: [model, block-size]
+  prompt-template: "USER: <image>\n<prompt> ASSISTANT:"
 
 VllmWorker:
   remote-prefill: true
@@ -48,4 +49,4 @@ PrefillWorker:
 
 Planner:
   environment: local
-  no-operation: true
\ No newline at end of file
+  no-operation: true

diff --git a/examples/multimodal/components/decode_worker.py b/examples/multimodal/components/decode_worker.py
index 6f05cc1ec8..52c3a53401 100644
--- a/examples/multimodal/components/decode_worker.py
+++ b/examples/multimodal/components/decode_worker.py
@@ -241,8 +241,8 @@ async def generate(self, request: vLLMMultimodalRequest):
         # The decode worker will pre-allocate the memory based on the prompt token length for the prefill worker to transfer the kv cache.
         # As a workaround, here we manually insert some placeholder dummy tokens based on the embedding size
         # so that decode worker can pre-allocate the memory with the correct size.
-        # The structure of the prompt will be like: "<s>\nUSER: <image>\n<prompt>\nASSISTANT:".
         # Since the "<image>" token is included in the prompt, only need to insert (embedding_size - 1) dummy tokens after the image token.
+        # TODO: make this more flexible/model-dependent
         IMAGE_TOKEN_ID = 32000
         DUMMY_TOKEN_ID = 0
         # Find the index of the image token in the prompt token ids

diff --git a/examples/multimodal/components/prefill_worker.py b/examples/multimodal/components/prefill_worker.py
index b0f5b45f66..f1f34c9499 100644
--- a/examples/multimodal/components/prefill_worker.py
+++ b/examples/multimodal/components/prefill_worker.py
@@ -246,8 +246,8 @@ async def generate(self, request: RemotePrefillRequest):
             self._loaded_metadata.add(engine_id)
 
         # To make sure the decode worker can pre-allocate the memory with the correct size for the prefill worker to transfer the kv cache,
-        # some placeholder dummy tokens were inserted based on the embedding size in the worker.py.
-        # The structure of the prompt is "<s>\nUSER: <image>\n<prompt>\nASSISTANT:", need to remove the dummy tokens after the image token.
+        # some placeholder dummy tokens are inserted based on the embedding size in the worker.py.
+        # TODO: make this more flexible/model-dependent
         IMAGE_TOKEN_ID = 32000
         embedding_size = embeddings.shape[1]
         padding_size = embedding_size - 1

diff --git a/examples/multimodal/components/processor.py b/examples/multimodal/components/processor.py
index e794a82d10..f38b23db80 100644
--- a/examples/multimodal/components/processor.py
+++ b/examples/multimodal/components/processor.py
@@ -187,11 +187,12 @@ async def _generate_responses(
     # The generate endpoint will be used by the frontend to handle incoming requests.
     @endpoint()
     async def generate(self, raw_request: MultiModalRequest):
+        prompt = str(self.engine_args.prompt_template).replace(
+            "<prompt>", raw_request.messages[0].content[0].text
+        )
         msg = {
             "role": "user",
-            "content": "USER: <image>\nQuestion:"
-            + raw_request.messages[0].content[0].text
-            + " Answer:",
+            "content": prompt,
         }
 
         chat_request = ChatCompletionRequest(

diff --git a/examples/multimodal/configs/agg.yaml b/examples/multimodal/configs/agg.yaml
index b1b2620056..22ae6083b0 100644
--- a/examples/multimodal/configs/agg.yaml
+++ b/examples/multimodal/configs/agg.yaml
@@ -20,6 +20,7 @@ Common:
 Processor:
   router: round-robin
   common-configs: [model, block-size, max-model-len]
+  prompt-template: "USER: <image>\n<prompt> ASSISTANT:"
 
 VllmDecodeWorker:
   enforce-eager: true

From 8bcab41c3b6223a9d9446936e05155ba6cf7c957 Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Mon, 2 Jun 2025 16:26:08 -0700
Subject: [PATCH 2/8] docs: add deployment docs to multimodal example

---
 examples/multimodal/README.md | 80 ++++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 1 deletion(-)

diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md
index be2ce56f97..df6c8fabb3 100644
--- a/examples/multimodal/README.md
+++ b/examples/multimodal/README.md
@@ -97,7 +97,7 @@ You should see a response similar to this:
 - processor: Tokenizes the prompt and passes it to the decode worker.
 - frontend: HTTP endpoint to handle incoming requests.
 
-### Deployment
+### Local Serving
 
 In this deployment, we have three workers, [encode_worker](components/encode_worker.py), [decode_worker](components/decode_worker.py), and [prefill_worker](components/prefill_worker.py).
 For the Llava model, embeddings are only required during the prefill stage. As such, the encode worker is connected directly to the prefill worker.
@@ -158,3 +158,81 @@ You should see a response similar to this:
 ```json
 {"id": "c1774d61-3299-4aa3-bea1-a0af6c055ba8", "object": "chat.completion", "created": 1747725645, "model": "llava-hf/llava-1.5-7b-hf", "choices": [{"index": 0, "message": {"role": "assistant", "content": " This image shows a passenger bus traveling down the road near power lines and trees. The bus displays a sign that says \"OUT OF SERVICE\" on its front."}, "finish_reason": "stop"}]}
 ```
+
+## Deployment with Dynamo Operator
+
+These multimodal examples can be deployed to a Kubernetes cluster using [Dynamo Cloud](../../docs/guides/dynamo_deploy/dynamo_cloud.md) and the Dynamo CLI.
+
+### Prerequisites
+
+You must have first followed the instructions in [deploy/cloud/helm/README.md](../../deploy/cloud/helm/README.md) to install Dynamo Cloud on your Kubernetes cluster.
+
+**Note**: The `KUBE_NS` variable in the following steps must match the Kubernetes namespace where you installed Dynamo Cloud. You must also expose the `dynamo-store` service externally. This will be the endpoint the CLI uses to interface with Dynamo Cloud.
+
+### Deployment Steps
+
+For detailed deployment instructions, please refer to the [Operator Deployment Guide](../../docs/guides/dynamo_deploy/operator_deployment.md).
+The following are the specific commands for the multimodal examples:
+
+```bash
+# Set your project root directory
+export PROJECT_ROOT=$(pwd)
+
+# Configure environment variables (see operator_deployment.md for details)
+export KUBE_NS=dynamo-cloud
+export DYNAMO_CLOUD=http://localhost:8080  # If using port-forward
+# OR
+# export DYNAMO_CLOUD=https://dynamo-cloud.nvidia.com  # If using Ingress/VirtualService
+
+# Build the Dynamo base image (see operator_deployment.md for details)
+export DYNAMO_IMAGE=<your-registry>/<your-image-name>:<your-tag>
+
+# Build the service
+cd $PROJECT_ROOT/examples/multimodal
+DYNAMO_TAG=$(dynamo build graphs.disagg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
+# For aggregated serving:
+# DYNAMO_TAG=$(dynamo build graphs.agg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
+
+# Deploy to Kubernetes
+export DEPLOYMENT_NAME=multimodal-disagg
+# For disaggregated serving:
+dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/disagg.yaml
+# For aggregated serving:
+# export DEPLOYMENT_NAME=multimodal-agg
+# dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
+```
+
+**Note**: To avoid rate limiting from unauthenticated requests to HuggingFace (HF), you can provide your `HF_TOKEN` as a secret in your deployment. See the [operator deployment guide](../../docs/guides/dynamo_deploy/operator_deployment.md#referencing-secrets-in-your-deployment) for instructions on referencing secrets like `HF_TOKEN` in your deployment configuration.
+
+**Note**: Optionally add `--Planner.no-operation=false` at the end of the deployment command to enable the planner component to take scaling actions on your deployment.
+
+### Testing the Deployment
+
+Once the deployment is complete, you can test it using:
+
+```bash
+# Find your frontend pod
+export FRONTEND_POD=$(kubectl get pods -n ${KUBE_NS} | grep "${DEPLOYMENT_NAME}-frontend" | sort -k1 | tail -n1 | awk '{print $1}')
+
+# Forward the pod's port to localhost
+kubectl port-forward pod/$FRONTEND_POD 8000:8000 -n ${KUBE_NS}
+
+# Test the API endpoint
+curl localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "llava-hf/llava-1.5-7b-hf",
+    "messages": [
+      {
+        "role": "user",
+        "content": [
+          { "type": "text", "text": "What is in this image?" },
+          { "type": "image_url", "image_url": { "url": "http://images.cocodataset.org/test2017/000000155781.jpg" } }
+        ]
+      }
+    ],
+    "max_tokens": 300,
+    "stream": false
+  }'
+```
+
+For more details on managing deployments, testing, and troubleshooting, please refer to the [Operator Deployment Guide](../../docs/guides/dynamo_deploy/operator_deployment.md).
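An aside before the next patch: the placeholder-token workaround that patch 1's comments describe is easy to miss in diff form. The sketch below is a hedged summary, not the workers' actual code — the real implementations operate on vLLM request objects, and the helper names here are illustrative. The token ids are the ones named in the patch (`IMAGE_TOKEN_ID = 32000`, `DUMMY_TOKEN_ID = 0`).

```python
# Sketch of the dummy-token workaround from patch 1. The decode worker
# pads the prompt so its KV-cache pre-allocation matches the sequence
# the prefill worker will write; the prefill worker strips that padding
# again before running the model. Helper names are illustrative.
IMAGE_TOKEN_ID = 32000  # llava's "<image>" token, per the patch comments
DUMMY_TOKEN_ID = 0

def pad_for_prefill(prompt_token_ids: list[int], embedding_size: int) -> list[int]:
    # Decode side: the "<image>" token already occupies one position, so
    # only (embedding_size - 1) dummy tokens are inserted after it.
    idx = prompt_token_ids.index(IMAGE_TOKEN_ID)
    return (
        prompt_token_ids[: idx + 1]
        + [DUMMY_TOKEN_ID] * (embedding_size - 1)
        + prompt_token_ids[idx + 1 :]
    )

def strip_padding(prompt_token_ids: list[int], embedding_size: int) -> list[int]:
    # Prefill side: drop the same (embedding_size - 1) dummy tokens,
    # leaving a single "<image>" token in the prompt (padding_size in
    # prefill_worker.py is likewise embedding_size - 1).
    idx = prompt_token_ids.index(IMAGE_TOKEN_ID)
    return prompt_token_ids[: idx + 1] + prompt_token_ids[idx + 1 + (embedding_size - 1) :]
```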
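Likewise, before patch 3 wires `--prompt-template` into the engine args, it helps to see what the substitution from patch 1's `processor.py` amounts to. This is a minimal sketch assuming, as the configs suggest, a template with literal `<image>` and `<prompt>` placeholders; `apply_prompt_template` is an illustrative helper, not part of the actual worker API.

```python
# Minimal sketch of the processor's template substitution (patch 1),
# assuming literal "<image>"/"<prompt>" placeholders in the template.
def apply_prompt_template(template: str, user_text: str) -> str:
    # Only "<prompt>" is substituted here; "<image>" stays in the prompt
    # so it can later be tokenized to IMAGE_TOKEN_ID (32000), per the
    # worker comments in patch 1.
    return template.replace("<prompt>", user_text)

prompt = apply_prompt_template(
    "USER: <image>\n<prompt> ASSISTANT:", "What is in this image?"
)
print(prompt)
# USER: <image>
# What is in this image? ASSISTANT:
```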
From 93f2f2cf7661e8f98ba8a6762f24a20be2327d82 Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Tue, 3 Jun 2025 15:50:51 -0700
Subject: [PATCH 3/8] feat: add prompt template to engine args, fix gpu

---
 examples/multimodal/configs/agg.yaml    | 6 +++---
 examples/multimodal/configs/disagg.yaml | 7 ++++---
 examples/multimodal/utils/vllm.py       | 7 +++++++
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/examples/multimodal/configs/agg.yaml b/examples/multimodal/configs/agg.yaml
index 22ae6083b0..344a6e46c1 100644
--- a/examples/multimodal/configs/agg.yaml
+++ b/examples/multimodal/configs/agg.yaml
@@ -19,8 +19,8 @@ Common:
 
 Processor:
   router: round-robin
-  common-configs: [model, block-size, max-model-len]
   prompt-template: "USER: <image>\n<prompt> ASSISTANT:"
+  common-configs: [model, block-size, max-model-len]
 
 VllmDecodeWorker:
   enforce-eager: true
@@ -31,7 +31,7 @@ VllmDecodeWorker:
   ServiceArgs:
     workers: 1
     resources:
-      gpu: 1
+      gpu: '1'
   common-configs: [model, block-size, max-model-len]
 
 VllmEncodeWorker:
@@ -40,5 +40,5 @@ VllmEncodeWorker:
   ServiceArgs:
     workers: 1
     resources:
-      gpu: 1
+      gpu: '1'
   common-configs: [model]

diff --git a/examples/multimodal/configs/disagg.yaml b/examples/multimodal/configs/disagg.yaml
index e6dcdb11b6..6c6fbbb200 100644
--- a/examples/multimodal/configs/disagg.yaml
+++ b/examples/multimodal/configs/disagg.yaml
@@ -20,6 +20,7 @@ Common:
 
 Processor:
   router: round-robin
+  prompt-template: "USER: <image>\n<prompt> ASSISTANT:"
   common-configs: [model, block-size]
 
 VllmDecodeWorker:
@@ -30,7 +31,7 @@ VllmDecodeWorker:
   ServiceArgs:
     workers: 1
     resources:
-      gpu: 1
+      gpu: '1'
   common-configs: [model, block-size, max-model-len, kv-transfer-config]
 
 VllmPrefillWorker:
@@ -38,7 +39,7 @@ VllmPrefillWorker:
   ServiceArgs:
     workers: 1
     resources:
-      gpu: 1
+      gpu: '1'
   common-configs: [model, block-size, max-model-len, kv-transfer-config]
 
 VllmEncodeWorker:
@@ -47,5 +48,5 @@ VllmEncodeWorker:
   ServiceArgs:
     workers: 1
     resources:
-      gpu: 1
+      gpu: '1'
   common-configs: [model]

diff --git a/examples/multimodal/utils/vllm.py b/examples/multimodal/utils/vllm.py
index bbb489757f..7b6b1d888c 100644
--- a/examples/multimodal/utils/vllm.py
+++ b/examples/multimodal/utils/vllm.py
@@ -51,6 +51,12 @@ def parse_vllm_args(service_name, prefix) -> AsyncEngineArgs:
         default=3,
         help="Maximum queue size for remote prefill. If the prefill queue size is greater than this value, prefill phase of the incoming request will be executed locally.",
     )
+    parser.add_argument(
+        "--prompt-template",
+        type=str,
+        default="",
+        help="Prompt template to use for the model",
+    )
     parser = AsyncEngineArgs.add_cli_args(parser)
     args = parser.parse_args(vllm_args)
     engine_args = AsyncEngineArgs.from_cli_args(args)
@@ -59,4 +65,5 @@ def parse_vllm_args(service_name, prefix) -> AsyncEngineArgs:
     engine_args.conditional_disagg = args.conditional_disagg
     engine_args.max_local_prefill_length = args.max_local_prefill_length
     engine_args.max_prefill_queue_size = args.max_prefill_queue_size
+    engine_args.prompt_template = args.prompt_template
     return engine_args

From accf06a697cedc69b8cd6eb8c697b64668f16f1a Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Tue, 3 Jun 2025 16:28:47 -0700
Subject: [PATCH 4/8] feat: load decode worker vision model just like encode worker's

---
 examples/multimodal/components/decode_worker.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/multimodal/components/decode_worker.py b/examples/multimodal/components/decode_worker.py
index 52c3a53401..ab95c00d13 100644
--- a/examples/multimodal/components/decode_worker.py
+++ b/examples/multimodal/components/decode_worker.py
@@ -135,8 +135,10 @@ async def async_init(self):
             self.disaggregated_router = None
 
         model = LlavaForConditionalGeneration.from_pretrained(
-            self.engine_args.model
-        )
+            self.engine_args.model,
+            device_map="auto",
+            torch_dtype=torch.bfloat16,
+        ).eval()
         vision_tower = model.vision_tower
         self.embedding_size = (
             vision_tower.vision_model.embeddings.position_embedding.num_embeddings

From 69831adf00006d36f0efdfb4d64337dbf8576b59 Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Tue, 3 Jun 2025 17:01:09 -0700
Subject: [PATCH 5/8] docs: have agg be the default in multimodal example

---
 examples/multimodal/README.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md
index df6c8fabb3..6fae04c456 100644
--- a/examples/multimodal/README.md
+++ b/examples/multimodal/README.md
@@ -188,17 +188,17 @@ export DYNAMO_IMAGE=<your-registry>/<your-image-name>:<your-tag>
 
 # Build the service
 cd $PROJECT_ROOT/examples/multimodal
-DYNAMO_TAG=$(dynamo build graphs.disagg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
-# For aggregated serving:
-# DYNAMO_TAG=$(dynamo build graphs.agg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
+DYNAMO_TAG=$(dynamo build graphs.agg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
+# For disaggregated serving:
+# DYNAMO_TAG=$(dynamo build graphs.disagg:Frontend | grep "Successfully built" | awk '{ print $NF }' | sed 's/\.$//')
 
 # Deploy to Kubernetes
-export DEPLOYMENT_NAME=multimodal-disagg
-# For disaggregated serving:
-dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/disagg.yaml
+export DEPLOYMENT_NAME=multimodal-agg
 # For aggregated serving:
-# export DEPLOYMENT_NAME=multimodal-agg
-# dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
+dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
+# For disaggregated serving:
+# export DEPLOYMENT_NAME=multimodal-disagg
+# dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/disagg.yaml
 ```
 
 **Note**: To avoid rate limiting from unauthenticated requests to HuggingFace (HF), you can provide your `HF_TOKEN` as a secret in your deployment. See the [operator deployment guide](../../docs/guides/dynamo_deploy/operator_deployment.md#referencing-secrets-in-your-deployment) for instructions on referencing secrets like `HF_TOKEN` in your deployment configuration.
From 14135c3e219bd68a98ce919c90054ad893f7650f Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Wed, 4 Jun 2025 06:38:06 -0700
Subject: [PATCH 6/8] docs: minor doc change

---
 examples/multimodal/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md
index 6fae04c456..5e2e2bcc76 100644
--- a/examples/multimodal/README.md
+++ b/examples/multimodal/README.md
@@ -195,10 +195,10 @@ DYNAMO_TAG=$(dynamo build graphs.agg:Frontend | grep "Successfully built" | awk
 # Deploy to Kubernetes
 export DEPLOYMENT_NAME=multimodal-agg
 # For aggregated serving:
-dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
+dynamo deploy $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
 # For disaggregated serving:
 # export DEPLOYMENT_NAME=multimodal-disagg
-# dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/disagg.yaml
+# dynamo deploy $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/disagg.yaml
 ```
 
 **Note**: To avoid rate limiting from unauthenticated requests to HuggingFace (HF), you can provide your `HF_TOKEN` as a secret in your deployment. See the [operator deployment guide](../../docs/guides/dynamo_deploy/operator_deployment.md#referencing-secrets-in-your-deployment) for instructions on referencing secrets like `HF_TOKEN` in your deployment configuration.

From 3f82cd2cb6d38f6b629e53c536d04747db6e9704 Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Wed, 4 Jun 2025 06:52:24 -0700
Subject: [PATCH 7/8] docs: minor doc change

---
 examples/llm/README.md        | 3 ++-
 examples/multimodal/README.md | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/llm/README.md b/examples/llm/README.md
index d17aa94e48..af3415ad44 100644
--- a/examples/llm/README.md
+++ b/examples/llm/README.md
@@ -225,7 +225,8 @@ dynamo deployment create $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
 
 ### Testing the Deployment
 
-Once the deployment is complete, you can test it using:
+Once the deployment is complete, you can test it. If you have ingress available for your deployment, you can directly call the URL returned
+by `dynamo deployment get ${DEPLOYMENT_NAME}` and skip the steps to find and forward the frontend pod.
 
 ```bash
 # Find your frontend pod

diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md
index 5e2e2bcc76..441028c6d7 100644
--- a/examples/multimodal/README.md
+++ b/examples/multimodal/README.md
@@ -207,7 +207,8 @@ dynamo deploy $DYNAMO_TAG -n $DEPLOYMENT_NAME -f ./configs/agg.yaml
 
 ### Testing the Deployment
 
-Once the deployment is complete, you can test it using:
+Once the deployment is complete, you can test it. If you have ingress available for your deployment, you can directly call the URL returned
+by `dynamo deployment get ${DEPLOYMENT_NAME}` and skip the steps to find and forward the frontend pod.
 
 ```bash
 # Find your frontend pod

From 555d447ca4c41b1c5c81ec5d00c0dfce500284db Mon Sep 17 00:00:00 2001
From: Hannah Zhang
Date: Wed, 4 Jun 2025 08:25:02 -0700
Subject: [PATCH 8/8] docs: minor doc change

---
 examples/multimodal/README.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/multimodal/README.md b/examples/multimodal/README.md
index 441028c6d7..76f5e843ce 100644
--- a/examples/multimodal/README.md
+++ b/examples/multimodal/README.md
@@ -28,16 +28,16 @@ The examples are based on the [llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) model.
 - processor: Tokenizes the prompt and passes it to the decode worker.
 - frontend: HTTP endpoint to handle incoming requests.
 
-### Deployment
+### Graph
 
-In this deployment, we have two workers, [encode_worker](components/encode_worker.py) and [decode_worker](components/decode_worker.py).
+In this graph, we have two workers, [encode_worker](components/encode_worker.py) and [decode_worker](components/decode_worker.py).
 The encode worker is responsible for encoding the image and passing the embeddings to the decode worker via a combination of NATS and RDMA.
 The work complete event is sent via NATS, while the embeddings tensor is transferred via RDMA through the NIXL interface.
 Its decode worker then prefills and decodes the prompt, just like the [LLM aggregated serving](../llm/README.md) example.
 By separating the encode from the prefill and decode stages, we can have a more flexible deployment and scale the
 encode worker independently from the prefill and decode workers if needed.
 
-This figure shows the flow of the deployment:
+This figure shows the flow of the graph:
 ```mermaid
 flowchart LR
   HTTP --> processor
@@ -89,7 +89,7 @@ You should see a response similar to this:
 {"id": "c37b946e-9e58-4d54-88c8-2dbd92c47b0c", "object": "chat.completion", "created": 1747725277, "model": "llava-hf/llava-1.5-7b-hf", "choices": [{"index": 0, "message": {"role": "assistant", "content": " In the image, there is a city bus parked on a street, with a street sign nearby on the right side. The bus appears to be stopped out of service. The setting is in a foggy city, giving it a slightly moody atmosphere."}, "finish_reason": "stop"}]}
 ```
 
-## Multimodal Disaggregated serving
+## Multimodal Disaggregated Serving
 
 ### Components
 
@@ -97,16 +97,16 @@ You should see a response similar to this:
 - processor: Tokenizes the prompt and passes it to the decode worker.
 - frontend: HTTP endpoint to handle incoming requests.
 
-### Local Serving
+### Graph
 
-In this deployment, we have three workers, [encode_worker](components/encode_worker.py), [decode_worker](components/decode_worker.py), and [prefill_worker](components/prefill_worker.py).
+In this graph, we have three workers, [encode_worker](components/encode_worker.py), [decode_worker](components/decode_worker.py), and [prefill_worker](components/prefill_worker.py).
 For the Llava model, embeddings are only required during the prefill stage. As such, the encode worker is connected directly to the prefill worker.
 The encode worker is responsible for encoding the image and passing the embeddings to the prefill worker via a combination of NATS and RDMA.
 Its work complete event is sent via NATS, while the embeddings tensor is transferred via RDMA through the NIXL interface.
 The prefill worker performs the prefilling step and forwards the KV cache to the decode worker for decoding.
 For more details on the roles of the prefill and decode workers, refer to the [LLM disaggregated serving](../llm/README.md) example.
 
-This figure shows the flow of the deployment:
+This figure shows the flow of the graph:
 ```mermaid
 flowchart LR
   HTTP --> processor