Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions tensorrt_llm/inputs/registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,21 @@ def __init__(self,
self._use_fast: bool = kwargs.get('use_fast', True)
self._multimodal_hashing_supported: Optional[bool] = None

def attach_multimodal_embeddings(
    self,
    inputs: TextPrompt,
    multimodal_embedding: Dict[str, List[torch.Tensor]],
    sampling_params: SamplingParams,
) -> Tuple[List[int], Optional[ExtraProcessedInputs]]:
    """Process pre-computed multimodal embeddings supplied by the caller.

    Unlike ``__call__``, which consumes ``inputs["multi_modal_data"]``,
    this hook is the entry point for ``inputs["multi_modal_embeddings"]``.
    The base implementation does not accept external embeddings, so
    subclasses that support them must override this method.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError(
        "Input processor does not support multimodal embedding input")

@property
@abstractmethod
def processor(self) -> AutoProcessor:
Expand Down
11 changes: 7 additions & 4 deletions tensorrt_llm/llmapi/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import weakref
from collections.abc import Mapping
from pathlib import Path
from typing import Any, List, Literal, Optional, Sequence, Union
from typing import Any, List, Literal, Optional, Sequence, Union, cast

import transformers
from tqdm import tqdm
Expand All @@ -17,7 +17,8 @@
from tensorrt_llm._utils import mpi_disabled
from tensorrt_llm.inputs.data import TextPrompt
from tensorrt_llm.inputs.multimodal import MultimodalInput, MultimodalParams
from tensorrt_llm.inputs.registry import DefaultInputProcessor
from tensorrt_llm.inputs.registry import (BaseMultimodalInputProcessor,
DefaultInputProcessor)
from tensorrt_llm.llmapi import tracing
from tensorrt_llm.metrics.enums import MetricNames

Expand Down Expand Up @@ -458,8 +459,10 @@ def generate_async(
inputs, sampling_params)
elif 'multi_modal_embeddings' in inputs:
mm_embedding_info = inputs['multi_modal_embeddings']
prompt_token_ids, extra_processed_inputs = self.input_processor.attach_multimodal_embeddings(
inputs, mm_embedding_info, sampling_params)
prompt_token_ids, extra_processed_inputs = cast(
    BaseMultimodalInputProcessor,
    self.input_processor).attach_multimodal_embeddings(
        inputs, mm_embedding_info, sampling_params)
else:
with nvtx_range_debug("input_processor"):
prompt_token_ids, extra_processed_inputs = self.input_processor(
Expand Down