
Error when loading multiple LoRA adapters in the Qwen2.5-VL-3B model #2535

@benjamintam0607

Description


System Info

PEFT version: 0.15.2
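(In case fuller environment details are helpful, a small convenience snippet to print the relevant library versions, assuming the same Python environment as the reproduction below:)

import torch
import transformers
import peft

# Print the versions of the libraries involved in the reproduction.
print("peft:", peft.__version__)
print("transformers:", transformers.__version__)
print("torch:", torch.__version__)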

Here are the code and the error message. Thanks for your time!

from transformers import LlamaTokenizer, LlamaForCausalLM
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct",
    # load_in_4bit=True,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)

from peft import PeftModel, PeftConfig, LoraConfig, get_peft_model

lora_path_1 = "path1"
lora_config_1 = LoraConfig.from_pretrained(lora_path_1)

lora_path_2 = "path2"
lora_config_2 = LoraConfig.from_pretrained(lora_path_2)

model = get_peft_model(model, lora_config_1, adapter_name="adapter1")
model.load_adapter(model_id=lora_path_2, adapter_name="adapter2")

ValueError                                Traceback (most recent call last)
Cell In[1], line 32
     29 lora_config_2 = LoraConfig.from_pretrained(lora_path_2)
     31 model = get_peft_model(model, lora_config_1, adapter_name="adapter1")
---> 32 model.load_adapter(model_id=lora_path_2, adapter_name="adapter2")
     35 min_pixels = 256*28*28
     36 max_pixels = 768*28*28

File ~/anaconda3/envs/qwenvllm/lib/python3.10/site-packages/peft/peft_model.py:1270, in PeftModel.load_adapter(self, model_id, adapter_name, is_trainable, torch_device, autocast_adapter_dtype, ephemeral_gpu_offload, low_cpu_mem_usage, **kwargs)
   1268     self._check_new_adapter_config(peft_config, is_trainable=is_trainable)
   1269     peft_config.inference_mode = not is_trainable
-> 1270     self.add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage)
   1272 adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
   1274 # load the weights into the model

File ~/anaconda3/envs/qwenvllm/lib/python3.10/site-packages/peft/peft_model.py:939, in PeftModel.add_adapter(self, adapter_name, peft_config, low_cpu_mem_usage)
    937     else:
    938         self.peft_config[adapter_name] = peft_config
--> 939         self.base_model.inject_adapter(
    940             self.base_model.model, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage
    941         )
    942 except Exception:  # something went wrong, roll back
    943     if adapter_name in self.peft_config:

File ~/anaconda3/envs/qwenvllm/lib/python3.10/site-packages/peft/tuners/tuners_utils.py:508, in BaseTuner.inject_adapter(self, model, adapter_name, autocast_adapter_dtype, low_cpu_mem_usage)
    506         ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
    507         with ctx():
--> 508             self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
    510 if not self.targeted_module_names and not uses_dummy_target_modules:
    511     if excluded_modules and not unmatched_modules:
    512         # All targeted modules were excluded

File ~/anaconda3/envs/qwenvllm/lib/python3.10/site-packages/peft/tuners/lora/model.py:237, in LoraModel._create_and_replace(self, lora_config, adapter_name, target, target_name, parent, current_key)
    235 else:
    236     device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None
--> 237     new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs)
    238     if adapter_name not in self.active_adapters:
    239         # adding an additional adapter: it is not automatically trainable
    240         new_module.requires_grad_(False)

File ~/anaconda3/envs/qwenvllm/lib/python3.10/site-packages/peft/tuners/lora/model.py:348, in LoraModel._create_new_module(lora_config, adapter_name, target, **kwargs)
    344         break
    346 if new_module is None:
    347     # no module could be matched
--> 348     raise ValueError(
    349         f"Target module {target} is not supported. Currently, only the following modules are supported: "
    350         "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, "
    351         "`transformers.pytorch_utils.Conv1D`, `torch.nn.MultiheadAttention.`."
    352     )
    354 return new_module

ValueError: Target module ModuleDict(
  (adapter1): Identity()
  (adapter2): Identity()
) is not supported. Currently, only the following modules are supported: `torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, `transformers.pytorch_utils.Conv1D`, `torch.nn.MultiheadAttention`.
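For context, the ModuleDict in the error looks like one of PEFT's internal per-adapter containers (for example lora_dropout, which holds an Identity when dropout is 0), which would suggest the second adapter's target_modules pattern is also matching submodules that were injected for the first adapter. A quick way to check what each saved config targets, assuming both adapter folders contain an adapter_config.json, is a sketch like:

from peft import LoraConfig

cfg1 = LoraConfig.from_pretrained(lora_path_1)
cfg2 = LoraConfig.from_pretrained(lora_path_2)

# Compare what each adapter was trained to target; an overly broad pattern
# (one that also matches PEFT's own lora_* submodules) can trigger the
# "Target module ModuleDict(...) is not supported" error when the second
# adapter is injected on top of the first.
print("adapter1 target_modules:", cfg1.target_modules)
print("adapter2 target_modules:", cfg2.target_modules)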

Who can help?

No response

Information

  • The official example scripts
  • My own modified scripts

Tasks

  • An officially supported task in the examples folder
  • My own task or dataset (give details below)

Reproduction

from transformers import LlamaTokenizer, LlamaForCausalLM

from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch



model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct",
    # load_in_4bit=True,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)

from peft import PeftModel, PeftConfig, LoraConfig, get_peft_model

lora_path_1 = "/home/tam/project/006_llama_factory/latest/LLaMA-Factory/saves/Qwen2.5-VL-3B-Instruct/lora/20250506-aio-v5"
lora_config_1 = LoraConfig.from_pretrained(lora_path_1)

lora_path_2 = "/home/tam/project/006_llama_factory/latest/LLaMA-Factory/saves/Qwen2.5-VL-3B-Instruct/lora/20250506-aio-v1"
lora_config_2 = LoraConfig.from_pretrained(lora_path_2)

model = get_peft_model(model, lora_config_1, adapter_name="adapter1")
model.load_adapter(model_id=lora_path_2, adapter_name="adapter2")

Expected behavior

Load multiple fine-tuned LoRA adapters onto the same base model.
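For reference, the flow I expect to work follows the standard PEFT multi-adapter API. A minimal sketch, assuming both adapter directories were saved in a PEFT-compatible format:

from transformers import Qwen2_5_VLForConditionalGeneration
from peft import PeftModel
import torch

base = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Attach the first adapter, then load the second one into the same PeftModel.
model = PeftModel.from_pretrained(base, lora_path_1, adapter_name="adapter1")
model.load_adapter(lora_path_2, adapter_name="adapter2")

# Switch between the adapters at inference time.
model.set_adapter("adapter1")
# ... run generation with adapter1 ...
model.set_adapter("adapter2")
# ... run generation with adapter2 ...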
