6 changes: 4 additions & 2 deletions invokeai/app/invocations/cogview4_model_loader.py
@@ -5,7 +5,7 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import (
     GlmEncoderField,
     ModelIdentifierField,
@@ -14,6 +14,7 @@
 )
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.config import SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 @invocation_output("cogview4_model_loader_output")
@@ -38,8 +39,9 @@ class CogView4ModelLoaderInvocation(BaseInvocation):
 
     model: ModelIdentifierField = InputField(
         description=FieldDescriptions.cogview4_model,
-        ui_type=UIType.CogView4MainModel,
         input=Input.Direct,
+        ui_model_base=BaseModelType.CogView4,
+        ui_model_type=ModelType.Main,
     )
 
     def invoke(self, context: InvocationContext) -> CogView4ModelLoaderOutput:
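This file shows the pattern the rest of the PR repeats: the single ui_type=UIType.* hint, which fused the base architecture and the model kind into one enum member, is replaced by two orthogonal hints, ui_model_base and ui_model_type. A condensed before/after sketch of the pattern (the comments are explanatory and not part of the diff):

    # Before: one UIType member per (base, kind) combination.
    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.cogview4_model,
        ui_type=UIType.CogView4MainModel,
        input=Input.Direct,
    )

    # After: base architecture and model kind are declared separately, so the
    # UI can filter the model picker on both axes without needing a dedicated
    # enum member for every combination.
    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.cogview4_model,
        input=Input.Direct,
        ui_model_base=BaseModelType.CogView4,
        ui_model_type=ModelType.Main,
    )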
10 changes: 7 additions & 3 deletions invokeai/app/invocations/controlnet.py
@@ -16,7 +16,6 @@
     ImageField,
     InputField,
     OutputField,
-    UIType,
 )
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
@@ -28,6 +27,7 @@
     heuristic_resize_fast,
 )
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 class ControlField(BaseModel):
@@ -63,13 +63,17 @@ class ControlOutput(BaseInvocationOutput):
     control: ControlField = OutputField(description=FieldDescriptions.control)
 
 
-@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
+@invocation(
+    "controlnet", title="ControlNet - SD1.5, SD2, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3"
+)
 class ControlNetInvocation(BaseInvocation):
     """Collects ControlNet info to pass to other nodes"""
 
     image: ImageField = InputField(description="The control image")
     control_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
+        description=FieldDescriptions.controlnet_model,
+        ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2, BaseModelType.StableDiffusionXL],
+        ui_model_type=ModelType.ControlNet,
     )
     control_weight: Union[float, List[float]] = InputField(
         default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
273 changes: 208 additions & 65 deletions invokeai/app/invocations/fields.py

Large diffs are not rendered by default.
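The fields.py diff is collapsed, but the call sites in this PR imply roughly the following shape for the new parameters. This is a sketch under assumptions, not the actual implementation: the ui_model_base and ui_model_type names come from the call sites, while the scalar-to-list normalization and the json_schema_extra plumbing are guesses.

    # Hypothetical sketch of the new InputField hints (pydantic v2 style).
    from typing import Any, Optional, Union

    from pydantic import Field

    from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


    def InputField(
        *,
        ui_model_base: Optional[Union[BaseModelType, list[BaseModelType]]] = None,
        ui_model_type: Optional[Union[ModelType, list[ModelType]]] = None,
        **kwargs: Any,
    ) -> Any:
        # Normalize scalars to lists so the frontend always receives one shape
        # (an assumption about the real implementation).
        bases = [ui_model_base] if isinstance(ui_model_base, BaseModelType) else ui_model_base
        types = [ui_model_type] if isinstance(ui_model_type, ModelType) else ui_model_type
        return Field(
            json_schema_extra={"ui_model_base": bases, "ui_model_type": types},
            **kwargs,
        )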

8 changes: 6 additions & 2 deletions invokeai/app/invocations/flux_control_lora_loader.py
@@ -4,9 +4,10 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
 from invokeai.app.invocations.model import ControlLoRAField, ModelIdentifierField
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 @invocation_output("flux_control_lora_loader_output")
@@ -29,7 +30,10 @@ class FluxControlLoRALoaderInvocation(BaseInvocation):
     """LoRA model and Image to use with FLUX transformer generation."""
 
     lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
+        description=FieldDescriptions.control_lora_model,
+        title="Control LoRA",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.ControlLoRa,
     )
     image: ImageField = InputField(description="The image to encode.")
     weight: float = InputField(description="The weight of the LoRA.", default=1.0)
7 changes: 5 additions & 2 deletions invokeai/app/invocations/flux_controlnet.py
@@ -6,11 +6,12 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 class FluxControlNetField(BaseModel):
@@ -57,7 +58,9 @@ class FluxControlNetInvocation(BaseInvocation):
 
     image: ImageField = InputField(description="The control image")
     control_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
+        description=FieldDescriptions.controlnet_model,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.ControlNet,
     )
     control_weight: float | list[float] = InputField(
         default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
8 changes: 6 additions & 2 deletions invokeai/app/invocations/flux_ip_adapter.py
@@ -5,7 +5,7 @@
 from typing_extensions import Self
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.fields import InputField, UIType
+from invokeai.app.invocations.fields import InputField
 from invokeai.app.invocations.ip_adapter import (
     CLIP_VISION_MODEL_MAP,
     IPAdapterField,
@@ -20,6 +20,7 @@
     IPAdapterCheckpointConfig,
     IPAdapterInvokeAIConfig,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 @invocation(
@@ -36,7 +37,10 @@ class FluxIPAdapterInvocation(BaseInvocation):
 
     image: ImageField = InputField(description="The IP-Adapter image prompt(s).")
     ip_adapter_model: ModelIdentifierField = InputField(
-        description="The IP-Adapter model.", title="IP-Adapter Model", ui_type=UIType.IPAdapterModel
+        description="The IP-Adapter model.",
+        title="IP-Adapter Model",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.IPAdapter,
     )
     # Currently, the only known ViT model used by FLUX IP-Adapters is ViT-L.
     clip_vision_model: Literal["ViT-L"] = InputField(description="CLIP Vision model to use.", default="ViT-L")
9 changes: 6 additions & 3 deletions invokeai/app/invocations/flux_lora_loader.py
@@ -6,10 +6,10 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.taxonomy import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 @invocation_output("flux_lora_loader_output")
@@ -36,7 +36,10 @@ class FluxLoRALoaderInvocation(BaseInvocation):
     """Apply a LoRA model to a FLUX transformer and/or text encoder."""
 
     lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
+        description=FieldDescriptions.lora_model,
+        title="LoRA",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.LoRA,
     )
     weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
     transformer: TransformerField | None = InputField(
19 changes: 13 additions & 6 deletions invokeai/app/invocations/flux_model_loader.py
@@ -6,7 +6,7 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.t5_model_identifier import (
@@ -17,7 +17,7 @@
 from invokeai.backend.model_manager.config import (
     CheckpointConfigBase,
 )
-from invokeai.backend.model_manager.taxonomy import SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
 
 
 @invocation_output("flux_model_loader_output")
@@ -46,23 +46,30 @@ class FluxModelLoaderInvocation(BaseInvocation):
 
     model: ModelIdentifierField = InputField(
         description=FieldDescriptions.flux_model,
-        ui_type=UIType.FluxMainModel,
         input=Input.Direct,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.Main,
     )
 
     t5_encoder_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
+        description=FieldDescriptions.t5_encoder,
+        input=Input.Direct,
+        title="T5 Encoder",
+        ui_model_type=ModelType.T5Encoder,
     )
 
     clip_embed_model: ModelIdentifierField = InputField(
         description=FieldDescriptions.clip_embed_model,
-        ui_type=UIType.CLIPEmbedModel,
         input=Input.Direct,
         title="CLIP Embed",
+        ui_model_type=ModelType.CLIPEmbed,
     )
 
     vae_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
+        description=FieldDescriptions.vae_model,
+        title="VAE",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.VAE,
    )
 
     def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
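Across the loaders in this diff, the new hints take three shapes, depending on how tightly the picker should be filtered. A condensed illustration (the field names and descriptions here are hypothetical):

    # 1. Single base + type: the picker lists one architecture's models
    #    (e.g. the CogView4 and FLUX main-model loaders).
    vae_model: ModelIdentifierField = InputField(
        description="Example VAE field",
        ui_model_base=BaseModelType.Flux,
        ui_model_type=ModelType.VAE,
    )

    # 2. A list of bases: the picker spans several architectures
    #    (e.g. the SD1.5/SD2/SDXL ControlNet node).
    control_model: ModelIdentifierField = InputField(
        description="Example ControlNet field",
        ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
        ui_model_type=ModelType.ControlNet,
    )

    # 3. Type only: no base restriction at all
    #    (e.g. the T5 and CLIP embed fields in flux_model_loader.py).
    t5_encoder_model: ModelIdentifierField = InputField(
        description="Example T5 encoder field",
        ui_model_type=ModelType.T5Encoder,
    )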
4 changes: 2 additions & 2 deletions invokeai/app/invocations/flux_redux.py
@@ -18,7 +18,6 @@
     InputField,
     OutputField,
     TensorField,
-    UIType,
 )
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageField
@@ -64,7 +63,8 @@ class FluxReduxInvocation(BaseInvocation):
     redux_model: ModelIdentifierField = InputField(
         description="The FLUX Redux model to use.",
         title="FLUX Redux Model",
-        ui_type=UIType.FluxReduxModel,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.FluxRedux,
     )
     downsampling_factor: int = InputField(
         ge=1,
5 changes: 3 additions & 2 deletions invokeai/app/invocations/ip_adapter.py
@@ -5,7 +5,7 @@
 from typing_extensions import Self
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
@@ -85,7 +85,8 @@ class IPAdapterInvocation(BaseInvocation):
         description="The IP-Adapter model.",
         title="IP-Adapter Model",
         ui_order=-1,
-        ui_type=UIType.IPAdapterModel,
+        ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
+        ui_model_type=ModelType.IPAdapter,
     )
     clip_vision_model: Literal["ViT-H", "ViT-G", "ViT-L"] = InputField(
         description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",
5 changes: 3 additions & 2 deletions invokeai/app/invocations/llava_onevision_vllm.py
@@ -6,11 +6,12 @@
 from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import StringOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.llava_onevision_pipeline import LlavaOnevisionPipeline
+from invokeai.backend.model_manager.taxonomy import ModelType
 from invokeai.backend.util.devices import TorchDevice
 
 
@@ -34,7 +35,7 @@ class LlavaOnevisionVllmInvocation(BaseInvocation):
     vllm_model: ModelIdentifierField = InputField(
         title="LLaVA Model Type",
         description=FieldDescriptions.vllm_model,
-        ui_type=UIType.LlavaOnevisionModel,
+        ui_model_type=ModelType.LlavaOnevision,
     )
 
     @field_validator("images", mode="before")
10 changes: 4 additions & 6 deletions invokeai/app/invocations/metadata_linked.py
@@ -53,7 +53,7 @@
 from invokeai.app.invocations.scheduler import SchedulerOutput
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.taxonomy import ModelType, SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
 from invokeai.version import __version__
 
@@ -473,7 +473,6 @@ class MetadataToModelOutput(BaseInvocationOutput):
     model: ModelIdentifierField = OutputField(
         description=FieldDescriptions.main_model,
         title="Model",
-        ui_type=UIType.MainModel,
     )
     name: str = OutputField(description="Model Name", title="Name")
     unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -488,7 +487,6 @@ class MetadataToSDXLModelOutput(BaseInvocationOutput):
     model: ModelIdentifierField = OutputField(
         description=FieldDescriptions.main_model,
         title="Model",
-        ui_type=UIType.SDXLMainModel,
     )
     name: str = OutputField(description="Model Name", title="Name")
     unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -519,8 +517,7 @@ class MetadataToModelInvocation(BaseInvocation, WithMetadata):
         input=Input.Direct,
     )
     default_value: ModelIdentifierField = InputField(
-        description="The default model to use if not found in the metadata",
-        ui_type=UIType.MainModel,
+        description="The default model to use if not found in the metadata", ui_model_type=ModelType.Main
     )
 
     _validate_custom_label = model_validator(mode="after")(validate_custom_label)
@@ -575,7 +572,8 @@ class MetadataToSDXLModelInvocation(BaseInvocation, WithMetadata):
     )
     default_value: ModelIdentifierField = InputField(
         description="The default SDXL Model to use if not found in the metadata",
-        ui_type=UIType.SDXLMainModel,
+        ui_model_type=ModelType.Main,
+        ui_model_base=BaseModelType.StableDiffusionXL,
     )
 
     _validate_custom_label = model_validator(mode="after")(validate_custom_label)
Expand Down
Loading