|
583 | 583 | "hidream_i1_fast": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.", |
584 | 584 | "hidream_i1_full": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.", |
585 | 585 | "hidream_e1_full": "Edit images with HiDream E1 - Professional natural language image editing model.", |
586 | | - "sd3.5_simple_example": "Generate images using SD 3.5.", |
587 | | - "sd3.5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.", |
588 | | - "sd3.5_large_depth": "Generate images guided by depth information using SD 3.5.", |
589 | | - "sd3.5_large_blur": "Generate images guided by blurred reference images using SD 3.5.", |
| 586 | + "sd3_5_simple_example": "Generate images using SD 3.5.", |
| 587 | + "sd3_5_large_canny_controlnet_example": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.", |
| 588 | + "sd3_5_large_depth": "Generate images guided by depth information using SD 3.5.", |
| 589 | + "sd3_5_large_blur": "Generate images guided by blurred reference images using SD 3.5.", |
590 | 590 | "sdxl_simple_example": "Generate high-quality images using SDXL.", |
591 | 591 | "sdxl_refiner_prompt_example": "Enhance SDXL images using refiner models.", |
592 | 592 | "sdxl_revision_text_prompts": "Generate images by transferring concepts from reference images using SDXL Revision.", |
|
602 | 602 | "video_wan_vace_outpainting": "Generate extended videos by expanding video size using Wan VACE outpainting.", |
603 | 603 | "video_wan_vace_flf2v": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.", |
604 | 604 | "video_wan_vace_inpainting": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.", |
605 | | - "video_wan2.1_fun_camera_v1.1_1.3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.", |
606 | | - "video_wan2.1_fun_camera_v1.1_14B": "Generate high-quality videos with advanced camera control using the full 14B model", |
| 605 | + "video_wan2_1_fun_camera_v1_1_1_3B": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.", |
| 606 | + "video_wan2_1_fun_camera_v1_1_14B": "Generate high-quality videos with advanced camera control using the full 14B model.", |
607 | 607 | "text_to_video_wan": "Generate videos from text prompts using Wan 2.1.", |
608 | 608 | "image_to_video_wan": "Generate videos from images using Wan 2.1.", |
609 | | - "wan2.1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.", |
610 | | - "wan2.1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.", |
611 | | - "wan2.1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.", |
| 609 | + "wan2_1_fun_inp": "Generate videos from start and end frames using Wan 2.1 inpainting.", |
| 610 | + "wan2_1_fun_control": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.", |
| 611 | + "wan2_1_flf2v_720_f16": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.", |
612 | 612 | "ltxv_text_to_video": "Generate videos from text prompts.", |
613 | 613 | "ltxv_image_to_video": "Generate videos from still images.", |
614 | 614 | "mochi_text_to_video_example": "Generate videos from text prompts using Mochi model.", |
|
630 | 630 | "api_runway_reference_to_image": "Generate new images based on reference styles and compositions with Runway's AI.", |
631 | 631 | "api_stability_ai_stable_image_ultra_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", |
632 | 632 | "api_stability_ai_i2i": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.", |
633 | | - "api_stability_ai_sd3.5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", |
634 | | - "api_stability_ai_sd3.5_i2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", |
| 633 | + "api_stability_ai_sd3_5_t2i": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", |
| 634 | + "api_stability_ai_sd3_5_i2i": "Transform input images with high quality output and excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.", |
635 | 635 | "api_ideogram_v3_t2i": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.", |
636 | 636 | "api_openai_image_1_t2i": "Generate images from text prompts using OpenAI GPT Image 1 API.", |
637 | 637 | "api_openai_image_1_i2i": "Generate images from input images using OpenAI GPT Image 1 API.", |
|
735 | 735 | "hidream_i1_fast": "HiDream I1 Fast", |
736 | 736 | "hidream_i1_full": "HiDream I1 Full", |
737 | 737 | "hidream_e1_full": "HiDream E1 Full", |
738 | | - "sd3.5_simple_example": "SD3.5 Simple", |
739 | | - "sd3.5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet", |
740 | | - "sd3.5_large_depth": "SD3.5 Large Depth", |
741 | | - "sd3.5_large_blur": "SD3.5 Large Blur", |
| 738 | + "sd3_5_simple_example": "SD3.5 Simple", |
| 739 | + "sd3_5_large_canny_controlnet_example": "SD3.5 Large Canny ControlNet", |
| 740 | + "sd3_5_large_depth": "SD3.5 Large Depth", |
| 741 | + "sd3_5_large_blur": "SD3.5 Large Blur", |
742 | 742 | "sdxl_simple_example": "SDXL Simple", |
743 | 743 | "sdxl_refiner_prompt_example": "SDXL Refiner Prompt", |
744 | 744 | "sdxl_revision_text_prompts": "SDXL Revision Text Prompts", |
|
754 | 754 | "video_wan_vace_outpainting": "Wan VACE Outpainting", |
755 | 755 | "video_wan_vace_flf2v": "Wan VACE First-Last Frame", |
756 | 756 | "video_wan_vace_inpainting": "Wan VACE Inpainting", |
757 | | - "video_wan2.1_fun_camera_v1.1_1.3B": "Wan 2.1 Fun Camera 1.3B", |
758 | | - "video_wan2.1_fun_camera_v1.1_14B": "Wan 2.1 Fun Camera 14B", |
| 757 | + "video_wan2_1_fun_camera_v1_1_1_3B": "Wan 2.1 Fun Camera 1.3B", |
| 758 | + "video_wan2_1_fun_camera_v1_1_14B": "Wan 2.1 Fun Camera 14B", |
759 | 759 | "text_to_video_wan": "Wan 2.1 Text to Video", |
760 | 760 | "image_to_video_wan": "Wan 2.1 Image to Video", |
761 | | - "wan2.1_fun_inp": "Wan 2.1 Inpainting", |
762 | | - "wan2.1_fun_control": "Wan 2.1 ControlNet", |
763 | | - "wan2.1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16", |
| 761 | + "wan2_1_fun_inp": "Wan 2.1 Inpainting", |
| 762 | + "wan2_1_fun_control": "Wan 2.1 ControlNet", |
| 763 | + "wan2_1_flf2v_720_f16": "Wan 2.1 FLF2V 720p F16", |
764 | 764 | "ltxv_text_to_video": "LTXV Text to Video", |
765 | 765 | "ltxv_image_to_video": "LTXV Image to Video", |
766 | 766 | "mochi_text_to_video_example": "Mochi Text to Video", |
|
782 | 782 | "api_runway_reference_to_image": "Runway: Reference to Image", |
783 | 783 | "api_stability_ai_stable_image_ultra_t2i": "Stability AI: Stable Image Ultra Text to Image", |
784 | 784 | "api_stability_ai_i2i": "Stability AI: Image to Image", |
785 | | - "api_stability_ai_sd3.5_t2i": "Stability AI: SD3.5 Text to Image", |
786 | | - "api_stability_ai_sd3.5_i2i": "Stability AI: SD3.5 Image to Image", |
| 785 | + "api_stability_ai_sd3_5_t2i": "Stability AI: SD3.5 Text to Image", |
| 786 | + "api_stability_ai_sd3_5_i2i": "Stability AI: SD3.5 Image to Image", |
787 | 787 | "api_ideogram_v3_t2i": "Ideogram V3: Text to Image", |
788 | 788 | "api_openai_image_1_t2i": "OpenAI: GPT-Image-1 Text to Image", |
789 | 789 | "api_openai_image_1_i2i": "OpenAI: GPT-Image-1 Image to Image", |
|
0 commit comments