diff --git a/README.md b/README.md index 7ae1c93..5911b4e 100644 --- a/README.md +++ b/README.md @@ -14,12 +14,14 @@ Demo: ## Features - Generate images using stable-diffusion-webui. - Well documented [settings](https://github.com/Trojaner/text-generation-webui-stable_diffusion/blob/main/settings.debug.yaml) file for easy configuration. -- Supports face swapping for generating consistent character images without needing loras. See [Ethical Guidelines](#ethical-guidelines) for more information. -- Multi-threading support - can handle concurrent chat sessions and requests. +- Supports face swapping for generating consistent character images. See [Ethical Guidelines](#ethical-guidelines) for more information. +- Supports generation rules for defining when and how to generate images. Can be used for character-specific parameters, trigger words for LoRAs, or for generating images based on the sentiment of the generated text, etc. ## Supported Stable Diffusion WebUI Extensions - [FaceSwapLab](https://github.com/glucauze/sd-webui-faceswaplab) - [ReActor](https://github.com/Gourieff/sd-webui-reactor) +- [FaceID](https://github.com/vladmandic/automatic) (SD.Next feature) +- [IP Adapter](https://github.com/vladmandic/automatic) (SD.Next feature) ## Installation - Open a shell with cmd_linux.sh/cmd_macos.sh/cmd_windows.bat inside text-generation-webui folder. @@ -53,7 +55,7 @@ text-generation-webui, Visual Studio Code and Python 3.10 are required for devel - Last but not least, ensure that you do not accidentally commit changes you might have made to the `settings.debug.yaml` or `launch.json` files unless intentional. ## Ethical Guidelines -This extension integrates with various face swap extensions for stable-diffusion-webui and hence allows to swap faces in the generated images. This extension is not intended to for the creation of non-consensual deepfake content. Please use this extension responsibly and do not use it to create such content. The main purpose of the face swapping functionality is to allow the creation of consistent images of text-generation-webui characters. If you are unsure whether your use case is ethical, please refrain from using this extension. +This extension integrates with various face swap extensions for stable-diffusion-webui and hence allows swapping faces in the generated images. This extension is not intended for the creation of non-consensual deepfake content. Please use this extension responsibly and do not use it to create such content. The main purpose of the face swapping functionality is to allow the creation of consistent images for text-generation-webui characters. If you are unsure whether your use case is ethical, please refrain from using this extension. The maintainers and contributors of this extension cannot be held liable for any misuse of this extension but will try to prevent such misuse by all means. diff --git a/context.py b/context.py index 7ceff57..5249345 100644 --- a/context.py +++ b/context.py @@ -1,6 +1,4 @@ -import threading from dataclasses import dataclass -from typing import cast from .params import StableDiffusionWebUiExtensionParams from .sd_client import SdWebUIApi @@ -17,7 +15,7 @@ class GenerationContext(object): # Create a thread-local state for multi-threading support in case # multiple sessions run concurrently at the same time.
-_local_state = threading.local() +_current_context: GenerationContext | None = None def get_current_context() -> GenerationContext | None: @@ -25,9 +23,7 @@ def get_current_context() -> GenerationContext | None: Gets the current generation context (thread-safe). """ - global _local_state - _local_state.current_context = getattr(_local_state, "current_context", None) - return cast(GenerationContext | None, _local_state.current_context) + return _current_context def set_current_context(context: GenerationContext | None) -> None: @@ -35,5 +31,5 @@ def set_current_context(context: GenerationContext | None) -> None: Sets the current generation context (thread-safe). """ - global _local_state - _local_state.current_context = context + global _current_context + _current_context = context diff --git a/ext_modules/image_generator.py b/ext_modules/image_generator.py index ce34024..3474b5c 100644 --- a/ext_modules/image_generator.py +++ b/ext_modules/image_generator.py @@ -29,7 +29,7 @@ def normalize_regex(regex: str) -> str: return regex -def normalize_prompt(prompt: str, do_additional_normalization: bool = False) -> str: +def normalize_prompt(prompt: str) -> str: if prompt is None: return "" @@ -192,7 +192,9 @@ def generate_html_images_for_context( except Exception as e: logger.error( - f"[SD WebUI Integration] Failed to apply rule: {rule['regex']}: {e}" + f"[SD WebUI Integration] Failed to apply rule: {rule['regex']}: %s", + e, + exc_info=True, ) context_prompt = "" @@ -223,10 +225,8 @@ def generate_html_images_for_context( .lower() ) - generated_prompt = _combine_prompts( - normalize_prompt(rules_prompt), normalize_prompt(context_prompt) - ) - generated_negative_prompt = normalize_prompt(rules_negative_prompt) + generated_prompt = _combine_prompts(rules_prompt, normalize_prompt(context_prompt)) + generated_negative_prompt = rules_negative_prompt full_prompt = _combine_prompts(generated_prompt, context.params.base_prompt) @@ -238,12 +238,14 @@ def generate_html_images_for_context( ( "[SD WebUI Integration] Using stable-diffusion-webui to generate images." 
+ ( - f"\n" - f" Prompt: {full_prompt}\n" - f" Negative Prompt: {full_negative_prompt}" + ( + f"\n" + f" Prompt: {full_prompt}\n" + f" Negative Prompt: {full_negative_prompt}" + ) + if context.params.debug_mode_enabled + else "" ) - if context.params.debug_mode_enabled - else "" ) try: @@ -252,16 +254,29 @@ def generate_html_images_for_context( negative_prompt=full_negative_prompt, seed=context.params.seed, sampler_name=context.params.sampler_name, - enable_hr=context.params.upscaling_enabled, + full_quality=True, + enable_hr=context.params.upscaling_enabled + or context.params.hires_fix_enabled, hr_scale=context.params.upscaling_scale, hr_upscaler=context.params.upscaling_upscaler, - denoising_strength=context.params.denoising_strength, + denoising_strength=context.params.hires_fix_denoising_strength, + hr_sampler=context.params.hires_fix_sampler, + hr_force=context.params.hires_fix_enabled, + hr_second_pass_steps=context.params.hires_fix_sampling_steps + if context.params.hires_fix_enabled + else 0, steps=context.params.sampling_steps, cfg_scale=context.params.cfg_scale, width=context.params.width, height=context.params.height, - restore_faces=context.params.enhance_faces_enabled, - override_settings_restore_afterwards=True, + restore_faces=context.params.restore_faces_enabled, + faceid_enabled=context.params.faceid_enabled, + faceid_scale=context.params.faceid_scale, + faceid_image=context.params.faceid_source_face, + ipadapter_enabled=context.params.ipadapter_enabled, + ipadapter_adapter=context.params.ipadapter_adapter, + ipadapter_scale=context.params.ipadapter_scale, + ipadapter_image=context.params.ipadapter_reference_image, use_async=False, ) @@ -310,7 +325,9 @@ def generate_html_images_for_context( image = response.image except Exception as e: logger.error( - f"[SD WebUI Integration] FaceSwapLab failed to swap faces: {e}" + "[SD WebUI Integration] FaceSwapLab failed to swap faces: %s", + e, + exc_info=True, ) if reactor_force_enabled or ( @@ -338,7 +355,9 @@ def generate_html_images_for_context( image = response.image except Exception as e: logger.error( - f"[SD WebUI Integration] ReActor failed to swap faces: {e}" + "[SD WebUI Integration] ReActor failed to swap faces: %s", + e, + exc_info=True, ) if context.params.save_images: @@ -386,10 +405,13 @@ def generate_html_images_for_context( def _combine_prompts(prompt1: str, prompt2: str) -> str: - if not prompt1 or prompt1 == "": + if prompt1 is None and prompt2 is None: + return "" + + if prompt1 is None or prompt1 == "": return prompt2.strip(",").strip() - if not prompt2 or prompt2 == "": + if prompt2 is None or prompt2 == "": return prompt1.strip(",").strip() return prompt1.strip(",").strip() + ", " + prompt2.strip(",").strip() diff --git a/params.py b/params.py index 18d475b..32cdd08 100644 --- a/params.py +++ b/params.py @@ -1,8 +1,9 @@ import base64 -from dataclasses import MISSING, dataclass, field, fields +from dataclasses import dataclass, field, fields from enum import Enum import requests from typing_extensions import Self +from modules.logging_colors import logger default_description_prompt = """ You are now a text generator for the Stable Diffusion AI image generator. You will generate a text prompt for it. 
@@ -31,6 +32,26 @@ def __str__(self) -> str: return self +class IPAdapterAdapter(str, Enum): + BASE = "Base" + LIGHT = "Light" + PLUS = "Plus" + PLUS_FACE = "Plus Face" + FULL_FACE = "Full face" + BASE_SDXL = "Base SDXL" + + @classmethod + def index_of(cls, mode: Self) -> int: + return list(IPAdapterAdapter).index(mode) + + @classmethod + def from_index(cls, index: int) -> Self: + return list(IPAdapterAdapter)[index] # type: ignore + + def __str__(self) -> str: + return self + + class ContinuousModePromptGenerationMode(str, Enum): STATIC = "static" GENERATED_TEXT = "generated_text" @@ -87,14 +108,29 @@ class StableDiffusionClientParams: @dataclass class StableDiffusionGenerationParams: - base_prompt: str = field(default="high resolution, detailed, realistic, vivid") - base_negative_prompt: str = field(default="ugly, disformed, disfigured, immature") - sampler_name: str = field(default="UniPC") - denoising_strength: float = field(default=0.7) + base_prompt: str = field( + default=( + "RAW photo, subject, 8k uhd, dslr, soft lighting, high quality, " + "film grain, Fujifilm XT3" + ) + ) + base_negative_prompt: str = field( + default=( + "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, " + "sketch, cartoon, drawing, anime), text, cropped, out of frame, " + "worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, " + "mutilated, extra fingers, mutated hands, poorly drawn hands, " + "poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, " + "bad proportions, extra limbs, cloned face, disfigured, gross proportions, " + "malformed limbs, missing arms, missing legs, extra arms, extra legs, " + "fused fingers, too many fingers, long neck" + ) + ) + sampler_name: str = field(default="DPM SDE") sampling_steps: int = field(default=25) width: int = field(default=512) height: int = field(default=512) - cfg_scale: float = field(default=7) + cfg_scale: float = field(default=6) clip_skip: int = field(default=1) seed: int = field(default=-1) @@ -104,7 +140,11 @@ class StableDiffusionPostProcessingParams: upscaling_enabled: bool = field(default=False) upscaling_upscaler: str = field(default="RealESRGAN 4x+") upscaling_scale: float = field(default=2) - enhance_faces_enabled: bool = field(default=False) + hires_fix_enabled: bool = field(default=False) + hires_fix_denoising_strength: float = field(default=0.2) + hires_fix_sampler: str = field(default="UniPC") + hires_fix_sampling_steps: int = field(default=10) + restore_faces_enabled: bool = field(default=False) @dataclass @@ -161,6 +201,25 @@ class UserPreferencesParams: ) # list[RegexGenerationRule] | None = field(default=None) +@dataclass +class FaceIDParams: + faceid_enabled: bool = field(default=False) + faceid_source_face: str = field( + default=("file:///extensions/stable_diffusion/assets/example_face.jpg") + ) + faceid_scale: float = field(default=0.5) + + +@dataclass +class IPAdapterParams: + ipadapter_enabled: bool = field(default=False) + ipadapter_adapter: IPAdapterAdapter = field(default=IPAdapterAdapter.BASE) + ipadapter_reference_image: str = field( + default=("file:///extensions/stable_diffusion/assets/example_face.jpg") + ) + ipadapter_scale: float = field(default=0.5) + + @dataclass class FaceSwapLabParams: faceswaplab_enabled: bool = field(default=False) @@ -179,14 +238,14 @@ class FaceSwapLabParams: faceswaplab_sort_by_size: bool = field(default=True) faceswaplab_source_face_index: int = field(default=0) faceswaplab_target_face_index: int = field(default=0) - faceswaplab_enhance_face_enabled: 
bool = field(default=False) - faceswaplab_enhance_face_model: str = field(default="CodeFormer") - faceswaplab_enhance_face_visibility: float = field(default=1) - faceswaplab_enhance_face_codeformer_weight: float = field(default=1) - faceswaplab_postprocessing_enhance_face_enabled: bool = field(default=False) - faceswaplab_postprocessing_enhance_face_model: str = field(default="CodeFormer") - faceswaplab_postprocessing_enhance_face_visibility: float = field(default=1) - faceswaplab_postprocessing_enhance_face_codeformer_weight: float = field(default=1) + faceswaplab_restore_face_enabled: bool = field(default=False) + faceswaplab_restore_face_model: str = field(default="CodeFormer") + faceswaplab_restore_face_visibility: float = field(default=1) + faceswaplab_restore_face_codeformer_weight: float = field(default=1) + faceswaplab_postprocessing_restore_face_enabled: bool = field(default=False) + faceswaplab_postprocessing_restore_face_model: str = field(default="CodeFormer") + faceswaplab_postprocessing_restore_face_visibility: float = field(default=1) + faceswaplab_postprocessing_restore_face_codeformer_weight: float = field(default=1) faceswaplab_color_corrections_enabled: bool = field(default=False) faceswaplab_mask_erosion_factor: float = field(default=1) faceswaplab_mask_improved_mask_enabled: bool = field(default=False) @@ -204,11 +263,11 @@ class ReactorParams: reactor_target_gender: ReactorFace = field(default=ReactorFace.NONE) reactor_source_face_index: int = field(default=0) reactor_target_face_index: int = field(default=0) - reactor_enhance_face_enabled: bool = field(default=False) - reactor_enhance_face_model: str = field(default="CodeFormer") - reactor_enhance_face_visibility: float = field(default=1) - reactor_enhance_face_codeformer_weight: float = field(default=1) - reactor_enhance_face_upscale_first: bool = field(default=False) + reactor_restore_face_enabled: bool = field(default=False) + reactor_restore_face_model: str = field(default="CodeFormer") + reactor_restore_face_visibility: float = field(default=1) + reactor_restore_face_codeformer_weight: float = field(default=1) + reactor_restore_face_upscale_first: bool = field(default=False) reactor_upscaling_enabled: bool = field(default=False) reactor_upscaling_upscaler: str = field(default="RealESRGAN 4x+") reactor_upscaling_scale: float = field(default=2) @@ -226,27 +285,23 @@ class StableDiffusionWebUiExtensionParams( UserPreferencesParams, FaceSwapLabParams, ReactorParams, + FaceIDParams, + IPAdapterParams, ): display_name: str = field(default="Stable Diffusion") is_tab: bool = field(default=True) debug_mode_enabled: bool = field(default=False) - def update(self, params: Self) -> None: + def update(self, params: dict) -> None: """ Updates the parameters. """ - for f in fields(self): - val = getattr(params, f.name) - - if val == f.default or val == MISSING: - continue - - if f.default_factory != MISSING: - if val == f.default_factory(): - continue + for f in params.keys(): + assert f in [x.name for x in fields(self)], f"Invalid field for params: {f}" - setattr(self, f.name, val) + val = params[f] + setattr(self, f, val) def normalize(self) -> None: """ @@ -269,26 +324,75 @@ def normalize(self) -> None: ReactorFace[self.reactor_target_gender.upper()] or ReactorFace.NONE ) + # Todo: images are redownloaded and files are reread every time a text is generated. # noqa E501 + # This happens because normalize() is called on every generation and the downloaded values are not cached. 
# noqa E501 + if self.faceswaplab_enabled and ( self.faceswaplab_source_face.startswith("http://") or self.faceswaplab_source_face.startswith("https://") ): - # todo: image may not be png format but for now it does not really matter - self.faceswaplab_source_face = ( - "data:image/png;base64," - + base64.b64encode( + try: + self.faceswaplab_source_face = base64.b64encode( requests.get(self.faceswaplab_source_face).content ).decode() - ) + except Exception as e: + logger.exception( + "Failed to load FaceSwapLab source face image: %s", e, exc_info=True + ) + self.faceswaplab_enabled = False if self.reactor_enabled and ( self.reactor_source_face.startswith("http://") or self.reactor_source_face.startswith("https://") ): - # todo: same here issue as with faceswaplab above - self.reactor_source_face = ( - "data:image/png;base64," - + base64.b64encode( + try: + self.reactor_source_face = base64.b64encode( requests.get(self.reactor_source_face).content ).decode() - ) + except Exception as e: + logger.exception( + "Failed to load ReActor source face image: %s", e, exc_info=True + ) + self.reactor_enabled = False + + if self.faceid_enabled: + try: + if self.faceid_source_face.startswith( + "http://" + ) or self.faceid_source_face.startswith("https://"): + self.faceid_source_face = base64.b64encode( + requests.get(self.faceid_source_face).content + ).decode() + + if self.faceid_source_face.startswith("file:///"): + with open( + self.faceid_source_face.replace("file:///", ""), "rb" + ) as f: + self.faceid_source_face = base64.b64encode(f.read()).decode() + except Exception as e: + logger.exception( + "Failed to load FaceID source face image: %s", e, exc_info=True + ) + self.faceid_enabled = False + + if self.ipadapter_enabled: + try: + if self.ipadapter_reference_image.startswith( + "http://" + ) or self.ipadapter_reference_image.startswith("https://"): + self.ipadapter_reference_image = base64.b64encode( + requests.get(self.ipadapter_reference_image).content + ).decode() + + if self.ipadapter_reference_image.startswith("file:///"): + with open( + self.ipadapter_reference_image.replace("file:///", ""), "rb" + ) as f: + self.ipadapter_reference_image = base64.b64encode( + f.read() + ).decode() + except Exception as e: + logger.exception( + "Failed to load IP Adapter reference image: %s", e, exc_info=True + ) + self.ipadapter_enabled = False diff --git a/pyproject.toml b/pyproject.toml index b01ca7a..cba2699 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "stable_diffusion" -version = "1.2" +version = "1.4" authors = [{ name = "Enes Sadık Özbek", email = "es.ozbek@outlook.com" }] description = "Stable Diffusion integration for text-generation-webui" readme = { file = "README.md", content-type = "text/markdown" } diff --git a/script.py b/script.py index 2ad7b73..a362c11 100644 --- a/script.py +++ b/script.py @@ -141,6 +141,17 @@ def history_modifier(history: List[str]) -> List[str]: return history +def cleanup_context() -> None: + context = get_current_context() + + if context is not None: + context.is_completed = True + + set_current_context(None) + shared.processing_message = default_processing_message + pass + + def output_modifier(string: str, state: dict, is_chat: bool = False) -> str: """ Modifies the LLM output before it gets presented. @@ -149,12 +160,12 @@ def output_modifier(string: str, state: dict, is_chat: bool = False) -> str: and the original version goes into history['internal']. 
""" + global params + if not is_chat: - set_current_context(None) + cleanup_context() return string - global params - context = get_current_context() if context is None or context.is_completed: @@ -185,12 +196,16 @@ def output_modifier(string: str, state: dict, is_chat: bool = False) -> str: set_current_context(context) if context is None or context.is_completed: - set_current_context(None) + cleanup_context() return string context.state = state context.output_text = string + if " str: except Exception as e: string += "\n\n*Image generation has failed. Check logs for errors.*" - logger.error(e) - - context.is_completed = True - set_current_context(None) - shared.processing_message = default_processing_message + logger.error(e, exc_info=True) + cleanup_context() return string diff --git a/sd_client.py b/sd_client.py index e640a1a..2d5a078 100644 --- a/sd_client.py +++ b/sd_client.py @@ -4,7 +4,7 @@ from io import BytesIO from typing import Any, List from PIL import Image -from webuiapi import WebUIApi +from webuiapi import HiResUpscaler, WebUIApi, WebUIApiResult from .params import FaceSwapLabParams, ReactorParams @@ -48,6 +48,130 @@ def reload_checkpoint(self, use_async: bool = False) -> Task[None] | None: f"{self.baseurl}/reload-checkpoint", "", use_async ) + def txt2img( + self, + enable_hr: bool = False, + denoising_strength: float = 0.7, + firstphase_width: int = 0, + firstphase_height: int = 0, + hr_scale: float = 2, + hr_upscaler: str = HiResUpscaler.Latent, + hr_second_pass_steps: int = 0, + hr_resize_x: float = 0, + hr_resize_y: float = 0, + hr_sampler: str = "UniPC", + hr_force: bool = False, + prompt: str = "", + styles: List[str] = [], + seed: int = -1, + subseed: int = -1, + subseed_strength: float = 0.0, + seed_resize_from_h: float = 0, + seed_resize_from_w: float = 0, + sampler_name: str | None = None, + batch_size: int = 1, + n_iter: int = 1, + steps: int | None = None, + cfg_scale: float = 6.0, + width: int = 512, + height: int = 512, + restore_faces: bool = False, + tiling: bool = False, + do_not_save_samples: bool = False, + do_not_save_grid: bool = False, + negative_prompt: str = "", + eta: float = 1.0, + s_churn: int = 0, + s_tmax: int = 0, + s_tmin: int = 0, + s_noise: int = 1, + script_args: dict | None = None, + script_name: str | None = None, + send_images: bool = True, + save_images: bool = False, + full_quality: bool = True, + faceid_enabled: bool = False, + faceid_scale: float = 0.7, + faceid_image: str | None = None, + ipadapter_enabled: bool = False, + ipadapter_adapter: str = "Base", + ipadapter_scale: float = 0.7, + ipadapter_image: str | None = None, + alwayson_scripts: dict = {}, + use_async: bool = False, + ) -> Task[WebUIApiResult] | WebUIApiResult: + if sampler_name is None: + sampler_name = self.default_sampler + if steps is None: + steps = self.default_steps + if script_args is None: + script_args = {} + payload = { + "enable_hr": enable_hr or hr_force, + "hr_scale": hr_scale, + "hr_upscaler": hr_upscaler, + "hr_second_pass_steps": hr_second_pass_steps, + "hr_resize_x": hr_resize_x, + "hr_resize_y": hr_resize_y, + "hr_force": hr_force, # SD.Next (no equivalent in AUTOMATIC1111) + "hr_sampler_name": hr_sampler, # AUTOMATIC1111 + "latent_sampler": hr_sampler, # SD.Next + "denoising_strength": denoising_strength, + "firstphase_width": firstphase_width, + "firstphase_height": firstphase_height, + "prompt": prompt, + "styles": styles, + "seed": seed, + "full_quality": full_quality, # SD.Next + "subseed": subseed, + "subseed_strength": subseed_strength, + 
"seed_resize_from_h": seed_resize_from_h, + "seed_resize_from_w": seed_resize_from_w, + "batch_size": batch_size, + "n_iter": n_iter, + "steps": steps, + "cfg_scale": cfg_scale, + "width": width, + "height": height, + "restore_faces": restore_faces, + "tiling": tiling, + "do_not_save_samples": do_not_save_samples, + "do_not_save_grid": do_not_save_grid, + "negative_prompt": negative_prompt, + "eta": eta, + "s_churn": s_churn, + "s_tmax": s_tmax, + "s_tmin": s_tmin, + "s_noise": s_noise, + "sampler_name": sampler_name, + "send_images": send_images, + "save_images": save_images, + } + + if faceid_enabled: + payload["face_id"] = { + "scale": faceid_scale, + "image": faceid_image, + } + + if alwayson_scripts: + payload["alwayson_scripts"] = alwayson_scripts + + if script_name: + payload["script_name"] = script_name + payload["script_args"] = script_args + + if ipadapter_enabled: + payload["ip_adapter"] = { + "adapter": ipadapter_adapter, + "scale": ipadapter_scale, + "image": ipadapter_image, + } + + return self.post_and_get_api_result( + f"{self.baseurl}/txt2img", payload, use_async + ) + def reactor_swap_face( self, target_image: Image.Image, @@ -91,12 +215,12 @@ def reactor_swap_face( else "None", "scale": params.reactor_upscaling_scale, "upscale_visibility": params.reactor_upscaling_visibility, - "face_restorer": params.reactor_enhance_face_model - if params.reactor_enhance_face_enabled + "face_restorer": params.reactor_restore_face_model + if params.reactor_restore_face_enabled else "None", - "restorer_visibility": params.reactor_enhance_face_visibility, - "codeformer_weight": params.reactor_enhance_face_codeformer_weight, - "restore_first": 0 if params.reactor_enhance_face_upscale_first else 1, + "restorer_visibility": params.reactor_restore_face_visibility, + "codeformer_weight": params.reactor_restore_face_codeformer_weight, + "restore_first": 0 if params.reactor_restore_face_upscale_first else 1, "model": params.reactor_model, "gender_source": params.reactor_source_gender, "gender_target": params.reactor_target_gender, @@ -174,11 +298,11 @@ def faceswaplab_swap_face( "inpainting_seed": 0, }, "swapping_options": { - "face_restorer_name": params.faceswaplab_enhance_face_model - if params.faceswaplab_enhance_face_enabled + "face_restorer_name": params.faceswaplab_restore_face_model + if params.faceswaplab_restore_face_enabled else "None", - "restorer_visibility": params.faceswaplab_enhance_face_visibility, # noqa: E501 - "codeformer_weight": params.faceswaplab_enhance_face_codeformer_weight, # noqa: E501 + "restorer_visibility": params.faceswaplab_restore_face_visibility, # noqa: E501 + "codeformer_weight": params.faceswaplab_restore_face_codeformer_weight, # noqa: E501 "upscaler_name": params.faceswaplab_upscaling_upscaler if params.faceswaplab_upscaling_enabled else "None", @@ -199,11 +323,11 @@ def faceswaplab_swap_face( } ], "postprocessing": { - "face_restorer_name": params.faceswaplab_postprocessing_enhance_face_model # noqa: E501 - if params.faceswaplab_postprocessing_enhance_face_enabled + "face_restorer_name": params.faceswaplab_postprocessing_restore_face_model # noqa: E501 + if params.faceswaplab_postprocessing_restore_face_enabled else "None", - "restorer_visibility": params.faceswaplab_postprocessing_enhance_face_visibility, # noqa: E501 - "codeformer_weight": params.faceswaplab_postprocessing_enhance_face_codeformer_weight, # noqa: E501 + "restorer_visibility": params.faceswaplab_postprocessing_restore_face_visibility, # noqa: E501 + "codeformer_weight": 
params.faceswaplab_postprocessing_restore_face_codeformer_weight, # noqa: E501 "upscaler_name": params.faceswaplab_postprocessing_upscaling_upscaler if params.faceswaplab_postprocessing_upscaling_enabled else "None", diff --git a/settings.debug.yaml b/settings.debug.yaml index 515f30f..f905d69 100644 --- a/settings.debug.yaml +++ b/settings.debug.yaml @@ -32,14 +32,13 @@ stable_diffusion-api_password: "" # IMAGE GENERATION PARAMETERS # #-----------------------------# -stable_diffusion-base_prompt: "high resolution, detailed, realistic, vivid" -stable_diffusion-base_negative_prompt: "disformed, disfigured, blurry, low resolution, unrealistic, pixelated, low quality, low res" +stable_diffusion-base_prompt: "RAW photo, subject, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3" +stable_diffusion-base_negative_prompt: "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck" stable_diffusion-sampler_name: "DPM SDE" -stable_diffusion-sampling_steps: 7 -stable_diffusion-denoising_strength: 0.7 -stable_diffusion-width: 1024 -stable_diffusion-height: 1024 -stable_diffusion-cfg_scale: 2 +stable_diffusion-sampling_steps: 25 +stable_diffusion-width: 512 +stable_diffusion-height: 512 +stable_diffusion-cfg_scale: 6 stable_diffusion-clip_skip: 1 stable_diffusion-seed: -1 @@ -68,11 +67,11 @@ stable_diffusion-interactive_mode_prompt_generation_mode: "dynamic" ## Defines the regex pattern for the input message which triggers image generation in interactive mode. stable_diffusion-interactive_mode_input_trigger_regex: >- - .*(send|upload|add|show|attach|generate)\b.+?\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)(s?) + .*(draw|paint|create|send|upload|add|show|attach|generate)\b.+?\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)(s?) ## Defines the regex pattern for the generated output message which triggers image generation in interactive mode. stable_diffusion-interactive_mode_output_trigger_regex: >- - .*[*([](sends|uploads|adds|shows|attaches|generates|here (is|are))\b.+?\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)(s?) + .*[*([]?(draws|paints|creates|sends|uploads|adds|shows|attaches|generates|here (is|are))\b.+?\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)(s?) ## Defines the regex pattern for extracting the subject of the message for dynamic prompt generation in interactive mode. ## Only used when prompt_generation_mode is set to "dynamic". @@ -113,9 +112,9 @@ stable_diffusion-dont_stream_when_generating_images: true ## match: A list of where to match the regex. Available options: ## - "input": match on input text. ## - "input_sentence": match on any sentence in input text. -# - "output": match on generated output text. -# - "output_sentence": match on any sentence in generated output text. -# - "character_name": match on current character name (only if using gallery extension). +## - "output": match on generated output text. +## - "output_sentence": match on any sentence in generated output text. +## - "character_name": match on current character name (only if using gallery extension). 
## actions: A list of actions to perform if the regex matches. ## - name: The name of the action to perform. ## Available options: @@ -168,8 +167,20 @@ stable_diffusion-upscaling_upscaler: "RealESRGAN 4x+" ## Amount to upscale by (1 = 100%, 2 = 200%, etc.). stable_diffusion-upscaling_scale: 2 +## Sets if HiRes.fix should be enabled. +stable_diffusion-hires_fix_enabled: false + +## Sets the sampler to use for HiRes.fix. +stable_diffusion-hires_fix_sampler: "UniPC" + +## Sets the number of sampling steps for HiRes.fix. +stable_diffusion-hires_fix_sampling_steps: 10 + +## Sets the denoising strength for HiRes.fix. +stable_diffusion-hires_fix_denoising_strength: 0.2 + ## Sets if faces should be enhanced (or "restored") in generated images. -stable_diffusion-enhance_faces_enabled: false +stable_diffusion-restore_faces_enabled: false #-------------# # FACESWAPLAB # @@ -237,28 +248,28 @@ stable_diffusion-faceswaplab_postprocessing_upscaling_scale: 2 stable_diffusion-faceswaplab_postprocessing_upscaling_visibility: 1 ## Sets if the face should be enhanced (or "restored") during swapping -stable_diffusion-faceswaplab_enhance_face_enabled: false +stable_diffusion-faceswaplab_restore_face_enabled: false ## Model to use for enhancing the face (CodeFormer, GFPGAN) -stable_diffusion-faceswaplab_enhance_face_model: "CodeFormer" +stable_diffusion-faceswaplab_restore_face_model: "CodeFormer" -## Visibility of the enhanced face (0.0 - 1.0) -stable_diffusion-faceswaplab_enhance_face_visibility: 1 +## Visibility of the restored face (0.0 - 1.0) +stable_diffusion-faceswaplab_restore_face_visibility: 1 ## Weight of the CodeFormer model (0.0 - 1.0) -stable_diffusion-faceswaplab_enhance_face_codeformer_weight: 1 +stable_diffusion-faceswaplab_restore_face_codeformer_weight: 1 ## Sets if the faces should be enhanced (or "restored") in the final result image after swapping -stable_diffusion-faceswaplab_postprocessing_enhance_face_enabled: false +stable_diffusion-faceswaplab_postprocessing_restore_face_enabled: false ## Model to use for restoring the faces (CodeFormer, GFPGAN) -stable_diffusion-faceswaplab_postprocessing_enhance_face_model: "CodeFormer" +stable_diffusion-faceswaplab_postprocessing_restore_face_model: "CodeFormer" -## Visibility of the enhanced faces (0.0 - 1.0) -stable_diffusion-faceswaplab_postprocessing_enhance_face_visibility: 1 +## Visibility of the restored faces (0.0 - 1.0) +stable_diffusion-faceswaplab_postprocessing_restore_face_visibility: 1 ## Weight of the CodeFormer model (0.0 - 1.0) -stable_diffusion-faceswaplab_postprocessing_enhance_face_codeformer_weight: 1 +stable_diffusion-faceswaplab_postprocessing_restore_face_codeformer_weight: 1 ## Sets if color corrections should be applied stable_diffusion-faceswaplab_color_corrections_enabled: false @@ -315,19 +326,19 @@ stable_diffusion-reactor_source_face_index: 0 stable_diffusion-reactor_target_face_index: 0 ## Sets if the face should be enhanced (or "restored") after swapping -stable_diffusion-reactor_enhance_face_enabled: false +stable_diffusion-reactor_restore_face_enabled: false ## Model to use for restoring the face (CodeFormer, GFPGAN) -stable_diffusion-reactor_enhance_face_model: "CodeFormer" +stable_diffusion-reactor_restore_face_model: "CodeFormer" -## Visibility of the enhanced face (0.0 - 1.0) -stable_diffusion-reactor_enhance_face_visibility: 1 +## Visibility of the restored face (0.0 - 1.0) +stable_diffusion-reactor_restore_face_visibility: 1 ## Weight of the CodeFormer model (0.0 - 1.0)
-stable_diffusion-reactor_enhance_face_codeformer_weight: 1 +stable_diffusion-reactor_restore_face_codeformer_weight: 1 -## Upscale face first before enhancing it; otherwise enhances face first then upscales it instead -stable_diffusion-reactor_enhance_face_upscale_first: false +## Upscale the face before restoring it; otherwise the face is restored first and then upscaled +stable_diffusion-reactor_restore_face_upscale_first: false ## Sets if the face should be upscaled. stable_diffusion-reactor_upscaling_enabled: false @@ -352,3 +363,69 @@ stable_diffusion-reactor_model: "inswapper_128.onnx" ## CUDA recommended for faster inference if you have an NVIDIA GPU. ## Note: CUDA requires installation of the onnxruntime-gpu package instead of onnxruntime in stable-diffusion-webui stable_diffusion-reactor_device: "CPU" + +#---------# + # FaceID # +#---------# + +## Apply face swapping using the FaceID feature of SD.Next (a fork of AUTOMATIC1111). +## See: https://github.com/vladmandic/automatic for the SD.Next repository. +## +## Works much better than ReActor or FaceSwapLab, as the +## face is not actually swapped but is instead generated +## directly to match the source face while the image is +## still being generated. +## +## Works with stylized images too, e.g. 3D renders, drawings, cartoons, paintings, etc. +## +## WARNING: DOES NOT WORK WITH VANILLA AUTOMATIC1111. YOU _MUST_ USE SD.NEXT INSTEAD. +## +## Requires "insightface", "ip_adapter" and "onnxruntime" (or "onnxruntime-gpu") PIP packages to be installed in SD.Next. + +## Sets if faces should be swapped in generated images. +stable_diffusion-faceid_enabled: false + +## Sets the source image with the face to use for face swapping. +## It can be set in two different ways: +## 1. Local file: "file:///./example.jpg" +## 2. URL: "https://some-site.com/example.png" +stable_diffusion-faceid_source_face: "file:///{STABLE_DIFFUSION_EXTENSION_DIRECTORY}/assets/example_face.jpg" + +## Scale for the source face during image generation (0.0 - 1.0) +stable_diffusion-faceid_scale: 0.5 + +#-------------# + # IP ADAPTER # +#-------------# + +## Configure the IP Adapter integration feature of SD.Next (a fork of AUTOMATIC1111). +## See: https://github.com/vladmandic/automatic for the SD.Next repository. +## See: https://ip-adapter.github.io/ for the IP Adapter paper. +## +## Can also be used for face swapping, similar to the FaceID feature +## (by using the "Plus Face" or "Full face" adapters). +## +## WARNING: DOES NOT WORK WITH VANILLA AUTOMATIC1111. YOU _MUST_ USE SD.NEXT INSTEAD. +## Requires "ip_adapter" and "onnxruntime" (or "onnxruntime-gpu") PIP packages to be installed in SD.Next. + +## Sets if the IP Adapter should be enabled. +stable_diffusion-ipadapter_enabled: false + +## Sets the reference image to use (e.g. a source face for face swapping). +## It can be set in two different ways: +## 1. Local file: "file:///./example.jpg" +## 2. URL: "https://some-site.com/example.png" +stable_diffusion-ipadapter_reference_image: "file:///{STABLE_DIFFUSION_EXTENSION_DIRECTORY}/assets/example_face.jpg" + +## The adapter to use.
+## Possible values: +## - "Base" +## - "Light" +## - "Plus" +## - "Plus Face" +## - "Full face" +## - "Base SDXL" +stable_diffusion-ipadapter_adapter: "Base" + +## Scale for the source face during image generation (0.0 - 1.0) +stable_diffusion-ipadapter_scale: 0.5 diff --git a/ui.py b/ui.py index 4291e2d..cca90d5 100644 --- a/ui.py +++ b/ui.py @@ -8,6 +8,7 @@ from .params import ( ContinuousModePromptGenerationMode, InteractiveModePromptGenerationMode, + IPAdapterAdapter, ) from .params import StableDiffusionWebUiExtensionParams as Params from .params import TriggerMode @@ -48,9 +49,15 @@ def render_ui(params: Params) -> None: with gr.Row(): _render_chat_config(params) + + with gr.Row(): _render_faceswaplab_config(params) _render_reactor_config(params) + with gr.Row(): + _render_faceid_config(params) + _render_ipadapter_config(params) + def _render_connection_details(params: Params) -> None: global refresh_button @@ -64,9 +71,7 @@ def _render_connection_details(params: Params) -> None: value=lambda: params.api_username or "", ) api_username.change( - lambda new_api_username: params.update( - Params(api_endpoint=new_api_username) - ), + lambda new_username: params.update({"api_username": new_username}), api_username, None, ) @@ -79,7 +84,7 @@ def _render_connection_details(params: Params) -> None: ) api_password.change( lambda new_api_password: params.update( - Params(api_endpoint=new_api_password) + {"api_password": new_api_password} ), api_password, None, @@ -93,7 +98,7 @@ def _render_connection_details(params: Params) -> None: ) api_endpoint.change( lambda new_api_endpoint: params.update( - Params(api_endpoint=new_api_endpoint) + {"api_endpoint": new_api_endpoint} ), api_endpoint, None, @@ -121,7 +126,7 @@ def _render_prompts(params: Params) -> None: value=lambda: params.base_prompt, ) prompt.change( - lambda new_prompt: params.update(Params(base_prompt=new_prompt)), + lambda new_prompt: params.update({"base_prompt": new_prompt}), prompt, None, ) @@ -132,9 +137,7 @@ def _render_prompts(params: Params) -> None: value=lambda: params.base_negative_prompt, ) negative_prompt.change( - lambda new_prompt: params.update( - Params(base_negative_prompt=new_prompt) - ), + lambda new_prompt: params.update({"base_negative_prompt": new_prompt}), negative_prompt, None, ) @@ -182,24 +185,22 @@ def _render_generation_parameters(params: Params) -> None: with gr.Row("Image size"): width = gr.Number( label="Width", - minimum=64, maximum=2048, value=lambda: params.width, ) width.change( - lambda new_width: params.update(Params(width=new_width)), + lambda new_width: params.update({"width": new_width}), width, None, ) height = gr.Number( label="Height", - minimum=64, maximum=2048, value=lambda: params.height, ) height.change( - lambda new_height: params.update(Params(height=new_height)), + lambda new_height: params.update({"height": new_height}), height, None, ) @@ -214,7 +215,7 @@ def _render_generation_parameters(params: Params) -> None: ) sampler_name.change( lambda new_sampler_name: params.update( - Params(sampler_name=new_sampler_name) + {"sampler_name": new_sampler_name} ), sampler_name, None, @@ -230,9 +231,7 @@ def _render_generation_parameters(params: Params) -> None: elem_id="steps_box", ) steps.change( - lambda new_steps: params.update( - Params(sampling_steps=new_steps) - ), + lambda new_steps: params.update({"sampling_steps": new_steps}), steps, None, ) @@ -247,7 +246,7 @@ def _render_generation_parameters(params: Params) -> None: ) clip_skip.change( lambda new_clip_skip: params.update( - 
Params(clip_skip=new_clip_skip) + {"clip_skip": new_clip_skip} ), clip_skip, None, @@ -259,9 +258,7 @@ def _render_generation_parameters(params: Params) -> None: value=lambda: params.seed, elem_id="seed_box", ) - seed.change( - lambda new_seed: params.update(Params(seed=new_seed)), seed, None - ) + seed.change(lambda new_seed: params.update({"seed": new_seed}), seed, None) cfg_scale = gr.Slider( label="CFG Scale", @@ -271,18 +268,18 @@ def _render_generation_parameters(params: Params) -> None: elem_id="cfg_box", ) cfg_scale.change( - lambda new_cfg_scale: params.update(Params(cfg_scale=new_cfg_scale)), + lambda new_cfg_scale: params.update({"cfg_scale": new_cfg_scale}), cfg_scale, None, ) with gr.Column() as hr_options: restore_faces = gr.Checkbox( - label="Restore faces", value=lambda: params.enhance_faces_enabled + label="Restore faces", value=lambda: params.restore_faces_enabled ) restore_faces.change( lambda new_value: params.update( - Params(enhance_faces_enabled=new_value) + {"restore_faces_enabled": new_value} ), restore_faces, None, @@ -292,9 +289,7 @@ def _render_generation_parameters(params: Params) -> None: label="Upscale image", value=lambda: params.upscaling_enabled ) enable_hr.change( - lambda new_value: params.update( - Params(upscaling_enabled=new_value) - ), + lambda new_value: params.update({"upscaling_enabled": new_value}), enable_hr, None, ) @@ -318,7 +313,7 @@ def _render_generation_parameters(params: Params) -> None: ) hr_upscaler.change( lambda new_upscaler: params.update( - Params(upscaling_upscaler=new_upscaler) + {"upscaling_upscaler": new_upscaler} ), hr_upscaler, None, @@ -333,21 +328,23 @@ def _render_generation_parameters(params: Params) -> None: step=0.1, ) hr_scale.change( - lambda new_value: params.update(Params(upscaling_scale=new_value)), + lambda new_value: params.update({"upscaling_scale": new_value}), hr_scale, None, ) - denoising_strength = gr.Slider( + hires_fix_denoising_strength = gr.Slider( label="Denoising strength", minimum=0, maximum=1, - value=lambda: params.denoising_strength, + value=lambda: params.hires_fix_denoising_strength, step=0.05, ) - denoising_strength.change( - lambda new_value: params.update(Params(denoising_strength=new_value)), - denoising_strength, + hires_fix_denoising_strength.change( + lambda new_value: params.update( + {"hires_fix_denoising_strength": new_value} + ), + hires_fix_denoising_strength, None, ) @@ -364,9 +361,7 @@ def _render_faceswaplab_config(params: Params) -> None: ) faceswap_enabled.change( - lambda new_enabled: params.update( - Params(faceswaplab_enabled=new_enabled) - ), + lambda new_enabled: params.update({"faceswaplab_enabled": new_enabled}), faceswap_enabled, None, ) @@ -379,7 +374,7 @@ def _render_faceswaplab_config(params: Params) -> None: faceswap_source_face.change( lambda new_source_face: params.update( - Params(faceswaplab_source_face=new_source_face) + {"faceswaplab_source_face": new_source_face} ), faceswap_source_face, None, @@ -396,7 +391,7 @@ def _render_reactor_config(params: Params) -> None: ) reactor_enabled.change( - lambda new_enabled: params.update(Params(reactor_enabled=new_enabled)), + lambda new_enabled: params.update({"reactor_enabled": new_enabled}), reactor_enabled, None, ) @@ -409,13 +404,118 @@ def _render_reactor_config(params: Params) -> None: reactor_source_face.change( lambda new_source_face: params.update( - Params(reactor_source_face=new_source_face) + {"reactor_source_face": new_source_face} ), reactor_source_face, None, ) +def _render_faceid_config(params: Params) -> 
None: + with gr.Accordion("FaceID", open=True, visible=sd_connected) as faceid_config: + connect_listeners.append(faceid_config) + + with gr.Column(): + faceid_enabled = gr.Checkbox( + label="Enabled", value=lambda: params.faceid_enabled + ) + + faceid_enabled.change( + lambda new_enabled: params.update({"faceid_enabled": new_enabled}), + faceid_enabled, + None, + ) + + faceid_source_face = gr.Text( + label="Source face", + placeholder="See documentation for details...", + value=lambda: params.faceid_source_face, + ) + + faceid_source_face.change( + lambda new_source_face: params.update( + {"faceid_source_face": new_source_face} + ), + faceid_source_face, + None, + ) + + faceid_scale = gr.Slider( + label="Scale", + minimum=0, + maximum=1, + value=lambda: params.faceid_scale, + step=0.1, + ) + + faceid_scale.change( + lambda new_scale: params.update({"faceid_scale": new_scale}), + faceid_scale, + None, + ) + + +def _render_ipadapter_config(params: Params) -> None: + with gr.Accordion( + "IP Adapter", open=True, visible=sd_connected + ) as ipadapter_config: + connect_listeners.append(ipadapter_config) + + with gr.Column(): + ipadapter_enabled = gr.Checkbox( + label="Enabled", value=lambda: params.ipadapter_enabled + ) + + ipadapter_enabled.change( + lambda new_enabled: params.update({"ipadapter_enabled": new_enabled}), + ipadapter_enabled, + None, + ) + + ipadapter_adapter = gr.Dropdown( + label="Adapter", + choices=[adapter for adapter in IPAdapterAdapter], + value=lambda: params.ipadapter_adapter, + type="index", + ) + + ipadapter_adapter.change( + lambda index: params.update( + {"ipadapter_adapter": IPAdapterAdapter.from_index(index)} + ), + ipadapter_adapter, + None, + ) + + ipadapter_reference_image = gr.Text( + label="Reference image", + placeholder="See documentation for details...", + value=lambda: params.ipadapter_reference_image, + ) + + ipadapter_reference_image.change( + lambda new_reference_image: params.update( + {"ipadapter_reference_image": new_reference_image} + ), + ipadapter_reference_image, + None, + ) + + ipadapter_scale = gr.Slider( + label="Scale", + minimum=0, + maximum=1, + value=lambda: params.ipadapter_scale, + step=0.1, + ) + + ipadapter_scale.change( + lambda new_scale: params.update({"ipadapter_scale": new_scale}), + ipadapter_scale, + None, + ) + + def _render_chat_config(params: Params) -> None: with gr.Accordion("Chat Settings", open=True, visible=sd_connected) as chat_config: connect_listeners.append(chat_config) @@ -430,7 +530,7 @@ def _render_chat_config(params: Params) -> None: trigger_mode.change( lambda index: params.update( - Params(trigger_mode=TriggerMode.from_index(index)) + {"trigger_mode": TriggerMode.from_index(index)} ), trigger_mode, None, @@ -449,11 +549,11 @@ def _render_chat_config(params: Params) -> None: interactive_prompt_generation_mode.change( lambda index: params.update( - Params( - interactive_mode_prompt_generation_mode=InteractiveModePromptGenerationMode.from_index( # noqa: E501 + { + "interactive_mode_prompt_generation_mode": InteractiveModePromptGenerationMode.from_index( # noqa: E501 index ) - ) + } ), interactive_prompt_generation_mode, None, @@ -472,11 +572,11 @@ def _render_chat_config(params: Params) -> None: continuous_prompt_generation_mode.change( lambda index: params.update( - Params( - continuous_mode_prompt_generation_mode=ContinuousModePromptGenerationMode.from_index( # noqa: E501 + { + "continuous_mode_prompt_generation_mode": ContinuousModePromptGenerationMode.from_index( # noqa: E501 index ) - ) + } ), 
continuous_prompt_generation_mode, None, @@ -540,7 +640,7 @@ def _fetch_sd_options(sd_client: SdWebUIApi) -> None: try: sd_options = sd_client.get_options() except BaseException as error: - logger.error(error) + logger.error(error, exc_info=True) sd_connected = False @@ -555,7 +655,7 @@ def _fetch_samplers(sd_client: SdWebUIApi) -> None: for sampler in sd_client.get_samplers() ] except BaseException as error: - logger.error(error) + logger.error(error, exc_info=True) sd_connected = False @@ -570,7 +670,7 @@ def _fetch_upscalers(sd_client: SdWebUIApi) -> None: for upscaler in sd_client.get_upscalers() ] except BaseException as error: - logger.error(error) + logger.error(error, exc_info=True) sd_connected = False @@ -587,7 +687,7 @@ def _fetch_checkpoints(sd_client: SdWebUIApi) -> None: checkpoint["title"] for checkpoint in sd_client.get_sd_models() ] except BaseException as error: - logger.error(error) + logger.error(error, exc_info=True) sd_connected = False @@ -601,7 +701,7 @@ def _fetch_vaes(sd_client: SdWebUIApi) -> None: sd_current_vae = sd_options["sd_vae"] sd_vaes = [checkpoint["model_name"] for checkpoint in sd_client.get_sd_vae()] except BaseException as error: - logger.error(error) + logger.error(error, exc_info=True) sd_connected = False
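
For reference, the sd_client.py hunk above sends the new SD.Next-specific options as extra top-level keys ("face_id", "ip_adapter" and "full_quality") inside the regular /txt2img request body. The following minimal sketch shows that payload shape in isolation, assuming an SD.Next instance reachable at http://127.0.0.1:7860 with the usual /sdapi/v1 API prefix and a local example_face.jpg; the host, port and image path are placeholders, not values taken from this repository.

import base64

import requests

# Assumed SD.Next endpoint; adjust host/port to your own installation.
API_URL = "http://127.0.0.1:7860/sdapi/v1/txt2img"

# Reference face, base64-encoded without a data URI prefix, matching how
# params.normalize() above prepares faceid_source_face / ipadapter_reference_image.
with open("example_face.jpg", "rb") as image_file:
    reference_image = base64.b64encode(image_file.read()).decode()

payload = {
    "prompt": "RAW photo, subject, 8k uhd, dslr, soft lighting, high quality",
    "negative_prompt": "worst quality, low quality, jpeg artifacts",
    "sampler_name": "DPM SDE",
    "steps": 25,
    "cfg_scale": 6,
    "width": 512,
    "height": 512,
    "seed": -1,
    "full_quality": True,  # SD.Next-only flag; AUTOMATIC1111 ignores it
    # FaceID block, assembled the same way as in SdWebUIApi.txt2img() above.
    "face_id": {
        "scale": 0.5,
        "image": reference_image,
    },
    # IP Adapter block, also mirroring SdWebUIApi.txt2img() above.
    "ip_adapter": {
        "adapter": "Base",
        "scale": 0.5,
        "image": reference_image,
    },
}

response = requests.post(API_URL, json=payload, timeout=300)
response.raise_for_status()
images = response.json().get("images", [])  # base64-encoded result images

Within the extension itself these keys are never built by hand: generate_html_images_for_context() passes the faceid_* and ipadapter_* params through SdWebUIApi.txt2img(), which only attaches the "face_id" and "ip_adapter" blocks when the corresponding *_enabled flags are set.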