diff --git a/prompt_enhancer_nodes.py b/prompt_enhancer_nodes.py
index 6dd1e4c..2c1df90 100644
--- a/prompt_enhancer_nodes.py
+++ b/prompt_enhancer_nodes.py
@@ -38,7 +38,7 @@ def __init__(
             + 1073741824
         )
 
-    def forward(self, prompt, image_conditioning, max_resulting_tokens):
+    def forward(self, prompt, image_conditioning, max_resulting_tokens, seed=-1):
         enhanced_prompt = generate_cinematic_prompt(
             self.image_caption_model,
             self.image_caption_processor,
@@ -47,6 +47,7 @@ def forward(self, prompt, image_conditioning, max_resulting_tokens):
             prompt,
             image_conditioning,
             max_new_tokens=max_resulting_tokens,
+            seed=seed,
         )
 
         return enhanced_prompt
@@ -172,6 +173,7 @@ def INPUT_TYPES(s):
             },
             "optional": {
                 "image_prompt": ("IMAGE",),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffff}),
             },
         }
 
@@ -192,6 +194,7 @@ def enhance(
         prompt_enhancer: comfy.model_patcher.ModelPatcher,
         image_prompt: torch.Tensor = None,
         max_resulting_tokens=256,
+        seed=-1,
     ):
         comfy.model_management.free_memory(
             prompt_enhancer.memory_required([]),
@@ -204,5 +207,5 @@ def enhance(
         permuted_image = image_prompt.permute(3, 0, 1, 2)[None, :]
         image_conditioning = [(permuted_image, 0, 1.0)]
 
-        enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens)
+        enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens, seed)
         return (enhanced_prompt[0],)
diff --git a/prompt_enhancer_utils.py b/prompt_enhancer_utils.py
index e05ecfa..7c87846 100644
--- a/prompt_enhancer_utils.py
+++ b/prompt_enhancer_utils.py
@@ -80,7 +80,12 @@ def generate_cinematic_prompt(
     prompt: Union[str, List[str]],
     conditioning_items: Optional[List[Tuple[torch.Tensor, int, float]]] = None,
     max_new_tokens: int = 256,
+    seed: int = -1,
 ) -> List[str]:
+    from transformers import set_seed
+    if seed > -1:
+        set_seed(seed)
+
     prompts = [prompt] if isinstance(prompt, str) else prompt
 
     if conditioning_items is None: