author     Fampai <>  2022-10-31 13:54:51 +0000
committer  Fampai <>  2022-10-31 13:54:51 +0000
commit     3b0127e698a2eeb913437bce0b25b478fb06ff11
tree       e0894e49eb0d7609b12d4e4f3a71fd979830b29c /modules/processing.py
parent     006756f9cd6258eae418e9209cfc13f940ec53e1
parent     9b384dfb5c05129f50cc3f0262f89e8b788e5cf3
Merge branch 'master' of https://github.com/AUTOMATIC1111/stable-diffusion-webui into TI_optimizations
Diffstat (limited to 'modules/processing.py')
-rw-r--r--  modules/processing.py  |  8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 50343846..b1df4918 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -396,6 +396,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name),
+ "Hypernet strength": (None if shared.loaded_hypernetwork is None or shared.opts.sd_hypernetwork_strength >= 1 else shared.opts.sd_hypernetwork_strength),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@@ -686,15 +687,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ image_conditioning = self.txt2img_image_conditioning(x)
+
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
- image_conditioning = self.img2img_image_conditioning(
- decoded_samples,
- samples,
- decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3])
- )
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
return samples
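
The first hunk adds a "Hypernet strength" entry to the infotext parameters, setting it to None when no hypernetwork is loaded or the strength sits at its default (>= 1). A minimal standalone sketch of that None-filtering pattern follows; the dictionary values and the join are illustrative only, not the webui's actual create_infotext code.

# Sketch, assuming infotext entries whose value is None are simply omitted.
generation_params = {
    "Hypernet": "my_hypernet",     # None when no hypernetwork is loaded
    "Hypernet strength": 0.6,      # None when strength is at the default (>= 1)
    "Batch size": None,            # None when batch_size < 2
    "Seed": 12345,
}

infotext = ", ".join(f"{k}: {v}" for k, v in generation_params.items() if v is not None)
print(infotext)  # Hypernet: my_hypernet, Hypernet strength: 0.6, Seed: 12345

The second hunk builds the image conditioning for the highres second pass from the first-pass latent x via txt2img_image_conditioning before x is dropped and devices.torch_gc() runs, rather than deriving it from the decoded samples afterwards. A hedged sketch of that compute-before-free ordering is below; the helper names are placeholders, not the webui API.

import gc
import torch

def second_pass(x, make_conditioning, sample_img2img):
    # Derive everything needed from the first-pass latent *before* dropping it,
    # so the tensor can be freed ahead of the memory-heavy img2img sampling.
    image_conditioning = make_conditioning(x)

    x = None                  # release the reference
    gc.collect()              # let Python reclaim it
    torch.cuda.empty_cache()  # return the cached VRAM to the allocator

    return sample_img2img(image_conditioning=image_conditioning)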