Diffstat (limited to 'modules/processing.py')
-rw-r--r--  modules/processing.py | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 0138e5ac..0c191428 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -91,8 +91,8 @@ def create_binary_mask(image):
 
 def txt2img_image_conditioning(sd_model, x, width, height):
     if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
-        # The "masked-image" in this case will just be all zeros since the entire image is masked.
-        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+        # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+        image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
         image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
 
         # Add the fake full 1s mask to the first dimension.
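
Aside: the hunk above changes the fake "masked image" that txt2img hands to inpainting-style models from all zeros to a flat 0.5 grey, which the VAE encodes as a neutral image. A minimal standalone sketch of that construction, not part of the patch; the helper name and the pixel-space mask tensor are illustrative assumptions:

import torch

def fake_inpaint_conditioning(x, width, height):
    # Flat 0.5 grey "masked image" instead of all zeros; the entire image counts as masked.
    masked_image = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
    # A full 1s mask ("everything is masked"); in the real pipeline this is added as an
    # extra channel only after the image has been encoded to latent space.
    mask = torch.ones(x.shape[0], 1, height, width, device=x.device)
    return masked_image, mask
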
@@ -533,6 +533,7 @@ class Processed:
         self.all_seeds = all_seeds or p.all_seeds or [self.seed]
         self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
         self.infotexts = infotexts or [info]
+        self.version = program_version()
 
     def js(self):
         obj = {
@@ -567,6 +568,7 @@ class Processed:
             "job_timestamp": self.job_timestamp,
             "clip_skip": self.clip_skip,
             "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
+            "version": self.version,
         }
 
         return json.dumps(obj)
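
Aside: the two hunks above record the program version on the Processed object and include it in the JSON emitted by js(). A rough standalone sketch of that serialization shape; the field values and version string are illustrative only, and program_version() stands for whatever helper the module already imports:

import json

def serialize_processed(fields, version):
    # Flat dict dumped to JSON, now carrying a "version" entry alongside the existing metadata.
    obj = dict(fields)
    obj["version"] = version
    return json.dumps(obj)

print(serialize_processed({"clip_skip": 1, "is_using_inpainting_conditioning": False}, "1.x.x"))
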
@@ -1148,18 +1150,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         else:
             decoded_samples = None
 
-        current = shared.sd_model.sd_checkpoint_info
-        try:
-            if self.hr_checkpoint_info is not None:
-                self.sampler = None
-                sd_models.reload_model_weights(info=self.hr_checkpoint_info)
-                devices.torch_gc()
-
-            return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
-        finally:
-            self.sampler = None
-            sd_models.reload_model_weights(info=current)
-            devices.torch_gc()
+        with sd_models.SkipWritingToConfig():
+            sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+
+        devices.torch_gc()
+
+        return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
 
     def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
         if shared.state.interrupted:
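
Aside: the hunk above drops the try/finally that reloaded the original checkpoint and instead wraps the hires-checkpoint reload in sd_models.SkipWritingToConfig(), so the temporary switch is not persisted to the saved settings. A minimal sketch of how such a guard can be built; this is an assumed shape for illustration, not the actual sd_models implementation:

import contextlib

skip_write_to_config = False  # flag a hypothetical save routine would consult

@contextlib.contextmanager
def skip_writing_to_config():
    # While the block runs, checkpoint changes are not written back to config.
    global skip_write_to_config
    skip_write_to_config = True
    try:
        yield
    finally:
        skip_write_to_config = False
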
@@ -1321,7 +1317,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             if shared.opts.hires_fix_use_firstpass_conds:
                 self.calculate_hr_conds()
 
-            elif lowvram.is_enabled(shared.sd_model): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
+            elif lowvram.is_enabled(shared.sd_model) and shared.sd_model.sd_checkpoint_info == sd_models.select_checkpoint(): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
                 with devices.autocast():
                     extra_networks.activate(self, self.hr_extra_network_data)
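
Aside: the tightened elif above only takes the lowvram early-cond path when the checkpoint currently loaded is also the one select_checkpoint() would pick, so conds are never precomputed with the wrong model. Boiled down to the condition itself; the names are placeholders, not the real shared/sd_models API:

def should_precompute_hr_conds(lowvram_enabled, loaded_checkpoint, selected_checkpoint):
    # Precompute hires conds right away only if lowvram is active and the loaded
    # checkpoint matches the one that will actually be used for this job.
    return lowvram_enabled and loaded_checkpoint == selected_checkpoint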