author    Vladimir Mandic <mandic00@live.com>  2023-01-09 21:54:12 +0000
committer GitHub <noreply@github.com>  2023-01-09 21:54:12 +0000
commit    95727312ca5913876aa1c74f47d1ff6d93bb6b1f
tree      0b3b4f90dc135651d09fa834279b5b28c458cf68
parent    47534577eda63b0db1eeb8921c2a161773ec434c
remove bytes -> gb conversion
-rw-r--r--  modules/api/api.py  18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index d2222b18..1c121ff0 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -467,26 +467,24 @@ class Api:
             return TrainResponse(info = "train embedding error: {error}".format(error = error))
 
     def get_memory(self):
-        def gb(val: float):
-            return round(val / 1024 / 1024 / 1024, 2)
         try:
             import os, psutil
             process = psutil.Process(os.getpid())
-            res = process.memory_info()
-            ram_total = 100 * res.rss / process.memory_percent()
-            ram = { 'free': gb(ram_total - res.rss), 'used': gb(res.rss), 'total': gb(ram_total) }
+            res = process.memory_info() # only rss is guaranteed to be cross-platform, so don't rely on the other fields
+            ram_total = 100 * res.rss / process.memory_percent() # total memory is derived from rss and memory_percent() since reading it directly is not cross-platform safe
+            ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
         except Exception as err:
             ram = { 'error': f'{err}' }
         try:
             import torch
             if torch.cuda.is_available():
                 s = torch.cuda.mem_get_info()
-                system = { 'free': gb(s[0]), 'used': gb(s[1] - s[0]), 'total': gb(s[1]) }
+                system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
                 s = dict(torch.cuda.memory_stats(shared.device))
-                allocated = { 'current': gb(s['allocated_bytes.all.current']), 'peak': gb(s['allocated_bytes.all.peak']) }
-                reserved = { 'current': gb(s['reserved_bytes.all.current']), 'peak': gb(s['reserved_bytes.all.peak']) }
-                active = { 'current': gb(s['active_bytes.all.current']), 'peak': gb(s['active_bytes.all.peak']) }
-                inactive = { 'current': gb(s['inactive_split_bytes.all.current']), 'peak': gb(s['inactive_split_bytes.all.peak']) }
+                allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
+                reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
+                active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
+                inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
                 warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
                 cuda = {
                     'system': system,
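
With the server-side gb() helper removed, the memory endpoint now reports raw byte counts and any conversion for display is the caller's job. A minimal client-side sketch of that conversion follows; the /sdapi/v1/memory path is the route this method is registered under in the webui API, while the base URL and the exact fields read are assumptions based on the response built above:

# Sketch: query the memory endpoint and convert the raw byte counts
# (as returned after this commit) to GB on the client side.
# Assumes a local webui instance listening at http://127.0.0.1:7860.
import requests

def gb(val: float) -> float:
    # same rounding the removed server-side helper used
    return round(val / 1024 / 1024 / 1024, 2)

resp = requests.get("http://127.0.0.1:7860/sdapi/v1/memory", timeout=10)
mem = resp.json()

ram = mem.get("ram", {})
if "error" not in ram:
    print(f"RAM used {gb(ram['used'])} GB of {gb(ram['total'])} GB")

system = mem.get("cuda", {}).get("system", {})
if system:
    print(f"VRAM free {gb(system['free'])} GB of {gb(system['total'])} GB")

For reference, the server's ram_total simply inverts psutil's memory_percent(): since memory_percent() returns rss / total * 100, total memory follows as 100 * rss / memory_percent(), avoiding a platform-specific query for total RAM.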