Update sd-webui-smea/scripts/sd-webui-smea.py
#4
opened by Dikz
sd-webui-smea/scripts/sd-webui-smea.py
CHANGED
|
@@ -1297,43 +1297,102 @@ def sample_euler_h_m_b_c(model, x, sigmas, extra_args=None, callback=None, disab
|
|
| 1297 |
return x
|
| 1298 |
|
| 1299 |
@torch.no_grad()
|
| 1300 |
-
def sample_euler_h_m_b_c_pp(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1301 |
extra_args = {} if extra_args is None else extra_args
|
| 1302 |
s_in = x.new_ones([x.shape[0]])
|
| 1303 |
-
|
| 1304 |
-
|
| 1305 |
-
|
| 1306 |
-
|
| 1307 |
-
|
| 1308 |
-
|
| 1309 |
-
|
| 1310 |
-
|
| 1311 |
-
|
| 1312 |
-
|
| 1313 |
-
|
| 1314 |
-
|
| 1315 |
-
|
| 1316 |
-
|
| 1317 |
-
|
| 1318 |
-
|
| 1319 |
-
|
| 1320 |
-
|
| 1321 |
-
|
| 1322 |
-
|
| 1323 |
-
|
| 1324 |
-
|
| 1325 |
-
|
| 1326 |
-
|
| 1327 |
-
|
| 1328 |
-
|
| 1329 |
-
|
| 1330 |
-
|
| 1331 |
-
|
| 1332 |
-
|
| 1333 |
-
|
| 1334 |
-
|
| 1335 |
-
|
| 1336 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1337 |
|
| 1338 |
@torch.no_grad()
|
| 1339 |
def sample_euler_smea_max(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., smooth=False):
|
|
|
|
| 1297 |
return x
|
| 1298 |
|
| 1299 |
@torch.no_grad()
|
| 1300 |
+
def sample_euler_h_m_b_c_pp(
|
| 1301 |
+
model,
|
| 1302 |
+
x,
|
| 1303 |
+
sigmas,
|
| 1304 |
+
extra_args=None,
|
| 1305 |
+
callback=None,
|
| 1306 |
+
disable=None,
|
| 1307 |
+
s_churn=0.,
|
| 1308 |
+
s_tmin=0.,
|
| 1309 |
+
s_tmax=float('inf'),
|
| 1310 |
+
s_noise=1.,
|
| 1311 |
+
noise_sampler=None,
|
| 1312 |
+
):
|
| 1313 |
extra_args = {} if extra_args is None else extra_args
|
| 1314 |
s_in = x.new_ones([x.shape[0]])
|
| 1315 |
+
|
| 1316 |
+
old_need_last = getattr(model, 'need_last_noise_uncond', False)
|
| 1317 |
+
model.need_last_noise_uncond = True
|
| 1318 |
+
|
| 1319 |
+
try:
|
| 1320 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 1321 |
+
wave = math.cos(math.pi * 0.5 * i) / (0.5 * i + 1.5) + 1
|
| 1322 |
+
sigma_min = sigmas[sigmas > 0].min()
|
| 1323 |
+
sigma_max = sigmas.max()
|
| 1324 |
+
s_tmin_eff = sigma_min if s_tmin == 0. else s_tmin
|
| 1325 |
+
s_tmax_eff = sigma_max if s_tmax == float('inf') else s_tmax
|
| 1326 |
+
|
| 1327 |
+
gamma = (
|
| 1328 |
+
min(
|
| 1329 |
+
wave * ((2 ** 0.5 - 1) + s_churn) / (len(sigmas) - 1),
|
| 1330 |
+
2 ** 0.5 - 1,
|
| 1331 |
+
)
|
| 1332 |
+
if s_tmin_eff <= sigmas[i] <= s_tmax_eff
|
| 1333 |
+
else 0.
|
| 1334 |
+
)
|
| 1335 |
+
|
| 1336 |
+
eps = (
|
| 1337 |
+
k_diffusion.sampling.BrownianTreeNoiseSampler(x, s_tmin_eff, s_tmax_eff, 0)
|
| 1338 |
+
if noise_sampler is None
|
| 1339 |
+
else noise_sampler
|
| 1340 |
+
)
|
| 1341 |
+
|
| 1342 |
+
gammaup = gamma + 1
|
| 1343 |
+
sigma_hat = sigmas[i] * gammaup
|
| 1344 |
+
|
| 1345 |
+
if gamma > 0:
|
| 1346 |
+
x = x + eps(sigmas[i], sigmas[i + 1]) * s_noise * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
|
| 1347 |
+
|
| 1348 |
+
denoised = model(x, sigma_hat * s_in, **extra_args)
|
| 1349 |
+
d = to_d(x, sigma_hat, denoised)
|
| 1350 |
+
dt = sigmas[i + 1] - sigma_hat
|
| 1351 |
+
|
| 1352 |
+
if callback is not None:
|
| 1353 |
+
callback({
|
| 1354 |
+
'x': x,
|
| 1355 |
+
'i': i,
|
| 1356 |
+
'sigma': sigmas[i],
|
| 1357 |
+
'sigma_hat': sigma_hat,
|
| 1358 |
+
'denoised': denoised,
|
| 1359 |
+
})
|
| 1360 |
+
|
| 1361 |
+
if i == 0:
|
| 1362 |
+
x = x + d * dt
|
| 1363 |
+
|
| 1364 |
+
elif i <= len(sigmas) - 4:
|
| 1365 |
+
x_2 = x + d * dt
|
| 1366 |
+
d_2 = to_d(x_2, sigmas[i + 1] * gammaup, denoised)
|
| 1367 |
+
|
| 1368 |
+
x_3 = x_2 + d_2 * dt
|
| 1369 |
+
sigma_3 = sigmas[i + 2] * gammaup
|
| 1370 |
+
|
| 1371 |
+
# Fresh evaluation at the actual third-point location.
|
| 1372 |
+
# With the A1111 CFG++ core hotfix, model.last_noise_uncond is stored
|
| 1373 |
+
# as derivative/noise estimate, which is exactly what this branch needs.
|
| 1374 |
+
_ = model(x_3, sigma_3 * s_in, **extra_args)
|
| 1375 |
+
d_3 = model.last_noise_uncond
|
| 1376 |
+
|
| 1377 |
+
if d_3 is None:
|
| 1378 |
+
d_3 = d_2
|
| 1379 |
+
|
| 1380 |
+
d_prime = d * 0.5 + d_2 * 0.375 + d_3 * 0.125
|
| 1381 |
+
x = x + d_prime * dt
|
| 1382 |
+
|
| 1383 |
+
elif sigmas[i + 1] > 0:
|
| 1384 |
+
x_2 = x + d * dt
|
| 1385 |
+
d_2 = to_d(x_2, sigmas[i + 1] * gammaup, denoised)
|
| 1386 |
+
d_prime = d * 0.5 + d_2 * 0.5
|
| 1387 |
+
x = x + d_prime * dt
|
| 1388 |
+
|
| 1389 |
+
else:
|
| 1390 |
+
x = x + d * dt
|
| 1391 |
+
|
| 1392 |
+
return x
|
| 1393 |
+
|
| 1394 |
+
finally:
|
| 1395 |
+
model.need_last_noise_uncond = old_need_last
|
| 1396 |
|
| 1397 |
@torch.no_grad()
|
| 1398 |
def sample_euler_smea_max(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., smooth=False):
|