| import sys, os |
| import pickle as p |
| now_dir = os.getcwd() |
| sys.path.append(os.path.join(now_dir)) |
| sys.path.append(os.path.join(now_dir, "train")) |
| import utils |
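| # Per-epoch loss history, best-loss markers and epoch timings shared with the training loop below. |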
| Loss_Gen_Per_Epoch = [] |
| Loss_Disc_Per_Epoch = [] |
| elapsed_time_record = [] |
| Lowest_lg = 0 |
| Lowest_ld = 0 |
| import datetime |
| hps = utils.get_hparams() |
| overtrain = hps.overtrain |
| experiment_name = hps.name |
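| # Limit the GPUs visible to this process before torch is imported; hps.gpus is "-"-separated (e.g. "0-1" -> "0,1"). |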
| os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",") |
| n_gpus = len(hps.gpus.split("-")) |
| from random import shuffle, randint |
| import traceback, json, argparse, itertools, math, torch, pdb |
|
|
| torch.backends.cudnn.deterministic = False |
| torch.backends.cudnn.benchmark = False |
| from torch import nn, optim |
| from torch.nn import functional as F |
| from torch.utils.data import DataLoader |
| from torch.utils.tensorboard import SummaryWriter |
| import torch.multiprocessing as mp |
| import torch.distributed as dist |
| from torch.nn.parallel import DistributedDataParallel as DDP |
| from torch.cuda.amp import autocast, GradScaler |
| from lib.infer_pack import commons |
| from time import sleep |
| from time import time as ttime |
| from data_utils import ( |
| TextAudioLoaderMultiNSFsid, |
| TextAudioLoader, |
| TextAudioCollateMultiNSFsid, |
| TextAudioCollate, |
| DistributedBucketSampler, |
| ) |
|
|
| import csv |
|
|
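| # Select model classes by version: v1 uses 256-dim content features, v2 uses 768-dim, and the large-HuBERT variant uses 1024-dim. |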
| if hps.version == "v1": |
| from lib.infer_pack.models import ( |
| SynthesizerTrnMs256NSFsid as RVC_Model_f0, |
| SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0, |
| MultiPeriodDiscriminator, |
| ) |
| elif hps.version == "v2" and hps.Large_HuBert: |
| from lib.infer_pack.models import ( |
| SynthesizerTrnMs1024NSFsid as RVC_Model_f0, |
| SynthesizerTrnMs1024NSFsid_nono as RVC_Model_nof0, |
| MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, |
| ) |
| else: |
| from lib.infer_pack.models import ( |
| SynthesizerTrnMs768NSFsid as RVC_Model_f0, |
| SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0, |
| MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, |
| ) |
| from losses import generator_loss, discriminator_loss, feature_loss, kl_loss |
| from mel_processing import mel_spectrogram_torch, spec_to_mel_torch |
| from process_ckpt import savee |
|
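| # global_step counts training steps across the whole run and is embedded in checkpoint and weight filenames. |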
|
| global global_step |
| global_step = 0 |
|
|
| def Calculate_format_elapsed_time(elapsed_time): |
| # Split a duration in seconds into hours, minutes, seconds and milliseconds. |
| h = int(elapsed_time/3600) |
| m = int(elapsed_time/60 - h*60) |
| s = int(elapsed_time%60) |
| ms = round((elapsed_time - int(elapsed_time))*1000) |
| return h,m,s,ms |
| def right_index(List,Value): |
| # Index of the last occurrence of Value in List. |
| index = len(List)-1-List[::-1].index(Value) |
| return index |
| def formating_time(time): |
| # Zero-pad components below 10, e.g. 7 -> "07". |
| time = time if time >= 10 else f"0{time}" |
| return time |
| class EpochRecorder: |
| def __init__(self): |
| self.last_time = ttime() |
|
|
| def record(self): |
| now_time = ttime() |
| elapsed_time = now_time - self.last_time |
| self.last_time = now_time |
| elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time)) |
| current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
| return f"[{current_time}] | ({elapsed_time_str})" |
|
|
|
|
| def main(): |
| # Spawn one training process per visible GPU; fall back to a single process |
| # on Apple-MPS or CPU-only machines so training still starts. |
| n_gpus = torch.cuda.device_count() |
| if not torch.cuda.is_available() and torch.backends.mps.is_available(): |
| n_gpus = 1 |
| if n_gpus < 1: |
| # Neither CUDA nor MPS reported: still launch one CPU process instead of silently doing nothing. |
| n_gpus = 1 |
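| # Single-node DDP rendezvous address; every child process spawned below connects to it. |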
| os.environ["MASTER_ADDR"] = "localhost" |
| os.environ["MASTER_PORT"] = str(randint(20000, 55555)) |
| children = [] |
| for i in range(n_gpus): |
| subproc = mp.Process( |
| target=run, |
| args=( |
| i, |
| n_gpus, |
| hps, |
| ), |
| ) |
| children.append(subproc) |
| subproc.start() |
| for i in range(n_gpus): |
| children[i].join() |
| |
|
|
|
|
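| # run() executes once per process/GPU: it builds the dataset and models, restores or |
| # initialises weights, then iterates epochs through train_and_evaluate(). |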
| def run(rank, n_gpus, hps): |
| global global_step, loss_disc, loss_gen_all, Loss_Disc_Per_Epoch, Loss_Gen_Per_Epoch, elapsed_time_record, best_epoch, best_global_step, Min_for_Single_epoch, prev_best_epoch |
| if rank == 0: |
| logger = utils.get_logger(hps.model_dir) |
| logger.info(hps) |
| |
| writer = SummaryWriter(log_dir=hps.model_dir) |
| writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) |
|
|
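| # The "gloo" backend also works without CUDA, so CPU/MPS runs can still initialise the process group. |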
| dist.init_process_group( |
| backend="gloo", init_method="env://", world_size=n_gpus, rank=rank |
| ) |
| torch.manual_seed(hps.train.seed) |
| if torch.cuda.is_available(): |
| torch.cuda.set_device(rank) |
|
|
| if hps.if_f0 == 1: |
| train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) |
| else: |
| train_dataset = TextAudioLoader(hps.data.training_files, hps.data) |
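| # Bucket batches by sample length so padding inside each batch stays small. |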
| train_sampler = DistributedBucketSampler( |
| train_dataset, |
| hps.train.batch_size * n_gpus, |
| |
| [100, 200, 300, 400, 500, 600, 700, 800, 900], |
| num_replicas=n_gpus, |
| rank=rank, |
| shuffle=True, |
| ) |
| |
| |
| if hps.if_f0 == 1: |
| collate_fn = TextAudioCollateMultiNSFsid() |
| else: |
| collate_fn = TextAudioCollate() |
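| # Persistent workers and prefetching keep the GPUs fed; batching is driven entirely by the bucket sampler. |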
| train_loader = DataLoader( |
| train_dataset, |
| num_workers=4, |
| shuffle=False, |
| pin_memory=True, |
| collate_fn=collate_fn, |
| batch_sampler=train_sampler, |
| persistent_workers=True, |
| prefetch_factor=8, |
| ) |
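| # Build the generator (f0-conditioned or not) and the multi-period discriminator. |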
| if hps.if_f0 == 1: |
| net_g = RVC_Model_f0( |
| hps.data.filter_length // 2 + 1, |
| hps.train.segment_size // hps.data.hop_length, |
| **hps.model, |
| is_half=hps.train.fp16_run, |
| sr=hps.sample_rate, |
| ) |
| else: |
| net_g = RVC_Model_nof0( |
| hps.data.filter_length // 2 + 1, |
| hps.train.segment_size // hps.data.hop_length, |
| **hps.model, |
| is_half=hps.train.fp16_run, |
| ) |
| if torch.cuda.is_available(): |
| net_g = net_g.cuda(rank) |
| net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm) |
| if torch.cuda.is_available(): |
| net_d = net_d.cuda(rank) |
| optim_g = torch.optim.AdamW( |
| net_g.parameters(), |
| hps.train.learning_rate, |
| betas=hps.train.betas, |
| eps=hps.train.eps, |
| ) |
| optim_d = torch.optim.AdamW( |
| net_d.parameters(), |
| hps.train.learning_rate, |
| betas=hps.train.betas, |
| eps=hps.train.eps, |
| ) |
| |
| |
| if torch.cuda.is_available(): |
| net_g = DDP(net_g, device_ids=[rank]) |
| net_d = DDP(net_d, device_ids=[rank]) |
| else: |
| net_g = DDP(net_g) |
| net_d = DDP(net_d) |
|
|
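| # Resume from the newest D_*.pth / G_*.pth checkpoints if they exist; otherwise start at epoch 1, |
| # optionally initialising both networks from the pretrained weights given in hps. |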
| try: |
| _, _, _, epoch_str = utils.load_checkpoint( |
| utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d |
| ) |
| if rank == 0: |
| logger.info("loaded D") |
| |
| _, _, _, epoch_str = utils.load_checkpoint( |
| utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g |
| ) |
| global_step = (epoch_str - 1) * len(train_loader) |
| |
| |
| except Exception: |
| # No usable checkpoint was found: start from scratch (optionally loading the pretrained weights below). |
| epoch_str = 1 |
| global_step = 0 |
| if hps.pretrainG != "": |
| if rank == 0: |
| logger.info("loaded pretrained %s" % (hps.pretrainG)) |
| print( |
| net_g.module.load_state_dict( |
| torch.load(hps.pretrainG, map_location="cpu")["model"] |
| ) |
| ) |
| if hps.pretrainD != "": |
| if rank == 0: |
| logger.info("loaded pretrained %s" % (hps.pretrainD)) |
| print( |
| net_d.module.load_state_dict( |
| torch.load(hps.pretrainD, map_location="cpu")["model"] |
| ) |
| ) |
|
|
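| # Exponential LR decay stepped once per epoch; last_epoch keeps the schedule aligned when resuming. |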
| scheduler_g = torch.optim.lr_scheduler.ExponentialLR( |
| optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 |
| ) |
| scheduler_d = torch.optim.lr_scheduler.ExponentialLR( |
| optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 |
| ) |
|
|
| scaler = GradScaler(enabled=hps.train.fp16_run) |
| |
| |
| |
| |
| |
| Min_for_Single_epoch = 1 |
| |
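| # Restore the pickled per-epoch loss history and the path of the previous best-epoch file, dropping entries recorded after the resume point. |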
| if os.path.exists(f"Loss_Gen_Per_Epoch_{hps.name}.p") and os.path.exists(f"Loss_Disc_Per_Epoch_{hps.name}.p"): |
| with open(f'Loss_Gen_Per_Epoch_{hps.name}.p', 'rb') as Loss_Gen: |
| Loss_Gen_Per_Epoch = p.load(Loss_Gen) |
| for i in range(len(Loss_Gen_Per_Epoch)-epoch_str+1): |
| Loss_Gen_Per_Epoch.pop() |
| with open(f'Loss_Disc_Per_Epoch_{hps.name}.p', 'rb') as Loss_Disc: |
| Loss_Disc_Per_Epoch = p.load(Loss_Disc) |
| for i in range(len(Loss_Disc_Per_Epoch)-epoch_str+1): |
| Loss_Disc_Per_Epoch.pop() |
| if os.path.exists(f"prev_best_epoch_{hps.name}.p"): |
| with open(f'prev_best_epoch_{hps.name}.p', 'rb') as prev_best_epoch_f: |
| prev_best_epoch = p.load(prev_best_epoch_f) |
| |
| cache = [] |
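| # Main epoch loop. Rank 0 additionally tracks per-epoch losses, best-epoch snapshots, |
| # ETA estimates and the overtraining early stop. |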
| for epoch in range(epoch_str, hps.train.epochs+1): |
| start_time = ttime() |
| if rank == 0: |
| train_and_evaluate( |
| rank, |
| epoch, |
| hps, |
| [net_g, net_d], |
| [optim_g, optim_d], |
| [scheduler_g, scheduler_d], |
| scaler, |
| [train_loader, None], |
| logger, |
| [writer, writer_eval], |
| cache, |
| ) |
| |
| |
| loss_gen_all = loss_gen_all.item() |
| loss_disc = loss_disc.item() |
| |
| Loss_Gen_Per_Epoch.append(loss_gen_all) |
| Loss_Disc_Per_Epoch.append(loss_disc) |
| |
| |
| with open(f'Loss_Gen_Per_Epoch_{hps.name}.p', 'wb') as Loss_Gen: |
| p.dump(Loss_Gen_Per_Epoch, Loss_Gen) |
| with open(f'Loss_Disc_Per_Epoch_{hps.name}.p', 'wb') as Loss_Disc: |
| p.dump(Loss_Disc_Per_Epoch, Loss_Disc) |
| |
| Lowest_lg = f"{min(Loss_Gen_Per_Epoch):.5f}, epoch: {right_index(Loss_Gen_Per_Epoch,min(Loss_Gen_Per_Epoch))+1}" |
| Lowest_ld = f"{min(Loss_Disc_Per_Epoch):.5f}, epoch: {right_index(Loss_Disc_Per_Epoch,min(Loss_Disc_Per_Epoch))+1}" |
| print(f"{hps.name}_e{epoch}_s{global_step} | Loss gen total: {Loss_Gen_Per_Epoch[-1]:.5f} | Lowest loss G: {Lowest_lg}\n Loss disc: {Loss_Disc_Per_Epoch[-1]:.5f} | Lowest loss D: {Lowest_ld}") |
| print(f"Specific Value: loss gen={loss_gen:.3f}, loss fm={loss_fm:.3f},loss mel={loss_mel:.3f}, loss kl={loss_kl:.3f}") |
| |
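| # Save an extra "_Best_Epoch" snapshot whenever this epoch's total generator loss is the lowest |
| # recorded so far (the first epoch is ignored), skipping epochs that already get a scheduled save. |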
| if len(Loss_Gen_Per_Epoch) > Min_for_Single_epoch and epoch % hps.save_every_epoch != 0: |
| if min(Loss_Gen_Per_Epoch[Min_for_Single_epoch::1]) == Loss_Gen_Per_Epoch[-1]: |
| if hasattr(net_g, "module"): |
| ckpt = net_g.module.state_dict() |
| else: |
| ckpt = net_g.state_dict() |
| savee(ckpt, hps.sample_rate, hps.if_f0, hps.name + "_e%s_s%s" % (epoch, global_step), epoch, hps.version, hps, experiment_name) |
| os.rename(f"logs/{hps.name}/weights/{hps.name}_e{epoch}_s{global_step}.pth",f"logs/{hps.name}/weights/{hps.name}_e{epoch}_s{global_step}_Best_Epoch.pth") |
| print(f"Saved: {hps.name}_e{epoch}_s{global_step}_Best_Epoch.pth") |
| try: |
| os.remove(prev_best_epoch) |
| except (NameError, OSError): |
| # Nothing to remove yet (first best epoch, or the previous file is already gone). |
| print("No previous best-epoch file to remove; if one should exist, check the weights folder.") |
| else: |
| print(f"{os.path.split(prev_best_epoch)[-1]} Removed") |
| best_epoch = epoch |
| best_global_step = global_step |
| prev_best_epoch = f"logs/{hps.name}/weights/{hps.name}_e{best_epoch}_s{best_global_step}_Best_Epoch.pth" |
| with open(f'prev_best_epoch_{hps.name}.p', 'wb') as prev_best_epoch_f: |
| p.dump(prev_best_epoch, prev_best_epoch_f) |
| |
| elapsed_time = ttime() - start_time |
| elapsed_time_record.append(elapsed_time) |
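| # Exclude the first (warm-up) epoch's timing from the ETA average once a second measurement exists. |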
| if epoch-1 == epoch_str: |
| elapsed_time_record.pop(0) |
| elapsed_time_avg = sum(elapsed_time_record)/len(elapsed_time_record) |
| time_left = elapsed_time_avg*(hps.total_epoch-epoch) |
| hour, minute, second, millisec = Calculate_format_elapsed_time(elapsed_time) |
| hour_left, minute_left, second_left, millisec_left = Calculate_format_elapsed_time(time_left) |
| print(f"Time Elapsed: {hour}h:{formating_time(minute)}m:{formating_time(second)}s:{millisec}ms || Time left: {hour_left}h:{formating_time(minute_left)}m:{formating_time(second_left)}s:{millisec_left}ms\n") |
| |
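| # Overtraining guard: if the best generator loss has not improved for roughly `overtrain` epochs |
| # (disabled with -1), save a final weight file, reset the stop flag and exit. |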
| if ((len(Loss_Gen_Per_Epoch) - right_index(Loss_Gen_Per_Epoch,min(Loss_Gen_Per_Epoch)) + 1) > overtrain and overtrain != -1): |
| logger.info("Over Train threshold reached. Training is done.") |
| print("Over Train threshold reached. Training is done.") |
| |
| if hasattr(net_g, "module"): |
| ckpt = net_g.module.state_dict() |
| else: |
| ckpt = net_g.state_dict() |
| logger.info( |
| "saving final ckpt:%s" |
| % ( |
| savee( |
| ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps, experiment_name |
| ) |
| ) |
| ) |
| sleep(1) |
| with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: |
| csv_writer = csv.writer(STOPCSVwrite, delimiter=",") |
| csv_writer.writerow(["False"]) |
| os._exit(2333333) |
| |
| else: |
| train_and_evaluate( |
| rank, |
| epoch, |
| hps, |
| [net_g, net_d], |
| [optim_g, optim_d], |
| [scheduler_g, scheduler_d], |
| scaler, |
| [train_loader, None], |
| None, |
| None, |
| cache, |
| ) |
| scheduler_g.step() |
| scheduler_d.step() |
| |
| |
| |
| |
|
|
| |
|
|
| |
|
|
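| # One full pass over the training data: optionally caches batches on the GPU, runs the |
| # discriminator and generator steps under AMP, logs to TensorBoard on rank 0, and handles |
| # scheduled checkpointing, the stop button and the final-epoch exit. |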
| def train_and_evaluate( |
| rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache |
| ): |
| global loss_gen_all, loss_disc, ckpt, loss_kl, loss_fm, loss_gen, loss_mel |
| net_g, net_d = nets |
| optim_g, optim_d = optims |
| train_loader, eval_loader = loaders |
| if writers is not None: |
| writer, writer_eval = writers |
|
|
| train_loader.batch_sampler.set_epoch(epoch) |
| global global_step |
|
|
| net_g.train() |
| net_d.train() |
|
|
| |
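| # Optionally keep every batch on the GPU after the first epoch so later epochs skip host-to-device copies. |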
| if hps.if_cache_data_in_gpu: |
| |
| data_iterator = cache |
| if cache == []: |
| |
| for batch_idx, info in enumerate(train_loader): |
| |
| if hps.if_f0 == 1: |
| ( |
| phone, |
| phone_lengths, |
| pitch, |
| pitchf, |
| spec, |
| spec_lengths, |
| wave, |
| wave_lengths, |
| sid, |
| ) = info |
| else: |
| ( |
| phone, |
| phone_lengths, |
| spec, |
| spec_lengths, |
| wave, |
| wave_lengths, |
| sid, |
| ) = info |
| |
| if torch.cuda.is_available(): |
| phone = phone.cuda(rank, non_blocking=True) |
| phone_lengths = phone_lengths.cuda(rank, non_blocking=True) |
| if hps.if_f0 == 1: |
| pitch = pitch.cuda(rank, non_blocking=True) |
| pitchf = pitchf.cuda(rank, non_blocking=True) |
| sid = sid.cuda(rank, non_blocking=True) |
| spec = spec.cuda(rank, non_blocking=True) |
| spec_lengths = spec_lengths.cuda(rank, non_blocking=True) |
| wave = wave.cuda(rank, non_blocking=True) |
| wave_lengths = wave_lengths.cuda(rank, non_blocking=True) |
| |
| if hps.if_f0 == 1: |
| cache.append( |
| ( |
| batch_idx, |
| ( |
| phone, |
| phone_lengths, |
| pitch, |
| pitchf, |
| spec, |
| spec_lengths, |
| wave, |
| wave_lengths, |
| sid, |
| ), |
| ) |
| ) |
| else: |
| cache.append( |
| ( |
| batch_idx, |
| ( |
| phone, |
| phone_lengths, |
| spec, |
| spec_lengths, |
| wave, |
| wave_lengths, |
| sid, |
| ), |
| ) |
| ) |
| else: |
| |
| shuffle(cache) |
| else: |
| |
| data_iterator = enumerate(train_loader) |
|
|
| |
| epoch_recorder = EpochRecorder() |
|
|
| for batch_idx, info in data_iterator: |
| |
| |
| if hps.if_f0 == 1: |
| ( |
| phone, |
| phone_lengths, |
| pitch, |
| pitchf, |
| spec, |
| spec_lengths, |
| wave, |
| wave_lengths, |
| sid, |
| ) = info |
| else: |
| phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info |
| |
| if not hps.if_cache_data_in_gpu and torch.cuda.is_available(): |
| phone = phone.cuda(rank, non_blocking=True) |
| phone_lengths = phone_lengths.cuda(rank, non_blocking=True) |
| if hps.if_f0 == 1: |
| pitch = pitch.cuda(rank, non_blocking=True) |
| pitchf = pitchf.cuda(rank, non_blocking=True) |
| sid = sid.cuda(rank, non_blocking=True) |
| spec = spec.cuda(rank, non_blocking=True) |
| spec_lengths = spec_lengths.cuda(rank, non_blocking=True) |
| wave = wave.cuda(rank, non_blocking=True) |
| |
|
|
| |
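| # Generator forward pass; predicted and target mels are sliced to the trained segment for the reconstruction loss. |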
| with autocast(enabled=hps.train.fp16_run): |
| if hps.if_f0 == 1: |
| ( |
| y_hat, |
| ids_slice, |
| x_mask, |
| z_mask, |
| (z, z_p, m_p, logs_p, m_q, logs_q), |
| ) = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid) |
| else: |
| ( |
| y_hat, |
| ids_slice, |
| x_mask, |
| z_mask, |
| (z, z_p, m_p, logs_p, m_q, logs_q), |
| ) = net_g(phone, phone_lengths, spec, spec_lengths, sid) |
| mel = spec_to_mel_torch( |
| spec, |
| hps.data.filter_length, |
| hps.data.n_mel_channels, |
| hps.data.sampling_rate, |
| hps.data.mel_fmin, |
| hps.data.mel_fmax, |
| ) |
| y_mel = commons.slice_segments( |
| mel, ids_slice, hps.train.segment_size // hps.data.hop_length |
| ) |
| with autocast(enabled=False): |
| y_hat_mel = mel_spectrogram_torch( |
| y_hat.float().squeeze(1), |
| hps.data.filter_length, |
| hps.data.n_mel_channels, |
| hps.data.sampling_rate, |
| hps.data.hop_length, |
| hps.data.win_length, |
| hps.data.mel_fmin, |
| hps.data.mel_fmax, |
| ) |
| if hps.train.fp16_run: |
| y_hat_mel = y_hat_mel.half() |
| wave = commons.slice_segments( |
| wave, ids_slice * hps.data.hop_length, hps.train.segment_size |
| ) |
|
|
| |
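| # Discriminator step on real vs. generated (detached) waveform slices, with scaled backward and gradient clipping. |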
| y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach()) |
| with autocast(enabled=False): |
| loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( |
| y_d_hat_r, y_d_hat_g |
| ) |
| optim_d.zero_grad() |
| scaler.scale(loss_disc).backward() |
| scaler.unscale_(optim_d) |
| grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) |
| scaler.step(optim_d) |
|
|
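| # Generator step: adversarial, feature-matching, mel-reconstruction and KL losses combined into loss_gen_all. |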
| with autocast(enabled=hps.train.fp16_run): |
| |
| y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat) |
| with autocast(enabled=False): |
| loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel |
| loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl |
| loss_fm = feature_loss(fmap_r, fmap_g) |
| loss_gen, losses_gen = generator_loss(y_d_hat_g) |
| loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl |
| optim_g.zero_grad() |
| scaler.scale(loss_gen_all).backward() |
| scaler.unscale_(optim_g) |
| grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) |
| scaler.step(optim_g) |
| scaler.update() |
|
|
| if rank == 0: |
| if global_step % hps.train.log_interval == 0: |
| lr = optim_g.param_groups[0]["lr"] |
| logger.info( "" |
| |
| |
| |
| ) |
| |
| if loss_mel > 75: |
| loss_mel = 75 |
| if loss_kl > 9: |
| loss_kl = 9 |
|
|
| logger.info([global_step, lr]) |
| logger.info("" |
| |
| ) |
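| # Collect scalars and example spectrograms for TensorBoard. |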
| scalar_dict = { |
| "loss/g/total": loss_gen_all, |
| "loss/d/total": loss_disc, |
| "learning_rate": lr, |
| "grad_norm_d": grad_norm_d, |
| "grad_norm_g": grad_norm_g, |
| } |
| scalar_dict.update( |
| { |
| "loss/g/fm": loss_fm, |
| "loss/g/mel": loss_mel, |
| "loss/g/kl": loss_kl, |
| } |
| ) |
|
|
| scalar_dict.update( |
| {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)} |
| ) |
| scalar_dict.update( |
| {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)} |
| ) |
| scalar_dict.update( |
| {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)} |
| ) |
| image_dict = { |
| "slice/mel_org": utils.plot_spectrogram_to_numpy( |
| y_mel[0].data.cpu().numpy() |
| ), |
| "slice/mel_gen": utils.plot_spectrogram_to_numpy( |
| y_hat_mel[0].data.cpu().numpy() |
| ), |
| "all/mel": utils.plot_spectrogram_to_numpy( |
| mel[0].data.cpu().numpy() |
| ), |
| } |
| utils.summarize( |
| writer=writer, |
| global_step=global_step, |
| images=image_dict, |
| scalars=scalar_dict, |
| ) |
| global_step += 1 |
| |
|
|
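| # Scheduled checkpointing: keep every G_/D_ checkpoint, or overwrite a single "latest" pair when if_latest is set. |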
| if epoch % hps.save_every_epoch == 0 and rank == 0: |
| print(f"Saved: {hps.name}_e{epoch}_s{global_step}.pth") |
| if hps.if_latest == 0: |
| utils.save_checkpoint( |
| net_g, |
| optim_g, |
| hps.train.learning_rate, |
| epoch, |
| os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), |
| ) |
| utils.save_checkpoint( |
| net_d, |
| optim_d, |
| hps.train.learning_rate, |
| epoch, |
| os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), |
| ) |
| else: |
| utils.save_checkpoint( |
| net_g, |
| optim_g, |
| hps.train.learning_rate, |
| epoch, |
| os.path.join(hps.model_dir, "G_{}.pth".format(2333333)), |
| ) |
| utils.save_checkpoint( |
| net_d, |
| optim_d, |
| hps.train.learning_rate, |
| epoch, |
| os.path.join(hps.model_dir, "D_{}.pth".format(2333333)), |
| ) |
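| # Optionally also export a small inference-ready weight file at every save interval. |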
| if rank == 0 and hps.save_every_weights == "1": |
| if hasattr(net_g, "module"): |
| ckpt = net_g.module.state_dict() |
| else: |
| ckpt = net_g.state_dict() |
| logger.info( |
| "saving ckpt %s_e%s:%s" |
| % ( |
| hps.name, |
| epoch, |
| savee( |
| ckpt, |
| hps.sample_rate, |
| hps.if_f0, |
| hps.name + "_e%s_s%s" % (epoch, global_step), |
| epoch, |
| hps.version, |
| hps, |
| experiment_name, |
| ), |
| ) |
| ) |
|
|
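| # csvdb/stop.csv holds a single True/False flag, presumably written by the UI's stop button. |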
| try: |
| with open("csvdb/stop.csv") as CSVStop: |
| csv_reader = list(csv.reader(CSVStop)) |
| if not csv_reader or not csv_reader[0]: |
| raise ValueError("No data") |
| # Stop flag: a single True/False cell; anything other than "true" keeps training running. |
| stopbtn = csv_reader[0][0].strip().lower() == "true" |
| except (ValueError, TypeError, IndexError, FileNotFoundError): |
| stopbtn = False |
|
|
| if stopbtn: |
| logger.info("Stop Button was pressed. The program is closed.") |
| if hasattr(net_g, "module"): |
| ckpt = net_g.module.state_dict() |
| else: |
| ckpt = net_g.state_dict() |
| logger.info( |
| "saving final ckpt:%s" |
| % ( |
| savee( |
| ckpt, |
| hps.sample_rate, |
| hps.if_f0, |
| hps.name, |
| epoch, |
| hps.version, |
| hps, |
| experiment_name, |
| ) |
| ) |
| ) |
| sleep(1) |
| with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: |
| csv_writer = csv.writer(STOPCSVwrite, delimiter=",") |
| csv_writer.writerow(["False"]) |
| os._exit(2333333) |
|
|
| if rank == 0: |
| logger.info(f"====> Epoch: {epoch} {epoch_recorder.record()}") |
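| # Final epoch reached: export the last weight file, reset the stop flag and exit the process. |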
| if epoch > hps.total_epoch and rank == 0: |
| logger.info("Training is done. The program is closed.") |
|
|
| if hasattr(net_g, "module"): |
| ckpt = net_g.module.state_dict() |
| else: |
| ckpt = net_g.state_dict() |
| logger.info( |
| "saving final ckpt:%s" |
| % ( |
| savee( |
| ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps, experiment_name |
| ) |
| ) |
| ) |
| sleep(1) |
| with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: |
| csv_writer = csv.writer(STOPCSVwrite, delimiter=",") |
| csv_writer.writerow(["False"]) |
| os._exit(2333333) |
|
|
|
|
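| # CUDA initialisation in child processes requires the "spawn" start method. |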
| if __name__ == "__main__": |
| torch.multiprocessing.set_start_method("spawn") |
| main() |