refactor: vae 退化为 ae

This commit is contained in:
unanmed 2026-02-03 13:35:52 +08:00
parent df2bd80bec
commit bc73cf9cc3
4 changed files with 25 additions and 37 deletions

View File

@@ -65,7 +65,12 @@ disable_tqdm = not sys.stdout.isatty()
def gt_prob(epoch: int, max_epoch: int) -> float:
progress = epoch / max_epoch
return 1
if progress < 0.2:
return 1
elif progress < 0.8:
return 1 - (progress - 0.2) / 0.6
else:
return 0
def parse_arguments():
parser = argparse.ArgumentParser(description="training codes")
@@ -115,30 +120,24 @@ def train():
for epoch in tqdm(range(args.epochs), desc="VAE Training", disable=disable_tqdm):
loss_total = torch.Tensor([0]).to(device)
reco_loss_total = torch.Tensor([0]).to(device)
kl_loss_total = torch.Tensor([0]).to(device)
for batch in tqdm(dataloader, leave=False, desc="Epoch Progress", disable=disable_tqdm):
target_map = batch["target_map"].to(device)
fake_logits, mu, logvar = vae(target_map, 1 - gt_prob(epoch, args.epochs))
loss, reco_loss, kl_loss = criterion.vae_loss(fake_logits, target_map, mu, logvar, KL_BETA)
loss = criterion.vae_loss(fake_logits, target_map, mu, logvar, KL_BETA)
loss.backward()
torch.nn.utils.clip_grad_norm_(vae.parameters(), max_norm=1.0)
optimizer_ginka.step()
loss_total += loss.detach()
reco_loss_total += reco_loss.detach()
kl_loss_total += kl_loss.detach()
avg_loss = loss_total.item() / len(dataloader)
avg_reco_loss = reco_loss_total.item() / len(dataloader)
avg_kl_loss = kl_loss_total.item() / len(dataloader)
tqdm.write(
f"[Epoch {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] " +
f"E: {epoch + 1} | Loss: {avg_loss:.6f} | Reco Loss: {avg_reco_loss:.6f} | " +
f"KL Loss: {avg_kl_loss:.6f} | LR: {optimizer_ginka.param_groups[0]['lr']:.6f}"
f"E: {epoch + 1} | Loss: {avg_loss:.6f} | " +
f"LR: {optimizer_ginka.param_groups[0]['lr']:.6f}"
)
scheduler_ginka.step()
@@ -153,7 +152,6 @@ def train():
val_loss_total = torch.Tensor([0]).to(device)
reco_loss_total = torch.Tensor([0]).to(device)
kl_loss_total = torch.Tensor([0]).to(device)
with torch.no_grad():
idx = 0
gap = 5
@@ -163,12 +161,10 @@ def train():
for batch in tqdm(dataloader_val, desc="Validating generator.", leave=False, disable=disable_tqdm):
target_map = batch["target_map"].to(device)
fake_logits, mu, logvar = vae(target_map, 1 - gt_prob(epoch, args.epochs))
fake_logits, z = vae(target_map, 1 - gt_prob(epoch, args.epochs))
loss, reco_loss, kl_loss = criterion.vae_loss(fake_logits, target_map, mu, logvar, KL_BETA)
loss = criterion.vae_loss(fake_logits, target_map, z, KL_BETA)
val_loss_total += loss.detach()
reco_loss_total += reco_loss.detach()
kl_loss_total += kl_loss.detach()
fake_map = torch.argmax(fake_logits, dim=1).cpu().numpy()
fake_img = matrix_to_image_cv(fake_map[0], tile_dict)
@@ -195,10 +191,8 @@ def train():
index2 = random.randint(0, val_length - 1)
map1 = torch.LongTensor(dataset_val.data[index1]["map"]).to(device).reshape(1, 13, 13)
map2 = torch.LongTensor(dataset_val.data[index2]["map"]).to(device).reshape(1, 13, 13)
mu1, logvar1 = vae.encoder(map1)
mu2, logvar2 = vae.encoder(map2)
z1 = vae.reparameterize(mu1, logvar1)
z2 = vae.reparameterize(mu2, logvar2)
z1 = vae.encoder(map1)
z2 = vae.encoder(map2)
real_img1 = matrix_to_image_cv(map1[0].cpu().numpy(), tile_dict)
real_img2 = matrix_to_image_cv(map2[0].cpu().numpy(), tile_dict)
i = 0
@@ -213,12 +207,9 @@ def train():
i += 1
avg_loss_val = val_loss_total.item() / len(dataloader_val)
avg_reco_loss = reco_loss_total.item() / len(dataloader_val)
avg_kl_loss = kl_loss_total.item() / len(dataloader_val)
tqdm.write(
f"[Validate {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] E: {epoch + 1} | " +
f"Loss: {avg_loss_val:.6f} | Reco Loss: {avg_reco_loss:.6f} | " +
f"KL Loss: {avg_kl_loss:.6f}"
f"Loss: {avg_loss_val:.6f}"
)
print("Train ended.")

View File

@@ -91,6 +91,9 @@ class VAEEncoder(nn.Module):
self.embedding = EncoderEmbedding(tile_classes, width, height, 128, 256)
self.rnn = EncoderGRU(256, self.rnn_hidden, self.logits_dim)
self.fusion = EncoderFusion(256)
self.fc = nn.Sequential(
nn.Linear(512, latent_dim)
)
self.fc_mu = nn.Linear(512, latent_dim)
self.fc_logvar = nn.Linear(512, latent_dim)
@@ -118,9 +121,8 @@ class VAEEncoder(nn.Module):
output[:, idx] = logits
h = self.fusion(output)
mu = self.fc_mu(h)
logvar = self.fc_logvar(h)
return mu, logvar
vec = self.fc(h)
return vec
if __name__ == "__main__":
device = torch.device("cpu")

View File

@@ -5,13 +5,9 @@ class VAELoss:
def __init__(self):
self.num_classes = 32
def vae_loss(self, logits, target, mu, logvar, beta=0.1):
def vae_loss(self, logits, target):
# target: [B, 13, 13]
target = F.one_hot(target, num_classes=self.num_classes).float().permute(0, 3, 1, 2)
recon_loss = F.cross_entropy(logits, target)
kl_loss = -0.5 * torch.mean(
1 + logvar - mu.pow(2) - logvar.exp()
)
return recon_loss + beta * kl_loss, recon_loss, kl_loss
return recon_loss

View File

@@ -18,10 +18,9 @@ class GinkaVAE(nn.Module):
return mu + eps * std
def forward(self, target_map: torch.Tensor, use_self_probility=0):
mu, logvar = self.encoder(target_map)
z = self.reparameterize(mu, logvar)
z = self.encoder(target_map)
logits = self.decoder(z, target_map, use_self_probility)
return logits, mu, logvar
return logits, z
if __name__ == "__main__":
device = torch.device("cpu")
@@ -35,13 +34,13 @@ if __name__ == "__main__":
# 前向传播
start = time.perf_counter()
logits, mu, logvar = model(input)
logits, z = model(input)
end = time.perf_counter()
print_memory("前向传播后")
print(f"推理耗时: {end - start}")
print(f"输出形状: logits= {logits.shape}, mu={mu.shape}, logvar={logvar.shape}")
print(f"输出形状: logits= {logits.shape}, z={z.shape}")
print(f"Encoder parameters: {sum(p.numel() for p in model.encoder.parameters())}")
print(f"Decoder parameters: {sum(p.numel() for p in model.decoder.parameters())}")
print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")