mirror of
https://github.com/unanmed/ginka-generator.git
synced 2026-05-17 06:51:11 +08:00
feat: heatmap 生成模型
This commit is contained in:
parent
36a4faff4e
commit
b4f49e702d
@ -77,4 +77,70 @@ class GinkaMaskGITDataset(Dataset):
|
||||
"target_map": target,
|
||||
"heatmap": heatmap
|
||||
}
|
||||
|
||||
|
||||
class GinkaHeatmapDataset(Dataset):
    """Dataset for training the heatmap diffusion model.

    Each sample is derived from one stored multi-channel heatmap:
    the target is a Gaussian-blurred (smoothed) heatmap, and the
    condition is the original sharp heatmap with a random fraction of
    cells zeroed out per channel.
    """

    def __init__(self, data_path: str, min_mask=0, max_mask=0.8, blur_min=3, blur_max=6):
        self.data = load_data(data_path)
        self.blur_min = blur_min  # inclusive lower bound of the Gaussian kernel size
        self.blur_max = blur_max  # inclusive upper bound of the Gaussian kernel size
        self.min_mask = min_mask  # minimum fraction of masked cells per channel
        self.max_mask = max_mask  # maximum fraction of masked cells per channel

    def __len__(self):
        return len(self.data)

    @staticmethod
    def _odd(size: int) -> int:
        """Nudge an even kernel size to an odd one (GaussianBlur requires odd)."""
        if size % 2 == 0:
            return size + 1 if random.random() < 0.5 else size - 1
        return size

    def __getitem__(self, idx):
        item = self.data[idx]
        heatmap = np.array(item['heatmap'], dtype=np.float32)  # [C, H, W]

        # Data augmentation: random rotation / flips applied identically to
        # every channel.  np.rot90 returns a view, so copy before reuse —
        # the original wrote rotated views of each channel back into the
        # same array, which relies on overlapping-copy behavior.
        if np.random.rand() > 0.5:
            k = np.random.randint(0, 4)
            heatmap = np.rot90(heatmap, k, axes=(1, 2)).copy()

        if np.random.rand() > 0.5:
            heatmap = heatmap[:, :, ::-1].copy()  # fliplr on each channel

        if np.random.rand() > 0.5:
            heatmap = heatmap[:, ::-1, :].copy()  # flipud on each channel

        target = heatmap.copy()

        # Smooth the target.  Fix: cv2.GaussianBlur interprets a 3-D array as
        # (rows, cols, channels), so blurring the [C, H, W] array directly
        # would blur across the channel axis; blur each channel separately.
        if random.random() < 0.5:
            size = self._odd(random.randint(self.blur_min, self.blur_max))
            for c in range(target.shape[0]):
                target[c] = cv2.GaussianBlur(target[c], (size, size), 0)
        else:
            sizeX = self._odd(random.randint(self.blur_min, self.blur_max))
            sizeY = self._odd(random.randint(self.blur_min, self.blur_max))
            for c in range(target.shape[0]):
                target[c] = cv2.GaussianBlur(target[c], (sizeX, sizeY), 0)

        target = torch.FloatTensor(target)  # [heatmap_channel, H, W]
        cond = torch.FloatTensor(heatmap)   # [heatmap_channel, H, W]
        C, H, W = target.shape

        # Randomly zero out a per-channel fraction of cells in the condition.
        for i in range(C):
            total = H * W
            ratio = np.random.random() * (self.max_mask - self.min_mask) + self.min_mask
            num = int(total * ratio)

            chosen = np.random.choice(total, num, replace=False)

            mask = np.zeros(total, dtype=bool)
            mask[chosen] = True
            mask = mask.reshape(H, W)
            cond[i, torch.from_numpy(mask)] = 0

        return {
            # Fix: the blurred target was previously computed and then
            # discarded (the sharp heatmap was returned instead).
            "target_heatmap": target,
            "cond_heatmap": cond
        }
|
||||
49
ginka/heatmap/cond.py
Normal file
49
ginka/heatmap/cond.py
Normal file
@ -0,0 +1,49 @@
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
class HeatmapCond(nn.Module):
    """Encode a heatmap plus a diffusion time step into a feature map.

    Three conv stages progressively widen the channel dimension; after each
    stage a projection of the time-step embedding is added, broadcast over
    the spatial dimensions.
    """

    def __init__(self, T=100, embed_dim=128, heatmap_dim=8, output_dim=128):
        super().__init__()
        # One learned embedding per discrete diffusion step in [0, T).
        self.time_embedding = nn.Embedding(T, embed_dim)
        self.conv1 = nn.Sequential(
            nn.Conv2d(heatmap_dim, output_dim // 4, 3, padding=1, padding_mode='replicate'),
            nn.BatchNorm2d(output_dim // 4),
            nn.GELU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(output_dim // 4, output_dim // 2, 3, padding=1, padding_mode='replicate'),
            nn.BatchNorm2d(output_dim // 2),
            nn.GELU()
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(output_dim // 2, output_dim, 3, padding=1, padding_mode='replicate')
        )

        # Per-stage projections of the time embedding to the stage's width.
        self.fc1 = nn.Sequential(
            nn.Linear(embed_dim, output_dim // 4),
            nn.Dropout(0.3),
            nn.LayerNorm(output_dim // 4),
            nn.GELU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(embed_dim, output_dim // 2),
            nn.Dropout(0.3),
            nn.LayerNorm(output_dim // 2),
            nn.GELU()
        )
        self.fc3 = nn.Sequential(
            nn.Linear(embed_dim, output_dim),
            nn.Dropout(0.3),
            nn.LayerNorm(output_dim),
            nn.GELU()
        )

    def forward(self, heatmap: torch.Tensor, t: torch.Tensor):
        # heatmap: [B, C, H, W]
        # t: [B] time-step indices (a trailing singleton dim, e.g. [B, 1],
        #    is also accepted).
        # Fix: with t of shape [B, 1] the old unsqueeze/unsqueeze/permute
        # chain produced a 5-D tensor and crashed in permute; flatten t so
        # both shapes work.
        t = t.reshape(-1)
        t_embed = self.time_embedding(t)  # [B, embed_dim]
        # [:, :, None, None] turns [B, C'] into [B, C', 1, 1] for broadcast.
        x = self.conv1(heatmap) + self.fc1(t_embed)[:, :, None, None]
        x = self.conv2(x) + self.fc2(t_embed)[:, :, None, None]
        x = self.conv3(x) + self.fc3(t_embed)[:, :, None, None]
        return x
|
||||
|
||||
50
ginka/heatmap/diffusion.py
Normal file
50
ginka/heatmap/diffusion.py
Normal file
@ -0,0 +1,50 @@
|
||||
import math
|
||||
import torch
|
||||
|
||||
class Diffusion:
    """Cosine-schedule DDPM helper: forward noising and strided ancestral sampling."""

    def __init__(self, device, T=100):
        self.T = T
        self.device = device

        # Cosine noise schedule (recommended).
        offset = 0.008
        grid = torch.arange(T + 1, dtype=torch.float32)
        curve = torch.cos(((grid / T) + offset) / (1 + offset) * math.pi * 0.5) ** 2
        alpha_bar = curve / curve[0]

        self.alpha_bar = alpha_bar.to(device)
        self.sqrt_ab = torch.sqrt(self.alpha_bar)
        self.sqrt_one_minus_ab = torch.sqrt(1 - self.alpha_bar)

    def q_sample(self, x0, t, noise):
        """Forward diffusion: blend the clean sample with noise at step(s) t."""
        scale = self.sqrt_ab[t][:, None, None, None]
        sigma = self.sqrt_one_minus_ab[t][:, None, None, None]
        return scale * x0 + sigma * noise

    def sample(self, model, cond: torch.Tensor, steps=20):
        """Generate a sample conditioned on `cond`, taking T // steps step strides."""
        batch = cond.shape[0]
        x = torch.randn_like(cond).to(cond.device)

        stride = self.T // steps

        for i in reversed(range(0, self.T, stride)):
            t = torch.full((batch,), i, device=cond.device)

            eps = model(x, cond, t)

            ab_now = self.alpha_bar[i]
            ab_prev = self.alpha_bar[max(i - stride, 0)]

            # Estimate the clean signal from the current noisy state ...
            denoised = (x - torch.sqrt(1 - ab_now) * eps) / torch.sqrt(ab_now)

            # ... then re-noise it to the previous (less noisy) level.
            x = (
                torch.sqrt(ab_prev) * denoised
                + torch.sqrt(1 - ab_prev) * eps
            )

        return x
|
||||
|
||||
64
ginka/heatmap/model.py
Normal file
64
ginka/heatmap/model.py
Normal file
@ -0,0 +1,64 @@
|
||||
import time
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from .cond import HeatmapCond
|
||||
from ..maskGIT.maskGIT import MaskGIT
|
||||
from ..utils import print_memory
|
||||
|
||||
class GinkaHeatmapModel(nn.Module):
    """Diffusion denoiser for heatmaps.

    The noisy heatmap and the conditioning point map are each encoded by a
    HeatmapCond block (which also injects the time-step embedding), summed,
    flattened into a token sequence, run through a MaskGIT transformer, and
    projected back to `heatmap_dim` channels.
    """

    def __init__(
        self, T=100, embed_dim=128, heatmap_dim=8, d_model=128, dim_ff=512, nhead=8,
        num_layers=4, map_size=13*13
    ):
        super().__init__()
        self.heatmap_dim = heatmap_dim
        # Learned positional embedding over the flattened H*W token grid.
        self.pos_embedding = nn.Parameter(torch.randn(1, map_size, d_model))
        self.cond = HeatmapCond(T, embed_dim=embed_dim, heatmap_dim=heatmap_dim, output_dim=d_model)
        self.input = HeatmapCond(T, embed_dim=embed_dim, heatmap_dim=heatmap_dim, output_dim=d_model)
        self.transformer = MaskGIT(d_model=d_model, dim_ff=dim_ff, nhead=nhead, num_layers=num_layers)
        self.output_fc = nn.Sequential(
            nn.Linear(d_model, heatmap_dim),
            nn.Sigmoid()
        )

    def forward(self, input: torch.Tensor, cond: torch.Tensor, t: torch.Tensor):
        # input: [B, heatmap_dim, H, W] noisy heatmap
        # cond: [B, heatmap_dim, H, W] point map
        # t: [B] diffusion time steps
        input = self.input(input, t)  # [B, d_model, H, W]
        cond = self.cond(cond, t)     # [B, d_model, H, W]
        hidden = input + cond
        B, C, H, W = hidden.shape
        hidden = hidden.view(B, C, H * W).permute(0, 2, 1)  # [B, H * W, d_model]
        hidden = hidden + self.pos_embedding
        hidden = self.transformer(hidden)  # [B, H * W, d_model]
        output = self.output_fc(hidden)    # [B, H * W, heatmap_dim]
        # Fix: calling .view(B, heatmap_dim, H, W) directly on the
        # [B, H*W, heatmap_dim] tensor reinterprets memory and scrambles the
        # channel/spatial correspondence; transpose to channel-first first.
        return output.permute(0, 2, 1).reshape(B, self.heatmap_dim, H, W)
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test: build the model on CPU and time one forward pass.
    device = torch.device("cpu")

    input = torch.randn(1, 9, 13, 13).to(device)
    # NOTE(review): the original used randint(0, 1, ...), which can only
    # produce zeros; an upper bound of 2 yields an actual 0/1 point map.
    cond = torch.randint(0, 2, [1, 9, 13, 13]).to(device)
    # Fix: time steps are a 1-D [B] tensor (the former [1, 1] shape broke
    # the time-embedding broadcast in HeatmapCond.forward).
    t = torch.randint(0, 100, [1]).to(device)

    # Build the model.
    model = GinkaHeatmapModel(heatmap_dim=9).to(device)

    print_memory("初始化后")

    # One forward pass, timed.
    start = time.perf_counter()
    output = model(input, cond.float(), t)
    end = time.perf_counter()

    print_memory("前向传播后")

    print(f"推理耗时: {end - start}")
    print(f"输出形状: output={output.shape}")
    print(f"Tile Embedding parameters: {sum(p.numel() for p in model.cond.parameters())}")
    print(f"Condition Encoder parameters: {sum(p.numel() for p in model.input.parameters())}")
    print(f"MaskGIT parameters: {sum(p.numel() for p in model.transformer.parameters())}")
    print(f"Output parameters: {sum(p.numel() for p in model.output_fc.parameters())}")
    print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")
|
||||
|
||||
@ -4,17 +4,8 @@ import torch.nn as nn
|
||||
from ..utils import print_memory
|
||||
|
||||
class GinkaMaskGITCond(nn.Module):
|
||||
def __init__(self, cond_dim=16, heatmap_channel=4, output_dim=256):
|
||||
def __init__(self, heatmap_channel=4, output_dim=256):
|
||||
super().__init__()
|
||||
self.cond_fc = nn.Sequential(
|
||||
nn.Linear(cond_dim, output_dim // 2),
|
||||
nn.Dropout(0.3),
|
||||
nn.LayerNorm(output_dim // 2),
|
||||
nn.GELU(),
|
||||
|
||||
nn.Linear(output_dim // 2, output_dim)
|
||||
)
|
||||
|
||||
self.heatmap_conv = nn.Sequential(
|
||||
nn.Conv2d(heatmap_channel, output_dim // 4, kernel_size=3, padding=1, padding_mode='replicate'),
|
||||
nn.BatchNorm2d(output_dim // 4),
|
||||
@ -24,20 +15,19 @@ class GinkaMaskGITCond(nn.Module):
|
||||
nn.BatchNorm2d(output_dim // 2),
|
||||
nn.GELU(),
|
||||
|
||||
nn.Conv2d(output_dim // 2, output_dim, kernel_size=3, padding=1, padding_mode='replicate')
|
||||
nn.Conv2d(output_dim // 2, output_dim, kernel_size=3, padding=1, padding_mode='replicate'),
|
||||
nn.BatchNorm2d(output_dim),
|
||||
nn.GELU()
|
||||
)
|
||||
|
||||
def forward(self, cond, heatmap):
|
||||
# cond: [B, cond_dim]
|
||||
def forward(self, heatmap):
|
||||
# heatmap: [B, C, H, W]
|
||||
cond = self.cond_fc(cond)
|
||||
heatmap = self.heatmap_conv(heatmap)
|
||||
return cond, heatmap
|
||||
return heatmap
|
||||
|
||||
if __name__ == "__main__":
|
||||
device = torch.device("cpu")
|
||||
|
||||
cond = torch.rand(1, 16).to(device)
|
||||
heatmap = torch.rand(1, 4, 13, 13).to(device)
|
||||
|
||||
# 初始化模型
|
||||
@ -47,7 +37,7 @@ if __name__ == "__main__":
|
||||
|
||||
# 前向传播
|
||||
start = time.perf_counter()
|
||||
cond, heatmap = model(cond, heatmap)
|
||||
cond, heatmap = model(heatmap)
|
||||
end = time.perf_counter()
|
||||
|
||||
print_memory("前向传播后")
|
||||
|
||||
@ -7,15 +7,15 @@ from .maskGIT import MaskGIT
|
||||
|
||||
class GinkaMaskGIT(nn.Module):
|
||||
def __init__(
|
||||
self, num_classes=16, cond_dim=16, heatmap_channel=4, d_model=256,
|
||||
self, num_classes=16, heatmap_channel=4, d_model=256,
|
||||
dim_ff=512, nhead=8, num_layers=4, map_size=13*13
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.tile_embedding = nn.Embedding(num_classes, d_model)
|
||||
self.pos_embedding = nn.Parameter(torch.randn(1, map_size + 1, d_model))
|
||||
self.pos_embedding = nn.Parameter(torch.randn(1, map_size, d_model))
|
||||
|
||||
self.cond_encoder = GinkaMaskGITCond(cond_dim=cond_dim, heatmap_channel=heatmap_channel, output_dim=d_model)
|
||||
self.cond_encoder = GinkaMaskGITCond(heatmap_channel=heatmap_channel, output_dim=d_model)
|
||||
|
||||
self.transformer = MaskGIT(d_model=d_model, dim_ff=dim_ff, nhead=nhead, num_layers=num_layers)
|
||||
|
||||
@ -23,12 +23,11 @@ class GinkaMaskGIT(nn.Module):
|
||||
nn.Linear(d_model, num_classes)
|
||||
)
|
||||
|
||||
def forward(self, map: torch.Tensor, cond: torch.Tensor, heatmap: torch.Tensor):
|
||||
def forward(self, map: torch.Tensor, heatmap: torch.Tensor):
|
||||
# map: [B, H * W]
|
||||
# cond: [B, cond_dim]
|
||||
# heatmap: [B, C, H, W]
|
||||
# output: [B, H * W, num_classes]
|
||||
cond, heatmap = self.cond_encoder(cond, heatmap)
|
||||
heatmap = self.cond_encoder(heatmap)
|
||||
# cond: [B, d_model]
|
||||
# heatmap: [B, d_model, H, W]
|
||||
|
||||
@ -36,18 +35,17 @@ class GinkaMaskGIT(nn.Module):
|
||||
|
||||
heatmap = heatmap.view(B, C, H * W).permute(0, 2, 1)
|
||||
x = self.tile_embedding(map) + heatmap
|
||||
x = torch.cat([cond.unsqueeze(1), x], dim=1) + self.pos_embedding
|
||||
x = x + self.pos_embedding
|
||||
x = self.transformer(x)
|
||||
|
||||
logits = self.output_fc(x)
|
||||
|
||||
return logits[:, :-1, :]
|
||||
return logits
|
||||
|
||||
if __name__ == "__main__":
|
||||
device = torch.device("cpu")
|
||||
|
||||
map = torch.randint(0, 16, [1, 169]).to(device)
|
||||
cond = torch.rand(1, 16).to(device)
|
||||
heatmap = torch.rand(1, 4, 13, 13).to(device)
|
||||
|
||||
# 初始化模型
|
||||
@ -57,7 +55,7 @@ if __name__ == "__main__":
|
||||
|
||||
# 前向传播
|
||||
start = time.perf_counter()
|
||||
output = model(map, cond, heatmap)
|
||||
output = model(map, heatmap)
|
||||
end = time.perf_counter()
|
||||
|
||||
print_memory("前向传播后")
|
||||
|
||||
251
ginka/train_heatmap.py
Normal file
251
ginka/train_heatmap.py
Normal file
@ -0,0 +1,251 @@
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import math
|
||||
from datetime import datetime
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch.optim as optim
|
||||
import cv2
|
||||
import numpy as np
|
||||
from perlin_numpy import generate_fractal_noise_2d
|
||||
from tqdm import tqdm
|
||||
from torch.utils.data import DataLoader
|
||||
from .maskGIT.model import GinkaMaskGIT
|
||||
from .dataset import GinkaHeatmapDataset
|
||||
from shared.image import matrix_to_image_cv
|
||||
from .heatmap.model import GinkaHeatmapModel
|
||||
from .heatmap.diffusion import Diffusion
|
||||
from .utils import nms_sampling
|
||||
|
||||
# 图块定义:
|
||||
# 0. 空地, 1. 墙壁, 2. 门, 3. 钥匙, 4. 红宝石, 5. 蓝宝石, 6. 绿宝石, 7. 血瓶
|
||||
# 8. 道具, 9. 怪物, 10. 入口, 15. 掩码 token
|
||||
|
||||
# 热力图定义
|
||||
# 0. 墙壁热力图, 1. 怪物热力图, 2. 资源热力图, 3. 血瓶热力图, 4. 宝石热力图, 5. 钥匙热力图
|
||||
# 6. 道具热力图, 7. 入口热力图, 8. 门热力图
|
||||
|
||||
BATCH_SIZE = 8
|
||||
VAL_BATCH_DIVIDER = 8
|
||||
NUM_CLASSES = 16
|
||||
MASK_TOKEN = 15
|
||||
GENERATE_STEP = 8
|
||||
MAP_W = 13
|
||||
MAP_H = 13
|
||||
HEATMAP_CHANNEL = 9
|
||||
LABEL_SMOOTHING = 0
|
||||
BLUR_MIN_SIZE = 3
|
||||
BLUR_MAX_SIZE = 9
|
||||
RAND_RATIO = 0.15
|
||||
# MaskGIT 生成设置
|
||||
USE_MASK_GIT_PREVIEW = True
|
||||
NUM_LAYERS = 4
|
||||
D_MODEL = 128
|
||||
# Diffusion 生成设置
|
||||
NUM_LAYERS_DIFFUSION = 4
|
||||
D_MODEL_DIFFUSION = 128
|
||||
T_DIFFUSION = 100
|
||||
MIN_MASK = 0
|
||||
MAX_MASK = 0.8
|
||||
NOISE_SAMPLING_K = [40, 15, 21, 8, 8, 4, 1, 2, 10]
|
||||
|
||||
device = torch.device(
|
||||
"cuda:1" if torch.cuda.is_available()
|
||||
else "mps" if torch.mps.is_available()
|
||||
else "cpu"
|
||||
)
|
||||
os.makedirs("result", exist_ok=True)
|
||||
os.makedirs("result/heatmap", exist_ok=True)
|
||||
os.makedirs("result/final_img", exist_ok=True)
|
||||
|
||||
disable_tqdm = not sys.stdout.isatty()
|
||||
|
||||
def _str2bool(value) -> bool:
    """Parse a command-line boolean.

    argparse's ``type=bool`` treats any non-empty string — including
    ``"False"`` — as True, so boolean flags must be parsed explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("1", "true", "yes", "y", "on"):
        return True
    if lowered in ("0", "false", "no", "n", "off"):
        return False
    raise argparse.ArgumentTypeError(f"invalid boolean value: {value!r}")


def parse_arguments():
    """Build and parse the training CLI arguments."""
    parser = argparse.ArgumentParser(description="training codes")
    # Fix: `type=bool` made e.g. `--resume False` evaluate to True.
    parser.add_argument("--resume", type=_str2bool, default=False)
    parser.add_argument("--state_ginka", type=str, default="result/heatmap/ginka-100.pth")
    parser.add_argument("--train", type=str, default="ginka-dataset.json")
    parser.add_argument("--validate", type=str, default="ginka-eval.json")
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--checkpoint", type=int, default=5)
    parser.add_argument("--load_optim", type=_str2bool, default=True)
    parser.add_argument("--use_maskgit", type=_str2bool, default=True)
    parser.add_argument("--maskgit_path", type=str, default="result/ginka_transformer.pth")
    args = parser.parse_args()
    return args
|
||||
|
||||
def train():
    """Train the heatmap diffusion model.

    Optionally uses a pre-trained, frozen MaskGIT to render preview maps from
    generated heatmaps; checkpoints and validates every `args.checkpoint`
    epochs.
    """
    print(f"Using {device.type} to train model.")

    args = parse_arguments()

    if args.use_maskgit:
        # Frozen, pre-trained map generator used only for visual previews.
        maskGIT = GinkaMaskGIT(
            num_classes=NUM_CLASSES, heatmap_channel=HEATMAP_CHANNEL,
            num_layers=NUM_LAYERS, d_model=D_MODEL
        ).to(device)
    model = GinkaHeatmapModel(
        T=T_DIFFUSION, heatmap_dim=HEATMAP_CHANNEL, d_model=D_MODEL_DIFFUSION,
        num_layers=NUM_LAYERS_DIFFUSION
    ).to(device)

    diffusion = Diffusion(device)

    dataset = GinkaHeatmapDataset(args.train, min_mask=MIN_MASK, max_mask=MAX_MASK)
    dataset_val = GinkaHeatmapDataset(args.validate, min_mask=MIN_MASK, max_mask=MAX_MASK)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
    dataloader_val = DataLoader(dataset_val, batch_size=BATCH_SIZE // VAL_BATCH_DIVIDER, shuffle=True)

    optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # Tile images used to render generated maps.
    tile_dict = dict()
    for file in os.listdir('tiles'):
        name = os.path.splitext(file)[0]
        tile_dict[name] = cv2.imread(f"tiles/{file}", cv2.IMREAD_UNCHANGED)

    # Resume training from a saved checkpoint.
    if args.resume:
        data_ginka = torch.load(args.state_ginka, map_location=device)

        model.load_state_dict(data_ginka["model_state"], strict=False)

        if args.load_optim:
            if data_ginka.get("optim_state") is not None:
                optimizer.load_state_dict(data_ginka["optim_state"])

        print("Train from loaded state.")

    if args.use_maskgit:
        data_maskGIT = torch.load(args.maskgit_path, map_location=device)
        maskGIT.load_state_dict(data_maskGIT["model_state"])
        print("Loaded MaskGIT model state.")

    for epoch in tqdm(range(args.epochs), desc="Diffusion Training", disable=disable_tqdm):
        loss_total = torch.Tensor([0]).to(device)

        for batch in tqdm(dataloader, leave=False, desc="Epoch Progress", disable=disable_tqdm):
            cond_heatmap = batch["cond_heatmap"].to(device)
            target_heatmap = batch["target_heatmap"].to(device)
            B, C, H, W = target_heatmap.shape

            # Standard DDPM objective: predict the injected noise.
            t = torch.randint(1, T_DIFFUSION, (B,), device=device)
            noise = torch.randn_like(target_heatmap)

            x_t = diffusion.q_sample(target_heatmap, t, noise)

            pred_noise = model(x_t, cond_heatmap, t)

            loss = F.mse_loss(pred_noise, noise)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_total += loss.detach()

        scheduler.step()

        avg_loss = loss_total.item() / len(dataloader)
        tqdm.write(
            f"[Epoch {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] " +
            f"E: {epoch + 1} | Loss: {avg_loss:.6f} | " +
            f"LR: {scheduler.get_last_lr()[0]:.6f}"
        )

        # Every `checkpoint` epochs: save state, validate, render previews.
        if (epoch + 1) % args.checkpoint == 0:
            # Save a checkpoint.
            torch.save({
                "model_state": model.state_dict(),
                "optim_state": optimizer.state_dict(),
            }, f"result/heatmap/ginka-{epoch + 1}.pth")

            val_loss_total = torch.Tensor([0]).to(device)
            model.eval()
            with torch.no_grad():
                idx = 0
                for batch in tqdm(dataloader_val, desc="Validating", leave=False, disable=disable_tqdm):
                    # 1. Validation loss on the held-out set.
                    cond_heatmap = batch["cond_heatmap"].to(device)
                    target_heatmap = batch["target_heatmap"].to(device)
                    B, C, H, W = target_heatmap.shape

                    t = torch.randint(1, T_DIFFUSION, [B], device=device)
                    noise = torch.randn_like(target_heatmap)

                    x_t = diffusion.q_sample(target_heatmap, t, noise)

                    pred_noise = model(x_t, cond_heatmap, t)

                    loss = F.mse_loss(pred_noise, noise)

                    val_loss_total += loss.detach()

                    # 2. Full generation from scratch; render a map with the
                    #    pre-trained MaskGIT.
                    if args.use_maskgit:
                        fake_heatmap = diffusion.sample(model, cond_heatmap)
                        gen_map = maskGIT_generate(maskGIT, B, fake_heatmap)

                        generated_img = matrix_to_image_cv(gen_map.view(B, H, W)[0].cpu().numpy(), tile_dict)
                        cv2.imwrite(f"result/final_img/{idx}.png", generated_img)
                    # Fix: idx was never advanced, so every batch overwrote 0.png.
                    idx += 1

                # 3. Five fully random generations from fractal-noise point maps.
                if args.use_maskgit:
                    for i in range(0, 5):
                        # Fix: np.ndarray(...) leaves memory uninitialized; zeros is explicit.
                        ar = np.zeros([1, HEATMAP_CHANNEL, MAP_H, MAP_W])
                        for c in range(0, HEATMAP_CHANNEL):
                            noise = generate_fractal_noise_2d((16, 16), (4, 4), 1)[0:MAP_H,0:MAP_W]
                            ar[0,c] = nms_sampling(noise, NOISE_SAMPLING_K[c])

                        fake_heatmap = diffusion.sample(model, torch.FloatTensor(ar).to(device))
                        # Fix: this heatmap has batch size 1; passing the
                        # validation batch size B only worked by accident.
                        gen_map = maskGIT_generate(maskGIT, 1, fake_heatmap)
                        generated_img = matrix_to_image_cv(gen_map.view(1, H, W)[0].cpu().numpy(), tile_dict)
                        cv2.imwrite(f"result/final_img/g-{i}.png", generated_img)
            # Fix: restore training mode (dropout/batchnorm) after validation;
            # previously the model stayed in eval mode for all later epochs.
            model.train()

            avg_loss_val = val_loss_total.item() / len(dataloader_val)
            tqdm.write(
                f"[Validate {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] E: {epoch + 1} | " +
                f"Loss: {avg_loss_val:.6f}"
            )

    print("Train ended.")
    # Fix: save the trained diffusion model; the previous code saved the
    # frozen MaskGIT state (and raised NameError with --use_maskgit False).
    torch.save({
        "model_state": model.state_dict(),
    }, "result/ginka_heatmap.pth")
|
||||
|
||||
def maskGIT_generate(maskGIT, B: int, heatmap: torch.Tensor):
    """Iteratively decode a tile map from a heatmap with a MaskGIT model.

    Starts from a fully masked canvas; over GENERATE_STEP rounds the most
    confident sampled tiles are kept while the rest are re-masked following
    a cosine schedule, stopping early once nothing is masked.
    """
    canvas = torch.full((B, MAP_H * MAP_W), MASK_TOKEN).to(device)
    for step in range(GENERATE_STEP):
        # 1. Predict per-cell tile distributions for the current canvas.
        logits = maskGIT(canvas, heatmap)  # [1, H * W, num_classes]
        probs = F.softmax(logits, dim=-1)

        # 2. Sample tiles stochastically (for diversity, instead of argmax).
        sampled = torch.distributions.Categorical(probs).sample()  # [1, H * W]

        # 3. Confidence = probability the model assigned to each sampled tile.
        conf = torch.gather(probs, -1, sampled.unsqueeze(-1)).squeeze(-1)

        # 4. Cosine schedule: how many cells to re-mask this round (shrinks
        #    towards zero as step approaches GENERATE_STEP).
        keep_ratio = math.cos(((step + 1) / GENERATE_STEP) * math.pi / 2)
        remask_count = math.floor(keep_ratio * MAP_H * MAP_W)

        # 5. Re-mask the least confident cells; the rest stay fixed.
        if remask_count > 0:
            _, low_conf_idx = torch.topk(conf, k=remask_count, largest=False)
            sampled = sampled.scatter(1, low_conf_idx, MASK_TOKEN)

        canvas = sampled
        if (canvas == MASK_TOKEN).sum() == 0:
            break

    return canvas
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
torch.set_num_threads(4)
|
||||
train()
|
||||
@ -113,7 +113,6 @@ def train():
|
||||
|
||||
for batch in tqdm(dataloader, leave=False, desc="Epoch Progress", disable=disable_tqdm):
|
||||
target_map = batch["target_map"].to(device)
|
||||
cond = batch["cond"].to(device)
|
||||
heatmap = batch["heatmap"].to(device)
|
||||
B, H, W = target_map.shape
|
||||
|
||||
@ -129,7 +128,7 @@ def train():
|
||||
masked_input = target_map.clone()
|
||||
masked_input[mask] = MASK_TOKEN # 填充为 [MASK] 标记
|
||||
|
||||
logits = model(masked_input, cond, heatmap)
|
||||
logits = model(masked_input, heatmap)
|
||||
|
||||
loss = F.cross_entropy(logits.permute(0, 2, 1), target_map, reduction='none', label_smoothing=LABEL_SMOOTHING)
|
||||
loss = (loss * mask).sum() / (mask.sum() + 1e-6)
|
||||
@ -166,7 +165,6 @@ def train():
|
||||
for batch in tqdm(dataloader_val, desc="Validating", leave=False, disable=disable_tqdm):
|
||||
# 1. 常规生成
|
||||
target_map = batch["target_map"].to(device)
|
||||
cond = batch["cond"].to(device)
|
||||
heatmap = batch["heatmap"].to(device)
|
||||
B, H, W = target_map.shape
|
||||
target_map = target_map.view(B, H * W)
|
||||
@ -181,7 +179,7 @@ def train():
|
||||
masked_input = target_map.clone()
|
||||
masked_input[mask] = MASK_TOKEN # 填充为 [MASK] 标记
|
||||
|
||||
logits = model(masked_input, cond, heatmap)
|
||||
logits = model(masked_input, heatmap)
|
||||
|
||||
loss = F.cross_entropy(logits.permute(0, 2, 1), target_map, reduction='none', label_smoothing=LABEL_SMOOTHING)
|
||||
loss = (loss * mask).sum() / (mask.sum() + 1e-6)
|
||||
@ -201,7 +199,7 @@ def train():
|
||||
map = torch.full((B, MAP_SIZE), MASK_TOKEN).to(device)
|
||||
for i in range(GENERATE_STEP):
|
||||
# 1. 预测
|
||||
logits = model(map, cond, heatmap) # [1, H * W, num_classes]
|
||||
logits = model(map, heatmap) # [1, H * W, num_classes]
|
||||
probs = F.softmax(logits, dim=-1)
|
||||
|
||||
# 2. 采样(为了多样性,这里可以使用概率采样而不是取最大值)
|
||||
|
||||
@ -1,7 +1,34 @@
|
||||
import torch
|
||||
import numpy as np
|
||||
|
||||
def print_memory(device, tag=""):
|
||||
if torch.cuda.is_available():
|
||||
print(f"{tag} | 当前显存: {torch.cuda.memory_allocated(device) / 1024**2:.2f} MB, 最大显存: {torch.cuda.max_memory_allocated(device) / 1024**2:.2f} MB")
|
||||
else:
|
||||
print("当前设备不支持 cuda.")
|
||||
print("当前设备不支持 cuda.")
|
||||
|
||||
def nms_sampling(noise: np.ndarray, k: int, radius=2):
    """Pick k peaks from a 2-D noise map using non-maximum suppression.

    Repeatedly takes the global maximum, then suppresses the
    (2*radius+1)^2 neighborhood around it so subsequent picks are
    spatially separated.  Returns a binary map of the same shape with 1
    at each picked cell.
    """
    # noise: [H, W]
    noise = noise.copy()
    points = []

    for _ in range(k):
        flat_idx = np.argmax(noise)
        row, col = np.unravel_index(flat_idx, noise.shape)

        points.append((row, col))

        # Suppress the neighborhood, clamped to the map bounds.
        r0 = max(0, row - radius)
        r1 = min(noise.shape[0], row + radius + 1)
        c0 = max(0, col - radius)
        c1 = min(noise.shape[1], col + radius + 1)

        noise[r0:r1, c0:c1] = -np.inf

    result = np.zeros_like(noise)
    for row, col in points:
        # Fix: the indices were swapped here (result[y, x]), transposing the
        # output relative to the suppression indexing above and raising
        # IndexError on non-square maps.
        result[row, col] = 1

    return result
|
||||
|
||||
Loading…
Reference in New Issue
Block a user