feat: maskGIT

This commit is contained in:
unanmed 2026-03-10 23:06:23 +08:00
parent a119cbb155
commit d312fb65b7
9 changed files with 341 additions and 9 deletions

View File

@@ -232,8 +232,6 @@ class GinkaRNNDataset(Dataset):
H, W = target.shape
tag_cond = torch.FloatTensor(item['tag'])
val_cond = torch.FloatTensor(item['val'])
- val_cond[9] = val_cond[9] / H / W
- val_cond[10] = val_cond[10] / H / W
return {
"tag_cond": tag_cond,

238
ginka/train_maskGIT.py Normal file
View File

@@ -0,0 +1,238 @@
import argparse
import os
import sys
import random
import math
from datetime import datetime
import torch
import torch.nn.functional as F
import torch.optim as optim
import cv2
import numpy as np
from torch_geometric.loader import DataLoader
from tqdm import tqdm
from .transformer.maskGIT import GinkaMaskGIT
from .vae_rnn.loss import VAELoss
from .vae_rnn.scheduler import VAEScheduler
from .dataset import GinkaRNNDataset
from shared.image import matrix_to_image_cv
# Hand-annotated tag definitions (currently unused):
# 0. blue sea, 1. red sea, 2. indoor, 3. outdoor, 4. left-right symmetric, 5. top-bottom symmetric, 6. pseudo-symmetric, 7. filler floor,
# 8. story floor, 9. water floor, 10. power-fantasy tower, 11. boss floor, 12. pure boss floor, 13. multi-room, 14. multi-corridor, 15. item-styled
# 16. area entrance, 17. area connection, 18. has trigger doors, 19. item floor, 20. diagonally symmetric, 21. left-right passage, 22. top-bottom passage, 23. many trigger doors
# 24. centrally symmetric, 25. partially symmetric, 26. fishbone
# Auto-annotated tag definitions (currently unused):
# 0. left-right symmetric, 1. top-bottom symmetric, 2. centrally symmetric, 3. diagonally symmetric, 4. pseudo-symmetric, 5. multi-room, 6. multi-corridor
# 32. flat tower, 33. conversion tower, 34. item tower
# Scalar value definitions:
# 0. overall density: non-empty tiles / map area (empty tiles also include decoration tiles)
# 1. wall density: walls / map area
# 2. decoration density: decorations / map area
# 3. door density: doors / map area
# 4. monster density: monsters / map area
# 5. resource density: resources / map area
# 6. gem density: gems / map area
# 7. potion density: potions / map area
# 8. key density: keys / map area
# 9. item density: items / map area
# 10. number of entrances
# 11. number of trigger doors
# 12. number of filler doors (a multi-layer filler door counts once)
# Tile definitions:
# 0. floor, 1. wall, 2. door, 3. key, 4. red gem, 5. blue gem, 6. green gem, 7. potion
# 8. item, 9. monster, 10. entrance, 15. mask token
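# Illustrative example (hypothetical numbers): for a 13x13 map (area 169) with 60
# non-empty tiles, 40 walls, and 2 entrances, the vector would begin
# val_cond = [60/169, 40/169, ...] and end with raw counts [..., 2, n_trigger_doors, n_filler_doors]:
# indices 0-9 are per-tile densities, indices 10-12 are counts.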
BATCH_SIZE = 16
VAL_BATCH_DIVIDER = 16
NUM_CLASSES = 16
MASK_TOKEN = 15
GENERATE_STEP = 8
device = torch.device(
"cuda:1" if torch.cuda.is_available()
else "mps" if torch.backends.mps.is_available()
else "cpu"
)
os.makedirs("result", exist_ok=True)
os.makedirs("result/transformer", exist_ok=True)
os.makedirs("result/transformer_img", exist_ok=True)
disable_tqdm = not sys.stdout.isatty()
def str2bool(v): return str(v).lower() in ("1", "true", "yes")
def parse_arguments():
parser = argparse.ArgumentParser(description="training codes")
# argparse's type=bool treats any non-empty string (even "False") as True, so parse booleans explicitly
parser.add_argument("--resume", type=str2bool, default=False)
parser.add_argument("--state_ginka", type=str, default="result/vae/ginka-100.pth")
parser.add_argument("--train", type=str, default="ginka-dataset.json")
parser.add_argument("--validate", type=str, default="ginka-eval.json")
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--checkpoint", type=int, default=5)
parser.add_argument("--load_optim", type=bool, default=True)
args = parser.parse_args()
return args
def train():
print(f"Using {device.type} to train model.")
args = parse_arguments()
model = GinkaMaskGIT(num_classes=NUM_CLASSES).to(device)
dataset = GinkaRNNDataset(args.train, device)
dataset_val = GinkaRNNDataset(args.validate, device)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
dataloader_val = DataLoader(dataset_val, batch_size=BATCH_SIZE // VAL_BATCH_DIVIDER, shuffle=True)
optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-2)
# A custom scheduler would allow resetting scheduler state and raising the learning rate when self_prob increases, to adapt training
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-5)
# Used for rendering maps as images
tile_dict = dict()
for file in os.listdir('tiles'):
name = os.path.splitext(file)[0]
tile_dict[name] = cv2.imread(f"tiles/{file}", cv2.IMREAD_UNCHANGED)
if args.resume:
data_ginka = torch.load(args.state_ginka, map_location=device)
model.load_state_dict(data_ginka["model_state"], strict=False)
if args.load_optim:
if data_ginka.get("optim_state") is not None:
optimizer.load_state_dict(data_ginka["optim_state"])
print("Train from loaded state.")
for epoch in tqdm(range(args.epochs), desc="MaskGIT Training", disable=disable_tqdm):
loss_total = torch.Tensor([0]).to(device)
for batch in tqdm(dataloader, leave=False, desc="Epoch Progress", disable=disable_tqdm):
target_map = batch["target_map"].to(device)
cond = batch["val_cond"].to(device)
B, H, W = target_map.shape
target_map = target_map.view(B, H * W)
# 1. Randomly sample the mask ratio r (following a cosine schedule works better)
r = torch.rand(B).to(device)
r = torch.cos(r * math.pi / 2).unsqueeze(1) # yields more samples with high mask ratios
# 2. Build the mask matrix
masks = torch.rand(target_map.shape).to(device) < r
masked_input = target_map.clone()
masked_input[masks] = MASK_TOKEN # fill with the [MASK] token
logits = model(masked_input, cond)
loss = F.cross_entropy(logits.permute(0, 2, 1), target_map, reduction='none')
loss = (loss * masks).sum() / (masks.sum() + 1e-6)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_total += loss.detach()
avg_loss = loss_total.item() / len(dataloader)
tqdm.write(
f"[Epoch {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] " +
f"E: {epoch + 1} | Loss: {avg_loss:.6f} | " +
f"LR: {scheduler.get_last_lr()[0]:.6f}"
)
scheduler.step()
# Every few epochs, render sample images and save a checkpoint
if (epoch + 1) % args.checkpoint == 0:
# Save a checkpoint
torch.save({
"model_state": model.state_dict(),
"optim_state": optimizer.state_dict(),
}, f"result/transformer/ginka-{epoch + 1}.pth")
val_loss_total = torch.Tensor([0]).to(device)
model.eval()
with torch.no_grad():
idx = 0
gap = 5
color = (255, 255, 255) # white
vline = np.full((416, gap, 3), color, dtype=np.uint8) # vertical divider between the two images
for batch in tqdm(dataloader_val, desc="Validating generator.", leave=False, disable=disable_tqdm):
# 1. Masked-infill validation
target_map = batch["target_map"].to(device)
cond = batch["val_cond"].to(device)
B, H, W = target_map.shape
target_map = target_map.view(B, H * W)
# 1. Randomly sample the mask ratio r (following a cosine schedule works better)
r = torch.rand(B).to(device)
r = torch.cos(r * math.pi / 2).unsqueeze(1) # yields more samples with high mask ratios
# 2. Build the mask matrix
masks = torch.rand(target_map.shape).to(device) < r
masked_input = target_map.clone()
masked_input[masks] = MASK_TOKEN # fill with the [MASK] token
logits = model(masked_input, cond)
loss = F.cross_entropy(logits.permute(0, 2, 1), target_map, reduction='none')
loss = (loss * masks).sum() / (masks.sum() + 1e-6) # masks already matches loss's [B, L] shape
val_loss_total += loss.detach()
fake_map = torch.argmax(logits, dim=2).view(B, H, W).cpu().numpy()
fake_img = matrix_to_image_cv(fake_map[0], tile_dict)
real_map = target_map.view(B, H, W).cpu().numpy()
real_img = matrix_to_image_cv(real_map[0], tile_dict)
img = np.block([[real_img, vline, fake_img]]) # real | divider | generated, side by side
cv2.imwrite(f"result/transformer_img/{idx}.png", img)
idx += 1
# 2. Full generation from scratch
map = torch.full((1, 169), MASK_TOKEN).to(device)
for step in range(GENERATE_STEP):
# 1. Predict
logits = model(map, cond)
probs = F.softmax(logits, dim=-1)
# 2. Sample (sampling from the distribution instead of taking the argmax preserves diversity)
dist = torch.distributions.Categorical(probs)
sampled_tiles = dist.sample() # (1, 169)
# 3. Confidence (how certain the model is about each sampled tile)
confidences = torch.gather(probs, -1, sampled_tiles.unsqueeze(-1)).squeeze(-1)
# 4. Decide how many cells to re-mask this round (concave cosine schedule: fewer each step, 0 at the last)
ratio = math.cos((step + 1) / GENERATE_STEP * math.pi / 2)
num_to_mask = int(ratio * 169)
# 5. Update the canvas: keep the highest-confidence cells and set the rest back to MASK
# Note: typically you keep this step's most confident predictions together with the already unmasked cells
if num_to_mask > 0:
_, mask_indices = torch.topk(confidences, k=num_to_mask, largest=False)
sampled_tiles.scatter_(1, mask_indices, MASK_TOKEN)
map = sampled_tiles
if (map == MASK_TOKEN).sum() == 0:
break
generated_img = matrix_to_image_cv(map.view(1, H, W)[0].cpu().numpy(), tile_dict)
cv2.imwrite(f"result/transformer_img/g-{idx}.png", generated_img)
avg_loss_val = val_loss_total.item() / len(dataloader_val)
tqdm.write(
f"[Validate {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] E: {epoch + 1} | " +
f"Loss: {avg_loss_val:.6f}"
)
model.train() # restore training mode after validation
print("Train ended.")
torch.save({
"model_state": model.state_dict(),
}, f"result/ginka_transformer.pth")
if __name__ == "__main__":
torch.set_num_threads(4)
train()
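The training loop above draws the mask ratio as r = cos(u * pi / 2) with u uniform, which skews sampling toward heavily masked inputs. A minimal check of that property, assuming nothing beyond torch (values in the comments are approximate):

import math
import torch

u = torch.rand(100_000)
r = torch.cos(u * math.pi / 2)
print(r.mean().item())                   # ~0.637 (= 2/pi); a uniform ratio would give 0.5
print((r > 0.8).float().mean().item())   # ~0.41: over 80% of tiles are masked in ~41% of samples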

View File

@@ -10,7 +10,7 @@ import cv2
import numpy as np
from torch_geometric.loader import DataLoader
from tqdm import tqdm
- from .transformer_vae.vae import GinkaTransformerVAE
+ from .transformer.vae import GinkaTransformerVAE
from .vae_rnn.loss import VAELoss
from .vae_rnn.scheduler import VAEScheduler
from .dataset import GinkaRNNDataset

View File

@@ -1,6 +1,7 @@
import time
import torch
import torch.nn as nn
+ import torch.nn.functional as F
from ..utils import print_memory
class GinkaTransformerEncoder(nn.Module):
@@ -8,11 +9,11 @@ class GinkaTransformerEncoder(nn.Module):
super().__init__()
self.dim_ff = dim_ff
self.encoder = nn.TransformerEncoder(
- nn.TransformerEncoderLayer(d_model=dim_ff, dim_feedforward=dim_ff, nhead=nhead, batch_first=True),
+ nn.TransformerEncoderLayer(d_model=dim_ff, dim_feedforward=dim_ff, nhead=nhead, batch_first=True, activation=F.gelu),
num_layers=num_layers
)
self.decoder = nn.TransformerDecoder(
- nn.TransformerDecoderLayer(d_model=dim_ff, dim_feedforward=dim_ff, nhead=nhead, batch_first=True),
+ nn.TransformerDecoderLayer(d_model=dim_ff, dim_feedforward=dim_ff, nhead=nhead, batch_first=True, activation=F.gelu),
num_layers=max(num_layers // 2, 1)
)
@@ -23,7 +24,7 @@ class GinkaTransformerEncoder(nn.Module):
x = self.encoder(x)
x = self.decoder(first_token, x)
return x.squeeze(1)
class GinkaTransformerBottleneck(nn.Module):
def __init__(self, dim_ff=256, hidden_dim=512, latent_dim=32):
super().__init__()
@@ -31,7 +32,7 @@ class GinkaTransformerBottleneck(nn.Module):
nn.Linear(dim_ff, hidden_dim),
nn.Dropout(0.3),
nn.LayerNorm(hidden_dim),
- nn.ReLU(),
+ nn.GELU(),
)
self.fc_mu = nn.Sequential(
nn.Linear(hidden_dim, latent_dim)

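Both hunks above swap ReLU for GELU; nn.TransformerEncoderLayer accepts its activation either as the strings "relu"/"gelu" or as a callable such as F.gelu. A quick standalone check, assuming only torch:

import torch
import torch.nn as nn
import torch.nn.functional as F

layer = nn.TransformerEncoderLayer(d_model=256, nhead=8, dim_feedforward=256, batch_first=True, activation=F.gelu)
out = layer(torch.randn(2, 169, 256))  # (batch, seq, d_model) because batch_first=True
print(out.shape)  # torch.Size([2, 169, 256])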
22
ginka/transformer/fsq.py Normal file
View File

@@ -0,0 +1,22 @@
import torch
import torch.nn as nn
class FSQ(nn.Module):
def __init__(self, levels=7):
super().__init__()
self.levels = levels
self.scale = (levels - 1) / 2
def forward(self, z):
# Bound the range to [-1, 1]
z = torch.tanh(z)
# Quantize to `levels` evenly spaced values
z_q = torch.round(z * self.scale) / self.scale
# Straight-through estimator: forward uses z_q, backward sees identity
z_q = z + (z_q - z).detach()
return z_q
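A quick usage sketch of the FSQ block above, assuming only torch: with levels=7 each latent channel snaps to one of 7 grid points in [-1, 1], while the straight-through estimator hides the rounding from autograd, so only tanh's derivative reaches the input.

import torch

fsq = FSQ(levels=7)
z = torch.randn(2, 4, requires_grad=True)
z_q = fsq(z)
print(torch.unique(torch.round(z_q * fsq.scale)))  # integers in [-3, 3]: the 7 grid points
z_q.sum().backward()
print(torch.allclose(z.grad, 1 - torch.tanh(z).pow(2)))  # True: rounding contributes no gradient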

View File

@@ -0,0 +1,73 @@
import time
import torch
import torch.nn as nn
from ..utils import print_memory
class GinkaMaskGIT(nn.Module):
def __init__(
self, num_classes=16, cond_dim=16, d_model=256, dim_ff=512, nhead=8, num_layers=4, map_size=13*13
):
super().__init__()
self.tile_embedding = nn.Embedding(num_classes, d_model)
self.pos_embedding = nn.Parameter(torch.randn(1, map_size, d_model))
self.cond_projection = nn.Sequential(
nn.Linear(cond_dim, d_model)
)
self.encoder = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_ff, batch_first=True),
num_layers=num_layers
)
self.decoder = nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_ff, batch_first=True),
num_layers=num_layers
)
self.output_fc = nn.Sequential(
nn.Linear(d_model, num_classes)
)
def forward(self, map: torch.Tensor, cond: torch.Tensor):
# map: [B, H * W]
# cond: [B, cond_dim]
# output: [B, H * W, num_classes]
x = self.tile_embedding(map) + self.pos_embedding
c = self.cond_projection(cond).unsqueeze(1)
x = torch.cat([c, x], dim=1)
m = self.encoder(x)
out = self.decoder(x, m)
logits = self.output_fc(out)
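# Drop the final position so the output length matches the H * W input tiles (a cond token was prepended)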
return logits[:, :-1, :]
if __name__ == "__main__":
device = torch.device("cpu")
map = torch.randint(0, 16, [1, 169]).to(device)
cond = torch.rand(1, 16).to(device)
# Initialize the model
model = GinkaMaskGIT().to(device)
print_memory("after init")
# Forward pass
start = time.perf_counter()
output = model(map, cond)
end = time.perf_counter()
print_memory("after forward")
print(f"Inference time: {end - start}")
print(f"Output shape: output={output.shape}")
print(f"Tile Embedding parameters: {sum(p.numel() for p in model.tile_embedding.parameters())}")
print(f"Projection parameters: {sum(p.numel() for p in model.cond_projection.parameters())}")
print(f"Encoder parameters: {sum(p.numel() for p in model.encoder.parameters())}")
print(f"Decoder parameters: {sum(p.numel() for p in model.decoder.parameters())}")
print(f"Output parameters: {sum(p.numel() for p in model.output_fc.parameters())}")
print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")

View File

@@ -6,12 +6,12 @@ class VAELoss:
self.num_classes = 32
def vae_loss(self, logits, target, mu, logvar, beta=0.1):
# logits: [B, 169, 16]
# target: [B, 169]
B, L = target.shape
end_token = torch.tensor([15], dtype=torch.long).to(logits.device).repeat(B, 1)
target = torch.cat([target, end_token], dim=1)
target = F.one_hot(target, num_classes=self.num_classes).float()
- recon_loss = F.cross_entropy(logits, target)
+ recon_loss = F.cross_entropy(logits.permute(0, 2, 1), target)
kl_loss = -0.5 * torch.mean(
1 + logvar - mu.pow(2) - logvar.exp()