| from flax import linen as nn |
| import jax |
| import jax.numpy as jnp |
| from local_response_norm import LocalResponseNorm |
|
|
# Size of the generator's latent input vector.
# NOTE(review): not referenced in the visible code — presumably consumed
# by callers when sampling `latent`; confirm against the training script.
LATENT_DIM = 500
# Small constant, typically a divide-by-zero / log(0) guard.
# NOTE(review): unused in the visible code — confirm where it is consumed.
EPSILON = 1e-8
|
|
class Generator(nn.Module):
    """Multi-scale image generator.

    Maps a batch of latent vectors to RGB images at six spatial
    resolutions (128, 64, 32, 16, 8 and 4 pixels per side), each
    squashed to [-1, 1] with tanh.  A dense stem projects the latent
    to a 2x2x1024 feature map; each subsequent scale doubles the
    spatial size with a strided transposed convolution, emits an RGB
    head, and (except at the final scale) refines the features with a
    second same-resolution transposed convolution.  Intermediate
    activations are normalised with LocalResponseNorm.
    """

    @nn.compact
    def __call__(self, latent, training=True):
        """Generate images from ``latent``.

        Args:
            latent: batch of latent vectors — assumed shape
                (batch, latent_dim); TODO confirm against callers.
            training: when True, BatchNorm uses batch statistics and
                Dropout is stochastic; when False both are deterministic
                (inference mode).

        Returns:
            Tuple of tanh-activated RGB tensors (NHWC), largest first:
            (128x128, 64x64, 32x32, 16x16, 8x8, 4x4).
        """
        # Dense stem: project the latent up to a 2x2x1024 feature map.
        x = nn.Dense(features=64)(latent)
        x = nn.relu(x)
        x = nn.Dense(features=2 * 2 * 1024)(x)
        # Keyword arg: positional `nn.BatchNorm(not training)` relied on
        # `use_running_average` being the first field — fragile.
        x = nn.BatchNorm(use_running_average=not training)(x)
        x = nn.relu(x)
        x = nn.Dropout(0.25, deterministic=not training)(x)
        x = x.reshape((x.shape[0], 2, 2, -1))

        # Channel width per scale, from 4x4 up to 128x128.
        # NOTE(review): the final entry (64) breaks the halving pattern
        # (512, 256, 128, 64, 32, ...); kept as-is so parameter shapes
        # match existing checkpoints — confirm whether 16 was intended.
        widths = (512, 256, 128, 64, 32, 64)

        # The loop creates submodules in exactly the same order as the
        # original unrolled code, so Flax's auto-generated parameter
        # names (ConvTranspose_0, ...) are unchanged.
        rgb_heads = []  # smallest resolution first
        for i, width in enumerate(widths):
            # Upsample 2x, normalise, activate.
            x = nn.ConvTranspose(
                features=width, kernel_size=(3, 3), strides=(2, 2))(x)
            x = LocalResponseNorm()(x)
            x = nn.relu(x)
            # RGB head for this resolution.
            rgb_heads.append(
                nn.ConvTranspose(features=3, kernel_size=(3, 3))(x))
            # Same-resolution refinement conv at every scale but the last.
            if i < len(widths) - 1:
                x = nn.ConvTranspose(features=width, kernel_size=(3, 3))(x)
                x = LocalResponseNorm()(x)
                x = nn.relu(x)

        # Largest resolution first, matching the original return order.
        return tuple(nn.tanh(head) for head in reversed(rgb_heads))