import torch
from .qwen_image_image2lora import ImageEmbeddingToLoraMatrix, SequencialMLP


class LoRATrainerBlock(torch.nn.Module):
    """Predicts LoRA A/B matrices for every target layer of a single transformer block."""

    def __init__(self, lora_patterns, in_dim=1536+4096, compress_dim=128, rank=4, block_id=0, use_residual=True, residual_length=64+7, residual_dim=3584, residual_mid_dim=1024, prefix="transformer_blocks"):
        super().__init__()
        self.prefix = prefix
        self.lora_patterns = lora_patterns
        self.block_id = block_id
        # One generator per target layer pattern: (layer name, lora_a dim, lora_b dim).
        self.layers = []
        for name, lora_a_dim, lora_b_dim in self.lora_patterns:
            self.layers.append(ImageEmbeddingToLoraMatrix(in_dim, compress_dim, lora_a_dim, lora_b_dim, rank))
        self.layers = torch.nn.ModuleList(self.layers)
        if use_residual:
            # Optional projection of the residual features into the compressed conditioning space.
            self.proj_residual = SequencialMLP(residual_length, residual_dim, residual_mid_dim, compress_dim)
        else:
            self.proj_residual = None
    
    def forward(self, x, residual=None):
        lora = {}
        if self.proj_residual is not None:
            residual = self.proj_residual(residual)
        for lora_pattern, layer in zip(self.lora_patterns, self.layers):
            name = lora_pattern[0]
            lora_a, lora_b = layer(x, residual=residual)
            # Keys follow the PEFT-style state-dict naming of the target model.
            lora[f"{self.prefix}.{self.block_id}.{name}.lora_A.default.weight"] = lora_a
            lora[f"{self.prefix}.{self.block_id}.{name}.lora_B.default.weight"] = lora_b
        return lora


class ZImageImage2LoRAComponent(torch.nn.Module):
    """Generates LoRA weights for one section of the target transformer (given by `prefix`), covering all of its blocks."""

    def __init__(self, lora_patterns, prefix, num_blocks=60, use_residual=True, compress_dim=128, rank=4, residual_length=64+7, residual_mid_dim=1024):
        super().__init__()
        self.lora_patterns = lora_patterns
        self.num_blocks = num_blocks
        self.blocks = []
        # One LoRATrainerBlock per (pattern group, block id) combination.
        for pattern_group in self.lora_patterns:
            for block_id in range(self.num_blocks):
                self.blocks.append(LoRATrainerBlock(pattern_group, block_id=block_id, use_residual=use_residual, compress_dim=compress_dim, rank=rank, residual_length=residual_length, residual_mid_dim=residual_mid_dim, prefix=prefix))
        self.blocks = torch.nn.ModuleList(self.blocks)
        self.residual_scale = 0.05
        self.use_residual = use_residual
        
    def forward(self, x, residual=None):
        if residual is not None:
            if self.use_residual:
                residual = residual * self.residual_scale
            else:
                residual = None
        lora = {}
        for block in self.blocks:
            lora.update(block(x, residual))
        return lora


class ZImageImage2LoRAModel(torch.nn.Module):
    """Full image-to-LoRA generator: one component each for the main layers (30 blocks), the context refiner (2 blocks), and the noise refiner (2 blocks)."""

    def __init__(self, use_residual=False, compress_dim=64, rank=4, residual_length=64+7, residual_mid_dim=1024):
        super().__init__()
        lora_patterns = [
            [
                ("attention.to_q", 3840, 3840),
                ("attention.to_k", 3840, 3840),
                ("attention.to_v", 3840, 3840),
                ("attention.to_out.0", 3840, 3840),
            ],
            [
                ("feed_forward.w1", 3840, 10240),
                ("feed_forward.w2", 10240, 3840),
                ("feed_forward.w3", 3840, 10240),
            ],
        ]
        config = {
            "lora_patterns": lora_patterns,
            "use_residual": use_residual,
            "compress_dim": compress_dim,
            "rank": rank,
            "residual_length": residual_length,
            "residual_mid_dim": residual_mid_dim,
        }
        self.layers_lora = ZImageImage2LoRAComponent(
            prefix="layers",
            num_blocks=30,
            **config,
        )
        self.context_refiner_lora = ZImageImage2LoRAComponent(
            prefix="context_refiner",
            num_blocks=2,
            **config,
        )
        self.noise_refiner_lora = ZImageImage2LoRAComponent(
            prefix="noise_refiner",
            num_blocks=2,
            **config,
        )
        
    def forward(self, x, residual=None):
        lora = {}
        lora.update(self.layers_lora(x, residual=residual))
        lora.update(self.context_refiner_lora(x, residual=residual))
        lora.update(self.noise_refiner_lora(x, residual=residual))
        return lora

    def initialize_weights(self):
        # Rescale selected generator weights so the predicted LoRA matrices start near zero.
        state_dict = self.state_dict()
        for name in state_dict:
            if ".proj_a." in name:
                state_dict[name] = state_dict[name] * 0.3
            elif ".proj_b.proj_out." in name:
                state_dict[name] = state_dict[name] * 0
            elif ".proj_residual.proj_out." in name:
                state_dict[name] = state_dict[name] * 0.3
        self.load_state_dict(state_dict)


class ImageEmb2LoRAWeightCompressed(torch.nn.Module):
    """Compressed LoRA generator for one target layer: a shared low-rank basis (lora_a, lora_b) modulated by a rank x rank mixing matrix predicted from the image embedding."""

    def __init__(self, in_dim, out_dim, emb_dim, rank):
        super().__init__()
        self.lora_a = torch.nn.Parameter(torch.randn((rank, in_dim)))
        self.lora_b = torch.nn.Parameter(torch.randn((out_dim, rank)))
        self.proj = torch.nn.Linear(emb_dim, rank * rank, bias=True)
        self.rank = rank

    def forward(self, x):
        # Predict a (rank, rank) mixing matrix from the embedding (expects a single, unbatched embedding).
        x = self.proj(x).view(self.rank, self.rank)
        lora_a = x @ self.lora_a
        # lora_b is embedding-independent; only lora_a is conditioned on the image.
        lora_b = self.lora_b
        return lora_a, lora_b


class ZImageImage2LoRAModelCompressed(torch.nn.Module):
    """Compressed variant of the image-to-LoRA model: one ImageEmb2LoRAWeightCompressed module per target layer across all blocks, stored flat in a ModuleDict."""

    def __init__(self, emb_dim=1536+4096, rank=32):
        super().__init__()
        target_layers = [
            ("attention.to_q", 3840, 3840),
            ("attention.to_k", 3840, 3840),
            ("attention.to_v", 3840, 3840),
            ("attention.to_out.0", 3840, 3840),
            ("feed_forward.w1", 3840, 10240),
            ("feed_forward.w2", 10240, 3840),
            ("feed_forward.w3", 3840, 10240),
        ]
        self.lora_patterns = [
            {
                "prefix": "layers",
                "num_layers": 30,
                "target_layers": target_layers,
            },
            {
                "prefix": "context_refiner",
                "num_layers": 2,
                "target_layers": target_layers,
            },
            {
                "prefix": "noise_refiner",
                "num_layers": 2,
                "target_layers": target_layers,
            },
        ]
        module_dict = {}
        for lora_pattern in self.lora_patterns:
            prefix, num_layers, target_layers = lora_pattern["prefix"], lora_pattern["num_layers"], lora_pattern["target_layers"]
            for layer_id in range(num_layers):
                for layer_name, in_dim, out_dim in target_layers:
                    # ModuleDict keys cannot contain ".", so encode it as "___" and decode it back in forward.
                    name = f"{prefix}.{layer_id}.{layer_name}".replace(".", "___")
                    module_dict[name] = ImageEmb2LoRAWeightCompressed(in_dim, out_dim, emb_dim, rank)
        self.module_dict = torch.nn.ModuleDict(module_dict)

    def forward(self, x, residual=None):
        lora = {}
        for name, module in self.module_dict.items():
            name = name.replace("___", ".")
            name_a, name_b = f"{name}.lora_A.default.weight", f"{name}.lora_B.default.weight"
            lora_a, lora_b = module(x)
            lora[name_a] = lora_a
            lora[name_b] = lora_b
        return lora

    def initialize_weights(self):
        # Zero lora_b and scale down lora_a and the mixing projection so the generated LoRA starts as a no-op.
        state_dict = self.state_dict()
        for name in state_dict:
            if "lora_b" in name:
                state_dict[name] = state_dict[name] * 0
            elif "lora_a" in name:
                state_dict[name] = state_dict[name] * 0.2
            elif "proj.weight" in name:
                state_dict[name] = state_dict[name] * 0.2
        self.load_state_dict(state_dict)
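

# Minimal usage sketch (illustrative, not part of the original module). It assumes the conditioning
# embedding `x` matches the default emb_dim of 1536+4096 features; the tensors below are hypothetical.
if __name__ == "__main__":
    model = ZImageImage2LoRAModelCompressed(emb_dim=1536+4096, rank=32)
    model.initialize_weights()
    x = torch.randn(1536 + 4096)  # hypothetical image embedding vector
    lora_state_dict = model(x)
    # Entries are keyed like "layers.0.attention.to_q.lora_A.default.weight".
    print(len(lora_state_dict), lora_state_dict["layers.0.attention.to_q.lora_A.default.weight"].shape)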