models.py
import torch
import torch.nn as nn
import torchvision.models as models
import copy
class ResNetEncoder(nn.Module):
def __init__(self, base='resnet18', out_dim=128):
"""
base: 'resnet18' or 'resnet50'
out_dim: dimension of projection head output
"""
super().__init__()
if base == 'resnet18':
            self.backbone = models.resnet18(weights=None)  # random init; 'pretrained' is deprecated
feat_dim = 512
elif base == 'resnet50':
            self.backbone = models.resnet50(weights=None)  # random init; 'pretrained' is deprecated
feat_dim = 2048
else:
raise ValueError("Unsupported backbone")
# remove final classifier
self.backbone.fc = nn.Identity()
# projection head: MLP (feat_dim → feat_dim → out_dim)
self.projection_head = nn.Sequential(
nn.Linear(feat_dim, feat_dim),
nn.ReLU(inplace=True),
nn.Linear(feat_dim, out_dim),
)
def forward(self, x):
"""
Returns both:
- h: the representation before projection (for linear eval)
- z: the projection used in the SSL loss
"""
h = self.backbone(x) # shape: [B, feat_dim]
z = self.projection_head(h) # shape: [B, out_dim]
return h, z
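# Usage sketch (illustrative, not part of the training code):
#   encoder = ResNetEncoder(base='resnet18', out_dim=128)
#   x = torch.randn(8, 3, 224, 224)
#   h, z = encoder(x)   # h: [8, 512] for linear eval, z: [8, 128] for the SSL loss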
################################################################################
# MoCo Wrapper
class MoCo(nn.Module):
def __init__(self,
base_encoder,
dim=128,
K=4096,
m=0.99,
T=0.2):
super().__init__()
self.K = K
self.m = m
self.T = T
# query encoder
self.encoder_q = base_encoder(out_dim=dim)
        # key encoder
self.encoder_k = base_encoder(out_dim=dim)
# initialize key encoder weights to match query
for param_q, param_k in zip(self.encoder_q.parameters(),
self.encoder_k.parameters()):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False # no gradients for key encoder
        # create the queue (dim × K) and pointer
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
        momentum update: θ_k ← m θ_k + (1 - m) θ_q
"""
for param_q, param_k in zip(self.encoder_q.parameters(),
self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
"""
keys: [B, dim] tensor of new keys
"""
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
# ensure K % batch_size == 0 for simplicity
assert self.K % batch_size == 0
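        # e.g. with K=4096 and B=256 the pointer cycles 0, 256, ..., 3840, 0:
        # the queue acts as a FIFO ring buffer of the K most recent keys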
# replace the oldest keys
self.queue[:, ptr:ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K
self.queue_ptr[0] = ptr
def forward(self, im_q, im_k):
"""
im_q: query images (view1) [B, 3, H, W]
im_k: key images (view2) [B, 3, H, W]
returns: logits [B, 1+K], labels [B] (all zeros)
"""
        # compute query features
_, q = self.encoder_q(im_q) # [B, dim]
q = nn.functional.normalize(q, dim=1)
# compute key features with no_grad
with torch.no_grad():
self._momentum_update_key_encoder()
_, k = self.encoder_k(im_k) # [B, dim]
k = nn.functional.normalize(k, dim=1)
# compute logits
# positive logits: [B, 1]
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: [B, K]
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
logits = torch.cat([l_pos, l_neg], dim=1) # [B, 1+K]
logits /= self.T
# labels: positive at index 0
labels = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
        # enqueue the new keys, dequeue the oldest
self._dequeue_and_enqueue(k)
return logits, labels
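# Usage sketch (illustrative): the logits/labels pair plugs straight into
# nn.CrossEntropyLoss, since the positive key always sits at index 0.
#   moco = MoCo(base_encoder=ResNetEncoder, dim=128, K=4096, m=0.99, T=0.2)
#   logits, labels = moco(im_q, im_k)   # im_q, im_k: two augmented views
#   loss = nn.CrossEntropyLoss()(logits, labels)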
################################################################################
# BYOL Wrapper
class BYOL(nn.Module):
"""
BYOL wrapper:
- online_encoder: backbone + projection head
- predictor: small MLP
- target_encoder: momentum copy of online
"""
def __init__(self, base_encoder, out_dim=128, hidden_dim=512, m=0.996):
super().__init__()
self.online_encoder = base_encoder(out_dim=out_dim)
self.predictor = nn.Sequential(
nn.Linear(out_dim, hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, out_dim),
)
# create target encoder as a copy of online (no gradients)
self.target_encoder = copy.deepcopy(self.online_encoder)
for param in self.target_encoder.parameters():
param.requires_grad = False
self.m = m # momentum coefficient
@torch.no_grad()
def _momentum_update_target(self):
"""
θ_target ← m θ_target + (1-m) θ_online
"""
for param_o, param_t in zip(self.online_encoder.parameters(),
self.target_encoder.parameters()):
param_t.data = param_t.data * self.m + param_o.data * (1.0 - self.m)
def forward(self, x1, x2):
"""
x1, x2: two augmented views [B, 3, H, W]
Returns BYOL loss (MSE between p and stop_grad(z_t))
"""
# online pass
h1, z1 = self.online_encoder(x1) # z1: [B, out_dim]
h2, z2 = self.online_encoder(x2)
p1 = self.predictor(z1) # [B, out_dim]
p2 = self.predictor(z2)
# target pass (no grad)
with torch.no_grad():
_, z1_t = self.target_encoder(x1) # [B, out_dim]
_, z2_t = self.target_encoder(x2)
        # normalize
p1 = nn.functional.normalize(p1, dim=1)
p2 = nn.functional.normalize(p2, dim=1)
z1_t = nn.functional.normalize(z1_t, dim=1)
z2_t = nn.functional.normalize(z2_t, dim=1)
# compute BYOL loss: 2 - 2 * cos_sim
loss1 = 2 - 2 * (p1 * z2_t).sum(dim=1)
loss2 = 2 - 2 * (p2 * z1_t).sum(dim=1)
loss = (loss1 + loss2).mean()
return loss
def update_target(self):
self._momentum_update_target()
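# Usage sketch (illustrative; `optimizer` stands for any torch.optim optimizer
# over the online encoder and predictor). The target network is never updated
# by backprop, only by update_target():
#   byol = BYOL(base_encoder=ResNetEncoder, out_dim=128)
#   loss = byol(x1, x2)   # x1, x2: two augmented views
#   loss.backward(); optimizer.step(); optimizer.zero_grad()
#   byol.update_target()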
################################################################################
# DINO Skeleton (ResNet-18 + prototypes head)
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim=1024, hidden_dim=512):
"""
in_dim: backbone output dim (512 for ResNet-18)
out_dim: number of prototypes (e.g., 1024)
"""
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, out_dim),
)
        # temperature parameters
self.student_temp = 0.1
self.teacher_temp = 0.04
# center for teacher
self.register_buffer("center", torch.zeros(1, out_dim))
def forward_student(self, x):
logits = self.mlp(x) # [B, out_dim]
logits = logits / self.student_temp
return logits
@torch.no_grad()
def forward_teacher(self, x):
logits = self.mlp(x) # [B, out_dim]
logits = (logits - self.center) / self.teacher_temp
return torch.softmax(logits, dim=-1)
@torch.no_grad()
def update_center(self, teacher_outputs):
"""
teacher_outputs: [B, out_dim], after softmax
"""
batch_center = torch.mean(teacher_outputs, dim=0, keepdim=True)
decay = 0.9
self.center = self.center * decay + batch_center * (1 - decay)
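# Usage sketch (illustrative): the student/teacher asymmetry in one place.
# The sharper teacher temperature (0.04 vs 0.1) plus centering is what keeps
# the outputs from collapsing to a uniform or one-hot distribution.
#   head = DINOHead(in_dim=512, out_dim=1024)
#   s = head.forward_student(torch.randn(4, 512))   # [4, 1024] scaled logits
#   t = head.forward_teacher(torch.randn(4, 512))   # [4, 1024] probabilities
#   head.update_center(t)                           # EMA update of the center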
class DINO(nn.Module):
"""
DINO wrapper:
- student_backbone (ResNet-18) + student_head (DINOHead)
- teacher_backbone + teacher_head (momentum copy)
- Multi-crop loss
"""
def __init__(self,
backbone_fn,
in_dim=512,
num_prototypes=1024,
m=0.996):
super().__init__()
# Student
self.student_backbone = backbone_fn()
self.student_head = DINOHead(in_dim=in_dim, out_dim=num_prototypes)
# Teacher
self.teacher_backbone = copy.deepcopy(self.student_backbone)
self.teacher_head = copy.deepcopy(self.student_head)
for p in self.teacher_backbone.parameters():
p.requires_grad = False
for p in self.teacher_head.parameters():
p.requires_grad = False
self.m = m # momentum for teacher
@torch.no_grad()
def _momentum_update_teacher(self):
# Update backbone
for param_s, param_t in zip(self.student_backbone.parameters(),
self.teacher_backbone.parameters()):
param_t.data = param_t.data * self.m + param_s.data * (1.0 - self.m)
# Update head
for param_s, param_t in zip(self.student_head.parameters(),
self.teacher_head.parameters()):
param_t.data = param_t.data * self.m + param_s.data * (1.0 - self.m)
def forward(self, crops):
"""
crops: list of augmented images
e.g. [global1, global2, local1, local2, local3, local4]
Returns: DINO loss
"""
B = crops[0].size(0)
        # student: forward on all crops
student_logits = []
for crop in crops:
feat = self.student_backbone(crop) # [B, in_dim]
logit = self.student_head.forward_student(feat) # [B, num_prototypes]
student_logits.append(logit)
# teacher: forward on only the first two global crops
with torch.no_grad():
teacher_probs = []
for crop in crops[:2]:
feat_t = self.teacher_backbone(crop)
prob = self.teacher_head.forward_teacher(feat_t) # [B, num_prototypes]
teacher_probs.append(prob)
# concatenate to shape [2B, num_prototypes]
teacher_probs = torch.cat(teacher_probs, dim=0)
        # cross-entropy between teacher_probs and student_logits for each crop
total_loss = 0.0
n_crops = len(crops)
for v in range(n_crops):
logits_v = student_logits[v] # [B, P]
log_probs = torch.log_softmax(logits_v, dim=1) # [B, P]
            # tile student log-probs to [2B, P] so each teacher global view is
            # paired with this student crop (this skeleton keeps the same-view
            # pairs; the original DINO loss skips them)
            log_probs = torch.cat([log_probs, log_probs], dim=0)
loss_v = - (teacher_probs * log_probs).sum(dim=1).mean()
total_loss += loss_v
total_loss /= n_crops
return total_loss
def update_teacher(self):
self._momentum_update_teacher()
# after computing teacher outputs, call `self.student_head.update_center(...)` in training code
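################################################################################
# Minimal smoke test (illustrative sketch, not part of the original training
# code): builds each wrapper on random data and checks output shapes.
if __name__ == "__main__":
    enc = ResNetEncoder(base='resnet18', out_dim=128)
    h, z = enc(torch.randn(2, 3, 224, 224))
    print(h.shape, z.shape)   # [2, 512], [2, 128]

    moco = MoCo(base_encoder=ResNetEncoder, dim=128, K=4096)
    logits, labels = moco(torch.randn(2, 3, 224, 224),
                          torch.randn(2, 3, 224, 224))
    print(logits.shape, labels.shape)   # [2, 4097], [2]

    byol = BYOL(base_encoder=ResNetEncoder, out_dim=128)
    print(byol(torch.randn(2, 3, 224, 224), torch.randn(2, 3, 224, 224)).item())

    def resnet18_backbone():
        # ResNet-18 trunk with the classifier removed -> [B, 512] features
        net = models.resnet18(weights=None)
        net.fc = nn.Identity()
        return net

    dino = DINO(backbone_fn=resnet18_backbone, in_dim=512, num_prototypes=1024)
    crops = [torch.randn(2, 3, 224, 224)] * 2 + [torch.randn(2, 3, 96, 96)] * 4
    print(dino(crops).item())
    dino.update_teacher()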