# 2022.06.28 - Changed for building CMT
# Huawei Technologies Co., Ltd. <foss@huawei.com>
# Modified from Facebook's DeiT
# jianyuan.guo@huawei.com
#
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
# --------------------- NOTE ---------------------
# For RTX 40xx GPUs, use the latest version of PyTorch.
import argparse
import datetime
import json
import os
import time
import warnings
from argparse import Namespace  # needed for the proxy-loss arguments below

import numpy as np
import timm
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from loguru import logger
from timm.models import create_model
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler, CosineLRScheduler
from timm.utils import NativeScaler, get_state_dict, ModelEma, ApexScaler

import CrossFormer_utils as utils
from utils.tools import *
from relative_similarity import *
from centroids_generator import *
from loss.loss import RelaHashLoss
from loss.softTriplet import *
from model.renet import *
from loss.hypbird import margin_contrastive
from loss.proxysyn import *

# Argument parsers for the other backbones this script has been used with:
# from cmt_args import get_args_parser
# from swiftformer_args import get_args_parser
# from crossFormer_args import get_config as get_configs
# from crossFormer_args import parse_option
# from Biformer_args import get_args_parser
from hornet_args import get_args_parser

# NVIDIA Apex is optional; fall back to native PyTorch if it is missing.
try:
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model
    has_apex = True
except ImportError:
    has_apex = False

warnings.filterwarnings("ignore")  # silence ImageNet dataset warnings

# Avoid "too many open files" errors when DataLoader workers share tensors.
torch.multiprocessing.set_sharing_strategy('file_system')

# Fix random seeds for reproducibility.
torch.manual_seed(3407)
torch.cuda.manual_seed(3407)
def get_config():
    config = {
        "alpha": 0.1,
        "info": "[ResNet50]",
        "eval": 10,                 # start evaluating after this many epochs
        "model": "small",
        "step_continuation": 20,
        "resize_size": 256,
        "crop_size": 224,
        "batch_size": 32,
        "datasets": "mirflickr",
        # "datasets": "cifar10",
        # "datasets": "nuswide_21",
        # "datasets": "coco",
        "Label_dim": 10,
        "epoch": 150,
        "test_map": 0,
        "save_path": "save/HashNet",
        "device": torch.device("cuda:0"),
        "test_device": torch.device("cuda:1"),
        "bit_list": [16, 32, 64, 128],  # hash code lengths to train
        "img_size": 224,
        "patch_size": 4,
        "in_chans": 3,
        "num_work": 10,
        "model_type": "BiFormer",
        "top_img": 100,
        "pretrained": "fca50.pth",
        # RelaHash hyperparameters
        "Beta": 8,
        "m": 0.7,
    }
    config = config_dataset(config)
    return config
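
# config_dataset comes from utils.tools (not shown here). In DeepHash-style
# codebases it fills in dataset-specific fields such as the retrieval cutoff
# "topK", the class count "n_class", and the data list paths. A minimal sketch,
# assuming that convention (illustrative only, not this repository's code):
#
#   def config_dataset(config):
#       if config["datasets"] == "mirflickr":
#           config["topK"], config["n_class"] = -1, 38
#       elif config["datasets"] == "coco":
#           config["topK"], config["n_class"] = 5000, 80
#       config["data_path"] = "./dataset/" + config["datasets"] + "/"
#       return config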
def build_model(name=None, bit=None):
    model = ResNet50(bit=bit)
    return model
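
# Illustrative usage of the builder above. ResNet50 comes from model/renet.py
# (not shown) and is assumed to end in a `bit`-dimensional hash projection head:
#   net = build_model(bit=64)
#   codes = net(torch.randn(2, 3, 224, 224))  # expected shape: [2, 64]

# loss/proxysyn.py is also not shown. Norm_SoftMax with ps_alpha/ps_mu follows
# the Proxy Synthesis line of work, i.e. a scaled cosine softmax over learnable
# class proxies. A minimal sketch of that objective, under those assumptions
# (names here are illustrative, not the repository's actual implementation):
def _norm_softmax_sketch(features, proxies, labels, scale=23.0):
    """Scaled cosine-softmax proxy loss for multi-hot labels (sketch only)."""
    features = F.normalize(features, dim=1)   # [B, bit]
    proxies = F.normalize(proxies, dim=1)     # [n_class, bit]
    logits = scale * features @ proxies.t()   # [B, n_class]
    targets = labels / labels.sum(dim=1, keepdim=True)
    return (-targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()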
def train_val(config, bit, args=None, configs=None):
device = torch.device(config['device'])
torch.manual_seed(3407)
np.random.seed(3407)
torch.cuda.manual_seed(3407)
cudnn.benchmark = True
# print(f"Creating model: {args.cfg}")
    # Number of label categories per dataset.
    class_len = 10
    if config['datasets'] == 'mirflickr':
        class_len = 38
    elif config['datasets'] == 'coco':
        class_len = 80
    elif config['datasets'] == 'nuswide_21':
        class_len = 21
    model = build_model(config['model'], bit=bit)
    model.to(device)
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # Dummy input (also warms up the CUDA context).
    input_size = [1, 3, 224, 224]
    dummy_input = torch.randn(input_size).cuda()
    # Scale the base learning rate linearly with the global batch size.
    linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
    # linear_scaled_lr = args.lr * args.batch_size / 512.0
    args.lr = linear_scaled_lr
    print('learning rate: ', args.lr)
    optimizer = create_optimizer(args, model)
    print('Training with torch.cuda.amp autocast (mixed precision).')
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    if config['pretrained']:
        checkpoint = torch.load(config['pretrained'], map_location='cpu')
        model.load_state_dict(checkpoint, strict=False)
        print('Loaded pretrained weights from', config['pretrained'])
    model.to(config["device"])
    relative_similarity = RelativeSimilarity(nbit=bit, nclass=class_len, batchsize=config["batch_size"])
    rela_optimizer = optim.Adam(relative_similarity.parameters(), lr=1e-5)
    train_loader, test_loader, dataset_loader, num_train, num_test, num_dataset = get_data(config)
    quan_loss = RelaHashLoss(multiclass=True, beta=config['Beta'], m=config['m'])  # set multiclass=False for cifar10
    Best_mAP = 0
    # Hyperparameters for the proxy loss (kept separate from the CLI `args`).
    proxy_args = Namespace(n_bits=bit, n_classes=class_len, scale=23.0, ps_alpha=0.40, ps_mu=0.8, method=1)
    criterion = Norm_SoftMax(proxy_args, ps_rate=0.5).cuda()
    proxy_opt = torch.optim.Adam(criterion.parameters(), lr=1e-3)
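
    # criterion holds the learnable class proxies, so it gets its own optimizer
    # (proxy_opt) and is stepped together with the backbone optimizer below.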
for epoch in range(config["epoch"]):
# model.module.update_temperature()
current_time = time.strftime('%H:%M:%S', time.localtime(time.time()))
logger.info("%s[%2d/%2d][%s] bit:%d, datasets:%s, training...." % (
config["info"], epoch + 1, config["epoch"], current_time, bit, config["datasets"]), end="")
model.train()
train_loss = 0
for image, label, ind in train_loader:
image = image.to(device)
label = label.to(device).float()
with torch.cuda.amp.autocast():
u = model(image)
u = F.normalize(u)
A = ((label == 0).sum(dim=1) == label.shape[1])
label[A == True] = 1
loss = criterion(u,label)
train_loss += loss
optimizer.zero_grad()
proxy_opt.zero_grad()
loss.backward()
optimizer.step()
proxy_opt.step()
train_loss = train_loss / len(train_loader)
# /home/wbt/conda_env_with_new_amp/conda_env/anaconda3/bin/python3.9 CMT_train.py --output_dir './' --model cmt_s --batch-size 32 --apex-amp --input-size 224 --weight-decay 0.05 --drop-path 0.1 --epochs 300 --test_freq 100 --test_epoch 260 --warmup-lr 1e-7 --warmup-epochs 20
logger.info("\b\b\b\b\b\b\b train_loss:%.4f" % (train_loss))
if (epoch + 1) > config['eval']:
Best_mAP, index_img = validate(config, Best_mAP, test_loader, dataset_loader, model, bit, epoch, 10)
model.to(config["device"])
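
# validate() and get_data() come from utils.tools (not shown). For reference, a
# hedged sketch of the retrieval metric that DeepHash-style validate() functions
# compute: mean average precision over Hamming ranking. Names and the exact
# top-k convention are illustrative, not this repository's implementation.
def _map_by_hamming_ranking(query_codes, db_codes, query_labels, db_labels, topk):
    """mAP@topk for +/-1 binary codes with multi-hot labels (sketch only)."""
    n_bits = query_codes.shape[1]
    ap_sum, n_query = 0.0, query_codes.shape[0]
    for i in range(n_query):
        # Hamming distance via the inner product of +/-1 codes.
        hamm = 0.5 * (n_bits - query_codes[i] @ db_codes.T)
        order = np.argsort(hamm)[:topk]
        relevant = (db_labels[order] @ query_labels[i]) > 0
        n_relevant = relevant.sum()
        if n_relevant == 0:
            continue
        precision = np.cumsum(relevant) / np.arange(1, len(order) + 1)
        ap_sum += (precision * relevant).sum() / n_relevant
    return ap_sum / n_query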
if __name__ == '__main__':
    parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    config = get_config()
    # Create the log file.
    logger.add('logs/{time}' + config["info"] + '_' + config["datasets"] + ' alpha ' + str(config["alpha"]) + '.log',
               rotation='50 MB', level='DEBUG')
    logger.info(config)
    # Train one model per hash-code length.
    for bit in config["bit_list"]:
        config["pr_curve_path"] = f"log/alexnet/HashNet_{config['datasets']}_{bit}.json"
        train_val(config, bit, args)
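
# Example invocation. The exact flag names come from hornet_args.get_args_parser
# (not shown); --lr and --batch-size are assumed here only because args.lr and
# args.batch_size are read above:
#   python resnet_proxy_train.py --lr 5e-4 --batch-size 32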