diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/.keep" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/NA_abl.sh" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/NA_abl.sh" new file mode 100644 index 0000000000000000000000000000000000000000..c1cc6a63ecdb9ffd92a7d280e82ebba8697ea98f --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/NA_abl.sh" @@ -0,0 +1,8 @@ +source=nwpu +target=aid +gpuid=4 +version=0809 + +python main2.py -v ${version}_src --gpu ${gpuid} -src ${source} -tar ${target} --arch source_only --mode test --acc 0.8913 +python main_cf.py -v ${version}_cf --gpu ${gpuid} -src ${source} -tar ${target} --arch cf_only --mode test --acc 0.9676 +python main_dis.py -v ${version}_dis --gpu ${gpuid} -src ${source} -tar ${target} --arch dis_only --mode test --acc 0.9463 \ No newline at end of file diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/data/.keep" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/data/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/data/nc.json" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/data/nc.json" new file mode 100644 index 0000000000000000000000000000000000000000..dfc3f555b720de391932b83daf90211d457a6914 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/data/nc.json" @@ -0,0 +1,8 @@ +{ + "u_a" : 13, + "a_u" : 13, + "n_u" : 20, + "u_n" : 20, + 
"n_a" : 19, + "a_n" : 19 +} \ No newline at end of file diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/.keep" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/__init__.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..f8401198345c2a42ce91a9323d52654d9a98637b --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/__init__.py" @@ -0,0 +1 @@ +from .folder import CVDataLoader diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/folder.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/folder.py" new file mode 100644 index 0000000000000000000000000000000000000000..7aa8edfa8988ffaac0f6de8ccedddeb8889bdfea --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/dataset/folder.py" @@ -0,0 +1,86 @@ +import random +import torch.utils.data +from builtins import object + + +class PairedData(object): + def __init__(self, data_loader_A, data_loader_B, max_dataset_size, flip): + self.data_loader_A = data_loader_A + self.data_loader_B = data_loader_B + self.stop_A = False + self.stop_B = False + self.max_dataset_size = max_dataset_size + self.flip = flip + + def __iter__(self): + self.stop_A = False + self.stop_B = False + self.data_loader_A_iter = iter(self.data_loader_A) + self.data_loader_B_iter = iter(self.data_loader_B) + self.iter = 0 + return self + + def __next__(self): + A, A_paths = None, None + B, B_paths = None, None + try: + A, A_paths = next(self.data_loader_A_iter) + except StopIteration: + if A is None or A_paths is None: + self.stop_A = True + self.data_loader_A_iter = iter(self.data_loader_A) + A, A_paths = next(self.data_loader_A_iter) + + try: + B, B_paths = next(self.data_loader_B_iter) + except StopIteration: + if B is None or B_paths is None: + 
self.stop_B = True + self.data_loader_B_iter = iter(self.data_loader_B) + B, B_paths = next(self.data_loader_B_iter) + + if (self.stop_A and self.stop_B) or self.iter > self.max_dataset_size: + self.stop_A = False + self.stop_B = False + raise StopIteration() + else: + self.iter += 1 + if self.flip and random.random() < 0.5: + idx = [i for i in range(A.size(3) - 1, -1, -1)] + idx = torch.LongTensor(idx) + A = A.index_select(3, idx) + B = B.index_select(3, idx) + return {'S': A, 'S_label': A_paths, + 'T': B, 'T_label': B_paths} + + def __len__(self): + return min(max(len(self.data_loader_A), len(self.data_loader_B)), self.max_dataset_size) + + +class CVDataLoader(object): + def initialize(self, dataset_A, dataset_B, batch_size, shuffle=True): + # normalize = transforms.Normalize(mean=mean_im,std=std_im) + self.max_dataset_size = float("inf") + data_loader_A = torch.utils.data.DataLoader( + dataset_A, + batch_size=batch_size, + shuffle=shuffle, + num_workers=4, drop_last=True) + data_loader_B = torch.utils.data.DataLoader( + dataset_B, + batch_size=batch_size, + shuffle=shuffle, + num_workers=4, drop_last=True) + self.dataset_A = dataset_A + self.dataset_B = dataset_B + flip = False + self.paired_data = PairedData(data_loader_A, data_loader_B, self.max_dataset_size, flip) + + def name(self): + return 'UnalignedDataLoader' + + def load_data(self): + return self.paired_data + + def __len__(self): + return min(max(len(self.dataset_A), len(self.dataset_B)), self.max_dataset_size) diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main.py" new file mode 100644 index 0000000000000000000000000000000000000000..96a920f870f30bf110ae62637aa7684b8d560882 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main.py" @@ -0,0 +1,415 @@ +import argparse +import models +import os +import random +import json +from utils import * +from torchvision import datasets, transforms +import torch +from dataset import * +import matplotlib.pyplot as plt +import matplotlib +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +import sys +import scipy.io as sio + +model_names = sorted(name for name in models.__dict__ + if not name.startswith("__") and callable(models.__dict__[name])) +print(model_names) + +parser = argparse.ArgumentParser(description='Scene Classification') +parser.add_argument('--mode', type=str, default='train', + metavar='MODE', help='train or test') +parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50', choices=model_names, + help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') +parser.add_argument('--measure', '-m', metavar='MEASURE', default='L1', + help='the measure of distance between f1 and f2') +parser.add_argument('--source_data', '-src', metavar='SOURCE', dest='train_data', + help='source dataset') +parser.add_argument('--target_data', '-tar', metavar='TARGET', dest='val_data', 
+ help='target dataset') +parser.add_argument('--batch_size', '-b', type=int, default=32, + metavar='N', help='mini-batch size (default: 32)') +parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--epochs', type=int, default=40, + metavar='N', help='number of epochs to train (default: 40)') +parser.add_argument('--lr', type=float, default=0.001, + metavar='LR', help='initial learning rate (default: 0.001)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', '-wd', default=0.0005, type=float, + metavar='W', help='weight decay (default: 0.0005)') +parser.add_argument('--layers', type=int, default=2, metavar='K', + help='numbers of layers for classifier') +parser.add_argument('--num_k', type=int, default=4, metavar='K', + help='numbers of steps to repeat the generator update') +parser.add_argument('--seed', default=1, type=int, + help='seed for initializing training') +parser.add_argument('--save_path', '-s', metavar='PATH', default=None, + help='saving path') +parser.add_argument('--gpu', type=str, default='1', metavar='GPU_ID', + help='GPU id to use') +parser.add_argument('--print_freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--version', '-v', type=str, default='0', metavar='Ver', + help='model version') +parser.add_argument('--acc', type=str, default='0', metavar='Accuracy', + help='model to test') +parser.add_argument('--clusters', type=str, default='1000', metavar='clusters for visualization', + help='tsne clusters') +args = parser.parse_args() +best_prec = 0 +val_acc = [] +cls_loss, f_loss, g_loss = [], [], [] +matplotlib.use('Agg') + + +def plot_graph(x_vals, y_vals, x_label, y_label, legend): + for i in range(len(legend)): + plt.xlabel(x_label[i]) + plt.ylabel(y_label[i]) + plt.plot(x_vals[i], y_vals[i]) + fileName = os.path.join(args.save_path, legend[i] + ".png") + # fileName = args.save_path + '/' + legend[i] + ".png" + plt.savefig(fileName) + plt.close() + + +def main(): + global args, best_prec + print(args) + + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + args.iter_num = 1 # record loss every ${print_freq} times + + '''get number of classes''' + args.pair = args.train_data[0] + '_' + args.val_data[0] + with open('./data/nc.json', 'r') as f: + nc_info = json.load(f) + args.nc = nc_info[args.pair] + + '''set saving dir''' + if args.save_path is None: + args.save_path = os.path.join('./output', args.pair.upper(), args.version) + if not os.path.exists(args.save_path): + os.makedirs(args.save_path) + + '''random seed''' + if args.seed is not None: + random.seed(args.seed) + else: + args.seed = random.randint(1, 10000) + torch.manual_seed(args.seed) + + '''load data''' + train_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.train_data) + val_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.val_data) + data_transforms = { + train_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + val_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + 
]), + } + dsets = {x: datasets.ImageFolder(os.path.join(x), data_transforms[x]) for x in [train_path, val_path]} + dset_classes = dsets[train_path].classes + print('\nclasses' + str(dset_classes) + '\n') + + train_loader = CVDataLoader() + train_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + train_dataset = train_loader.load_data() + test_loader = CVDataLoader() + test_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + test_dataset = test_loader.load_data() + + '''model building''' + model, criterion = models.__dict__[args.arch](pretrained=True, args=args) + if args.gpu is not None: + model = model.cuda() + criterion = criterion.cuda() + + if args.mode == 'test' or args.mode == 'visualize': + print("Testing! Arch:" + args.arch) + path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{}.pth'.format(args.acc)) + if args.acc == '0': + print('No Model Here!') + sys.exit() + model.load_state_dict(torch.load(path)['state_dict']) + model.eval() + + if args.mode == 'visualize': + with torch.no_grad(): + features = np.array([]) + labels = np.array([]) + for batch_idx, data in enumerate(train_dataset): + print(batch_idx) + stage = 0 + if batch_idx * args.batch_size > int(args.clusters): + break + data1 = data['S'] + target1 = data['S_label'] + for i in range(len(target1)): + target1[i] = 0 + data2 = data['T'] + target2 = data['T_label'] + for i in range(len(target2)): + target2[i] = 1 + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(torch.cat((target1, target2), 0)) + feature = model(input)[2].cpu().numpy() + feature = feature.reshape(feature.shape[0], -1) + target = target.cpu().numpy() + if batch_idx == 0: + features = feature + labels = target + else: + features = np.vstack([features, feature]) + # print(target) + labels = np.hstack([labels, target]) + # print(labels) + + print('start tsne...') + visual_fig = tsne_visualize(features, labels) + print('start saving...') + fig_path = os.path.join(args.save_path, 'Figures', + '{}_visual_feature_{}.png'.format(args.version, args.clusters)) + plt.savefig(fig_path) + sys.exit() + + correct = 0 + correct2 = 0 + size = 0 + print(val_path) + val_data_loader = torch.utils.data.DataLoader(dsets[val_path], batch_size=args.batch_size, + shuffle=False, num_workers=4, ) + target_labels = np.array([]) + pred_labels_f1 = np.array([]) + pred_labels_f2 = np.array([]) + print(len(val_data_loader)) + for data2, target2 in val_data_loader: + data2, target2 = data2.cuda(), target2.cuda() + data1, target1 = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(data1) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(target1.data).cpu().sum() + pred_labels_f1 = np.append(pred_labels_f1, pred.cpu().numpy()) + target_labels = np.append(target_labels, target2.cpu().numpy()) + pred = output2.data.max(1)[1] # get the index of the max log-probability + pred_labels_f2 = np.append(pred_labels_f2, pred.cpu().numpy()) + k = target1.data.size()[0] + correct2 += pred.eq(target1.data).cpu().sum() + size += k + acc1 = 1.0 * correct.numpy() / (1.0 * size) + acc2 = 1.0 * correct2.numpy() / (1.0 * size) + acc = max(acc1, acc2) + print('Accuracy: {:.4f}'.format(acc)) + + mat_path = os.path.join(args.save_path, 'test_{:.4f}.mat'.format(acc)) + if os.path.exists(mat_path): + sys.exit() + if correct2 > correct: + 
sio.savemat(mat_path, {'pred': pred_labels_f2, 'target': target_labels}) + else: + sio.savemat(mat_path, {'pred': pred_labels_f1, 'target': target_labels}) + class_names = val_data_loader.dataset.classes + plot_confusion_matrix(mat_path, class_names, args.save_path, acc) + sys.exit() + + '''set optimizer''' + g_params = [v for k, v in model.named_parameters() if 'gen' in k] + f_params = [v for k, v in model.named_parameters() if 'cls' in k] + + g_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, g_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + f_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, f_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + '''training''' + for epoch in range(args.start_epoch, args.epochs): + train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch) + + prec = validate(test_dataset, model) + val_acc.append(prec) + is_best = prec > best_prec + best_prec = max(prec, best_prec) + + # save model + if epoch == args.epochs - 1 or (is_best and best_prec > 0.5): + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}.pth'.format(prec)) + if epoch == args.epochs - 1: + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}_last.pth'.format(prec)) + torch.save({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + }, save_path) + print('saving!!!!') + + # plot graph + x_vals, y_vals, x_label, y_label, legend = [], [cls_loss, f_loss, g_loss, val_acc], \ + [], [], ["L_cls", "L_f", "L_g", "val_acc"] + for i in range(len(legend)): + if 'L_' in legend[i]: + x_vals.append(range(1, args.iter_num)) + x_label.append('iter_num(*10)') + y_label.append('loss') + else: + x_vals.append(range(1, epoch + 1 - args.start_epoch + 1)) + x_label.append('epoch_num') + y_label.append('acc') + plot_graph(x_vals, y_vals, x_label, y_label, legend) + + +def train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch): + model.train() + + for batch_idx, data in enumerate(train_dataset): + stage = 0 + if batch_idx * args.batch_size > 30000: + break + data1 = data['S'] + target1 = data['S_label'] + data2 = data['T'] + target2 = data['T_label'] + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(target1) + + # Step A: train all networks to minimize the loss on source + g_optimizer.zero_grad() + f_optimizer.zero_grad() + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_cls = current_loss + current_loss.backward() + g_optimizer.step() + f_optimizer.step() + stage += 1 + + # Step B: train classifier to maximize discrepancy + g_optimizer.zero_grad() + f_optimizer.zero_grad() + + # block the G + for name, param in model.named_parameters(): + if 'gen' in name: + param.requires_grad = False + + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_f = current_loss + current_loss.backward(retain_graph=True) + stage += 1 + + # block the F + for name, param in model.named_parameters(): + param.requires_grad = True + if 'cls' in name: + param.requires_grad = False + + # confuse the G,use the same cal graph + g_optimizer.zero_grad() + current_loss = loss[stage] + L_cf = current_loss + current_loss.backward() + + # update the F and G + for p in model.parameters(): + p.requires_grad = True + g_optimizer.step() + 
f_optimizer.step() + stage += 1 + + # Step C: train generator to minimize discrepancy + L_g = None + for i in range(args.num_k): + g_optimizer.zero_grad() + + # block the F + if i == 0: + for name, param in model.named_parameters(): + param.requires_grad = True + if 'cls' in name: + param.requires_grad = False + + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_g = current_loss + current_loss.backward() + g_optimizer.step() + # stage += 1 + + # g_optimizer.zero_grad() + # logits = model(input) + # loss = criterion(logits, target, epoch) + # current_loss = loss[stage] + # L_cf = current_loss + # current_loss.backward() + # g_optimizer.step() + + for p in model.parameters(): + p.requires_grad = True + + # print result + if batch_idx % args.print_freq == 0: + print('Epoch: [{0}][{1}/{2}] loss:'.format + (epoch, batch_idx, len(train_dataset)), end='') + print('L_cls: {:.4f} L_f: {:.4f} L_g: {:.4f} L_cf: {:.4f}'.format + (L_cls.item(), L_f.item(), L_g.item(), L_cf.item())) # .data[0] + args.iter_num = args.iter_num + 1 + cls_loss.append(L_cls.item()) + f_loss.append(L_f.item()) + g_loss.append(L_g.item()) + + +def validate(test_dataset, model): + model.eval() + correct = 0 + correct2 = 0 + size = 0 + with torch.no_grad(): + for batch_idx, data in enumerate(test_dataset): + if batch_idx * args.batch_size > 5000: + break + data2 = data['T'] + target2 = data['T_label'] + data2, target2 = data2.cuda(), target2.cuda() + test_data, test_target = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(test_data) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(test_target.data).cpu().sum() + pred = output2.data.max(1)[1] + k = test_target.data.size()[0] + correct2 += pred.eq(test_target.data).cpu().sum() + size += k + f1_acc = 1.0 * correct.numpy() / (1.0 * size) + f2_acc = 1.0 * correct2.numpy() / (1.0 * size) + + print('f1_acc: {:.4f} f2_acc: {:.4f}'.format(f1_acc, f2_acc)) + + return max(f1_acc, f2_acc) + + +if __name__ == '__main__': + main() diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main2.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main2.py" new file mode 100644 index 0000000000000000000000000000000000000000..57cb6e1eeb0e46497179873b82bd6e770da66216 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main2.py" @@ -0,0 +1,346 @@ +import argparse +import models +import os +import random +import json +from utils import * +from torchvision import datasets, transforms +import torch +from dataset import * +import matplotlib.pyplot as plt +import matplotlib +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +import sys +import scipy.io as sio + +model_names = sorted(name for name in models.__dict__ + if not name.startswith("__") and callable(models.__dict__[name])) +print(model_names) + +parser = argparse.ArgumentParser(description='Scene 
Classification') +parser.add_argument('--mode', type=str, default='train', + metavar='MODE', help='train or test') +parser.add_argument('--arch', '-a', metavar='ARCH', default='source_only', choices=model_names, + help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') +parser.add_argument('--measure', '-m', metavar='MEASURE', default='L1', + help='the measure of distance between f1 and f2') +parser.add_argument('--source_data', '-src', metavar='SOURCE', dest='train_data', + help='source dataset') +parser.add_argument('--target_data', '-tar', metavar='TARGET', dest='val_data', + help='target dataset') +parser.add_argument('--batch_size', '-b', type=int, default=32, + metavar='N', help='mini-batch size (default: 32)') +parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--epochs', type=int, default=40, + metavar='N', help='number of epochs to train (default: 40)') +parser.add_argument('--lr', type=float, default=0.001, + metavar='LR', help='initial learning rate (default: 0.001)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', '-wd', default=0.0005, type=float, + metavar='W', help='weight decay (default: 0.0005)') +parser.add_argument('--layers', type=int, default=2, metavar='K', + help='numbers of layers for classifier') +parser.add_argument('--num_k', type=int, default=4, metavar='K', + help='numbers of steps to repeat the generator update') +parser.add_argument('--seed', default=1, type=int, + help='seed for initializing training') +parser.add_argument('--save_path', '-s', metavar='PATH', default=None, + help='saving path') +parser.add_argument('--gpu', type=str, default='1', metavar='GPU_ID', + help='GPU id to use') +parser.add_argument('--print_freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--version', '-v', type=str, default='0', metavar='Ver', + help='model version') +parser.add_argument('--acc', type=str, default='0', metavar='Accuracy', + help='model to test') +parser.add_argument('--clusters', type=str, default='1000', metavar='clusters for visualization', + help='tsne clusters') +args = parser.parse_args() +best_prec = 0 +val_acc = [] +cls_loss, f_loss, g_loss = [], [], [] +matplotlib.use('Agg') + + +def plot_graph(x_vals, y_vals, x_label, y_label, legend): + for i in range(len(legend)): + plt.xlabel(x_label[i]) + plt.ylabel(y_label[i]) + plt.plot(x_vals[i], y_vals[i]) + fileName = os.path.join(args.save_path, legend[i] + ".png") + # fileName = args.save_path + '/' + legend[i] + ".png" + plt.savefig(fileName) + plt.close() + + +def main(): + global args, best_prec + print(args) + + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + args.iter_num = 1 # record loss every ${print_freq} times + + '''get number of classes''' + args.pair = args.train_data[0] + '_' + args.val_data[0] + with open('./data/nc.json', 'r') as f: + nc_info = json.load(f) + args.nc = nc_info[args.pair] + + '''set saving dir''' + if args.save_path is None: + args.save_path = os.path.join('./output', args.pair.upper(), args.version) + if not os.path.exists(args.save_path): + os.makedirs(args.save_path) + + '''random seed''' + if args.seed is not None: + random.seed(args.seed) + else: + args.seed = random.randint(1, 10000) + torch.manual_seed(args.seed) + + '''load data''' + train_path = os.path.join('/home/zzd/dataserver/zzd/TL', 
get_pair(args.pair.upper()), args.train_data) + val_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.val_data) + data_transforms = { + train_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + val_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + dsets = {x: datasets.ImageFolder(os.path.join(x), data_transforms[x]) for x in [train_path, val_path]} + dset_classes = dsets[train_path].classes + print('\nclasses' + str(dset_classes) + '\n') + + train_loader = CVDataLoader() + train_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + train_dataset = train_loader.load_data() + test_loader = CVDataLoader() + test_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + test_dataset = test_loader.load_data() + + '''model building''' + model, criterion = models.__dict__[args.arch](pretrained=True, args=args) + if args.gpu is not None: + model = model.cuda() + criterion = criterion.cuda() + + if args.mode == 'test' or args.mode == 'visualize': + print("Testing! Arch:" + args.arch) + path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{}.pth'.format(args.acc)) + if args.acc == '0': + print('No Model Here!') + sys.exit() + model.load_state_dict(torch.load(path)['state_dict']) + model.eval() + + if args.mode == 'visualize': + with torch.no_grad(): + features = np.array([]) + labels = np.array([]) + for batch_idx, data in enumerate(train_dataset): + print(batch_idx) + stage = 0 + if batch_idx * args.batch_size > int(args.clusters): + break + data1 = data['S'] + target1 = data['S_label'] + for i in range(len(target1)): + target1[i] = 0 + data2 = data['T'] + target2 = data['T_label'] + for i in range(len(target2)): + target2[i] = 1 + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(torch.cat((target1, target2), 0)) + feature = model(input)[2].cpu().numpy() + feature = feature.reshape(feature.shape[0], -1) + target = target.cpu().numpy() + if batch_idx == 0: + features = feature + labels = target + else: + features = np.vstack([features, feature]) + # print(target) + labels = np.hstack([labels, target]) + # print(labels) + + print('start tsne...') + visual_fig = tsne_visualize(features, labels) + print('start saving...') + fig_path = os.path.join(args.save_path, 'Figures', + '{}_visual_feature_{}.png'.format(args.version, args.clusters)) + plt.savefig(fig_path) + sys.exit() + + correct = 0 + correct2 = 0 + size = 0 + print(val_path) + val_data_loader = torch.utils.data.DataLoader(dsets[val_path], batch_size=args.batch_size, + shuffle=False, num_workers=4, ) + target_labels = np.array([]) + pred_labels_f1 = np.array([]) + pred_labels_f2 = np.array([]) + print(len(val_data_loader)) + for data2, target2 in val_data_loader: + data2, target2 = data2.cuda(), target2.cuda() + data1, target1 = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(data1) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(target1.data).cpu().sum() + pred_labels_f1 = np.append(pred_labels_f1, 
pred.cpu().numpy()) + target_labels = np.append(target_labels, target2.cpu().numpy()) + pred = output2.data.max(1)[1] # get the index of the max log-probability + pred_labels_f2 = np.append(pred_labels_f2, pred.cpu().numpy()) + k = target1.data.size()[0] + correct2 += pred.eq(target1.data).cpu().sum() + size += k + acc1 = 1.0 * correct.numpy() / (1.0 * size) + acc2 = 1.0 * correct2.numpy() / (1.0 * size) + acc = max(acc1, acc2) + print('Accuracy: {:.4f}'.format(acc)) + + mat_path = os.path.join(args.save_path, 'test_{:.4f}.mat'.format(acc)) + if os.path.exists(mat_path): + sys.exit() + if correct2 > correct: + sio.savemat(mat_path, {'pred': pred_labels_f2, 'target': target_labels}) + else: + sio.savemat(mat_path, {'pred': pred_labels_f1, 'target': target_labels}) + class_names = val_data_loader.dataset.classes + plot_confusion_matrix(mat_path, class_names, args.save_path, acc) + sys.exit() + + '''set optimizer''' + g_params = [v for k, v in model.named_parameters() if 'gen' in k] + f_params = [v for k, v in model.named_parameters() if 'cls' in k] + + g_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, g_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + f_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, f_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + '''training''' + for epoch in range(args.start_epoch, args.epochs): + train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch) + + prec = validate(test_dataset, model) + val_acc.append(prec) + is_best = prec > best_prec + best_prec = max(prec, best_prec) + + # save model + if epoch == args.epochs - 1 or (is_best and best_prec > 0.5): + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}.pth'.format(prec)) + if epoch == args.epochs - 1: + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}_last.pth'.format(prec)) + torch.save({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + }, save_path) + print('saving!!!!') + + # plot graph + x_vals, y_vals, x_label, y_label, legend = [], [cls_loss, val_acc], \ + [], [], ["L_cls", "val_acc"] + for i in range(len(legend)): + if 'L_' in legend[i]: + x_vals.append(range(1, args.iter_num)) + x_label.append('iter_num(*10)') + y_label.append('loss') + else: + x_vals.append(range(1, epoch + 1 - args.start_epoch + 1)) + x_label.append('epoch_num') + y_label.append('acc') + plot_graph(x_vals, y_vals, x_label, y_label, legend) + + +def train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch): + model.train() + + for batch_idx, data in enumerate(train_dataset): + stage = 0 + if batch_idx * args.batch_size > 30000: + break + data1 = data['S'] + target1 = data['S_label'] + data2 = data['T'] + target2 = data['T_label'] + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(target1) + + # Step A: train all networks to minimize the loss on source + g_optimizer.zero_grad() + f_optimizer.zero_grad() + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_cls = current_loss + current_loss.backward() + g_optimizer.step() + f_optimizer.step() + stage += 1 + + if batch_idx % args.print_freq == 0: + print('Epoch: [{0}][{1}/{2}] loss:'.format + (epoch, batch_idx, len(train_dataset)), end='') + print('L_cls: {:.4f}'.format + (L_cls.item())) # .data[0] + args.iter_num = 
args.iter_num + 1 + cls_loss.append(L_cls.item()) + + +def validate(test_dataset, model): + model.eval() + correct = 0 + correct2 = 0 + size = 0 + with torch.no_grad(): + for batch_idx, data in enumerate(test_dataset): + if batch_idx * args.batch_size > 5000: + break + data2 = data['T'] + target2 = data['T_label'] + data2, target2 = data2.cuda(), target2.cuda() + test_data, test_target = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(test_data) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(test_target.data).cpu().sum() + pred = output2.data.max(1)[1] + k = test_target.data.size()[0] + correct2 += pred.eq(test_target.data).cpu().sum() + size += k + f1_acc = 1.0 * correct.numpy() / (1.0 * size) + f2_acc = 1.0 * correct2.numpy() / (1.0 * size) + + print('f1_acc: {:.4f} f2_acc: {:.4f}'.format(f1_acc, f2_acc)) + + return max(f1_acc, f2_acc) + + +if __name__ == '__main__': + main() diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_cf.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_cf.py" new file mode 100644 index 0000000000000000000000000000000000000000..5520eab42aef938ece375ebb363e03fb33e6c4a9 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_cf.py" @@ -0,0 +1,387 @@ +import argparse +import models +import os +import random +import json +from utils import * +from torchvision import datasets, transforms +import torch +from dataset import * +import matplotlib.pyplot as plt +import matplotlib +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +import sys +import scipy.io as sio + +model_names = sorted(name for name in models.__dict__ + if not name.startswith("__") and callable(models.__dict__[name])) +print(model_names) + +parser = argparse.ArgumentParser(description='Scene Classification') +parser.add_argument('--mode', type=str, default='train', + metavar='MODE', help='train or test') +parser.add_argument('--arch', '-a', metavar='ARCH', default='cf_only', choices=model_names, + help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') +parser.add_argument('--measure', '-m', metavar='MEASURE', default='L1', + help='the measure of distance between f1 and f2') +parser.add_argument('--source_data', '-src', metavar='SOURCE', dest='train_data', + help='source dataset') +parser.add_argument('--target_data', '-tar', metavar='TARGET', dest='val_data', + help='target dataset') +parser.add_argument('--batch_size', '-b', type=int, default=32, + metavar='N', help='mini-batch size (default: 32)') +parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--epochs', type=int, default=40, + metavar='N', help='number of epochs to train (default: 40)') +parser.add_argument('--lr', type=float, default=0.001, + metavar='LR', help='initial learning rate (default: 0.001)') 
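+# main_cf.py is the classifier-confusion ("cf_only") ablation entry point; NA_abl.sh
+# invokes it as: python main_cf.py -v ${version}_cf --gpu ${gpuid} -src nwpu -tar aid \
+#                --arch cf_only --mode test --acc 0.9676
+# The CLI options below mirror those in main.py, main2.py and main_dis.py.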
+parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', '-wd', default=0.0005, type=float, + metavar='W', help='weight decay (default: 0.0005)') +parser.add_argument('--layers', type=int, default=2, metavar='K', + help='numbers of layers for classifier') +parser.add_argument('--num_k', type=int, default=4, metavar='K', + help='numbers of steps to repeat the generator update') +parser.add_argument('--seed', default=1, type=int, + help='seed for initializing training') +parser.add_argument('--save_path', '-s', metavar='PATH', default=None, + help='saving path') +parser.add_argument('--gpu', type=str, default='1', metavar='GPU_ID', + help='GPU id to use') +parser.add_argument('--print_freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--version', '-v', type=str, default='0', metavar='Ver', + help='model version') +parser.add_argument('--acc', type=str, default='0', metavar='Ver', + help='model to test') +parser.add_argument('--clusters', type=str, default='1000', metavar='clusters for visualization', + help='tsne clusters') +args = parser.parse_args() +best_prec = 0 +val_acc = [] +cls_loss, f_loss, g_loss = [], [], [] +matplotlib.use('Agg') + + +def plot_graph(x_vals, y_vals, x_label, y_label, legend): + for i in range(len(legend)): + plt.xlabel(x_label[i]) + plt.ylabel(y_label[i]) + plt.plot(x_vals[i], y_vals[i]) + fileName = os.path.join(args.save_path, legend[i] + ".png") + # fileName = args.save_path + '/' + legend[i] + ".png" + plt.savefig(fileName) + plt.close() + + +def main(): + global args, best_prec + print(args) + + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + args.iter_num = 1 # record loss every ${print_freq} times + + '''get number of classes''' + args.pair = args.train_data[0] + '_' + args.val_data[0] + with open('./data/nc.json', 'r') as f: + nc_info = json.load(f) + args.nc = nc_info[args.pair] + + '''set saving dir''' + if args.save_path is None: + args.save_path = os.path.join('./output', args.pair.upper(), args.version) + if not os.path.exists(args.save_path): + os.makedirs(args.save_path) + + '''random seed''' + if args.seed is not None: + random.seed(args.seed) + else: + args.seed = random.randint(1, 10000) + torch.manual_seed(args.seed) + + '''load data''' + train_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.train_data) + val_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.val_data) + data_transforms = { + train_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + val_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + dsets = {x: datasets.ImageFolder(os.path.join(x), data_transforms[x]) for x in [train_path, val_path]} + dset_classes = dsets[train_path].classes + print('\nclasses' + str(dset_classes) + '\n') + + train_loader = CVDataLoader() + train_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + train_dataset = train_loader.load_data() + test_loader = CVDataLoader() + test_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + 
test_dataset = test_loader.load_data() + + '''model building''' + model, criterion = models.__dict__[args.arch](pretrained=True, args=args) + if args.gpu is not None: + model = model.cuda() + criterion = criterion.cuda() + + if args.mode == 'test' or args.mode == 'visualize': + print("Testing! Arch:" + args.arch) + path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{}.pth'.format(args.acc)) + if args.acc == '0': + print('No Model Here!') + sys.exit() + model.load_state_dict(torch.load(path)['state_dict']) + model.eval() + + if args.mode == 'visualize': + with torch.no_grad(): + features = np.array([]) + labels = np.array([]) + for batch_idx, data in enumerate(train_dataset): + print(batch_idx) + stage = 0 + if batch_idx * args.batch_size > int(args.clusters): + break + data1 = data['S'] + target1 = data['S_label'] + for i in range(len(target1)): + target1[i] = 0 + data2 = data['T'] + target2 = data['T_label'] + for i in range(len(target2)): + target2[i] = 1 + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(torch.cat((target1, target2), 0)) + feature = model(input)[2].cpu().numpy() + feature = feature.reshape(feature.shape[0], -1) + target = target.cpu().numpy() + if batch_idx == 0: + features = feature + labels = target + else: + features = np.vstack([features, feature]) + # print(target) + labels = np.hstack([labels, target]) + # print(labels) + + print('start tsne...') + visual_fig = tsne_visualize(features, labels) + print('start saving...') + fig_path = os.path.join(args.save_path, 'Figures', + '{}_visual_feature_{}.png'.format(args.version, args.clusters)) + plt.savefig(fig_path) + sys.exit() + + correct = 0 + correct2 = 0 + size = 0 + print(val_path) + val_data_loader = torch.utils.data.DataLoader(dsets[val_path], batch_size=args.batch_size, + shuffle=False, num_workers=4, ) + target_labels = np.array([]) + pred_labels_f1 = np.array([]) + pred_labels_f2 = np.array([]) + print(len(val_data_loader)) + for data2, target2 in val_data_loader: + data2, target2 = data2.cuda(), target2.cuda() + data1, target1 = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(data1) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(target1.data).cpu().sum() + pred_labels_f1 = np.append(pred_labels_f1, pred.cpu().numpy()) + target_labels = np.append(target_labels, target2.cpu().numpy()) + pred = output2.data.max(1)[1] # get the index of the max log-probability + pred_labels_f2 = np.append(pred_labels_f2, pred.cpu().numpy()) + k = target1.data.size()[0] + correct2 += pred.eq(target1.data).cpu().sum() + size += k + acc1 = 1.0 * correct.numpy() / (1.0 * size) + acc2 = 1.0 * correct2.numpy() / (1.0 * size) + acc = max(acc1, acc2) + print('Accuracy: {:.4f}'.format(acc)) + + mat_path = os.path.join(args.save_path, 'test_{:.4f}.mat'.format(acc)) + if os.path.exists(mat_path): + sys.exit() + if correct2 > correct: + sio.savemat(mat_path, {'pred': pred_labels_f2, 'target': target_labels}) + else: + sio.savemat(mat_path, {'pred': pred_labels_f1, 'target': target_labels}) + class_names = val_data_loader.dataset.classes + plot_confusion_matrix(mat_path, class_names, args.save_path, acc) + sys.exit() + + '''set optimizer''' + g_params = [v for k, v in model.named_parameters() if 'gen' in k] + f_params = [v for k, v in model.named_parameters() if 'cls' in k] + + g_optimizer = torch.optim.SGD(filter(lambda p: 
p.requires_grad, g_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + f_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, f_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + '''training''' + for epoch in range(args.start_epoch, args.epochs): + train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch) + + prec = validate(test_dataset, model) + val_acc.append(prec) + is_best = prec > best_prec + best_prec = max(prec, best_prec) + + # save model + if epoch == args.epochs - 1 or (is_best and best_prec > 0.5): + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}.pth'.format(prec)) + if epoch == args.epochs - 1: + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}_last.pth'.format(prec)) + torch.save({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + }, save_path) + print('saving!!!!') + + # plot graph + x_vals, y_vals, x_label, y_label, legend = [], [cls_loss, f_loss, g_loss,val_acc], \ + [], [], ["L_cls", "L_f", "L_cf","val_acc"] + for i in range(len(legend)): + if 'L_' in legend[i]: + x_vals.append(range(1, args.iter_num)) + x_label.append('iter_num(*10)') + y_label.append('loss') + else: + x_vals.append(range(1, epoch + 1 - args.start_epoch + 1)) + x_label.append('epoch_num') + y_label.append('acc') + plot_graph(x_vals, y_vals, x_label, y_label, legend) + + +def train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch): + model.train() + + for batch_idx, data in enumerate(train_dataset): + stage = 0 + if batch_idx * args.batch_size > 30000: + break + data1 = data['S'] + target1 = data['S_label'] + data2 = data['T'] + target2 = data['T_label'] + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(target1) + + # Step A: train all networks to minimize the loss on source + g_optimizer.zero_grad() + f_optimizer.zero_grad() + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_cls = current_loss + current_loss.backward() + g_optimizer.step() + f_optimizer.step() + stage += 1 + + # Step B: train classifier to maximize discrepancy + g_optimizer.zero_grad() + f_optimizer.zero_grad() + + # block the G + # for name, param in model.named_parameters(): + # if 'gen' in name: + # param.requires_grad = False + + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_f = current_loss + current_loss.backward(retain_graph=True) + stage += 1 + + # block the F + for name, param in model.named_parameters(): + param.requires_grad = True + if 'cls' in name: + param.requires_grad = False + + # confuse the F,use the same cal graph + g_optimizer.zero_grad() + current_loss = loss[stage] + L_cf = current_loss + current_loss.backward() + + # update the F and G + for p in model.parameters(): + p.requires_grad = True + g_optimizer.step() + f_optimizer.step() + stage += 1 + + for p in model.parameters(): + p.requires_grad = True + + # print result + if batch_idx % args.print_freq == 0: + print('Epoch: [{0}][{1}/{2}] loss:'.format + (epoch, batch_idx, len(train_dataset)), end='') + print('L_cls: {:.4f} L_f: {:.4f} L_cf: {:.4f}'.format + (L_cls.item(), L_f.item(), L_cf.item())) # .data[0] + args.iter_num = args.iter_num + 1 + cls_loss.append(L_cls.item()) + f_loss.append(L_f.item()) + g_loss.append(L_cf.item()) + + +def 
validate(test_dataset, model): + model.eval() + correct = 0 + correct2 = 0 + size = 0 + with torch.no_grad(): + for batch_idx, data in enumerate(test_dataset): + if batch_idx * args.batch_size > 5000: + break + data2 = data['T'] + target2 = data['T_label'] + data2, target2 = data2.cuda(), target2.cuda() + test_data, test_target = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(test_data) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(test_target.data).cpu().sum() + pred = output2.data.max(1)[1] + k = test_target.data.size()[0] + correct2 += pred.eq(test_target.data).cpu().sum() + size += k + f1_acc = 1.0 * correct.numpy() / (1.0 * size) + f2_acc = 1.0 * correct2.numpy() / (1.0 * size) + + print('f1_acc: {:.4f} f2_acc: {:.4f}'.format(f1_acc, f2_acc)) + + return max(f1_acc, f2_acc) + + +if __name__ == '__main__': + main() diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_dis.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_dis.py" new file mode 100644 index 0000000000000000000000000000000000000000..8fa3b346e1371211968dd212b247bcd979a341f7 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/main_dis.py" @@ -0,0 +1,420 @@ +import argparse +import models +import os +import random +import json +from utils import * +from torchvision import datasets, transforms +import torch +from dataset import * +import matplotlib.pyplot as plt +import matplotlib +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +import sys +import scipy.io as sio + +model_names = sorted(name for name in models.__dict__ + if not name.startswith("__") and callable(models.__dict__[name])) +print(model_names) + +parser = argparse.ArgumentParser(description='Scene Classification') +parser.add_argument('--mode', type=str, default='train', + metavar='MODE', help='train or test') +parser.add_argument('--arch', '-a', metavar='ARCH', default='dis_only', choices=model_names, + help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') +parser.add_argument('--measure', '-m', metavar='MEASURE', default='L1', + help='the measure of distance between f1 and f2') +parser.add_argument('--source_data', '-src', metavar='SOURCE', dest='train_data', + help='source dataset') +parser.add_argument('--target_data', '-tar', metavar='TARGET', dest='val_data', + help='target dataset') +parser.add_argument('--batch_size', '-b', type=int, default=32, + metavar='N', help='mini-batch size (default: 32)') +parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--epochs', type=int, default=40, + metavar='N', help='number of epochs to train (default: 40)') +parser.add_argument('--lr', type=float, default=0.001, + metavar='LR', help='initial learning rate (default: 0.001)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', 
+ help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', '-wd', default=0.0005, type=float, + metavar='W', help='weight decay (default: 0.0005)') +parser.add_argument('--layers', type=int, default=2, metavar='K', + help='numbers of layers for classifier') +parser.add_argument('--num_k', type=int, default=4, metavar='K', + help='numbers of steps to repeat the generator update') +parser.add_argument('--seed', default=1, type=int, + help='seed for initializing training') +parser.add_argument('--save_path', '-s', metavar='PATH', default=None, + help='saving path') +parser.add_argument('--gpu', type=str, default='1', metavar='GPU_ID', + help='GPU id to use') +parser.add_argument('--print_freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--version', '-v', type=str, default='0', metavar='Ver', + help='model version') +parser.add_argument('--acc', type=str, default='0', metavar='Accuracy', + help='model to test') +parser.add_argument('--clusters', type=str, default='1000', metavar='clusters for visualization', + help='tsne clusters') +args = parser.parse_args() +best_prec = 0 +val_acc = [] +cls_loss, f_loss, g_loss = [], [], [] +matplotlib.use('Agg') + + +def plot_graph(x_vals, y_vals, x_label, y_label, legend): + for i in range(len(legend)): + plt.xlabel(x_label[i]) + plt.ylabel(y_label[i]) + plt.plot(x_vals[i], y_vals[i]) + fileName = os.path.join(args.save_path, legend[i] + ".png") + # fileName = args.save_path + '/' + legend[i] + ".png" + plt.savefig(fileName) + plt.close() + + +def main(): + global args, best_prec + print(args) + + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + args.iter_num = 1 # record loss every ${print_freq} times + + '''get number of classes''' + args.pair = args.train_data[0] + '_' + args.val_data[0] + with open('./data/nc.json', 'r') as f: + nc_info = json.load(f) + args.nc = nc_info[args.pair] + + '''set saving dir''' + if args.save_path is None: + args.save_path = os.path.join('./output', args.pair.upper(), args.version) + if not os.path.exists(args.save_path): + os.makedirs(args.save_path) + + '''random seed''' + if args.seed is not None: + random.seed(args.seed) + else: + args.seed = random.randint(1, 10000) + torch.manual_seed(args.seed) + + '''load data''' + train_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.train_data) + val_path = os.path.join('/home/zzd/dataserver/zzd/TL', get_pair(args.pair.upper()), args.val_data) + data_transforms = { + train_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + val_path: transforms.Compose([ + transforms.Scale(256), + transforms.RandomHorizontalFlip(), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + dsets = {x: datasets.ImageFolder(os.path.join(x), data_transforms[x]) for x in [train_path, val_path]} + dset_classes = dsets[train_path].classes + print('\nclasses' + str(dset_classes) + '\n') + + train_loader = CVDataLoader() + train_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + train_dataset = train_loader.load_data() + test_loader = CVDataLoader() + test_loader.initialize(dsets[train_path], dsets[val_path], args.batch_size, shuffle=True) + test_dataset = test_loader.load_data() + + '''model building''' + model, 
criterion = models.__dict__[args.arch](pretrained=True, args=args) + if args.gpu is not None: + model = model.cuda() + criterion = criterion.cuda() + + if args.mode == 'test' or args.mode == 'visualize': + print("Testing! Arch:" + args.arch) + path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{}.pth'.format(args.acc)) + if args.acc == '0': + print('No Model Here!') + sys.exit() + model.load_state_dict(torch.load(path)['state_dict']) + model.eval() + + if args.mode == 'visualize': + with torch.no_grad(): + features = np.array([]) + labels = np.array([]) + for batch_idx, data in enumerate(train_dataset): + print(batch_idx) + stage = 0 + if batch_idx * args.batch_size > int(args.clusters): + break + data1 = data['S'] + target1 = data['S_label'] + for i in range(len(target1)): + target1[i] = 0 + data2 = data['T'] + target2 = data['T_label'] + for i in range(len(target2)): + target2[i] = 1 + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(torch.cat((target1, target2), 0)) + feature = model(input)[2].cpu().numpy() + feature = feature.reshape(feature.shape[0], -1) + target = target.cpu().numpy() + if batch_idx == 0: + features = feature + labels = target + else: + features = np.vstack([features, feature]) + # print(target) + labels = np.hstack([labels, target]) + # print(labels) + + print('start tsne...') + visual_fig = tsne_visualize(features, labels) + print('start saving...') + fig_path = os.path.join(args.save_path, 'Figures', + '{}_visual_feature_{}.png'.format(args.version, args.clusters)) + plt.savefig(fig_path) + sys.exit() + + correct = 0 + correct2 = 0 + size = 0 + print(val_path) + val_data_loader = torch.utils.data.DataLoader(dsets[val_path], batch_size=args.batch_size, + shuffle=False, num_workers=4, ) + target_labels = np.array([]) + pred_labels_f1 = np.array([]) + pred_labels_f2 = np.array([]) + print(len(val_data_loader)) + for data2, target2 in val_data_loader: + data2, target2 = data2.cuda(), target2.cuda() + data1, target1 = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(data1) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(target1.data).cpu().sum() + pred_labels_f1 = np.append(pred_labels_f1, pred.cpu().numpy()) + target_labels = np.append(target_labels, target2.cpu().numpy()) + pred = output2.data.max(1)[1] # get the index of the max log-probability + pred_labels_f2 = np.append(pred_labels_f2, pred.cpu().numpy()) + k = target1.data.size()[0] + correct2 += pred.eq(target1.data).cpu().sum() + size += k + acc1 = 1.0 * correct.numpy() / (1.0 * size) + acc2 = 1.0 * correct2.numpy() / (1.0 * size) + acc = max(acc1, acc2) + print('Accuracy: {:.4f}'.format(acc)) + + mat_path = os.path.join(args.save_path, 'test_{:.4f}.mat'.format(acc)) + if os.path.exists(mat_path): + sys.exit() + if correct2 > correct: + sio.savemat(mat_path, {'pred': pred_labels_f2, 'target': target_labels}) + else: + sio.savemat(mat_path, {'pred': pred_labels_f1, 'target': target_labels}) + class_names = val_data_loader.dataset.classes + plot_confusion_matrix(mat_path, class_names, args.save_path, acc) + sys.exit() + + '''set optimizer''' + g_params = [v for k, v in model.named_parameters() if 'gen' in k] + f_params = [v for k, v in model.named_parameters() if 'cls' in k] + + g_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, g_params), + args.lr, momentum=args.momentum, 
weight_decay=args.weight_decay) + f_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, f_params), + args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + '''training''' + for epoch in range(args.start_epoch, args.epochs): + train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch) + + prec = validate(test_dataset, model) + val_acc.append(prec) + is_best = prec > best_prec + best_prec = max(prec, best_prec) + + # save model + if epoch == args.epochs - 1 or (is_best and best_prec > 0.5): + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}.pth'.format(prec)) + if epoch == args.epochs - 1: + save_path = os.path.join(args.save_path, args.arch + '_' + args.measure + + '_{:.4f}_last.pth'.format(prec)) + torch.save({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + }, save_path) + print('saving!!!!') + + # plot graph + x_vals, y_vals, x_label, y_label, legend = [], [cls_loss, f_loss, g_loss, val_acc], \ + [], [], ["L_cls", "L_f", "L_g", "val_acc"] + for i in range(len(legend)): + if 'L_' in legend[i]: + x_vals.append(range(1, args.iter_num)) + x_label.append('iter_num(*10)') + y_label.append('loss') + else: + x_vals.append(range(1, epoch + 1 - args.start_epoch + 1)) + x_label.append('epoch_num') + y_label.append('acc') + plot_graph(x_vals, y_vals, x_label, y_label, legend) + + +def train(train_dataset, model, criterion, g_optimizer, f_optimizer, epoch): + model.train() + + for batch_idx, data in enumerate(train_dataset): + stage = 0 + if batch_idx * args.batch_size > 30000: + break + data1 = data['S'] + target1 = data['S_label'] + data2 = data['T'] + target2 = data['T_label'] + data1, target1 = data1.cuda(), target1.cuda() + data2, target2 = data2.cuda(), target2.cuda() + input = Variable(torch.cat((data1, data2), 0)) + target = Variable(target1) + + # Step A: train all networks to minimize the loss on source + g_optimizer.zero_grad() + f_optimizer.zero_grad() + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_cls = current_loss + current_loss.backward() + g_optimizer.step() + f_optimizer.step() + stage += 1 + + # Step B: train classifier to maximize discrepancy + g_optimizer.zero_grad() + f_optimizer.zero_grad() + + # block the G + for name, param in model.named_parameters(): + if 'gen' in name: + param.requires_grad = False + + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_f = current_loss + current_loss.backward() + # current_loss.backward(retain_graph=True) + f_optimizer.step() + stage += 1 + + # for p in model.parameters(): + # p.requires_grad = True + + # block the F + # for name, param in model.named_parameters(): + # param.requires_grad = True + # if 'cls' in name: + # param.requires_grad = False + # + # # confuse the F,use the same cal graph + # g_optimizer.zero_grad() + # current_loss = loss[stage] + # L_cf = current_loss + # current_loss.backward() + # + # # update the F and G + # for p in model.parameters(): + # p.requires_grad = True + # g_optimizer.step() + # f_optimizer.step() + # stage += 1 + + # Step C: train generator to minimize discrepancy + L_g = None + for i in range(args.num_k): + g_optimizer.zero_grad() + + # block the F + if i == 0: + for name, param in model.named_parameters(): + param.requires_grad = True + if 'cls' in name: + param.requires_grad = False + + logits = model(input) + loss = criterion(logits, target, epoch) + current_loss = loss[stage] + L_g = 
current_loss + current_loss.backward() + g_optimizer.step() + # stage += 1 + + # g_optimizer.zero_grad() + # logits = model(input) + # loss = criterion(logits, target, epoch) + # current_loss = loss[stage] + # L_cf = current_loss + # current_loss.backward() + # g_optimizer.step() + + for p in model.parameters(): + p.requires_grad = True + + # print result + if batch_idx % args.print_freq == 0: + print('Epoch: [{0}][{1}/{2}] loss:'.format + (epoch, batch_idx, len(train_dataset)), end='') + print('L_cls: {:.4f} L_f: {:.4f} L_g: {:.4f}'.format + (L_cls.item(), L_f.item(), L_g.item())) # .data[0] + args.iter_num = args.iter_num + 1 + cls_loss.append(L_cls.item()) + f_loss.append(L_f.item()) + g_loss.append(L_g.item()) + + +def validate(test_dataset, model): + model.eval() + correct = 0 + correct2 = 0 + size = 0 + with torch.no_grad(): + for batch_idx, data in enumerate(test_dataset): + if batch_idx * args.batch_size > 5000: + break + data2 = data['T'] + target2 = data['T_label'] + data2, target2 = data2.cuda(), target2.cuda() + test_data, test_target = Variable(data2, volatile=True), Variable(target2) + output1, output2 = model(test_data) + pred = output1.data.max(1)[1] # get the index of the max log-probability + correct += pred.eq(test_target.data).cpu().sum() + pred = output2.data.max(1)[1] + k = test_target.data.size()[0] + correct2 += pred.eq(test_target.data).cpu().sum() + size += k + f1_acc = 1.0 * correct.numpy() / (1.0 * size) + f2_acc = 1.0 * correct2.numpy() / (1.0 * size) + + print('f1_acc: {:.4f} f2_acc: {:.4f}'.format(f1_acc, f2_acc)) + + return max(f1_acc, f2_acc) + + +if __name__ == '__main__': + main() diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/.keep" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/__init__.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/__init__.py" new file mode 100644 index 0000000000000000000000000000000000000000..88ea97d436841690062404269950466e546f45f6 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/__init__.py" @@ -0,0 +1,5 @@ +from .resnet50 import * +from .source_only import * +from .cf_only import * +from .dis_only import * +from .zzd import * diff --git 
"a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/cf_only.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/cf_only.py" new file mode 100644 index 0000000000000000000000000000000000000000..b50b5694a6b1e23f5bc3c9115339235e35031d8c --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/cf_only.py" @@ -0,0 +1,162 @@ +import torch.nn as nn +from torchvision import models +import torch.nn.functional as F +import torch +from torch.autograd import Variable +import math + +__all__ = ['cf_only'] + + +class Model(nn.Module): + def __init__(self, pret=True, args=None): + super(Model, self).__init__() + self.dim = 2048 + option = args.arch + nc = args.nc + mid = 10000 + prob = 0.5 + + '''feature generator''' + if option == 'resnet18': + model_ft = models.resnet18(pretrained=pret) + self.dim = 512 + if option == 'resnet50' or option == 'cf_only': + model_ft = models.resnet50(pretrained=pret) + if option == 'resnet101': + model_ft = models.resnet101(pretrained=pret) + if option == 'resnet152': + model_ft = models.resnet152(pretrained=pret) + mod = list(model_ft.children()) + mod.pop() + self.gen = nn.Sequential(*mod) + + '''classifier''' + layers = [] + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(self.dim, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + for i in range(args.layers-1): + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(mid, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + layers.append(nn.Linear(mid, args.nc)) + self.cls1 = nn.Sequential(*layers) + self.cls2 = nn.Sequential(*layers) + + '''params ini''' + for name, param in self.named_modules(): + if 'cls' in name: + if isinstance(param, nn.Linear): + param.weight.data.normal_(0.0, 0.01) + param.bias.data.normal_(0.0, 0.01) + if isinstance(param, nn.BatchNorm1d): + param.weight.data.normal_(1.0, 0.01) + param.bias.data.fill_(0) + + def forward(self, x): + # generate feature + x = self.gen(x) + g = x.view(x.size(0), self.dim) + + # classify + f1 = self.cls1(g) + f2 = self.cls2(g) + + return f1, f2, x + + +class Loss(nn.Module): + def __init__(self, args): + super(Loss, self).__init__() + self.cls_loss = nn.CrossEntropyLoss() + self.eta = 1.0 + self.batch_size = args.batch_size + self.measure = args.measure + self.nc = args.nc + self.epochs = args.epochs + + def measure_dis(self, measure, output_t1, output_t2): + if measure == 'L1': + return 1 * torch.mean(torch.abs(output_t1 - output_t2)) + + def st_loss(self, output, area): + batch_size = output.size(0) + prob = F.softmax(output, dim=1) + if area == 'left': + if (prob.data[:, :self.nc].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, :self.nc].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, :self.nc].sum(1) + soft_weight_var).log().mean()) + else: + 
loss = -(prob[:, :self.nc].sum(1).log().mean()) + return loss + if area == 'right': + if (prob.data[:, self.nc:].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, self.nc:].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, self.nc:].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, self.nc:].sum(1).log().mean()) + return loss + + def em_loss(self, input): + batch_size = input.size(0) + prob = F.softmax(input, dim=1) + prob_source = prob[:, :self.nc] + prob_target = prob[:, self.nc:] + prob_sum = prob_target + prob_source + if (prob_sum.data.cpu() == 0).sum() != 0: # in case of log(0) + weight_sum = torch.FloatTensor(batch_size, self.nc).fill_(0) + weight_sum[prob_sum.data.cpu() == 0] = 1e-6 + weight_sum = Variable(weight_sum).cuda() + loss_sum = -(prob_sum + weight_sum).log().mul(prob_sum).sum(1).mean() + else: + loss_sum = -prob_sum.log().mul(prob_sum).sum(1).mean() + + return loss_sum + + def forward(self, logits, label, epoch): + output1 = logits[0] + output2 = logits[1] + output_s1 = output1[:self.batch_size, :] + output_s2 = output2[:self.batch_size, :] + output_t1 = output1[self.batch_size:, :] + output_t2 = output2[self.batch_size:, :] + output_pt1 = F.softmax(output_t1) + output_pt2 = F.softmax(output_t2) + output_sts = torch.cat((output_s1, output_s2), dim=1) + output_stt = torch.cat((output_t1, output_t2), dim=1) + + entropy_loss = - torch.mean(torch.log(torch.mean(output_pt1, 0)+1e-6)) + entropy_loss -= torch.mean(torch.log(torch.mean(output_pt2, 0)+1e-6)) + loss1 = self.cls_loss(output_s1, label) + loss2 = self.cls_loss(output_s2, label) + # dis_loss = self.measure_dis(self.measure, output_pt1, output_pt2) + domain_loss = self.st_loss(output_sts, area='left') + self.st_loss(output_stt, area='right') + st_loss1 = self.cls_loss(output_sts, label) + st_loss2 = self.cls_loss(output_sts, label + self.nc) + st_cat_loss = 0.5 * st_loss1 + 0.5 * st_loss2 + em_loss = self.em_loss(output_stt) + st_dom_loss = 0.5 * self.st_loss(output_stt, area='left') + 0.5 * self.st_loss(output_stt, area='right') + em_loss + + all_loss = loss1 + loss2 + 0.01 * entropy_loss + # f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + f_loss = loss1 + loss2 + 0.01 * entropy_loss + domain_loss + # g_loss = dis_loss + lam = 2 / (1 + math.exp(-1 * 10 * epoch / self.epochs)) - 1 + st_loss = st_cat_loss + lam * st_dom_loss + # g_loss = dis_loss + return all_loss, f_loss, st_loss + + +def cf_only(pretrained=True, args=None): + model = Model(pretrained, args) + loss_model = Loss(args) + return model, loss_model diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/dis_only.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/dis_only.py" new file mode 100644 index 0000000000000000000000000000000000000000..afc9bf0ced021edaebc9eb86e89f116163ec0638 --- /dev/null +++ 
"b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/dis_only.py" @@ -0,0 +1,159 @@ +import torch.nn as nn +from torchvision import models +import torch.nn.functional as F +import torch +from torch.autograd import Variable +import math + +__all__ = ['dis_only'] + + +class Model(nn.Module): + def __init__(self, pret=True, args=None): + super(Model, self).__init__() + self.dim = 2048 + option = args.arch + nc = args.nc + mid = 10000 + prob = 0.5 + + '''feature generator''' + if option == 'resnet18': + model_ft = models.resnet18(pretrained=pret) + self.dim = 512 + if option == 'resnet50' or option == 'dis_only': + model_ft = models.resnet50(pretrained=pret) + if option == 'resnet101': + model_ft = models.resnet101(pretrained=pret) + if option == 'resnet152': + model_ft = models.resnet152(pretrained=pret) + mod = list(model_ft.children()) + mod.pop() + self.gen = nn.Sequential(*mod) + + '''classifier''' + layers = [] + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(self.dim, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + for i in range(args.layers-1): + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(mid, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + layers.append(nn.Linear(mid, args.nc)) + self.cls1 = nn.Sequential(*layers) + self.cls2 = nn.Sequential(*layers) + + '''params ini''' + for name, param in self.named_modules(): + if 'cls' in name: + if isinstance(param, nn.Linear): + param.weight.data.normal_(0.0, 0.01) + param.bias.data.normal_(0.0, 0.01) + if isinstance(param, nn.BatchNorm1d): + param.weight.data.normal_(1.0, 0.01) + param.bias.data.fill_(0) + + def forward(self, x): + # generate feature + x = self.gen(x) + g = x.view(x.size(0), self.dim) + + # classify + f1 = self.cls1(g) + f2 = self.cls2(g) + + return f1, f2, x + + +class Loss(nn.Module): + def __init__(self, args): + super(Loss, self).__init__() + self.cls_loss = nn.CrossEntropyLoss() + self.eta = 1.0 + self.batch_size = args.batch_size + self.measure = args.measure + self.nc = args.nc + self.epochs = args.epochs + + def measure_dis(self, measure, output_t1, output_t2): + if measure == 'L1': + return 1 * torch.mean(torch.abs(output_t1 - output_t2)) + + def st_loss(self, output, area): + batch_size = output.size(0) + prob = F.softmax(output, dim=1) + if area == 'left': + if (prob.data[:, :self.nc].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, :self.nc].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, :self.nc].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, :self.nc].sum(1).log().mean()) + return loss + if area == 'right': + if (prob.data[:, self.nc:].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, self.nc:].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, self.nc:].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, self.nc:].sum(1).log().mean()) + return loss + + def em_loss(self, input): + batch_size = input.size(0) + prob = F.softmax(input, dim=1) + prob_source = prob[:, :self.nc] + prob_target = prob[:, self.nc:] + prob_sum = 
prob_target + prob_source + if (prob_sum.data.cpu() == 0).sum() != 0: # in case of log(0) + weight_sum = torch.FloatTensor(batch_size, self.nc).fill_(0) + weight_sum[prob_sum.data.cpu() == 0] = 1e-6 + weight_sum = Variable(weight_sum).cuda() + loss_sum = -(prob_sum + weight_sum).log().mul(prob_sum).sum(1).mean() + else: + loss_sum = -prob_sum.log().mul(prob_sum).sum(1).mean() + + return loss_sum + + def forward(self, logits, label, epoch): + output1 = logits[0] + output2 = logits[1] + output_s1 = output1[:self.batch_size, :] + output_s2 = output2[:self.batch_size, :] + output_t1 = output1[self.batch_size:, :] + output_t2 = output2[self.batch_size:, :] + output_pt1 = F.softmax(output_t1) + output_pt2 = F.softmax(output_t2) + output_sts = torch.cat((output_s1, output_s2), dim=1) + output_stt = torch.cat((output_t1, output_t2), dim=1) + + entropy_loss = - torch.mean(torch.log(torch.mean(output_pt1, 0)+1e-6)) + entropy_loss -= torch.mean(torch.log(torch.mean(output_pt2, 0)+1e-6)) + loss1 = self.cls_loss(output_s1, label) + loss2 = self.cls_loss(output_s2, label) + dis_loss = self.measure_dis(self.measure, output_pt1, output_pt2) + domain_loss = self.st_loss(output_sts, area='left') + self.st_loss(output_stt, area='right') + # st_loss1 = self.cls_loss(output_sts, label) st_loss2 = self.cls_loss(output_sts, label + self.nc) + # st_cat_loss = 0.5 * st_loss1 + 0.5 * st_loss2 em_loss = self.em_loss(output_stt) st_dom_loss = 0.5 * + # self.st_loss(output_stt, area='left') + 0.5 * self.st_loss(output_stt, area='right') + em_loss + + all_loss = loss1 + loss2 + 0.01 * entropy_loss + # f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + domain_loss + # g_loss = dis_loss + # st_loss = st_cat_loss + lam * st_dom_loss + g_loss = dis_loss + return all_loss, f_loss, g_loss + + +def dis_only(pretrained=True, args=None): + model = Model(pretrained, args) + loss_model = Loss(args) + return model, loss_model diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/resnet50.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/resnet50.py" new file mode 100644 index 0000000000000000000000000000000000000000..c120f3d84d956d0abad5931dca6f783062fae957 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/resnet50.py" @@ -0,0 +1,162 @@ +import torch.nn as nn +from torchvision import models +import torch.nn.functional as F +import torch +from torch.autograd import Variable +import math + +__all__ = ['resnet50'] + + +class Model(nn.Module): + def __init__(self, pret=True, args=None): + super(Model, self).__init__() + self.dim = 2048 + option = args.arch + nc = args.nc + mid = 10000 + prob = 0.5 + + '''feature generator''' + if option == 'resnet18': + model_ft = models.resnet18(pretrained=pret) + self.dim = 512 + if option == 'resnet50': + model_ft = models.resnet50(pretrained=pret) + if option == 'resnet101': + model_ft = 
models.resnet101(pretrained=pret) + if option == 'resnet152': + model_ft = models.resnet152(pretrained=pret) + mod = list(model_ft.children()) + mod.pop() + self.gen = nn.Sequential(*mod) + + '''classifier''' + layers = [] + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(self.dim, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + for i in range(args.layers-1): + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(mid, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + layers.append(nn.Linear(mid, args.nc)) + self.cls1 = nn.Sequential(*layers) + self.cls2 = nn.Sequential(*layers) + + '''params ini''' + for name, param in self.named_modules(): + if 'cls' in name: + if isinstance(param, nn.Linear): + param.weight.data.normal_(0.0, 0.01) + param.bias.data.normal_(0.0, 0.01) + if isinstance(param, nn.BatchNorm1d): + param.weight.data.normal_(1.0, 0.01) + param.bias.data.fill_(0) + + def forward(self, x): + # generate feature + x = self.gen(x) + g = x.view(x.size(0), self.dim) + + # classify + f1 = self.cls1(g) + f2 = self.cls2(g) + + return f1, f2, x + + +class Loss(nn.Module): + def __init__(self, args): + super(Loss, self).__init__() + self.cls_loss = nn.CrossEntropyLoss() + self.eta = 1.0 + self.batch_size = args.batch_size + self.measure = args.measure + self.nc = args.nc + self.epochs = args.epochs + + def measure_dis(self, measure, output_t1, output_t2): + if measure == 'L1': + return 1 * torch.mean(torch.abs(output_t1 - output_t2)) + + def st_loss(self, output, area): + batch_size = output.size(0) + prob = F.softmax(output, dim=1) + if area == 'left': + if (prob.data[:, :self.nc].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, :self.nc].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, :self.nc].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, :self.nc].sum(1).log().mean()) + return loss + if area == 'right': + if (prob.data[:, self.nc:].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, self.nc:].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, self.nc:].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, self.nc:].sum(1).log().mean()) + return loss + + def em_loss(self, input): + batch_size = input.size(0) + prob = F.softmax(input, dim=1) + prob_source = prob[:, :self.nc] + prob_target = prob[:, self.nc:] + prob_sum = prob_target + prob_source + if (prob_sum.data.cpu() == 0).sum() != 0: # in case of log(0) + weight_sum = torch.FloatTensor(batch_size, self.nc).fill_(0) + weight_sum[prob_sum.data.cpu() == 0] = 1e-6 + weight_sum = Variable(weight_sum).cuda() + loss_sum = -(prob_sum + weight_sum).log().mul(prob_sum).sum(1).mean() + else: + loss_sum = -prob_sum.log().mul(prob_sum).sum(1).mean() + + return loss_sum + + def forward(self, logits, label, epoch): + output1 = logits[0] + output2 = logits[1] + output_s1 = output1[:self.batch_size, :] + output_s2 = output2[:self.batch_size, :] + output_t1 = output1[self.batch_size:, :] + output_t2 = output2[self.batch_size:, :] + output_pt1 = F.softmax(output_t1) + output_pt2 = F.softmax(output_t2) + output_sts = torch.cat((output_s1, output_s2), dim=1) + output_stt = torch.cat((output_t1, output_t2), dim=1) + + entropy_loss = - 
torch.mean(torch.log(torch.mean(output_pt1, 0)+1e-6)) + entropy_loss -= torch.mean(torch.log(torch.mean(output_pt2, 0)+1e-6)) + loss1 = self.cls_loss(output_s1, label) + loss2 = self.cls_loss(output_s2, label) + dis_loss = self.measure_dis(self.measure, output_pt1, output_pt2) + domain_loss = self.st_loss(output_sts, area='left') + self.st_loss(output_stt, area='right') + st_loss1 = self.cls_loss(output_sts, label) + st_loss2 = self.cls_loss(output_sts, label + self.nc) + st_cat_loss = 0.5 * st_loss1 + 0.5 * st_loss2 + em_loss = self.em_loss(output_stt) + st_dom_loss = 0.5 * self.st_loss(output_stt, area='left') + 0.5 * self.st_loss(output_stt, area='right') + em_loss + + all_loss = loss1 + loss2 + 0.01 * entropy_loss + # f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + domain_loss + # g_loss = dis_loss + lam = 2 / (1 + math.exp(-1 * 10 * epoch / self.epochs)) - 1 + st_loss = st_cat_loss + lam * st_dom_loss + g_loss = dis_loss + return all_loss, f_loss, st_loss, g_loss + + +def resnet50(pretrained=True, args=None): + model = Model(pretrained, args) + loss_model = Loss(args) + return model, loss_model diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/source_only.py" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/source_only.py" new file mode 100644 index 0000000000000000000000000000000000000000..1c046d0e553b49fe3657ff1a9668541fa9ed6815 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/models/source_only.py" @@ -0,0 +1,149 @@ +import torch.nn as nn +from torchvision import models +import torch.nn.functional as F +import torch +from torch.autograd import Variable +import math + +__all__ = ['source_only'] + + +class Model(nn.Module): + def __init__(self, pret=True, args=None): + super(Model, self).__init__() + self.dim = 2048 + option = 'resnet50' + nc = args.nc + mid = 10000 + prob = 0.5 + + '''feature generator''' + if option == 'resnet18': + model_ft = models.resnet18(pretrained=pret) + self.dim = 512 + if option == 'resnet50': + model_ft = models.resnet50(pretrained=pret) + if option == 'resnet101': + model_ft = models.resnet101(pretrained=pret) + if option == 'resnet152': + model_ft = models.resnet152(pretrained=pret) + mod = list(model_ft.children()) + mod.pop() + self.gen = nn.Sequential(*mod) + + '''classifier''' + layers = [] + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(self.dim, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + for i in range(args.layers-1): + layers.append(nn.Dropout(p=prob)) + layers.append(nn.Linear(mid, mid)) + layers.append(nn.BatchNorm1d(mid, affine=True)) + layers.append(nn.ReLU(inplace=True)) + + layers.append(nn.Linear(mid, args.nc)) + self.cls1 = nn.Sequential(*layers) + self.cls2 = nn.Sequential(*layers) + + '''params ini''' + for name, param in self.named_modules(): + if 'cls' in name: + if 
isinstance(param, nn.Linear): + param.weight.data.normal_(0.0, 0.01) + param.bias.data.normal_(0.0, 0.01) + if isinstance(param, nn.BatchNorm1d): + param.weight.data.normal_(1.0, 0.01) + param.bias.data.fill_(0) + + def forward(self, x): + # generate feature + x = self.gen(x) + g = x.view(x.size(0), self.dim) + + # classify + f1 = self.cls1(g) + f2 = self.cls2(g) + + return f1, f2, x + + +class Loss(nn.Module): + def __init__(self, args): + super(Loss, self).__init__() + self.cls_loss = nn.CrossEntropyLoss() + self.eta = 1.0 + self.batch_size = args.batch_size + self.measure = args.measure + self.nc = args.nc + self.epochs = args.epochs + + def measure_dis(self, measure, output_t1, output_t2): + if measure == 'L1': + return 1 * torch.mean(torch.abs(output_t1 - output_t2)) + + def st_loss(self, output, area): + batch_size = output.size(0) + prob = F.softmax(output, dim=1) + if area == 'left': + if (prob.data[:, :self.nc].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, :self.nc].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, :self.nc].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, :self.nc].sum(1).log().mean()) + return loss + if area == 'right': + if (prob.data[:, self.nc:].sum(1) == 0).sum() != 0: # in case of log(0) + soft_weight = torch.FloatTensor(batch_size).fill_(0) + soft_weight[prob[:, self.nc:].sum(1).data.cpu() == 0] = 1e-6 + soft_weight_var = Variable(soft_weight).cuda() + loss = -((prob[:, self.nc:].sum(1) + soft_weight_var).log().mean()) + else: + loss = -(prob[:, self.nc:].sum(1).log().mean()) + return loss + + def em_loss(self, input): + batch_size = input.size(0) + prob = F.softmax(input, dim=1) + prob_source = prob[:, :self.nc] + prob_target = prob[:, self.nc:] + prob_sum = prob_target + prob_source + if (prob_sum.data.cpu() == 0).sum() != 0: # in case of log(0) + weight_sum = torch.FloatTensor(batch_size, self.nc).fill_(0) + weight_sum[prob_sum.data.cpu() == 0] = 1e-6 + weight_sum = Variable(weight_sum).cuda() + loss_sum = -(prob_sum + weight_sum).log().mul(prob_sum).sum(1).mean() + else: + loss_sum = -prob_sum.log().mul(prob_sum).sum(1).mean() + + return loss_sum + + def forward(self, logits, label, epoch): + output1 = logits[0] + output2 = logits[1] + output_s1 = output1[:self.batch_size, :] + output_s2 = output2[:self.batch_size, :] + output_t1 = output1[self.batch_size:, :] + output_t2 = output2[self.batch_size:, :] + output_pt1 = F.softmax(output_t1) + output_pt2 = F.softmax(output_t2) + + entropy_loss = - torch.mean(torch.log(torch.mean(output_pt1, 0)+1e-6)) + entropy_loss -= torch.mean(torch.log(torch.mean(output_pt2, 0)+1e-6)) + loss1 = self.cls_loss(output_s1, label) + loss2 = self.cls_loss(output_s2, label) + + all_loss = loss1 + loss2 + 0.01 * entropy_loss + # f_loss = loss1 + loss2 - self.eta * dis_loss + 0.01 * entropy_loss + + return all_loss + + +def source_only(pretrained=True, args=None): + model = Model(pretrained, args) + loss_model = Loss(args) + return model, loss_model diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/utils.py" 
"b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/utils.py" new file mode 100644 index 0000000000000000000000000000000000000000..4c4a166624a0defdbaa10a78b7a21c9bdf975747 --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/utils.py" @@ -0,0 +1,130 @@ +import scipy.io as sio +import itertools +import numpy as np +import matplotlib.pyplot as plt +import os +from sklearn.metrics import confusion_matrix +from torchvision import datasets, transforms +import torch +from torch.utils.data import dataloader +from sklearn.manifold import TSNE +from PIL import Image + + +def get_pair(pair_str): + if pair_str == 'U_A': + return 'A_U' + elif pair_str == 'U_N': + return 'N_U' + elif pair_str == 'A_N': + return 'N_A' + else: + return pair_str + + +def plot_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): + if normalize: + cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] + print("Generating normalized confusion matrix...") + else: + print('Generating confusion matrix without normalization') + + # print(cm) + + plt.imshow(cm, interpolation='nearest', cmap=cmap) + plt.title(title) + plt.colorbar() + tick_marks = np.arange(len(classes)) + plt.xticks(tick_marks, classes, rotation=45) + plt.yticks(tick_marks, classes) + + fmt = '.2f' if normalize else 'd' + thresh = cm.max() / 2. + for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): + plt.text(j, i, format(cm[i, j], fmt), + horizontalalignment="center", + color="white" if cm[i, j] > thresh else "black") + + plt.ylabel('True label') + plt.xlabel('Predicted label') + plt.tight_layout() + + +def plot_confusion_matrix(mat_path, class_names, save_prefix, acc): + train_fn = sio.loadmat(mat_path) + y_test = train_fn['target'] + y_pred = train_fn['pred'] + cnf_matrix = confusion_matrix(y_test.T, y_pred.T) + np.set_printoptions(precision=2) + + # Plot non-normalized confusion matrix + # plt.figure() + # plot_matrix(cnf_matrix, classes=class_names, + # title='Confusion matrix, without normalization') + + # Plot normalized confusion matrix + plt.figure(figsize=(16, 9)) + plot_matrix(cnf_matrix, classes=class_names, normalize=True, + title='Normalized confusion matrix') + save_path = os.path.join(save_prefix, 'Figures') + if not os.path.exists(save_path): + os.makedirs(save_path) + plt.savefig(os.path.join(save_path, 'test_{:.4f}.png'.format(acc)), format='png', dpi=2000) + plt.close('all') + + +def load_training(root_path, dir, batch_size, kwargs): + transform = transforms.Compose( + [ + transforms.RandomResizedCrop(size=256, scale=(0.2, 1.0), + ratio=(1. / 1., 1. 
/ 1.), interpolation=Image.BILINEAR), + transforms.Resize(256), + transforms.RandomCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor()]) + data = datasets.ImageFolder(root=root_path + dir, transform=transform) + train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs) + return train_loader + + +def load_testing(root_path, dir, batch_size, kwargs): + transform = transforms.Compose( + [transforms.Resize([224, 224]), + transforms.ToTensor()]) + data = datasets.ImageFolder(root=root_path + dir, transform=transform) + test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, **kwargs) + return test_loader + + +def load_predict(root_path, dir, batch_size, kwargs): + transform = transforms.Compose( + [transforms.Resize([224, 224]), + transforms.ToTensor()]) + data = datasets.ImageFolder(root=root_path + dir, transform=transform) + pre_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, **kwargs) + return pre_loader + + +def get_color(label): + if label == 0: + return 'blue' + return 'red' + + +def plot_feature(data, label): + x_min, x_max = np.min(data, 0), np.max(data, 0) + data = (data - x_min) / (x_max - x_min) + + fig = plt.figure() + for i in range(data.shape[0]): + plt.scatter(data[i, 0], data[i, 1], s=100, c=get_color(label[i])) + plt.xticks([]) + plt.yticks([]) + return fig + + +def tsne_visualize(data, label): + tsne = TSNE(n_components=2) + result = tsne.fit_transform(data) + fig = plot_feature(result, label) + return fig diff --git "a/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/visualize.sh" "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/visualize.sh" new file mode 100644 index 0000000000000000000000000000000000000000..b9f337f65b3dc8dcb84c8a713e1208a60ad02b1b --- /dev/null +++ "b/code/2022_autumn/\351\242\234\345\215\232\351\227\273-\345\237\272\344\272\216\345\237\237\350\207\252\351\200\202\345\272\224\347\232\204\350\267\250\345\237\237\351\201\245\346\204\237\345\234\272\346\231\257\345\210\206\347\261\273/visualize.sh" @@ -0,0 +1,9 @@ +source=aid +target=ucm +gpuid=2 +version=0429 + +python main.py -v ${version} --gpu ${gpuid} -src ${source} -tar ${target} --mode visualize --acc 0.8806 +#python main2.py -v ${version}_src --gpu ${gpuid} -src ${source} -tar ${target} --arch source_only --mode visualize --acc 0.7033 +#python main_cf.py -v ${version}_cf --gpu ${gpuid} -src ${source} -tar ${target} --arch cf_only --mode visualize --acc 0.8757 +#python main_dis.py -v ${version}_dis --gpu ${gpuid} -src ${source} -tar ${target} --arch dis_only --mode visualize --acc 0.8424 \ No newline at end of file
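
The train() function in the main script above alternates three updates per batch in the style of bi-classifier discrepancy training: Step A updates the generator and both classifiers on the source loss, Step B freezes the generator ('gen' parameters) and updates only the classifiers ('cls' parameters) on the second loss term, and Step C freezes the classifiers and updates the generator num_k times on the third term. The following is a minimal sketch of that schedule, assuming a model whose parameter names contain 'gen'/'cls' as in the files above and a criterion returning the same loss tuple; it is an illustrative outline, not a drop-in replacement for the script.

    def set_requires_grad(model, frozen_prefix=None):
        # Freeze parameters whose names contain frozen_prefix; unfreeze everything else.
        for name, param in model.named_parameters():
            param.requires_grad = frozen_prefix is None or frozen_prefix not in name


    def train_step(model, criterion, g_optimizer, f_optimizer, inputs, labels, epoch, num_k=4):
        # Step A: train the generator ('gen') and both classifiers ('cls') on the source loss.
        set_requires_grad(model)
        g_optimizer.zero_grad()
        f_optimizer.zero_grad()
        l_cls = criterion(model(inputs), labels, epoch)[0]
        l_cls.backward()
        g_optimizer.step()
        f_optimizer.step()

        # Step B: freeze the generator and update only the classifiers on the
        # second loss term returned by the Loss modules above.
        set_requires_grad(model, frozen_prefix='gen')
        g_optimizer.zero_grad()
        f_optimizer.zero_grad()
        l_f = criterion(model(inputs), labels, epoch)[1]
        l_f.backward()
        f_optimizer.step()

        # Step C: freeze the classifiers and update the generator num_k times
        # on the third term.
        set_requires_grad(model, frozen_prefix='cls')
        for _ in range(num_k):
            g_optimizer.zero_grad()
            l_g = criterion(model(inputs), labels, epoch)[2]
            l_g.backward()
            g_optimizer.step()

        set_requires_grad(model)  # restore gradients for the next batch
        return l_cls.item(), l_f.item(), l_g.item()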
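
The st_loss and em_loss terms in the Loss modules operate on the concatenation of the two nc-way heads into a 2*nc-way prediction: source samples are pushed toward the first ('left') half and target samples toward the second ('right') half, so the two halves act as domain-specific class scores. A small numerical sketch of those terms, with nc chosen arbitrarily and the log(0) guard simplified to an unconditional epsilon (the original code only adds 1e-6 to rows whose sum is exactly zero):

    import torch
    import torch.nn.functional as F

    nc = 19                                   # classes per domain (illustrative)
    logits = torch.randn(8, 2 * nc)           # concatenated [head-1 | head-2] outputs

    prob = F.softmax(logits, dim=1)
    p_left = prob[:, :nc].sum(dim=1)          # mass on the first nc columns ("source half")
    p_right = prob[:, nc:].sum(dim=1)         # mass on the last nc columns ("target half")

    # st_loss drives the chosen half toward probability mass 1.
    loss_left = -(p_left + 1e-6).log().mean()
    loss_right = -(p_right + 1e-6).log().mean()

    # em_loss is an entropy-style term over the element-wise sum of the two halves.
    p_sum = prob[:, :nc] + prob[:, nc:]
    em = -(p_sum + 1e-6).log().mul(p_sum).sum(dim=1).mean()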
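
One detail worth noting in the Model classes above: cls1 and cls2 are both built as nn.Sequential(*layers) from the same layers list, so the two heads wrap the same Linear/BatchNorm module instances and share parameters (their outputs differ during training only through dropout). If two independent classifiers are intended, as the discrepancy-based losses suggest, each head needs its own modules; one possible factory, reusing the hyper-parameters above (dim, mid, nc, layers, prob), is sketched below as an assumption-laden alternative rather than the repository's code.

    import torch.nn as nn

    def make_classifier(dim=2048, mid=10000, nc=19, n_layers=2, prob=0.5):
        # Build a fresh stack of modules so each call returns an independent head.
        blocks = [nn.Dropout(p=prob), nn.Linear(dim, mid),
                  nn.BatchNorm1d(mid, affine=True), nn.ReLU(inplace=True)]
        for _ in range(n_layers - 1):
            blocks += [nn.Dropout(p=prob), nn.Linear(mid, mid),
                       nn.BatchNorm1d(mid, affine=True), nn.ReLU(inplace=True)]
        blocks.append(nn.Linear(mid, nc))
        return nn.Sequential(*blocks)

    # cls1 = make_classifier()   # two calls -> two separate parameter sets
    # cls2 = make_classifier()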
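
The visualization helpers in utils.py can also be exercised on their own. A hypothetical standalone usage of tsne_visualize (the feature array, domain labels and output filename below are made up for illustration) might look like:

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')            # headless backend, matching the main script
    import matplotlib.pyplot as plt

    from utils import tsne_visualize

    features = np.random.randn(200, 2048)      # pooled generator features, shape (N, D)
    domains = np.repeat([0, 1], 100)            # 0 = source, 1 = target
    fig = tsne_visualize(features, domains)     # TSNE -> 2-D scatter, blue vs red points
    fig.savefig('tsne_example.png', dpi=200)
    plt.close(fig)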