diff --git "a/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/resnet18_places365.pth.tar" "b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/resnet18_places365.pth.tar" new file mode 100644 index 0000000000000000000000000000000000000000..b5f71891a684a7c88707eca009b0efb5ea607e4c Binary files /dev/null and "b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/resnet18_places365.pth.tar" differ diff --git "a/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/test.py" "b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/test.py" new file mode 100644 index 0000000000000000000000000000000000000000..93a571244b0d5c21c69b633a5af3948547082107 --- /dev/null +++ "b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/test.py" @@ -0,0 +1,91 @@ +import torch +from torch.autograd import Variable as V +import torchvision.models as models +import torchvision.transforms as transforms +import torch.nn.functional as F +import torchvision.datasets as datasets +import os +from PIL import Image +import random + +import scipy.io as sio + + +model_file = 'resnet18_places365.pth.tar' + +model = models.__dict__['resnet18'](num_classes=365) +checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage) +state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()} +model.load_state_dict(state_dict) +model.eval() + +centre_crop = transforms.Compose([ + transforms.Resize((256,256)), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) +]) + + +normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + +file = "D:\\测试数据集\\scene.tar\\scene" + +ans = dict() + +valuemap = dict() + +for i in range(1,365): + valuemap[i] = 0 + +valuemap[274] = 10 +valuemap[146] = 9 +valuemap[222] = 9 +valuemap[223] = 9 +valuemap[321] = 9 +valuemap[215] = 8 +valuemap[208] = 6 +valuemap[203] = 5 +valuemap[285] = 5 +valuemap[217] = 3 +valuemap[75] = 2 +valuemap[99] = 2 + + + +for root, dirs, files in os.walk(file): + for d in dirs: + dist = os.path.join(file,d) + for droot, ddirs, dfiles in os.walk(dist): + dictionary = dict() + for f in dfiles: + f = os.path.join(dist,f) + img = Image.open(f) + input_img = V(centre_crop(img).unsqueeze(0)) + logit = model.forward(input_img) + h_x = F.softmax(logit, 1).data.squeeze() + probs, idx = h_x.sort(0, True) + key = valuemap[idx[1].item()] + if(key==8): + temp = random.randint(1,10) + if(temp >=5): + key = 7 + if(key==5): + temp = random.randint(1,10) + if(temp >=5): + key = 4 + if(key==2): + temp = random.randint(1,10) + if(temp >=5): + key = 1 + + if key in dictionary: + dictionary[key] += 1 + else : + dictionary[key] = 1 + print(d) + ans[d]=max(dictionary,key=dictionary.get) + + +sio.savemat("ans.mat", ans) \ No newline at end of file diff --git "a/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/train_model.py" "b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/train_model.py" new file mode 100644 index 0000000000000000000000000000000000000000..892bd5dd58eccfb751cca3522527289851cfe9a2 --- /dev/null +++ 
"b/code/2021_spring/\345\234\272\346\231\257\350\257\206\345\210\253-\346\230\223\346\267\256\347\254\240/train_model.py" @@ -0,0 +1,206 @@ +import argparse +import os +import shutil +import time + +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.backends.cudnn as cudnn +import torch.optim +import torch.utils.data +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models as models + +best_prec1 = 0 + +def main(): + global best_prec1 + model = models.resnet18(pretrained= True) + model = torch.nn.DataParallel(model).cuda() + #使用Torche中的resnet18模型 + + cudnn.benchmark = True + + traindir = os.path.join("D:\\TrainingSet\\places365standard_easyformat\\places365_standard","train") + valdir = os.path.join("D:\\TrainingSet\\places365standard_easyformat\\places365_standard","val") + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(traindir, transforms.Compose([ + transforms.RandomSizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])), + shuffle=True,batch_size=150, + pin_memory=True,num_workers=4) + + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Scale(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])), + shuffle=False,batch_size=150, + pin_memory=True,num_workers=4) + #进行训练集数据预处理 + + criterion = nn.CrossEntropyLoss().cuda() + + optimizer = torch.optim.SGD(model.parameters(), lr = 0.1, momentum=0.9, weight_decay=1e-4) + + epochs = 10 + + for epoch in range (epochs): + adjust_learning_rate(optimizer, epoch) + + + train(train_loader, model, criterion, optimizer, epoch) + + + prec1 = validate(val_loader, model, criterion) + + best_prec1 = max(prec1, best_prec1) + + torch.save(model,"D:\\PlaceCNN365") + + +def train(train_loader, model, criterion, optimizer, epoch): + batch_time = AverageMeter() + data_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + + data_time.update(time.time() - end) + + target = target.cuda() + input_var = torch.autograd.Variable(input) + target_var = torch.autograd.Variable(target) + + output = model(input_var) + loss = criterion(output, target_var) + + + prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking + losses.update(loss.item(), input.size(0)) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + + batch_time.update(time.time() - end) + end = time.time() + + if i % 10 == 0: + print('Epoch: [{0}][{1}/{2}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' + 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + epoch, i, len(train_loader), batch_time=batch_time, + data_time=data_time, loss=losses, top1=top1, top5=top5)) + + + +def validate(val_loader, model, criterion): + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + + model.eval() + + end = time.time() + for i, (input, target) in enumerate(val_loader): + target = target.cuda() + input_var = 
+
+
+def validate(val_loader, model, criterion):
+    batch_time = AverageMeter()
+    losses = AverageMeter()
+    top1 = AverageMeter()
+    top5 = AverageMeter()
+
+    model.eval()
+
+    end = time.time()
+    # gradients are not needed during evaluation
+    with torch.no_grad():
+        for i, (input, target) in enumerate(val_loader):
+            target = target.cuda(non_blocking=True)
+
+            output = model(input)
+            loss = criterion(output, target)
+
+            prec1, prec5 = accuracy(output, target, topk=(1, 5))  # pylint: disable=unbalanced-tuple-unpacking
+            losses.update(loss.item(), input.size(0))
+            top1.update(prec1.item(), input.size(0))
+            top5.update(prec5.item(), input.size(0))
+
+            batch_time.update(time.time() - end)
+            end = time.time()
+
+            if i % 10 == 0:
+                print('Test: [{0}/{1}]\t'
+                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
+                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
+                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
+                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
+                          i, len(val_loader), batch_time=batch_time, loss=losses,
+                          top1=top1, top5=top5))
+
+    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
+          .format(top1=top1, top5=top5))
+    return top1.avg
+
+
+def adjust_learning_rate(optimizer, epoch):
+    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs."""
+    lr = 0.1 * (0.1 ** (epoch // 30))
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the precision@k for the specified values of k."""
+    with torch.no_grad():
+        maxk = max(topk)
+        batch_size = target.size(0)
+
+        _, pred = output.topk(maxk, 1, True, True)
+        pred = pred.t()
+        # correct[j][i] is True if sample i's rank-j prediction hits the target
+        correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+        res = []
+        for k in topk:
+            # a sample counts as a hit if the target is anywhere in its top k
+            correct_k = correct[:k].reshape(-1).float().sum(0)
+            res.append(correct_k.mul_(100.0 / batch_size))
+        return res
+
+
+class AverageMeter(object):
+    """Computes and stores the current value and the running average."""
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / self.count
+
+
+if __name__ == '__main__':
+    main()
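For completeness, a minimal sketch of how the weights written by train_model.py could be reloaded for inference, mirroring the checkpoint-loading logic already used in test.py. The path "D:\\PlaceCNN365" is the one train_model.py saves to; everything else here is an assumption based on that script, not an existing file in this diff:

    import torch
    import torchvision.models as models

    # rebuild the architecture with the same 365-way head used during training
    model = models.resnet18(num_classes=365)
    state_dict = torch.load("D:\\PlaceCNN365", map_location="cpu")
    # training wrapped the model in DataParallel, so keys carry a 'module.' prefix
    state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
    model.eval()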