"""Train and evaluate a ResNet-50 classifier for 20-class scene recognition.

The script trains on an ImageFolder dataset (Places365 easy-format layout),
reporting top-1/top-5 precision, with a step learning-rate schedule.
"""

import argparse
import os
import shutil
import time

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data

# Best top-1 validation precision seen so far (updated in main()).
best_prec1 = 0


def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over ``train_loader``.

    Args:
        train_loader: iterable of ``(input, target)`` mini-batches.
        model: the network being trained (expected on GPU).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer whose parameters are updated in place.
        epoch: current epoch index, used only for progress printing.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - end)

        # NOTE: the original used `target.cuda(async=True)`; `async` became a
        # reserved keyword in Python 3.7, so this was a SyntaxError.
        target = target.cuda(non_blocking=True)

        # Variable() is deprecated since PyTorch 0.4 — tensors track gradients
        # directly now.
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss.  `loss.data[0]` / `prec1[0]` were
        # removed after PyTorch 0.4; `.item()` extracts the Python scalar.
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(prec1.item(), images.size(0))
        top5.update(prec5.item(), images.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        if i % 100 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))


def validate(val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader`` and return average top-1 precision."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()

    end = time.time()
    # `volatile=True` Variables were removed after PyTorch 0.4; torch.no_grad()
    # is the modern way to disable autograd during evaluation.
    with torch.no_grad():
        for i, (images, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(prec1.item(), images.size(0))
            top5.update(prec5.item(), images.size(0))

            batch_time.update(time.time() - end)
            end = time.time()
            if i % 10 == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, top1=top1, top5=top5))
    return top1.avg


class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0       # most recent value
        self.avg = 0       # running mean = sum / count
        self.sum = 0       # weighted sum of all values
        self.count = 0     # total weight seen so far

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (e.g. batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch):
    """Set the LR to 0.15 decayed by a factor of 0.13 every 30 epochs.

    (The original docstring claimed a decay factor of 10; the code has always
    used 0.13 — the docstring is corrected here, behavior is unchanged.)
    """
    lr = 0.15 * (0.13 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for each k in ``topk``.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) tensor of true class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of scalar tensors, each the percentage of samples whose true
        class is among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): the row slice of a transposed
        # tensor is not guaranteed contiguous in newer PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def main():
    """Build the model and data loaders, then train for 30 epochs."""
    global best_prec1

    # torchvision is only needed when actually training, so it is imported
    # here rather than at module level.
    import torchvision.transforms as transforms
    import torchvision.datasets as datasets
    import torchvision.models as models

    # The original passed `number_of_classes=20`, which is not a valid
    # torchvision keyword (TypeError); the parameter is `num_classes`.
    model = models.resnet50(num_classes=20)
    model = torch.nn.DataParallel(model).cuda()

    data_root = "/mnt/d/TrainingSet/places365standard_easyformat"
    traindir = os.path.join(data_root, "train")
    valdir = os.path.join(data_root, "val")
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # RandomSizedCrop/Scale are the deprecated names of
    # RandomResizedCrop/Resize; behavior is identical.
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        shuffle=True, batch_size=32,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        shuffle=False, batch_size=32,
        pin_memory=True)

    criterion = nn.CrossEntropyLoss().cuda()

    # The original omitted `lr`, which SGD requires (TypeError).  The value is
    # immediately overwritten by adjust_learning_rate() each epoch, so the
    # schedule's epoch-0 value (0.15) is used here.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.15)

    epochs = 30

    for epoch in range(epochs):
        adjust_learning_rate(optimizer, epoch)

        train(train_loader, model, criterion, optimizer, epoch)

        prec1 = validate(val_loader, model, criterion)

        best_prec1 = max(prec1, best_prec1)


if __name__ == '__main__':
    main()