@@ -1,9 +1,11 @@
+import glob
 import os
-import numpy
+import random
 import torch
-from torch import Tensor
 from torch.utils.data import Dataset, DataLoader
 import pytorch_lightning as pl
+from PIL import Image
+from torchvision import transforms


 class DataModule(pl.LightningDataModule):
@@ -17,47 +19,45 @@ class DataModule(pl.LightningDataModule):
         self.dataset_path = dataset_path

     def setup(self, stage=None) -> None:
-        # Get the list of all data
-        dataset_list = self.get_dataset_list()
+        k_fold_dataset_list = self.get_k_fold_dataset_list()
         if stage == 'fit' or stage is None:
-            dataset_train, dataset_val = self.get_dataset_lists(dataset_list)
-            self.train_dataset = CustomDataset(dataset_train, self.config)
-            self.val_dataset = CustomDataset(dataset_val, self.config)
+            dataset_train, dataset_val = self.get_fit_dataset_lists(k_fold_dataset_list)
+            self.train_dataset = CustomDataset(self.dataset_path, dataset_train, self.config, 'train')
+            self.val_dataset = CustomDataset(self.dataset_path, dataset_val, self.config, 'train')
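+            # both splits come from the train folder, so the validation set also reads train/label.txt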
         if stage == 'test' or stage is None:
-            self.test_dataset = CustomDataset(dataset_list, self.config)
+            dataset_test = self.get_test_dataset_lists(k_fold_dataset_list)
+            self.test_dataset = CustomDataset(self.dataset_path, dataset_test, self.config, 'test')

-    def get_dataset_list(self):
-        if not os.path.exists(self.dataset_path + '/dataset_list.txt'):
-            # Build the dataset for the data-fitting task
-            dataset = torch.randn(self.config['dataset_len'], self.config['dim_in'] + 1)
-            noise = torch.randn(self.config['dataset_len'])
-            dataset[:, self.config['dim_in']] = torch.cos(1.5 * dataset[:, 0]) * (dataset[:, 1] ** 2.0) + torch.cos(
-                torch.sin(dataset[:, 2] ** 3)) + torch.arctan(dataset[:, 4]) + noise
-            assert (dataset[torch.isnan(dataset)].shape[0] == 0)
-            written = [' '.join([str(temp) for temp in dataset[cou, :].tolist()]) for cou in range(dataset.shape[0])]
+    def get_k_fold_dataset_list(self):
+        # Get the list of data used for k-fold splitting, and save it to a file
+        if not os.path.exists(self.dataset_path + '/k_fold_dataset_list.txt'):
+            # Collect the list of data used for k-fold splitting
+            dataset = glob.glob(self.dataset_path + '/train/image/*.png')
+            random.shuffle(dataset)
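+            # cache the shuffled order to disk so every fold reuses the same split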
+            written = dataset
-            with open(self.dataset_path + '/dataset_list.txt', 'w', encoding='utf-8') as f:
+            with open(self.dataset_path + '/k_fold_dataset_list.txt', 'w', encoding='utf-8') as f:
                 for line in written:
-                    f.write(line + '\n')
-            print('A new data list has been generated')
+                    f.write(line.replace('\\', '/') + '\n')
+            print('A new k-fold data list has been generated')
         else:
-            dataset_list = open(self.dataset_path + '/dataset_list.txt').readlines()
-            # Build the dataset for the data-fitting task
-            dataset_list = [[float(temp) for temp in item.strip('\n').split(' ')] for item in dataset_list]
-            dataset = torch.Tensor(dataset_list).float()
+            dataset = open(self.dataset_path + '/k_fold_dataset_list.txt').readlines()
+            dataset = [item.strip('\n') for item in dataset]
         return dataset

-    def get_dataset_lists(self, dataset_list: Tensor):
+    def get_fit_dataset_lists(self, dataset_list: list):
         # Get the number of samples in one fold and the number of leftover samples that cannot form a full fold
-        num_1fold, remainder = divmod(self.config['dataset_len'], self.k_fold)
+        num_1fold, remainder = divmod(len(dataset_list), self.k_fold)
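+        # e.g. (hypothetical numbers) 103 samples with k_fold=5 gives num_1fold=20, remainder=3, so each validation fold holds num_1fold + remainder samples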
         # Split all the data into training, validation, and test sets
-        dataset_val = dataset_list[num_1fold * self.kth_fold:(num_1fold * (self.kth_fold + 1) + remainder), :]
-        temp = torch.ones(dataset_list.shape[0])
-        temp[num_1fold * self.kth_fold:(num_1fold * (self.kth_fold + 1) + remainder)] = 0
-        dataset_train = dataset_list[temp == 1]
+        dataset_val = dataset_list[num_1fold * self.kth_fold:(num_1fold * (self.kth_fold + 1) + remainder)]
+        del (dataset_list[num_1fold * self.kth_fold:(num_1fold * (self.kth_fold + 1) + remainder)])
+        dataset_train = dataset_list
         return dataset_train, dataset_val

+    def get_test_dataset_lists(self, dataset_list):
+        dataset = glob.glob(self.dataset_path + '/test/image/*.png')
+        return dataset

     def train_dataloader(self):
         return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers,
                           pin_memory=True)
@@ -72,13 +72,19 @@ class DataModule(pl.LightningDataModule):

 class CustomDataset(Dataset):
-    def __init__(self, dataset, config):
+    def __init__(self, dataset_path, dataset, config, type):
         super().__init__()
-        self.x = dataset[:, 0:config['dim_in']]
-        self.y = dataset[:, config['dim_in']]
+        self.dataset = dataset
+        self.trans = transforms.ToTensor()
+        self.labels = open(dataset_path + '/' + type + '/label.txt').readlines()

     def __getitem__(self, idx):
-        return self.x[idx, :], self.y[idx]
+        image_path = self.dataset[idx]
+        image_name = os.path.basename(image_path)
+        image = Image.open(image_path)
+        image = self.trans(image)
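+        # label.txt is expected to hold one integer class per line, indexed by the image's numeric file name (e.g. 42.png -> line 42)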
+        label = torch.Tensor([int(self.labels[int(image_name.strip('.png'))].strip('\n'))])
+        return image_name, image, label.long()

     def __len__(self):
-        return self.x.shape[0]
+        return len(self.dataset)
@@ -32,15 +32,15 @@ def main(stage,
     :param precision: Training precision; full precision is 32, half precision is 16, 64 is also possible. The precision is the number of bits used by each parameter's type
     :param seed:
-    :param dataset_path: Dataset path; its directory contains the dataset, the labels, and the naming list of all data
+    :param dataset_path: Dataset path; its directory contains the dataset folder, the label folder, and the naming list of all data
     :param gpus:
     :param tpu_cores:
-    :param version_nth: Version number of the first version of these folds
+    :param version_nth: Whether resuming training or testing, always the version number of the first version of these folds
     :param path_final_save:
     :param every_n_epochs: Set a checkpoint every n epochs
     :param save_top_k:
     :param kth_fold_start: Which fold to start from; when resuming training, kth_fold_start is the fold being resumed, and the first value is 0.
-        When not resuming training, this value can be adjusted to control the number of training runs
+        When not resuming training, this value can be adjusted to control the number of training runs;
     :param k_fold:
     """
     # Frequently changed parameters are passed as input arguments of main
@@ -70,8 +70,8 @@ def main(stage,
     save_checkpoint = SaveCheckpoint(seed=seed, max_epochs=max_epochs,
                                      path_final_save=path_final_save,
                                      every_n_epochs=every_n_epochs, verbose=True,
-                                     monitor='Validation loss', save_top_k=save_top_k,
-                                     mode='min')
+                                     monitor='Validation acc', save_top_k=save_top_k,
+                                     mode='max')
     training_module = TrainModule(config=config)
     if kth_fold != kth_fold_start or load_checkpoint_path is None:
         print('Running initial training')
@@ -101,6 +101,6 @@ def main(stage,
 if __name__ == "__main__":
     main('fit', max_epochs=2, batch_size=32, precision=16, seed=1234, dataset_path='./dataset', k_fold=5,
          # gpus=1,
-         # version_nth=8,  # version number of the first version of these folds
+         # version_nth=8,
          kth_fold_start=4,
          )
@@ -1,51 +0,0 @@
-import math
-
-import torch.nn as nn
-
-from network_module.activation import jdlu, JDLU
-
-
-class MLPLayer(nn.Module):
-    def __init__(self, dim_in, dim_out, res_coef=0.0, dropout_p=0.1):
-        super().__init__()
-        self.linear = nn.Linear(dim_in, dim_out)
-        self.res_coef = res_coef
-        self.activation = JDLU(dim_out)
-        self.dropout = nn.Dropout(dropout_p)
-        self.ln = nn.LayerNorm(dim_out)
-
-    def forward(self, x):
-        y = self.linear(x)
-        y = self.activation(y)
-        y = self.dropout(y)
-        if self.res_coef == 0:
-            return y
-        else:
-            return self.res_coef * x + y
-
-
-class MLP_JDLU(nn.Module):
-    def __init__(self, dim_in, dim, res_coef=0.5, dropout_p=0.1, n_layers=10):
-        super().__init__()
-        self.mlp = nn.ModuleList()
-        self.first_linear = MLPLayer(dim_in, dim)
-        self.n_layers = n_layers
-        for i in range(n_layers):
-            self.mlp.append(MLPLayer(dim, dim, res_coef, dropout_p))
-        self.final = nn.Linear(dim, 1)
-        self.apply(self.weight_init)
-
-    def forward(self, x):
-        x = self.first_linear(x)
-        for layer in self.mlp:
-            x = layer(x)
-        x = self.final(x)
-        return x.squeeze()
-
-    @staticmethod
-    def weight_init(m):
-        if isinstance(m, nn.Linear):
-            nn.init.xavier_normal_(m.weight)
-            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
-            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
-            nn.init.uniform_(m.bias, -bound, bound)
@@ -1,41 +0,0 @@
-import torch.nn as nn
-
-from network_module.activation import jdlu, JDLU
-
-
-class MLPLayer(nn.Module):
-    def __init__(self, dim_in, dim_out, res_coef=0.0, dropout_p=0.1):
-        super().__init__()
-        self.linear = nn.Linear(dim_in, dim_out)
-        self.res_coef = res_coef
-        self.activation = nn.ReLU()
-        self.activation1 = JDLU(dim_out)
-        self.dropout = nn.Dropout(dropout_p)
-        self.ln = nn.LayerNorm(dim_out)
-
-    def forward(self, x):
-        y = self.linear(x)
-        y = self.activation1(y)
-        # y = jdlu(y)
-        y = self.dropout(y)
-        if self.res_coef == 0:
-            return self.ln(y)
-        else:
-            return self.ln(self.res_coef * x + y)
-
-
-class MLP(nn.Module):
-    def __init__(self, dim_in, dim, res_coef=0.5, dropout_p=0.1, n_layers=10):
-        super().__init__()
-        self.mlp = nn.ModuleList()
-        self.first_linear = MLPLayer(dim_in, dim)
-        self.n_layers = n_layers
-        for i in range(n_layers):
-            self.mlp.append(MLPLayer(dim, dim, res_coef, dropout_p))
-        self.final = nn.Linear(dim, 1)
-
-    def forward(self, x):
-        x = self.first_linear(x)
-        for layer in self.mlp:
-            x = layer(x)
-        x = self.final(x)
-        return x.squeeze()
@@ -0,0 +1,185 @@
+'''
+Properly implemented ResNet-s for CIFAR10 as described in paper [1].
+
+The implementation and structure of this file is hugely influenced by [2]
+which is implemented for ImageNet and doesn't have option A for identity.
+Moreover, most of the implementations on the web are copy-paste from
+torchvision's resnet and have the wrong number of params.
+
+Proper ResNet-s for CIFAR10 (for fair comparison and etc.) have the following
+number of layers and parameters:
+
+name      | layers | params
+ResNet20  |    20  | 0.27M
+ResNet32  |    32  | 0.46M
+ResNet44  |    44  | 0.66M
+ResNet56  |    56  | 0.85M
+ResNet110 |   110  |  1.7M
+ResNet1202|  1202  | 19.4M
+
+which this implementation indeed has.
+
+Reference:
+[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+    Deep Residual Learning for Image Recognition. arXiv:1512.03385
+[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
+
+If you use this implementation in your work, please don't forget to mention the
+author, Yerlan Idelbayev.
+'''
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.nn.init as init
+from torch.autograd import Variable
+
+__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
+
+
+def _weights_init(m):
+    if isinstance(m, nn.Linear):
+        nn.init.kaiming_normal_(m.weight)
+        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
+        bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+        nn.init.uniform_(m.bias, -bound, bound)
+    elif isinstance(m, nn.Conv2d):
+        nn.init.kaiming_normal_(m.weight)
+
+
+class LambdaLayer(nn.Module):
+    def __init__(self, lambd):
+        super(LambdaLayer, self).__init__()
+        self.lambd = lambd
+
+    def forward(self, x):
+        return self.lambd(x)
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, in_planes, planes, stride=1, option='A'):
+        super(BasicBlock, self).__init__()
+        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
+        self.bn2 = nn.BatchNorm2d(planes)
+
+        self.shortcut = nn.Sequential()
+        if stride != 1 or in_planes != planes:
+            if option == 'A':
+                """
+                For CIFAR10 ResNet paper uses option A.
+                """
+                self.shortcut = LambdaLayer(lambda x:
+                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant",
+                                                  0))
+            elif option == 'B':
+                self.shortcut = nn.Sequential(
+                    nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
+                    nn.BatchNorm2d(self.expansion * planes)
+                )
+
+    def forward(self, x):
+        out = F.relu(self.bn1(self.conv1(x)))
+        out = self.bn2(self.conv2(out))
+        out += self.shortcut(x)
+        out = F.relu(out)
+        return out
+
+
+class ResNet(nn.Module):
+    def __init__(self, block, num_blocks, num_classes=10):
+        super(ResNet, self).__init__()
+        self.in_planes = 16
+
+        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(16)
+        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
+        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
+        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
+        self.linear = nn.Linear(64, num_classes)
+
+        self.apply(_weights_init)
+
+    def _make_layer(self, block, planes, num_blocks, stride):
+        strides = [stride] + [1] * (num_blocks - 1)
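+        # only the first block of each stage uses the given stride (downsampling); the remaining blocks keep the resolution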
+        layers = []
+        for stride in strides:
+            layers.append(block(self.in_planes, planes, stride))
+            self.in_planes = planes * block.expansion
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        out = F.relu(self.bn1(self.conv1(x)))
+        out = self.layer1(out)
+        out = self.layer2(out)
+        out = self.layer3(out)
+        out = F.avg_pool2d(out, out.size()[3])
+        out = out.view(out.size(0), -1)
+        out = self.linear(out)
+        return out
+
+
+def resnet20():
+    return ResNet(BasicBlock, [3, 3, 3])
+
+
+def resnet32():
+    return ResNet(BasicBlock, [5, 5, 5])
+
+
+def resnet44():
+    return ResNet(BasicBlock, [7, 7, 7])
+
+
+def resnet56():
+    return ResNet(BasicBlock, [9, 9, 9], num_classes=100)
+
+
+def resnet110():
+    return ResNet(BasicBlock, [18, 18, 18])
+
+
+def resnet1202():
+    return ResNet(BasicBlock, [200, 200, 200])
+
+
+def test(net):
+    import numpy as np
+    total_params = 0
+
+    for x in filter(lambda p: p.requires_grad, net.parameters()):
+        total_params += np.prod(x.data.numpy().shape)
+    print("Total number of params", total_params)
+    print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size()) > 1, net.parameters()))))
+
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the precision@k for the specified values of k"""
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
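+    # correct[i, j] is True when the i-th ranked prediction for sample j equals its target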
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].contiguous().view(-1).float().sum(0)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
+
+
+if __name__ == "__main__":
+    for net_name in __all__:
+        if net_name.startswith('resnet'):
+            print(net_name)
+            test(globals()[net_name]())
+            print()
@@ -92,10 +92,10 @@ class SaveCheckpoint(ModelCheckpoint):
                     f.write(line + '\n')
             # After each ckpt file update, move it to another location
             if self.path_final_save is not None:
-                zip_dir('./logs', './logs.zip')
-                if os.path.exists(self.path_final_save + '/logs.zip'):
-                    os.remove(self.path_final_save + '/logs.zip')
-                shutil.move('./logs.zip', self.path_final_save)
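+                # zip and ship only this run's log folder; presumably dirpath looks like 'logs\version_x\checkpoints' (Windows separators), hence the '\\' split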
+                zip_dir('./logs/default/' + self.dirpath.split('\\')[1], './' + self.dirpath.split('\\')[1] + '.zip')
+                if os.path.exists(self.path_final_save + '/' + self.dirpath.split('\\')[1] + '.zip'):
+                    os.remove(self.path_final_save + '/' + self.dirpath.split('\\')[1] + '.zip')
+                shutil.move('./' + self.dirpath.split('\\')[1] + '.zip', self.path_final_save)
         elif self.verbose:
             epoch = monitor_candidates.get("epoch")
             step = monitor_candidates.get("step")
@@ -6,8 +6,7 @@ from pytorch_lightning.utilities.types import EPOCH_OUTPUT
 from torch import nn
 import torch

-from network.MLP_JDLU import MLP_JDLU
-from network.MLP_ReLU import MLP_ReLU
+from network.res_net import resnet56, accuracy


 class TrainModule(pl.LightningModule):
@@ -15,30 +14,32 @@ class TrainModule(pl.LightningModule):
         super().__init__()
         self.time_sum = None
         self.config = config
-        if 1:
-            self.net = MLP_ReLU(config['dim_in'], config['dim'], config['res_coef'], config['dropout_p'],
-                                config['n_layers'])
-        else:
-            self.net = MLP_JDLU(config['dim_in'], config['dim'], config['res_coef'], config['dropout_p'],
-                                config['n_layers'])
-        self.loss = nn.MSELoss()
+        self.net = resnet56()
+        self.loss = nn.CrossEntropyLoss()

     def training_step(self, batch, batch_idx):
-        x, y = batch
-        x = self.net(x)
-        loss = self.loss(x, y.type(torch.float32))
+        _, input, label = batch
+        label = label.flatten()
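+        # CrossEntropyLoss expects a 1-D tensor of class indices, so the (B, 1) long labels from the dataset are flattened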
+        pred = self.net(input)
+        loss = self.loss(pred, label)
         self.log("Training loss", loss)
+        acc = accuracy(pred, label)[0]
+        self.log("Training acc", acc)
         return loss

     def validation_step(self, batch, batch_idx):
-        x, y = batch
-        x = self.net(x)
-        loss = self.loss(x, y.type(torch.float32))
+        _, input, label = batch
+        label = label.flatten()
+        pred = self.net(input)
+        loss = self.loss(pred, label)
         self.log("Validation loss", loss)
+        acc = accuracy(pred, label)[0]
+        self.log("Validation acc", acc)
         return loss

     def test_step(self, batch, batch_idx):
-        input, label = batch
+        _, input, label = batch
+        label = label.flatten()
         if self.time_sum is None:
             time_start = time.time()
             pred = self.net(input)
@@ -47,46 +48,14 @@ class TrainModule(pl.LightningModule):
             print(f'\nInference time: {self.time_sum:f}')
         else:
             pred = self.net(input)
-        loss = self.loss(pred.reshape(1), label.type(torch.float32))
+        loss = self.loss(pred, label)
         self.log("Test loss", loss)
+        acc = accuracy(pred, label)[0]
+        self.log("Test acc", acc)
         return input, label, pred

-    def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
-        records = numpy.empty((self.config['dataset_len'], 4))
-        # count
-        for cou in range(len(outputs)):
-            records[cou, 0] = outputs[cou][0][0, 0]
-            records[cou, 1] = outputs[cou][0][0, 1]
-            records[cou, 2] = outputs[cou][1][0]
-            records[cou, 3] = outputs[cou][2]
-        import plotly.graph_objects as go
-        trace0 = go.Mesh3d(x=records[:, 0],
-                           y=records[:, 1],
-                           z=records[:, 2],
-                           opacity=0.5,
-                           name='label'
-                           )
-        trace1 = go.Mesh3d(x=records[:, 0],
-                           y=records[:, 1],
-                           z=records[:, 3],
-                           opacity=0.5,
-                           name='pred'
-                           )
-        fig = go.Figure(data=[trace0, trace1])
-        fig.update_layout(
-            scene=dict(
-                # xaxis=dict(nticks=4, range=[-100, 100], ),
-                # yaxis=dict(nticks=4, range=[-50, 100], ),
-                # zaxis=dict(nticks=4, range=[-100, 100], ),
-                aspectratio=dict(x=1, y=1, z=0.5),
-            ),
-        )
-        fig.show()

     def configure_optimizers(self):
-        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
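+        # standard CIFAR ResNet recipe (He et al. 2015): SGD with momentum 0.9 and weight decay 1e-4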
+        optimizer = torch.optim.SGD(self.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
         return optimizer

     def load_pretrain_parameters(self):