@@ -5,8 +5,10 @@ import matplotlib.pyplot as plt
import mindspore  # core MindSpore module
import mindspore.nn as nn  # nn: MindSpore's neural-network layers and cells
import mindspore.dataset as ds  # dataset loading and processing utilities
from mindspore import context
from mindspore.dataset import GeneratorDataset
from mindspore.dataset.vision import c_transforms
from mindspore.common.initializer import Normal
import mindspore.dataset.vision.c_transforms as c_vision  # C++-backed vision transforms
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore.dataset.transforms.c_transforms import Compose  # Compose chains transforms into one pipeline
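A sketch (not part of this patch) of how these transform imports typically combine in a MindSpore input pipeline; the Resize/Rescale values below are illustrative assumptions:

```python
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore.dataset.transforms.c_transforms import Compose

transform = Compose([
    c_vision.Resize((32, 32)),           # resize to the network input size
    c_vision.Rescale(1.0 / 255.0, 0.0),  # scale pixels to [0, 1]
    c_vision.HWC2CHW(),                  # HWC -> CHW layout, as nn.Conv2d expects
])
# applied later via: dst = dst.map(operations=transform, input_columns="image")
```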
@@ -17,6 +19,7 @@ import pickle  # pickle is mainly used to serialize objects to disk
import PIL.Image as Image  # module for processing image data
import argparse  # command-line argument parsing

+context.set_context(mode=context.GRAPH_MODE)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
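GRAPH_MODE compiles the network before running it; a sketch (not part of this patch) of the eager alternative, which surfaces porting bugs as ordinary Python tracebacks:

```python
from mindspore import context

# PYNATIVE_MODE executes operators eagerly, easier to debug than graph compilation
context.set_context(mode=context.PYNATIVE_MODE)
```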
@@ -84,29 +87,20 @@ def lfw_dataset(lfw_path, shape_img):
Custom LeNet network
'''
class LeNet(nn.Cell):  # every MindSpore network must subclass nn.Cell
-    def __init__(self, channel=3, hidden=768, num_classes=10):  # defaults assume CIFAR-10: 3 channels, hidden width 768, 10 classes
-        super(LeNet, self).__init__()  # initialize the nn.Cell base class
-        act = nn.Sigmoid  # activation function: Sigmoid
-        # nn.SequentialCell is a sequential container: cells run in the order passed to the constructor (an OrderedDict also works)
-        self.body = nn.SequentialCell(  # network body; see https://mindspore.cn/docs/api/zh-CN/r1.5/api_python/nn/mindspore.nn.SequentialCell.html#mindspore.nn.SequentialCell
-            # conv: channel -> 12 channels, 5x5 kernel, padding 5 // 2 = 2 (integer division), stride 2
-            nn.Conv2d(channel, 12, kernel_size=5, padding=5 // 2, stride=2),
-            act(),  # Sigmoid activation after the convolution
-            # conv: 12 -> 12 channels, 5x5 kernel, padding 2, stride 2
-            nn.Conv2d(12, 12, kernel_size=5, padding=5 // 2, stride=2),
-            act(),
-            # conv: 12 -> 12 channels, 5x5 kernel, padding 2, stride 1
-            nn.Conv2d(12, 12, kernel_size=5, padding=5 // 2, stride=1),
-            act()
-        )
-        # fully connected layer mapping the hidden features to the class logits
-        self.fc = nn.SequentialCell(
-            nn.Dense(hidden, num_classes)
-        )
-        print("-----Maozi says everything is fine-----")
+    def __init__(self, num_class=10, channel=1, include_top=True):
+        super(LeNet, self).__init__()
+        self.conv1 = nn.Conv2d(channel, 6, 5, pad_mode='valid')
+        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
+        self.relu = nn.Sigmoid()  # named 'relu' but, as in the original DLG LeNet, actually a Sigmoid
+        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+        self.include_top = include_top
+        if self.include_top:
+            self.flatten = nn.Flatten()
+            self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
+            self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
+            self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
+
+        print("-----Maozi says everything is fine 1-----")
    # forward pass (note: MindSpore dispatches construct(), not forward(), so the framework never calls this method)
    def forward(self, x):
        out = self.body(x)  # run the input through the SequentialCell body
@@ -114,7 +108,7 @@ class LeNet(nn.Cell):  # every MindSpore network must subclass nn.Cell
        out = self.fc(out)  # map the features to a 10-way vector of class logits
        return out
print("-----猫子说一切正常-----")
|
|
|
|
print("-----猫子说一切正常2-----")
|
|
|
|
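One porting gap worth flagging here: the unchanged forward() above still references self.body and self.fc, which exist only in the removed `__init__`. A hypothetical construct() consistent with the '+' layers (an assumption, not part of this patch):

```python
    def construct(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        if not self.include_top:
            return x
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)  # (batch, num_class) logits
```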
'''
init weights
'''
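The reference DLG code fills weights uniformly in [-0.5, 0.5] via net.apply(weights_init); a MindSpore-flavored sketch of the same idea (hypothetical, the actual weights_init in this file may differ):

```python
from mindspore.common.initializer import initializer, Uniform

def weights_init_ms(net):
    # Uniform(0.5) draws from U(-0.5, 0.5), mirroring the original DLG init
    for p in net.trainable_params():
        p.set_data(initializer(Uniform(0.5), p.shape, p.dtype))
```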
@@ -136,7 +130,7 @@ def main():
    dataset = args.dataset  # dataset name parsed from the command line
    root_path = '.'
-    data_path = os.path.join(root_path, './data').replace('\\', '/')  # where the data lives; replace normalizes Windows separators
+    data_path = os.path.join(root_path, './data/cifar-10-batches-py').replace('\\', '/')  # where the data lives; replace normalizes Windows separators
    save_path = os.path.join(root_path, 'results/DLG_%s' % dataset).replace('\\', '/')  # where recovered images are saved

    lr = 1.0  # learning rate
@@ -160,32 +154,32 @@
        os.mkdir('results')
    if not os.path.exists(save_path):  # create the image save path if it does not exist
        os.mkdir(save_path)
    print('-------- called successfully ---------')
    '''
    load data
    '''
-    if ds == 'MNIST' or ds == 'mnist':  # check which dataset was requested (bug: ds is the dataset module alias, not the CLI value)
+    if dataset == 'MNIST' or dataset == 'mnist':  # check which dataset was requested
        image_shape = (28, 28)  # MNIST images are 28x28
        num_classes = 10  # ten classes: the digits 0-9
        channel = 1  # MNIST is grayscale, so a single channel
        hidden = 588  # width of the network's final fully connected layer
        dst = ds.MnistDataset(data_path, download=True)  # note: MindSpore's MnistDataset has no download argument
        print('-------- called successfully 11111 ---------')
-    elif ds == 'cifar10' or ds == 'CIFAR10':
+    elif dataset == 'cifar10' or dataset == 'CIFAR10':
        image_shape = (32, 32)  # CIFAR-10 images are 32x32
        num_classes = 10  # ten classes: truck, airplane, and so on
        channel = 3  # CIFAR-10 is RGB, so three channels
        hidden = 768  # width of the network's final fully connected layer
-        dst = ds.Cifar10Dataset(data_path, download=True)
+        dst = ds.Cifar10Dataset(data_path)
        print('-------- Maozi called successfully ---------')
-    elif ds == 'cifar100' or ds == 'CIFAR100':
+    elif dataset == 'cifar100' or dataset == 'CIFAR100':
        image_shape = (32, 32)  # CIFAR-100 images are 32x32
        num_classes = 100  # one hundred classes
        channel = 3  # CIFAR-100 is RGB, so three channels
        hidden = 768  # width of the network's final fully connected layer
-        dst = ds.Cifar100Dataset(data_path, download=True)
-    elif ds == 'lfw':
+        dst = ds.Cifar100Dataset(dataset_dir=data_path)
+    elif dataset == 'lfw':
        shape_img = (32, 32)
        num_classes = 5749
        channel = 3
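A caveat for the loop below: the dst[idx][0] indexing is a torchvision idiom, and MindSpore datasets are consumed through iterators instead. A sketch of fetching one sample, assuming dst as built above:

```python
it = dst.create_tuple_iterator(output_numpy=True)
image, label = next(it)  # numpy arrays; e.g. image.shape == (32, 32, 3) for CIFAR-10
```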
@@ -197,23 +191,23 @@
    for idx_net in range(num_exp):
-        net = LeNet(channel=channel, hidden=hidden, num_classes=num_classes)  # build the LeNet model
-        net.apply(weights_init)  # initialize the model's convolution weights
+        net = LeNet(channel=channel)  # build the LeNet model
+        # convolution weights are initialized in the constructor (weight_init=Normal(0.02))

        print('running %d|%d experiment' % (idx_net, num_exp))
-        net = net.to(device)

-        print('%s, Try to generate %d images' % ('DLG', num_dummy))
-        criterion = nn.CrossEntropyLoss().to(device)  # cross-entropy loss
+        print('%s, Try to generate %d images' % ('DLG', num_dummy))
+        context.set_context(device_target="GPU")
+        criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')  # cross-entropy loss; sparse=False expects one-hot labels
        imidx_list = []  # records the indices of the images being reconstructed

        for imidx in range(num_dummy):
            idx = args.index  # index of the image to reconstruct, from the command line
            imidx_list.append(idx)  # record the index
-            tmp_datum = tt(dst[idx][0]).float().to(device)  # pull the image at idx from the dataset and convert it to a tensor
+            tmp_datum = tt(dst[idx][0]).float()  # pull the image at idx from the dataset and convert it to a tensor
            tmp_datum = tmp_datum.view(1, *tmp_datum.size())  # add a batch dimension (inspect with .shape)
-            tmp_label = mindspore.Tensor([dst[idx][1]]).long().to(device)  # pull the matching label and convert it to a tensor
+            tmp_label = mindspore.Tensor([dst[idx][1]]).long()  # pull the matching label and convert it to a tensor
            tmp_label = tmp_label.view(1, )  # reshape the label into a length-1 vector
            if imidx == 0:  # imidx == 0 means only a single image is processed
                gt_data = tmp_datum  # gt_data holds the ground-truth image
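Because the criterion is built with sparse=False, it expects one-hot labels rather than class indices. A sketch of the conversion for tmp_label (shape (1,)), using num_classes from above:

```python
import mindspore
import mindspore.ops as ops
from mindspore import Tensor

onehot = ops.OneHot()
gt_onehot_label = onehot(tmp_label, num_classes,
                         Tensor(1.0, mindspore.float32),   # on_value
                         Tensor(0.0, mindspore.float32))   # off_value
```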
@@ -231,8 +225,8 @@
        original_dy_dx = list((_.detach().clone() for _ in dy_dx))

        # generate dummy data and label
-        dummy_data = mindspore.ops.StandardNormal(gt_data.size()).to(device).requires_grad_(True)
-        dummy_label = mindspore.ops.StandardNormal((gt_data.shape[0], num_classes)).to(device).requires_grad_(True)
+        # StandardNormal is instantiated, then called with a shape; Parameter replaces PyTorch's requires_grad_ idiom
+        dummy_data = mindspore.Parameter(mindspore.ops.StandardNormal()(gt_data.shape), name='dummy_data')
+        dummy_label = mindspore.Parameter(mindspore.ops.StandardNormal()((gt_data.shape[0], num_classes)), name='dummy_label')

        optimizer = mindspore.nn.Adam([dummy_data, dummy_label], learning_rate=lr)  # Adam optimizer (the keyword is learning_rate, not lr)
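For orientation, the objective these pieces feed into is DLG's gradient matching: drive the gradients produced by (dummy_data, dummy_label) toward the leaked original_dy_dx. A PyNative-style sketch under MindSpore 1.x semantics, assuming net, criterion, and the variables above (not a drop-in replacement for the original PyTorch closure):

```python
import mindspore
import mindspore.ops as ops
from mindspore import ParameterTuple

params = ParameterTuple(net.trainable_params())
reduce_sum = ops.ReduceSum()

def net_loss(data, label_logits):
    pred = net(data)
    # soft "label" obtained by softmaxing the trainable dummy_label logits
    return criterion(pred, ops.Softmax()(label_logits))

# gradients of the loss w.r.t. the network weights
grad_wrt_weights = ops.GradOperation(get_by_list=True)(net_loss, params)

def dlg_loss(data, label_logits):
    dummy_dy_dx = grad_wrt_weights(data, label_logits)
    diff = mindspore.Tensor(0.0, mindspore.float32)
    for dg, og in zip(dummy_dy_dx, original_dy_dx):
        diff += reduce_sum((dg - og) ** 2)  # squared distance, layer by layer
    return diff
```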