@@ -79,9 +79,6 @@ class AdversarialDefense(Defense):
         Returns:
             numpy.ndarray, loss of defense operation.
-
-        Examples:
-            >>> adv_defense.defense(inputs, labels)
         """
         inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels',
                                                 labels)
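
For reference, the removed `defense` example as a standalone sketch. The tiny
stand-in network, shapes, and optimizer settings are illustrative assumptions,
not part of the patch:

    import numpy as np
    from mindspore import nn
    from mindarmour.adv_robustness.defenses import AdversarialDefense

    # Deliberately small stand-in network; any Cell producing 10-class logits works.
    net = nn.Dense(784, 10)
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
    optimizer = nn.Momentum(net.trainable_params(), 0.01, 0.9)
    adv_defense = AdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)

    inputs = np.random.rand(32, 784).astype(np.float32)
    labels = np.eye(10)[np.random.randint(0, 10, 32)].astype(np.float32)  # one-hot
    loss = adv_defense.defense(inputs, labels)  # numpy.ndarray loss value
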
@@ -98,6 +98,9 @@ class SimilarityDetector(Detector):
        >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
        >>> detector.detect(benign_queries)
        >>> detections = detector.get_detection_interval()
+       >>> detector.detect_diff()
+       >>> detected_queries = detector.get_detected_queries()
+       >>> detector.transform(x_train)
    """

    def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000,
@@ -137,13 +140,6 @@ class SimilarityDetector(Detector):
         Raises:
             ValueError: The number of training data is less than
                 max_k_neighbor!
-
-        Examples:
-            >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
-            >>> perm = np.random.permutation(x_train.shape[0])
-            >>> benign_queries = x_train[perm[:10], :, :, :]
-            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
-            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
         """
         data = check_numpy_param('inputs', inputs)
         data_len = data.shape[0]
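
The removed `fit` example as a self-contained sketch. The flatten encoder and
the small `max_k_neighbor` are assumptions made so the snippet is consistent
(`fit` requires at least `max_k_neighbor` training samples):

    import numpy as np
    from mindspore import nn
    from mindspore.train import Model
    from mindarmour.adv_robustness.detectors import SimilarityDetector

    # Hypothetical feature extractor standing in for a trained encoder.
    encoder = Model(nn.Flatten())
    detector = SimilarityDetector(trans_model=encoder, max_k_neighbor=10)

    x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
    num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
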
@@ -192,14 +188,6 @@ class SimilarityDetector(Detector):
         Raises:
             ValueError: The parameters of threshold or num_of_neighbors are
                 not available.
-
-        Examples:
-            >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
-            >>> perm = np.random.permutation(x_train.shape[0])
-            >>> benign_queries = x_train[perm[:10], :, :, :]
-            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
-            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
-            >>> detector.detect(benign_queries)
         """
         if self._threshold is None or self._num_of_neighbors is None:
             msg = 'Explicit detection threshold and number of nearest ' \
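
The removed `detect` example, continuing from the `fit` sketch above
(`detector`, `x_train`, `num_nearest_neighbors`, and `thresholds` are the same
assumed objects):

    perm = np.random.permutation(x_train.shape[0])
    benign_queries = x_train[perm[:10], :, :, :]
    detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
    detector.detect(benign_queries)
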
@@ -247,9 +235,6 @@ class SimilarityDetector(Detector):
     def clear_buffer(self):
         """
         Clear the buffer memory.
-
-        Examples:
-            >>> detector.detect(benign_queries)
         """
         while self._buffer:
             self._buffer.pop()
@@ -261,10 +246,6 @@ class SimilarityDetector(Detector):
         Args:
             num_of_neighbors (int): Number of the nearest neighbors.
             threshold (float): Detection threshold.
-
-        Examples:
-            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
-            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
         """
         self._num_of_neighbors = check_int_positive('num_of_neighbors',
                                                     num_of_neighbors)
@@ -276,9 +257,6 @@ class SimilarityDetector(Detector):
         Returns:
             list[int], number of queries between adjacent detections.
-
-        Examples:
-            >>> detector.get_detection_interval()
         """
         detected_queries = self._detected_queries
         interval = []
@@ -292,9 +270,6 @@ class SimilarityDetector(Detector):
         Returns:
             list[int], sequence number of detected malicious queries.
-
-        Examples:
-            >>> detector.get_detected_queries()
         """
         detected_queries = self._detected_queries
         return detected_queries
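
After a `detect` run, the accessors whose examples were removed above can be
exercised as follows (same assumed `detector` as in the earlier sketches):

    detections = detector.get_detection_interval()       # list[int], gaps between alarms
    detected_queries = detector.get_detected_queries()   # list[int], indices of alarms
    detector.clear_buffer()                              # reset the query buffer between runs
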
@@ -311,9 +286,6 @@ class SimilarityDetector(Detector):
         Raises:
             NotImplementedError: This function is not available
                 in class `SimilarityDetector`.
-
-        Examples:
-            >>> detector.detect_diff()
         """
         msg = 'The function detect_diff() is not available in the class ' \
               '`SimilarityDetector`.'
@@ -329,9 +301,6 @@ class SimilarityDetector(Detector):
         Raises:
             NotImplementedError: This function is not available in class `SimilarityDetector`.
-
-        Examples:
-            >>> detector.transform(x_train)
         """
         msg = 'The function transform() is not available in the class `SimilarityDetector`.'
         LOGGER.error(TAG, msg)
@@ -124,7 +124,7 @@ class Fuzzer:
        ...         self.reshape = P.Reshape()
        ...         self.summary = TensorSummary()
        ...
-       ...     def construct(self, x):
+       ...     def construct(self, x):
        ...         x = self.conv1(x)
        ...         x = self.relu(x)
        ...         self.summary('conv1', x)
@@ -172,8 +172,50 @@ class NeuronCoverage(CoverageMetrics):
             float, the metric of 'neuron coverage'.

         Examples:
+            >>> from mindspore.common.initializer import TruncatedNormal
+            >>> from mindspore.ops import operations as P
+            >>> from mindspore.train import Model
+            >>> from mindspore.ops import TensorSummary
+            >>> from mindarmour.fuzz_testing import NeuronCoverage
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv1 = nn.Conv2d(1, 6, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.conv2 = nn.Conv2d(6, 16, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.fc1 = nn.Dense(16 * 5 * 5, 120, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc2 = nn.Dense(120, 84, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc3 = nn.Dense(84, 10, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.relu = nn.ReLU()
+            ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+            ...         self.reshape = P.Reshape()
+            ...         self.summary = TensorSummary()
+            ...     def construct(self, x):
+            ...         x = self.conv1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv1', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.conv2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv2', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.reshape(x, (-1, 16 * 5 * 5))
+            ...         x = self.fc1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc1', x)
+            ...         x = self.fc2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc2', x)
+            ...         x = self.fc3(x)
+            ...         self.summary('fc3', x)
+            ...         return x
+            >>> net = Net()
+            >>> model = Model(net)
+            >>> batch_size = 8
+            >>> num_classes = 10
+            >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
+            >>> test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
             >>> nc = NeuronCoverage(model, threshold=0.1)
-            >>> nc_metrics = nc.get_metrics(test_data)
+            >>> nc_metrics = nc.get_metrics(test_images)
         """
         dataset = check_numpy_param('dataset', dataset)
         batches = math.ceil(dataset.shape[0] / self.batch_size)
@@ -219,8 +261,50 @@ class TopKNeuronCoverage(CoverageMetrics):
             float, the metrics of 'top k neuron coverage'.

         Examples:
+            >>> from mindspore.common.initializer import TruncatedNormal
+            >>> from mindspore.ops import operations as P
+            >>> from mindspore.train import Model
+            >>> from mindspore.ops import TensorSummary
+            >>> from mindarmour.fuzz_testing import TopKNeuronCoverage
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv1 = nn.Conv2d(1, 6, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.conv2 = nn.Conv2d(6, 16, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.fc1 = nn.Dense(16 * 5 * 5, 120, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc2 = nn.Dense(120, 84, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc3 = nn.Dense(84, 10, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.relu = nn.ReLU()
+            ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+            ...         self.reshape = P.Reshape()
+            ...         self.summary = TensorSummary()
+            ...     def construct(self, x):
+            ...         x = self.conv1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv1', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.conv2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv2', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.reshape(x, (-1, 16 * 5 * 5))
+            ...         x = self.fc1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc1', x)
+            ...         x = self.fc2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc2', x)
+            ...         x = self.fc3(x)
+            ...         self.summary('fc3', x)
+            ...         return x
+            >>> net = Net()
+            >>> model = Model(net)
+            >>> batch_size = 8
+            >>> num_classes = 10
+            >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
+            >>> test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
             >>> tknc = TopKNeuronCoverage(model, top_k=3)
-            >>> metrics = tknc.get_metrics(test_data)
+            >>> metrics = tknc.get_metrics(test_images)
         """
         dataset = check_numpy_param('dataset', dataset)
         batches = math.ceil(dataset.shape[0] / self.batch_size)
@@ -269,8 +353,50 @@ class SuperNeuronActivateCoverage(CoverageMetrics):
             float, the metric of 'strong neuron activation coverage'.

         Examples:
-            >>> snac = SuperNeuronActivateCoverage(model, train_dataset)
-            >>> metrics = snac.get_metrics(test_data)
+            >>> from mindspore.common.initializer import TruncatedNormal
+            >>> from mindspore.ops import operations as P
+            >>> from mindspore.train import Model
+            >>> from mindspore.ops import TensorSummary
+            >>> from mindarmour.fuzz_testing import SuperNeuronActivateCoverage
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv1 = nn.Conv2d(1, 6, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.conv2 = nn.Conv2d(6, 16, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.fc1 = nn.Dense(16 * 5 * 5, 120, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc2 = nn.Dense(120, 84, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc3 = nn.Dense(84, 10, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.relu = nn.ReLU()
+            ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+            ...         self.reshape = P.Reshape()
+            ...         self.summary = TensorSummary()
+            ...     def construct(self, x):
+            ...         x = self.conv1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv1', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.conv2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv2', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.reshape(x, (-1, 16 * 5 * 5))
+            ...         x = self.fc1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc1', x)
+            ...         x = self.fc2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc2', x)
+            ...         x = self.fc3(x)
+            ...         self.summary('fc3', x)
+            ...         return x
+            >>> net = Net()
+            >>> model = Model(net)
+            >>> batch_size = 8
+            >>> num_classes = 10
+            >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
+            >>> test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
+            >>> snac = SuperNeuronActivateCoverage(model, train_images)
+            >>> metrics = snac.get_metrics(test_images)
         """
         dataset = check_numpy_param('dataset', dataset)
         if not self.incremental or not self._activate_table:
@@ -319,8 +445,50 @@ class NeuronBoundsCoverage(SuperNeuronActivateCoverage):
             float, the metric of 'neuron boundary coverage'.

         Examples:
-            >>> nbc = NeuronBoundsCoverage(model, train_dataset)
-            >>> metrics = nbc.get_metrics(test_data)
+            >>> from mindspore.common.initializer import TruncatedNormal
+            >>> from mindspore.ops import operations as P
+            >>> from mindspore.train import Model
+            >>> from mindspore.ops import TensorSummary
+            >>> from mindarmour.fuzz_testing import NeuronBoundsCoverage
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv1 = nn.Conv2d(1, 6, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.conv2 = nn.Conv2d(6, 16, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.fc1 = nn.Dense(16 * 5 * 5, 120, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc2 = nn.Dense(120, 84, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc3 = nn.Dense(84, 10, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.relu = nn.ReLU()
+            ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+            ...         self.reshape = P.Reshape()
+            ...         self.summary = TensorSummary()
+            ...     def construct(self, x):
+            ...         x = self.conv1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv1', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.conv2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv2', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.reshape(x, (-1, 16 * 5 * 5))
+            ...         x = self.fc1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc1', x)
+            ...         x = self.fc2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc2', x)
+            ...         x = self.fc3(x)
+            ...         self.summary('fc3', x)
+            ...         return x
+            >>> net = Net()
+            >>> model = Model(net)
+            >>> batch_size = 8
+            >>> num_classes = 10
+            >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
+            >>> test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
+            >>> nbc = NeuronBoundsCoverage(model, train_images)
+            >>> metrics = nbc.get_metrics(test_images)
         """
         dataset = check_numpy_param('dataset', dataset)
         if not self.incremental or not self._activate_table:
@@ -383,8 +551,50 @@ class KMultisectionNeuronCoverage(SuperNeuronActivateCoverage):
             float, the metric of 'k-multisection neuron coverage'.

         Examples:
-            >>> kmnc = KMultisectionNeuronCoverage(model, train_dataset, segmented_num=100)
-            >>> metrics = kmnc.get_metrics(test_data)
+            >>> from mindspore.common.initializer import TruncatedNormal
+            >>> from mindspore.ops import operations as P
+            >>> from mindspore.train import Model
+            >>> from mindspore.ops import TensorSummary
+            >>> from mindarmour.fuzz_testing import KMultisectionNeuronCoverage
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv1 = nn.Conv2d(1, 6, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.conv2 = nn.Conv2d(6, 16, 5, padding=0, weight_init=TruncatedNormal(0.02), pad_mode="valid")
+            ...         self.fc1 = nn.Dense(16 * 5 * 5, 120, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc2 = nn.Dense(120, 84, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.fc3 = nn.Dense(84, 10, TruncatedNormal(0.02), TruncatedNormal(0.02))
+            ...         self.relu = nn.ReLU()
+            ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+            ...         self.reshape = P.Reshape()
+            ...         self.summary = TensorSummary()
+            ...     def construct(self, x):
+            ...         x = self.conv1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv1', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.conv2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('conv2', x)
+            ...         x = self.max_pool2d(x)
+            ...         x = self.reshape(x, (-1, 16 * 5 * 5))
+            ...         x = self.fc1(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc1', x)
+            ...         x = self.fc2(x)
+            ...         x = self.relu(x)
+            ...         self.summary('fc2', x)
+            ...         x = self.fc3(x)
+            ...         self.summary('fc3', x)
+            ...         return x
+            >>> net = Net()
+            >>> model = Model(net)
+            >>> batch_size = 8
+            >>> num_classes = 10
+            >>> train_images = np.random.rand(32, 1, 32, 32).astype(np.float32)
+            >>> test_images = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
+            >>> kmnc = KMultisectionNeuronCoverage(model, train_images, segmented_num=100)
+            >>> metrics = kmnc.get_metrics(test_images)
         """
         dataset = check_numpy_param('dataset', dataset)
@@ -196,10 +196,6 @@ class RDPMonitor(Callback):
        Returns:
            int, the recommended maximum training epochs.
-
-       Examples:
-           >>> rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=100, batch_size=32)
-           >>> suggest_epoch = rdp.max_epoch_suggest()
        """
        if self._target_delta is not None and self._max_eps is None:
            msg = 'max_eps should be consistent with target_delta, but got None.'
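
The removed RDP example as a standalone sketch; the values come from the
removed docstring, and the import path assumes MindArmour's current package
layout:

    from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory

    rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=100, batch_size=32)
    suggest_epoch = rdp.max_epoch_suggest()
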
@@ -450,11 +446,6 @@ class ZCDPMonitor(Callback):
        Returns:
            int, the recommended maximum training epochs.
-
-       Examples:
-           >>> zcdp = PrivacyMonitorFactory.create(policy='zcdp',
-           >>> num_samples=60000, batch_size=32)
-           >>> suggest_epoch = zcdp.max_epoch_suggest()
        """
        epoch = 1
        while epoch < 10000:
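
The corresponding zCDP sketch, with the continuation written as one proper
call (the removed docstring split it across two `>>>` lines, which would not
run as a doctest):

    from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory

    zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,
                                        batch_size=32)
    suggest_epoch = zcdp.max_epoch_suggest()
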
@@ -120,7 +120,7 @@ def _attack_rf(features, labels, random_grid, n_jobs):
     return rf_model


-def get_attack_model(features, labels, config, n_jobs=-1):
+def _get_attack_model(features, labels, config, n_jobs=-1):
     """
     Get the trained attack model specified by config.

@@ -139,10 +139,11 @@ def get_attack_model(features, labels, config, n_jobs=-1):
         sklearn.BaseEstimator, trained model specified by config["method"].

     Examples:
-        >>> from mindarmour.privacy.evaluation.attacker import get_attack_model
         >>> features = np.random.randn(10, 10)
         >>> labels = np.random.randint(0, 2, 10)
-        >>> config = {"method": "knn", "params": {"n_neighbors": [3, 5, 7]}}
-        >>> attack_model = get_attack_model(features, labels, config)
+        >>> config = {"method": "knn", "params": {"n_neighbors": [3, 5]}}
+        >>> attack_model = _get_attack_model(features, labels, config)
     """
     features, labels = check_pair_numpy_param("features", features, "labels", labels)
     config = check_param_type("config", config, dict)
@@ -135,7 +135,6 @@ class ImageInversionAttack:
        ...                                          loss_weights=[1, 0.2, 5])
        >>> inversion_images = inversion_attack.generate(target_features, iters=10)
        >>> evaluate_result = inversion_attack.evaluate(original_images, inversion_images)
-       >>> print(evaluate_result)
    """

    def __init__(self, network, input_shape, input_bound, loss_weights=(1, 0.2, 5)):
        self._network = check_param_type('network', network, Cell)
@@ -25,7 +25,7 @@ from mindspore import Tensor
 from mindarmour.utils.logger import LogUtil
 from mindarmour.utils._check_param import check_param_type, check_param_multi_types, \
     check_model, check_numpy_param
-from .attacker import get_attack_model
+from .attacker import _get_attack_model
 from ._check_config import verify_config_params

 LOGGER = LogUtil.get_instance()
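
With `get_attack_model` renamed to the private `_get_attack_model`, callers
outside the package would go through the public evaluation API instead. A
minimal sketch, assuming a trained `mindspore.train.Model` named `model`:

    from mindarmour.privacy.evaluation import MembershipInference

    inference_model = MembershipInference(model)  # wraps the target model
    # inference_model.train(ds_train, ds_test, config), then .eval(...),
    # as in the MembershipInference docstring below.
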
@@ -123,8 +123,7 @@ class MembershipInference:
        ...     data = np.random.randn(batches * batch_size,1,10).astype(np.float32)
        ...     label = np.random.randint(0,10, batches * batch_size).astype(np.int32)
        ...     for i in range(batches):
-       ...         yield data[i*batch_size:(i+1)*batch_size],\
-       ...               label[i*batch_size:(i+1)*batch_size]
+       ...         yield data[i*batch_size:(i+1)*batch_size], label[i*batch_size:(i+1)*batch_size]
        >>> class Net(Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
@@ -143,7 +142,7 @@ class MembershipInference:
        >>> config = [{
        ...     "method": "KNN",
        ...     "params": {"n_neighbors": [3, 5, 7],}
-           }]
+       ... }]
        >>> ds_train = ds.GeneratorDataset(dataset_generator, ["image", "label"])
        >>> ds_test = ds.GeneratorDataset(dataset_generator, ["image", "label"])
        >>> inference_model.train(ds_train, ds_test, config)
@@ -195,7 +194,7 @@ class MembershipInference:
        features, labels = self._transform(dataset_train, dataset_test)
        for config in attack_config:
-           self._attack_list.append(get_attack_model(features, labels, config, n_jobs=self._n_jobs))
+           self._attack_list.append(_get_attack_model(features, labels, config, n_jobs=self._n_jobs))

    def eval(self, dataset_train, dataset_test, metrics):
@@ -158,13 +158,6 @@ class ConceptDriftCheckTimeSeries:
            - numpy.ndarray, the concept drift score of the example series.
            - float, the threshold to judge concept drift.
            - list, the location of the concept drift.
-
-       Examples:
-           >>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10,
-           ...                                       step=10, threshold_index=1.5, need_label=False)
-           >>> data_example = 5*np.random.rand(1000)
-           >>> data_example[200: 800] = 20*np.random.rand(600)
-           >>> score, drift_threshold, drift_location = concept.concept_check(data_example)
        """
        # data check
        data = _check_array_not_empty('data', data)
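
The removed drift-check example, kept as a reference sketch; the top-level
import path is an assumption about the package's public exports:

    import numpy as np
    from mindarmour import ConceptDriftCheckTimeSeries

    concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10,
                                          step=10, threshold_index=1.5,
                                          need_label=False)
    data_example = 5 * np.random.rand(1000)
    data_example[200:800] = 20 * np.random.rand(600)  # injected distribution shift
    score, drift_threshold, drift_location = concept.concept_check(data_example)
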
@@ -59,8 +59,7 @@ class FaultInjector:
        ...     data = np.random.randn(batches * batch_size,1,10).astype(np.float32)
        ...     label = np.random.randint(0,10, batches * batch_size).astype(np.int32)
        ...     for i in range(batches):
-       ...         yield data[i*batch_size:(i+1)*batch_size],\
-       ...               label[i*batch_size:(i+1)*batch_size]
+       ...         yield data[i*batch_size:(i+1)*batch_size], label[i*batch_size:(i+1)*batch_size]
        >>> net = Net()
        >>> model = Model(net)
        >>> ds_eval = ds.GeneratorDataset(dataset_generator, ['image', 'label'])
@@ -102,12 +102,24 @@ class WithLossCell(Cell):
        loss_fn (Function): The loss function is used for computing loss.

    Examples:
-       >>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
-       >>> label = Tensor(np.ones([1, 10]).astype(np.float32))
-       >>> net = NET()
+       >>> from mindspore import Tensor
+       >>> from mindarmour.utils.util import WithLossCell
+       >>> import mindspore.ops.operations as P
+       >>> class Net(nn.Cell):
+       ...     def __init__(self):
+       ...         super(Net, self).__init__()
+       ...         self._softmax = P.Softmax()
+       ...         self._Dense = nn.Dense(10,10)
+       ...         self._squeeze = P.Squeeze(1)
+       ...     def construct(self, inputs):
+       ...         out = self._softmax(inputs)
+       ...         out = self._Dense(out)
+       ...         return self._squeeze(out)
+       >>> data = Tensor(np.ones([2, 1, 10]).astype(np.float32)*0.01)
+       >>> labels = Tensor(np.ones([2, 10]).astype(np.float32))
+       >>> net = Net()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> loss_net = WithLossCell(net, loss_fn)
-       >>> loss_out = loss_net(data, label)
    """

    def __init__(self, network, loss_fn):
        super(WithLossCell, self).__init__()
@@ -138,9 +150,23 @@ class GradWrapWithLoss(Cell):
        network (Cell): The target network to wrap.

    Examples:
-       >>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
-       >>> labels = Tensor(np.ones([1, 10]).astype(np.float32))
-       >>> net = NET()
+       >>> from mindspore import Tensor
+       >>> from mindarmour.utils import GradWrapWithLoss
+       >>> from mindarmour.utils.util import WithLossCell
+       >>> import mindspore.ops.operations as P
+       >>> class Net(nn.Cell):
+       ...     def __init__(self):
+       ...         super(Net, self).__init__()
+       ...         self._softmax = P.Softmax()
+       ...         self._Dense = nn.Dense(10,10)
+       ...         self._squeeze = P.Squeeze(1)
+       ...     def construct(self, inputs):
+       ...         out = self._softmax(inputs)
+       ...         out = self._Dense(out)
+       ...         return self._squeeze(out)
+       >>> data = Tensor(np.ones([2, 1, 10]).astype(np.float32)*0.01)
+       >>> labels = Tensor(np.ones([2, 10]).astype(np.float32))
+       >>> net = Net()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> loss_net = WithLossCell(net, loss_fn)
        >>> grad_all = GradWrapWithLoss(loss_net)
@@ -176,12 +202,25 @@ class GradWrap(Cell):
        network (Cell): The target network to wrap.

    Examples:
-       >>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
-       >>> label = Tensor(np.ones([1, 10]).astype(np.float32))
+       >>> from mindspore import Tensor
+       >>> from mindarmour.utils import GradWrap
+       >>> from mindarmour.utils.util import WithLossCell
+       >>> import mindspore.ops.operations as P
+       >>> class Net(nn.Cell):
+       ...     def __init__(self):
+       ...         super(Net, self).__init__()
+       ...         self._softmax = P.Softmax()
+       ...         self._Dense = nn.Dense(10,10)
+       ...         self._squeeze = P.Squeeze(1)
+       ...     def construct(self, inputs):
+       ...         out = self._softmax(inputs)
+       ...         out = self._Dense(out)
+       ...         return self._squeeze(out)
+       >>> data = Tensor(np.ones([2, 1, 10]).astype(np.float32)*0.01)
+       >>> labels = Tensor(np.ones([2, 10]).astype(np.float32))
        >>> num_classes = 10
        >>> sens = np.zeros((data.shape[0], num_classes)).astype(np.float32)
        >>> sens[:, 1] = 1.0
-       >>> net = NET()
+       >>> net = Net()
        >>> wrap_net = GradWrap(net)
        >>> wrap_net(data, Tensor(sens))
    """
@@ -18,7 +18,7 @@ import pytest

 import numpy as np

-from mindarmour.privacy.evaluation.attacker import get_attack_model
+from mindarmour.privacy.evaluation.attacker import _get_attack_model


 @pytest.mark.level0
@@ -35,7 +35,7 @@ def test_get_knn_model():
             "n_neighbors": [3, 5, 7],
         }
     }
-    knn_attacker = get_attack_model(features, labels, config_knn, -1)
+    knn_attacker = _get_attack_model(features, labels, config_knn, -1)
     pred = knn_attacker.predict(features)
     assert pred is not None
@@ -54,7 +54,7 @@ def test_get_lr_model():
             "C": np.logspace(-4, 2, 10),
         }
     }
-    lr_attacker = get_attack_model(features, labels, config_lr, -1)
+    lr_attacker = _get_attack_model(features, labels, config_lr, -1)
     pred = lr_attacker.predict(features)
     assert pred is not None
@@ -75,7 +75,7 @@ def test_get_mlp_model():
             "alpha": [0.0001, 0.001, 0.01],
         }
     }
-    mlpc_attacker = get_attack_model(features, labels, config_mlpc, -1)
+    mlpc_attacker = _get_attack_model(features, labels, config_mlpc, -1)
     pred = mlpc_attacker.predict(features)
     assert pred is not None
@@ -98,6 +98,6 @@ def test_get_rf_model():
             "min_samples_leaf": [1, 2, 4],
         }
     }
-    rf_attacker = get_attack_model(features, labels, config_rf, -1)
+    rf_attacker = _get_attack_model(features, labels, config_rf, -1)
     pred = rf_attacker.predict(features)
     assert pred is not None