
MLP_JDLU.py

import math

import torch.nn as nn

from network_module.activation import jdlu, JDLU


class MLPLayer(nn.Module):
    """Single block: Linear -> JDLU activation -> Dropout, with an optional scaled residual connection."""

    def __init__(self, dim_in, dim_out, res_coef=0.0, dropout_p=0.1):
        super().__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.res_coef = res_coef
        self.activation = JDLU(dim_out)
        self.dropout = nn.Dropout(dropout_p)
        # Note: the LayerNorm is instantiated but not applied in forward().
        self.ln = nn.LayerNorm(dim_out)

    def forward(self, x):
        y = self.linear(x)
        y = self.activation(y)
        y = self.dropout(y)
        if self.res_coef == 0:
            return y
        else:
            # Scaled residual connection (requires dim_in == dim_out).
            return self.res_coef * x + y


class MLP_JDLU(nn.Module):
    """MLP with JDLU activations: an input projection, n_layers residual MLPLayers, and a scalar output head."""

    def __init__(self, dim_in, dim, res_coef=0.5, dropout_p=0.1, n_layers=10):
        super().__init__()
        self.mlp = nn.ModuleList()
        self.first_linear = MLPLayer(dim_in, dim)
        self.n_layers = n_layers
        for _ in range(n_layers):
            self.mlp.append(MLPLayer(dim, dim, res_coef, dropout_p))
        self.final = nn.Linear(dim, 1)
        self.apply(self.weight_init)

    def forward(self, x):
        x = self.first_linear(x)
        for layer in self.mlp:
            x = layer(x)
        x = self.final(x)
        # Output head has a single unit; squeeze() returns shape (batch,).
        return x.squeeze()

    @staticmethod
    def weight_init(m):
        # Xavier-normal weights; biases use the same fan-in based uniform range as nn.Linear's default init.
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(m.bias, -bound, bound)
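For reference, a minimal usage sketch of the module above. It assumes the repository's network_module package (which provides JDLU) is importable; the batch size and feature dimensions are illustrative only:

    import torch

    # 32 samples with 16 input features (illustrative values).
    model = MLP_JDLU(dim_in=16, dim=64, res_coef=0.5, dropout_p=0.1, n_layers=10)
    x = torch.randn(32, 16)
    out = model(x)  # final Linear maps to 1 unit; squeeze() yields shape (32,)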

A machine learning template based on PyTorch Lightning for training, validating, and testing machine learning algorithms. It currently implements neural networks, deep learning, k-fold cross-validation, and automatic saving of training information.
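The Lightning code itself is not shown in this excerpt, so the following is only a hypothetical sketch of how MLP_JDLU could be wrapped in a pytorch_lightning.LightningModule for training and validation. The class name MLPRegressor, the MSE loss, and the learning rate are assumptions, not part of the repository:

    import torch
    import torch.nn as nn
    import pytorch_lightning as pl

    from MLP_JDLU import MLP_JDLU  # assumes the file above is on the import path


    class MLPRegressor(pl.LightningModule):
        """Hypothetical Lightning wrapper around MLP_JDLU for a regression task."""

        def __init__(self, dim_in, dim, lr=1e-3):
            super().__init__()
            self.model = MLP_JDLU(dim_in, dim)
            self.loss_fn = nn.MSELoss()
            self.lr = lr

        def forward(self, x):
            return self.model(x)

        def training_step(self, batch, batch_idx):
            x, y = batch
            loss = self.loss_fn(self(x), y)
            self.log("train_loss", loss)
            return loss

        def validation_step(self, batch, batch_idx):
            x, y = batch
            loss = self.loss_fn(self(x), y)
            self.log("val_loss", loss)

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=self.lr)


    # Hypothetical training call, given DataLoaders that yield (features, targets) pairs:
    # trainer = pl.Trainer(max_epochs=10)
    # trainer.fit(MLPRegressor(dim_in=16, dim=64), train_loader, val_loader)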
