
encoder.py

import mindspore.nn as nn
import mindspore.ops as ops
class ConvLayer(nn.Cell):
    """Distilling convolution block placed between Informer encoder layers."""

    def __init__(self, c_in):
        super(ConvLayer, self).__init__()
        # The torch-version check from the original PyTorch code does not apply
        # here; MindSpore also has no 'circular' padding mode, so zero padding
        # (pad_mode='pad') is used as an approximation.
        self.downConv = nn.Conv1d(in_channels=c_in,
                                  out_channels=c_in,
                                  kernel_size=3,
                                  pad_mode='pad',
                                  padding=1)
        self.norm = nn.BatchNorm1d(c_in)
        self.activation = nn.ELU()
        # pad_mode='same' with stride 2 halves the sequence length, matching
        # PyTorch's MaxPool1d(kernel_size=3, stride=2, padding=1).
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, pad_mode='same')

    def construct(self, x):
        # (B, L, D) -> (B, D, L) for the 1-D convolution over time
        x = self.downConv(x.transpose(0, 2, 1))
        x = self.norm(x)
        x = self.activation(x)
        x = self.maxPool(x)
        # back to (B, L/2, D); MindSpore's transpose takes a full permutation
        x = x.transpose(0, 2, 1)
        return x
class EncoderLayer(nn.Cell):
    """Self-attention followed by a position-wise feed-forward block (1x1 convs)."""

    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        # nn.LayerNorm expects the normalized shape as a sequence
        self.norm1 = nn.LayerNorm((d_model,))
        self.norm2 = nn.LayerNorm((d_model,))
        # MindSpore >= 1.8 takes the drop probability p; older versions use keep_prob=1-dropout
        self.dropout = nn.Dropout(p=dropout)
        self.activation = ops.ReLU() if activation == "relu" else ops.GeLU()

    def construct(self, x, attn_mask=None):
        new_x, attn = self.attention(x, x, x, attn_mask=attn_mask)
        x = x + self.dropout(new_x)
        y = x = self.norm1(x)
        # (B, L, D) -> (B, D, L) for the 1x1 convolutions, then back
        y = self.dropout(self.activation(self.conv1(y.transpose(0, 2, 1))))
        y = self.dropout(self.conv2(y).transpose(0, 2, 1))
        return self.norm2(x + y), attn
class Encoder(nn.Cell):
    """Stack of attention layers, optionally interleaved with distilling ConvLayers."""

    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.CellList(attn_layers)
        self.conv_layers = nn.CellList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def construct(self, x, attn_mask=None):
        attns = []
        if self.conv_layers is not None:
            # Each distilling conv halves the sequence length between attention layers
            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
                x, attn = attn_layer(x, attn_mask=attn_mask)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x, attn_mask=attn_mask)
            attns.append(attn)
        else:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask)
                attns.append(attn)
        if self.norm is not None:
            x = self.norm(x)
        return x, attns
class EncoderStack(nn.Cell):
    """Runs several encoders on progressively shorter input suffixes and joins the outputs."""

    def __init__(self, encoders, inp_lens):
        super(EncoderStack, self).__init__()
        self.encoders = nn.CellList(encoders)
        self.inp_lens = inp_lens

    def construct(self, x, attn_mask=None):
        x_stack = []
        attns = []
        for i_len, encoder in zip(self.inp_lens, self.encoders):
            # The i-th encoder sees the last L / 2**i_len steps of the input
            inp_len = x.shape[1] // (2 ** i_len)
            x_s, attn = encoder(x[:, -inp_len:, :])
            x_stack.append(x_s)
            attns.append(attn)
        # Concatenate along the time axis, as in the original Informer
        x_stack = ops.Concat(axis=1)(x_stack)
        return x_stack, attns
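
For reference, a minimal usage sketch (not part of encoder.py): it builds a two-layer distilling encoder and runs a random batch through it. SelfAttention is a hypothetical placeholder for any Cell with the (q, k, v, attn_mask=...) call signature that EncoderLayer expects; shapes assume d_model = 512 and an input length of 96.

# Minimal usage sketch; SelfAttention is a hypothetical attention Cell
# with the (q, k, v, attn_mask=...) interface used by EncoderLayer.
import numpy as np
import mindspore as ms

d_model, n_layers = 512, 2
attn_layers = [EncoderLayer(SelfAttention(d_model), d_model) for _ in range(n_layers)]
conv_layers = [ConvLayer(d_model) for _ in range(n_layers - 1)]
encoder = Encoder(attn_layers, conv_layers, norm_layer=nn.LayerNorm((d_model,)))

x = ms.Tensor(np.random.randn(8, 96, d_model), ms.float32)  # (batch, length, d_model)
out, attns = encoder(x)  # one distilling conv halves the length: out is (8, 48, d_model)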

Research on a MindSpore-based multimodal stock price prediction system: Informer, LSTM, RNN