import argparse
import os
import torch

from exp.exp_informer import Exp_Informer

parser = argparse.ArgumentParser(description='[Informer] Long Sequences Forecasting')

parser.add_argument('--model', type=str, required=False, default='informer', help='model of experiment, options: [informer, informerstack, informerlight(TBD)]')

parser.add_argument('--data', type=str, required=False, default='SH000001.csv', help='data')
# parser.add_argument('--data', type=str, required=True, default='ETTh1', help='data')
parser.add_argument('--root_path', type=str, default='./data/stock/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='SH000001.csv', help='data file')
# parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')
# parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

parser.add_argument('--seq_len', type=int, default=96, help='input sequence length of Informer encoder')
parser.add_argument('--label_len', type=int, default=48, help='start token length of Informer decoder')
parser.add_argument('--pred_len', type=int, default=24, help='prediction sequence length')
# Informer decoder input: concat[start token series(label_len), zero padding series(pred_len)]

parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size')
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--s_layers', type=str, default='3,2,1', help='num of stack encoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--factor', type=int, default=5, help='probsparse attn factor')
parser.add_argument('--padding', type=int, default=0, help='padding type')
parser.add_argument('--distil', action='store_false', help='whether to use distilling in encoder, using this argument means not using distilling', default=True)
parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
parser.add_argument('--attn', type=str, default='prob', help='attention used in encoder, options:[prob, full]')
parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
parser.add_argument('--mix', action='store_false', help='use mix attention in generative decoder', default=True)
parser.add_argument('--cols', type=str, nargs='+', help='certain cols from the data files as the input features')
parser.add_argument('--num_workers', type=int, default=0, help='data loader num workers')
parser.add_argument('--itr', type=int, default=2, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=6, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='mse', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
# NOTE: argparse's type=bool treats any non-empty string as True, so parse the flag explicitly
parser.add_argument('--use_gpu', type=lambda s: s.lower() in ('true', '1'), default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multiple gpus')
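
# Example invocation (illustrative; the script name is an assumption, and note
# that the hardcoded overrides further below clobber most flags parsed here):
#   python main_informer.py --data custom --features MS --target Close --freq d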
args = parser.parse_args()
args.model = 'informer'  # model of experiment, options: [informer, informerstack, informerlight(TBD)]

args.data = 'custom'  # data
args.root_path = './data/stock/'  # root path of data file
args.data_path = 'SH600000.csv'  # data file
# args.data_path = 'SH000001.csv'  # data file
args.features = 'MS'  # forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate
args.target = 'Close'  # target feature in S or MS task
args.freq = 'd'  # freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h
args.checkpoints = './checkpoints'  # location of model checkpoints

args.seq_len = 20  # input sequence length of Informer encoder
args.label_len = 10  # start token length of Informer decoder
args.pred_len = 5  # prediction sequence length
# Informer decoder input: concat[start token series(label_len), zero padding series(pred_len)]

args.enc_in = 5  # encoder input size
args.dec_in = 5  # decoder input size
args.c_out = 1  # output size
args.factor = 5  # probsparse attn factor
args.padding = 0  # padding type
args.d_model = 256  # dimension of model
args.n_heads = 4  # num of heads
args.e_layers = 2  # num of encoder layers
args.d_layers = 1  # num of decoder layers
args.d_ff = 256  # dimension of fcn in model
args.dropout = 0.05  # dropout
args.attn = 'prob'  # attention used in encoder, options:[prob, full]
args.embed = 'timeF'  # time features encoding, options:[timeF, fixed, learned]
args.activation = 'gelu'  # activation
args.distil = True  # whether to use distilling in encoder
args.output_attention = False  # whether to output attention in encoder
args.batch_size = 32
args.learning_rate = 0.0001
args.loss = 'mse'
args.lradj = 'type1'
args.use_amp = False  # whether to use automatic mixed precision training

args.num_workers = 0
args.train_epochs = 20
args.patience = 3
args.des = 'exp'

# args.use_gpu = True if torch.cuda.is_available() else False
args.use_gpu = False
args.gpu = 0

args.use_multi_gpu = False
args.devices = '0,1,2,3'
args.use_gpu = torch.cuda.is_available() and args.use_gpu

if args.use_gpu and args.use_multi_gpu:
    args.devices = args.devices.replace(' ', '')
    device_ids = args.devices.split(',')
    args.device_ids = [int(id_) for id_ in device_ids]
    args.gpu = args.device_ids[0]
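
# In the reference Informer code, predict() generates future timestamps from the
# full freq string, while the time-feature embedding expects a single-character
# freq code, hence the split below (e.g. 'd' stays 'd', '15min' becomes 'n').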
args.detail_freq = args.freq
args.freq = args.freq[-1:]
#%%
print('Args in experiment:')
print(args)

Exp = Exp_Informer
#
# for ii in range(args.itr):
#     # setting record of experiments
#     setting = ('{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_at{}_fc{}_eb{}_dt{}_mx{}_{}_{}'.
#                format(args.model, args.data, args.features,
#                       args.seq_len, args.label_len, args.pred_len,
#                       args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.attn, args.factor,
#                       args.embed, args.distil, args.mix, args.des, ii))
#
#     exp = Exp(args)  # set experiments
#     print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
#     exp.train(setting)
#
#     print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
#     exp.test(setting)
#
#     if args.do_predict:
#         print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
#         exp.predict(setting, True)
#
#     torch.cuda.empty_cache()
#
#
# # the prediction will be saved in ./results1/{setting}/real_prediction.npy
import pandas as pd

# Read the CSV file (NOTE: for a consistent denormalization this should match
# args.data_path set above)
csv_file = './data/stock/SH000001.csv'
# csv_file = './data/stock/SH600000.csv'
data = pd.read_csv(csv_file)

# Find the max and min values, used to denormalize the predictions
max_value = data['High'].max()
min_value = data['Low'].min()

last_date = data['date'].iloc[-1]

from datetime import datetime, timedelta

date_obj = datetime.strptime(last_date, '%Y/%m/%d')

# List to hold the dates of the next five days
next_dates = []
# Compute the next five days' dates and append them to the list
for i in range(5):
    next_date = date_obj + timedelta(days=i + 1)
    next_dates.append(next_date.strftime('%Y/%m/%d'))
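
# NOTE: timedelta counts calendar days, so the dates above may fall on weekends
# when the market is closed. A business-day alternative (a sketch that ignores
# exchange holidays):
# next_dates = pd.bdate_range(date_obj + timedelta(days=1), periods=5).strftime('%Y/%m/%d').tolist()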
| # print("最大值:", max_value) | |||
| # print("最小值:", min_value) | |||
def denormalize(normalized_num, min_val, max_val):
    """
    Map a min-max normalized value back to its original scale
    [min_val, max_val].
    """
    return normalized_num * (max_val - min_val) + min_val
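
# Quick sanity check with illustrative numbers: 0.5 on the range [10, 20]
# should map back to the midpoint.
assert denormalize(0.5, 10.0, 20.0) == 15.0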
import numpy as np
import matplotlib.pyplot as plt

setting = 'informer_custom_ftMS_sl20_ll10_pl5_dm256_nh4_el2_dl1_df256_atprob_fc5_ebtimeF_dtTrue_exp'
# setting = 'informer_custom_ftMS_sl20_ll10_pl5_dm256_nh4_el2_dl1_df256_atprob_fc5_ebtimeF_dtTrue_mxTrue_exp_0'

prediction = np.load('./results1/' + setting + '/real_prediction.npy')
print(prediction.shape)
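
# real_prediction.npy is expected to hold a single forecast window of shape
# (1, pred_len, c_out); with features='MS' and c_out=1 the last channel is the
# predicted target ('Close').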
# plt.figure()
prediction[0, :, -1] = denormalize(prediction[0, :, -1], min_value, max_value)
x = [1, 2, 3, 4, 5]
plt.plot(next_dates, prediction[0, :, -1])
plt.xlabel("day")
plt.ylabel("price")
plt.title("Price trend in the next 5 days")
# # Create sample data
# # Start the x- and y-axis ticks at 1
# plt.xticks(range(1, max(x) + 1))
plt.show()
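
# To keep a copy of the figure, save it before calling show(), e.g.:
# plt.savefig('price_forecast.png', dpi=150, bbox_inches='tight')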
# preds = np.load('./results1/' + setting + '/pred.npy')
# trues = np.load('./results1/' + setting + '/true.npy')
# [samples, pred_len, dimensions]

# plt.figure()
# plt.plot(trues[10, :, -1], label='GroundTruth')
# plt.plot(preds[10, :, -1], label='Prediction')
# plt.legend()
# plt.show()
#
#
# plt.figure()
# plt.plot(trues[20, :, -1], label='GroundTruth')
# plt.plot(preds[20, :, -1], label='Prediction')
# plt.legend()
# plt.show()
#
#
# plt.figure()
# plt.plot(trues[30, :, -1], label='GroundTruth')
# plt.plot(preds[30, :, -1], label='Prediction')
# plt.legend()
# plt.show()

# plt.figure()
# plt.plot(trues[:, 0, -1].reshape(-1), label='GroundTruth')
# plt.plot(preds[:, 0, -1].reshape(-1), label='Prediction')
# plt.legend()
# plt.show()