Informer Model Hands-On Case Study (Code + Dataset + Parameter Walkthrough)


Resource file list:

Informer2020-main.zip contains approximately 83 files
  1. .idea/
  2. .idea/.gitignore 50B
  3. .idea/aws.xml 304B
  4. .idea/Informer2020-main.iml 482B
  5. .idea/inspectionProfiles/
  6. .idea/inspectionProfiles/profiles_settings.xml 174B
  7. .idea/inspectionProfiles/Project_Default.xml 1.27KB
  8. .idea/misc.xml 185B
  9. .idea/modules.xml 293B
  10. .idea/workspace.xml 6.02KB
  11. checkpoints/
  12. checkpoints/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  13. checkpoints/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/checkpoint.pth 62.77MB
  14. checkpoints/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  15. checkpoints/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/checkpoint.pth 62.77MB
  16. data/
  17. data/__init__.py 1B
  18. data/__pycache__/
  19. data/__pycache__/__init__.cpython-39.pyc 153B
  20. data/__pycache__/data_loader.cpython-39.pyc 8.79KB
  21. data/data_loader.py 13.42KB
  22. environment.yml 198B
  23. ETTh1-Test.csv 38.37KB
  24. ETTh1.csv 2.47MB
  25. exp/
  26. exp/__init__.py
  27. exp/__pycache__/
  28. exp/__pycache__/__init__.cpython-39.pyc 152B
  29. exp/__pycache__/exp_basic.cpython-39.pyc 1.54KB
  30. exp/__pycache__/exp_informer.cpython-39.pyc 7.75KB
  31. exp/exp_basic.py 875B
  32. exp/exp_informer.py 10.83KB
  33. forecsat.csv 265B
  34. main_informer.py 7.26KB
  35. models/
  36. models/__init__.py
  37. models/__pycache__/
  38. models/__pycache__/__init__.cpython-39.pyc 155B
  39. models/__pycache__/attn.cpython-39.pyc 5.02KB
  40. models/__pycache__/decoder.cpython-39.pyc 1.95KB
  41. models/__pycache__/embed.cpython-39.pyc 5.04KB
  42. models/__pycache__/encoder.cpython-39.pyc 3.49KB
  43. models/__pycache__/model.cpython-39.pyc 4.78KB
  44. models/attn.py 6.03KB
  45. models/decoder.py 1.73KB
  46. models/embed.py 4.04KB
  47. models/encoder.py 3.47KB
  48. models/model.py 7.05KB
  49. results/
  50. results/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  51. results/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/metrics.npy 148B
  52. results/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/pred.npy 12.13KB
  53. results/informer_custom_ftMS_sl126_ll64_pl24_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/true.npy 12.13KB
  54. results/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  55. results/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/metrics.npy 148B
  56. results/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/pred.npy 2.13KB
  57. results/informer_custom_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/true.npy 2.13KB
  58. results/informer_Sum_ftM_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  59. results/informer_Sum_ftM_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/metrics.npy 148B
  60. results/informer_Sum_ftM_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/pred.npy 27.13KB
  61. results/informer_Sum_ftM_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/real_prediction.npy 176B
  62. results/informer_Sum_ftM_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/true.npy 27.13KB
  63. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/
  64. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/metrics.npy 148B
  65. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/pred.npy 9.13KB
  66. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/real_prediction.npy 144B
  67. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_0/true.npy 9.13KB
  68. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_1/
  69. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_1/metrics.npy 148B
  70. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_1/pred.npy 2.13KB
  71. results/informer_Sum_ftMS_sl126_ll64_pl4_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_test_1/true.npy 2.13KB
  72. utils/
  73. utils/__init__.py
  74. utils/__pycache__/
  75. utils/__pycache__/__init__.cpython-39.pyc 154B
  76. utils/__pycache__/masking.cpython-39.pyc 1.43KB
  77. utils/__pycache__/metrics.cpython-39.pyc 1.42KB
  78. utils/__pycache__/timefeatures.cpython-39.pyc 7.16KB
  79. utils/__pycache__/tools.cpython-39.pyc 3.21KB
  80. utils/masking.py 851B
  81. utils/metrics.py 826B
  82. utils/timefeatures.py 5.43KB
  83. utils/tools.py 2.76KB

Resource introduction:

This post walks you through a hands-on case study of time-series forecasting with the Informer model. The Informer paper was published at AAAI 2021, where it received a Best Paper award, and the model remains a reliable choice for time-series forecasting today. At its core, Informer combines an attention mechanism with the Transformer architecture: the input sequence is processed with self-attention to capture long-range dependencies, and the Transformer's encoder-decoder structure produces the forecast. By reading this article you will learn how to train the model on your own dataset.

Informer is a Transformer designed for long-sequence time-series forecasting, but it differs from the vanilla Transformer in several respects:

1. ProbSparse self-attention: Informer introduces a ProbSparse self-attention mechanism that achieves O(L log L) time complexity and memory usage while still effectively capturing long-range dependencies within the sequence (a minimal sketch follows this list).
2. Self-attention distilling: by reducing the input fed to each successive encoder layer, self-attention distilling handles extremely long input sequences efficiently, improving the model's ability to process long sequences.
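To make point 1 concrete, below is a minimal, self-contained PyTorch sketch of the ProbSparse idea. This is a simplification, not the implementation in models/attn.py: the function name is hypothetical, the sampled logits are left unscaled for brevity, and the "lazy" queries fall back to the mean of V, as in the self-attention case of the paper.

import math
import torch

def probsparse_attention(Q, K, V, factor=5):
    # Q, K, V: [batch, heads, length, dim]. Simplified ProbSparse attention:
    # score every query against a random subset of keys, run full attention
    # only for the top-u "active" queries, and give the rest a mean fallback.
    B, H, L_K, D = K.shape
    L_Q = Q.shape[2]

    # sample c*ln(L) keys per query to estimate each query's sparsity
    U_part = min(int(factor * math.ceil(math.log(L_K))), L_K)
    u = min(int(factor * math.ceil(math.log(L_Q))), L_Q)

    index_sample = torch.randint(L_K, (L_Q, U_part))
    K_sample = K[:, :, index_sample, :]                          # [B, H, L_Q, U_part, D]
    QK_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze(-2)

    # sparsity measurement M(q) = max(logits) - mean(logits); large M = "active" query
    M = QK_sample.max(dim=-1).values - QK_sample.mean(dim=-1)    # [B, H, L_Q]
    top_idx = M.topk(u, dim=-1).indices                          # [B, H, u]

    # full attention only for the u selected queries -> O(L log L) overall
    Q_reduce = Q.gather(2, top_idx.unsqueeze(-1).expand(-1, -1, -1, D))
    scores = torch.matmul(Q_reduce, K.transpose(-2, -1)) / math.sqrt(D)
    context = torch.matmul(torch.softmax(scores, dim=-1), V)     # [B, H, u, D]

    # "lazy" queries receive the mean of V; active queries get their context
    out = V.mean(dim=2, keepdim=True).expand(B, H, L_Q, D).clone()
    out.scatter_(2, top_idx.unsqueeze(-1).expand(-1, -1, -1, D), context)
    return out

# smoke test: out = probsparse_attention(*[torch.randn(2, 8, 96, 64)] * 3)

Compared with vanilla attention, only u = O(log L) queries ever attend over all L keys, which is where the O(L log L) cost in point 1 comes from.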
Excerpt from data/data_loader.py:

import os
import numpy as np
import pandas as pd

import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler

from utils.tools import StandardScaler
from utils.timefeatures import time_features

import warnings
warnings.filterwarnings('ignore')


class Dataset_ETT_hour(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
        # size [seq_len, label_len, pred_len]
        # info
        if size == None:
            self.seq_len = 24*4*4
            self.label_len = 24*4
            self.pred_len = 24*4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))

        # train/val/test borders: 12 months train, 4 months val, 4 months test (hourly)
        border1s = [0, 12*30*24 - self.seq_len, 12*30*24 + 4*30*24 - self.seq_len]
        border2s = [12*30*24, 12*30*24 + 4*30*24, 12*30*24 + 8*30*24]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            # fit the scaler on the training split only, then transform everything
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)

        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatenate([self.data_x[r_begin:r_begin + self.label_len],
                                    self.data_y[r_begin + self.label_len:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)


class Dataset_ETT_minute(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='sum.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
        # size [seq_len, label_len, pred_len]
        # info
        if size == None:
            self.seq_len = 24*4*4
            self.label_len = 24*4
            self.pred_len = 24*4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))

        # same splits as the hourly loader, scaled by 4 for 15-minute data
        border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4 + 4*30*24*4 - self.seq_len]
        border2s = [12*30*24*4, 12*30*24*4 + 4*30*24*4, 12*30*24*4 + 8*30*24*4]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)

        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = np.concatenate([self.data_x[r_begin:r_begin + self.label_len],
                                    self.data_y[r_begin + self.label_len:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)


class Dataset_Custom(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', pre_data=None, data_path='ETTh1.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
        # size [seq_len, label_len, pred_len]
        # info
        if size == None:
            self.seq_len = 24*4*4
            self.label_len = 24*4
            self.pred_len = 24*4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols = cols
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
        '''
        df_raw.columns: ['date', ...(other features), target feature]
        '''
        # cols = list(df_raw.columns);
        if self.cols:
            cols = self.cols.copy()
            cols.remove(self.target)
        else:
            cols = list(df_raw.columns)
            cols.remove(self.target)
            cols.remove('date')
        # ... (the listing is truncated here in the original resource)
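All three classes share the same sliding-window interface, so a quick smoke test looks like the hypothetical sketch below. The [126, 64, 24] window mirrors the sl126_ll64_pl24 settings encoded in the bundled checkpoint and results folder names; the batch size is an arbitrary choice.

from torch.utils.data import DataLoader
from data.data_loader import Dataset_ETT_hour

# seq_len/label_len/pred_len match the sl126_ll64_pl24 checkpoints shipped here
dataset = Dataset_ETT_hour(root_path='./', data_path='ETTh1.csv', flag='train',
                           size=[126, 64, 24], features='MS', target='OT')
loader = DataLoader(dataset, batch_size=32, shuffle=True)

seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(loader))
print(seq_x.shape)  # (32, 126, 7): 126 hourly steps x 7 ETTh1 feature columns
print(seq_y.shape)  # (32, 88, 7): label_len + pred_len = 64 + 24 steps

In a normal run you do not build these datasets by hand: exp/exp_informer.py constructs them from the command-line arguments of main_informer.py, so flags along the lines of --seq_len 126 --label_len 64 --pred_len 24 --features MS (names as in the upstream Informer2020 repo) reproduce the window above.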