data_handler.py 5.9 KB

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :data_handler.py
# @Time :2025/1/8 14:56
# @Author :David
# @Company: shenyang JY
import argparse
import numpy as np
import pandas as pd
from common.data_cleaning import *


class DataHandler(object):
    def __init__(self, logger, args):
        self.logger = logger
        self.opt = argparse.Namespace(**args)
    def get_train_data(self, df):
        """Build sliding-window training/validation samples from a normalized DataFrame."""
        train_x, valid_x, train_y, valid_y = [], [], [], []
        if len(df) < self.opt.Model["time_step"]:
            self.logger.info("Feature processing - training data - fewer rows than time_step")
        datax, datay = self.get_timestep_features(df, is_train=True)
        if len(datax) < 10:
            self.logger.info("Feature processing - training data - too few samples for a minimal split")
        tx, vx, ty, vy = self.train_valid_split(datax, datay, valid_rate=self.opt.Model["valid_data_rate"],
                                                shuffle=self.opt.Model['shuffle_train_data'])
        train_x.extend(tx)
        valid_x.extend(vx)
        train_y.extend(ty)
        valid_y.extend(vy)

        # labels: second column (C_REAL_VALUE) of every window, stacked to (n, time_step)
        train_y = np.concatenate([[y.iloc[:, 1].values for y in train_y]], axis=0)
        valid_y = np.concatenate([[y.iloc[:, 1].values for y in valid_y]], axis=0)

        # each sample is a single NWP window (DataFrame), stacked to (n, time_step, n_features)
        train_x = np.array([x.values for x in train_x])
        valid_x = np.array([x.values for x in valid_x])

        return train_x, valid_x, train_y, valid_y
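    # Shape note (illustrative): with n retained windows of length time_step and
    # F NWP feature columns, train_x / valid_x come back as arrays of shape
    # (n, time_step, F) and train_y / valid_y as arrays of shape (n, time_step).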
    def get_timestep_features(self, norm_data, is_train):  # sliding windows built with pandas slicing
        """Slice the normalized data into overlapping time_step windows of NWP features and labels."""
        time_step = self.opt.Model["time_step"]
        feature_data = norm_data.reset_index(drop=True)
        time_step_loc = time_step - 1
        train_num = int(len(feature_data))
        label_features = ['C_TIME', 'C_REAL_VALUE']  # identical columns whether or not is_train
        nwp_cs = self.opt.nwp_columns.copy()
        if 'C_TIME' in nwp_cs:
            nwp_cs.pop(nwp_cs.index('C_TIME'))
        nwp = [feature_data.loc[i:i + time_step_loc, nwp_cs].reset_index(drop=True) for i in range(train_num - time_step + 1)]  # database fields, e.g. 'C_T': 'C_WS170'
        labels = [feature_data.loc[i:i + time_step_loc, label_features].reset_index(drop=True) for i in range(train_num - time_step + 1)]
        features_x, features_y = [], []
        self.logger.info("Before matching NWP and labels: {} windows -> ".format(len(nwp)))
        for i, row in enumerate(zip(nwp, labels)):
            features_x.append(row[0])
            features_y.append(row[1])
        self.logger.info("After matching NWP and labels: {} windows".format(len(features_x)))
        return features_x, features_y
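    # Worked example (assumed values): with time_step = 3 and a 5-row frame,
    # 5 - 3 + 1 = 3 overlapping windows are built over rows [0..2], [1..3] and
    # [2..4]; each window yields one NWP feature DataFrame in features_x and one
    # ['C_TIME', 'C_REAL_VALUE'] label DataFrame in features_y, 3 rows each.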
    def fill_train_data(self, unite):
        """Split the merged training data on long time gaps and interpolate the short ones."""
        unite['C_TIME'] = pd.to_datetime(unite['C_TIME'])
        unite['time_diff'] = unite['C_TIME'].diff()
        dt_short = pd.Timedelta(minutes=15)
        dt_long = pd.Timedelta(minutes=15 * self.opt.Model['how_long_fill'])
        data_train = self.missing_time_splite(unite, dt_short, dt_long)
        miss_points = unite[(unite['time_diff'] > dt_short) & (unite['time_diff'] < dt_long)]
        miss_number = miss_points['time_diff'].dt.total_seconds().sum(axis=0) / (15 * 60) - len(miss_points)
        self.logger.info("Re-check: total number of points that need interpolation: {}".format(miss_number))
        if miss_number > 0 and self.opt.Model["train_data_fill"]:
            data_train = self.data_fill(data_train)
        return data_train
    def missing_time_splite(self, df, dt_short, dt_long):
        """Split df into segments wherever the gap between rows reaches dt_long; log the shorter gaps."""
        n_long, n_short, n_points = 0, 0, 0
        start_index = 0
        dfs = []
        for i in range(1, len(df)):
            if df['time_diff'][i] >= dt_long:
                df_long = df.iloc[start_index:i, :-1]
                dfs.append(df_long)
                start_index = i
                n_long += 1
            if df['time_diff'][i] > dt_short:
                self.logger.info(f"{df['C_TIME'][i-1]} ~ {df['C_TIME'][i]}")
                points = df['time_diff'].dt.total_seconds()[i] / (60 * 15) - 1
                self.logger.info("Missing points: {}".format(points))
                if df['time_diff'][i] < dt_long:
                    n_short += 1
                    n_points += points
                    self.logger.info("Points to be filled: {}".format(points))
        dfs.append(df.iloc[start_index:, :-1])
        self.logger.info(f"Total rows: {len(df)}, gaps in the time series: {n_short}, of which longer gaps: {n_long}")
        self.logger.info("Total points to be filled: {}".format(n_points))
        return dfs
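    # Example (assumed values): with dt_short = 15 min and how_long_fill = 10
    # (dt_long = 150 min), a 45-minute gap stays inside the current segment and
    # counts 2 points to be interpolated, while a 300-minute gap closes the
    # segment and starts a new one instead of being filled.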
    def data_fill(self, dfs, test=False):
        """Resample each segment to a regular 15-minute grid and fill gaps by linear interpolation."""
        dfs_fill, inserts = [], 0
        for i, df in enumerate(dfs):
            df = rm_duplicated(df, self.logger)
            df1 = df.set_index('C_TIME', inplace=False)
            dff = df1.resample('15T').interpolate(method='linear')  # linear interpolation; other fill methods still need to be compared
            dff.reset_index(inplace=True)
            points = len(dff) - len(df1)
            dfs_fill.append(dff)
            self.logger.info("{} ~ {} has {} points, {} points filled.".format(dff.iloc[0, 0], dff.iloc[-1, 0], len(dff), points))
            inserts += points
        name = "prediction data" if test is True else "training set"
        self.logger.info("{} was split into {} segments, {} points filled in total".format(name, len(dfs_fill), inserts))
        return dfs_fill
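    # Example (assumed values): a segment sampled at 00:00, 00:15 and 01:00
    # gets a regular 15-minute index 00:00 .. 01:00 after resample('15T'); the
    # two inserted rows (00:30, 00:45) are filled by linear interpolation, so
    # `points` above would be 2 for that segment.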
    def train_valid_split(self, datax, datay, valid_rate, shuffle):
        """Split sample lists into training and validation subsets, optionally shuffled."""
        shuffle_index = np.random.permutation(len(datax))
        indexs = shuffle_index.tolist() if shuffle else np.arange(0, len(datax)).tolist()
        valid_size = int(len(datax) * valid_rate)
        # guard against valid_size == 0: indexs[-0:] would otherwise put every sample into the validation set
        valid_index = indexs[-valid_size:] if valid_size > 0 else []
        train_index = indexs[:-valid_size] if valid_size > 0 else indexs
        tx, vx, ty, vy = [], [], [], []
        for i, data in enumerate(zip(datax, datay)):
            if i in train_index:
                tx.append(data[0])
                ty.append(data[1])
            elif i in valid_index:
                vx.append(data[0])
                vy.append(data[1])
        return tx, vx, ty, vy
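

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). It drives the
    # class with a small synthetic 15-minute series; all parameter values and
    # column names below are assumptions for demonstration only.
    import logging

    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger("data_handler_demo")
    demo_args = {
        "Model": {"time_step": 16, "valid_data_rate": 0.2, "shuffle_train_data": True,
                  "how_long_fill": 10, "train_data_fill": True},
        "nwp_columns": ["C_TIME", "C_WS170"],
    }
    times = pd.date_range("2025-01-01", periods=200, freq="15T")
    demo_df = pd.DataFrame({
        "C_TIME": times,
        "C_WS170": np.random.rand(len(times)),
        "C_REAL_VALUE": np.random.rand(len(times)),
    })
    handler = DataHandler(demo_logger, demo_args)
    segments = handler.fill_train_data(demo_df)  # one gap-free segment, nothing to interpolate
    train_x, valid_x, train_y, valid_y = handler.get_train_data(segments[0])
    print(train_x.shape, valid_x.shape, train_y.shape, valid_y.shape)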