#!/usr/bin/env python
# -*- coding: utf-8 -*-
# time: 2023/3/20 15:19
# file: figure.py
# author: David
# company: shenyang JY
import sys

import numpy as np
import matplotlib.pyplot as plt
  10. class Figure(object):
  11. def __init__(self, opt, logger, ds):
  12. self.opt = opt
  13. self.ds = ds
  14. self.logger = logger
  15. def draw(self, label_data, dq_data, predict_norm_data):
  16. # label_data = origin_data.data[origin_data.train_num + origin_data.start_num_in_test : ,
  17. # config.label_in_feature_index]
  18. dq_data = dq_data.reshape((-1, self.opt.output_size))
  19. label_data = label_data.reshape((-1, self.opt.output_size))
  20. # label_data 要进行反归一化
  21. label_data = label_data * self.ds.std[self.opt.label_in_feature_index] + \
  22. self.ds.mean[self.opt.label_in_feature_index]
  23. predict_data = predict_norm_data * self.ds.std[self.opt.label_in_feature_index] + \
  24. self.ds.mean[self.opt.label_in_feature_index] # 通过保存的均值和方差还原数据
  25. dq_data = dq_data * self.ds.std[0] + self.ds.mean[0]
  26. # predict_data = predict_norm_data
  27. assert label_data.shape[0] == predict_data.shape[0], "The element number in origin and predicted data is different"
  28. label_name = [self.ds.tables_column_name[i] for i in self.opt.label_in_feature_index]
  29. label_column_num = len(self.opt.label_columns)
  30. # label 和 predict 是错开config.predict_day天的数据的
  31. # 下面是两种norm后的loss的计算方式,结果是一样的,可以简单手推一下
  32. # label_norm_data = origin_data.norm_data[origin_data.train_num + origin_data.start_num_in_test:,
  33. # config.label_in_feature_index]
  34. # loss_norm = np.mean((label_norm_data[config.predict_day:] - predict_norm_data[:-config.predict_day]) ** 2, axis=0)
  35. # logger.info("The mean squared error of stock {} is ".format(label_name) + str(loss_norm))
  36. loss = np.sum((label_data - predict_data) ** 2)/len(label_data) # mse
  37. # loss = np.mean((label_data - predict_data) ** 2, axis=0)
  38. loss_sqrt = np.sqrt(loss) # rmse
  39. loss_norm = 1 - loss_sqrt / self.opt.cap
  40. # loss_norm = loss/(ds.std[opt.label_in_feature_index] ** 2)
  41. self.logger.info("The mean squared error of power {} is ".format(label_name) + str(loss_norm))
  42. # loss1 = np.sum((label_data - dq_data) ** 2) / len(label_data) # mse
  43. # loss_sqrt1 = np.sqrt(loss1) # rmse
  44. # loss_norm1 = 1 - loss_sqrt1 / self.opt.cap
  45. # self.logger.info("The mean squared error1 of power {} is ".format(label_name) + str(loss_norm1))
  46. if self.opt.is_continuous_predict:
  47. label_X = range(int((self.ds.data_num - self.ds.train_num - 32)))
  48. else:
  49. label_X = range(int((self.ds.data_num - self.ds.train_num - self.ds.start_num_in_test)/2))
  50. print("label_x = ", label_X)
  51. predict_X = [x for x in label_X]
  52. if not sys.platform.startswith('linux'): # 无桌面的Linux下无法输出,如果是有桌面的Linux,如Ubuntu,可去掉这一行
  53. for i in range(label_column_num):
  54. plt.figure(i+1) # 预测数据绘制
  55. plt.plot(label_X, label_data[:, i], label='label', color='b')
  56. plt.plot(predict_X, predict_data[:, i], label='predict', color='g')
  57. plt.plot(predict_X, dq_data[:, i], label='dq', color='y')
  58. # plt.title("Predict actual {} power with {}".format(label_name[i], self.opt.used_frame))
  59. self.logger.info("The predicted power {} for the last {} point(s) is: ".format(label_name[i], self.opt.predict_points) +
  60. str(np.squeeze(predict_data[-self.opt.predict_points:, i])))
  61. if self.opt.do_figure_save:
  62. plt.savefig(self.opt.figure_save_path+"{}predict_{}_with_{}.png".format(self.opt.continue_flag, label_name[i], opt.used_frame))
  63. plt.show()