# Source: data_analysis.py (extraction artifacts removed)
  1. # !usr/bin/env python
  2. # -*- coding:utf-8 _*-
  3. """
  4. @Author:Lijiaxing
  5. @File:data_analysis.py
  6. @Time:2023/4/24 15:16
  7. """
  8. import os.path
  9. import matplotlib.colors as mcolors
  10. colors = list(mcolors.XKCD_COLORS.keys())
  11. import pandas as pd
  12. # from mpl_toolkits.basemap import Basemap
  13. from scipy.signal import savgol_filter
  14. import numpy as np
  15. import matplotlib.pyplot as plt
  16. from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
  17. from sklearn.metrics import silhouette_samples, silhouette_score
  18. def paint_others(y):
  19. """ 绘制其他数据 """
  20. plt.plot([j for j in range(y)], y)
  21. # 添加标题和标签
  22. plt.xlabel('x')
  23. plt.ylabel('y')
  24. # 显示图形
  25. plt.show()
  26. def compute_cos_similarity(a, b):
  27. """
  28. 计算两个向量的余弦相似度
  29. :param a: 向量a
  30. :param b: 向量b
  31. :return: 余弦相似度值
  32. """
  33. return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
  34. def compute_pearsonr(a):
  35. """
  36. 计算数据皮尔逊相关系数并返回相似度矩阵
  37. :param a: 数据格式为n*m的矩阵,n为数据个数,m为数据维度
  38. :return: 返回相似度矩阵,数据格式为n*n的矩阵
  39. """
  40. return np.corrcoef(a)
  41. def compute_distance(a, b):
  42. """
  43. 计算两个向量的欧式距离
  44. :param a:
  45. :param b:
  46. :return: 返回两个向量的欧式距离
  47. """
  48. return np.linalg.norm(a - b)
  49. def hierarchical_clustering(data, threshold, similarity_func):
  50. """
  51. 层次聚类,使用工具包scipy.cluster.hierarchy中的linkage和fcluster函数进行层次聚类
  52. :param data: 二维数据,格式为n*m的矩阵,n为数据个数,m为数据维度
  53. :param threshold: 阈值,当两个数据的距离小于阈值时,将两个数据归为一类,阈值为根据相似度矩阵层次聚类后的类别距离阈值,可根据需求进行调整,可大于1
  54. :param similarity_func: 相似度计算函数,用于计算两个数据的相似度,可以进行替换,若替换为计算距离的函数需对内部进行修改
  55. :return: 返回聚类结果,格式为n*1的矩阵,n为数据个数,每个数据的值为该数据所属的类别
  56. """
  57. # 计算数据的相似度矩阵
  58. similarity_matrix = similarity_func(data)
  59. # 计算数据的距离矩阵
  60. distance_matrix = 1 - similarity_matrix
  61. # 进行层次聚类返回聚类结果
  62. Z = linkage(distance_matrix, method='ward')
  63. # 根据相似度阈值获取聚类结果
  64. clusters = fcluster(Z, t=threshold, criterion='distance')
  65. print(clusters)
  66. # 画出层次聚类树形结构
  67. # fig = plt.figure(figsize=(5, 3))
  68. # dn = dendrogram(Z)
  69. # plt.show()
  70. # clusters[42] = 1
  71. silhouette = silhouette_samples(np.abs(distance_matrix), clusters, metric='euclidean')
  72. silhouette1 = silhouette_score(np.abs(distance_matrix), clusters, metric='euclidean')
  73. print(f"平均轮廓系数为:{silhouette1}, 单个样本的轮廓系数:{silhouette}")
  74. return clusters
class DataAnalysis:
    """
    Wind-turbine data analysis: loading, normalization, differencing,
    FFT filtering, hierarchical clustering and plotting.
    """
    def __init__(self, data_length, data_start, data_end):
        """
        Initialize the analysis; immediately loads the data and computes
        the power/wind-speed differences.
        :param data_length: length of the analysed data segment
        :param data_start: start index of the analysed data segment
        :param data_end: end index of the analysed data segment
        """
        # Original turbine power data after FFT filtering
        self.ori_turbine_power = None
        self.ori_turbine_fft = None
        # Raw turbine data segment (wind speed)
        self.ori_turbine_pic = None
        # Clustering result (labels, one per turbine)
        self.cluster = None
        # Smoothed turbine difference series
        self.smooth_turbine_diff = None
        # Sign (+1/-1/0) of each difference step
        self.diff_change = None
        # First difference of each turbine's series
        self.turbine_diff = None
        # All turbine data (dict keyed by turbine id)
        self.turbine = None
        # Order of turbine ids as loaded
        self.turbine_id = None
        # b1b4 = [142, 143, 144, 145]
        # self.turbine_id = [id for id in self.turbine_id if id not in b1b4]
        # 15-minute power data (only set by commented-out code in load_data)
        self.power_15min = None
        # Turbine latitude/longitude info (only used by commented-out code)
        self.info = None
        # Length of data used
        self.data_length = data_length
        # Start index of data used
        self.data_start = data_start
        # End index of data used
        self.data_end = data_end
        # Load the data (with z-score normalization)
        self.load_data(normalize=True)
        # Compute the turbine differences
        self.compute_turbine_diff()
    def load_data(self, normalize=False):
        """
        Load turbine data from './turbine_dict_common.pickle' and rename the
        Chinese columns to C_TIME / C_WS / C_REAL_VALUE.
        :param normalize: when True, z-score normalize the numeric columns
        :return: None
        """
        import pickle
        with open('./turbine_dict_common.pickle', 'rb') as f:
            turbine_dict = pickle.load(f)
        self.turbine_id = list(turbine_dict.keys())
        # self.info = pd.read_csv('../data-process/data/风机信息.csv', encoding='utf-8')
        # power_15min = pd.read_csv('../data/power_15min.csv')
        # for i in range(len(power_15min)):
        #     if power_15min.loc[i, 'C_REAL_VALUE'] == -9999:
        #         # sentinel value so missing data is visible on the curve
        #         power_15min.loc[i, 'C_REAL_VALUE'] = -34.56789
        # self.power_15min = power_15min
        # NOTE(review): turbine_path and turbines are currently unused
        # (only referenced by the commented-out CSV loader below).
        turbine_path = '../data-process/data/output_filtered_csv_files/turbine-{}.csv'
        self.turbine, turbines = {}, []
        for i in self.turbine_id:
            group = turbine_dict[i]
            group.rename(columns={"时间": "C_TIME", "平均风速": "C_WS", "平均有功功率": "C_REAL_VALUE"}, inplace=True)
            group = group.loc[:, ['C_TIME', 'C_WS', 'C_REAL_VALUE']]
            self.turbine[i] = group
        # for i in self.turbine_id:
        #     self.turbine[i] = pd.read_csv(turbine_path.format(i))[20:].reset_index(drop=True)
        if normalize is True:
            self.normalize()
    def normalize(self):
        """
        Z-score normalize the numeric columns (C_WS, C_REAL_VALUE) using the
        mean/std pooled across all turbines; C_TIME is preserved.
        :return: the normalized turbine dict
        """
        turbines = [self.turbine[i].values[:, 1:].astype(np.float32) for i in self.turbine_id]
        turbines = np.vstack(turbines)
        mean, std = np.mean(turbines, axis=0), np.std(turbines, axis=0)
        for i in self.turbine_id:
            c_time = self.turbine[i]['C_TIME']
            self.turbine[i] = (self.turbine[i].iloc[:, 1:] - mean) / std
            self.turbine[i].insert(loc=0, column='C_TIME', value=c_time)
        return self.turbine
    def compute_turbine_diff(self):
        """
        Compute first differences of each turbine's wind speed, the sign of
        each difference step, the FFT-filtered signals, and a smoothed
        version of the differences. Results go into self.turbine_diff,
        self.diff_change, self.ori_turbine_fft, self.smooth_turbine_diff.
        :return: None
        """
        turbine_diff = []
        ori_turbine_pic = []
        ori_turbine_power = []
        for turbine_i in self.turbine_id:
            # one extra sample so the diff has data_end - data_start entries
            ori = np.array(self.turbine[turbine_i]['C_WS'].values[self.data_start:self.data_end + 1])
            diff_array = np.diff(ori)
            # lower std of the diff == smoother series
            smoothness_value = np.std(diff_array)
            print("turbine-{}的平滑度是:{}".format(turbine_i, round(smoothness_value, 2)))
            turbine_diff.append(diff_array)
            ori_turbine_pic.append(self.turbine[turbine_i]['C_WS'].values[self.data_start:self.data_end])
            ori_turbine_power.append(self.turbine[turbine_i]['C_REAL_VALUE'].values[self.data_start:self.data_end])
        self.ori_turbine_power = ori_turbine_power
        self.ori_turbine_pic = ori_turbine_pic
        self.turbine_diff = turbine_diff
        # Encode the direction of each step: +1 up, -1 down, 0 flat.
        diff_change = []
        for diff_i in turbine_diff:
            single_diff_change = []
            for diff_i_i in diff_i:
                if diff_i_i > 0:
                    single_diff_change.append(1)
                elif diff_i_i < 0:
                    single_diff_change.append(-1)
                else:
                    single_diff_change.append(0)
            diff_change.append(single_diff_change)
        self.diff_change = diff_change
        # FFT-filter every turbine segment (turbine_fft takes a 1-based index)
        self.ori_turbine_fft = [self.turbine_fft(i + 1) for i in range(len(self.ori_turbine_pic))]
        # Smooth the differences (Savitzky-Golay)
        self.turbine_smooth(window_size=21)
    def paint_map(self):
        """
        Plot turbine latitude/longitude on a world map.
        NOTE(review): the Basemap import at the top of the file is commented
        out, so calling this raises NameError — confirm before use.
        :return: None
        """
        lats = self.info['纬度'].values
        lons = self.info['经度'].values
        map = Basemap()
        # draw coastlines and country borders
        map.drawcoastlines()
        map.drawcountries()
        # draw meridians/parallels
        map.drawmeridians(range(0, 360, 30))
        map.drawparallels(range(-90, 90, 30))
        # plot the turbine locations
        x, y = map(lons, lats)
        map.plot(x, y, 'bo', markersize=10)
        # show the figure
        plt.show()
    def paint_power15min(self):
        """
        Plot the 15-minute power curve.
        NOTE(review): self.power_15min is only populated by commented-out
        code in load_data, so it may still be None here — verify before use.
        :return: None
        """
        plt.plot(self.power_15min['C_REAL_VALUE'])
        # title and axis labels
        plt.title('Data Time Change Curve')
        plt.xlabel('Date')
        plt.ylabel('Value')
        # show the figure
        plt.show()
    def paint_lats_lons(self):
        """
        Scatter plot of turbine latitude/longitude with id annotations.
        NOTE(review): relies on self.info, which is only set by commented-out
        code in load_data.
        :return: None
        """
        x = self.info['纬度'].values
        y = self.info['经度'].values
        # scatter plot with one annotation per turbine
        fig, ax = plt.subplots()
        plt.scatter(x, y)
        for i, txt in enumerate(self.info['id'].values):
            ax.annotate(txt, (x[i], y[i]))
        # axis labels
        plt.xlabel('lats')
        plt.ylabel('lons')
        # show the figure
        plt.show()
    def similarity_score(self, turbine_diff, threshold=0.5):
        """
        Cosine-similarity scores between all turbine pairs; returns, per
        turbine, the indices whose similarity exceeds the threshold.
        NOTE(review): the turbine count 49 is hard-coded here — confirm it
        matches len(self.turbine_id).
        :param turbine_diff: n*m matrix — n series, each of dimension m
        :param threshold: similarity threshold
        :return: dict mapping turbine index -> list of similar turbine indices
        """
        similarity = {i: [] for i in range(49)}
        similarity_index = {i: [] for i in range(49)}
        for turbine_i in range(49):
            for turbine_j in range(49):
                cos_similarity = compute_cos_similarity(turbine_diff[turbine_i], turbine_diff[turbine_j])
                similarity[turbine_i].append(cos_similarity)
                if cos_similarity > threshold:
                    similarity_index[turbine_i].append(turbine_j)
        return similarity_index
    def mapping_turbines(self):
        # Print each turbine id together with its assigned cluster label.
        for a, b in zip(self.turbine_id, self.cluster):
            print("风机编号:{},类别:{}".format(a, b))
    def paint_turbine(self, paint_default=True):
        """
        Plot per-cluster aggregate curves (most plotting is commented out;
        currently only smoothness per cluster is printed).
        :param paint_default: default True — originally triggered per-cluster
            line plots (see commented-out tail of this method)
        :return: None
        """
        # y = self.info['纬度'].values
        # x = self.info['经度'].values
        #
        # fig, ax = plt.subplots(figsize=(15, 15))
        #
        # plt.scatter(x, y, c=self.cluster)
        # for i, txt in enumerate(self.info['C_ID'].values):
        #     ax.annotate(txt, (x[i], y[i]))
        # title and axis labels
        # plt.xlabel('lons')
        # plt.ylabel('lats')
        # plt.legend()
        #
        # # show the figure
        # plt.savefig('analysis_img/turbine_cluster.png')
        # plt.show()
        plt.figure(figsize=(60, 40))
        cmap = plt.get_cmap('viridis')
        linestyle = ['solid', 'dashed', 'dotted', 'dashdot']
        # Cluster labels from fcluster are 1-based, hence the i + 1 below.
        for i in range(max(self.cluster)):
            cluster, cluster_fft, cluster_power, = [], [], []
            for j, item in enumerate(self.cluster):
                if item == i + 1:
                    cluster.append(self.ori_turbine_pic[j])
                    cluster_fft.append(self.ori_turbine_fft[j])
                    cluster_power.append(self.ori_turbine_power[j])
            # Average the member curves of this cluster.
            cluster_power = np.average(cluster_power, axis=0)
            cluster_fft = np.average(cluster_fft, axis=0)
            cluster = np.average(cluster, axis=0)
            diff_array = np.diff(cluster)
            smoothness_value = np.std(diff_array)
            print("聚类-{}的平滑度是:{}".format(i+1, smoothness_value))
            # NOTE(review): cmap(i*200) — Colormap called with a large int
            # saturates for i >= 1; probably meant a 0-1 fraction. Confirm.
            color = cmap(i*200)
            plt.figure(1)
            # plt.subplot(max(self.cluster), 1, 1)
            # print("----", cluster, linestyle[i])
            # plt.plot([j for j in range(len(cluster))], cluster, color=color, label='cluster'+str(i))
            # plt.subplot(max(self.cluster), 1, 2)
            # plt.plot([j for j in range(len(cluster_fft))], cluster_fft, color=color, label='cluster'+str(i))
            # ws_power_dict = {}
            # for c, p in zip(cluster, cluster_power):
            #     ws_power_dict.setdefault(round(c, 2), []).append(round(p, 2))
            #
            # for key, value in ws_power_dict.items():
            #     ws_power_dict[key] = round(np.average(value), 2)
            # print(ws_power_dict)
            # plt.scatter(cluster, cluster_power, color=color, label='cluster' + str(i),
            #             linestyle=linestyle[i], s=1, alpha=0.2)
            # legend
            # plt.legend()
            # # show the figure
            # plt.savefig('./clusters.png')
            # plt.show()
            # if paint_default:
            #     for i in range(max(self.cluster)):
            #         self.paint_turbine_k(i + 1)  # plot every turbine curve in the cluster
    def turbine_smooth(self, window_size=50):
        """
        Smooth each turbine's difference series with a Savitzky-Golay filter.
        Parameters:
            window_size -- filter window length, integer
        Result:
            self.smooth_turbine_diff -- list of smoothed numpy arrays
        """
        # weights = np.repeat(1.0, window_size) / window_size
        smooth_data = []
        for turbine_diff_i in self.turbine_diff:
            smooth_y = savgol_filter(turbine_diff_i, window_length=window_size, polyorder=3)
            smooth_data.append(smooth_y)
            # smooth_data.append(np.convolve(turbine_diff_i, weights, 'valid'))
        self.smooth_turbine_diff = smooth_data
    def paint_turbine_k(self, k):
        """
        Plot the FFT-filtered curves of every turbine in cluster k.
        :param k: cluster label (1-based)
        :return: None
        """
        pic_label = []
        y = []
        plt.figure(figsize=(20, 10))
        cmap = plt.get_cmap('viridis')
        for i, item in enumerate(self.cluster):
            if item == k:
                pic_label.append('turbine-' + str(self.turbine_id[i]))
                y.append(self.ori_turbine_fft[i])
        for i in range(len(y)):
            color = cmap(i / 10)
            plt.plot([j for j in range(len(y[i]))], y[i], color=color, label=pic_label[i])
        # labels and title
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Cluster {}'.format(k))
        # legend
        plt.legend()
        # save and show the figure
        plt.savefig('analysis_img/cluster/cluster_{}.png'.format(k))
        plt.show()
    def turbine_fft(self, k):
        """
        FFT-filter the k-th turbine's raw segment: keep only frequency
        components above the 98th percentile of spectral magnitude, then
        inverse-transform back to the time domain.
        :param k: 1-based turbine index in load order
        :return: the filtered signal (complex ndarray from np.fft.ifft)
        """
        y = self.ori_turbine_pic
        t = np.linspace(0, 1, self.data_length)
        signal = y[k - 1]
        # forward FFT
        freq = np.fft.fftfreq(len(signal), t[1] - t[0])
        spectrum = np.fft.fft(signal)
        spectrum_abs = np.abs(spectrum)
        # keep only the strongest 2% of components
        threshold = np.percentile(spectrum_abs, 98)
        indices = spectrum_abs > threshold
        spectrum_clean = indices * spectrum
        # inverse FFT
        signal_clean = np.fft.ifft(spectrum_clean)
        # plt.figure(figsize=(20, 10))
        #
        # # time-domain signal
        # plt.subplot(4, 1, 1)
        # plt.plot(t, signal)
        # plt.title(self.turbine_id[k-1])
        #
        # # frequency-domain signal
        # plt.subplot(4, 1, 2)
        # plt.plot(freq, np.abs(spectrum))
        #
        # # filtered frequency-domain signal
        # plt.subplot(4, 1, 3)
        # plt.plot(freq, np.abs(spectrum_clean))
        #
        # # filtered time-domain signal
        # plt.subplot(4, 1, 4)
        # plt.plot(t, signal_clean)
        #
        # plt.savefig('analysis_img/fft/{}_turbine_fft.png'.format(self.turbine_id[k-1]))
        # plt.show()
        return signal_clean
    def paint_double(self, i, j):
        """
        Compare the FFT-filtered curves of two turbines in one figure.
        :param i: 1-based turbine index in load order
        :param j: 1-based turbine index in load order
        :return: None
        """
        y = self.ori_turbine_fft
        x = [index for index in range(self.data_length)]
        data_i = y[i - 1]
        data_j = y[j - 1]
        plt.figure(figsize=(20, 10))
        plt.plot(x, data_i, label='turbine {}'.format(self.turbine_id[i - 1]), linestyle='solid')
        plt.plot(x, data_j, label='turbine {}'.format(self.turbine_id[j - 1]), linestyle='dashed')
        plt.title('{} and {}'.format(i, j))
        plt.legend()
        plt.savefig('analysis_img/{}_{}_turbine.png'.format(self.turbine_id[i - 1], self.turbine_id[j - 1]))
        plt.show()
    def process_ori_data(self):
        """
        Full pipeline on the raw data: cluster the FFT-filtered signals,
        then plot the cluster results.
        :return: None
        """
        self.turbine_clusters(self.ori_turbine_fft)
        self.paint_turbine()
    def turbine_clusters(self, data=None):
        """
        Cluster the turbine data; result is stored in self.cluster.
        :param data: optional n*m matrix (n series, each of dimension m);
            when None, self.turbine_diff is clustered instead — note the two
            branches use different distance thresholds (1.4 vs 0.6)
        :return: None
        """
        if data is None:
            cluster = hierarchical_clustering(self.turbine_diff, threshold=1.4,
                                              similarity_func=compute_pearsonr)  # hierarchical clustering
        else:
            cluster = hierarchical_clustering(data, threshold=0.6,
                                              similarity_func=compute_pearsonr)
        self.cluster = cluster
        # cluster variable could be persisted here
        # from cluster_analysis import cluster_power_list_file, cluster_power_list_folder
        #
        # output_path = '../data-process/data/cluster_power/'
        # cluster_power_list_file(self.cluster, self.turbine_id,
        #                         input_path='../data-process/data/output_filtered_csv_files/', output_path=output_path)
        # cluster_power_list_folder(self.cluster, self.turbine_id, input_path='../data-process/data/continuous_data/',
        #                           output_path=output_path)
  447. if __name__ == '__main__':
  448. import pickle
  449. with open('./turbine_dict.pickle', 'rb') as f:
  450. turbine_dict = pickle.load(f)
  451. # number, dt = 0, []
  452. # for key, turbine in turbine_dict.items():
  453. # if number == 0:
  454. # dt = turbine['时间']
  455. # number += 1
  456. # else:
  457. # dt = pd.to_datetime(list(set(dt) & set(turbine['时间'])))
  458. # for key, turbine in turbine_dict.items():
  459. # turbine_dict[key] = turbine[turbine['时间'].isin(dt)]
  460. # with open('./turbine_dict_common.pickle', 'wb') as f:
  461. # pickle.dump(turbine_dict, f)
  462. data_analysis = DataAnalysis(data_length=34550,
  463. data_start=0,
  464. data_end=34550)
  465. data_analysis.process_ori_data()
  466. data_analysis.mapping_turbines()
  467. # data_analysis.paint_double(1, 56)