anomalyPCA.py

# Thermal runaway early warning: PCA anomaly index
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
# Select features
def makedataset1(df_data):
    # Drop status/diagnostic columns that are irrelevant to the anomaly index
    df_data = df_data.drop(['Unnamed: 0', '总电流[A]', 'GSM信号', '外电压', 'SOH[%]', '开关状态', '充电状态', '故障等级', '故障代码', '绝缘电阻', '上锁状态', '加热状态', '单体均衡状态', '总输出状态'], axis=1, errors='ignore')
    df_data = df_data.drop(["单体温度" + str(i) for i in range(1, 5)], axis=1, errors='ignore')
    df_data = df_data.drop(["其他温度" + str(i) for i in range(1, 7)], axis=1, errors='ignore')
    listV = [s for s in list(df_data) if '单体电压' in s]
    # Keep only rows whose cell voltages lie in a plausible range (2200-4800 mV)
    for i in range(1, len(listV) + 1):
        df_data = df_data[(df_data['单体电压' + str(i)] > 2200) & (df_data['单体电压' + str(i)] < 4800)]
    df_data = df_data[df_data['SOC[%]'] > 20]
    # Group by the truncated timestamp (first 15 characters) and average each group;
    # numeric_only=True keeps the string timestamp column from breaking mean() on recent pandas
    df_data['时间'] = df_data['时间戳'].str[0:15]
    data_set = df_data.groupby('时间').mean(numeric_only=True)
    # Savitzky-Golay smoothing of every aggregated channel
    for k in data_set.columns:
        data_set[k] = savgol_filter(data_set[k], 3, 2)
    return data_set
# Build new statistical features from the individual cell voltages
def makedataset2(df_data):
    data_set = makedataset1(df_data)
    listV = [s for s in list(df_data) if '单体电压' in s]
    cell_cols = ["单体电压" + str(i) for i in range(1, len(listV) + 1)]
    data_set["最低单体电压"] = data_set[cell_cols].min(axis=1)
    data_set["最高单体电压"] = data_set[cell_cols].max(axis=1)
    data_set["平均单体电压"] = data_set[cell_cols].mean(axis=1)
    data_set["最大单体压差"] = data_set["最高单体电压"] - data_set["最低单体电压"]
    data_set["低压差"] = data_set["平均单体电压"] - data_set["最低单体电压"]
    data_set = data_set.drop(cell_cols, axis=1)
    return data_set
# Standardization with a robust (median/IQR) scaler
def process(data_set):
    features = data_set.columns
    sX = RobustScaler(copy=True)
    data_set2 = data_set.copy()
    data_set2.loc[:, features] = sX.fit_transform(data_set2[features])
    return data_set2
# Anomaly index function: squared reconstruction error, min-max scaled to [0, 1]
def anomalyScores(originalDF, reducedDF):
    loss = np.sum((np.array(originalDF) - np.array(reducedDF)) ** 2, axis=1)
    loss = pd.Series(data=loss, index=originalDF.index)
    loss = (loss - np.min(loss)) / (np.max(loss) - np.min(loss))
    return loss
# Build the PCA model
def anomalyPCA(x_train_pro):
    n_components = 4
    whiten = True
    random_state = 2
    pca = PCA(n_components=n_components, whiten=whiten, random_state=random_state)
    pca.fit(x_train_pro)
    return pca
# Compute the PCA anomaly index for a prepared data set
def transform(df_data_pro, model, df_data):
    # Dimensionality reduction
    X_train = model.transform(df_data_pro)
    X_train = pd.DataFrame(data=X_train, index=df_data_pro.index)
    # Reconstruction
    X_train_inverse = model.inverse_transform(X_train)
    X_train_inverse = pd.DataFrame(data=X_train_inverse, index=df_data_pro.index)
    # Anomaly index, smoothed with a Savitzky-Golay filter
    anomalyScoresModel = anomalyScores(df_data_pro, X_train_inverse)
    anomalyScoresModel = savgol_filter(anomalyScoresModel, 15, 3)
    df_data2 = df_data.copy()
    # Use a fixed column name instead of repr(model), whose format varies between sklearn versions
    df_data2['anomalyScores_PCA'] = anomalyScoresModel
    return df_data2
# Detect outliers in the predicted scores relative to the training scores
def detect_outliers(data, pred, threshold=3):
    anomaly = data['anomalyScores_PCA']
    anomalypred = pred['anomalyScores_PCA']
    mean_d = np.mean(anomaly.values)
    std_d = np.std(anomaly.values)
    max_score = np.max(anomaly.values)
    outliers2 = pd.DataFrame()
    for k in anomalypred.index:
        z_score = (anomalypred[k] - mean_d) / std_d
        # Flag rows that are both a z-score outlier and above the maximum training score
        if (np.abs(z_score) > threshold) & (anomalypred[k] > max_score):
            # DataFrame.append was removed in pandas 2.0; use pd.concat instead
            outliers2 = pd.concat([outliers2, pred.loc[[k]]])
    return outliers2
# Train the models: one PCA per feature set, fitted on normal data
def train_model(data_train):
    x_train1 = makedataset1(data_train)
    x_train2 = makedataset2(data_train)
    x_train_pro1 = process(x_train1)
    x_train_pro2 = process(x_train2)
    pca1 = anomalyPCA(x_train_pro1)
    pca2 = anomalyPCA(x_train_pro2)
    res1 = transform(x_train_pro1, pca1, x_train1)
    res2 = transform(x_train_pro2, pca2, x_train2)
    return pca1, pca2, res1, res2
# Prediction: score new data with the trained PCA models
def prediction(data_test, pca1, pca2):
    x_test1 = makedataset1(data_test)
    x_test2 = makedataset2(data_test)
    x_test_pro1 = process(x_test1)
    x_test_pro2 = process(x_test2)
    pred1 = transform(x_test_pro1, pca1, x_test1)
    pred2 = transform(x_test_pro2, pca2, x_test2)
    return pred1, pred2
# Confirm an anomaly only when both feature sets flag outliers at the same time slots
def check_anomaly(outliers1, outliers2):
    outliers = pd.DataFrame()
    if (len(outliers1) > 0) & (len(outliers2) > 0):
        # Merge on the shared time index, keep high-SOC rows, and drop duplicated columns
        outliers = pd.merge(outliers1, outliers2, on='时间')
        outliers = outliers[outliers['SOC[%]_x'] > 45]
        outliers = outliers.drop(['总电压[V]_y', '单体压差_y', 'SOC[%]_y'], axis=1)
    return outliers
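
# Usage sketch (not part of the original module): how the functions above chain together.
# The file names and pandas.read_csv calls below are assumptions for illustration; the raw
# data is assumed to contain the Chinese column names referenced above
# ('时间戳', 'SOC[%]', '单体电压1'..'单体电压N', '总电压[V]', '单体压差', ...).
if __name__ == '__main__':
    # Hypothetical inputs: a period of known-normal operation and a recent window to screen
    data_train = pd.read_csv('normal_period.csv')   # assumed filename
    data_test = pd.read_csv('recent_window.csv')    # assumed filename

    # Fit one PCA model per feature set on the normal period
    pca1, pca2, res1, res2 = train_model(data_train)

    # Score the recent window with both models
    pred1, pred2 = prediction(data_test, pca1, pca2)

    # Flag time slots whose anomaly index is both a z-score outlier and above the training maximum
    outliers1 = detect_outliers(res1, pred1, threshold=3)
    outliers2 = detect_outliers(res2, pred2, threshold=3)

    # Raise a warning only when both feature sets agree and SOC is above 45%
    warnings = check_anomaly(outliers1, outliers2)
    print(warnings)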