faultclass.py

import datetime
import random
from random import shuffle

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from keras.layers import Activation, Dense, Dropout, GRU, Input
from keras.models import Model
from keras.optimizers import adam_v2
#### Process1 - Prediction - Model1 + Model2 ####
# Step1 Features
# Model1
def features1(dataset2):
    dataset2 = dataset2.drop(['GSM信号','故障等级','故障代码','开关状态','绝缘电阻','外电压','总输出状态','上锁状态',
                              '加热状态','单体均衡状态','充电状态','SOH[%]','SOC[%]','总电流[A]'], axis=1, errors='ignore')
    cellvolt_list = [s for s in list(dataset2) if '单体电压' in s]
    celltemp_name = [s for s in list(dataset2) if '温度' in s]
    dataset2 = dataset2.drop(celltemp_name, axis=1)
    dataset2['volt_max'] = dataset2[cellvolt_list].max(axis=1)
    dataset2['volt_min'] = dataset2[cellvolt_list].min(axis=1)
    dataset2 = dataset2.drop(cellvolt_list, axis=1)
    dataset2.reset_index(drop=True, inplace=True)
    return dataset2
# Model2
def features2(dataset2):
    dataset2 = dataset2.drop(['GSM信号','故障等级','故障代码','开关状态','绝缘电阻','外电压','总输出状态','上锁状态',
                              '加热状态','单体均衡状态','充电状态','SOH[%]','SOC[%]','单体压差','总电压[V]'], axis=1, errors='ignore')
    cellvolt_list = [s for s in list(dataset2) if '单体电压' in s]
    celltemp_name = [s for s in list(dataset2) if '单体温度' in s]
    celltemp_name2 = [s for s in list(dataset2) if '其他温度' in s]
    dataset2 = dataset2.drop(cellvolt_list + celltemp_name2, axis=1)
    dataset2['temp_max'] = dataset2[celltemp_name].max(axis=1)
    dataset2['temp_min'] = dataset2[celltemp_name].min(axis=1)
    dataset2['temp_diff'] = dataset2['temp_max'] - dataset2['temp_min']
    dataset2 = dataset2.drop(celltemp_name, axis=1)
    dataset2.reset_index(drop=True, inplace=True)
    return dataset2
# Step2 Splits
def split(df_bms_tot):
    # Start a new segment whenever the time gap exceeds 600 s or the vehicle (sn) changes.
    df_bms_tot['split'] = 0
    for k in range(1, len(df_bms_tot)):
        timek = df_bms_tot.loc[k, '时间戳']
        timek1 = df_bms_tot.loc[k-1, '时间戳']
        timek = datetime.datetime.strptime(timek, '%Y-%m-%d %H:%M:%S')
        timek1 = datetime.datetime.strptime(timek1, '%Y-%m-%d %H:%M:%S')
        deltatime = (timek - timek1).total_seconds()
        if (deltatime > 600) | (df_bms_tot.loc[k, 'sn'] != df_bms_tot.loc[k-1, 'sn']):
            df_bms_tot.loc[k, 'split'] = df_bms_tot.loc[k-1, 'split'] + 1
        else:
            df_bms_tot.loc[k, 'split'] = df_bms_tot.loc[k-1, 'split']
    return df_bms_tot
# Step3 MakeDataset: TimeSeries
def makedataset(dataset):
    df_bms = pd.DataFrame()
    for split_id in list(set(dataset['split'])):
        set2 = dataset[dataset['split'] == split_id]
        set2.reset_index(drop=True, inplace=True)
        data_set = pd.DataFrame()
        start = set2.loc[0, '时间戳']
        end = set2.loc[len(set2)-1, '时间戳']
        data_set['Time'] = pd.date_range(start=start, end=end, freq='S')  # one record per second
        data_set['Time'] = list(map(lambda x: str(x), list(data_set['Time'])))
        dfbms = pd.merge(data_set, set2, left_on='Time', right_on='时间戳', how='left')
        dfbms = dfbms.ffill().bfill()
        dfbms = dfbms.drop(['时间戳'], axis=1)
        # Truncate the seconds digit to resample onto a 10-second grid, keeping the last record per step.
        dfbms['Time'] = list(map(lambda x: x[:18] + '0', list(dfbms['Time'])))
        dfbms.drop_duplicates(subset='Time', keep='last', inplace=True)
        df_bms = pd.concat([df_bms, dfbms])
    df_bms.reset_index(drop=True, inplace=True)
    return df_bms
# Step4 Scaler
def scaler_pred(df_bms, scaler):
    Xtest = df_bms.drop(['Time', 'sn', 'split'], axis=1)
    Xsc_colnames = list(Xtest.columns)
    Xtsc = scaler.transform(np.array(Xtest))
    Xtsc = pd.DataFrame(Xtsc)
    Xtsc.columns = Xsc_colnames
    return Xtsc
# Step5 MakeIndex
def make_index(train):
    # Row index at which each segment starts, plus the total length as the final boundary.
    indextr = []
    for i in list(set(train['split'])):
        tr = train[train['split'] == i].index.tolist()
        indextr.append(min(tr))
    indextr = sorted(indextr)
    indextr.append(len(train))
    return indextr
# Step6 CreateWindows
def create_win_pred(X2, Xtest, index, time_steps=12):
    # Slide a window of `time_steps` rows over each segment; `conf` keeps the matching raw rows.
    conf = pd.DataFrame()
    a = []
    for k in range(1, len(index)):
        dataset = X2[index[k-1]:index[k]]
        dataset = dataset.reset_index(drop=True)
        dataset2 = Xtest[index[k-1]:index[k]]
        dataset2 = dataset2.reset_index(drop=True)
        if len(dataset) > time_steps:
            dataX = []
            win_step = []
            for i in range(len(dataset) - time_steps):
                win_step.append(i)
                v1 = dataset.iloc[i:(i + time_steps)].values
                dataX.append(v1)
            test = dataset2.iloc[:len(dataset) - time_steps]
            dataX2 = np.array(dataX, dtype='float32')
            conf = pd.concat([conf, test])
            a.append(dataX2)
    if len(a) > 0:
        aa = np.vstack(a)
    else:
        aa = []
    conf.reset_index(drop=True, inplace=True)
    return aa, conf
# Step7 Prediction
def prediction(model, cc, conf, col):
    # Pick the class with the highest predicted score for each window.
    predict_dd = model.predict(cc)
    df_pred = pd.DataFrame(predict_dd)
    df_pred.columns = col
    conf['pred'] = df_pred.idxmax(axis=1)
    return conf
# Step8 Output
def makeres(res, end_time):
    df_res = pd.DataFrame(columns=['product_id', 'start_time', 'end_time', 'fault_class', 'update_time'])
    result_faults = res[res['pred'] != '正常']
    list_faults = list(set(list(result_faults['pred'])))
    for fault in list_faults:
        res_faults = result_faults[result_faults['pred'] == fault]
        res_faults.reset_index(drop=True, inplace=True)
        update_time = str(res_faults.loc[len(res_faults)-1, 'Time'])
        end = datetime.datetime.strptime(str(res_faults.loc[len(res_faults)-1, 'Time']), '%Y-%m-%d %H:%M:%S')
        end_time = datetime.datetime.strptime(str(end_time), '%Y-%m-%d %H:%M:%S')
        # If the last faulty record falls within 15 minutes of the query end time,
        # treat the fault as still open and use the placeholder end time.
        if (end_time - end).total_seconds() < 900:
            res_faults.loc[len(res_faults)-1, 'Time'] = '0000-00-00 00:00:00'
        df_res = pd.concat([df_res, pd.DataFrame({'product_id': [res_faults.loc[0, 'sn']],
                                                  'start_time': [str(res_faults.loc[0, 'Time'])],
                                                  'end_time': [str(res_faults.loc[len(res_faults)-1, 'Time'])],
                                                  'fault_class': [res_faults.loc[0, 'pred']],
                                                  'update_time': [update_time]})])
    return df_res
# Step9 Process
def pred(data_fea, model, scaler, col, end_time, time_steps):
    df_res = pd.DataFrame()
    fea = split(data_fea)
    f = makedataset(fea)
    sc = scaler_pred(f, scaler)
    index = make_index(f)
    dataX, conf = create_win_pred(sc, f, index, time_steps=time_steps)
    if len(dataX) > 0:
        res = prediction(model, dataX, conf, col)
        df_res = makeres(res, end_time)
    return df_res
# Step10 Merge
def arrange(result, result_final):
    # Reconcile newly detected faults with the previously stored fault record (result_final):
    # extend it if the new fault starts within an hour of its last update, otherwise close it
    # at that update time.
    result.reset_index(drop=True, inplace=True)
    res_update = pd.DataFrame()
    res_new = result.copy()
    if len(result) > 0:
        st = datetime.datetime.strptime(str(result.loc[0, 'start_time']), '%Y-%m-%d %H:%M:%S')
        end = datetime.datetime.strptime(str(result_final['update_time']), '%Y-%m-%d %H:%M:%S')
        if (st - end).total_seconds() < 3600:
            result_final['end_time'] = result.loc[0, 'end_time']
            result_final['update_time'] = result.loc[0, 'update_time']
            res_update = result_final.copy()
            res_new.drop(result.index, inplace=True)
        else:
            result_final['end_time'] = result_final['update_time']
            res_update = result_final.copy()
            res_new.drop(result.index, inplace=True)
    else:
        result_final['end_time'] = result_final['update_time']
        res_update = result_final.copy()
    return res_new, res_update
def arrange2(dataorg, df_res, time_stepsi):
    # Merge with the stored record if there is one, then drop closed faults that
    # lasted less than `time_stepsi` seconds.
    res_new = df_res.copy()
    res_update = pd.DataFrame()
    if len(dataorg) > 0:
        res_new, res_update = arrange(df_res, dataorg)
    if len(res_new) > 0:
        for i in range(len(res_new)):
            if res_new.loc[i, 'end_time'] != '0000-00-00 00:00:00':
                st1 = datetime.datetime.strptime(str(res_new.loc[i, 'start_time']), '%Y-%m-%d %H:%M:%S')
                end1 = datetime.datetime.strptime(str(res_new.loc[i, 'end_time']), '%Y-%m-%d %H:%M:%S')
                if (end1 - st1).total_seconds() < time_stepsi:
                    res_new.drop([i], axis=0, inplace=True)
    if len(res_update) > 0:
        if res_update['end_time'] != '0000-00-00 00:00:00':
            st2 = datetime.datetime.strptime(str(res_update['start_time']), '%Y-%m-%d %H:%M:%S')
            end2 = datetime.datetime.strptime(str(res_update['end_time']), '%Y-%m-%d %H:%M:%S')
            res_update = pd.DataFrame({'product_id': [res_update['product_id']],
                                       'start_time': [str(res_update['start_time'])],
                                       'end_time': [str(res_update['end_time'])],
                                       'fault_class': [res_update['fault_class']],
                                       'update_time': [res_update['update_time']]})
            if (end2 - st2).total_seconds() < time_stepsi:
                res_update = pd.DataFrame()
        else:
            res_update = pd.DataFrame({'product_id': [res_update['product_id']],
                                       'start_time': [str(res_update['start_time'])],
                                       'end_time': [str(res_update['end_time'])],
                                       'fault_class': [res_update['fault_class']],
                                       'update_time': [res_update['update_time']]})
    return res_new, res_update
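# --- Usage sketch (not part of the original file) ---------------------------------------------
# A minimal example of how the prediction pipeline above might be driven. The model/scaler file
# names, the input CSV, the class list `col`, and every parameter value are assumptions made for
# illustration only; they must match whatever was used when the model was actually trained.
def _example_predict():
    import joblib
    from keras.models import load_model

    model1 = load_model('model1.h5')             # assumed: trained GRU model saved to disk
    scaler1 = joblib.load('scaler1.pkl')         # assumed: StandardScaler fitted on the training features
    raw = pd.read_csv('bms_snippet.csv')         # hypothetical BMS extract with '时间戳' and 'sn' columns
    data_fea = features1(raw)                    # or features2(raw), depending on the model
    col = ['正常', 'fault_A', 'fault_B']          # class order returned by xy() at training time (assumed)
    df_res = pred(data_fea, model1, scaler1, col, end_time='2021-01-01 12:00:00', time_steps=12)
    # Merge with previously stored open faults (none here) and drop faults shorter than 120 s.
    res_new, res_update = arrange2(pd.DataFrame(), df_res, time_stepsi=120)
    return res_new, res_update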
#################################################################################################################################
#### Process1 - New Model ####
# Step1 Features Filter
def features_filtre(dataset2, cols):
    dataset2 = dataset2.drop(['GSM信号','故障等级','故障代码','开关状态','绝缘电阻','外电压','总输出状态','上锁状态',
                              '加热状态','单体均衡状态','充电状态','SOH[%]'], axis=1, errors='ignore')
    cellvolt_list = [s for s in list(dataset2) if '单体电压' in s]
    celltemp_name = [s for s in list(dataset2) if '单体温度' in s]
    celltemp_name2 = [s for s in list(dataset2) if '其他温度' in s]
    dataset2['volt_max'] = dataset2[cellvolt_list].max(axis=1)
    dataset2['volt_min'] = dataset2[cellvolt_list].min(axis=1)
    dataset2['volt_mean'] = round(dataset2[cellvolt_list].mean(axis=1), 3)  # per-row mean
    dataset2['volt_sigma'] = list(dataset2[cellvolt_list].apply(lambda x: np.std(x.values), axis=1))
    cell_volt_max = list(dataset2[cellvolt_list].apply(lambda x: np.argmax(x.values) + 1, axis=1))
    cell_volt_min = list(dataset2[cellvolt_list].apply(lambda x: np.argmin(x.values) + 1, axis=1))
    # mm_volt_cont flags rows where the max- and min-voltage cells sit next to each other
    # (index difference of 1, or first vs. last cell).
    dataset2['mm_volt_cont'] = list(np.array(cell_volt_max) - np.array(cell_volt_min))
    dataset2['mm_volt_cont'] = list(map(lambda x: 1 if (abs(x) == 1) | (abs(x) == len(cellvolt_list) - 1) else 0,
                                        list(dataset2['mm_volt_cont'])))
    dataset2 = dataset2.drop(cellvolt_list + celltemp_name2, axis=1)
    dataset2['temp_max'] = dataset2[celltemp_name].max(axis=1)
    dataset2['temp_min'] = dataset2[celltemp_name].min(axis=1)
    dataset2['temp_diff'] = dataset2['temp_max'] - dataset2['temp_min']
    dataset2 = dataset2.drop(celltemp_name, axis=1)
    datatest3 = dataset2[cols]
    datatest3.reset_index(drop=True, inplace=True)
    return datatest3
# Step2 Data Filter
def data_filtre(datatest3, col_key, compare, threshold):
    # compare: 0 -> equal to, 1 -> greater than, otherwise -> less than the threshold.
    if compare == 0:
        datatest4 = datatest3[datatest3[col_key] == threshold]
    elif compare == 1:
        datatest4 = datatest3[datatest3[col_key] > threshold]
    else:
        datatest4 = datatest3[datatest3[col_key] < threshold]
    datatest4.reset_index(drop=True, inplace=True)
    return datatest4
# Step3 Faults Pre-processing
def make_fault_set(dataset, cols, col_key, compare, threshold_filtre, fault_name):
    datatest3 = features_filtre(dataset, cols)
    datatest4 = data_filtre(datatest3, col_key, compare, threshold_filtre)
    df_tot = split(datatest4)
    df_bms = makedataset(df_tot)
    df_bms['fault_class'] = fault_name
    return df_bms
# Step4 Normal Pre-processing
def normalset(df_bms, cols):
    df_bms.drop(['Unnamed: 0'], axis=1, inplace=True, errors='ignore')  # drop the CSV index column if present
    nor_fea1 = features_filtre(df_bms, cols)
    norfea1 = split(nor_fea1)
    normalf1 = makedataset(norfea1)
    normalf1['fault_class'] = '正常'  # label fault-free data as the normal class
    return normalf1

def normalset2(df_bms1, df_bms2, df_bms3, df_bms4, df_bms5, df_bms6, cols):
    normalf1 = normalset(df_bms1, cols)
    normalf2 = normalset(df_bms2, cols)
    normalf3 = normalset(df_bms3, cols)
    normalf4 = normalset(df_bms4, cols)
    normalf5 = normalset(df_bms5, cols)
    normalf6 = normalset(df_bms6, cols)
    nor = pd.concat([normalf1, normalf2, normalf3, normalf4, normalf5, normalf6])
    nor.reset_index(drop=True, inplace=True)
    return nor
# Step5 Resample
def resample(nor, df_bms):
    # Randomly drop whole segments from whichever class is more than twice the size of the
    # other, so normal and fault data stay roughly balanced.
    if len(nor) > 2 * len(df_bms):
        sp = list(set(list(nor['split'])))
        sp_ran = random.sample(sp, k=int(len(sp) * (len(df_bms) / len(nor))))
        nor = nor[nor['split'].isin(sp_ran)]
        nor.reset_index(drop=True, inplace=True)
    if 2 * len(nor) < len(df_bms):
        sp = list(set(list(df_bms['split'])))
        sp_ran = random.sample(sp, k=int(len(sp) * (len(nor) / len(df_bms))))
        df_bms = df_bms[df_bms['split'].isin(sp_ran)]
        df_bms.reset_index(drop=True, inplace=True)
    return nor, df_bms
# Step6 Shuffle Data
def shuffle_data(nor, dataset_faults):
    # 80/20 train/test split by vehicle (sn), so no vehicle appears in both sets.
    sn_nor = list(set(nor['sn']))
    sn_fau = list(set(dataset_faults['sn']))
    shuffle(sn_nor)
    shuffle(sn_fau)
    newtrain = pd.DataFrame()
    newtest = pd.DataFrame()
    for s1 in sn_nor[:int(0.8 * len(sn_nor))]:
        nortrain = nor[nor['sn'] == s1]
        nortrain.reset_index(drop=True, inplace=True)
        newtrain = pd.concat([newtrain, nortrain])
    for s2 in sn_nor[int(0.8 * len(sn_nor)):]:
        nortest = nor[nor['sn'] == s2]
        nortest.reset_index(drop=True, inplace=True)
        newtest = pd.concat([newtest, nortest])
    for s3 in sn_fau[:int(0.8 * len(sn_fau))]:
        fautrain = dataset_faults[dataset_faults['sn'] == s3]
        fautrain.reset_index(drop=True, inplace=True)
        newtrain = pd.concat([newtrain, fautrain])
    for s4 in sn_fau[int(0.8 * len(sn_fau)):]:
        fautest = dataset_faults[dataset_faults['sn'] == s4]
        fautest.reset_index(drop=True, inplace=True)
        newtest = pd.concat([newtest, fautest])
    newtrain.reset_index(drop=True, inplace=True)
    newtest.reset_index(drop=True, inplace=True)
    return newtrain, newtest

def shuffle_data2(dftrain):
    # Shuffle the order of vehicles while keeping each vehicle's records contiguous.
    sp = list(set(dftrain['sn']))
    shuffle(sp)
    newtrain = pd.DataFrame()
    for s in sp:
        ntr = dftrain[dftrain['sn'] == s]
        newtrain = pd.concat([newtrain, ntr])
    newtrain.reset_index(drop=True, inplace=True)
    return newtrain
# Step7 X & Y
def xy(train):
    Xtrain = train.drop(['fault_class', 'Time', 'sn', 'split'], axis=1)
    Ytrain = train[['fault_class']]
    Ytrain2 = pd.get_dummies(Ytrain, columns=['fault_class'], prefix_sep='_')
    cols = list(map(lambda x: x[12:], list(Ytrain2.columns)))  # strip the 'fault_class_' prefix
    return Xtrain, Ytrain, Ytrain2, cols
# Step8 Scaler
def scaler_train(Xtrain):
    Xsc_colnames = list(Xtrain.columns)
    scaler = StandardScaler()
    scaler.fit(Xtrain)  # store the training mean and standard deviation
    Xsc = scaler.transform(np.array(Xtrain))
    Xsc = pd.DataFrame(Xsc)
    Xsc.columns = Xsc_colnames
    return Xsc, scaler

def scaler_test(Xtest, scaler):
    Xsc_colnames = list(Xtest.columns)
    Xtsc = scaler.transform(np.array(Xtest))
    Xtsc = pd.DataFrame(Xtsc)
    Xtsc.columns = Xsc_colnames
    return Xtsc
# Step9 Create windows
def create_win_train(X2, Y2, index, time_steps=6):
    # Build (time_steps, n_features) windows per segment; the label is taken at the window start.
    a, b = [], []
    for k in range(1, len(index)):
        dataset = X2[index[k-1]:index[k]]
        dataset = dataset.reset_index(drop=True)
        datay = Y2[index[k-1]:index[k]]
        datay = datay.reset_index(drop=True)
        if len(dataset) > time_steps:
            dataX, dataY = [], []
            for i in range(len(dataset) - time_steps):
                v1 = dataset.iloc[i:(i + time_steps)].values
                v2 = datay.iloc[i].values
                dataX.append(v1)
                dataY.append(v2)
            dataX2 = np.array(dataX, dtype='float32')
            dataY2 = np.array(dataY)
        else:
            continue
        a.append(dataX2)
        b.append(dataY2)
    aa = np.vstack(a)
    bb = np.vstack(b)
    return aa, bb
def create_win_test(X2, Y2, Xtest, index, time_steps=12):
    a, b = [], []
    conf = pd.DataFrame()
    for k in range(1, len(index)):
        dataset = X2[index[k-1]:index[k]]
        dataset = dataset.reset_index(drop=True)
        datay = Y2[index[k-1]:index[k]]
        datay = datay.reset_index(drop=True)
        dataset2 = Xtest[index[k-1]:index[k]]
        dataset2 = dataset2.reset_index(drop=True)
        if len(dataset) > time_steps:
            dataX, dataY = [], []
            win_step = []
            for i in range(len(dataset) - time_steps):
                win_step.append(i)
                v1 = dataset.iloc[i:(i + time_steps)].values
                v2 = datay.iloc[i].values
                dataX.append(v1)
                dataY.append(v2)
            test = dataset2.iloc[:len(dataset) - time_steps]
            test['win'] = win_step
            test = pd.merge(test, datay, left_index=True, right_index=True)
            dataX2 = np.array(dataX, dtype='float32')
            dataY2 = np.array(dataY)
        else:
            continue
        a.append(dataX2)
        b.append(dataY2)
        conf = pd.concat([conf, test])
    aa = np.vstack(a)
    bb = np.vstack(b)
    conf.reset_index(drop=True, inplace=True)
    return aa, bb, conf
# Step10 Create Model
def modelGRU(time_steps, nbr_features, nbr_neurons, nbr_class, Xwin, Ywin, Xtwin, Ytwin,
             batch_size, epochs, dropout, lr, activation, loss, metrics):
    # A single GRU layer followed by a dense classification head with the given activation.
    inputs = Input(shape=[time_steps, nbr_features])
    x = GRU(nbr_neurons, return_sequences=False, return_state=False)(inputs)
    x = Dropout(dropout)(x)
    x = Dense(nbr_class)(x)
    x = Dropout(dropout)(x)
    x = Activation(activation)(x)
    model = Model(inputs, x)
    adam = adam_v2.Adam(lr)
    model.compile(loss=loss, optimizer=adam, metrics=[metrics])
    model.fit(Xwin, Ywin, epochs=epochs, validation_data=(Xtwin, Ytwin),
              batch_size=batch_size, verbose=1, shuffle=True)
    return model
# Step11 Process
def pre_model(nor, df_bms, time_steps, nbr_features, nbr_neurons, nbr_class, batch_size, epochs, dropout, lr, activation, loss):
    nor, df_bms = resample(nor, df_bms)
    newtrain, newtest = shuffle_data(nor, df_bms)
    train_sh = shuffle_data2(newtrain)
    test_sh = shuffle_data2(newtest)
    Xtrain, Ytrain, Ytrain2, cols_train = xy(train_sh)
    Xtest, Ytest, Ytest2, cols_test = xy(test_sh)
    Xsc, scaler = scaler_train(Xtrain)
    Xtsc = scaler_test(Xtest, scaler)
    indextr = make_index(train_sh)
    indexte = make_index(test_sh)
    Xwin, Ywin = create_win_train(Xsc, Ytrain2, indextr, time_steps=time_steps)
    Xtwin, Ytwin, conf = create_win_test(Xtsc, Ytest2, test_sh, indexte, time_steps=time_steps)
    model = modelGRU(time_steps=time_steps, nbr_features=nbr_features, nbr_neurons=nbr_neurons, nbr_class=nbr_class,
                     Xwin=Xwin, Ywin=Ywin, Xtwin=Xtwin, Ytwin=Ytwin, batch_size=batch_size, epochs=epochs,
                     dropout=dropout, lr=lr, activation=activation, loss=loss, metrics='accuracy')
    loss, acc = model.evaluate(Xtwin, Ytwin)
    return scaler, model, acc, cols_train
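
# --- Training sketch (not part of the original file) ------------------------------------------
# A minimal example of how a new model could be produced with the functions above. Every file
# name, the feature column list `cols`, the fault filter settings and the hyper-parameters are
# assumptions for illustration; the real values depend on the data set actually used.
def _example_train():
    cols = ['时间戳', 'sn', '总电压[V]', '总电流[A]', 'volt_max', 'volt_min', 'volt_mean', 'volt_sigma',
            'mm_volt_cont', 'temp_max', 'temp_min', 'temp_diff']          # assumed feature set
    fault_raw = pd.read_csv('fault_vehicle.csv')                          # hypothetical faulty-vehicle log
    df_bms = make_fault_set(fault_raw, cols, col_key='volt_min', compare=2,
                            threshold_filtre=3.0, fault_name='fault_A')   # assumed filter rule
    normal_files = ['normal_%d.csv' % i for i in range(1, 7)]             # hypothetical normal-vehicle logs
    nor = normalset2(*[pd.read_csv(f) for f in normal_files], cols)
    # nbr_features excludes '时间戳' and 'sn', which are dropped before scaling.
    scaler, model, acc, cols_train = pre_model(nor, df_bms, time_steps=12, nbr_features=len(cols) - 2,
                                               nbr_neurons=32, nbr_class=2, batch_size=64, epochs=10,
                                               dropout=0.2, lr=0.001, activation='softmax',
                                               loss='categorical_crossentropy')
    return scaler, model, acc, cols_train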