FeiShuData.py

import json
import time

import pandas as pd
import requests


def getFeiShuDATA():
    print('Fetching Feishu data......')
    Columns = ["状态", "发生时间", "代理商", "电池编码", "用户信息", "业务分类A", "业务分类B", "操作",
               "信息来源", "内容描述", "客服处理时间", "客服处理结果", "记录人", "当前处理人", "处理部门",
               "跟进记录", "问题类型A", "问题类型B", "事件处理完结时间", "运维紧急程度", "维修信息", "返厂时间"]
    # Obtain a tenant access token
    app_id = 'cli_a1e503bb3d78100c'
    app_secret = 'oZBbGSRYsf9sXy8t8e8kbhVLciekyvMt'
    url = 'https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal'
    headers = {'Content-Type': "application/json; charset=utf-8"}
    datas = json.dumps({'app_id': app_id, 'app_secret': app_secret})
    response = requests.post(url, data=datas, headers=headers)
    auth_token = json.loads(response.text)['tenant_access_token']
    # Fetch records from the Bitable, page by page
    app_token = 'bascnpaW50pJsCNd1AyIRFIc24b'
    table_id = 'tblo3wnR2HFWI6rA'
    sheet_id = 'vewZh94xDC'
    url = 'https://open.feishu.cn/open-apis/bitable/v1/apps/{}/tables/{}/records'.format(app_token, table_id)
    headers = {"Authorization": "Bearer {}".format(auth_token)}
    timesort = ['发生时间', '客服处理时间', '事件处理完结时间', '返厂时间']
    rows = []
    GotPageToken = ''
    while True:
        # Filter conditions can be added here
        datas = {'view_id': sheet_id, 'page_size': 100, 'text_field_as_array': "true",
                 'page_token': GotPageToken,
                 'field_names': json.dumps(Columns, ensure_ascii=False)}
        try:
            response = requests.get(url, params=datas, headers=headers)
            data = json.loads(response.text)['data']
            rows.extend(r['fields'] for r in data['items'])
            # The loop ends when the response no longer carries a page_token
            # (or the request fails), which raises and breaks out below.
            GotPageToken = data['page_token']
        except Exception:
            break
    df_file = pd.DataFrame(rows, columns=Columns)
    # Convert millisecond timestamps in the time columns to readable strings
    for i in range(len(df_file)):
        for col in timesort:
            value = df_file.loc[i, col]
            if pd.notna(value):
                df_file.loc[i, col] = time.strftime('%Y-%m-%d %H:%M:%S',
                                                    time.localtime(float(value) / 1000))

    # Flatten rich-text fields (lists of {'text': ..., 'type': 'text'} segments)
    # into plain strings, joining segments and dropping newlines.
    def flatten(x):
        if isinstance(x, list):
            return ' '.join(str(seg.get('text', '')).replace('\n', ' ')
                            for seg in x if isinstance(seg, dict))
        return '' if x is None else str(x)

    for col in Columns:
        df_file[col] = df_file[col].map(flatten)
    df_file.to_excel('df_file.xlsx')
    print('Feishu data fetch complete')
    return df_file
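

# A minimal usage sketch, not part of the original file: the entry point below is
# an assumption about how getFeiShuDATA might be called to export the records.
if __name__ == '__main__':
    df = getFeiShuDATA()
    # Quick sanity check on the exported data
    print(df.shape)
    print(df.head())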