Python爬取騰訊疫情實時數(shù)據(jù)并存儲到mysql數(shù)據(jù)庫的示例代碼
思路:
在騰訊疫情數(shù)據(jù)網(wǎng)站F12解析網(wǎng)站結構,使用Python爬取當日疫情數(shù)據(jù)和歷史疫情數(shù)據(jù),分別存儲到details和history兩個mysql表。
①此方法用于爬取每日詳細疫情數(shù)據(jù)
import json
import re
import time

import requests


def _strip_jsonp(text):
    """Return the JSON payload inside a JSONP wrapper like ``cb({...})``.

    More robust than hard-coding the callback name: takes everything
    between the first '(' and the last ')'.  Returns *text* unchanged
    if no wrapper is found.
    """
    match = re.search(r'\((.*)\)\s*;?\s*$', text, re.S)
    return match.group(1) if match else text


def get_details():
    """Fetch today's per-city epidemic figures from the Tencent API.

    Returns:
        list[list]: one row per city, each of the form
        ``[update_time, province, city, confirm, confirm_add, heal, dead]``
        where ``confirm``/``heal``/``dead`` are cumulative totals and
        ``confirm_add`` is today's newly confirmed count.
    """
    url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=jQuery34102848205531413024_1584924641755&_=1584924641756'
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400'
    }
    res = requests.get(url, headers=headers)
    # The endpoint answers with JSONP: callback(<json>).  The outer
    # object has keys 'ret' (0 == success) and 'data'; 'data' is itself
    # a JSON *string* and must be decoded a second time.
    response_data = json.loads(_strip_jsonp(res.text))
    data = json.loads(response_data['data'])
    # areaTree[0] is the country node: its 'children' are provinces,
    # and each province's 'children' are cities.
    area_tree = data['areaTree']
    last_update = data['lastUpdateTime']
    details = []
    for province in area_tree[0]['children']:
        province_name = province['name']
        for city in province['children']:
            details.append([
                last_update,
                province_name,
                city['name'],
                city['total']['confirm'],   # cumulative confirmed
                city['today']['confirm'],   # newly confirmed today
                city['total']['heal'],      # cumulative cured
                city['total']['dead'],      # cumulative deaths
            ])
    return details
單獨測試方法:
# d=get_details() # print(d)
②此方法用于爬取歷史詳細數(shù)據(jù)
import requests import json import time def get_history(): url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=jQuery341026745307075030955_1584946267054&_=1584946267055' headers ={ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400' } res = requests.get(url,headers=headers) # print(res.text) response_data = json.loads(res.text.replace('jQuery341026745307075030955_1584946267054(','')[:-1]) # print(response_data) data = json.loads(response_data['data']) # print(data.keys()) chinaDayList = data['chinaDayList']#歷史記錄 chinaDayAddList = data['chinaDayAddList']#歷史新增記錄 history = {} for i in chinaDayList: ds = '2021.' + i['date']#時間 tup = time.strptime(ds,'%Y.%m.%d') ds = time.strftime('%Y-%m-%d',tup)#改變時間格式,插入數(shù)據(jù)庫 confirm = i['confirm'] suspect = i['suspect'] heal = i['heal'] dead = i['dead'] history[ds] = {'confirm':confirm,'suspect':suspect,'heal':heal,'dead':dead} for i in chinaDayAddList: ds = '2021.' + i['date']#時間 tup = time.strptime(ds,'%Y.%m.%d') ds = time.strftime('%Y-%m-%d',tup)#改變時間格式,插入數(shù)據(jù)庫 confirm_add = i['confirm'] suspect_add = i['suspect'] heal_add = i['heal'] dead_add = i['dead'] history[ds].update({'confirm_add':confirm_add,'suspect_add':suspect_add,'heal_add':heal_add,'dead_add':dead_add}) return history
單獨測試此方法:
# h=get_history() # print(h)
③此方法用于數(shù)據(jù)庫的連接與關閉:
import time import pymysql import traceback def get_conn(): """ :return: 連接,游標 """ # 創(chuàng)建連接 conn = pymysql.connect(host="127.0.0.1", user="root", password="000429", db="mydb", charset="utf8") # 創(chuàng)建游標 cursor = conn.cursor() # 執(zhí)行完畢返回的結果集默認以元組顯示 return conn, cursor def close_conn(conn, cursor): if cursor: cursor.close() if conn: conn.close()
④此方法用于更新并插入每日詳細數(shù)據(jù)到數(shù)據(jù)庫表:
def update_details(): """ 更新 details 表 :return: """ cursor = None conn = None try: li = get_details() conn, cursor = get_conn() sql = "insert into details(update_time,province,city,confirm,confirm_add,heal,dead) values(%s,%s,%s,%s,%s,%s,%s)" sql_query = 'select %s=(select update_time from details order by id desc limit 1)' #對比當前最大時間戳 cursor.execute(sql_query,li[0][0]) if not cursor.fetchone()[0]: print(f"{time.asctime()}開始更新最新數(shù)據(jù)") for item in li: cursor.execute(sql, item) conn.commit() # 提交事務 update delete insert操作 print(f"{time.asctime()}更新最新數(shù)據(jù)完畢") else: print(f"{time.asctime()}已是最新數(shù)據(jù)!") except: traceback.print_exc() finally: close_conn(conn, cursor)
單獨測試能否插入數(shù)據(jù)到details表:
update_details()
⑤此方法用于插入歷史數(shù)據(jù)到history表
def insert_history(): """ 插入歷史數(shù)據(jù) :return: """ cursor = None conn = None try: dic = get_history() print(f"{time.asctime()}開始插入歷史數(shù)據(jù)") conn, cursor = get_conn() sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)" for k, v in dic.items(): # item 格式 {'2021-01-13': {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1} cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"), v.get("suspect"), v.get("suspect_add"), v.get("heal"), v.get("heal_add"), v.get("dead"), v.get("dead_add")]) conn.commit() # 提交事務 update delete insert操作 print(f"{time.asctime()}插入歷史數(shù)據(jù)完畢") except: traceback.print_exc() finally: close_conn(conn, cursor)
單獨測試能否插入數(shù)據(jù)到history表:
# insert_history()
⑥此方法用于根據(jù)時間來更新歷史數(shù)據(jù)表的內(nèi)容:
def update_history(): """ 更新歷史數(shù)據(jù) :return: """ cursor = None conn = None try: dic = get_history() print(f"{time.asctime()}開始更新歷史數(shù)據(jù)") conn, cursor = get_conn() sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)" sql_query = "select confirm from history where ds=%s" for k, v in dic.items(): # item 格式 {'2020-01-13': {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1} if not cursor.execute(sql_query, k): cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"), v.get("suspect"), v.get("suspect_add"), v.get("heal"), v.get("heal_add"), v.get("dead"), v.get("dead_add")]) conn.commit() # 提交事務 update delete insert操作 print(f"{time.asctime()}歷史數(shù)據(jù)更新完畢") except: traceback.print_exc() finally: close_conn(conn, cursor)
單獨測試更新歷史數(shù)據(jù)表的方法:
# update_history()
最后是兩個數(shù)據(jù)表的詳細建立代碼(也可以使用mysql可視化工具直接建立):
create table history( ds datetime not null comment '日期', confirm int(11) default null comment '累計確診', confirm_add int(11) default null comment '當日新增確診', suspect int(11) default null comment '剩余疑似', suspect_add int(11) default null comment '當日新增疑似', heal int(11) default null comment '累計治愈', heal_add int(11) default null comment '當日新增治愈', dead int(11) default null comment '累計死亡', dead_add int(11) default null comment '當日新增死亡', primary key(ds) using btree )engine=InnoDB DEFAULT charset=utf8mb4; create table details( id int(11) not null auto_increment, update_time datetime default null comment '數(shù)據(jù)最后更新時間', province varchar(50) default null comment '省', city varchar(50) default null comment '市', confirm int(11) default null comment '累計確診', confirm_add int(11) default null comment '新增確診', heal int(11) default null comment '累計治愈', dead int(11) default null comment '累計死亡', primary key(id) )engine=InnoDB default charset=utf8mb4;
Tomorrow the birds will sing.
到此這篇關于Python爬取騰訊疫情實時數(shù)據(jù)并存儲到mysql數(shù)據(jù)庫的文章就介紹到這了,更多相關Python爬取數(shù)據(jù)存儲到mysql數(shù)據(jù)庫內(nèi)容請搜索腳本之家以前的文章或繼續(xù)瀏覽下面的相關文章希望大家以后多多支持腳本之家!
- Python3爬蟲學習之MySQL數(shù)據(jù)庫存儲爬取的信息詳解
- Python爬蟲爬取全球疫情數(shù)據(jù)并存儲到mysql數(shù)據(jù)庫的步驟
- python+selenium爬取微博熱搜存入Mysql的實現(xiàn)方法
- Python如何爬取51cto數(shù)據(jù)并存入MySQL
- python 爬取古詩文存入mysql數(shù)據(jù)庫的方法
- python3爬取數(shù)據(jù)至mysql的方法
- Python爬取數(shù)據(jù)并寫入MySQL數(shù)據(jù)庫的實例
- Python3實現(xiàn)的爬蟲爬取數(shù)據(jù)并存入mysql數(shù)據(jù)庫操作示例
- python Selenium爬取內(nèi)容并存儲至MySQL數(shù)據(jù)庫的實現(xiàn)代碼
- Python爬取京東商品信息評論存并進MySQL
相關文章
Python中chinesecalendar簡介、安裝、使用方法詳細講解
這篇文章主要介紹了Python中chinesecalendar簡介、安裝、使用方法詳細講解,該庫是判斷某年某月某一天是不是工作日/節(jié)假日。 支持 2004年 至 2023年,包括 2020年 的春節(jié)延長,需要的朋友可以參考下2023-03-03解決pyecharts運行后產(chǎn)生的html文件用瀏覽器打開空白
這篇文章主要介紹了解決pyecharts運行后產(chǎn)生的html文件用瀏覽器打開空白,文中通過示例代碼介紹的非常詳細,對大家的學習或者工作具有一定的參考學習價值,需要的朋友們下面隨著小編來一起學習學習吧2020-03-03用Python從零實現(xiàn)貝葉斯分類器的機器學習的教程
這篇文章主要介紹了用Python從零實現(xiàn)貝葉斯分類器的教程,樸素貝葉斯算法屬于機器學習中的基礎內(nèi)容、實用而高效,本文詳細展示了用Python語言實現(xiàn)的步驟,需要的朋友可以參考下2015-03-03