腾讯位置大数据
人口迁徙图
我已拿到从16年至今此网站能提供的城市迁徙大数据,请自行确认自己想要的城市此网站是否提供,部分三四线城市不提供
还有一点:网站不提供具体人数,如果需要具体人数的话我有联通的2020年1-6月份的扩样后的具体人数数据
如需数据请加我qq,在我博客的其他文章里可以找到
爬取简单思路:
1、获取URL
2、遍历所有城市名,遍历从16年至今的日期
3、访问URL,记得捕获异常,不然有可能会断掉
import os
from datetime import datetime, timedelta
import requests
from utils.read_write import readTxt, writeOneJson, writeCsv
from utils.time_change import getBetweenDay
# All downloaded JSON files are written relative to this directory.
os.chdir(r'D:\data\腾讯迁徙\城市\\')
# Read the city-code file into a list of strings (readTxt returns one entry
# per line; the commented-out code in city_range splits each as "id,name").
lines = readTxt(r'E:\project\python\JacksonProject\baidu\BaiduMap_cityCode_1102.txt')
# Desktop-browser User-Agent sent with every request so the endpoint
# does not reject the crawler.
headers = {
    "User-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0"}
# Send one request to the Tencent migration API and persist the payload.
def requerts_url(url, riqi, type):
    """Fetch *url* and, when the response carries data, write it to disk.

    Parameters:
        url:  full API endpoint including city/direction/date query string.
        riqi: date string "YYYY-MM-DD"; embedded in the output filename.
        type: direction label ('迁出' or '迁入') embedded in the filename.

    On any failure (timeout, bad payload, ...) the error is logged and the
    crawl resumes from the day after *riqi*, so one bad date cannot stall
    the whole run.
    """
    try:
        response = requests.get(url, timeout=1000, headers=headers)
        # Parse the body as JSON instead of eval(): eval() executes arbitrary
        # code received from the network and cannot parse JSON literals such
        # as true/false/null. (Also avoids shadowing the `json` module name.)
        payload = response.json()
        if payload['data']:
            writeOneJson(payload, '城市' + type + "_北京" + "_" + riqi + ".json")
    except Exception as e:
        print(datetime.now())
        print(e)
        print(url)
        # Resume from the next day. The original computed this date but then
        # retried date_change(riqi) on the SAME day, which could loop forever
        # on a persistently failing date; the unused variable shows the
        # next-day resume was the intent.
        begin_date = datetime.strptime(riqi, "%Y-%m-%d") + timedelta(days=1)
        date_change(begin_date.strftime("%Y-%m-%d"))
# Accumulator for per-city rows; only referenced by the commented-out
# writeCsv export at the bottom of date_change.
city_list = []
# Step 1: download the raw data as JSON files.
# Download both migration directions for one date (Beijing only).
def city_range(riqi):
    """Request the outbound and inbound migration JSON for Beijing on day
    *riqi*, skipping any file that already exists on disk (resumable crawl).
    """
    city = '北京'
    base = "https://heat.qq.com/api/getLbsMigrateDataByBeijing.php?city="
    # direction=1 is outbound (迁出), direction=0 is inbound (迁入).
    for label, direction in (('迁出', '1'), ('迁入', '0')):
        target = "城市" + label + "_" + city + "_" + riqi + ".json"
        if os.path.exists(target):
            continue  # already downloaded on a previous run
        url = (base + city + "&direction=" + direction
               + "&type=6&date=" + riqi)
        requerts_url(url, riqi, label)
# Crawl every day from *date* onward.
def date_change(date):
    """Walk each date string produced by getBetweenDay(date), printing it
    and downloading that day's migration files via city_range."""
    for day in getBetweenDay(date):
        print(day)
        city_range(day)
if __name__ == '__main__':
    # Crawl every date from 2016-01-01 (earliest data the site offers,
    # per the notes above) through today.
    date_change('2016-01-01')
转载:https://blog.csdn.net/qq_30803353/article/details/111462597
查看评论