Scraping an arbitrary number of pages of Xueqiu real-estate data

The script below pages through Xueqiu's public_timeline_by_category.json endpoint (category=111), feeding the next_max_id value from each response back into the max_id parameter of the following request.

import requests
import json

# Sample endpoint:
# 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=184086&count=15&category=111'

headers = {
    'Cookie': 'aliyungf_tc=AQAAAIHm1RMLuQIA8UM9dXeFqMSHz5wr; xq_a_token=584d0cf8d5a5a9809761f2244d8d272bac729ed4; xq_a_token.sig=x0gT9jm6qnwd-ddLu66T3A8KiVA; xq_r_token=98f278457fc4e1e5eb0846e36a7296e642b8138a; xq_r_token.sig=2Uxv_DgYTcCjz7qx4j570JpNHIs; Hm_lvt_1db88642e346389874251b5a1eded6e3=1534334492; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1534334492; _gat_gtag_UA_16079156_4=1; u=731534334496952; device_id=75251303465ff010b359be25472725d2; _ga=GA1.2.1771636590.1534334494; _gid=GA1.2.1900251104.1534334494',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
}

n = -1       # max_id cursor: -1 fetches the first page
page = 10    # count parameter: 10 items on the first request
for j in range(5):  # crawl 5 pages; change the range for more
    url = ('https://xueqiu.com/v4/statuses/public_timeline_by_category.json'
           '?since_id=-1&max_id={}&count={}&category=111').format(n, page)
    response = requests.get(url, headers=headers)
    lt_dict = json.loads(response.content.decode('utf-8'))
    for i in lt_dict['list']:
        # the 'data' field holds the raw payload of each post
        print(i['data'])
    n = lt_dict['next_max_id']   # cursor for the next page
    page = 15                    # subsequent requests ask for 15 items
    print(n)
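
For reference, here is a minimal sketch (not part of the original post) that wraps the same crawl in a reusable function so the number of pages becomes a parameter. The function name crawl_xueqiu and the output file xueqiu_items.json are made up for illustration; the field names ('list', 'data', 'next_max_id') simply mirror the script above.

import json
import requests

def crawl_xueqiu(pages, headers):
    """Fetch `pages` pages from the category-111 timeline and return the raw 'data' entries."""
    url_tpl = ('https://xueqiu.com/v4/statuses/public_timeline_by_category.json'
               '?since_id=-1&max_id={}&count={}&category=111')
    max_id, count = -1, 10  # first request mirrors the script above
    items = []
    for _ in range(pages):
        resp = requests.get(url_tpl.format(max_id, count), headers=headers)
        payload = json.loads(resp.content.decode('utf-8'))
        items.extend(entry['data'] for entry in payload['list'])
        max_id = payload['next_max_id']  # cursor returned by the API for the next page
        count = 15                       # later pages request 15 items, as above
    return items

# Example usage: crawl 5 pages with the headers defined earlier and dump to a file.
# items = crawl_xueqiu(5, headers)
# with open('xueqiu_items.json', 'w', encoding='utf-8') as f:
#     json.dump(items, f, ensure_ascii=False, indent=2)

Keeping the page count as an argument makes it easy to test with a single page before crawling more.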


Reprinted from blog.csdn.net/weixin_42958164/article/details/81712138