Setting the recursion depth with sys

I came across GlidedSky, a website for practicing web crawlers.

After clearing level 1, I moved on to level 2, which requires crawling 1000 pages. At page 956 the script crashed with:

RecursionError: maximum recursion depth exceeded while calling a Python object

Solution:

import sys
sys.setrecursionlimit(100000)

Reference: https://blog.csdn.net/Ren_ger/article/details/85068955
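Why this happens: CPython caps the call stack at 1000 frames by default, and my crawler moves to the next page by calling itself recursively, so every page adds one frame. Together with the frames already on the stack when the crawl starts, the default limit runs out around page 956. A minimal sketch that reproduces the error:

import sys

print(sys.getrecursionlimit())  # 1000 by default in CPython

def countdown(page):
    if page == 0:
        return
    countdown(page - 1)

countdown(5000)  # RecursionError: maximum recursion depth exceeded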

Here is the code I used for level 2:

import time
import sys

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from lxml import etree

# index_from_page() below recurses once per page, so the default limit
# of 1000 frames is exhausted near the end of the 1000-page crawl
sys.setrecursionlimit(100000)

url = 'http://glidedsky.com/level/web/crawler-basic-2?page=1'
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 10)
browser.get(url)

def index_from_page(page):
    print('Crawling page', page)
    try:
        if page < 1000:
            # Wait for the numbers to render, then locate the "next page" link
            wait.until(EC.presence_of_element_located(
                (By.XPATH, '//*[@id="app"]/main/div[1]/div/div')))
            button = wait.until(EC.element_to_be_clickable(
                (By.XPATH, '//*[@id="app"]/main/div[1]/ul/li//a[@rel="next"]')))
            get_sum(page)
            time.sleep(1)
            button.click()
            index_from_page(page + 1)
        elif page == 1000:
            # Last page: collect the numbers, nothing left to click
            get_sum(page)
    except TimeoutException:
        # The page did not load within 10 seconds; retry it
        index_from_page(page)

n = 0

def get_sum(page):
    # Parse the current page with lxml and add every number to the running total
    global n
    html = etree.HTML(browser.page_source)
    items = html.xpath('/html/body/div/main/div[1]/div/div/div/div/text()')
    for num in items:
        n += int(get_number(num))  # int() instead of eval(): safer, and the values are plain integers

def get_number(num):
    # The text nodes come padded with spaces and newlines; strip() removes both
    return num.strip()
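# Note: each text node returned by the xpath above is padded with
# whitespace, something like '\n    330\n  ' (hypothetical sample value,
# just to illustrate the cleanup):
#     int('\n    330\n  '.strip())  ->  330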


start = time.perf_counter()
index_from_page(1)
end = time.perf_counter()
print('sum:', n)  # the answer the site asks for
print('time:', end - start)
browser.quit()
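In hindsight, raising the recursion limit only treats the symptom: the stack grows because each page adds a frame. The same pagination can be written as a plain loop, which needs no setrecursionlimit at all. A sketch under the same page-structure assumptions, reusing the browser, wait, and get_sum defined above:

def index_iterative():
    page = 1
    while page <= 1000:
        try:
            wait.until(EC.presence_of_element_located(
                (By.XPATH, '//*[@id="app"]/main/div[1]/div/div')))
            get_sum(page)
            if page < 1000:
                button = wait.until(EC.element_to_be_clickable(
                    (By.XPATH, '//*[@id="app"]/main/div[1]/ul/li//a[@rel="next"]')))
                time.sleep(1)
                button.click()
            page += 1
        except TimeoutException:
            pass  # page did not load in time; retry the same page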

Reposted from www.cnblogs.com/fran-py-/p/12329296.html