Crawler notes

'''

Crawling ideas:
1. requests(url)
2. requests + json
3. requests + XPath
4. requests + BeautifulSoup
5. selenium
6. scrapy framework
7. scrapy-redis and distributed crawling

===============================================
OS:
import os
os.system("C: && p.txt")
os.system(“ping 127.0.0.1”)

===============================================
requests:
requests.get(url, headers=headers, data={'': ''}, proxies=proxies)
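
A minimal GET sketch (the URL and the User-Agent value are placeholder assumptions):
import requests
url = "http://example.com"
headers = {"User-Agent": "Mozilla/5.0"}  # many sites block the default requests User-Agent
response = requests.get(url, headers=headers, timeout=10)
print(response.status_code)
print(response.text[:200])  # first 200 characters of the body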

===============================================
Proxies:
proxies = {'http': '124.207.82.166:8008'}  # 47.98.129.198
response = requests.get(request_url, proxies=proxies)  # send the request through the proxy
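
A fuller sketch, assuming the proxy address above is still reachable (free proxies expire quickly):
import requests
proxies = {
    'http': 'http://124.207.82.166:8008',
    'https': 'http://124.207.82.166:8008',
}
try:
    response = requests.get("http://httpbin.org/ip", proxies=proxies, timeout=5)
    print(response.text)  # should show the proxy's IP, not yours
except requests.RequestException as e:
    print("proxy failed:", e)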

===============================================
File:
with open(path, 'w') as f:
    f.write(text)
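
When saving scraped Chinese text, pass an explicit encoding so the result does not depend on the platform default (path and text are assumed to be defined already):
with open(path, 'w', encoding='utf-8') as f:
    f.write(text)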

===============================================
Threading:
import threading
threading.Thread(target=fun, kwargs={'list_url': list_url, 'path_order': path_order1}).start()
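
A sketch that starts one worker per list page and waits for all of them (fun, the URLs and the XPath below are placeholder assumptions):
import threading

def fun(list_url, path_order):
    print("crawling", list_url, "with", path_order)  # a real crawler would fetch and parse here

threads = []
for i in range(1, 4):
    t = threading.Thread(target=fun,
                         kwargs={'list_url': f'http://example.com/page/{i}',
                                 'path_order': '//div[@class="item"]'})
    t.start()
    threads.append(t)
for t in threads:
    t.join()  # wait for every worker to finish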

===============================================
requests, json:
1. data = json.load(open("package1.json", encoding="utf-8"))
response = requests.get(url, headers=headers)
print(response.text)

2. response = requests.get(url)
data = response.text
obj = json.loads(data)
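
When the endpoint returns JSON directly, response.json() does the decode in one step (the URL is a placeholder assumption):
import requests
response = requests.get("http://httpbin.org/json", timeout=10)
obj = response.json()  # equivalent to json.loads(response.text)
print(obj)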

===============================================
requests, XPath
from lxml import etree
response = requests.get(list_url, headers=headers)
content = response.content
selector = etree.HTML(content)  # parse the page into an etree tree
items = selector.xpath(path_order)  # query the tree with the XPath expression; returns a list to iterate over
for item in items:
    title = item.xpath("./div/p[1]/a/text()")[0].strip()  # each item can be queried further with a relative XPath
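
Putting the pieces together (the URL, headers and XPath expressions below are placeholder assumptions):
import requests
from lxml import etree

list_url = "http://example.com/list"
headers = {"User-Agent": "Mozilla/5.0"}
response = requests.get(list_url, headers=headers, timeout=10)
selector = etree.HTML(response.content)
for item in selector.xpath('//ul[@class="news"]/li'):
    title = item.xpath('./a/text()')
    if title:
        print(title[0].strip())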

===============================================
requests, BeautifulSoup
from bs4 import BeautifulSoup
response = requests.get(url)
html= response.text
soup = BeautifulSoup(html, 'lxml')
soup_str = soup.prettify()  # pretty-print / normalize the HTML
tag = soup.b
then a series of operations on the tag follows
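
A short sketch of the usual tag operations, using a small inline HTML snippet as the assumed input:
from bs4 import BeautifulSoup

html = '<div class="post"><b id="t">Hello</b><a href="/next">more</a></div>'
soup = BeautifulSoup(html, 'lxml')
tag = soup.b
print(tag.name)                 # 'b'
print(tag.attrs)                # {'id': 't'}
print(tag.string)               # 'Hello'
print(soup.find('a')['href'])   # '/next'
print(soup.find_all('a'))       # list of all <a> tags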

===============================================
Selenium: install the ChromeDriver that matches your Chrome version (https://www.cnblogs.com/JHblogs/p/7699951.html)
and install the dependency library: pip install selenium

from selenium import webdriver

chromedriver = "G:/4Anaconda/chromedriver.exe"  # this step can be omitted if the driver is already on the PATH
browser = webdriver.Chrome(chromedriver)
browser.get("http://www.baidu.com")             # open a web page
browser.find_element_by_id("kw").send_keys("selenium")
browser.find_element_by_id("su").click()
browser.title                                   # page title
browser.set_window_size(480, 800)               # parameters are pixels
browser.back()
browser.forward()
browser.quit()                                  # quit the driver and close every related window
# browser.close()                               # close only the current window
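
Note: the find_element_by_* helpers above were removed in Selenium 4; a sketch of the same steps in the newer By-locator style:
from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()  # recent Selenium versions can also manage the driver themselves
browser.get("http://www.baidu.com")
browser.find_element(By.ID, "kw").send_keys("selenium")
browser.find_element(By.ID, "su").click()
browser.quit()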

Implicit wait

from selenium import webdriver

browser = webdriver.Chrome()

Here, implicitly_wait() is used to implement implicit waiting

browser.implicitly_wait(10)
browser.get('https://www.zhihu.com/explore')
input = browser.find_element_by_class_name('zu-top-add-question')
print(input)

Explicit wait
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('https://www.taobao.com/')
wait = WebDriverWait(browser, 10)
input = wait.until(EC.presence_of_element_located((By.ID, 'q')))
button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search')))
print(input, button)

'''

'''
#1. Create a scrapy project (cmd):
scrapy startproject weibospider
cd weibospider

    #2. Create a spider (cmd): scrapy genspider WeiboSpider image.baidu.com

    Disable the robots.txt protocol (in settings.py): ROBOTSTXT_OBEY = False

    Run the spider: scrapy crawl baiduimg

    #3. Define the data structure (in items.py)
    name = scrapy.Field()

    #4. Import the item: from hotnewsSpider.items import WeiboSpiderItem
    Use it:    weiboitem = WeiboSpiderItem()
    weiboitem['name'] = '123'
    Return it: yield weiboitem
    
    #5. Send follow-up requests (inside parse)
    yield scrapy.Request(url=url, headers=self.headers, cookies=self.cookies, callback=self.clickFindMore)
    # send a request and hand the response to a callback via the callback argument
    yield scrapy.Request(link,callback=self.parse_detail)
    
    #6. Override the initial requests (start_requests)
    def start_requests(self):
        for url in self.urls:
            yield scrapy.Request(url=url, headers=self.headers, cookies=self.cookies, callback=self.parse)
            
    #7. Receive the response (a consolidated spider sketch follows below)
    def parse(self, response):
        pass
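
    # A consolidated sketch of steps 3-7; the hotnewsSpider project name and the start URL are assumptions.
    # items.py
    import scrapy

    class WeiboSpiderItem(scrapy.Item):
        name = scrapy.Field()

    # spiders/weibo_spider.py
    # (in the real project: from hotnewsSpider.items import WeiboSpiderItem)
    class WeiboSpider(scrapy.Spider):
        name = 'weibospider'
        urls = ['https://weibo.com/']            # placeholder start URLs
        headers = {'User-Agent': 'Mozilla/5.0'}
        cookies = {}

        def start_requests(self):
            for url in self.urls:
                yield scrapy.Request(url=url, headers=self.headers, cookies=self.cookies, callback=self.parse)

        def parse(self, response):
            weiboitem = WeiboSpiderItem()
            weiboitem['name'] = response.xpath('//title/text()').get()
            yield weiboitem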

'''

Origin blog.csdn.net/GeniusXYT/article/details/101516801