A simple web crawler exercise

1. Crawling Starbucks menu data with bs4




import urllib.request
from bs4 import BeautifulSoup

url = 'https://www.starbucks.com.cn/menu/'

# Fetch the menu page and decode the response body
response = urllib.request.urlopen(url)
content = response.read().decode('utf-8')

# Parse the HTML with the lxml parser
soup = BeautifulSoup(content, 'lxml')

# XPath equivalent: //ul[@class="grid padded-3 product"]//strong/text()
name_list = soup.select('ul[class="grid padded-3 product"] strong')

for name in name_list:
    print(name.get_text())
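The XPath in the comment above points at an alternative route: the same product names can be pulled with lxml directly instead of BeautifulSoup. A minimal sketch, assuming content has already been fetched as above and the page structure is unchanged:

from lxml import etree

tree = etree.HTML(content)
# Same selection as the CSS selector, expressed as XPath
for name in tree.xpath('//ul[@class="grid padded-3 product"]//strong/text()'):
    print(name)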

2. Crawling images from the webmaster material site (sc.chinaz.com)


# (1) Customize the request object
# (2) Get the page source
# (3) Download the images
# Goal: download the images from the first ten pages
# https://sc.chinaz.com/tupian/qinglvtupian.html        (page 1)
# https://sc.chinaz.com/tupian/qinglvtupian_page.html   (page 2 and later)
import os
import urllib.request
from lxml import etree

def create_request(page):
    # Page 1 has no numeric suffix; later pages use the _page.html form
    if page == 1:
        url = 'https://sc.chinaz.com/tupian/qinglvtupian.html'
    else:
        url = 'https://sc.chinaz.com/tupian/qinglvtupian_' + str(page) + '.html'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }

    request = urllib.request.Request(url=url, headers=headers)
    return request

def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content

def down_load(content):
    # Download the images
    # urllib.request.urlretrieve('image URL', 'file name')
    # etree.HTML parses a server response; etree.parse parses a local file
    tree = etree.HTML(content)
    # //div[@class="container"]//div/img/@alt
    name_list = tree.xpath('//div[@class="container"]//div/img/@alt')
    # //div[@class="container"]//div/img/@src
    # Image sites usually lazy-load, so the real URL is in @data-original, not @src
    src_list = tree.xpath('//div[@class="container"]//div/img/@data-original')
    print(name_list)
    print(src_list)
    # Make sure the output directory exists before calling urlretrieve
    os.makedirs('./loveImg', exist_ok=True)
    for i in range(len(name_list)):
        name = name_list[i]
        src = src_list[i]
        url = 'https:' + src

        urllib.request.urlretrieve(url=url, filename='./loveImg/' + name + '.jpg')
if __name__ == '__main__':
    # start_page = int(input('Enter the start page: '))
    # end_page = int(input('Enter the end page: '))
    start_page = 1
    end_page = 3
    for page in range(start_page, end_page + 1):
        # (1) Customize the request object
        request = create_request(page)
        # (2) Get the page source
        content = get_content(request)
        # (3) Download the images
        down_load(content)
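On lazy-loading pages like this one, the first few images sometimes carry a real src and no data-original yet, which can make name_list and src_list fall out of step. A more defensive sketch (my own variation, not from the original post) iterates per img node and falls back between the two attributes:

# Hypothetical helper: pair each alt text with whichever URL attribute is present
def extract_pairs(tree):
    pairs = []
    for img in tree.xpath('//div[@class="container"]//div/img'):
        name = img.get('alt')
        src = img.get('data-original') or img.get('src')
        if name and src:
            pairs.append((name, src))
    return pairs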
