Day 2: Crawling Contestant Information for 《青春有你2》 (Youth With You 2)

The code below visits each contestant's Baidu Baike entry, follows the summary picture to the photo-album page, collects all image URLs, and downloads the pictures.
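For reference, crawl_pic_urls only reads the name and link fields of each record in work/<today>.json (the file produced in the previous step), so each entry is assumed to look roughly like this (illustrative values, not taken from the real file):

# Assumed record shape in work/<today>.json; only 'name' and 'link'
# are read by crawl_pic_urls below:
[
    {"name": "<contestant name>", "link": "https://baike.baidu.com/item/<entry>"}
]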

import json

import requests
from bs4 import BeautifulSoup

# Note: `today` (the date string used to name the JSON file) and
# `down_pic` are defined elsewhere in the notebook.

def crawl_pic_urls():
    '''
    Crawl every contestant's Baidu Baike pictures and save them.
    '''
    with open('work/' + today + '.json', 'r', encoding='UTF-8') as file:
        json_array = json.loads(file.read())

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/67.0.3396.99 Safari/537.36'
    }

    for star in json_array:
        name = star['name']
        link = star['link']

        # Task: crawl the pictures of this contestant and store every
        # image URL in the list pic_urls.
        pic_urls = []
        print(name, link)

        try:
            response = requests.get(link, headers=headers)
            print(response.status_code)

            # Passing the page source (a string) to the BeautifulSoup
            # constructor yields a parsed document object.
            soup = BeautifulSoup(response.text, 'lxml')

            # The <div class="summary-pic"> on the entry page wraps an
            # <a> that links to the contestant's photo-album page.
            summary_pics = soup.find_all('div', {'class': 'summary-pic'})

            for div in summary_pics:
                for anchor in div.select('a'):
                    album_url = 'https://baike.baidu.com' + anchor.get('href')

                    try:
                        # Request the album page and collect the src of
                        # every <img> inside the .pic-list container.
                        album_response = requests.get(album_url, headers=headers)
                        print(album_response.status_code)
                        album_soup = BeautifulSoup(album_response.text, 'lxml')
                        for pic in album_soup.select('.pic-list img'):
                            src = pic.get('src')
                            if src:
                                pic_urls.append(src)
                    except Exception as e:
                        print(e)

        except Exception as e:
            print(e)

        # Task: download all pictures in pic_urls and save them in a
        # folder named after the contestant.
        down_pic(name, pic_urls)
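down_pic is called above but defined elsewhere in the notebook. A minimal sketch of what it might do, assuming each image is written as work/pics/<name>/<index>.jpg (the folder layout and file naming here are assumptions, not the course's exact helper):

import os
import requests

def down_pic(name, pic_urls):
    '''
    Sketch: download every URL in pic_urls into work/pics/<name>/,
    numbering the files 1.jpg, 2.jpg, ... (assumed layout).
    '''
    path = 'work/pics/' + name + '/'
    os.makedirs(path, exist_ok=True)

    for i, pic_url in enumerate(pic_urls):
        try:
            pic = requests.get(pic_url, timeout=15)
            with open(path + str(i + 1) + '.jpg', 'wb') as f:
                f.write(pic.content)
            print('success to download %dth picture: %s' % (i + 1, pic_url))
        except Exception as e:
            print('fail to download %dth picture: %s' % (i + 1, pic_url))
            print(e)

With both helpers in place, a single call to crawl_pic_urls() runs the whole crawl and download once the JSON file from the earlier step exists.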

     


Reposted from blog.csdn.net/cgq081616/article/details/105814073