Crawling Sohu.com for pages about basketball

Method 1: regular expressions

Fetch the Sohu homepage, pull every href attribute out of the raw HTML with a regex, keep only valid Sohu links, then request each link and save the ones whose pages mention "篮球" (basketball).

import requests
import re

# Download the Sohu homepage.
contents = requests.get('http://www.sohu.com')
# Pull every href attribute value out of the HTML with a non-greedy regex.
links = re.findall('href="(.*?)"', contents.text)

valid_links = []
for i in links:
    if 'sohu' not in i:
        continue  # keep only links on a Sohu domain
    elif re.search(r'\.jpg|\.pdf|\.css|\.ico|\.tif|\.gif|mailto', i):
        continue  # skip static assets and mailto links
    elif i.strip().startswith('//'):
        valid_links.append('http:' + i.strip())  # protocol-relative URL
    else:
        valid_links.append(i.strip())
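# Addition, not in the original post: the homepage repeats many hrefs,
# so deduplicate before fetching to avoid downloading the same page twice
# (dicts preserve insertion order in Python 3.7+).
valid_links = list(dict.fromkeys(valid_links))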

for link in valid_links:
    try:
        data = requests.get(link, timeout=10)  # timeout so dead links don't hang
    except requests.RequestException:
        continue  # skip links that fail to load
    if '篮球' in data.text:  # page body mentions basketball
        with open('D:\\搜狐网关于篮球的网站.txt', 'a', encoding='utf-8') as f:
            f.write(link + '\n')
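A practical note of my own, not from the original post: some sites reject the default requests User-Agent, and pages that do not declare a charset fall back to ISO-8859-1 in requests, which makes the "篮球" substring test fail silently. Sending a browser-like header and forcing UTF-8 guards against both (the UA string below is illustrative):

import requests

headers = {'User-Agent': 'Mozilla/5.0'}  # illustrative browser-like UA
resp = requests.get('http://www.sohu.com', headers=headers, timeout=10)
resp.encoding = 'utf-8'  # decode as UTF-8 so '篮球' can match in resp.text
print('篮球' in resp.text)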

Method 2: bs4

Same pipeline as Method 1, except the hrefs are extracted with BeautifulSoup and a tag-filter function instead of a regex.

import requests
import re
from bs4 import BeautifulSoup

def has_href(tag):
    # Filter function: match any tag that carries an href attribute.
    return tag.has_attr('href')

contents = requests.get('http://www.sohu.com')
soup = BeautifulSoup(contents.text, 'html.parser')
links = [i.get('href') for i in soup.find_all(has_href)]
valid_links = []
for i in links:
    if 'sohu' not in i:
        continue  # keep only links on a Sohu domain
    elif re.search(r'\.jpg|\.pdf|\.css|\.ico|\.tif|\.gif|mailto', i):
        continue  # skip static assets and mailto links
    elif i.strip().startswith('//'):
        valid_links.append('http:' + i.strip())  # protocol-relative URL
    else:
        valid_links.append(i.strip())

for link in valid_links:
    try:
        data = requests.get(link, timeout=10)  # timeout so dead links don't hang
    except requests.RequestException:
        continue  # skip links that fail to load
    if '篮球' in data.text:  # page body mentions basketball
        with open('D:\\搜狐网关于篮球的网站.txt', 'a', encoding='utf-8') as f:
            f.write(link + '\n')
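Both versions repeat the same filter-and-fetch logic. As a closing sketch of my own (the helper names filter_links and save_matching_pages are not from the original post), it can be factored into reusable functions; note too that soup.find_all('a', href=True) matches href-bearing anchor tags directly, without a custom filter function:

import re
import requests
from bs4 import BeautifulSoup

def filter_links(links):
    # Keep Sohu links, drop static assets and mailto, fix protocol-relative URLs.
    valid = []
    for link in links:
        link = link.strip()
        if 'sohu' not in link:
            continue
        if re.search(r'\.jpg|\.pdf|\.css|\.ico|\.tif|\.gif|mailto', link):
            continue
        if link.startswith('//'):
            link = 'http:' + link
        valid.append(link)
    return valid

def save_matching_pages(links, keyword='篮球', path='D:\\搜狐网关于篮球的网站.txt'):
    # Append every link whose page body contains the keyword to the output file.
    for link in dict.fromkeys(links):  # deduplicate, preserving order
        try:
            data = requests.get(link, timeout=10)
        except requests.RequestException:
            continue  # skip links that fail to load
        if keyword in data.text:
            with open(path, 'a', encoding='utf-8') as f:
                f.write(link + '\n')

contents = requests.get('http://www.sohu.com')
soup = BeautifulSoup(contents.text, 'html.parser')
links = [a['href'] for a in soup.find_all('a', href=True)]
save_matching_pages(filter_links(links))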

Reposted from www.cnblogs.com/su-sir/p/12636654.html