A first web crawler script in Python

import urllib.request
import re
import os

url = "http://www.budejie.com/" # 爬的地址

def get_page(url):
    page = urllib.request.urlopen(url).read()  # fetch the raw bytes of the page
    # page = page.decode('gbk')  # alternative decoding
    page = page.decode('utf8')  # decode the bytes into text
    return page
    # print(page)
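The commented-out decode('gbk') line hints at the usual encoding headache: hard-coding utf8 breaks on pages that declare another charset. A hedged alternative, assuming the server reports its charset in the response headers, is to ask the response object for it and fall back to UTF-8 (a sketch only; get_page_safe is an illustrative name, not part of the original script):

import urllib.request

def get_page_safe(url):
    with urllib.request.urlopen(url) as resp:
        charset = resp.headers.get_content_charset() or 'utf8'  # use the declared charset if any
        return resp.read().decode(charset, errors='replace')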
def get_content(html):
    # keep only the list-item content blocks of the page
    zz = r'<div class="j-r-list-c">.+?</div>.+?</div>'
    rge = re.findall(zz, html, re.S)
    # reg = re.compile(r'<div class="j-r-list-c">.+?</div>.+?</div>')
    # return re.findall(reg, html)
    return "".join(rge)

# print(get_page(url))
# print (get_content(get_page(url)))

zz = r'data-original="(.+?)" title=".+?" alt="(.+?)"/>'  # capture the image URL and its alt text
html = re.findall(zz, get_content(get_page(url)))  # a list of (image URL, alt text) tuples
print(html)
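To see what this second pattern yields, here is a minimal, self-contained sketch run against a made-up HTML fragment (the snippet and its values are invented for illustration; they only mimic the structure the pattern expects):

import re

sample = ('<div class="j-r-list-c">'
          '<img data-original="http://example.com/a.gif" title="funny cat" alt="funny cat"/>'
          '</div></div>')
pattern = r'data-original="(.+?)" title=".+?" alt="(.+?)"/>'
print(re.findall(pattern, sample))  # [('http://example.com/a.gif', 'funny cat')]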


def mkdir(path):
    folder = os.path.exists(path)

    if not folder:  # check whether the folder exists; create it if it does not
        os.makedirs(path)  # makedirs also creates any missing parent directories in the path
        print("created a new folder")

        print("created successfully")
    else:
        print("the folder already exists")

img_path = 'D:/photo/'
mkdir(img_path)

img_path = "D:/photo/"
mkdir(img_path)
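As an aside, on Python 3.2+ the existence check and the creation can be collapsed into a single call; a minimal sketch of that shortcut:

import os

os.makedirs('D:/photo/', exist_ok=True)  # creates the folder (and any parents) only if it is missing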
for line in html:  # each line is an (image URL, alt text) tuple
    # check whether it is a gif image
    # if str(line[0]).endswith(".gif"):
    p1 = line[0]
    p2 = line[1]
    print(p2 + " " + p1)
    url = p1
    # download the gif image into the D:/photo/ folder
    web = urllib.request.urlopen(url)
    data = web.read()
    f = open(img_path + p2 + ".gif", "wb")
    f.write(data)
    f.close()
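Many sites reject the default urllib User-Agent or occasionally fail on a single image, so the download step often needs a little armour. Below is a hedged sketch (the download helper and its header value are my own additions, not part of the original script) that sends a browser-like User-Agent, closes handles with with-blocks, and skips over failed requests:

import urllib.request

def download(img_url, save_path):
    req = urllib.request.Request(img_url, headers={"User-Agent": "Mozilla/5.0"})  # assumed header value
    try:
        with urllib.request.urlopen(req) as web, open(save_path, "wb") as f:
            f.write(web.read())  # write the image bytes straight to disk
    except Exception as e:
        print("failed to download", img_url, e)

# usage inside the loop above: download(p1, img_path + p2 + ".gif")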


Reposted from www.cnblogs.com/lanail/p/9127167.html