#!/usr/bin/env python3
# coding=utf-8
import re
import urllib.request
from bs4 import BeautifulSoup
'''获取网址'''
def get_url(url):
Url = []
#url = 'http://www.kekenet.com/video/16692/'
f = urllib.request.urlopen(url)
html = f.read()
soup = BeautifulSoup(html,'html.parser')
content = soup.find_all('ul',id='menu-list')
for tag in content:
li = tag.find_all('li') #类型<class 'bs4.element.ResultSet'>
#print(type(li))
for tag2 in li:
h = tag2.find_all('h2')
for tag3 in h:
h1 = tag3.find_all('a',text=re.compile(r'MP3+')) # re.compile() 正则表达匹配,匹配内容不能为中文
for tag4 in h1:
t = tag4.get_text() #获取内容
get_url = tag4.get('href') #获取URL
Url.append(str(get_url))
Url = reversed(Url) #将列表中的所有元素进行倒序排列
return (list(Url)) #必须添加list,将其转换为list
#print(list(Url))
'''获取网页中的内容'''
url1 = []
base_url = 'http://www.kekenet.com/video/15830/List_'
for i in range(18):
full_url = base_url + str(i+1) +'.shtml'
url1.append(full_url)
url1.append('http://www.kekenet.com/video/15830/')
# print(url1)
# print(len(url1))
n = 0
for i in range (len(url1)):
url = url1[i]
Get_Url = get_url(url)
for i in range(len(Get_Url)):
f = urllib.request.urlopen(Get_Url[i])
html = f.read()
soup = BeautifulSoup(html,'html.parser') #解析抓取到到html
title = soup.title.get_text() #获取html的title
en_contents = soup.find_all('div',class_='qh_en')
zg_contents = soup.find_all('div',class_='qh_zg')
file = 'M_S6.txt'
with open(file,'a') as f: #追加的方式写入文件
f.write(title)
n = n+1
for content in zip(en_contents,zg_contents): #同时输出两个列表中的元素,先将其转化为元组
f.write(content[0].get_text()) #输出元组中的第一个元素,即第一个列表中的第一个元素
f.write(content[1].get_text()) #输出元组中的第2个元素,即第2个列表中的第1个元素
f.write('\n') #换行
#break
print(n)