Crawling the web with Python multithreading

A Python implementation of a multithreaded web crawler
The following code crawls starting from a set of seed pages: for each seed URL it repeatedly fetches the page, extracts the absolute URLs it links to, and downloads those linked pages. Because each thread keeps re-scanning its seed page, the crawl runs without a depth limit. The code is as follows:


##coding:utf-8
'''
	Crawl web pages without a depth limit
	@author wangbingyu
	@date 2014-06-26
'''
import sys,urllib,re,thread,time,threading

'''
Download thread class
'''
class download(threading.Thread):
	'''Worker thread that repeatedly scans a start URL, collects the
	absolute http:// links found on that page, and saves each linked
	page to disk as an .html file, until stop() is called.
	'''
	def __init__(self, url, threadName):
		'''Remember the start *url*; *threadName* names the thread.'''
		threading.Thread.__init__(self, name=threadName)
		self.thread_stop = False  # flipped to True by stop() to end run()
		self.url = url

	def run(self):
		# Re-scan the start page and download its links until stop()
		# is asked for.  NOTE(review): this revisits the same seed page
		# forever, re-downloading the same links each pass.
		while not self.thread_stop:
			self.list = self.getUrl(self.url)
			self.downloading(self.list)

	def stop(self):
		'''Request that run() exit after its current pass.'''
		self.thread_stop = True

	def downloading(self, urls):
		'''Fetch every URL in *urls* and save each page under
		E:\\upload\\download as <timestamp>.html.

		Bug fixed: the original looped range(len(list) - 1), which
		silently skipped the last collected link.  The parameter also
		shadowed the builtin ``list``.
		'''
		try:
			for link in urls:
				# time.time() as the filename; two downloads in the same
				# tick would collide -- acceptable for this demo script.
				urllib.urlretrieve(link, 'E:\upload\download\%s.html' %  time.time())
		except Exception, ex:
			print Exception,'_upload:',ex

	def getUrl(self, url):
		'''Return the absolute http:// links found in the page at *url*.'''
		result = []
		page = urllib.urlopen(url).read()
		# Strip spaces so href= and the quoted value are always adjacent
		# for the naive split-on-quote parsing below.
		squeezed = page.replace(' ', '')
		# Grab every <a ...>...</a> element (case-insensitive).
		anchors = re.findall(r'<a.*?href=.*?</a>', squeezed, re.I)
		for anchor in anchors:
			parts = anchor.split('"')
			try:
				if parts[1]:
					# Keep absolute links only.  The original pattern was
					# '\http://.*' -- the stray backslash was a typo.
					if re.match(r'http://.*', parts[1]):
						result.append(parts[1])
			except Exception, ex:
				print Exception,":getUrl",ex 
		return result

if __name__ == '__main__':
	# Seed URLs -- start one downloader thread per site.  Renamed from
	# ``list``, which shadowed the builtin.
	seeds = ['http://www.baidu.com','http://www.qq.com','http://www.taobao.com','http://www.sina.com.cn']
	for i, seed in enumerate(seeds):
		download(seed, 'thread%s' % i).start()

# Keep the main thread alive until the user presses Enter.
# raw_input() replaces the original input(): under Python 2, input()
# eval()s the typed text, which is unsafe and crashes on an empty line.
raw_input()



Guess you like

Origin www.cnblogs.com/ldxsuanfa/p/10951568.html