Several ways to use Python's threadpool thread-pool module

        When a crawler needs to download many files linked from a single page (http://blog.sina.com.cn/s/blog_740773f40100ywyg.html), multithreading speeds the downloads up considerably, and a thread pool is a simple way to get it. This post starts with the threadpool module. Note that the module is quite old; its PyPI page itself recommends multiprocessing instead:

This module is OBSOLETE and is only provided on PyPI to support old projects that still use it. Please DO NOT USE IT FOR NEW PROJECTS! Use modern alternatives like the multiprocessing module in the standard library or even an asynchronous approach with asyncio.


        Still, for a beginner this module makes multithreading remarkably easy: the whole thing is five lines of code.
from threadpool import *
pool = ThreadPool(poolsize)
requests = makeRequests(some_callable, list_of_args, callback)
[pool.putRequest(req) for req in requests]
pool.wait()
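For reference, here is a minimal runnable version of those five lines (the callable square, the callback on_done, and the pool size are made-up names for this sketch):

import threadpool

def square(x):
    return x * x

def on_done(request, result):
    # the callback receives the finished WorkRequest and the callable's return value
    print 'request #%s -> %s' % (request.requestID, result)

pool = threadpool.ThreadPool(4)                                # 4 worker threads
reqs = threadpool.makeRequests(square, [1, 2, 3, 4], on_done)  # one request per argument
[pool.putRequest(req) for req in reqs]
pool.wait()                                                    # block until every request finishes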

        If the module is not installed yet, first install it with pip install threadpool.
        Of the five lines above, pay particular attention to the arguments on the third line: the first two are required, the third is optional.
        some_callable is the function each worker thread runs, and list_of_args is the list of arguments to pass to it. The official description is:
args_list contains the parameters for each invocation of callable. Each item in args_list should be either a 2-item tuple of the list of positional arguments and a dictionary of keyword arguments or a single, non-tuple argument.
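Read literally, each item in args_list can therefore take one of these shapes (my own sketch, for a generic some_callable):

args_list = [
    ([1, 2], {}),              # positional args only  -> some_callable(1, 2)
    ([1], {'b': 2}),           # positional + keyword  -> some_callable(1, b=2)
    (None, {'a': 1, 'b': 2}),  # keyword args only     -> some_callable(a=1, b=2)
    5,                         # single non-tuple item -> some_callable(5)
]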

When a request is actually executed, threadpool makes the call as:
result = request.callable(*request.args, **request.kwds)
        As a formal parameter, *args matches the positional (unnamed) arguments as a tuple, while **kwds matches the keyword (named) arguments as a dictionary. (http://my.oschina.net/935572630/blog/393539)
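A quick illustration of that matching (the function name demo is made up):

def demo(*args, **kwds):
    print args, kwds

demo(1, 2)    # prints: (1, 2) {}
demo(1, b=2)  # prints: (1,) {'b': 2}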
        As a concrete example, here are three ways to crawl all of the PDFs from the page linked at the top.
1. Passing a single argument
import cookielib
import urllib2
import socket
import os
from bs4 import BeautifulSoup
import threadpool
import threading

def download(pdfUrl):
    folder = 'matlab_pdf'
    # serialize directory creation so the worker threads do not race on makedirs
    mutex.acquire()
    if not os.path.exists(folder):
        os.makedirs(folder)
    mutex.release()
    name = pdfUrl.split('/')[-1]
    status = False
    try:
        # fetch first, then write, so no file handle is left open on a failed request
        data = urllib2.urlopen(pdfUrl).read()
        f = open(os.path.join(folder, name), 'wb')
        f.write(data)
        f.close()
        status = True
    except Exception as err:
        print err
    return (name, status)

def print_result(request, result):
    print "the %s is %s" % (result[0], 'downloaded!' if result[1] else 'can not find.')

initUrl = r"http://blog.sina.com.cn/s/blog_740773f40100ywyg.html"
socket.setdefaulttimeout(10)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
urllib2.install_opener(opener)
resp = urllib2.urlopen(initUrl).read()
a_list = BeautifulSoup(resp).find_all('a')
urls = [a.get('href') for a in a_list]
pdfUrls = [url for url in urls if url and len(url) > 3 and url[-3:] == 'pdf']

mutex = threading.Lock()
pool = threadpool.ThreadPool(30)
requests = threadpool.makeRequests(download, pdfUrls, print_result)
[pool.putRequest(req) for req in requests]
pool.wait()
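In this first version every item of pdfUrls is a plain string, i.e. a single non-tuple argument, so per the rule quoted above threadpool wraps it into a one-element positional list; for a hypothetical URL the worker call is equivalent to:

# the item 'http://example.com/a.pdf' is wrapped as args=['http://example.com/a.pdf'], kwds={}
result = download('http://example.com/a.pdf')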
2. Passing positional (unnamed) arguments
import cookielib
import urllib2
import socket
import os
from bs4 import BeautifulSoup
import threadpool
import threading

def download(name, pdfUrl):
    folder = 'matlab_pdf_test'
    # serialize directory creation so the worker threads do not race on makedirs
    mutex.acquire()
    if not os.path.exists(folder):
        os.makedirs(folder)
    mutex.release()
    status = False
    try:
        # fetch first, then write, so no file handle is left open on a failed request
        data = urllib2.urlopen(pdfUrl).read()
        f = open(os.path.join(folder, name), 'wb')
        f.write(data)
        f.close()
        status = True
    except Exception as err:
        print err
    return (name, status)

def print_result(request, result):
    print "the %s is %s" % (result[0], 'downloaded!' if result[1] else 'can not find.')

initUrl = r"http://blog.sina.com.cn/s/blog_740773f40100ywyg.html"
socket.setdefaulttimeout(10)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
urllib2.install_opener(opener)
resp = urllib2.urlopen(initUrl).read()
a_list = BeautifulSoup(resp).find_all('a')
urls = [a.get('href') for a in a_list]
pdfUrls = [url for url in urls if url and len(url) > 3 and url[-3:] == 'pdf']
v = list(map(lambda x: [x.split('/')[-1], x], pdfUrls))  # positional args: [name, pdfUrl]
u = [None for i in range(len(pdfUrls))]                  # no keyword args

mutex = threading.Lock()
pool = threadpool.ThreadPool(30)
requests = threadpool.makeRequests(download, zip(v, u), print_result)
[pool.putRequest(req) for req in requests]
pool.wait()
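With zip(v, u), every item handed to makeRequests is a (positional-args list, None) pair; for a hypothetical URL the worker call is equivalent to:

# the item (['a.pdf', 'http://example.com/a.pdf'], None) becomes:
result = download('a.pdf', 'http://example.com/a.pdf')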
3. Passing keyword (named) arguments
import cookielib
import urllib2
import socket
import os
from bs4 import BeautifulSoup
import threadpool
import threading

def download(name, pdfUrl):
    folder = 'matlab_pdf_test'
    # serialize directory creation so the worker threads do not race on makedirs
    mutex.acquire()
    if not os.path.exists(folder):
        os.makedirs(folder)
    mutex.release()
    status = False
    try:
        # fetch first, then write, so no file handle is left open on a failed request
        data = urllib2.urlopen(pdfUrl).read()
        f = open(os.path.join(folder, name), 'wb')
        f.write(data)
        f.close()
        status = True
    except Exception as err:
        print err
    return (name, status)

def print_result(request, result):
    print "the %s is %s" % (result[0], 'downloaded!' if result[1] else 'can not find.')

initUrl = r"http://blog.sina.com.cn/s/blog_740773f40100ywyg.html"
socket.setdefaulttimeout(10)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
urllib2.install_opener(opener)
resp = urllib2.urlopen(initUrl).read()
a_list = BeautifulSoup(resp).find_all('a')
urls = [a.get('href') for a in a_list]
pdfUrls = [url for url in urls if url and len(url) > 3 and url[-3:] == 'pdf']
v = list(map(lambda x: {'name': x.split('/')[-1], 'pdfUrl': x}, pdfUrls))  # keyword args
u = [None for i in range(len(pdfUrls))]                                    # no positional args

mutex = threading.Lock()
pool = threadpool.ThreadPool(30)
requests = threadpool.makeRequests(download, zip(u, v), print_result)
[pool.putRequest(req) for req in requests]
pool.wait()
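With zip(u, v), every item is a (None, keyword-dict) pair, so all arguments are passed by name; for a hypothetical URL the worker call is equivalent to:

# the item (None, {'name': 'a.pdf', 'pdfUrl': 'http://example.com/a.pdf'}) becomes:
result = download(name='a.pdf', pdfUrl='http://example.com/a.pdf')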
Notes:
1. When passing keyword (named) arguments, the dict keys in list_of_args must exactly match the parameter names of some_callable.
2. Positional and keyword arguments are packed into the 2-tuple at different positions: positional arguments travel as (list, None), keyword arguments as (None, dict). See http://bbs.csdn.net/topics/391886273


Reposted from blog.csdn.net/shawpan/article/details/52013448