11 Single-Threaded + Multi-Task Async Coroutine Crawler

The script below uses aiohttp to request three category pages concurrently from a single thread; a done-callback then parses each response with lxml and prints the extracted titles.

from lxml import etree
import asyncio
import aiohttp
import time
def callback(task):  # done-callback attached to each task
    page = task.result()  # the value returned by get_page()
    tree = etree.HTML(page)
    name = tree.xpath('/html/body/div[3]/div[4]/ul/li/a/span[2]/p[1]/text()')
    print(name)
    # print('I am callback', task.result())  # receives the task's return value

async def get_page(url):  # fetch a single page asynchronously
    async with aiohttp.ClientSession() as session:
        async with session.get(url=url) as response:
            page_text = await response.text()  # use read() for binary data, json() for a JSON body
            return page_text
            # print('response data:', page_text)
            # print('ok %s' % url)

start = time.time()
urls = [
    'http://ly6080.com.cn/vod/type/id/1.html',
    'http://ly6080.com.cn/vod/type/id/2.html',
    'http://ly6080.com.cn/vod/type/id/3.html',
]
tasks = []  # task list that holds the task objects
loop = asyncio.get_event_loop()
for url in urls:
    c = get_page(url)                 # coroutine object
    task = asyncio.ensure_future(c)   # wrap the coroutine in a task object
    tasks.append(task)
    task.add_done_callback(callback)  # register the callback to run when the task finishes

loop.run_until_complete(asyncio.wait(tasks))

print('Total time:', time.time() - start)
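
For reference, on Python 3.7+ the manual get_event_loop() / run_until_complete() wiring above can be replaced with asyncio.run() and asyncio.gather(). The sketch below is an assumed alternative, not part of the original post: it reuses the get_page() and callback() definitions and the urls list, and would replace the loop-setup block rather than be appended to it.

async def main():
    tasks = []
    for url in urls:
        task = asyncio.ensure_future(get_page(url))  # schedule the coroutine on the running loop
        task.add_done_callback(callback)             # same parsing callback as before
        tasks.append(task)
    await asyncio.gather(*tasks)                     # wait for every task to finish

start = time.time()
asyncio.run(main())  # creates, runs, and closes the event loop
print('Total time:', time.time() - start)

Either way, the three requests wait on the network concurrently, so the total time printed is roughly the duration of the slowest single request rather than the sum of all three.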

Reprinted from www.cnblogs.com/zhangchen-sx/p/11093805.html