Limiting concurrency and timeouts with asyncio and aiohttp

Reposted from: https://www.jianshu.com/p/6f8980cf0948

import asyncio
import random
import time
import traceback

from aiohttp import ClientSession, ClientTimeout, TCPConnector, client_exceptions

URL = 'http://127.0.0.1:5000/?delay={}'


async def fetch(session, i):
    delay = random.randint(1, 8)
    url = URL.format(delay)
    start_time = time.time()
    try:
        async with session.get(url=url) as response:
            r = await response.read()
            end_time = time.time()
            cost = end_time - start_time
            msg = "第{}个查询请求,花费时间: {}s, 返回信息: {}\n".format(i, cost, r.decode('unicode-escape'))
            print("running %d" % i, msg)
    except client_exceptions.ServerTimeoutError as timeout_error:
        print("request timeout error: {}, url: {}".format(timeout_error, url))
    except Exception as e:
        print("request unknown error: {}".format(traceback.format_exc()))


async def chunks(sem, session, i):
    """
    限制并发数
    """
    # 使用Semaphore, 它会在第一批2000个请求发出且返回结果(是否等待返回结果取决于你的register_user方法的定义)后
    # 检查本地TCP连接池(最大2000个)的空闲数(连接池某个插槽是否空闲,在这里,取决于请求是否返回)
    # 有空闲插槽,就PUT入一个请求并发出(完全不同于Jmeter的rame up in period的线性发起机制).
    # 所以,在结果log里,你会看到第一批请求(开始时间)是同一秒发起,而后面的则完全取决于服务器的吞吐量
    async with sem:
        await fetch(session, i)


async def run(num):
    tasks = []
    # The Semaphore effectively sends the requests in batches, sized to the server's
    # processing speed and the test client's hardware, until all of them
    # (the number/num defined below) have been sent.
    sem = asyncio.Semaphore(400)
    # Create the session, capping the local TCP connection pool at 400 (limit=400).
    # Timeout settings:
    # total: deadline for a whole request, from start to final completion
    # connect: time allowed for aiohttp to obtain a connection from the local pool
    # sock_connect: time allowed for a single request to connect to the server
    # sock_read: time allowed for a single request to read the response from the server
    timeout = ClientTimeout(total=330, connect=2, sock_connect=15, sock_read=10)
    async with ClientSession(connector=TCPConnector(limit=400), timeout=timeout) as session:
        for i in range(0, num):
            # When sending in batches, pass in the Semaphore and wrap the coroutine with it.
            task = asyncio.ensure_future(
                chunks(sem, session, i))
                # register_user(session, i))
            tasks.append(task)
        await asyncio.gather(*tasks)


start = time.time()
number = 380
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(number))
loop.run_until_complete(future)
end = time.time()
total = end - start
with open("log", "a+", encoding="utf-8") as f:
    f.write('Total time: {}s, average per request: {}s\n'.format(total, total / number))
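
The script above targets http://127.0.0.1:5000/?delay={}, which implies a local test server that sleeps for the requested number of seconds before responding; the original post does not include it. Below is a minimal sketch of such a server built on aiohttp's own web framework; the route, the port 5000 and the `delay` query parameter are assumptions made to match the URL template used by the client.

# test_server.py -- minimal sketch of a delaying test endpoint (not part of the original post)
import asyncio

from aiohttp import web


async def handle(request):
    # Sleep for the number of seconds given in ?delay=..., defaulting to 0.
    delay = int(request.query.get("delay", 0))
    await asyncio.sleep(delay)
    return web.json_response({"delay": delay})


app = web.Application()
app.add_routes([web.get("/", handle)])

if __name__ == "__main__":
    web.run_app(app, host="127.0.0.1", port=5000)

Start the server first (python test_server.py), then run the client script above. On Python 3.7+ the event-loop boilerplate at the bottom of the client can also be replaced by a single asyncio.run(run(number)) call.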

Reposted from www.cnblogs.com/zhzhlong/p/13198320.html