qemu AIO thread model

The earlier article on the qemu 2 event handling mechanism analyzed the event mechanism, but the thread model and the locking model were still left rather murky. Why? Because in the qemu version analyzed back then, the AIO management itself was quite messy: aio_set_fd_handler, for example, was supposed to be called only from the aio thread, yet it also notified the aio thread, which is self-contradictory and left the reader thoroughly puzzled.

Newer versions of qemu have improved this considerably; let's take another look at the aio API.

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    return bh;
}

This code is now protected by ctx->list_lock.
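
To see what this buys us, here is a minimal sketch of scheduling a bottom half from a worker thread; the helper name notify_main_loop is made up for illustration, and the snippet assumes it is built inside the QEMU source tree against the same headers.

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

static void my_bh_cb(void *opaque)
{
    /* Runs later in the thread that iterates the AioContext (the aio thread).
     * A one-shot BH would normally be freed with qemu_bh_delete() here. */
    int *value = opaque;
    (*value)++;
}

/* Hypothetical helper called from a worker thread, i.e. NOT the aio thread. */
static void notify_main_loop(int *value)
{
    AioContext *ctx = qemu_get_aio_context();   /* main loop's AioContext */
    QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, value);

    /* qemu_bh_schedule() marks the BH pending and calls aio_notify(), so the
     * aio thread wakes up and runs my_bh_cb even though we never entered
     * that thread ourselves. */
    qemu_bh_schedule(bh);
}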

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }

        /* If the GSource is in the process of being destroyed then
         * g_source_remove_poll() causes an assertion failure.  Skip
         * removal in that case, because glib cleans up its state during
         * destruction anyway.
         */
        if (!g_source_is_destroyed(&ctx->source)) {
            g_source_remove_poll(&ctx->source, &node->pfd);
        }

        /* If a read is in progress, just mark the node as deleted */
        if (qemu_lockcnt_count(&ctx->list_lock)) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up while
             * no one is walking the handlers list.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
        }

        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_poll = io_poll;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    atomic_set(&ctx->poll_disable_cnt,
               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

Likewise, aio_set_fd_handler is now protected by ctx->list_lock.
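
As a rough illustration of what that makes possible, here is a minimal sketch of registering and later removing a readable-fd handler from outside the aio thread; handle_readable, watch_eventfd and unwatch_eventfd are made-up names, and the snippet assumes the seven-argument signature shown above.

#include "qemu/osdep.h"
#include "block/aio.h"

static void handle_readable(void *opaque)
{
    /* Invoked from the aio thread when the fd becomes readable. */
    int fd = *(int *)opaque;
    uint64_t cnt;

    if (read(fd, &cnt, sizeof(cnt)) < 0) {
        return;
    }
}

static void watch_eventfd(AioContext *ctx, int *fd)
{
    /* is_external=false marks this as an internal event source; io_write and
     * io_poll are left unset.  The aio_notify() at the end of
     * aio_set_fd_handler() makes the aio thread rebuild its poll set, which
     * is what lets us call this from another thread. */
    aio_set_fd_handler(ctx, *fd, false, handle_readable, NULL, NULL, fd);
}

static void unwatch_eventfd(AioContext *ctx, int *fd)
{
    /* All-NULL handlers take the deletion branch shown above. */
    aio_set_fd_handler(ctx, *fd, false, NULL, NULL, NULL, NULL);
}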

Timers, on the other hand, can still only be manipulated from the aio thread.
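
A minimal sketch of how such a timer would be armed, staying on the aio thread as just noted; arm_timeout is a made-up name and the snippet assumes the QEMU-internal aio_timer_new()/timer_mod() helpers of the same version.

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "block/aio.h"

static void timeout_cb(void *opaque)
{
    /* Fires in the aio thread that drives this AioContext. */
}

/* Call only from the aio thread that owns ctx. */
static void arm_timeout(AioContext *ctx)
{
    QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
                                 timeout_cb, NULL);

    /* Fire roughly 100 ms from now (the expire time is in SCALE_MS units). */
    timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
}

Now look at the thread pool submission path: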

BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
        ThreadPoolFunc *func, void *arg,
        BlockCompletionFunc *cb, void *opaque)
{
    ThreadPoolElement *req;

    req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
    req->func = func;
    req->arg = arg;
    req->state = THREAD_QUEUED;
    req->pool = pool;

    QLIST_INSERT_HEAD(&pool->head, req, all);

    trace_thread_pool_submit(pool, req, arg);

    qemu_mutex_lock(&pool->lock);
    if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
        spawn_thread(pool);
    }
    QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
    qemu_mutex_unlock(&pool->lock);
    qemu_sem_post(&pool->sem);
    return &req->common;
}

thread_pool_submit_aio has no lock protection and may only be called from the aio thread. Note that pool->lock only protects the ThreadPool's own state; it does not protect pool->head.
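
A minimal sketch of the intended usage, submitting blocking work from the aio thread and having the completion callback run back in the same AioContext; do_blocking_io, io_done and submit_work are made-up names.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/thread-pool.h"

static int do_blocking_io(void *arg)
{
    /* Runs in a pool worker thread and may block freely; the return value
     * is handed to the completion callback as 'ret'. */
    return 0;
}

static void io_done(void *opaque, int ret)
{
    /* Runs back in the aio thread once the worker has finished. */
}

/* Call only from the aio thread that owns ctx, as noted above. */
static void submit_work(AioContext *ctx)
{
    ThreadPool *pool = aio_get_thread_pool(ctx);

    thread_pool_submit_aio(pool, do_blocking_io, NULL, io_done, NULL);
}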

So in newer versions of qemu, the confusion around which thread may do what has been cleaned up considerably, and file descriptor event handlers can now also be registered from a non-aio thread.


Reposted from blog.csdn.net/woai110120130/article/details/100050152