Analysis of connmgr_run in the OVS Source Code

 

 

connmgr, the connection manager, manages an OVS bridge's connections; every bridge (ofproto) owns one connmgr instance. It handles two kinds of connections: active connections, where the switch connects out to controllers as a client, and passive connections, where it acts as a server for connection requests from tools such as ovs-ofctl and for the snoop service. Within struct connmgr, all_conns tracks the active connections, while services and snoops provide the passive services. The structs involved in the connmgr_run flow are related as follows:

One connmgr maps to multiple ofconns: a bridge connected to several controllers needs one ofconn per controller, and each connection from a tool such as ovs-ofctl also gets its own ofconn. Each ofconn owns exactly one rconn. One rconn can map to multiple vconns: besides the vconn for the connection itself, the snoop feature (which observes and copies the OpenFlow messages being exchanged) attaches extra vconns, so an rconn holds one primary vconn plus an array of monitor vconns.
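The original figure is not reproduced here; as a substitute, the sketch below abridges the relevant fields from ofproto/connmgr.c and lib/rconn.c. It is a simplified sketch, not the complete definitions, and exact fields vary across OVS versions:

    #include "openvswitch/hmap.h"   /* struct hmap */
    #include "openvswitch/list.h"   /* struct ovs_list */

    struct connmgr {
        struct ofproto *ofproto;     /* The bridge this connmgr serves. */
        struct ovs_list all_conns;   /* Active connections: list of ofconns. */
        struct hmap services;        /* Passive listeners (ofservice). */
        struct pvconn **snoops;      /* Passive snoop listeners. */
        size_t n_snoops;
        struct in_band *in_band;     /* In-band hidden-flow state. */
        struct fail_open *fail_open; /* Fail-open state. */
    };

    struct ofconn {
        struct connmgr *connmgr;     /* Owning connmgr. */
        struct ovs_list node;        /* Element in connmgr's all_conns. */
        struct rconn *rconn;         /* The connection itself. */
        int type;                    /* OFCONN_PRIMARY (controller) or
                                      * OFCONN_SERVICE (e.g. ovs-ofctl);
                                      * an enum in the real source. */
    };

    struct rconn {
        struct vconn *vconn;         /* Underlying virtual connection. */
        struct vconn *monitors[8];   /* Snoop vconns mirroring this rconn;
                                      * a fixed-size array in the source. */
        size_t n_monitors;
    };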

connmgr_run mainly comprises in_band_run, ofconn_run, and the handling of passive connections:

    void
    connmgr_run(struct connmgr *mgr,
                void (*handle_openflow)(struct ofconn *,
                                        const struct ofpbuf *ofp_msg))
        OVS_EXCLUDED(ofproto_mutex)
    {
        struct ofconn *ofconn, *next_ofconn;
        struct ofservice *ofservice;
        size_t i;

        if (mgr->in_band) {
            if (!in_band_run(mgr->in_band)) {
                in_band_destroy(mgr->in_band);
                mgr->in_band = NULL;
            }
        }

        LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
            ofconn_run(ofconn, handle_openflow);
        }

        /* When too many flow monitor notifications back up in the transmit
         * buffer, pause the transmission of further notifications. */
        ofmonitor_run(mgr);

        /* Fail-open maintenance.  Do this after processing the ofconns since
         * fail-open checks the status of the controller rconn. */
        if (mgr->fail_open) {
            fail_open_run(mgr->fail_open);
        }

        HMAP_FOR_EACH (ofservice, node, &mgr->services) {
            struct vconn *vconn;
            int retval;

            /* Ultimately calls pstream_class->accept to accept a passive
             * punix/pssl/ptcp/... connection. */
            retval = pvconn_accept(ofservice->pvconn, &vconn);
            if (!retval) {
                struct rconn *rconn;
                char *name;

                /* Create a default rconn instance for the passive connection. */
                rconn = rconn_create(ofservice->probe_interval, 0, ofservice->dscp,
                                     vconn_get_allowed_versions(vconn));
                name = ofconn_make_name(mgr, vconn_get_name(vconn));
                /* Drop any existing connection on the rconn and attach the
                 * passive vconn to it. */
                rconn_connect_unreliably(rconn, vconn, name);
                free(name);

                ovs_mutex_lock(&ofproto_mutex);

                /* Create an ofconn for the passive connection, backed by
                 * the rconn. */
                ofconn = ofconn_create(mgr, rconn, OFCONN_SERVICE,
                                       ofservice->enable_async_msgs);
                ovs_mutex_unlock(&ofproto_mutex);

                ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
                                      ofservice->burst_limit);
            } else if (retval != EAGAIN) {
                VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
            }
        }

        for (i = 0; i < mgr->n_snoops; i++) {
            struct vconn *vconn;
            int retval;

            /* Accept a vconn for the snoop listener. */
            retval = pvconn_accept(mgr->snoops[i], &vconn);
            if (!retval) {
                /* Unlike an ofservice connection, a snooper gets no ofconn
                 * or rconn of its own.  Instead, the best existing ofconn is
                 * chosen by ofconn->role, and the snoop vconn is added to
                 * that ofconn's rconn as a monitor, mirroring its traffic. */
                add_snooper(mgr, vconn);
            } else if (retval != EAGAIN) {
                VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
            }
        }
    }

in_band_run: Open vSwitch is not merely an OpenFlow switch; besides the OpenFlow tables, it maintains additional hidden flows. These exist because, by default, the switch and its controller are assumed to share the same network (in-band control), so connectivity between the two must be preserved regardless of what the controller installs. In-band control can be disabled with "ovs-vsctl set controller br0 connection-mode=out-of-band".
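For intuition, a hidden rule that keeps the OpenFlow TCP session toward the controller reachable could be matched roughly as in the sketch below, built with OVS's generic match helpers. This is only an illustration: the real logic lives in update_rules() in ofproto/in-band.c, also covers ARP and DHCP, and discovers the controller address itself (controller_ip and the hard-coded port here are placeholders):

    #include <netinet/in.h>          /* IPPROTO_TCP */
    #include "openvswitch/match.h"   /* match helpers */
    #include "packets.h"             /* ETH_TYPE_IP */

    /* Sketch: match TCP traffic destined to the controller's OpenFlow port,
     * so it can be installed as a hidden flow that stays in place no matter
     * what the controller puts in the OpenFlow tables. */
    static void
    sketch_in_band_match(struct match *match, ovs_be32 controller_ip)
    {
        match_init_catchall(match);
        match_set_dl_type(match, htons(ETH_TYPE_IP));
        match_set_nw_proto(match, IPPROTO_TCP);
        match_set_nw_dst(match, controller_ip);
        match_set_tp_dst(match, htons(6653));   /* Default OpenFlow port. */
    }

in_band_run then installs or removes the accumulated hidden rules: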

    bool
    in_band_run(struct in_band *ib)
    {
        uint64_t ofpacts_stub[128 / 8];
        struct ofpbuf ofpacts;

        struct in_band_rule *rule, *next;

        ...

        refresh_local(ib);    /* Refresh the local port's MAC. */
        refresh_remotes(ib);  /* Refresh the remote (controller) MACs. */

        /* Build hidden rules covering ARP, DHCP, TCP, etc. for the local
         * port and every remote. */
        update_rules(ib);

        HMAP_FOR_EACH_SAFE (rule, next, hmap_node, &ib->rules) {
            switch (rule->op) {
            case ADD:
                ofproto_add_flow(ib->ofproto, &rule->match, rule->priority,
                                 ofpacts.data, ofpacts.size);
                break;

            case DEL:
                ovs_mutex_lock(&ofproto_mutex);
                ofproto_delete_flow(ib->ofproto, &rule->match, rule->priority);
                ovs_mutex_unlock(&ofproto_mutex);
                hmap_remove(&ib->rules, &rule->hmap_node);
                free(rule);
                break;
            }
        }

        ofpbuf_uninit(&ofpacts);

        return ib->n_remotes || !hmap_is_empty(&ib->rules);
    }
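The rules iterated above have roughly this shape (an abridged sketch of struct in_band_rule from ofproto/in-band.c; treat the field details as approximate):

    #include "openvswitch/hmap.h"    /* struct hmap_node */
    #include "openvswitch/match.h"   /* struct match */

    enum in_band_op { ADD, DEL };    /* Install or remove the rule. */

    /* One hidden flow tracked by in-band control (abridged sketch). */
    struct in_band_rule {
        struct hmap_node hmap_node;  /* Node in struct in_band's "rules" hmap. */
        struct match match;          /* e.g. ARP, DHCP, or OpenFlow TCP. */
        int priority;                /* Priority of the hidden flow. */
        enum in_band_op op;          /* Pending operation, consumed above. */
    };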
ofconn_run: processes the ofconn of every active connection in the connmgr's all_conns list. It mainly drives the state machine of the bridge-to-controller connection and receives and dispatches OpenFlow messages.
    static void
    ofconn_run(struct ofconn *ofconn,
               void (*handle_openflow)(struct ofconn *,
                                       const struct ofpbuf *ofp_msg))
    {
        struct connmgr *mgr = ofconn->connmgr;
        size_t i;

        for (i = 0; i < N_SCHEDULERS; i++) {
            struct ovs_list txq;

            /* Dequeue rate-limited OFPT_PACKET_IN messages and send them. */
            pinsched_run(ofconn->schedulers[i], &txq);
            do_send_packet_ins(ofconn, &txq);
        }

        rconn_run(ofconn->rconn);  /* Transmit path. */

        /* Limit the number of iterations to avoid starving other tasks. */
        for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);  /* Receive path. */

            ...

            /* Dispatch the OpenFlow message for concrete handling. */
            handle_openflow(ofconn, of_msg);

            ...
        }

        ...
    }

The OpenFlow message send and receive paths are analyzed in detail below.

pinsched_run dequeues packets from the pinqueue as the token bucket permits and appends them to txq:

    void
    pinsched_run(struct pinsched *ps, struct ovs_list *txq)
    {
        ovs_list_init(txq);
        if (ps) {
            int i;

            /* Drain some packets out of the bucket if possible, but limit the
             * number of iterations to allow other code to get work done too. */
            for (i = 0; ps->n_queued && get_token(ps) && i < 50; i++) {
                /* get_tx_packet() removes the packet from 'ps' and returns it. */
                struct ofpbuf *packet = get_tx_packet(ps);
                ovs_list_push_back(txq, &packet->list_node);
            }
        }
    }
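get_token() is the token-bucket gate that enforces the configured packet-in rate and burst limits. The toy version below illustrates the idea; it is a self-contained sketch with made-up names, not OVS's implementation (the real bucket lives in lib/token-bucket.c and is configured through ofconn_set_rate_limit, seen earlier in connmgr_run):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative token bucket: tokens accumulate at 'rate' per millisecond
     * up to 'burst'; sending one packet-in costs one token. */
    struct toy_token_bucket {
        uint32_t rate;        /* Tokens added per millisecond. */
        uint32_t burst;       /* Maximum tokens the bucket can hold. */
        uint32_t tokens;      /* Tokens currently available. */
        long long last_fill;  /* Time of the last refill, in ms. */
    };

    static bool
    toy_bucket_withdraw(struct toy_token_bucket *tb, long long now_ms)
    {
        /* Refill in proportion to elapsed time, capped at the burst size. */
        if (now_ms > tb->last_fill) {
            uint64_t refill = tb->tokens
                + (uint64_t) (now_ms - tb->last_fill) * tb->rate;
            tb->tokens = refill < tb->burst ? (uint32_t) refill : tb->burst;
            tb->last_fill = now_ms;
        }
        if (tb->tokens > 0) {
            tb->tokens--;     /* Admit one packet. */
            return true;
        }
        return false;         /* Out of tokens; leave the packet queued. */
    }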
do_send_packet_ins then hands each dequeued message to the rconn, which ultimately calls vconn_send to transmit it.
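A condensed sketch of do_send_packet_ins, based on ofproto/connmgr.c (the packet counter and overflow handling are abridged):

    static void
    do_send_packet_ins(struct ofconn *ofconn, struct ovs_list *txq)
    {
        struct ofpbuf *pin;

        LIST_FOR_EACH_POP (pin, list_node, txq) {
            /* Enqueue onto the rconn; its send path performs the actual
             * vconn_send.  EAGAIN means the transmit queue overflowed and
             * the packet-in was dropped. */
            if (rconn_send_with_limit(ofconn->rconn, pin,
                                      ofconn->packet_in_counter, 100)
                == EAGAIN) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);

                VLOG_INFO_RL(&rl, "%s: dropping packet-in due to queue overflow",
                             rconn_get_name(ofconn->rconn));
            }
        }
    }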

The transmit path is rconn_run --> vconn_run --> vconn_stream_run --> stream_send --> fd_send; a condensed sketch of the vconn_stream_run step follows the rconn_run listing below.

    void
    rconn_run(struct rconn *rc)
    {
        int old_state;
        size_t i;

        if (rc->vconn) {
            ...

            vconn_run(rc->vconn);

            ...
        }

        /* Run each monitor vconn attached to this rconn, so that copies of
         * the OpenFlow messages exchanged on the main connection are flushed
         * back to the snooping ofctl client via rc->monitors[i]. */
        for (i = 0; i < rc->n_monitors; ) {
            struct ofpbuf *msg;
            int retval;

            vconn_run(rc->monitors[i]);

            /* Drain any stray message that came in on the monitor connection. */
            retval = vconn_recv(rc->monitors[i], &msg);
            if (!retval) {
                ofpbuf_delete(msg);
            } else if (retval != EAGAIN) {
                close_monitor(rc, i, retval);
                continue;
            }
            i++;
        }

        ...
    }
The receive path is rconn_recv -> vconn_recv -> do_recv -> vconn_stream_recv -> vconn_stream_recv__ -> stream_recv:
    struct ofpbuf *
    rconn_recv(struct rconn *rc)
    {
        struct ofpbuf *buffer = NULL;

        ovs_mutex_lock(&rc->mutex);
        if (rc->state & (S_ACTIVE | S_IDLE)) {
            int error = vconn_recv(rc->vconn, &buffer);
            if (!error) {
                /* Copy the received message to every monitor vconn. */
                copy_to_monitor(rc, buffer);
                ...
            }
        }
        ovs_mutex_unlock(&rc->mutex);
        return buffer;
    }
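copy_to_monitor is where snoop mirroring happens: the received message is cloned and sent down every monitor vconn. Condensed from lib/rconn.c (rate-limited logging and bookkeeping abridged):

    static void
    copy_to_monitor(struct rconn *rc, const struct ofpbuf *b)
    {
        struct ofpbuf *clone = NULL;
        int retval;
        size_t i;

        for (i = 0; i < rc->n_monitors; ) {
            struct vconn *vconn = rc->monitors[i];

            if (!clone) {
                clone = ofpbuf_clone(b);      /* Fresh copy for this monitor. */
            }
            retval = vconn_send(vconn, clone);
            if (!retval) {
                clone = NULL;                 /* Ownership passed to the vconn. */
            } else if (retval != EAGAIN) {
                close_monitor(rc, i, retval); /* Drop a broken snoop connection. */
                continue;
            }
            i++;
        }
        ofpbuf_delete(clone);                 /* Free any unsent leftover copy. */
    }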
Both the send and receive paths ultimately operate on struct ofpbuf.
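The original structure diagram is not reproduced here; in its place, an abridged definition from include/openvswitch/ofpbuf.h in recent OVS releases (field names have shifted slightly across versions):

    /* Buffer for an OpenFlow message (abridged). */
    struct ofpbuf {
        void *base;                 /* First byte of allocated space. */
        void *data;                 /* First byte actually in use. */
        uint32_t size;              /* Number of bytes in use. */
        uint32_t allocated;         /* Number of bytes allocated. */
        void *header;               /* OpenFlow header, if any. */
        void *msg;                  /* Start of the message body, if any. */
        struct ovs_list list_node;  /* For chaining onto lists such as txq. */
        enum ofpbuf_source source;  /* Who allocated 'base' (malloc, stack, ...). */
    };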



This article is largely based on https://www.sdnlab.com/16144.html, with a bit of my own understanding added; if there are mistakes, corrections are welcome. Thanks.

Reposted from www.cnblogs.com/liuhongru/p/11399046.html