
📄 handlers.c

📁 A very classic distributed system
💻 C
📖 Page 1 of 3
                        rc = usocklnd_passiveconn_hellorecv(conn);

                break;

        default:
                LBUG(); /* unknown state */
        }

        return rc;
}

/* All actions that we need after receiving a hello on an active conn:
 * 1) Schedule removal if we're a zombie
 * 2) Restart the active conn if we lost the race
 * 3) Else: update the RX part to receive a KSM header */
int
usocklnd_activeconn_hellorecv(usock_conn_t *conn)
{
        int                rc    = 0;
        ksock_hello_msg_t *hello = conn->uc_rx_hello;
        usock_peer_t      *peer  = conn->uc_peer;

        /* An active conn with peer==NULL is a zombie.
         * Don't try to link it to a peer because the conn
         * has already had a chance to proceed at the beginning */
        if (peer == NULL) {
                LASSERT(list_empty(&conn->uc_tx_list) &&
                        list_empty(&conn->uc_zcack_list));

                usocklnd_conn_kill(conn);
                return 0;
        }

        peer->up_last_alive = cfs_time_current();

        /* peer says that we lost the race */
        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Start a new active conn, relink txs and zc_acks from
                 * the conn to the new conn, schedule removing the conn.
                 * Actually, we're expecting that a passive conn will
                 * make us a zombie soon and take care of our txs and
                 * zc_acks */

                struct list_head tx_list, zcack_list;
                usock_conn_t *conn2;
                int idx = usocklnd_type2idx(conn->uc_type);

                CFS_INIT_LIST_HEAD (&tx_list);
                CFS_INIT_LIST_HEAD (&zcack_list);

                /* Block usocklnd_send() to check peer->up_conns[idx]
                 * and to enqueue more txs */
                pthread_mutex_lock(&peer->up_lock);
                pthread_mutex_lock(&conn->uc_lock);

                /* usocklnd_shutdown() could kill us */
                if (conn->uc_state == UC_DEAD) {
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return 0;
                }

                LASSERT (peer == conn->uc_peer);
                LASSERT (peer->up_conns[idx] == conn);

                rc = usocklnd_create_active_conn(peer, conn->uc_type, &conn2);
                if (rc) {
                        conn->uc_errored = 1;
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return rc;
                }

                usocklnd_link_conn_to_peer(conn2, peer, idx);
                conn2->uc_peer = peer;

                /* unlink txs and zcacks from the conn */
                list_add(&tx_list, &conn->uc_tx_list);
                list_del_init(&conn->uc_tx_list);
                list_add(&zcack_list, &conn->uc_zcack_list);
                list_del_init(&conn->uc_zcack_list);

                /* link them to conn2 */
                list_add(&conn2->uc_tx_list, &tx_list);
                list_del_init(&tx_list);
                list_add(&conn2->uc_zcack_list, &zcack_list);
                list_del_init(&zcack_list);

                /* make the conn a zombie */
                conn->uc_peer = NULL;
                usocklnd_peer_decref(peer);

                /* schedule conn2 for processing */
                rc = usocklnd_add_pollrequest(conn2, POLL_ADD_REQUEST, POLLOUT);
                if (rc) {
                        peer->up_conns[idx] = NULL;
                        usocklnd_conn_decref(conn2); /* should destroy conn */
                } else {
                        usocklnd_conn_kill_locked(conn);
                }

                pthread_mutex_unlock(&conn->uc_lock);
                pthread_mutex_unlock(&peer->up_lock);
                usocklnd_conn_decref(conn);

        } else { /* hello->kshm_ctype != SOCKLND_CONN_NONE */
                if (conn->uc_type != usocklnd_invert_type(hello->kshm_ctype))
                        return -EPROTO;

                pthread_mutex_lock(&peer->up_lock);
                usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation,
                                             conn);
                pthread_mutex_unlock(&peer->up_lock);

                /* safely transit to the UC_READY state */
                /* rc == 0 */
                pthread_mutex_lock(&conn->uc_lock);
                if (conn->uc_state != UC_DEAD) {
                        usocklnd_rx_ksmhdr_state_transition(conn);

                        /* POLLIN is already set because we just
                         * received the hello, but maybe we have
                         * something to send? */
                        LASSERT (conn->uc_sending == 0);
                        if ( !list_empty(&conn->uc_tx_list) ||
                             !list_empty(&conn->uc_zcack_list) ) {

                                conn->uc_tx_deadline =
                                        cfs_time_shift(usock_tuns.ut_timeout);
                                conn->uc_tx_flag = 1;
                                rc = usocklnd_add_pollrequest(conn,
                                                              POLL_SET_REQUEST,
                                                              POLLIN | POLLOUT);
                        }

                        if (rc == 0)
                                conn->uc_state = UC_READY;
                }
                pthread_mutex_unlock(&conn->uc_lock);
        }

        return rc;
}

/* All actions that we need after receiving a hello on a passive conn:
 * 1) Stash the peer's nid, pid, incarnation and conn type
 * 2) Cope with the easy case: conn[idx] is empty - just save the conn there
 * 3) Resolve the race:
 *    a) if our nid is higher - reply with CONN_NONE and make us a zombie
 *    b) if the peer's nid is higher - postpone race resolution till
 *       the READY state
 * 4) In any case, send a reply hello */
int
usocklnd_passiveconn_hellorecv(usock_conn_t *conn)
{
        ksock_hello_msg_t *hello = conn->uc_rx_hello;
        int                type;
        int                idx;
        int                rc;
        usock_peer_t      *peer;
        lnet_ni_t         *ni        = conn->uc_ni;
        __u32              peer_ip   = conn->uc_peer_ip;
        __u16              peer_port = conn->uc_peer_port;

        /* don't know the parent peer yet and not a zombie */
        LASSERT (conn->uc_peer == NULL &&
                 ni != NULL);

        /* don't know the peer's nid and incarnation yet */
        if (peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* do not trust liblustre clients */
                conn->uc_peerid.pid = peer_port | LNET_PID_USERFLAG;
                conn->uc_peerid.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
                                                 peer_ip);
                if (hello->kshm_ctype != SOCKLND_CONN_ANY) {
                        lnet_ni_decref(ni);
                        conn->uc_ni = NULL;
                        CERROR("Refusing to accept connection of type=%d from "
                               "userspace process %u.%u.%u.%u:%d\n",
                               hello->kshm_ctype,
                               HIPQUAD(peer_ip), peer_port);
                        return -EINVAL;
                }
        } else {
                conn->uc_peerid.pid = hello->kshm_src_pid;
                conn->uc_peerid.nid = hello->kshm_src_nid;
        }

        conn->uc_type = type = usocklnd_invert_type(hello->kshm_ctype);

        rc = usocklnd_find_or_create_peer(ni, conn->uc_peerid, &peer);
        if (rc) {
                lnet_ni_decref(ni);
                conn->uc_ni = NULL;
                return rc;
        }

        peer->up_last_alive = cfs_time_current();

        idx = usocklnd_type2idx(conn->uc_type);

        /* safely check whether we're first */
        pthread_mutex_lock(&peer->up_lock);

        usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation, NULL);

        if (peer->up_conns[idx] == NULL) {
                peer->up_last_alive = cfs_time_current();
                conn->uc_peer = peer;
                conn->uc_ni = NULL;
                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
        } else {
                usocklnd_peer_decref(peer);

                /* Resolve the race in favour of the higher NID */
                if (conn->uc_peerid.nid < conn->uc_ni->ni_nid) {
                        /* make us a zombie */
                        conn->uc_ni = NULL;
                        type = SOCKLND_CONN_NONE;
                }

                /* if conn->uc_peerid.nid > conn->uc_ni->ni_nid,
                 * postpone race resolution till the READY state
                 * (hopefully conn[idx] will die because of an
                 * incoming hello of CONN_NONE type) */
        }

        pthread_mutex_unlock(&peer->up_lock);

        /* allocate and initialize a fake tx with the hello */
        conn->uc_tx_hello = usocklnd_create_hello_tx(ni, type,
                                                     conn->uc_peerid.nid);

        if (conn->uc_ni == NULL)
                lnet_ni_decref(ni);

        if (conn->uc_tx_hello == NULL)
                return -ENOMEM;

        /* rc == 0 */
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state == UC_DEAD)
                goto passive_hellorecv_done;

        conn->uc_state = UC_SENDING_HELLO;
        conn->uc_tx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
        conn->uc_tx_flag = 1;
        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST, POLLOUT);

  passive_hellorecv_done:
        pthread_mutex_unlock(&conn->uc_lock);
        return rc;
}

int
usocklnd_write_handler(usock_conn_t *conn)
{
        usock_tx_t   *tx;
        int           ret;
        int           rc = 0;
        int           state;
        usock_peer_t *peer;
        lnet_ni_t    *ni;

        pthread_mutex_lock(&conn->uc_lock); /* acts like a memory barrier */
        state = conn->uc_state;
        pthread_mutex_unlock(&conn->uc_lock);

        switch (state) {
        case UC_CONNECTING:
                /* hello_tx has already been initialized
                 * in usocklnd_create_active_conn() */
                usocklnd_conn_new_state(conn, UC_SENDING_HELLO);
                /* fall through */

        case UC_SENDING_HELLO:
                rc = usocklnd_send_tx(conn, conn->uc_tx_hello);
                if (rc <= 0) /* error or partial send or connection closed */
                        break;

                /* the tx with the hello was sent successfully */
                usocklnd_destroy_tx(NULL, conn->uc_tx_hello);
                conn->uc_tx_hello = NULL;

                if (conn->uc_activeflag == 1) /* active conn */
                        rc = usocklnd_activeconn_hellosent(conn);
                else                          /* passive conn */
                        rc = usocklnd_passiveconn_hellosent(conn);

                break;

        case UC_READY:
                pthread_mutex_lock(&conn->uc_lock);

                peer = conn->uc_peer;
                LASSERT (peer != NULL);
                ni = peer->up_ni;

                if (list_empty(&conn->uc_tx_list) &&
                    list_empty(&conn->uc_zcack_list)) {
                        LASSERT(usock_tuns.ut_fair_limit > 1);
                        pthread_mutex_unlock(&conn->uc_lock);
                        return 0;
                }

                tx = usocklnd_try_piggyback(&conn->uc_tx_list,
                                            &conn->uc_zcack_list);
                if (tx != NULL)
                        conn->uc_sending = 1;
                else
                        rc = -ENOMEM;

                pthread_mutex_unlock(&conn->uc_lock);

                if (rc)
                        break;

                rc = usocklnd_send_tx(conn, tx);
                if (rc == 0) { /* partial send or connection closed */
                        pthread_mutex_lock(&conn->uc_lock);
                        list_add(&tx->tx_list, &conn->uc_tx_list);
                        conn->uc_sending = 0;
                        pthread_mutex_unlock(&conn->uc_lock);
                        break;
                }

                if (rc < 0) { /* real error */
                        usocklnd_destroy_tx(ni, tx);
                        break;
                }

                /* rc == 1: the tx was sent completely */
                usocklnd_destroy_tx(ni, tx);

                pthread_mutex_lock(&conn->uc_lock);
                conn->uc_sending = 0;
                if (conn->uc_state != UC_DEAD &&
                    list_empty(&conn->uc_tx_list) &&

[listing continues on page 2 of 3]
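The race-resolution rule described in the comments above (a duplicate connection between two nodes is resolved in favour of the higher NID: the higher-NID node answers the duplicate hello with SOCKLND_CONN_NONE, and the lower-NID node's active connection then gives up) can be illustrated with a minimal standalone sketch. This is only an illustration under simplified assumptions: nid_t, CONN_TYPE_NONE, CONN_TYPE_BULK_IN and passive_reply_type() are hypothetical stand-ins, not the real LNET/usocklnd API.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t nid_t;              /* hypothetical stand-in for an LNET NID */

#define CONN_TYPE_NONE     0         /* "you lost the race" reply (cf. SOCKLND_CONN_NONE) */
#define CONN_TYPE_BULK_IN  1         /* hypothetical ordinary connection type */

/* Decide which connection type the passive side replies with.
 * If the peer's NID is lower than ours, our own active connection wins:
 * refuse the duplicate by answering CONN_TYPE_NONE.  Otherwise accept
 * the requested type; the race, if any, is resolved on the peer's side. */
static int passive_reply_type(nid_t my_nid, nid_t peer_nid, int requested_type)
{
        if (peer_nid < my_nid)
                return CONN_TYPE_NONE;
        return requested_type;
}

int main(void)
{
        /* Our NID (20) is higher than the peer's (10), so the duplicate
         * incoming connection is refused with CONN_TYPE_NONE (prints 0). */
        printf("reply type = %d\n",
               passive_reply_type(20, 10, CONN_TYPE_BULK_IN));

        /* The peer's NID (30) is higher, so we accept the requested type
         * and let the peer resolve the race on its side (prints 1). */
        printf("reply type = %d\n",
               passive_reply_type(20, 30, CONN_TYPE_BULK_IN));
        return 0;
}

With this convention exactly one connection per type survives a simultaneous connect: the loser's conn is turned into a zombie and its queued txs and zc_acks are relinked to the surviving connection, as done in usocklnd_activeconn_hellorecv() above.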
