📄 ptllnd_tx.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2005 Cluster File Systems, Inc. All rights reserved.
 *   Author: PJ Kirner <pjkirner@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   This file is confidential source code owned by Cluster File Systems.
 *   No viewing, modification, compilation, redistribution, or any other
 *   form of use is permitted except through a signed license agreement.
 *
 *   If you have not signed such an agreement, then you have no rights to
 *   this file. Please destroy it immediately and contact CFS.
 */

#include "ptllnd.h"

/* Free a TX descriptor together with its message buffer and fragment
 * array, and keep the descriptor count in step. */
void
kptllnd_free_tx(kptl_tx_t *tx)
{
        if (tx->tx_msg != NULL)
                LIBCFS_FREE(tx->tx_msg, sizeof(*tx->tx_msg));

        if (tx->tx_frags != NULL)
                LIBCFS_FREE(tx->tx_frags, sizeof(*tx->tx_frags));

        LIBCFS_FREE(tx, sizeof(*tx));

        atomic_dec(&kptllnd_data.kptl_ntx);

        /* Keep the tunable in step for visibility */
        *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
}

/* Allocate and initialise a TX descriptor, including its message buffer
 * and fragment array; returns NULL on allocation failure. */
kptl_tx_t *
kptllnd_alloc_tx(void)
{
        kptl_tx_t       *tx;

        LIBCFS_ALLOC(tx, sizeof(*tx));
        if (tx == NULL) {
                CERROR("Failed to allocate TX\n");
                return NULL;
        }

        atomic_inc(&kptllnd_data.kptl_ntx);

        /* Keep the tunable in step for visibility */
        *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);

        tx->tx_idle = 1;
        tx->tx_rdma_mdh = PTL_INVALID_HANDLE;
        tx->tx_msg_mdh = PTL_INVALID_HANDLE;
        tx->tx_rdma_eventarg.eva_type = PTLLND_EVENTARG_TYPE_RDMA;
        tx->tx_msg_eventarg.eva_type = PTLLND_EVENTARG_TYPE_MSG;
        tx->tx_msg = NULL;
        tx->tx_frags = NULL;

        LIBCFS_ALLOC(tx->tx_msg, sizeof(*tx->tx_msg));
        if (tx->tx_msg == NULL) {
                CERROR("Failed to allocate TX payload\n");
                goto failed;
        }

        LIBCFS_ALLOC(tx->tx_frags, sizeof(*tx->tx_frags));
        if (tx->tx_frags == NULL) {
                CERROR("Failed to allocate TX frags\n");
                goto failed;
        }

        return tx;

 failed:
        kptllnd_free_tx(tx);
        return NULL;
}

/* Pre-allocate the configured number of TX descriptors onto the idle list. */
int
kptllnd_setup_tx_descs()
{
        int       n = *kptllnd_tunables.kptl_ntx;
        int       i;

        for (i = 0; i < n; i++) {
                kptl_tx_t *tx = kptllnd_alloc_tx();

                if (tx == NULL)
                        return -ENOMEM;

                spin_lock(&kptllnd_data.kptl_tx_lock);
                list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
                spin_unlock(&kptllnd_data.kptl_tx_lock);
        }

        return 0;
}

/* Free every TX descriptor on the idle list at shutdown; by this point all
 * descriptors must have been returned to the idle list. */
void
kptllnd_cleanup_tx_descs()
{
        kptl_tx_t       *tx;

        /* No locking; single threaded now */
        LASSERT (kptllnd_data.kptl_shutdown == 2);

        while (!list_empty(&kptllnd_data.kptl_idle_txs)) {
                tx = list_entry(kptllnd_data.kptl_idle_txs.next,
                                kptl_tx_t, tx_list);

                list_del(&tx->tx_list);
                kptllnd_free_tx(tx);
        }

        LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
}

/* Take a TX descriptor from the idle list (allocating a fresh one if the
 * list is empty) and prepare it for use as the given message type. */
kptl_tx_t *
kptllnd_get_idle_tx(enum kptl_tx_type type)
{
        kptl_tx_t      *tx = NULL;

        if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
            type == TX_TYPE_PUT_REQUEST) {
                CERROR("FAIL_TX_PUT_ALLOC SIMULATION triggered\n");
                return NULL;
        }

        if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
            type == TX_TYPE_GET_REQUEST) {
                CERROR("FAIL_TX_GET_ALLOC SIMULATION triggered\n");
                return NULL;
        }

        if (IS_SIMULATION_ENABLED(FAIL_TX)) {
                CERROR("FAIL_TX SIMULATION triggered\n");
                return NULL;
        }

        spin_lock(&kptllnd_data.kptl_tx_lock);

        if (list_empty(&kptllnd_data.kptl_idle_txs)) {
                spin_unlock(&kptllnd_data.kptl_tx_lock);

                tx = kptllnd_alloc_tx();
                if (tx == NULL)
                        return NULL;
        } else {
                tx = list_entry(kptllnd_data.kptl_idle_txs.next,
                                kptl_tx_t, tx_list);
                list_del(&tx->tx_list);

                spin_unlock(&kptllnd_data.kptl_tx_lock);
        }

        LASSERT (atomic_read(&tx->tx_refcount) == 0);
        LASSERT (tx->tx_idle);
        LASSERT (!tx->tx_active);
        LASSERT (tx->tx_lnet_msg == NULL);
        LASSERT (tx->tx_lnet_replymsg == NULL);
        LASSERT (tx->tx_peer == NULL);
        LASSERT (PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE));
        LASSERT (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE));

        tx->tx_type = type;
        atomic_set(&tx->tx_refcount, 1);
        tx->tx_status = 0;
        tx->tx_idle = 0;
        tx->tx_tposted = 0;
        tx->tx_acked = *kptllnd_tunables.kptl_ack_puts;

        CDEBUG(D_NET, "tx=%p\n", tx);
        return tx;
}

/* kptllnd_tx_abort_netio(): abort any uncompleted Portals comms on this TX
 * by unlinking its message/RDMA memory descriptors so that completion (or
 * unlink) events arrive as soon as possible.  Returns 0 if no network I/O
 * was outstanding. */
#ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
int
kptllnd_tx_abort_netio(kptl_tx_t *tx)
{
        kptl_peer_t     *peer = tx->tx_peer;
        ptl_handle_md_t  msg_mdh;
        ptl_handle_md_t  rdma_mdh;
        unsigned long    flags;

        LASSERT (atomic_read(&tx->tx_refcount) == 0);
        LASSERT (!tx->tx_active);

        spin_lock_irqsave(&peer->peer_lock, flags);

        msg_mdh = tx->tx_msg_mdh;
        rdma_mdh = tx->tx_rdma_mdh;

        if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
            PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
                spin_unlock_irqrestore(&peer->peer_lock, flags);
                return 0;
        }

        /* Uncompleted comms: there must have been some error and it must be
         * propagated to LNET... */
        LASSERT (tx->tx_status != 0 ||
                 (tx->tx_lnet_msg == NULL && tx->tx_lnet_replymsg == NULL));

        /* stash the tx on its peer until it completes */
        atomic_set(&tx->tx_refcount, 1);
        tx->tx_active = 1;
        list_add_tail(&tx->tx_list, &peer->peer_activeq);

        spin_unlock_irqrestore(&peer->peer_lock, flags);

        /* These unlinks will ensure completion events (normal or unlink) will
         * happen ASAP */
        if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE))
                PtlMDUnlink(msg_mdh);

        if (!PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE))
                PtlMDUnlink(rdma_mdh);

        return -EAGAIN;
}
#else
int
kptllnd_tx_abort_netio(kptl_tx_t *tx)
{
        kptl_peer_t     *peer = tx->tx_peer;
        ptl_handle_md_t  msg_mdh;
        ptl_handle_md_t  rdma_mdh;
        unsigned long    flags;
        ptl_err_t        prc;

        LASSERT (atomic_read(&tx->tx_refcount) == 0);
        LASSERT (!tx->tx_active);

        spin_lock_irqsave(&peer->peer_lock, flags);

        msg_mdh = tx->tx_msg_mdh;
        rdma_mdh = tx->tx_rdma_mdh;

        if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
            PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
                spin_unlock_irqrestore(&peer->peer_lock, flags);
                return 0;
        }

        /* Uncompleted comms: there must have been some error and it must be
         * propagated to LNET... */
        LASSERT (tx->tx_status != 0 ||
                 (tx->tx_lnet_msg == NULL &&
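
For reference, below is a minimal, self-contained user-space sketch of the descriptor-pool pattern this file implements: descriptors are pre-allocated onto a locked idle list, handed out on demand, and allocated on the fly when the list runs dry. It is illustrative only; the names (tx_pool_t, pool_setup(), pool_get(), pool_put()) are invented for the sketch and are not part of the Lustre or Portals API, and pthread mutexes stand in for the kernel spinlock used above.

/* Illustrative sketch only: user-space analogue of the idle-TX pool.
 * Hypothetical names, not the Lustre/ptllnd API. Build with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct tx {
        struct tx *next;               /* link on the idle list */
        int        in_use;             /* analogue of !tx_idle */
} tx_t;

typedef struct {
        pthread_mutex_t lock;          /* analogue of kptl_tx_lock */
        tx_t           *idle;          /* analogue of kptl_idle_txs */
} tx_pool_t;

/* Pre-allocate 'n' descriptors onto the idle list (cf. setup_tx_descs). */
static int pool_setup(tx_pool_t *p, int n)
{
        pthread_mutex_init(&p->lock, NULL);
        p->idle = NULL;
        while (n-- > 0) {
                tx_t *tx = calloc(1, sizeof(*tx));
                if (tx == NULL)
                        return -1;     /* analogue of -ENOMEM */
                tx->next = p->idle;
                p->idle = tx;
        }
        return 0;
}

/* Pop an idle descriptor, or allocate a fresh one when the list is empty
 * (cf. get_idle_tx falling back to alloc_tx). */
static tx_t *pool_get(tx_pool_t *p)
{
        tx_t *tx;

        pthread_mutex_lock(&p->lock);
        tx = p->idle;
        if (tx != NULL)
                p->idle = tx->next;
        pthread_mutex_unlock(&p->lock);

        if (tx == NULL)
                tx = calloc(1, sizeof(*tx));
        if (tx != NULL)
                tx->in_use = 1;
        return tx;
}

/* Return a descriptor to the idle list. */
static void pool_put(tx_pool_t *p, tx_t *tx)
{
        tx->in_use = 0;
        pthread_mutex_lock(&p->lock);
        tx->next = p->idle;
        p->idle = tx;
        pthread_mutex_unlock(&p->lock);
}

int main(void)
{
        tx_pool_t pool;
        tx_t     *tx;

        if (pool_setup(&pool, 4) != 0)
                return 1;

        tx = pool_get(&pool);
        if (tx == NULL)
                return 1;

        printf("got tx %p (in_use=%d)\n", (void *)tx, tx->in_use);
        pool_put(&pool, tx);
        return 0;
}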