📄 qswlnd_cb.c
字号:
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.lustre.org
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "qswlnd.h"

/* Tell LNet the peer this tx was addressed to has gone down.  The time
 * of death is back-dated to the tx's launch: the seconds elapsed since
 * ktx_launchtime (converted from jiffies) are subtracted from "now". */
void
kqswnal_notify_peer_down(kqswnal_tx_t *ktx)
{
        struct timeval  now;
        time_t          then;

        do_gettimeofday (&now);
        then = now.tv_sec - (jiffies - ktx->ktx_launchtime)/HZ;

        lnet_notify(kqswnal_data.kqn_ni, ktx->ktx_nid, 0, then);
}

/* Unload the temporary Elan DVMA mappings this tx acquired and forget
 * its rail selection.  Safe to call when nothing is mapped. */
void
kqswnal_unmap_tx (kqswnal_tx_t *ktx)
{
        int             i;

        ktx->ktx_rail = -1;                     /* unset rail */

        if (ktx->ktx_nmappedpages == 0)
                return;

        CDEBUG(D_NET, "%p unloading %d frags starting at %d\n",
               ktx, ktx->ktx_nfrag, ktx->ktx_firsttmpfrag);

        /* only frags from ktx_firsttmpfrag onward hold temporary DVMA
         * loads; earlier frags are not unloaded here */
        for (i = ktx->ktx_firsttmpfrag; i < ktx->ktx_nfrag; i++)
                ep_dvma_unload(kqswnal_data.kqn_ep,
                               kqswnal_data.kqn_ep_tx_nmh,
                               &ktx->ktx_frags[i]);

        ktx->ktx_nmappedpages = 0;
}

/* Map 'nob' bytes of the page-based payload 'kiov', starting at byte
 * 'offset', into Elan DVMA space, appending descriptors to
 * ktx->ktx_frags[].  Adjacent loads are merged into one frag when
 * ep_nmd_merge() allows.
 *
 * Returns 0 on success, -ENETDOWN when no transmit rail is available,
 * -EMSGSIZE when the message needs more pages than ktx_npages or more
 * than EP_MAXFRAG frags.  On failure, pages mapped so far remain
 * recorded in ktx_nmappedpages so kqswnal_unmap_tx() can clean up. */
int
kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int offset, int nob,
                     unsigned int niov, lnet_kiov_t *kiov)
{
        int          nfrags    = ktx->ktx_nfrag;
        int          nmapped   = ktx->ktx_nmappedpages;
        int          maxmapped = ktx->ktx_npages;
        __u32        basepage  = ktx->ktx_basepage + nmapped;
        char        *ptr;
        EP_RAILMASK  railmask;
        int          rail;

        if (ktx->ktx_rail < 0)
                /* no rail chosen yet: ask the EKC transmitter for its
                 * preferred rail to this peer */
                ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
                                                 EP_RAILMASK_ALL,
                                                 kqswnal_nid2elanid(ktx->ktx_nid));
        rail = ktx->ktx_rail;
        if (rail < 0) {
                CERROR("No rails available for %s\n",
                       libcfs_nid2str(ktx->ktx_nid));
                return (-ENETDOWN);
        }
        railmask = 1 << rail;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
        LASSERT (nfrags <= EP_MAXFRAG);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        /* skip complete frags before 'offset' */
        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                kiov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int  fraglen = kiov->kiov_len - offset;

                /* each page frag is contained in one page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                if (fraglen > nob)
                        fraglen = nob;

                nmapped++;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                /* XXX this is really crap, but we'll have to kmap until
                 * EKC has a page (rather than vaddr) mapping interface */

                ptr = ((char *)kmap (kiov->kiov_page)) +
                      kiov->kiov_offset + offset;

                CDEBUG(D_NET, "%p[%d] loading %p for %d, page %d, %d total\n",
                       ktx, nfrags, ptr, fraglen, basepage, nmapped);

                ep_dvma_load(kqswnal_data.kqn_ep, NULL,
                             ptr, fraglen,
                             kqswnal_data.kqn_ep_tx_nmh, basepage,
                             &railmask, &ktx->ktx_frags[nfrags]);

                if (nfrags == ktx->ktx_firsttmpfrag ||
                    !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags])) {
                        /* new frag if this is the first or can't merge */
                        nfrags++;
                }

                kunmap (kiov->kiov_page);

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                basepage++;
                kiov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);

        return (0);
}

#if KQSW_CKSUM
/* Fold 'nob' bytes of the page-based payload 'kiov', starting at byte
 * 'offset', into the running checksum 'csum' via kqswnal_csum() and
 * return the updated checksum.  Returns 'csum' unchanged when nob==0. */
__u32
kqswnal_csum_kiov (__u32 csum, int offset, int nob,
                   unsigned int niov, lnet_kiov_t *kiov)
{
        char        *ptr;

        if (nob == 0)
                return csum;

        LASSERT (niov > 0);
        LASSERT (nob > 0);

        /* skip complete frags before 'offset' */
        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                kiov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int  fraglen = kiov->kiov_len - offset;

                /* each page frag is contained in one page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                if (fraglen > nob)
                        fraglen = nob;

                /* kmap to get a kernel vaddr for the page's data */
                ptr = ((char *)kmap (kiov->kiov_page)) +
                      kiov->kiov_offset + offset;

                csum = kqswnal_csum(csum, ptr, fraglen);

                kunmap (kiov->kiov_page);

                kiov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        return csum;
}
#endif

/* Map 'nob' bytes of the virtually-addressed payload 'iov', starting
 * at byte 'offset', into Elan DVMA space: the iovec analogue of
 * kqswnal_map_tx_kiov(), with the same return values and the same
 * cleanup contract (ktx_nmappedpages updated inside the loop so a
 * partial mapping can still be unloaded on failure). */
int
kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int offset, int nob,
                    unsigned int niov, struct iovec *iov)
{
        int          nfrags    = ktx->ktx_nfrag;
        int          nmapped   = ktx->ktx_nmappedpages;
        int          maxmapped = ktx->ktx_npages;
        __u32        basepage  = ktx->ktx_basepage + nmapped;
        EP_RAILMASK  railmask;
        int          rail;

        if (ktx->ktx_rail < 0)
                /* no rail chosen yet: ask the EKC transmitter for its
                 * preferred rail to this peer */
                ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
                                                 EP_RAILMASK_ALL,
                                                 kqswnal_nid2elanid(ktx->ktx_nid));
        rail = ktx->ktx_rail;
        if (rail < 0) {
                CERROR("No rails available for %s\n",
                       libcfs_nid2str(ktx->ktx_nid));
                return (-ENETDOWN);
        }
        railmask = 1 << rail;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
        LASSERT (nfrags <= EP_MAXFRAG);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        /* skip complete frags before offset */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int  fraglen = iov->iov_len - offset;
                long npages;

                if (fraglen > nob)
                        fraglen = nob;
                /* a virtually-contiguous frag may span several pages */
                npages = kqswnal_pages_spanned (iov->iov_base, fraglen);

                nmapped += npages;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
                       ktx, nfrags, iov->iov_base + offset, fraglen,
                       basepage, npages, nmapped);

                ep_dvma_load(kqswnal_data.kqn_ep, NULL,
                             iov->iov_base + offset, fraglen,
                             kqswnal_data.kqn_ep_tx_nmh, basepage,
                             &railmask, &ktx->ktx_frags[nfrags]);

                if (nfrags == ktx->ktx_firsttmpfrag ||
                    !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags])) {
                        /* new frag if this is the first or can't merge */
                        nfrags++;
                }

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                basepage += npages;
                iov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);

        return (0);
}

#if KQSW_CKSUM
/* Fold 'nob' bytes of the iovec payload 'iov', starting at byte
 * 'offset', into the running checksum 'csum': the iovec analogue of
 * kqswnal_csum_kiov() above.
 *
 * NOTE(review): the source chunk is truncated mid-function below (it
 * ends inside the do/while body); the remaining statements are not
 * visible here and have been left exactly as found. */
__u32
kqswnal_csum_iov (__u32 csum, int offset, int nob,
                  unsigned int niov, struct iovec *iov)
{
        if (nob == 0)
                return csum;

        LASSERT (niov > 0);
        LASSERT (nob > 0);

        /* skip complete frags before offset */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int  fraglen = iov->iov_len - offset;

                if (fraglen > nob)
                        fraglen = nob;

                csum = kqswnal_csum(csum, iov->iov_base + offset, fraglen);

                iov++;
                niov--;
                nob -= fraglen;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -