aoecmd.c
From the Linux kernel source tree · C source code · 801 lines total · page 1 of 2
C
801 lines
/* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

/* Retransmit-timer tick and the clamp bounds for the RTT average, in jiffies. */
#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)

/* Seconds of unanswered retransmits before the device is failed
 * (see rexmit_timer, which compares accumulated wait time to this). */
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

/*
 * Allocate an skb of @len bytes for an AoE frame, safe in atomic context
 * (GFP_ATOMIC).  Marks it as ETH_P_AOE protocol and disables checksum
 * offload bookkeeping (CHECKSUM_NONE).  Returns NULL on allocation failure.
 *
 * NOTE(review): non-static — presumably called from other aoe source
 * files; confirm the declaration lives in aoe.h.
 */
struct sk_buff *
new_skb(ulong len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (skb) {
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
                skb->priority = 0;
                skb->next = skb->prev = NULL;

                /* tell the network layer not to perform IP checksums
                 * or to get the NIC to do it
                 */
                skb->ip_summed = CHECKSUM_NONE;
        }
        return skb;
}

/*
 * Linear scan of the device's frame table for the in-flight frame whose
 * tag matches @tag; returns NULL when no frame carries that tag.
 */
static struct frame *
getframe(struct aoedev *d, int tag)
{
        struct frame *f, *e;

        f = d->frames;
        e = f + d->nframes;
        for (; f<e; f++)
                if (f->tag == tag)
                        return f;
        return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
*/
/*
 * Build a new tag: low 16 bits are the current jiffies tick (used by
 * tsince() for round-trip timing); upper bits are a wrapping counter
 * masked to 15 bits so the top bit stays clear (see comment above).
 */
static int
newtag(struct aoedev *d)
{
        register ulong n;

        n = jiffies & 0xffff;
        return n |= (++d->lasttag & 0x7fff) << 16;
}

/*
 * Fill in the AoE Ethernet header @h for an ATA command to device @d:
 * source/destination MAC addresses, protocol type, version, shelf/slot
 * (major/minor) address, and a freshly generated tag.
 * Returns the host tag placed in the header.
 */
static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
        u32 host_tag = newtag(d);

        memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
        memcpy(h->dst, d->addr, sizeof h->dst);
        h->type = __constant_cpu_to_be16(ETH_P_AOE);
        h->verfl = AOE_HVER;
        h->major = cpu_to_be16(d->aoemajor);
        h->minor = d->aoeminor;
        h->cmd = AOECMD_ATA;
        h->tag = cpu_to_be32(host_tag);

        return host_tag;
}

/* Scatter the 48-bit LBA into the six single-byte lba fields, low byte first. */
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
        ah->lba0 = lba;
        ah->lba1 = lba >>= 8;
        ah->lba2 = lba >>= 8;
        ah->lba3 = lba >>= 8;
        ah->lba4 = lba >>= 8;
        ah->lba5 = lba >>= 8;
}

/*
 * Load the next chunk of d->inprocess into frame @f as an ATA read or
 * write command, advance the buffer bookkeeping, and queue a clone of
 * the skb on the device send queue.  Caller holds d->lock (this is
 * reached via aoecmd_work, which documents that requirement).
 */
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        struct buf *buf;
        struct sk_buff *skb;
        ulong bcnt;
        register sector_t sector;
        char writebit, extbit;

        writebit = 0x10;
        extbit = 0x4;

        buf = d->inprocess;
        sector = buf->sector;
        bcnt = buf->bv_resid;
        /* never exceed the negotiated per-frame byte count */
        if (bcnt > d->maxbcnt)
                bcnt = d->maxbcnt;

        /* initialize the headers & frame */
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
        f->tag = aoehdr_atainit(d, h);
        f->waited = 0;
        f->buf = buf;
        f->bufaddr = buf->bufaddr;
        f->bcnt = bcnt;
        f->lba = sector;

        /* set up ata header */
        ah->scnt = bcnt >> 9;
        put_lba(ah, sector);
        if (d->flags & DEVFL_EXT) {
                /* LBA48 device: use the extended-address command form */
                ah->aflags |= AOEAFL_EXT;
        } else {
                /* LBA28: high nibble of lba3 carries the device/LBA bits */
                extbit = 0;
                ah->lba3 &= 0x0f;
                ah->lba3 |= 0xe0;       /* LBA bit + obsolete 0xa0 */
        }

        if (bio_data_dir(buf->bio) == WRITE) {
                /* attach the data page as a paged fragment (zero-copy) */
                skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
                        offset_in_page(f->bufaddr), bcnt);
                ah->aflags |= AOEAFL_WRITE;
                skb->len += bcnt;
                skb->data_len = bcnt;
        } else {
                writebit = 0;
        }

        /* WIN_READ plus the write/ext bits selects the actual ATA opcode */
        ah->cmdstat = WIN_READ | writebit | extbit;

        /* mark all tracking fields and load out */
        buf->nframesout += 1;
        buf->bufaddr += bcnt;
        buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
        buf->resid -=
bcnt;
        buf->sector += bcnt >> 9;
        if (buf->resid == 0) {
                /* whole request issued; next call will pull from d->bufq */
                d->inprocess = NULL;
        } else if (buf->bv_resid == 0) {
                /* current bio_vec exhausted: step to the next segment */
                buf->bv++;
                WARN_ON(buf->bv->bv_len == 0);
                buf->bv_resid = buf->bv->bv_len;
                buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
        }

        skb->dev = d->ifp;
        /* clone so the frame keeps its skb for possible retransmit */
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb == NULL)
                return;

        /* append to the device send queue (singly linked via skb->next) */
        if (d->sendq_hd)
                d->sendq_tl->next = skb;
        else
                d->sendq_hd = skb;
        d->sendq_tl = skb;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
/*
 * Build one AOECMD_CFG broadcast query per AoE-enabled network
 * interface for shelf @aoemajor / slot @aoeminor.  Returns the head of
 * the resulting skb list; if @tail is non-NULL it receives the last
 * skb, so the caller can splice the list onto a send queue.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
        struct aoe_hdr *h;
        struct aoe_cfghdr *ch;
        struct sk_buff *skb, *sl, *sl_tail;
        struct net_device *ifp;

        sl = sl_tail = NULL;

        read_lock(&dev_base_lock);
        for_each_netdev(&init_net, ifp) {
                dev_hold(ifp);
                if (!is_aoe_netif(ifp))
                        goto cont;

                skb = new_skb(sizeof *h + sizeof *ch);
                if (skb == NULL) {
                        printk(KERN_INFO "aoe: skb alloc failure\n");
                        goto cont;
                }
                skb_put(skb, sizeof *h + sizeof *ch);
                skb->dev = ifp;
                /* skbs are pushed onto the front, so the first one
                 * built ends up as the list tail */
                if (sl_tail == NULL)
                        sl_tail = skb;
                h = (struct aoe_hdr *) skb_mac_header(skb);
                memset(h, 0, sizeof *h + sizeof *ch);

                memset(h->dst, 0xff, sizeof h->dst);    /* broadcast */
                memcpy(h->src, ifp->dev_addr, sizeof h->src);
                h->type = __constant_cpu_to_be16(ETH_P_AOE);
                h->verfl = AOE_HVER;
                h->major = cpu_to_be16(aoemajor);
                h->minor = aoeminor;
                h->cmd = AOECMD_CFG;

                skb->next = sl;
                sl = skb;
cont:
                dev_put(ifp);
        }
        read_unlock(&dev_base_lock);

        if (tail != NULL)
                *tail = sl_tail;
        return sl;
}

/*
 * Find an unused frame (tag == FREETAG) whose skb is no longer
 * referenced by the network layer (dataref == 1), reset the skb, and
 * return the frame.  If every free frame's skb is still held by the
 * network layer, set DEVFL_KICKME so the retransmit timer retries, and
 * return NULL.
 */
static struct frame *
freeframe(struct aoedev *d)
{
        struct frame *f, *e;
        int n = 0;

        f = d->frames;
        e = f + d->nframes;
        for (; f<e; f++) {
                if (f->tag != FREETAG)
                        continue;
                if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
                        /* drop any leftover paged data and payload */
                        skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
                        skb_trim(f->skb, 0);
                        return f;
                }
                n++;
        }
        if (n == d->nframes)    /* wait for network layer */
                d->flags |= DEVFL_KICKME;

        return NULL;
}

/* enters with d->lock held */
/*
 * Main work loop: while free frames are available, keep pulling
 * buffers off d->bufq into d->inprocess and issuing ATA commands via
 * aoecmd_ata_rw().  While the device is paused, (re)send config
 * queries instead once no commands are outstanding.
 */
void
aoecmd_work(struct aoedev *d)
{
        struct frame *f;
struct buf *buf;

        if (d->flags & DEVFL_PAUSE) {
                /* only probe once the device has gone idle */
                if (!aoedev_isbusy(d))
                        d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
                                        d->aoeminor, &d->sendq_tl);
                return;
        }

loop:
        f = freeframe(d);
        if (f == NULL)
                return;
        if (d->inprocess == NULL) {
                if (list_empty(&d->bufq))
                        return;
                buf = container_of(d->bufq.next, struct buf, bufs);
                list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
                d->inprocess = buf;
        }
        aoecmd_ata_rw(d, f);
        goto loop;
}

/*
 * Retransmit frame @f with a fresh tag, logging the old/new tags via
 * the aoe character device.  If the frame was a jumbo (scnt beyond
 * DEFAULTBCNT worth of sectors), shrink it to DEFAULTBCNT; after too
 * many lost jumbos, fall back to DEFAULTBCNT-sized frames for the
 * whole device.  A clone of the rebuilt skb is queued on the send
 * queue.  Called from rexmit_timer with d->lock held.
 */
static void
rexmit(struct aoedev *d, struct frame *f)
{
        struct sk_buff *skb;
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        char buf[128];
        u32 n;

        n = newtag(d);

        snprintf(buf, sizeof buf,
                "%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
                "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n);
        aoechr_error(buf);

        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        f->tag = n;
        h->tag = cpu_to_be32(n);
        memcpy(h->dst, d->addr, sizeof h->dst);
        memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

        n = DEFAULTBCNT / 512;
        if (ah->scnt > n) {
                /* jumbo frame was lost: retry at the default size */
                ah->scnt = n;
                if (ah->aflags & AOEAFL_WRITE) {
                        skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
                                offset_in_page(f->bufaddr), DEFAULTBCNT);
                        skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
                        skb->data_len = DEFAULTBCNT;
                }
                if (++d->lostjumbo > (d->nframes << 1))
                if (d->maxbcnt != DEFAULTBCNT) {
                        printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
                                d->aoemajor, d->aoeminor, d->ifp->name);
                        d->maxbcnt = DEFAULTBCNT;
                        d->flags |= DEVFL_MAXBCNT;
                }
        }

        skb->dev = d->ifp;
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb == NULL)
                return;
        if (d->sendq_hd)
                d->sendq_tl->next = skb;
        else
                d->sendq_hd = skb;
        d->sendq_tl = skb;
}

/*
 * Ticks elapsed since @tag was transmitted: the low 16 bits of a tag
 * are the jiffies tick at xmit time (see newtag), so subtract them
 * from the current tick, correcting for 16-bit wraparound.
 */
static int
tsince(int tag)
{
        int n;

        n = jiffies & 0xffff;
        n -= tag & 0xffff;
        if (n < 0)
                n += 1<<16;
        return n;
}

/*
 * Periodic retransmit timer for one aoedev (@vp is the struct aoedev
 * pointer).  Retransmits frames whose round-trip wait exceeds ~150% of
 * the moving RTT average, and fails the device once a frame has waited
 * longer than aoe_deadsecs.
 */
static void
rexmit_timer(ulong vp)
{
        struct aoedev *d;
        struct frame *f, *e;
        struct sk_buff *sl;
        register long timeout;
        ulong flags, n;

        d = (struct aoedev *) vp;
        sl = NULL;

        /* timeout is always ~150% of the moving average */
        timeout
= d->rttavg;
        timeout += timeout >> 1;

        spin_lock_irqsave(&d->lock, flags);

        if (d->flags & DEVFL_TKILL) {
                /* device is being torn down; do not rearm */
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        f = d->frames;
        e = f + d->nframes;
        for (; f<e; f++) {
                if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
                        n = f->waited += timeout;
                        n /= HZ;
                        if (n > aoe_deadsecs) {
                                /* waited too long for response */
                                aoedev_downdev(d);
                                break;
                        }
                        rexmit(d, f);
                }
        }
        if (d->flags & DEVFL_KICKME) {
                /* freeframe() found no usable skb earlier; retry issuing */
                d->flags &= ~DEVFL_KICKME;
                aoecmd_work(d);
        }

        /* detach the accumulated send queue for transmission */
        sl = d->sendq_hd;
        d->sendq_hd = d->sendq_tl = NULL;
        if (sl) {
                /* retransmits happened: back off by doubling the RTT
                 * average, clamped to MAXTIMER */
                n = d->rttavg <<= 1;
                if (n > MAXTIMER)
                        d->rttavg = MAXTIMER;
        }
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?