/* i2o_block.c */
/* * I2O Random Block Storage Class OSM * * (C) Copyright 1999 Red Hat Software * * Written by Alan Cox, Building Number Three Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This is a beta test release. Most of the good code was taken * from the nbd driver by Pavel Machek, who in turn took some of it * from loop.c. Isn't free software great for reusability 8) * * Fixes/additions: * Steve Ralston: * Multiple device handling error fixes, * Added a queue depth. * Alan Cox: * FC920 has an rmw bug. Dont or in the end marker. * Removed queue walk, fixed for 64bitness. * Deepak Saxena: * Independent queues per IOP * Support for dynamic device creation/deletion * Code cleanup * Support for larger I/Os through merge* functions * (taken from DAC960 driver) * Boji T Kannanthanam: * Set the I2O Block devices to be detected in increasing * order of TIDs during boot. * Search and set the I2O block device that we boot off from as * the first device to be claimed (as /dev/i2o/hda) * Properly attach/detach I2O gendisk structure from the system * gendisk list. The I2O block devices now appear in * /proc/partitions. 
* * To do: * Serial number scanning to find duplicates for FC multipathing */#error Please convert me to Documentation/DMA-mapping.txt#include <linux/major.h>#include <linux/module.h>#include <linux/sched.h>#include <linux/fs.h>#include <linux/stat.h>#include <linux/pci.h>#include <linux/errno.h>#include <linux/file.h>#include <linux/ioctl.h>#include <linux/i2o.h>#include <linux/blkdev.h>#include <linux/blkpg.h>#include <linux/slab.h>#include <linux/hdreg.h>#include <linux/spinlock.h>#include <linux/notifier.h>#include <linux/reboot.h>#include <asm/uaccess.h>#include <asm/semaphore.h>#include <linux/completion.h>#include <asm/io.h>#include <asm/atomic.h>#include <linux/smp_lock.h>#include <linux/wait.h>#define MAJOR_NR I2O_MAJOR#include <linux/blk.h>#define MAX_I2OB 16#define MAX_I2OB_DEPTH 128#define MAX_I2OB_RETRIES 4//#define DRIVERDEBUG#ifdef DRIVERDEBUG#define DEBUG( s ) printk( s )#else#define DEBUG( s )#endif/* * Events that this OSM is interested in */#define I2OB_EVENT_MASK (I2O_EVT_IND_BSA_VOLUME_LOAD | \ I2O_EVT_IND_BSA_VOLUME_UNLOAD | \ I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ | \ I2O_EVT_IND_BSA_CAPACITY_CHANGE | \ I2O_EVT_IND_BSA_SCSI_SMART )/* * I2O Block Error Codes - should be in a header file really... 
*/#define I2O_BSA_DSC_SUCCESS 0x0000#define I2O_BSA_DSC_MEDIA_ERROR 0x0001#define I2O_BSA_DSC_ACCESS_ERROR 0x0002#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008#define I2O_BSA_DSC_BUS_FAILURE 0x0009#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B#define I2O_BSA_DSC_DEVICE_RESET 0x000C#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D#define I2O_BSA_DSC_TIMEOUT 0x000E#define I2O_UNIT(dev) (i2ob_dev[minor((dev)) & 0xf0])#define I2O_LOCK(unit) (i2ob_dev[(unit)].req_queue->queue_lock)/* * Some of these can be made smaller later */static int i2ob_blksizes[MAX_I2OB<<4];static int i2ob_sizes[MAX_I2OB<<4];static int i2ob_media_change_flag[MAX_I2OB];static int i2ob_context;/* * I2O Block device descriptor */struct i2ob_device{ struct i2o_controller *controller; struct i2o_device *i2odev; int unit; int tid; int flags; int refcnt; struct request *head, *tail; request_queue_t *req_queue; int max_segments; int done_flag; int constipated; int depth;};/* * FIXME: * We should cache align these to avoid ping-ponging lines on SMP * boxes under heavy I/O load... */struct i2ob_request{ struct i2ob_request *next; struct request *req; int num;};/* * Per IOP requst queue information * * We have a separate requeust_queue_t per IOP so that a heavilly * loaded I2O block device on an IOP does not starve block devices * across all I2O controllers. * */struct i2ob_iop_queue{ atomic_t queue_depth; struct i2ob_request request_queue[MAX_I2OB_DEPTH]; struct i2ob_request *i2ob_qhead; request_queue_t req_queue;};static struct i2ob_iop_queue *i2ob_queues[MAX_I2O_CONTROLLERS];static struct i2ob_request *i2ob_backlog[MAX_I2O_CONTROLLERS];static struct i2ob_request *i2ob_backlog_tail[MAX_I2O_CONTROLLERS];/* * Each I2O disk is one of these. 
*/static struct i2ob_device i2ob_dev[MAX_I2OB<<4];static int i2ob_dev_count = 0;static struct hd_struct i2ob[MAX_I2OB<<4];static struct gendisk i2ob_gendisk; /* Declared later *//* * Mutex and spin lock for event handling synchronization * evt_msg contains the last event. */static DECLARE_MUTEX_LOCKED(i2ob_evt_sem);static DECLARE_COMPLETION(i2ob_thread_dead);static spinlock_t i2ob_evt_lock = SPIN_LOCK_UNLOCKED;static u32 evt_msg[MSG_FRAME_SIZE>>2];static struct timer_list i2ob_timer;static int i2ob_timer_started = 0;static void i2o_block_reply(struct i2o_handler *, struct i2o_controller *, struct i2o_message *);static void i2ob_new_device(struct i2o_controller *, struct i2o_device *);static void i2ob_del_device(struct i2o_controller *, struct i2o_device *);static void i2ob_reboot_event(void);static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);static void i2ob_end_request(struct request *);static void i2ob_request(request_queue_t *);static int i2ob_backlog_request(struct i2o_controller *, struct i2ob_device *);static int i2ob_init_iop(unsigned int);static request_queue_t* i2ob_get_queue(kdev_t);static int i2ob_query_device(struct i2ob_device *, int, int, void*, int);static int do_i2ob_revalidate(kdev_t, int);static int i2ob_evt(void *);static int evt_pid = 0;static int evt_running = 0;static int scan_unit = 0;/* * I2O OSM registration structure...keeps getting bigger and bigger :) */static struct i2o_handler i2o_block_handler ={ i2o_block_reply, i2ob_new_device, i2ob_del_device, i2ob_reboot_event, "I2O Block OSM", 0, I2O_CLASS_RANDOM_BLOCK_STORAGE};/* * Get a message */static u32 i2ob_get(struct i2ob_device *dev){ struct i2o_controller *c=dev->controller; return I2O_POST_READ32(c);} /* * Turn a Linux block request into an I2O block read/write. 
*/static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, u32 base, int unit){ struct i2o_controller *c = dev->controller; int tid = dev->tid; unsigned long msg; unsigned long mptr; u64 offset; struct request *req = ireq->req; struct bio *bio = req->bio; int count = req->nr_sectors<<9; unsigned long last = ~0UL; unsigned short size = 0; // printk(KERN_INFO "i2ob_send called\n"); /* Map the message to a virtual address */ msg = c->mem_offset + m; /* * Build the message based on the request. */ __raw_writel(i2ob_context|(unit<<8), msg+8); __raw_writel(ireq->num, msg+12); __raw_writel(req->nr_sectors << 9, msg+20); /* * Mask out partitions from now on */ unit &= 0xF0; /* This can be optimised later - just want to be sure its right for starters */ offset = ((u64)(req->sector+base)) << 9; __raw_writel( offset & 0xFFFFFFFF, msg+24); __raw_writel(offset>>32, msg+28); mptr=msg+32; if(req->cmd == READ) { __raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4); while(bio) { if (bio_to_phys(bio) == last) { size += bio->bi_size; last += bio->bi_size; if(bio->bi_next) __raw_writel(0x14000000|(size), mptr-8); else __raw_writel(0xD4000000|(size), mptr-8); } else { if(bio->bi_next) __raw_writel(0x10000000|bio->bi_size, mptr); else __raw_writel(0xD0000000|bio->bi_size, mptr); __raw_writel(bio_to_phys(bio), mptr+4); mptr += 8; size = bio->bi_size; last = bio_to_phys(bio) + bio->bi_size; } count -= bio->bi_size; bio = bio->bi_next; } /* * Heuristic for now since the block layer doesnt give * us enough info. If its a big write assume sequential * readahead on controller. If its small then don't read * ahead but do use the controller cache. 
*/ if(size >= 8192) __raw_writel((8<<24)|(1<<16)|8, msg+16); else __raw_writel((8<<24)|(1<<16)|4, msg+16); } else if(req->cmd == WRITE) { __raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4); while(bio) { if (bio_to_phys(bio) == last) { size += bio->bi_size; last += bio->bi_size; if(bio->bi_next) __raw_writel(0x14000000|(size), mptr-8); else __raw_writel(0xD4000000|(size), mptr-8); } else { if(bio->bi_next) __raw_writel(0x14000000|bio->bi_size, mptr); else __raw_writel(0xD4000000|bio->bi_size, mptr); __raw_writel(bio_to_phys(bio), mptr+4); mptr += 8; size = bio->bi_size; last = bio_to_phys(bio) + bio->bi_size; } count -= bio->bi_size; bio = bio->bi_next; } if(c->battery) { if(size>16384) __raw_writel(4, msg+16); else /* * Allow replies to come back once data is cached in the controller * This allows us to handle writes quickly thus giving more of the * queue to reads. */ __raw_writel(16, msg+16); } else { /* Large write, don't cache */ if(size>8192) __raw_writel(4, msg+16); else /* write through */ __raw_writel(8, msg+16); } } __raw_writel(I2O_MESSAGE_SIZE(mptr-msg)>>2 | SGL_OFFSET_8, msg); if(count != 0) { printk(KERN_ERR "Request count botched by %d.\n", count); } i2o_post_message(c,m); atomic_inc(&i2ob_queues[c->unit]->queue_depth); return 0;}/* * Remove a request from the _locked_ request list. We update both the * list chain and if this is the last item the tail pointer. Caller * must hold the lock. */ static inline void i2ob_unhook_request(struct i2ob_request *ireq, unsigned int iop){ ireq->next = i2ob_queues[iop]->i2ob_qhead; i2ob_queues[iop]->i2ob_qhead = ireq;}/* * Request completion handler */ static inline void i2ob_end_request(struct request *req){ /* * Loop until all of the buffers that are linked * to this request have been marked updated and * unlocked. */ while (end_that_request_first(req, !req->errors, req->hard_cur_sectors)) ; /* * It is now ok to complete the request. 
*/ end_that_request_last( req );}static int i2ob_flush(struct i2o_controller *c, struct i2ob_device *d, int unit){ unsigned long msg; u32 m = i2ob_get(d); if(m == 0xFFFFFFFF) return -1; msg = c->mem_offset + m; /* * Ask the controller to write the cache back. This sorts out * the supertrak firmware flaw and also does roughly the right * thing for other cases too. */ __raw_writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, msg); __raw_writel(I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|d->tid, msg+4); __raw_writel(i2ob_context|(unit<<8), msg+8); __raw_writel(0, msg+12); __raw_writel(60<<16, msg+16); i2o_post_message(c,m); return 0;} /* * OSM reply handler. This gets all the message replies */static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg){ unsigned long flags; struct i2ob_request *ireq = NULL; u8 st; u32 *m = (u32 *)msg; u8 unit = (m[2]>>8)&0xF0; /* low 4 bits are partition */ struct i2ob_device *dev = &i2ob_dev[(unit&0xF0)]; /* * FAILed message */ if(m[0] & (1<<13)) { /* * FAILed message from controller * We increment the error count and abort it * * In theory this will never happen. The I2O block class * speficiation states that block devices never return * FAILs but instead use the REQ status field...but * better be on the safe side since no one really follows * the spec to the book :) */ ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; ireq->req->errors++; spin_lock_irqsave(&I2O_LOCK(c->unit), flags); i2ob_unhook_request(ireq, c->unit); i2ob_end_request(ireq->req); spin_unlock_irqrestore(&I2O_LOCK(c->unit), flags); /* Now flush the message by making it a NOP */ m[0]&=0x00FFFFFF; m[0]|=(I2O_CMD_UTIL_NOP)<<24; i2o_post_message(c,virt_to_bus(m)); return;
	/* (source chunk truncated here — i2o_block_reply continues in the full file;
	   trailing web-viewer chrome removed) */