/*
 * I2O Random Block Storage Class OSM
 *
 * (C) Copyright 1999 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This is a beta test release. Most of the good code was taken
 * from the nbd driver by Pavel Machek, who in turn took some of it
 * from loop.c. Isn't free software great for reusability 8)
 *
 * Fixes/additions:
 *	Steve Ralston:
 *		Multiple device handling error fixes,
 *		Added a queue depth.
 *	Alan Cox:
 *		FC920 has an rmw bug. Don't OR in the end marker.
 *		Removed queue walk, fixed for 64bitness.
 *		Rewrote much of the code over time
 *		Added indirect block lists
 *		Handle 64K limits on many controllers
 *		Don't use indirects on the Promise (breaks)
 *		Heavily chop down the queue depths
 *	Deepak Saxena:
 *		Independent queues per IOP
 *		Support for dynamic device creation/deletion
 *		Code cleanup
 *		Support for larger I/Os through merge* functions
 *		(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off from
 *		as the first device to be claimed (as /dev/i2o/hda).
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *
 * To do:
 *	Serial number scanning to find duplicates for FC multipathing
 */

#include <linux/major.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/i2o.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/completion.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <linux/smp_lock.h>
#include <linux/wait.h>

#define MAJOR_NR I2O_MAJOR

#include <linux/blk.h>

#define MAX_I2OB	16

#define MAX_I2OB_DEPTH	8
#define MAX_I2OB_RETRIES 4

//#define DRIVERDEBUG
#ifdef DRIVERDEBUG
#define DEBUG( s ) printk( s )
#else
#define DEBUG( s )
#endif

/*
 * Events that this OSM is interested in
 */
#define I2OB_EVENT_MASK		(I2O_EVT_IND_BSA_VOLUME_LOAD |	\
				 I2O_EVT_IND_BSA_VOLUME_UNLOAD | \
				 I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ | \
				 I2O_EVT_IND_BSA_CAPACITY_CHANGE | \
				 I2O_EVT_IND_BSA_SCSI_SMART )
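
/*
 * Informational note (not in the original source): these indications
 * cover the BSA events the OSM reacts to - volume load/unload and
 * unload requests, capacity changes, and SCSI SMART warnings. The mask
 * is registered with the IOP per device elsewhere in this file and the
 * resulting events are serviced by the i2ob_evt() daemon thread
 * declared below.
 */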
/*
 * I2O Block Error Codes - should be in a header file really...
 */
#define I2O_BSA_DSC_SUCCESS		0x0000
#define I2O_BSA_DSC_MEDIA_ERROR		0x0001
#define I2O_BSA_DSC_ACCESS_ERROR	0x0002
#define I2O_BSA_DSC_DEVICE_FAILURE	0x0003
#define I2O_BSA_DSC_DEVICE_NOT_READY	0x0004
#define I2O_BSA_DSC_MEDIA_NOT_PRESENT	0x0005
#define I2O_BSA_DSC_MEDIA_LOCKED	0x0006
#define I2O_BSA_DSC_MEDIA_FAILURE	0x0007
#define I2O_BSA_DSC_PROTOCOL_FAILURE	0x0008
#define I2O_BSA_DSC_BUS_FAILURE		0x0009
#define I2O_BSA_DSC_ACCESS_VIOLATION	0x000A
#define I2O_BSA_DSC_WRITE_PROTECTED	0x000B
#define I2O_BSA_DSC_DEVICE_RESET	0x000C
#define I2O_BSA_DSC_VOLUME_CHANGED	0x000D
#define I2O_BSA_DSC_TIMEOUT		0x000E

/*
 * Some of these can be made smaller later
 */
static int i2ob_blksizes[MAX_I2OB<<4];
static int i2ob_hardsizes[MAX_I2OB<<4];
static int i2ob_sizes[MAX_I2OB<<4];
static int i2ob_media_change_flag[MAX_I2OB];
static u32 i2ob_max_sectors[MAX_I2OB<<4];

static int i2ob_context;

/*
 * I2O Block device descriptor
 */
struct i2ob_device
{
	struct i2o_controller *controller;
	struct i2o_device *i2odev;
	int unit;
	int tid;
	int flags;
	int refcnt;
	struct request *head, *tail;
	request_queue_t *req_queue;
	int max_segments;
	int max_direct;		/* Not yet used properly */
	int done_flag;
	int depth;
	int rcache;
	int wcache;
	int power;
};

/*
 * FIXME:
 * We should cache align these to avoid ping-ponging lines on SMP
 * boxes under heavy I/O load...
 */
struct i2ob_request
{
	struct i2ob_request *next;
	struct request *req;
	int num;
};

/*
 * Per-IOP request queue information
 *
 * We have a separate request_queue_t per IOP so that a heavily
 * loaded I2O block device on an IOP does not starve block devices
 * across all I2O controllers.
 */
struct i2ob_iop_queue
{
	atomic_t queue_depth;
	struct i2ob_request request_queue[MAX_I2OB_DEPTH];
	struct i2ob_request *i2ob_qhead;
	request_queue_t req_queue;
};
static struct i2ob_iop_queue *i2ob_queues[MAX_I2O_CONTROLLERS];

/*
 * Each I2O disk is one of these.
 */
static struct i2ob_device i2ob_dev[MAX_I2OB<<4];
static int i2ob_dev_count = 0;
static struct hd_struct i2ob[MAX_I2OB<<4];
static struct gendisk i2ob_gendisk;	/* Declared later */

/*
 * Mutex and spin lock for event handling synchronization
 * evt_msg contains the last event.
 */
static DECLARE_MUTEX_LOCKED(i2ob_evt_sem);
static DECLARE_COMPLETION(i2ob_thread_dead);
static spinlock_t i2ob_evt_lock = SPIN_LOCK_UNLOCKED;
static u32 evt_msg[MSG_FRAME_SIZE];

static void i2o_block_reply(struct i2o_handler *, struct i2o_controller *,
	struct i2o_message *);
static void i2ob_new_device(struct i2o_controller *, struct i2o_device *);
static void i2ob_del_device(struct i2o_controller *, struct i2o_device *);
static void i2ob_reboot_event(void);
static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
static void i2ob_end_request(struct request *);
static void i2ob_request(request_queue_t *);
static int i2ob_init_iop(unsigned int);
static request_queue_t *i2ob_get_queue(kdev_t);
static int i2ob_query_device(struct i2ob_device *, int, int, void *, int);
static int do_i2ob_revalidate(kdev_t, int);
static int i2ob_evt(void *);

static int evt_pid = 0;
static int evt_running = 0;
static int scan_unit = 0;

/*
 * I2O OSM registration structure...keeps getting bigger and bigger :)
 */
static struct i2o_handler i2o_block_handler =
{
	i2o_block_reply,
	i2ob_new_device,
	i2ob_del_device,
	i2ob_reboot_event,
	"I2O Block OSM",
	0,
	I2O_CLASS_RANDOM_BLOCK_STORAGE
};
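
/*
 * A note on the magic SGL flag words used by i2ob_send() below. The
 * values are open-coded in the request builder; this breakdown is our
 * reading of the I2O simple scatter-gather element format and should be
 * treated as informational rather than spec text:
 *
 *	0x10000000	simple address element, more entries follow (READ)
 *	0xD0000000	as above, plus end-of-buffer (0x40000000) and
 *			last-element (0x80000000) - the final READ entry
 *	0x14000000	as 0x10000000 plus the direction bit (0x04000000,
 *			data flows out to the device) - WRITE
 *	0xD4000000	the final WRITE entry
 *
 * The low bits of each flag word carry the byte count of the entry.
 */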
/**
 *	i2ob_get	-	Get an I2O message
 *	@dev: I2O block device
 *
 *	Get a message from the FIFO used for this block device. The message
 *	is returned, or the I2O 'no message' value of 0xFFFFFFFF if nothing
 *	is available.
 */
static u32 i2ob_get(struct i2ob_device *dev)
{
	struct i2o_controller *c = dev->controller;
	return I2O_POST_READ32(c);
}

/**
 *	i2ob_send	-	Turn a request into a message and send it
 *	@m: Message offset
 *	@dev: I2O device
 *	@ireq: Request structure
 *	@base: Partition offset
 *	@unit: Device identity
 *
 *	Generate an I2O BSAREAD request. This interface function is called
 *	for devices that appear to explode when they are fed indirect chain
 *	pointers (notably right now this appears to afflict Promise
 *	hardware, so be careful what you feed the hardware).
 *
 *	No cleanup is done by this interface. It is done on the interrupt
 *	side when the reply arrives.
 *
 *	To Fix: Generate PCI maps of the buffers
 */
static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq,
		     u32 base, int unit)
{
	struct i2o_controller *c = dev->controller;
	int tid = dev->tid;
	unsigned long msg;
	unsigned long mptr;
	u64 offset;
	struct request *req = ireq->req;
	struct buffer_head *bh = req->bh;
	int count = req->nr_sectors<<9;
	char *last = NULL;
	unsigned short size = 0;

	// printk(KERN_INFO "i2ob_send called\n");
	/* Map the message to a virtual address */
	msg = c->mem_offset + m;

	/*
	 * Build the message based on the request.
	 */
	__raw_writel(i2ob_context|(unit<<8), msg+8);
	__raw_writel(ireq->num, msg+12);
	__raw_writel(req->nr_sectors << 9, msg+20);

	/*
	 * Mask out partitions from now on
	 */
	unit &= 0xF0;

	/* This can be optimised later - just want to be sure it's right for
	   starters */
	offset = ((u64)(req->sector+base)) << 9;
	__raw_writel( offset & 0xFFFFFFFF, msg+24);
	__raw_writel(offset>>32, msg+28);
	mptr = msg+32;

	if(req->cmd == READ)
	{
		DEBUG("READ\n");
		__raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4);
		while(bh != NULL)
		{
			if(bh->b_data == last) {
				size += bh->b_size;
				last += bh->b_size;
				if(bh->b_reqnext)
					__raw_writel(0x10000000|(size), mptr-8);
				else
					__raw_writel(0xD0000000|(size), mptr-8);
			}
			else
			{
				if(bh->b_reqnext)
					__raw_writel(0x10000000|(bh->b_size), mptr);
				else
					__raw_writel(0xD0000000|(bh->b_size), mptr);
				__raw_writel(virt_to_bus(bh->b_data), mptr+4);
				mptr += 8;
				size = bh->b_size;
				last = bh->b_data + size;
			}
			count -= bh->b_size;
			bh = bh->b_reqnext;
		}
		switch(dev->rcache)
		{
			case CACHE_NULL:
				__raw_writel(0, msg+16); break;
			case CACHE_PREFETCH:
				__raw_writel(0x201F0008, msg+16); break;
			case CACHE_SMARTFETCH:
				if(req->nr_sectors > 16)
					__raw_writel(0x201F0008, msg+16);
				else
					__raw_writel(0x001F0000, msg+16);
				break;
		}
		// printk("Reading %d entries %d bytes.\n",
		//	mptr-msg-8, req->nr_sectors<<9);
	}
	else if(req->cmd == WRITE)
	{
		DEBUG("WRITE\n");
		__raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4);
		while(bh != NULL)
		{
			if(bh->b_data == last) {
				size += bh->b_size;
				last += bh->b_size;
				if(bh->b_reqnext)
					__raw_writel(0x14000000|(size), mptr-8);
				else
					__raw_writel(0xD4000000|(size), mptr-8);
			}
			else
			{
				if(bh->b_reqnext)
					__raw_writel(0x14000000|(bh->b_size), mptr);
				else
					__raw_writel(0xD4000000|(bh->b_size), mptr);
				__raw_writel(virt_to_bus(bh->b_data), mptr+4);
				mptr += 8;
				size = bh->b_size;
				last = bh->b_data + size;
			}
			count -= bh->b_size;
			bh = bh->b_reqnext;
		}
		switch(dev->wcache)
		{
			case CACHE_NULL:
				__raw_writel(0, msg+16); break;
			case CACHE_WRITETHROUGH:
				__raw_writel(0x001F0008, msg+16); break;
			case CACHE_WRITEBACK:
				__raw_writel(0x001F0010, msg+16); break;
			case CACHE_SMARTBACK:
				if(req->nr_sectors > 16)
					__raw_writel(0x001F0004, msg+16);
				else
					__raw_writel(0x001F0010, msg+16);
				break;
			case CACHE_SMARTTHROUGH:
				if(req->nr_sectors > 16)
					__raw_writel(0x001F0004, msg+16);
				else
					__raw_writel(0x001F0010, msg+16);
		}
		// printk("Writing %d entries %d bytes.\n",
		//	mptr-msg-8, req->nr_sectors<<9);
	}
	__raw_writel(I2O_MESSAGE_SIZE(mptr-msg)>>2 | SGL_OFFSET_8, msg);

	if(count != 0)
	{
		printk(KERN_ERR "Request count botched by %d.\n", count);
	}

	i2o_post_message(c, m);
	atomic_inc(&i2ob_queues[c->unit]->queue_depth);

	return 0;
}
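
/*
 * Summary of the cache-control words chosen above (informational; the
 * driver only distinguishes the cases through these opaque constants):
 * reads go out either plain or with controller prefetch, and
 * CACHE_SMARTFETCH prefetches only for requests larger than 16 sectors;
 * writes choose between write-through and write-back, with the SMART
 * variants picking the cheaper policy for transfers over 16 sectors.
 * The atomic_inc() of queue_depth at the end of i2ob_send() is balanced
 * by an atomic_dec() on the reply path, which is not part of this
 * excerpt.
 */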
bytes.\n",// mptr-msg-8, req->nr_sectors<<9); } __raw_writel(I2O_MESSAGE_SIZE(mptr-msg)>>2 | SGL_OFFSET_8, msg); if(count != 0) { printk(KERN_ERR "Request count botched by %d.\n", count); } i2o_post_message(c,m); atomic_inc(&i2ob_queues[c->unit]->queue_depth); return 0;}/* * Remove a request from the _locked_ request list. We update both the * list chain and if this is the last item the tail pointer. Caller * must hold the lock. */ static inline void i2ob_unhook_request(struct i2ob_request *ireq, unsigned int iop){ ireq->next = i2ob_queues[iop]->i2ob_qhead; i2ob_queues[iop]->i2ob_qhead = ireq;}/* * Request completion handler */ static inline void i2ob_end_request(struct request *req){ /* FIXME - pci unmap the request */ /* * Loop until all of the buffers that are linked * to this request have been marked updated and * unlocked. */ while (end_that_request_first( req, !req->errors, "i2o block" )); /* * It is now ok to complete the request. */ end_that_request_last( req ); DEBUG("IO COMPLETED\n");}/* * Request merging functions */static inline int i2ob_new_segment(request_queue_t *q, struct request *req, int __max_segments){ int max_segments = i2ob_dev[MINOR(req->rq_dev)].max_segments; if (__max_segments < max_segments) max_segments = __max_segments; if (req->nr_segments < max_segments) { req->nr_segments++; return 1; } return 0;}static int i2ob_back_merge(request_queue_t *q, struct request *req, struct buffer_head *bh, int __max_segments){ if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data) return 1; return i2ob_new_segment(q, req, __max_segments);}static int i2ob_front_merge(request_queue_t *q, struct request *req, struct buffer_head *bh, int __max_segments){ if (bh->b_data + bh->b_size == req->bh->b_data) return 1; return i2ob_new_segment(q, req, __max_segments);