/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.
The hypervisor * puts the message in the next 16 byte space in round-robbin fashion, * turns on the high order bit of the message (the valid bit), and * generates an interrupt to the receiver (if interrupts are turned on.) * The receiver just turns off the valid bit when they have copied out * the message. * * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit * (IU) (as defined in the T10 standard available at www.t10.org), gets * a DMA address for the message, and sends it to the server as the * payload of a CRQ message. The server DMAs the SRP IU and processes it, * including doing any additional data transfers. When it is done, it * DMAs the SRP response back to the same address as the request came from, * and sends a CRQ message back to inform the client that the request has * completed. * * Note that some of the underlying infrastructure is different between * machines conforming to the "RS/6000 Platform Architecture" (RPA) and * the older iSeries hypervisor models. To support both, some low level * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c. * The Makefile should pick one, not two, not zero, of these. * * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor * interfaces. It would be really nice to abstract this above an RDMA * layer. */#include <linux/module.h>#include <linux/moduleparam.h>#include <linux/dma-mapping.h>#include <linux/delay.h>#include <asm/vio.h>#include <scsi/scsi.h>#include <scsi/scsi_cmnd.h>#include <scsi/scsi_host.h>#include <scsi/scsi_device.h>#include "ibmvscsi.h"/* The values below are somewhat arbitrary default values, but * OS/400 will use 3 busses (disks, CDs, tapes, I think.) * Note that there are 3 bits of channel value, 6 bits of id, and * 5 bits of LUN. 
*/static int max_id = 64;static int max_channel = 3;static int init_timeout = 5;static int max_requests = 50;#define IBMVSCSI_VERSION "1.5.7"MODULE_DESCRIPTION("IBM Virtual SCSI");MODULE_AUTHOR("Dave Boutcher");MODULE_LICENSE("GPL");MODULE_VERSION(IBMVSCSI_VERSION);module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);MODULE_PARM_DESC(max_id, "Largest ID value for each channel");module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);MODULE_PARM_DESC(max_channel, "Largest channel value");module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");/* ------------------------------------------------------------ * Routines for the event pool and event structs *//** * initialize_event_pool: - Allocates and initializes the event pool for a host * @pool: event_pool to be initialized * @size: Number of events in pool * @hostdata: ibmvscsi_host_data who owns the event pool * * Returns zero on success.*/static int initialize_event_pool(struct event_pool *pool, int size, struct ibmvscsi_host_data *hostdata){ int i; pool->size = size; pool->next = 0; pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL); if (!pool->events) return -ENOMEM; memset(pool->events, 0x00, pool->size * sizeof(*pool->events)); pool->iu_storage = dma_alloc_coherent(hostdata->dev, pool->size * sizeof(*pool->iu_storage), &pool->iu_token, 0); if (!pool->iu_storage) { kfree(pool->events); return -ENOMEM; } for (i = 0; i < pool->size; ++i) { struct srp_event_struct *evt = &pool->events[i]; memset(&evt->crq, 0x00, sizeof(evt->crq)); atomic_set(&evt->free, 1); evt->crq.valid = 0x80; evt->crq.IU_length = sizeof(*evt->xfer_iu); evt->crq.IU_data_ptr = pool->iu_token + sizeof(*evt->xfer_iu) * i; evt->xfer_iu = pool->iu_storage + i; evt->hostdata = hostdata; 
evt->ext_list = NULL; evt->ext_list_token = 0; } return 0;}/** * release_event_pool: - Frees memory of an event pool of a host * @pool: event_pool to be released * @hostdata: ibmvscsi_host_data who owns the even pool * * Returns zero on success.*/static void release_event_pool(struct event_pool *pool, struct ibmvscsi_host_data *hostdata){ int i, in_use = 0; for (i = 0; i < pool->size; ++i) { if (atomic_read(&pool->events[i].free) != 1) ++in_use; if (pool->events[i].ext_list) { dma_free_coherent(hostdata->dev, SG_ALL * sizeof(struct memory_descriptor), pool->events[i].ext_list, pool->events[i].ext_list_token); } } if (in_use) printk(KERN_WARNING "ibmvscsi: releasing event pool with %d " "events still in use?\n", in_use); kfree(pool->events); dma_free_coherent(hostdata->dev, pool->size * sizeof(*pool->iu_storage), pool->iu_storage, pool->iu_token);}/** * valid_event_struct: - Determines if event is valid. * @pool: event_pool that contains the event * @evt: srp_event_struct to be checked for validity * * Returns zero if event is invalid, one otherwise.*/static int valid_event_struct(struct event_pool *pool, struct srp_event_struct *evt){ int index = evt - pool->events; if (index < 0 || index >= pool->size) /* outside of bounds */ return 0; if (evt != pool->events + index) /* unaligned */ return 0; return 1;}/** * ibmvscsi_free-event_struct: - Changes status of event to "free" * @pool: event_pool that contains the event * @evt: srp_event_struct to be modified **/static void free_event_struct(struct event_pool *pool, struct srp_event_struct *evt){ if (!valid_event_struct(pool, evt)) { printk(KERN_ERR "ibmvscsi: Freeing invalid event_struct %p " "(not in pool %p)\n", evt, pool->events); return; } if (atomic_inc_return(&evt->free) != 1) { printk(KERN_ERR "ibmvscsi: Freeing event_struct %p " "which is not in use!\n", evt); return; }}/** * get_evt_struct: - Gets the next free event in pool * @pool: event_pool that contains the events to be searched * * Returns the next 
event in "free" state, and NULL if none are free. * Note that no synchronization is done here, we assume the host_lock * will syncrhonze things.*/static struct srp_event_struct *get_event_struct(struct event_pool *pool){ int i; int poolsize = pool->size; int offset = pool->next; for (i = 0; i < poolsize; i++) { offset = (offset + 1) % poolsize; if (!atomic_dec_if_positive(&pool->events[offset].free)) { pool->next = offset; return &pool->events[offset]; } } printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); return NULL;}/** * init_event_struct: Initialize fields in an event struct that are always * required. * @evt: The event * @done: Routine to call when the event is responded to * @format: SRP or MAD format * @timeout: timeout value set in the CRQ */static void init_event_struct(struct srp_event_struct *evt_struct, void (*done) (struct srp_event_struct *), u8 format, int timeout){ evt_struct->cmnd = NULL; evt_struct->cmnd_done = NULL; evt_struct->sync_srp = NULL; evt_struct->crq.format = format; evt_struct->crq.timeout = timeout; evt_struct->done = done;}/* ------------------------------------------------------------ * Routines for receiving SCSI responses from the hosting partition *//** * set_srp_direction: Set the fields in the srp related to data * direction and number of buffers based on the direction in * the scsi_cmnd and the number of buffers */static void set_srp_direction(struct scsi_cmnd *cmd, struct srp_cmd *srp_cmd, int numbuf){ if (numbuf == 0) return; if (numbuf == 1) { if (cmd->sc_data_direction == DMA_TO_DEVICE) srp_cmd->data_out_format = SRP_DIRECT_BUFFER; else srp_cmd->data_in_format = SRP_DIRECT_BUFFER; } else { if (cmd->sc_data_direction == DMA_TO_DEVICE) { srp_cmd->data_out_format = SRP_INDIRECT_BUFFER; srp_cmd->data_out_count = numbuf < MAX_INDIRECT_BUFS ? numbuf: MAX_INDIRECT_BUFS; } else { srp_cmd->data_in_format = SRP_INDIRECT_BUFFER; srp_cmd->data_in_count = numbuf < MAX_INDIRECT_BUFS ? 
numbuf: MAX_INDIRECT_BUFS; } }}static void unmap_sg_list(int num_entries, struct device *dev, struct memory_descriptor *md){ int i; for (i = 0; i < num_entries; ++i) { dma_unmap_single(dev, md[i].virtual_address, md[i].length, DMA_BIDIRECTIONAL); }}/** * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format * @cmd: srp_cmd whose additional_data member will be unmapped * @dev: device for which the memory is mapped **/static void unmap_cmd_data(struct srp_cmd *cmd, struct srp_event_struct *evt_struct, struct device *dev){ if ((cmd->data_out_format == SRP_NO_BUFFER) && (cmd->data_in_format == SRP_NO_BUFFER)) return; else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) || (cmd->data_in_format == SRP_DIRECT_BUFFER)) { struct memory_descriptor *data = (struct memory_descriptor *)cmd->additional_data; dma_unmap_single(dev, data->virtual_address, data->length, DMA_BIDIRECTIONAL); } else { struct indirect_descriptor *indirect = (struct indirect_descriptor *)cmd->additional_data; int num_mapped = indirect->head.length / sizeof(indirect->list[0]); if (num_mapped <= MAX_INDIRECT_BUFS) { unmap_sg_list(num_mapped, dev, &indirect->list[0]); return; } unmap_sg_list(num_mapped, dev, evt_struct->ext_list); }}static int map_sg_list(int num_entries, struct scatterlist *sg, struct memory_descriptor *md){ int i; u64 total_length = 0; for (i = 0; i < num_entries; ++i) { struct memory_descriptor *descr = md + i; struct scatterlist *sg_entry = &sg[i]; descr->virtual_address = sg_dma_address(sg_entry); descr->length = sg_dma_len(sg_entry); descr->memory_handle = 0; total_length += sg_dma_len(sg_entry); } return total_length;}/** * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields * @cmd: Scsi_Cmnd with the scatterlist * @srp_cmd: srp_cmd that contains the memory descriptor * @dev: device for which to map dma memory * * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. 
* Returns 1 on success.*/static int map_sg_data(struct scsi_cmnd *cmd, struct srp_event_struct *evt_struct, struct srp_cmd *srp_cmd, struct device *dev){ int sg_mapped; u64 total_length = 0; struct scatterlist *sg = cmd->request_buffer; struct memory_descriptor *data = (struct memory_descriptor *)srp_cmd->additional_data;