hptiop.c

来自「linux 内核源代码」· C语言 代码 · 共 948 行 · 第 1/2 页

C
948
字号
/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.2 (070830)";

/* forward declarations for the two completion paths and the message handler */
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

/*
 * Read back a controller register so that any posted PCI writes ahead of
 * this point are flushed to the device before we proceed.
 */
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
	readl(&iop->outbound_intstatus);
}

/*
 * Poll the inbound queue for up to @millisec milliseconds until the IOP
 * hands us a request slot (i.e. the firmware is ready).  The slot is
 * immediately returned via the outbound queue.  Returns 0 when the IOP
 * responded, -1 on timeout.  Sleeps (msleep), so process context only.
 */
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		/* give the slot straight back and flush the posted write */
		writel(req, &iop->outbound_queue);
		hptiop_pci_posting_flush(iop);
		return 0;
	}

	return -1;
}

/*
 * Dispatch a completed request tag to the proper handler: tags with the
 * HOST bit set belong to host-allocated (SCSI command) requests, the rest
 * are IOP-resident (ioctl) requests addressed by BAR offset.
 */
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		return hptiop_host_request_callback(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		return hptiop_iop_request_callback(hba, tag);
}

/*
 * Drain every entry currently in the outbound queue.  For IOP-resident
 * requests flagged SYNC_REQUEST, the 'context' field is the handshake:
 * if the synchronous sender has not yet marked it (context == 0) we set
 * it to 1 so the poller in iop_send_sync_request() sees the completion;
 * otherwise the request is dispatched normally.
 */
static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback(hba, req);
		else {
			struct hpt_iop_request_header __iomem * p;

			/* 'req' is an offset from the start of the BAR */
			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback(hba, req);
		}
	}
}

/*
 * Core interrupt service: acknowledge and handle outbound message 0 and
 * post-queue interrupts.  Returns 1 if any interrupt source was handled,
 * 0 otherwise.  Callers hold host_lock (see hptiop_intr and
 * iop_send_sync_msg).
 */
static int __iop_intr(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);
		dprintk("received outbound msg %x\n", msg);
		/* ack the message interrupt before handling it */
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue(hba);
		ret = 1;
	}

	return ret;
}

/*
 * Post @_req to the inbound queue marked as a synchronous request and
 * poll (with interrupt servicing) for up to @millisec milliseconds until
 * its 'context' field becomes non-zero — the completion handshake set by
 * hptiop_drain_outbound_queue().  Returns 0 on completion, -1 on timeout.
 * Sleeps, so process context only.
 */
static int iop_send_sync_request(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
			&req->flags);

	writel(0, &req->context);

	/* the queue entry is the request's byte offset within the BAR */
	writel((unsigned long)req - (unsigned long)hba->iop,
			&hba->iop->inbound_queue);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		__iop_intr(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

/*
 * Post message @msg to the IOP via inbound msgaddr0 and poll for up to
 * @millisec milliseconds until hptiop_message_callback() sets
 * hba->msg_done.  Interrupts are serviced by hand under host_lock each
 * iteration.  Returns 0 on success, -1 on timeout.  Sleeps, so process
 * context only.
 */
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;

	writel(msg, &hba->iop->inbound_msgaddr0);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		__iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done? 0 : -1;
}

/*
 * Fetch the controller configuration: grab a request slot from the
 * inbound queue, build a GET_CONFIG request in it, send it synchronously
 * and copy the result back into @config.  Returns 0 on success, -1 if no
 * slot is available or the request times out.
 */
static int iop_get_config(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	/* return the slot to the IOP */
	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

/*
 * Push a new controller configuration: grab a request slot, copy the
 * payload of @config (everything past the header) into it, complete the
 * header and send synchronously.  Returns 0 on success, -1 if no slot is
 * available or the request times out.
 */
static int iop_set_config(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	/* return the slot to the IOP */
	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

/*
 * Final IOP bring-up: unmask the post-queue and msg0 interrupts, mark the
 * HBA initialized, and ask the firmware to start its background tasks.
 * Returns 0 on success, -1 if the start-background-task message times out.
 */
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;

	/* enable interrupts */
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
			&iop->outbound_intmask);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

/*
 * Map PCI BAR 0 (the IOP's memory-mapped register/queue window) into
 * kernel virtual space and store the mapping in hba->iop.  Returns 0 on
 * success, -1 if the resource is not a memory BAR or ioremap fails.
 */
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return -1;
	}

	mem_base_phy = pci_resource_start(pcidev, 0);
	length = pci_resource_len(pcidev, 0);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return -1;
	}

	hba->iop = mem_base_virt;
	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
	return 0;
}

/*
 * Handle an outbound message from the IOP.  A RESET message ends an
 * in-progress host reset (clears 'resetting' and wakes the waiter);
 * any other message up to MSG0_MAX flags completion for
 * iop_send_sync_msg().  Messages before initialization are ignored.
 */
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

/*
 * Pop a request from the HBA's singly linked free list, or NULL if
 * exhausted.  No locking here — NOTE(review): presumably callers hold
 * host_lock; confirm against the callers outside this excerpt.
 */
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

/* Push @req back onto the HBA's free list (same locking caveat as get_req). */
static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

/*
 * Complete a host-allocated SCSI command request identified by @_tag:
 * look up the request and its scsi_cmnd, unmap DMA if mapped, translate
 * the IOP result code into a SCSI midlayer result (copying sense data for
 * MODE_SENSE_CHECK_CONDITION), invoke scsi_done and recycle the request.
 * With the v2 interface, the result bit is encoded in the tag itself.
 */
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	struct scsi_cmnd *scp;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
		scp->result = SAM_STAT_CHECK_CONDITION;
		/* sense data is returned in the request's sg_list area */
		memset(&scp->sense_buffer,
				0, sizeof(scp->sense_buffer));
		memcpy(&scp->sense_buffer, &req->sg_list,
				min(sizeof(scp->sense_buffer),
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

/*
 * Complete an IOP-resident ioctl request.  @tag is the request's byte
 * offset within the BAR.  The caller's hpt_ioctl_k is recovered from the
 * 64-bit pointer stashed in context/context_hi32; on success the output
 * buffer (stored 4-byte aligned after the input buffer in p->buf) is
 * copied out, then the done callback fires and the slot is returned.
 * NOTE(review): defined without 'static' although forward-declared
 * static above; linkage stays internal (C11 6.2.2p5) but the definition
 * should probably say 'static' too — confirm before changing.
 */
void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->iop + tag);
	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->iop->outbound_queue);
}

/*
 * Top-level IRQ handler: service the IOP under host_lock and report
 * whether anything was handled (relies on __iop_intr returning 0/1,
 * matching IRQ_NONE/IRQ_HANDLED).
 */
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba  *hba = dev_id;
	int  handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = __iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

/*
 * DMA-map @scp's data and fill the controller SG list @psg with the
 * resulting segments, marking the last one with 'eot'.  Returns the
 * number of segments (0 for no-data commands); records the mapping state
 * in HPT_SCP(scp) so the completion path knows to unmap.
 */
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}

	return HPT_SCP(scp)->sgcnt;
}

/* NOTE(review): body continues beyond this excerpt (page 2 of the source). */
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?