
📄 aic79xx_osm.c

📁 This Linux source code is quite comprehensive and essentially complete. It is written in C and builds with a C compiler. Due to time constraints I have not tested it myself, but even as reference material it is very useful.
💻 C
📖 Page 1 of 5
void
ahd_send_async(struct ahd_softc *ahd, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char	buf[80];
		struct	ahd_linux_target *targ;
		struct	info_str info;
		struct	ahd_initiator_tinfo *tinfo;
		struct	ahd_tmode_tstate *tstate;

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		targ = ahd->platform_data->targets[target];
		if (targ == NULL)
			break;
		if (tinfo->curr.period == targ->last_tinfo.period
		 && tinfo->curr.width == targ->last_tinfo.width
		 && tinfo->curr.offset == targ->last_tinfo.offset
		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
			if (bootverbose == 0)
				break;

		targ->last_tinfo.period = tinfo->curr.period;
		targ->last_tinfo.width = tinfo->curr.width;
		targ->last_tinfo.offset = tinfo->curr.offset;
		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;

		printf("(%s:%c:", ahd_name(ahd), channel);
		if (target == CAM_TARGET_WILDCARD)
			printf("*): ");
		else
			printf("%d): ", target);
		ahd_format_transinfo(&info, &tinfo->curr);
		if (info.pos < info.length)
			*info.buffer = '\0';
		else
			buf[info.length - 1] = '\0';
		printf("%s", buf);
		break;
	}
	case AC_SENT_BDR:
		break;
	case AC_BUS_RESET:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
		if (ahd->platform_data->host != NULL) {
			scsi_report_bus_reset(ahd->platform_data->host,
					      channel - 'A');
		}
#endif
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
}

/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	Scsi_Cmnd *cmd;
	struct ahd_linux_device *dev;

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahd, scb);
		untagged_q = &(ahd->untagged_queues[target_offset]);
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		ahd_run_untagged_queue(ahd, untagged_q);
	}

	if ((scb->flags & SCB_ACTIVE) == 0) {
		printf("SCB %d done'd twice\n", scb->hscb->tag);
		ahd_dump_card_state(ahd);
		panic("Stopping for safety");
	}
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	ahd_linux_unmap_scb(ahd, scb);
	if (scb->flags & SCB_SENSE) {
		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
		memcpy(cmd->sense_buffer, ahd_get_sense_buf(ahd, scb),
		       MIN(sizeof(struct scsi_sense_data),
			   sizeof(cmd->sense_buffer)));
		cmd->result |= (DRIVER_SENSE << 24);
	} else if (scb->flags & SCB_PKT_SENSE) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;

		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(cmd->sense_buffer));
		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
		memcpy(cmd->sense_buffer,
		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
		       sense_len);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			int i;

			printf("Copied %d bytes of sense data offset %d:",
			       sense_len, SIU_SENSE_OFFSET(siu));
			for (i = 0; i < sense_len; i++)
				printf(" 0x%x", cmd->sense_buffer[i]);
			printf("\n");
		}
#endif
		cmd->result |= (DRIVER_SENSE << 24);
	} else {
		/*
		 * Guard against stale sense data.
		 * The Linux mid-layer assumes that sense
		 * was retrieved anytime the first byte of
		 * the sense buffer looks "sane".
		 */
		cmd->sense_buffer[0] = 0;
	}
	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahd_get_transfer_length(scb) - ahd_get_residual(scb);
		if (amount_xferred < scb->io_ctx->underflow) {
			printf("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
			       ahd_get_residual(scb),
			       ahd_get_transfer_length(scb));
			ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		} else {
			ahd_set_transaction_status(scb, CAM_REQ_CMP);
			ahd_linux_sniff_command(ahd, cmd, scb);
		}
	} else if (ahd_get_transaction_status(scb) == DID_OK) {
		ahd_linux_handle_scsi_status(ahd, dev, scb);
	} else if (ahd_get_transaction_status(scb) == DID_NO_CONNECT) {
		/*
		 * Should a selection timeout kill the device?
		 * That depends on whether the selection timeout
		 * is persistent.  Since we have no guarantee that
		 * the mid-layer will issue an inquiry for this device
		 * again, we can't just kill it off.
		dev->flags |= AHD_DEV_UNCONFIGURED;
		 */
	}

	if (dev->openings == 1
	 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
	 && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When the queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	 && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if (TAILQ_EMPTY(&dev->busyq)) {
		if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
		 && dev->active == 0)
			ahd_linux_free_device(ahd, dev);
	} else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
		dev->flags |= AHD_DEV_ON_RUN_LIST;
	}

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printf("Recovery SCB completes\n");
		up(&ahd->platform_data->eh_sem);
	}

	ahd_free_scb(ahd, scb);
	ahd_linux_queue_cmd_complete(ahd, cmd);
}

static void
ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
			     struct ahd_linux_device *dev, struct scb *scb)
{
	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy.  So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves.  We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahd_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/*
		 * By the time the core driver has returned this
		 * command, all other commands that were queued
		 * to us but not the device have been returned.
		 * This ensures that dev->active is equal to
		 * the number of commands actually queued to
		 * the device.
		 */
		dev->tag_success_count = 0;
		if (dev->active != 0) {
			/*
			 * Drop our opening count to the number
			 * of commands currently outstanding.
			 */
			dev->openings = 0;
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_QFULL) {
				ahd_print_path(ahd, scb);
				printf("Dropping tag count to %d\n",
				       dev->active);
			}
#endif
			if (dev->active == dev->tags_on_last_queuefull) {
				dev->last_queuefull_same_count++;
				/*
				 * If we repeatedly see a queue full
				 * at the same queue depth, this
				 * device has a fixed number of tag
				 * slots.  Lock in this tag depth
				 * so we stop seeing queue fulls from
				 * this device.
				 */
				if (dev->last_queuefull_same_count
				 == AHD_LOCK_TAGS_COUNT) {
					dev->maxtags = dev->active;
					ahd_print_path(ahd, scb);
					printf("Locking max tag count at %d\n",
					       dev->active);
				}
			} else {
				dev->tags_on_last_queuefull = dev->active;
				dev->last_queuefull_same_count = 0;
			}
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_set_scsi_status(scb, SCSI_STATUS_OK);
			break;
		}
		/*
		 * Drop down to a single opening, and treat this
		 * as if the target returned BUSY SCSI status.
		 */
		dev->openings = 1;
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Set a short timer to defer sending commands for
		 * a bit since Linux will not delay in this case.
		 */
		if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
			printf("%s:%c:%d: Device Timer still active during "
			       "busy processing\n", ahd_name(ahd),
			       dev->target->channel, dev->target->target);
			break;
		}
		dev->flags |= AHD_DEV_TIMER_ACTIVE;
		dev->qfrozen++;
		init_timer(&dev->timer);
		dev->timer.data = (u_long)dev;
		dev->timer.expires = jiffies + (HZ/2);
		dev->timer.function = ahd_linux_dev_timed_unfreeze;
		add_timer(&dev->timer);
		break;
	}
}

static void
ahd_linux_filter_command(struct ahd_softc *ahd, Scsi_Cmnd *cmd, struct scb *scb)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY:
	{
		struct	ahd_devinfo devinfo;
		struct	scsi_inquiry *inq;
		struct	scsi_inquiry_data *sid;
		struct	ahd_initiator_tinfo *tinfo;
		struct	ahd_transinfo *user;
		struct	ahd_transinfo *goal;
		struct	ahd_transinfo *curr;
		struct	ahd_tmode_tstate *tstate;
		struct	ahd_linux_device *dev;
		u_int	scsiid;
		int	transferred_len;
		int	minlen;
		int	was_configured;
		u_int	width;
		u_int	period;
		u_int	offset;
		u_int	ppr_options;
		u_int	trans_version;
		u_int	prot_version;
		static	int warned_user;

		/*
		 * Validate the command.  We only want to filter
		 * standard inquiry commands, not those querying
		 * Vital Product Data.
		 */
		inq = (struct scsi_inquiry *)cmd->cmnd;
		if ((inq->byte2 & SI_EVPD) != 0
		 || inq->page_code != 0)
			break;
		if (cmd->use_sg != 0) {
			printf("%s: SG Inquiry response ignored\n",
			       ahd_name(ahd));
			break;
		}
		transferred_len = ahd_get_transfer_length(scb)
				- ahd_get_residual(scb);
		sid = (struct scsi_inquiry_data *)cmd->request_buffer;

		/*
		 * Determine if this lun actually exists.  If so,
		 * hold on to its corresponding device structure.
		 * If not, make sure we release the device and
		 * don't bother processing the rest of this inquiry
		 * command.
		 */
		dev = ahd_linux_get_device(ahd, cmd->channel,
					   cmd->target, cmd->lun,
					   /*alloc*/FALSE);
		was_configured = dev->flags & AHD_DEV_UNCONFIGURED;
		if (transferred_len >= 1
		 && SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
			dev->flags &= ~AHD_DEV_UNCONFIGURED;
		} else {
			dev->flags |= AHD_DEV_UNCONFIGURED;
			break;
		}

		/*
		 * Update our notion of this device's transfer
		 * negotiation capabilities.
		 */
		scsiid = BUILD_SCSIID(ahd, cmd);
		ahd_compile_devinfo(&devinfo, SCSIID_OUR_ID(scsiid),
				    cmd->target, cmd->lun,
				    SCSIID_CHANNEL(ahd, scsiid),
				    ROLE_INITIATOR);
		tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		user = &tinfo->user;
		goal = &tinfo->goal;
		curr = &tinfo->curr;
		width = user->width;
		period = user->period;
		offset = user->offset;
		ppr_options = user->ppr_options;
		trans_version = user->transport_version;
		prot_version = user->protocol_version;

		/*
		 * If we have read streaming info for this controller,
		 * apply it to this target.
		 */
		if (warned_user == 0
		 && ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
			printf("aic79xx: WARNING, insufficient "
			       "rd_strm instances for installed "
			       "controllers. Using defaults\n");
			printf("aic79xx: Please update the "
			       "aic79xx_rd_strm_info array in the "
			       "aic79xx.c source file.\n");
			warned_user++;
		} else {
			uint16_t rd_strm_mask;

			rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
			if ((rd_strm_mask & devinfo.target_mask) == 0)
				ppr_options &= ~MSG_EXT_PPR_RD_STRM;
		}

		minlen = offsetof(struct scsi_inquiry_data, version) + 1;
		if (transferred_len >= minlen) {
			prot_version = SID_ANSI_REV(sid);

			/*
			 * Only attempt SPI3 once we've verified that
			 * the device claims to support SPI3 features.
			 */
			if (prot_version < SCSI_REV_2)
				trans_version = SID_ANSI_REV(sid);
			else
				trans_version = SCSI_REV_2;
		}

		minlen = offsetof(struct scsi_inquiry_data, flags) + 1;
		if (transferred_len >= minlen
		 && (sid->additional_length + 4) >= minlen) {
			if ((sid->flags & SID_WBus16) == 0)
				width = MSG_EXT_WDTR_BUS_8_BIT;
			if ((sid->flags & SID_Sync) == 0) {
				period = 0;
				offset = 0;
				ppr_options = 0;
			}
		} else {
			/* Keep current settings */
			break;
		}

		minlen = offsetof(struct scsi_inquiry_data, spi3data) + 1;
		/*
		 * This is a kludge to deal with inquiry requests that
		 * are not large enough for us to pull the spi3/4 bits.
		 * In this case, we assume that a device that tells us
		 * they can provide inquiry data that spans the SPI3
		 * bits and says its SCSI3 can handle a PPR request.
		 * If the inquiry request has sufficient buffer space to
		 * cover SPI3 bits, we honor them regardless of reported
		 * SCSI REV.  We also allow any device that has had its
		 * goal ppr_options set to allow DT speeds to keep that
		 * option if a short inquiry occurs that would fail the
		 * normal tests outlined above.
		 */
		if ((sid->additional_length + 4) >= minlen) {
			if (transferred_len >= minlen) {
				if ((sid->spi3data & SID_SPI_CLOCK_DT) ==
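The queue-full and busy handling above implements a per-device tag-depth throttle: on QUEUE FULL, ahd_linux_handle_scsi_status() drops dev->openings to the number of commands still outstanding (and locks dev->maxtags if the same limit keeps recurring), while ahd_done() later creeps the depth back up one opening at a time once AHD_TAG_SUCCESS_INTERVAL completions have succeeded. The sketch below is a simplified, standalone illustration of that pattern, not driver code; qdepth_dev, TAG_SUCCESS_INTERVAL, and LOCK_TAGS_COUNT are hypothetical stand-ins for the driver's structures and constants.

/*
 * Minimal sketch (not part of aic79xx_osm.c) of the per-device queue-depth
 * throttling pattern used above: drop openings on QUEUE FULL, lock the tag
 * depth if the same limit is hit repeatedly, and slowly recover one opening
 * per interval of successful tagged completions.
 */
#include <stdio.h>

#define TAG_SUCCESS_INTERVAL	50	/* stand-in for AHD_TAG_SUCCESS_INTERVAL */
#define LOCK_TAGS_COUNT		5	/* stand-in for AHD_LOCK_TAGS_COUNT */

struct qdepth_dev {
	int openings;			/* commands we may still queue */
	int active;			/* commands outstanding at the device */
	int maxtags;			/* ceiling we are allowed to reach */
	int tag_success_count;
	int tags_on_last_queuefull;
	int last_queuefull_same_count;
};

/* Called when the device returns QUEUE FULL with 'active' commands pending. */
static void qdepth_on_queuefull(struct qdepth_dev *dev)
{
	dev->tag_success_count = 0;
	if (dev->active != 0) {
		dev->openings = 0;	/* outstanding commands define the new depth */
		if (dev->active == dev->tags_on_last_queuefull) {
			if (++dev->last_queuefull_same_count == LOCK_TAGS_COUNT)
				dev->maxtags = dev->active;	/* lock in the tag depth */
		} else {
			dev->tags_on_last_queuefull = dev->active;
			dev->last_queuefull_same_count = 0;
		}
	} else {
		dev->openings = 1;	/* behave as if the target returned BUSY */
	}
}

/* Called on each successful completion; simplified from ahd_done(). */
static void qdepth_on_completion(struct qdepth_dev *dev)
{
	dev->active--;
	dev->openings++;
	if (dev->openings == 1)
		dev->tag_success_count++;
	if (dev->openings + dev->active < dev->maxtags
	 && dev->tag_success_count > TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;	/* creep back toward maxtags */
	}
}

int main(void)
{
	struct qdepth_dev dev = { .openings = 16, .maxtags = 32 };

	dev.active = 16;
	qdepth_on_queuefull(&dev);	/* device reported QUEUE FULL at depth 16 */
	printf("openings=%d maxtags=%d\n", dev.openings, dev.maxtags);

	while (dev.active > 0)
		qdepth_on_completion(&dev);
	printf("openings=%d after draining\n", dev.openings);
	return 0;
}

Running the sketch shows openings collapsing to zero while the 16 outstanding commands remain, then returning only as those commands complete, which is the gradual recovery the comments in ahd_done() describe.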
