⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 libata-core.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
/** *	ata_port_flush_task - Flush port_task *	@ap: The ata_port to flush port_task for * *	After this function completes, port_task is guranteed not to *	be running or scheduled. * *	LOCKING: *	Kernel thread context (may sleep) */void ata_port_flush_task(struct ata_port *ap){	DPRINTK("ENTER\n");	cancel_rearming_delayed_work(&ap->port_task);	if (ata_msg_ctl(ap))		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);}static void ata_qc_complete_internal(struct ata_queued_cmd *qc){	struct completion *waiting = qc->private_data;	complete(waiting);}/** *	ata_exec_internal_sg - execute libata internal command *	@dev: Device to which the command is sent *	@tf: Taskfile registers for the command and the result *	@cdb: CDB for packet command *	@dma_dir: Data tranfer direction of the command *	@sgl: sg list for the data buffer of the command *	@n_elem: Number of sg entries *	@timeout: Timeout in msecs (0 for default) * *	Executes libata internal command with timeout.  @tf contains *	command on entry and result on return.  Timeout and error *	conditions are reported via return value.  No recovery action *	is taken after a command times out.  It's caller's duty to *	clean up after timeout. * *	LOCKING: *	None.  Should be called with kernel context, might sleep. 
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save the link/port active state and clear it so this
	 * internal command runs alone; it is restored verbatim at the
	 * end of the function.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	/* rc == 0 means the wait timed out */
	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is a catch-all; drop it when a more
		 * specific error bit is already set.
		 */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	/* wrap @buf in a single-entry on-stack sg list */
	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
*/unsigned int ata_pio_need_iordy(const struct ata_device *adev){	/* Controller doesn't support  IORDY. Probably a pointless check	   as the caller should know this */	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)		return 0;	/* PIO3 and higher it is mandatory */	if (adev->pio_mode > XFER_PIO_2)		return 1;	/* We turn it on when possible */	if (ata_id_has_iordy(adev->id))		return 1;	return 0;}/** *	ata_pio_mask_no_iordy	-	Return the non IORDY mask *	@adev: ATA device * *	Compute the highest mode possible if we are not using iordy. Return *	-1 if no iordy mode is available. */static u32 ata_pio_mask_no_iordy(const struct ata_device *adev){	/* If we have no drive specific rule, then PIO 2 is non IORDY */	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */		u16 pio = adev->id[ATA_ID_EIDE_PIO];		/* Is the speed faster than the drive allows non IORDY ? */		if (pio) {			/* This is cycle times not frequency - watch the logic! */			if (pio > 240)	/* PIO2 is 240nS per cycle */				return 3 << ATA_SHIFT_PIO;			return 7 << ATA_SHIFT_PIO;		}	}	return 3 << ATA_SHIFT_PIO;}/** *	ata_dev_read_id - Read ID data from the specified device *	@dev: target device *	@p_class: pointer to class of the target device (may be changed) *	@flags: ATA_READID_* flags *	@id: buffer to read IDENTIFY data into * *	Read ID data from the specified device.  ATA_CMD_ID_ATA is *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS *	for pre-ATA4 drives. * *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right *	now we abort if we hit that case. * *	LOCKING: *	Kernel thread context (may sleep) * *	RETURNS: *	0 on success, -errno otherwise. 
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the (possibly guessed)
	 * device class; a wrong guess is corrected below by falling
	 * back to the other variant once.
	 */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* id[2] (specific configuration word) values 0x37c8/0x738c
	 * indicate power-up-in-standby; 0x37c8 additionally means the
	 * IDENTIFY data is incomplete.
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;
	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

/* Heuristic: a device that sits on a SATA port but does not report
 * SATA in its IDENTIFY data is likely behind a SATA<->PATA bridge and
 * may need conservative treatment.
 */
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/* Build a human-readable NCQ capability description for @dev into
 * @desc (at most @desc_sz bytes); empty string when NCQ is absent.
 * NOTE(review): definition is truncated at the end of this chunk.
 */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -