
📄 parport_ip32.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
			printk("%cnFault", sep);		if (!(dsr & DSR_nPRINT))			printk("%c(Print)", sep);		if (dsr & DSR_TIMEOUT)			printk("%cTimeout", sep);		printk("\n");	}#undef sep}#else /* DEBUG_PARPORT_IP32 < 2 */#define parport_ip32_dump_state(...)	do { } while (0)#endif/* * CHECK_EXTRA_BITS - track and log extra bits * @p:		pointer to &struct parport * @b:		byte to inspect * @m:		bit mask of authorized bits * * This is used to track and log extra bits that should not be there in * parport_ip32_write_control() and parport_ip32_frob_control().  It is only * defined if %DEBUG_PARPORT_IP32 >= 1. */#if DEBUG_PARPORT_IP32 >= 1#define CHECK_EXTRA_BITS(p, b, m)					\	do {								\		unsigned int __b = (b), __m = (m);			\		if (__b & ~__m)						\			pr_debug1(PPIP32 "%s: extra bits in %s(%s): "	\				  "0x%02x/0x%02x\n",			\				  (p)->name, __func__, #b, __b, __m);	\	} while (0)#else /* DEBUG_PARPORT_IP32 < 1 */#define CHECK_EXTRA_BITS(...)	do { } while (0)#endif/*--- IP32 parallel port DMA operations --------------------------------*//** * struct parport_ip32_dma_data - private data needed for DMA operation * @dir:	DMA direction (from or to device) * @buf:	buffer physical address * @len:	buffer length * @next:	address of next bytes to DMA transfer * @left:	number of bytes remaining * @ctx:	next context to write (0: context_a; 1: context_b) * @irq_on:	are the DMA IRQs currently enabled? * @lock:	spinlock to protect access to the structure */struct parport_ip32_dma_data {	enum dma_data_direction		dir;	dma_addr_t			buf;	dma_addr_t			next;	size_t				len;	size_t				left;	unsigned int			ctx;	unsigned int			irq_on;	spinlock_t			lock;};static struct parport_ip32_dma_data parport_ip32_dma;/** * parport_ip32_dma_setup_context - setup next DMA context * @limit:	maximum data size for the context * * The alignment constraints must be verified in caller function, and the * parameter @limit must be set accordingly. */static void parport_ip32_dma_setup_context(unsigned int limit){	unsigned long flags;	spin_lock_irqsave(&parport_ip32_dma.lock, flags);	if (parport_ip32_dma.left > 0) {		/* Note: ctxreg is "volatile" here only because		 * mace->perif.ctrl.parport.context_a and context_b are		 * "volatile".  */		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?			&mace->perif.ctrl.parport.context_a :			&mace->perif.ctrl.parport.context_b;		u64 count;		u64 ctxval;		if (parport_ip32_dma.left <= limit) {			count = parport_ip32_dma.left;			ctxval = MACEPAR_CONTEXT_LASTFLAG;		} else {			count = limit;			ctxval = 0;		}		pr_trace(NULL,			 "(%u): 0x%04x:0x%04x, %u -> %u%s",			 limit,			 (unsigned int)parport_ip32_dma.buf,			 (unsigned int)parport_ip32_dma.next,			 (unsigned int)count,			 parport_ip32_dma.ctx, ctxval ? "*" : "");		ctxval |= parport_ip32_dma.next &			MACEPAR_CONTEXT_BASEADDR_MASK;		ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &			MACEPAR_CONTEXT_DATALEN_MASK;		writeq(ctxval, ctxreg);		parport_ip32_dma.next += count;		parport_ip32_dma.left -= count;		parport_ip32_dma.ctx ^= 1U;	}	/* If there is nothing more to send, disable IRQs to avoid to	 * face an IRQ storm which can lock the machine.  Disable them	 * only once. 
/**
 * parport_ip32_dma_interrupt - DMA interrupt handler
 * @irq:	interrupt number
 * @dev_id:	unused
 */
static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
{
	if (parport_ip32_dma.left)
		pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
	return IRQ_HANDLED;
}

#if DEBUG_PARPORT_IP32
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
{
	pr_trace1(NULL, "(%d)", irq);
	return IRQ_HANDLED;
}
#endif

/**
 * parport_ip32_dma_start - begins a DMA transfer
 * @dir:	DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @addr:	pointer to data buffer
 * @count:	buffer size
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 */
static int parport_ip32_dma_start(enum dma_data_direction dir,
				  void *addr, size_t count)
{
	unsigned int limit;
	u64 ctrl;

	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);

	/* FIXME - add support for DMA_FROM_DEVICE.  In this case, buffer must
	 * be 64 bytes aligned. */
	BUG_ON(dir != DMA_TO_DEVICE);

	/* Reset DMA controller */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* DMA IRQs should normally be enabled */
	if (!parport_ip32_dma.irq_on) {
		WARN_ON(1);
		enable_irq(MACEISA_PAR_CTXA_IRQ);
		enable_irq(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 1;
	}

	/* Prepare DMA pointers */
	parport_ip32_dma.dir = dir;
	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;

	/* Setup DMA direction and first two contexts */
	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Single transfer should not cross a 4K page boundary */
	limit = MACEPAR_CONTEXT_DATA_BOUND -
		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
	parport_ip32_dma_setup_context(limit);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);

	/* Real start of DMA transfer */
	ctrl |= MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	return 0;
}

/**
 * parport_ip32_dma_stop - ends a running DMA transfer
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
 */
static void parport_ip32_dma_stop(void)
{
	u64 ctx_a;
	u64 ctx_b;
	u64 ctrl;
	u64 diag;
	size_t res[2];	/* {[0] = res_a, [1] = res_b} */

	pr_trace(NULL, "()");

	/* Disable IRQs */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (stop)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* Force IRQ synchronization, even if the IRQs were disabled
	 * elsewhere. */
	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
	synchronize_irq(MACEISA_PAR_CTXB_IRQ);

	/* Stop DMA transfer */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* Adjust residue (parport_ip32_dma.left) */
	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	if (diag & MACEPAR_DIAG_DMACTIVE)
		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
			     MACEPAR_DIAG_CTRSHIFT);
	parport_ip32_dma.left += res[0] + res[1];

	/* Reset DMA controller, and re-enable IRQs */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	pr_debug(PPIP32 "IRQ on (stop)\n");
	enable_irq(MACEISA_PAR_CTXA_IRQ);
	enable_irq(MACEISA_PAR_CTXB_IRQ);
	parport_ip32_dma.irq_on = 1;

	dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
			 parport_ip32_dma.dir);
}
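
/* Illustrative sketch (editorial, not in the original file): the
 * balanced usage that the kernel-doc above requires, roughly as a
 * write-side caller would do it.  Error handling and the actual wait
 * for completion are elided, and the variable names are hypothetical:
 *
 *	if (parport_ip32_dma_start(DMA_TO_DEVICE, buf, len))
 *		return 0;
 *	... wait until the transfer completes or times out ...
 *	parport_ip32_dma_stop();
 *	written = len - parport_ip32_dma_get_residue();
 *
 * parport_ip32_dma_stop() recovers the residue from the context and
 * diagnostic registers and resets/re-arms the DMA engine, so it must be
 * called exactly once for each successful parport_ip32_dma_start().
 */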
/**
 * parport_ip32_dma_get_residue - get residue from last DMA transfer
 *
 * Returns the number of bytes remaining from last DMA transfer.
 */
static inline size_t parport_ip32_dma_get_residue(void)
{
	return parport_ip32_dma.left;
}

/**
 * parport_ip32_dma_register - initialize DMA engine
 *
 * Returns zero for success.
 */
static int parport_ip32_dma_register(void)
{
	int err;

	spin_lock_init(&parport_ip32_dma.lock);
	parport_ip32_dma.irq_on = 1;

	/* Reset DMA controller */
	writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);

	/* Request IRQs */
	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_a;
	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_b;
#if DEBUG_PARPORT_IP32
	/* FIXME - what is this IRQ for? */
	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_merr;
#endif
	return 0;

#if DEBUG_PARPORT_IP32
fail_merr:
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
#endif
fail_b:
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
fail_a:
	return err;
}

/**
 * parport_ip32_dma_unregister - release and free resources for DMA engine
 */
static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
	free_irq(MACEISA_PAR_MERR_IRQ, NULL);
#endif
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
}

/*--- Interrupt handlers and associates --------------------------------*/

/**
 * parport_ip32_wakeup - wakes up code waiting for an interrupt
 * @p:		pointer to &struct parport
 */
static inline void parport_ip32_wakeup(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	complete(&priv->irq_complete);
}
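
/* Illustrative sketch (editorial, not in the original file): the wait
 * side that pairs with parport_ip32_wakeup() above, assuming the caller
 * first selects PARPORT_IP32_IRQ_HERE so that parport_ip32_interrupt()
 * (below) routes the interrupt to parport_ip32_wakeup():
 *
 *	priv->irq_mode = PARPORT_IP32_IRQ_HERE;
 *	... enable the interrupt source ...
 *	wait_for_completion_interruptible(&priv->irq_complete);
 *
 * complete()/wait_for_completion*() are the standard kernel completion
 * API; &priv->irq_complete is the completion object this driver uses.
 */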
/**
 * parport_ip32_interrupt - interrupt handler
 * @irq:	interrupt number
 * @dev_id:	pointer to &struct parport
 *
 * Caught interrupts are forwarded to the upper parport layer if irq_mode is
 * %PARPORT_IP32_IRQ_FWD.
 */
static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
{
	struct parport * const p = dev_id;
	struct parport_ip32_private * const priv = p->physport->private_data;
	enum parport_ip32_irq_mode irq_mode = priv->irq_mode;

	switch (irq_mode) {
	case PARPORT_IP32_IRQ_FWD:
		return parport_irq_handler(irq, dev_id);

	case PARPORT_IP32_IRQ_HERE:
		parport_ip32_wakeup(p);
		break;
	}

	return IRQ_HANDLED;
}

/*--- Some utility functions to manipulate the ECR register ------------*/

/**
 * parport_ip32_read_econtrol - read contents of the ECR register
 * @p:		pointer to &struct parport
 */
static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.ecr);
}

/**
 * parport_ip32_write_econtrol - write new contents to the ECR register
 * @p:		pointer to &struct parport
 * @c:		new value to write
 */
static inline void parport_ip32_write_econtrol(struct parport *p,
					       unsigned int c)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	writeb(c, priv->regs.ecr);
}

/**
 * parport_ip32_frob_econtrol - change bits from the ECR register
 * @p:		pointer to &struct parport
 * @mask:	bit mask of bits to change
 * @val:	new value for changed bits
 *
 * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
 * in @val, and write the result to the ECR.
 */
static inline void parport_ip32_frob_econtrol(struct parport *p,
					      unsigned int mask,
					      unsigned int val)
{
	unsigned int c;

	c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
	parport_ip32_write_econtrol(p, c);
}

/**
 * parport_ip32_set_mode - change mode of ECP port
 * @p:		pointer to &struct parport
 * @mode:	new mode to write in ECR
 *
 * ECR is reset to a sane state (interrupts and DMA disabled), and placed in
 * mode @mode.  Go through PS2 mode if needed.
 */
static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
{
	unsigned int omode;

	mode &= ECR_MODE_MASK;
	omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
	if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
	      || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
		/* We have to go through PS2 mode */
		unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
