sio_ioc4.c
        SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
             spin_success);
        if (!spin_success)
            return(-1);
    }

    if (enb) {
        p->ip_flags &= ~TX_DISABLED;
        PCI_OUTW(&p->ip_serial->stpir, p->ip_tx_prod);
        enable_intrs(p, H_INTR_TX_MT);
    }
    else {
        ioc4reg_t txcons = PCI_INW(&p->ip_serial->stcir) & PROD_CONS_MASK;

        p->ip_flags |= TX_DISABLED;
        disable_intrs(p, H_INTR_TX_MT);

        /* Only move the transmit producer pointer back if the
         * transmitter is not already empty, otherwise we'll be
         * generating a bogus entry.
         */
        if (txcons != p->ip_tx_prod)
            PCI_OUTW(&p->ip_serial->stpir,
                     (txcons + (int) sizeof(struct ring_entry)) & PROD_CONS_MASK);
    }

    /* Re-enable the DMA interface if necessary */
    if (p->ip_sscr & IOC4_SSCR_DMA_EN)
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    return(0);
}
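/* Note (added commentary, not in the original source): the SPIN() macro is
 * defined elsewhere in this driver. From the way it is used here -- waiting
 * for IOC4_SSCR_PAUSE_STATE to assert, or for IOC4_SSCR_RX_DRAIN to clear --
 * it appears to busy-wait while its first argument remains true and to clear
 * its second argument if the wait gives up, which is why every caller checks
 * spin_success and returns -1 on failure.
 */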
/*
 * Read in bytes from the hardware. Return the number of bytes
 * actually read.
 */
static int
ioc4_read(sioport_t *port, char *buf, int len)
{
    int prod_ptr, cons_ptr, total, x, spin_success;
    struct ring *inring;
    ioc4port_t *p = LPORT(port);
    struct hooks *hooks = p->ip_hooks;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_READ));
#endif

    dprintf(("read port 0x%p, len %d\n", (void *)port, len));

    DEBUGINC(read_bytes, len);
    DEBUGINC(read_cnt, 1);

    ASSERT(len >= 0);

    /* There is a nasty timing issue in the IOC4. When the RX_TIMER
     * expires or the RX_HIGH condition arises, we take an interrupt.
     * At some point while servicing the interrupt, we read bytes from
     * the ring buffer and re-arm the RX_TIMER. However the RX_TIMER is
     * not started until the first byte is received *after* it is armed,
     * and any bytes pending in the RX construction buffers are not drained
     * to memory until either there are 4 bytes available or the RX_TIMER
     * expires. This leads to a potential situation where data is left
     * in the construction buffers forever because 1 to 3 bytes were received
     * after the interrupt was generated but before the RX_TIMER was re-armed.
     * At that point as long as no subsequent bytes are received the
     * timer will never be started and the bytes will remain in the
     * construction buffer forever. The solution is to execute a DRAIN
     * command after rearming the timer. This way any bytes received before
     * the DRAIN will be drained to memory, and any bytes received after
     * the DRAIN will start the TIMER and be drained when it expires.
     * Luckily, this only needs to be done when the DMA buffer is empty
     * since there is no requirement that this function return all
     * available data as long as it returns some.
     */

    /* Re-arm the timer */
    PCI_OUTW(&p->ip_serial->srcir, p->ip_rx_cons | IOC4_SRCIR_ARM);

    prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;
    cons_ptr = p->ip_rx_cons;

    if (prod_ptr == cons_ptr) {
        int reset_dma = 0;

        /* Input buffer appears empty, do a flush. */

        /* DMA must be enabled for this to work. */
        if (!(p->ip_sscr & IOC4_SSCR_DMA_EN)) {
            p->ip_sscr |= IOC4_SSCR_DMA_EN;
            reset_dma = 1;
        }

        /* Potential race condition: we must reload the srpir after
         * issuing the drain command, otherwise we could think the RX
         * buffer is empty, then take a very long interrupt, and when
         * we come back it's full and we wait forever for the drain to
         * complete.
         */
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_RX_DRAIN);
        prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;

        DEBUGINC(drain, 1);

        /* We must not wait for the DRAIN to complete unless there are
         * at least 8 bytes (2 ring entries) available to receive the data
         * otherwise the DRAIN will never complete and we'll deadlock here.
         * In fact, to make things easier, I'll just ignore the flush if
         * there is any data at all now available.
         */
        if (prod_ptr == cons_ptr) {
            DEBUGINC(drainwait, 1);
            SPIN(PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_RX_DRAIN, spin_success);
            if (!spin_success)
                return(-1);

            /* SIGH. We have to reload the prod_ptr *again* since
             * the drain may have caused it to change
             */
            prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;
        }

        if (reset_dma) {
            DEBUGINC(resetdma, 1);
            p->ip_sscr &= ~IOC4_SSCR_DMA_EN;
            PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
        }
    }

    inring = p->ip_inring;

    p->ip_flags &= ~READ_ABORTED;

    total = 0;

    /* Grab bytes from the hardware */
    while(prod_ptr != cons_ptr && len > 0) {
        struct ring_entry *entry;

        entry = (struct ring_entry *) ((caddr_t)inring + cons_ptr);

        /* According to the producer pointer, this ring entry
         * must contain some data. But if the PIO happened faster
         * than the DMA, the data may not be available yet, so let's
         * wait until it arrives.
         */
        if ((((volatile struct ring_entry *) entry)->ring_allsc &
             RING_ANY_VALID) == 0) {

            /* Indicate the read is aborted so we don't disable
             * the interrupt thinking that the consumer is
             * congested.
             */
            p->ip_flags |= READ_ABORTED;
            DEBUGINC(read_aborted, 1);
            len = 0;
            break;
        }

        /* Load the bytes/status out of the ring entry */
        for(x = 0; x < 4 && len > 0; x++) {
            char *sc = &(entry->ring_sc[x]);

            /* Check for change in modem state or overrun */
            if (*sc & IOC4_RXSB_MODEM_VALID) {
                if (p->ip_notify & N_DDCD) {

                    /* Notify upper layer if DCD dropped */
                    if ((p->ip_flags & DCD_ON) && !(*sc & IOC4_RXSB_DCD)) {

                        /* If we have already copied some data, return
                         * it. We'll pick up the carrier drop on the next
                         * pass. That way we don't throw away the data
                         * that has already been copied back to the caller's
                         * buffer.
                         */
                        if (total > 0) {
                            len = 0;
                            break;
                        }

                        p->ip_flags &= ~DCD_ON;

                        /* Turn off this notification so the carrier
                         * drop protocol won't see it again when it
                         * does a read.
                         */
                        *sc &= ~IOC4_RXSB_MODEM_VALID;

                        /* To keep things consistent, we need to update
                         * the consumer pointer so the next reader won't
                         * come in and try to read the same ring entries
                         * again. This must be done here before the call
                         * to UP_DDCD since UP_DDCD may do a recursive
                         * read!
                         */
                        if ((entry->ring_allsc & RING_ANY_VALID) == 0)
                            cons_ptr = (cons_ptr + (int) sizeof(struct ring_entry)) &
                                PROD_CONS_MASK;

                        PCI_OUTW(&p->ip_serial->srcir, cons_ptr);
                        p->ip_rx_cons = cons_ptr;

                        /* Notify upper layer of carrier drop */
                        if (p->ip_notify & N_DDCD)
                            UP_DDCD(port, 0);

                        DEBUGINC(read_ddcd, 1);

                        /* If we had any data to return, we would have
                         * returned it above.
                         */
                        return(0);
                    }
                }

                /* Notify upper layer that an input overrun occurred */
                if ((*sc & IOC4_RXSB_OVERRUN) &&
                    (p->ip_notify & N_OVERRUN_ERROR)) {
                    DEBUGINC(rx_overrun, 1);
                    UP_NCS(port, NCS_OVERRUN);
                }

                /* Don't look at this byte again */
                *sc &= ~IOC4_RXSB_MODEM_VALID;
            }

            /* Check for valid data or RX errors */
            if (*sc & IOC4_RXSB_DATA_VALID) {
                if ((*sc & (IOC4_RXSB_PAR_ERR | IOC4_RXSB_FRAME_ERR | IOC4_RXSB_BREAK)) &&
                    (p->ip_notify & (N_PARITY_ERROR | N_FRAMING_ERROR | N_BREAK))) {

                    /* There is an error condition on the next byte. If
                     * we have already transferred some bytes, we'll stop
                     * here. Otherwise if this is the first byte to be read,
                     * we'll just transfer it alone after notifying the
                     * upper layer of its status.
                     */
                    if (total > 0) {
                        len = 0;
                        break;
                    }
                    else {
                        if ((*sc & IOC4_RXSB_PAR_ERR) &&
                            (p->ip_notify & N_PARITY_ERROR)) {
                            DEBUGINC(parity, 1);
                            UP_NCS(port, NCS_PARITY);
                        }

                        if ((*sc & IOC4_RXSB_FRAME_ERR) &&
                            (p->ip_notify & N_FRAMING_ERROR)) {
                            DEBUGINC(framing, 1);
                            UP_NCS(port, NCS_FRAMING);
                        }

                        if ((*sc & IOC4_RXSB_BREAK) &&
                            (p->ip_notify & N_BREAK)) {
                            DEBUGINC(brk, 1);
                            UP_NCS(port, NCS_BREAK);
                        }

                        len = 1;
                    }
                }

                *sc &= ~IOC4_RXSB_DATA_VALID;

                *buf++ = entry->ring_data[x];
                len--;
                total++;
            }
        }

        DEBUGINC(rx_buf_used, x);
        DEBUGINC(rx_buf_cnt, 1);

        /* If we used up this entry entirely, go on to the next one,
         * otherwise we must have run out of buffer space, so
         * leave the consumer pointer here for the next read in case
         * there are still unread bytes in this entry.
         */
        if ((entry->ring_allsc & RING_ANY_VALID) == 0)
            cons_ptr = (cons_ptr + (int) sizeof(struct ring_entry)) &
                PROD_CONS_MASK;
    }

    /* Update consumer pointer and re-arm RX timer interrupt */
    PCI_OUTW(&p->ip_serial->srcir, cons_ptr);
    p->ip_rx_cons = cons_ptr;

    /* If we have now dipped below the RX high water mark and we have
     * RX_HIGH interrupt turned off, we can now turn it back on again.
     */
    if ((p->ip_flags & INPUT_HIGH) &&
        (((prod_ptr - cons_ptr) & PROD_CONS_MASK) <
         ((p->ip_sscr & IOC4_SSCR_RX_THRESHOLD) << IOC4_PROD_CONS_PTR_OFF))) {
        p->ip_flags &= ~INPUT_HIGH;
        enable_intrs(p, H_INTR_RX_HIGH);
    }

    DEBUGINC(red_bytes, total);

    return(total);
}
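/* Illustrative sketch only -- not part of the original driver. It shows how
 * an upper layer might use ioc4_read() given its return convention above:
 * a positive count of bytes copied, 0 when nothing was available (or a
 * carrier drop was reported instead), and -1 if a hardware spin timed out.
 * The IOC4_EXAMPLE_CONSUMER guard and the example_* name are hypothetical.
 */
#ifdef IOC4_EXAMPLE_CONSUMER
static int
example_drain_port(sioport_t *port, char *dest, int space)
{
    int n = 0, copied = 0;

    /* Keep pulling bytes until the ring is empty, the caller's buffer is
     * full, or the driver reports a spin timeout.
     */
    while (space > 0 && (n = ioc4_read(port, dest, space)) > 0) {
        dest += n;
        space -= n;
        copied += n;
    }

    return(n < 0 ? n : copied);
}
#endif /* IOC4_EXAMPLE_CONSUMER */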
/*
 * Modify event notification
 */
static int
ioc4_notification(sioport_t *port, int mask, int on)
{
    ioc4port_t *p = LPORT(port);
    struct hooks *hooks = p->ip_hooks;
    ioc4reg_t intrbits, sscrbits;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_NOTIFICATION));
#endif
    ASSERT(mask);

    intrbits = sscrbits = 0;

    if (mask & N_DATA_READY)
        intrbits |= (H_INTR_RX_TIMER | H_INTR_RX_HIGH);
    if (mask & N_OUTPUT_LOWAT)
        intrbits |= H_INTR_TX_EXPLICIT;
    if (mask & N_DDCD) {
        intrbits |= H_INTR_DELTA_DCD;
        sscrbits |= IOC4_SSCR_RX_RING_DCD;
    }
    if (mask & N_DCTS)
        intrbits |= H_INTR_DELTA_CTS;

    if (on) {
        enable_intrs(p, intrbits);
        p->ip_notify |= mask;
        p->ip_sscr |= sscrbits;
    }
    else {
        disable_intrs(p, intrbits);
        p->ip_notify &= ~mask;
        p->ip_sscr &= ~sscrbits;
    }

    /* We require DMA if either DATA_READY or DDCD notification is
     * currently requested. If neither of these is requested and
     * there is currently no TX in progress, DMA may be disabled.
     */
    if (p->ip_notify & (N_DATA_READY | N_DDCD))
        p->ip_sscr |= IOC4_SSCR_DMA_EN;
    else if (!(p->ip_ienb & H_INTR_TX_MT))
        p->ip_sscr &= ~IOC4_SSCR_DMA_EN;

    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    return(0);
}

/*
 * Set RX timeout and threshold values. The upper layer passes in a
 * timeout value. In all cases it would like to be notified at least this
 * often when there are RX chars coming in. We set the RX timeout and
 * RX threshold (based on baud) to ensure that the upper layer is called
 * at roughly this interval during normal RX.
 * The input timeout value is in ticks.
 */
static int
ioc4_rx_timeout(sioport_t *port, int timeout)
{
    int threshold;
    ioc4port_t *p = LPORT(port);

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_RX_TIMEOUT));
#endif

    p->ip_rx_timeout = timeout;

    /* Timeout is in ticks. Let's figure out how many chars we
     * can receive at the current baud rate in that interval
     * and set the RX threshold to that amount. There are 4 chars
     * per ring entry, so we'll divide the number of chars that will
     * arrive in timeout by 4.
     */
    threshold = timeout * p->ip_baud / 10 / HZ / 4;
    if (threshold == 0)
        threshold = 1;  /* otherwise we'll intr all the time! */
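    /* Worked example (added commentary, not in the original source; assumes
     * HZ == 100 and roughly 10 bits per character on the wire): for a 2-tick
     * timeout at 38400 baud, 2 * 38400 / 10 / 100 / 4 = 19, so the RX
     * threshold becomes about 19 ring entries, i.e. the number of 4-char
     * entries expected to fill during one timeout interval.
     */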
    if ((unsigned) threshold > (unsigned) IOC4_SSCR_RX_THRESHOLD)
        return(1);

    p->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
    p->ip_sscr |= threshold;
    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    /* Now set the RX timeout to the given value */
    timeout = timeout * IOC4_SRTR_HZ / HZ;
    if (timeout > IOC4_SRTR_CNT)
        timeout = IOC4_SRTR_CNT;
    PCI_OUTW(&p->ip_serial->srtr, timeout);

    return(0);
}

static int
set_DTRRTS(sioport_t *port, int val, int mask1, int mask2)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;
    int spin_success;
    char mcr;

    /* XXX need lock for pretty much this entire routine. Makes
     * me nervous to hold it for so long. If we crash or hit
     * a breakpoint in here, we're hosed.
     */

    /* Pause the DMA interface if necessary */
    if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
        SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
             spin_success);
        if (!spin_success)
            return(-1);
    }

    shadow = PCI_INW(&p->ip_serial->shadow);
    mcr = (shadow & 0xff000000) >> 24;

    /* Set new value */
    if (val) {
        mcr |= mask1;
        shadow |= mask2;
    }
    else {
        mcr &= ~mask1;
        shadow &= ~mask2;
    }

    PCI_OUTB(&p->ip_uart->i4u_mcr, mcr);
    PCI_OUTW(&p->ip_serial->shadow, shadow);

    /* Re-enable the DMA interface if necessary */
    if (p->ip_sscr & IOC4_SSCR_DMA_EN)
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    return(0);
}

static int
ioc4_set_DTR(sioport_t *port, int dtr)
{
#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_DTR));
#endif

    dprintf(("set dtr port 0x%p, dtr %d\n", (void *)port, dtr));

    return(set_DTRRTS(port, dtr, MCR_DTR, IOC4_SHADOW_DTR));
}

static int
ioc4_set_RTS(sioport_t *port, int rts)
{
#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_RTS));
#endif

    dprintf(("set rts port 0x%p, rts %d\n", (void *)port, rts));

    return(set_DTRRTS(port, rts, MCR_RTS, IOC4_SHADOW_RTS));
}

static int
ioc4_query_DCD(sioport_t *port)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_QUERY_DCD));
#endif

    dprintf(("get dcd port 0x%p\n", (void *)port));

    shadow = PCI_INW(&p->ip_serial->shadow);

    return(shadow & IOC4_SHADOW_DCD);
}

static int
ioc4_query_CTS(sioport_t *port)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_QUERY_CTS));
#endif

    dprintf(("get cts port 0x%p\n", (void *)port));

    shadow = PCI_INW(&p->ip_serial->shadow);

    return(shadow & IOC4_SHADOW_CTS);
}

static int
ioc4_set_proto(sioport_t *port, enum sio_proto proto)
{
    ioc4port_t *p = LPORT(port);
    struct hooks *hooks = p->ip_hooks;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_PROTOCOL));
#endif

    switch(proto) {
    case PROTO_RS232:
        /* Clear the appropriate GIO pin */
        PCI_OUTW((&p->ip_ioc4->gppr_0 + H_RS422), 0);
        break;

    case PROTO_RS422:
        /* Set the appropriate GIO pin */
        PCI_OUTW((&p->ip_ioc4->gppr_0 + H_RS422), 1);
        break;

    default:
        return(1);
    }

    return(0);
}

// #define IS_PORT_0(p) ((p)->ip_hooks == &hooks_array[0])

static int
ioc4_get_mapid(sioport_t *port, void *arg)
{
    return(0);
}

static int
ioc4_set_sscr(sioport_t *port, int arg, int flag)
{
    ioc4port_t *p = LPORT(port);

    if (flag) {
        /* reset arg bits in p->ip_sscr */
        p->ip_sscr &= ~arg;
    } else {
        /* set bits in p->ip_sscr */
        p->ip_sscr |= arg;
    }
    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    return(p->ip_sscr);
}
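/* Illustrative sketch only -- not part of the original driver. It exercises
 * the modem-control entry points above: ioc4_set_DTR()/ioc4_set_RTS() return
 * 0 on success or -1 if the DMA-pause spin in set_DTRRTS() timed out, and
 * ioc4_query_DCD() returns a non-zero value while carrier is present. The
 * IOC4_EXAMPLE_MODEM_CTL guard and the example_* name are hypothetical.
 */
#ifdef IOC4_EXAMPLE_MODEM_CTL
static int
example_raise_and_check_carrier(sioport_t *port)
{
    /* Assert DTR and RTS, bailing out if the hardware would not pause */
    if (ioc4_set_DTR(port, 1) || ioc4_set_RTS(port, 1))
        return(-1);

    /* Report whether DCD is currently asserted */
    return(ioc4_query_DCD(port) ? 1 : 0);
}
#endif /* IOC4_EXAMPLE_MODEM_CTL */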