/* omap_udc.c */
static int read_fifo(struct omap_ep *ep, struct omap_req *req)
{
	u8		*buf;
	unsigned	count, avail;
	int		is_last;

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);
for (;;) {
u16 ep_stat = UDC_STAT_FLG_REG;
is_last = 0;
if (ep_stat & FIFO_EMPTY) {
if (!ep->double_buf)
break;
ep->fnf = 1;
}
if (ep_stat & UDC_EP_HALTED)
break;
if (ep_stat & UDC_FIFO_FULL)
avail = ep->ep.maxpacket;
else {
avail = UDC_RXFSTAT_REG;
ep->fnf = ep->double_buf;
}
count = read_packet(buf, req, avail);
/* partial packet reads may not be errors */
if (count < ep->ep.maxpacket) {
is_last = 1;
/* overflowed this request? flush extra data */
if (count != avail) {
req->req.status = -EOVERFLOW;
avail -= count;
while (avail--)
(void) *(volatile u8 *)&UDC_DATA_REG;
}
} else if (req->req.length == req->req.actual)
is_last = 1;
else
is_last = 0;
if (!ep->bEndpointAddress)
break;
if (is_last)
done(ep, req, 0);
break;
}
return is_last;
}
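/* Worked example of the overflow path above (illustrative, not from the
 * original source): suppose maxpacket is 64, the request has only 10
 * bytes of room left, and the host sends a full 64-byte packet.
 * read_packet() copies 10 bytes, so count (10) < maxpacket marks the
 * request done, and count != avail flags -EOVERFLOW; the remaining 54
 * bytes are then drained from the FIFO one byte at a time through
 * UDC_DATA_REG.
 */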
/*-------------------------------------------------------------------------*/
static inline dma_addr_t dma_csac(unsigned lch)
{
dma_addr_t csac;
/* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
csac = omap_readw(OMAP_DMA_CSAC(lch));
if (csac == 0)
csac = omap_readw(OMAP_DMA_CSAC(lch));
return csac;
}
static inline dma_addr_t dma_cdac(unsigned lch)
{
dma_addr_t cdac;
/* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
cdac = omap_readw(OMAP_DMA_CDAC(lch));
if (cdac == 0)
cdac = omap_readw(OMAP_DMA_CDAC(lch));
return cdac;
}
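/* Both helpers above apply the same erratum workaround: read once, and
 * read again if the controller returned 0 too early.  A hypothetical
 * generalization (not part of the driver) could look like this sketch:
 */
#if 0
static inline u16 dma_reg_read_retry(unsigned long reg)
{
	u16 val;

	val = omap_readw(reg);	/* may read 0 while the channel disables */
	if (val == 0)
		val = omap_readw(reg);	/* retry once, per the erratum */
	return val;
}
#endif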
static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
{
dma_addr_t end;
/* IN-DMA needs this on fault/cancel paths, so 15xx misreports
* the last transfer's bytecount by more than a FIFO's worth.
*/
if (cpu_is_omap15xx())
return 0;
end = dma_csac(ep->lch);
if (end == ep->dma_counter)
return 0;
end |= start & (0xffff << 16);
if (end < start)
end += 0x10000;
return end - start;
}
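/* Worked example of the 16-bit wrap handling above (illustrative): with
 * start = 0x1000fff0, suppose dma_csac() returns 0x0010 because only the
 * low 16 bits of the source address are visible.  OR-ing in the high
 * bits of start gives end = 0x10000010; since end < start, adding
 * 0x10000 yields 0x10010010, and the function returns 0x20 bytes.
 */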
#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
? omap_readw(OMAP_DMA_CSAC(x)) /* really: CPC */ \
: dma_cdac(x))
static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
{
dma_addr_t end;
end = DMA_DEST_LAST(ep->lch);
if (end == ep->dma_counter)
return 0;
end |= start & (0xffff << 16);
if (cpu_is_omap15xx())
end++;
if (end < start)
end += 0x10000;
return end - start;
}
/* Each USB transfer request using DMA maps to one or more DMA transfers.
* When DMA completion isn't request completion, the UDC continues with
* the next DMA transfer for that USB transfer.
*/
static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
{
u16 txdma_ctrl;
unsigned length = req->req.length - req->req.actual;
const int sync_mode = cpu_is_omap15xx()
? OMAP_DMA_SYNC_FRAME
: OMAP_DMA_SYNC_ELEMENT;
/* measure length in either bytes or packets */
if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
txdma_ctrl = UDC_TXN_EOT | length;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
length, 1, sync_mode);
} else {
length = min(length / ep->maxpacket,
(unsigned) UDC_TXN_TSC + 1);
txdma_ctrl = length;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
ep->ep.maxpacket >> 1, length, sync_mode);
length *= ep->maxpacket;
}
omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
omap_start_dma(ep->lch);
ep->dma_counter = dma_csac(ep->lch);
UDC_DMA_IRQ_EN_REG |= UDC_TX_DONE_IE(ep->dma_channel);
UDC_TXDMA_REG(ep->dma_channel) = UDC_TXN_START | txdma_ctrl;
req->dma_bytes = length;
}
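/* Illustrative walk-through of the splitting described above: for a
 * bulk IN request of 16384 bytes with maxpacket 64, each next_in_dma()
 * call programs at most (UDC_TXN_TSC + 1) whole packets in S16 packet
 * mode; the TXN_DONE irq then runs finish_in_dma(), which advances
 * req->req.actual without completing the request, and dma_irq() calls
 * next_in_dma() again until the whole request has been transferred.
 */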
static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
{
if (status == 0) {
req->req.actual += req->dma_bytes;
/* return if this request needs to send data or zlp */
if (req->req.actual < req->req.length)
return;
if (req->req.zero
&& req->dma_bytes != 0
&& (req->req.actual % ep->maxpacket) == 0)
return;
} else
req->req.actual += dma_src_len(ep, req->req.dma
+ req->req.actual);
/* tx completion */
omap_stop_dma(ep->lch);
UDC_DMA_IRQ_EN_REG &= ~UDC_TX_DONE_IE(ep->dma_channel);
done(ep, req, status);
}
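/* ZLP example for the early returns above (illustrative): a 512-byte
 * request with maxpacket 64 and req->req.zero set ends its data phase
 * with actual == length and actual % maxpacket == 0, so finish_in_dma()
 * returns without completing the request; the following next_in_dma()
 * call then sees zero bytes remaining and programs UDC_TXN_EOT with a
 * zero length, which goes out as the terminating zero-length packet.
 */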
static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
{
unsigned packets;
/* NOTE: we filtered out "short reads" before, so we know
* the buffer has only whole numbers of packets.
*/
/* set up this DMA transfer, enable the fifo, start */
packets = (req->req.length - req->req.actual) / ep->ep.maxpacket;
packets = min(packets, (unsigned)UDC_RXN_TC + 1);
req->dma_bytes = packets * ep->ep.maxpacket;
omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
ep->ep.maxpacket >> 1, packets,
OMAP_DMA_SYNC_ELEMENT);
omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
ep->dma_counter = DMA_DEST_LAST(ep->lch);
UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1);
UDC_DMA_IRQ_EN_REG |= UDC_RX_EOT_IE(ep->dma_channel);
UDC_EP_NUM_REG = (ep->bEndpointAddress & 0xf);
UDC_CTRL_REG = UDC_SET_FIFO_EN;
omap_start_dma(ep->lch);
}
static void
finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status)
{
u16 count;
if (status == 0)
ep->dma_counter = (u16) (req->req.dma + req->req.actual);
count = dma_dest_len(ep, req->req.dma + req->req.actual);
count += req->req.actual;
if (count <= req->req.length)
req->req.actual = count;
if (count != req->dma_bytes || status)
omap_stop_dma(ep->lch);
/* if this wasn't short, request may need another transfer */
else if (req->req.actual < req->req.length)
return;
/* rx completion */
UDC_DMA_IRQ_EN_REG &= ~UDC_RX_EOT_IE(ep->dma_channel);
done(ep, req, status);
}
static void dma_irq(struct omap_udc *udc, u16 irq_src)
{
u16 dman_stat = UDC_DMAN_STAT_REG;
struct omap_ep *ep;
struct omap_req *req;
/* IN dma: tx to host */
if (irq_src & UDC_TXN_DONE) {
ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
ep->irqs++;
/* can see TXN_DONE after dma abort */
if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
finish_in_dma(ep, req, 0);
}
UDC_IRQ_SRC_REG = UDC_TXN_DONE;
		if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
next_in_dma(ep, req);
}
}
/* OUT dma: rx from host */
if (irq_src & UDC_RXN_EOT) {
ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
ep->irqs++;
/* can see RXN_EOT after dma abort */
if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
finish_out_dma(ep, req, 0);
}
UDC_IRQ_SRC_REG = UDC_RXN_EOT;
		if (!list_empty(&ep->queue)) {
req = container_of(ep->queue.next,
struct omap_req, queue);
next_out_dma(ep, req);
}
}
if (irq_src & UDC_RXN_CNT) {
ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
ep->irqs++;
/* omap15xx does this unasked... */
VDBG("%s, RX_CNT irq?\n", ep->ep.name);
UDC_IRQ_SRC_REG = UDC_RXN_CNT;
}
}
static void dma_error(int lch, u16 ch_status, void *data)
{
struct omap_ep *ep = data;
/* if ch_status & OMAP_DMA_DROP_IRQ ... */
/* if ch_status & OMAP_DMA_TOUT_IRQ ... */
ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
/* complete current transfer ... */
}
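/* The "complete current transfer" step above is left unimplemented; a
 * hypothetical sketch (helper name assumed, not in the original driver)
 * could fail the request at the head of the queue, mirroring the abort
 * paths elsewhere in this file:
 */
#if 0
static void dma_error_complete(struct omap_ep *ep, int status)
{
	struct omap_req *req;

	if (list_empty(&ep->queue))
		return;
	req = container_of(ep->queue.next, struct omap_req, queue);
	if (ep->bEndpointAddress & USB_DIR_IN)
		finish_in_dma(ep, req, status);		/* e.g. -EIO */
	else
		finish_out_dma(ep, req, status);
}
#endif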
static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
{
u16 reg;
int status, restart, is_in;
is_in = ep->bEndpointAddress & USB_DIR_IN;
if (is_in)
reg = UDC_TXDMA_CFG_REG;
else
reg = UDC_RXDMA_CFG_REG;
reg |= UDC_DMA_REQ; /* "pulse" activated */
ep->dma_channel = 0;
ep->lch = -1;
if (channel == 0 || channel > 3) {
if ((reg & 0x0f00) == 0)
channel = 3;
else if ((reg & 0x00f0) == 0)
channel = 2;
else if ((reg & 0x000f) == 0) /* preferred for ISO */
channel = 1;
else {
status = -EMLINK;
goto just_restart;
}
}
reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
ep->dma_channel = channel;
if (is_in) {
status = omap_request_dma(OMAP_DMA_USB_W2FC_TX0 - 1 + channel,
ep->ep.name, dma_error, ep, &ep->lch);
if (status == 0) {
UDC_TXDMA_CFG_REG = reg;
/* EMIFF */
omap_set_dma_src_burst_mode(ep->lch,
OMAP_DMA_DATA_BURST_4);
omap_set_dma_src_data_pack(ep->lch, 1);
/* TIPB */
omap_set_dma_dest_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
(unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
}
} else {
status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel,
ep->ep.name, dma_error, ep, &ep->lch);
if (status == 0) {
UDC_RXDMA_CFG_REG = reg;
/* TIPB */
omap_set_dma_src_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
(unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
/* EMIFF */
omap_set_dma_dest_burst_mode(ep->lch,
OMAP_DMA_DATA_BURST_4);
omap_set_dma_dest_data_pack(ep->lch, 1);
}
}
if (status)
ep->dma_channel = 0;
else {
ep->has_dma = 1;
omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
/* channel type P: hw synch (fifo) */
if (!cpu_is_omap15xx())
omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch));
}
just_restart:
/* restart any queue, even if the claim failed */
restart = !ep->stopped && !list_empty(&ep->queue);
if (status)
DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
restart ? " (restart)" : "");
else
DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
is_in ? 't' : 'r',
ep->dma_channel - 1, ep->lch,
restart ? " (restart)" : "");
if (restart) {
struct omap_req *req;
req = container_of(ep->queue.next, struct omap_req, queue);
if (ep->has_dma)
(is_in ? next_in_dma : next_out_dma)(ep, req);
else {
use_ep(ep, UDC_EP_SEL);
(is_in ? write_fifo : read_fifo)(ep, req);
deselect_ep();
if (!is_in) {
UDC_CTRL_REG = UDC_SET_FIFO_EN;
ep->ackwait = 1 + ep->double_buf;
}
/* IN: 6 wait states before it'll tx */
}
}
}
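/* Register layout illustration for the claim logic above: the TX/RX
 * DMA_CFG registers hold one 4-bit endpoint number per channel, with
 * channel 1 in bits 3:0, channel 2 in bits 7:4, and channel 3 in bits
 * 11:8.  Claiming channel 2 for endpoint 1 thus computes
 * reg |= (0x0f & 1) << (4 * (2 - 1)), setting bits 7:4 to 0x1.
 */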
static void dma_channel_release(struct omap_ep *ep)
{
int shift = 4 * (ep->dma_channel - 1);
u16 mask = 0x0f << shift;
struct omap_req *req;
int active;
/* abort any active usb transfer request */
if (!list_empty(&ep->queue))
req = container_of(ep->queue.next, struct omap_req, queue);
else
req = NULL;
active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0;
DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
active ? "active" : "idle",
(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
ep->dma_channel - 1, req);
/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
* OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
*/
/* wait till current packet DMA finishes, and fifo empties */
if (ep->bEndpointAddress & USB_DIR_IN) {
UDC_TXDMA_CFG_REG = (UDC_TXDMA_CFG_REG & ~mask) | UDC_DMA_REQ;
if (req) {
finish_in_dma(ep, req, -ECONNRESET);
/* clear FIFO; hosts probably won't empty it */
use_ep(ep, UDC_EP_SEL);
UDC_CTRL_REG = UDC_CLR_EP;
deselect_ep();
}
while (UDC_TXDMA_CFG_REG & mask)
udelay(10);
} else {
UDC_RXDMA_CFG_REG = (UDC_RXDMA_CFG_REG & ~mask) | UDC_DMA_REQ;
/* dma empties the fifo */
while (UDC_RXDMA_CFG_REG & mask)
udelay(10);
if (req)
finish_out_dma(ep, req, -ECONNRESET);
}
omap_free_dma(ep->lch);
ep->dma_channel = 0;
ep->lch = -1;
/* has_dma still set, till endpoint is fully quiesced */
}
/*-------------------------------------------------------------------------*/
static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_req *req = container_of(_req, struct omap_req, req);
struct omap_udc *udc;
unsigned long flags;
int is_iso = 0;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
|| !list_empty(&req->queue)) {
DBG("%s, bad params\n", __FUNCTION__);
return -EINVAL;
}
if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
DBG("%s, bad ep\n", __FUNCTION__);
return -EINVAL;
}
if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
is_iso = 1;
}
/* this isn't bogus, but OMAP DMA isn't the only hardware to
* have a hard time with partial packet reads... reject it.
*/
if (use_dma
&& ep->has_dma
&& ep->bEndpointAddress != 0
&& (ep->bEndpointAddress & USB_DIR_IN) == 0
&& (req->req.length % ep->ep.maxpacket) != 0) {
DBG("%s, no partial packet OUT reads\n", __FUNCTION__);
		return -EMSGSIZE;
	}

	/* remainder of omap_ep_queue() elided in this excerpt */