/* omap_udc.c — OMAP USB device-side (gadget) controller driver */
return -EMSGSIZE;
}
udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
if (use_dma && ep->has_dma) {
if (req->req.dma == DMA_ADDR_INVALID) {
req->req.dma = dma_map_single(
ep->udc->gadget.dev.parent,
req->req.buf,
req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->mapped = 1;
} else {
dma_sync_single_for_device(
ep->udc->gadget.dev.parent,
req->req.dma, req->req.length,
(ep->bEndpointAddress & USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->mapped = 0;
}
}
VDBG("%s queue req %p, len %d buf %p\n",
ep->ep.name, _req, _req->length, _req->buf);
spin_lock_irqsave(&udc->lock, flags);
req->req.status = -EINPROGRESS;
req->req.actual = 0;
/* maybe kickstart non-iso i/o queues */
if (is_iso)
UDC_IRQ_EN_REG |= UDC_SOF_IE;
else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
int is_in;
if (ep->bEndpointAddress == 0) {
if (!udc->ep0_pending || !list_empty (&ep->queue)) {
spin_unlock_irqrestore(&udc->lock, flags);
return -EL2HLT;
}
/* empty DATA stage? */
is_in = udc->ep0_in;
if (!req->req.length) {
/* chip became CONFIGURED or ADDRESSED
* earlier; drivers may already have queued
* requests to non-control endpoints
*/
if (udc->ep0_set_config) {
u16 irq_en = UDC_IRQ_EN_REG;
irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
if (!udc->ep0_reset_config)
irq_en |= UDC_EPN_RX_IE
| UDC_EPN_TX_IE;
UDC_IRQ_EN_REG = irq_en;
}
/* STATUS for zero length DATA stages is
* always an IN ... even for IN transfers,
* a weird case which seems to stall OMAP.
*/
UDC_EP_NUM_REG = (UDC_EP_SEL|UDC_EP_DIR);
UDC_CTRL_REG = UDC_CLR_EP;
UDC_CTRL_REG = UDC_SET_FIFO_EN;
UDC_EP_NUM_REG = UDC_EP_DIR;
/* cleanup */
udc->ep0_pending = 0;
done(ep, req, 0);
req = NULL;
/* non-empty DATA stage */
} else if (is_in) {
UDC_EP_NUM_REG = UDC_EP_SEL|UDC_EP_DIR;
} else {
if (udc->ep0_setup)
goto irq_wait;
UDC_EP_NUM_REG = UDC_EP_SEL;
}
} else {
is_in = ep->bEndpointAddress & USB_DIR_IN;
if (!ep->has_dma)
use_ep(ep, UDC_EP_SEL);
/* if ISO: SOF IRQs must be enabled/disabled! */
}
if (ep->has_dma)
(is_in ? next_in_dma : next_out_dma)(ep, req);
else if (req) {
if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
req = NULL;
deselect_ep();
if (!is_in) {
UDC_CTRL_REG = UDC_SET_FIFO_EN;
ep->ackwait = 1 + ep->double_buf;
}
/* IN: 6 wait states before it'll tx */
}
}
irq_wait:
/* irq handler advances the queue */
if (req != NULL)
list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_req *req;
unsigned long flags;
if (!_ep || !_req)
return -EINVAL;
spin_lock_irqsave(&ep->udc->lock, flags);
/* make sure it's actually queued on this endpoint */
list_for_each_entry (req, &ep->queue, queue) {
if (&req->req == _req)
break;
}
if (&req->req != _req) {
spin_unlock_irqrestore(&ep->udc->lock, flags);
return -EINVAL;
}
if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
int channel = ep->dma_channel;
/* releasing the channel cancels the request,
* reclaiming the channel restarts the queue
*/
dma_channel_release(ep);
dma_channel_claim(ep, channel);
} else
done(ep, req, -ECONNRESET);
spin_unlock_irqrestore(&ep->udc->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * omap_ep_set_halt - set (value != 0) or clear an endpoint halt/stall
 * @_ep: the endpoint; ep0 gets a protocol stall rather than a real halt
 * @value: nonzero to halt, zero to clear the halt
 *
 * Returns 0 on success; -EINVAL for ep0 when no control transfer is
 * pending; -EAGAIN when an IN endpoint still has queued requests or the
 * FIFO is not empty; -EOPNOTSUPP for ISO or unconfigured endpoints.
 */
static int omap_ep_set_halt(struct usb_ep *_ep, int value)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
unsigned long flags;
int status = -EOPNOTSUPP;
spin_lock_irqsave(&ep->udc->lock, flags);
/* just use protocol stalls for ep0; real halts are annoying */
if (ep->bEndpointAddress == 0) {
if (!ep->udc->ep0_pending)
status = -EINVAL;
else if (value) {
/* abandon a half-done SET_CONFIGURATION before stalling */
if (ep->udc->ep0_set_config) {
WARN("error changing config?\n");
UDC_SYSCON2_REG = UDC_CLR_CFG;
}
UDC_SYSCON2_REG = UDC_STALL_CMD;
ep->udc->ep0_pending = 0;
status = 0;
} else /* NOP */
status = 0;
/* otherwise, all active non-ISO endpoints can halt */
} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {
/* IN endpoints must already be idle */
if ((ep->bEndpointAddress & USB_DIR_IN)
&& !list_empty(&ep->queue)) {
status = -EAGAIN;
goto done;
}
if (value) {
int channel;
/* pause active DMA so FIFO state is stable while halting */
if (use_dma && ep->dma_channel
&& !list_empty(&ep->queue)) {
channel = ep->dma_channel;
dma_channel_release(ep);
} else
channel = 0;
use_ep(ep, UDC_EP_SEL);
/* halt only if nothing is left in the FIFO */
if (UDC_STAT_FLG_REG & UDC_NON_ISO_FIFO_EMPTY) {
UDC_CTRL_REG = UDC_SET_HALT;
status = 0;
} else
status = -EAGAIN;
deselect_ep();
/* reclaiming the channel restarts the paused queue */
if (channel)
dma_channel_claim(ep, channel);
} else {
use_ep(ep, 0);
UDC_CTRL_REG = ep->udc->clr_halt;
ep->ackwait = 0;
/* OUT endpoints: re-arm the RX FIFO after clearing halt */
if (!(ep->bEndpointAddress & USB_DIR_IN)) {
UDC_CTRL_REG = UDC_SET_FIFO_EN;
ep->ackwait = 1 + ep->double_buf;
}
}
}
done:
VDBG("%s %s halt stat %d\n", ep->ep.name,
value ? "set" : "clear", status);
spin_unlock_irqrestore(&ep->udc->lock, flags);
return status;
}
/* per-endpoint operations exported to the gadget core via usb_ep->ops */
static struct usb_ep_ops omap_ep_ops = {
.enable = omap_ep_enable,
.disable = omap_ep_disable,
.alloc_request = omap_alloc_request,
.free_request = omap_free_request,
.alloc_buffer = omap_alloc_buffer,
.free_buffer = omap_free_buffer,
.queue = omap_ep_queue,
.dequeue = omap_ep_dequeue,
.set_halt = omap_ep_set_halt,
// fifo_status ... report bytes in fifo
// fifo_flush ... flush fifo
};
/*-------------------------------------------------------------------------*/
/* gadget op: report the current USB frame number from the SOF register */
static int omap_get_frame(struct usb_gadget *gadget)
{
	u16 sof = UDC_SOF_REG;

	if (!(sof & UDC_TS_OK))
		return -EL2NSYNC;	/* timestamp not valid */
	return sof & UDC_TS;
}
/* gadget op: signal remote wakeup (suspended) or SRP (disconnected) */
static int omap_wakeup(struct usb_gadget *gadget)
{
	struct omap_udc	*udc = container_of(gadget, struct omap_udc, gadget);
	unsigned long	flags;
	int		status = -EHOSTUNREACH;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstat & UDC_SUS) {
		/* NOTE: OTG spec erratum says that OTG devices may
		 * issue wakeups without host enable.
		 */
		if (udc->devstat & (UDC_B_HNP_ENABLE | UDC_R_WK_OK)) {
			DBG("remote wakeup...\n");
			UDC_SYSCON2_REG = UDC_RMT_WKP;
			status = 0;
		}
	} else if (!(udc->devstat & UDC_ATT)) {
		/* NOTE: non-OTG systems may use SRP TOO... */
		if (udc->transceiver)
			status = otg_start_srp(udc->transceiver);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return status;
}
/* gadget op: reflect self/bus-powered status in SYSCON1 */
static int
omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct omap_udc	*udc = container_of(gadget, struct omap_udc, gadget);
	unsigned long	flags;
	u16		val;

	spin_lock_irqsave(&udc->lock, flags);
	/* clear the bit, then set it back if requested */
	val = UDC_SYSCON1_REG & ~UDC_SELF_PWR;
	if (is_selfpowered)
		val |= UDC_SELF_PWR;
	UDC_SYSCON1_REG = val;
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* the D+ pullup may be driven only when a gadget driver is bound,
 * software connect is requested, and a VBUS session is active
 */
static int can_pullup(struct omap_udc *udc)
{
	if (!udc->driver)
		return 0;
	if (!udc->softconnect)
		return 0;
	return udc->vbus_active != 0;
}
/* Drive the D+ pullup so the host can enumerate this device, and enable
 * device-state-change interrupts; called with udc->lock held (see
 * omap_vbus_session / omap_pullup).
 */
static void pullup_enable(struct omap_udc *udc)
{
/* mark the gadget device and its parent as powered */
udc->gadget.dev.parent->power.power_state = PMSG_ON;
udc->gadget.dev.power.power_state = PMSG_ON;
UDC_SYSCON1_REG |= UDC_PULLUP_EN;
#ifndef CONFIG_USB_OTG
/* without the OTG driver, report B-session valid by hand */
if (!cpu_is_omap15xx())
OTG_CTRL_REG |= OTG_BSESSVLD;
#endif
UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
}
/* Undo pullup_enable(): drop the D+ pullup so the host sees a
 * disconnect, leaving only device-state-change interrupts enabled;
 * called with udc->lock held.
 */
static void pullup_disable(struct omap_udc *udc)
{
#ifndef CONFIG_USB_OTG
/* without the OTG driver, clear B-session valid by hand */
if (!cpu_is_omap15xx())
OTG_CTRL_REG &= ~OTG_BSESSVLD;
#endif
UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
}
/*
* Called by whatever detects VBUS sessions: external transceiver
* driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
*/
static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct omap_udc	*udc = container_of(gadget, struct omap_udc, gadget);
	int		session = (is_active != 0);
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	VDBG("VBUS %s\n", is_active ? "on" : "off");
	udc->vbus_active = session;

	/* "software" detect, ignored if !VBUS_MODE_1510 */
	if (cpu_is_omap15xx()) {
		if (session)
			FUNC_MUX_CTRL_0_REG |= VBUS_CTRL_1510;
		else
			FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
	}

	/* pullup state follows driver-bound + softconnect + vbus */
	if (!can_pullup(udc))
		pullup_disable(udc);
	else
		pullup_enable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* gadget op: forward the configured current draw to the transceiver */
static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct omap_udc *udc = container_of(gadget, struct omap_udc, gadget);

	if (!udc->transceiver)
		return -EOPNOTSUPP;
	return otg_set_power(udc->transceiver, mA);
}
/* gadget op: software connect/disconnect request from the driver */
static int omap_pullup(struct usb_gadget *gadget, int is_on)
{
	struct omap_udc	*udc = container_of(gadget, struct omap_udc, gadget);
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->softconnect = (is_on != 0);
	/* pullup state follows driver-bound + softconnect + vbus */
	if (!can_pullup(udc))
		pullup_disable(udc);
	else
		pullup_enable(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* controller-level operations exported via usb_gadget->ops */
static struct usb_gadget_ops omap_gadget_ops = {
.get_frame = omap_get_frame,
.wakeup = omap_wakeup,
.set_selfpowered = omap_set_selfpowered,
.vbus_session = omap_vbus_session,
.vbus_draw = omap_vbus_draw,
.pullup = omap_pullup,
};
/*-------------------------------------------------------------------------*/
/* dequeue ALL requests; caller holds udc->lock */
static void nuke(struct omap_ep *ep, int status)
{
struct omap_req *req;
ep->stopped = 1;
/* cancel any in-flight DMA before touching the FIFO */
if (use_dma && ep->dma_channel)
dma_channel_release(ep);
use_ep(ep, 0);
UDC_CTRL_REG = UDC_CLR_EP;
/* halt non-control, non-ISO endpoints while they are flushed */
if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
UDC_CTRL_REG = UDC_SET_HALT;
/* complete every queued request with the given status */
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct omap_req, queue);
done(ep, req, status);
}
}
/* caller holds udc->lock */
static void udc_quiesce(struct omap_udc *udc)
{
struct omap_ep *ep;
/* stop reporting a connection; gadget is no longer active */
udc->gadget.speed = USB_SPEED_UNKNOWN;
/* flush ep0 first, then every other endpoint, with -ESHUTDOWN */
nuke(&udc->ep[0], -ESHUTDOWN);
list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
nuke(ep, -ESHUTDOWN);
}
/*-------------------------------------------------------------------------*/
/* refresh the gadget's HNP/SRP flags from DEVSTAT (OTG devices only) */
static void update_otg(struct omap_udc *udc)
{
	u16 devstat = 0;

	if (!udc->gadget.is_otg)
		return;

	/* sample DEVSTAT only while the ID pin reports B-device */
	if (OTG_CTRL_REG & OTG_ID)
		devstat = UDC_DEVSTAT_REG;

	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);

	/* Enable HNP early, avoiding races on suspend irq path.
	 * ASSUMES OTG state machine B_BUS_REQ input is true.
	 */
	if (udc->gadget.b_hnp_enable)
		OTG_CTRL_REG = (OTG_CTRL_REG | OTG_B_HNPEN | OTG_B_BUSREQ)
				& ~OTG_PULLUP;
}
static void ep0_irq(struct omap_udc *udc, u16 irq_src)
{
struct omap_ep *ep0 = &udc->ep[0];
struct omap_req *req = NULL;
ep0->irqs++;
/* Clear any pending requests and then scrub any rx/tx state
* before starting to handle the SETUP request.
*/
if (irq_src & UDC_SETUP) {
u16 ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
nuke(ep0, 0);
if (ack) {
UDC_IRQ_SRC_REG = ack;
irq_src = UDC_SETUP;
}
}
/* IN/OUT packets mean we're in the DATA or STATUS stage.
* This driver only uses protocol stalls (ep0 never halts),
* and if we got this far the gadget driver already had a
* chance to stall. Tries to be forgiving of host oddities.
*
* NOTE: the last chance gadget drivers have to stall control
* requests is during their request completion callback.
*/
if (!list_empty(&ep0->queue))
req = container_of(ep0->queue.next, struct omap_req, queue);
/* IN == TX to host */
if (irq_src & UDC_EP0_TX) {
int stat;
/* NOTE(review): source truncated here — the remainder of ep0_irq()
 * (and the rest of the file) is missing from this copy.
 */