/* gserial.c */
		return( 0 );
	}

	if( config == dev->dev_config )
		return( 0 );

	gs_reset_config( dev );

	if( config == GS_NO_CONFIG_ID )
		return( 0 );

	if( config != GS_BULK_CONFIG_ID )
		return( -EINVAL );

	/* device specific optimizations */
	if (gadget_is_net2280(gadget))
		net2280_set_fifo_mode(gadget, 1);

	gadget_for_each_ep( ep, gadget ) {

		if( strcmp( ep->name, EP_IN_NAME ) == 0 ) {
			ret = usb_ep_enable( ep,
				gadget->speed == USB_SPEED_HIGH
				? &gs_highspeed_in_desc
				: &gs_fullspeed_in_desc );
			if( ret == 0 ) {
				ep->driver_data = dev;
				dev->dev_in_ep = ep;
			} else {
				printk( KERN_ERR
					"gs_set_config: cannot enable in endpoint %s, ret=%d\n",
					ep->name, ret );
				gs_reset_config( dev );
				return( ret );
			}
		} else if( strcmp( ep->name, EP_OUT_NAME ) == 0 ) {
			ret = usb_ep_enable( ep,
				gadget->speed == USB_SPEED_HIGH
				? &gs_highspeed_out_desc
				: &gs_fullspeed_out_desc );
			if( ret == 0 ) {
				ep->driver_data = dev;
				dev->dev_out_ep = ep;
			} else {
				printk( KERN_ERR
					"gs_set_config: cannot enable out endpoint %s, ret=%d\n",
					ep->name, ret );
				gs_reset_config( dev );
				return( ret );
			}
		}

	}

	if( dev->dev_in_ep == NULL || dev->dev_out_ep == NULL ) {
		gs_reset_config( dev );
		printk( KERN_ERR "gs_set_config: cannot find endpoints\n" );
		return( -ENODEV );
	}

	/* allocate and queue read requests */
	ep = dev->dev_out_ep;
	for( i=0; i<read_q_size && ret == 0; i++ ) {
		if( (req=gs_alloc_req( ep, ep->maxpacket, GFP_ATOMIC )) ) {
			req->complete = gs_read_complete;
			if( (ret=usb_ep_queue( ep, req, GFP_ATOMIC )) ) {
				printk( KERN_ERR
					"gs_set_config: cannot queue read request, ret=%d\n",
					ret );
			}
		} else {
			gs_reset_config( dev );
			printk( KERN_ERR
				"gs_set_config: cannot allocate read requests\n" );
			return( -ENOMEM );
		}
	}

	/* allocate write requests, and put on free list */
	ep = dev->dev_in_ep;
	for( i=0; i<write_q_size; i++ ) {
		if( (req_entry=gs_alloc_req_entry( ep, ep->maxpacket,
				GFP_ATOMIC )) ) {
			req_entry->re_req->complete = gs_write_complete;
			list_add( &req_entry->re_entry, &dev->dev_req_list );
		} else {
			gs_reset_config( dev );
			printk( KERN_ERR
				"gs_set_config: cannot allocate write requests\n" );
			return( -ENOMEM );
		}
	}

	dev->dev_config = config;

	printk( KERN_INFO "gs_set_config: %s configured for %s speed\n",
		GS_LONG_NAME,
		gadget->speed == USB_SPEED_HIGH ? "high" : "full" );

	return( 0 );
}

/*
 * gs_reset_config
 *
 * Mark the device as not configured, disable all endpoints,
 * which forces completion of pending I/O and frees queued
 * requests, and free the remaining write requests on the
 * free list.
 *
 * The device lock must be held when calling this function.
 */
static void gs_reset_config( struct gs_dev *dev )
{
	struct gs_req_entry *req_entry;

	if( dev == NULL ) {
		printk( KERN_ERR "gs_reset_config: NULL device pointer\n" );
		return;
	}

	if( dev->dev_config == GS_NO_CONFIG_ID )
		return;

	dev->dev_config = GS_NO_CONFIG_ID;

	/* free write requests on the free list */
	while( !list_empty( &dev->dev_req_list ) ) {
		req_entry = list_entry( dev->dev_req_list.next,
			struct gs_req_entry, re_entry );
		list_del( &req_entry->re_entry );
		gs_free_req_entry( dev->dev_in_ep, req_entry );
	}

	/* disable endpoints, forcing completion of pending i/o; */
	/* completion handlers free their requests in this case */
	if( dev->dev_in_ep ) {
		usb_ep_disable( dev->dev_in_ep );
		dev->dev_in_ep = NULL;
	}
	if( dev->dev_out_ep ) {
		usb_ep_disable( dev->dev_out_ep );
		dev->dev_out_ep = NULL;
	}
}

/*
 * gs_build_config_desc
 *
 * Builds a config descriptor in the given buffer and returns the
 * length, or a negative error number.
 */
static int gs_build_config_desc( u8 *buf, enum usb_device_speed speed,
	u8 type, unsigned int index )
{
	int high_speed;
	int len = USB_DT_CONFIG_SIZE + USB_DT_INTERFACE_SIZE
		+ GS_NUM_ENDPOINTS * USB_DT_ENDPOINT_SIZE;

	/* only one config */
	if( index != 0 )
		return( -EINVAL );

	memcpy( buf, &gs_config_desc, USB_DT_CONFIG_SIZE );
	((struct usb_config_descriptor *)buf)->bDescriptorType = type;
	((struct usb_config_descriptor *)buf)->wTotalLength
		= __constant_cpu_to_le16( len );
	buf += USB_DT_CONFIG_SIZE;

	memcpy( buf, &gs_interface_desc, USB_DT_INTERFACE_SIZE );
	buf += USB_DT_INTERFACE_SIZE;

	/* other speed switches high and full speed */
	high_speed = (speed == USB_SPEED_HIGH);
	if( type == USB_DT_OTHER_SPEED_CONFIG )
		high_speed = !high_speed;

	memcpy( buf,
		high_speed ? &gs_highspeed_in_desc : &gs_fullspeed_in_desc,
		USB_DT_ENDPOINT_SIZE );
	buf += USB_DT_ENDPOINT_SIZE;

	memcpy( buf,
		high_speed ? &gs_highspeed_out_desc : &gs_fullspeed_out_desc,
		USB_DT_ENDPOINT_SIZE );

	return( len );
}
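/*
 * Worked example (illustration only, not part of the original source):
 * assuming the standard USB 2.0 chapter 9 descriptor sizes
 * -- USB_DT_CONFIG_SIZE = 9, USB_DT_INTERFACE_SIZE = 9,
 * USB_DT_ENDPOINT_SIZE = 7 -- and GS_NUM_ENDPOINTS = 2 (the bulk in and
 * out endpoints enabled in gs_set_config() above), the length computed
 * by gs_build_config_desc() is
 *
 *	len = 9 + 9 + 2 * 7 = 32 bytes
 *
 * which is the wTotalLength stored in the config descriptor and the
 * value returned to the caller.
 */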
/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
static struct usb_request *gs_alloc_req( struct usb_ep *ep, unsigned int len,
	int kmalloc_flags )
{
	struct usb_request *req;

	if( ep == NULL )
		return( NULL );

	req = usb_ep_alloc_request( ep, kmalloc_flags );

	if( req != NULL ) {
		req->length = len;
		req->buf = usb_ep_alloc_buffer( ep, len, &req->dma,
			kmalloc_flags );
		if( req->buf == NULL ) {
			usb_ep_free_request( ep, req );
			return( NULL );
		}
	}

	return( req );
}

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
static void gs_free_req( struct usb_ep *ep, struct usb_request *req )
{
	if( ep != NULL && req != NULL ) {
		if( req->buf != NULL )
			usb_ep_free_buffer( ep, req->buf, req->dma,
				req->length );
		usb_ep_free_request( ep, req );
	}
}

/*
 * gs_alloc_req_entry
 *
 * Allocates a request and its buffer, using the given
 * endpoint, buffer len, and kmalloc flags.
 */
static struct gs_req_entry *gs_alloc_req_entry( struct usb_ep *ep,
	unsigned len, int kmalloc_flags )
{
	struct gs_req_entry *req;

	req = kmalloc( sizeof(struct gs_req_entry), kmalloc_flags );
	if( req == NULL )
		return( NULL );

	req->re_req = gs_alloc_req( ep, len, kmalloc_flags );
	if( req->re_req == NULL ) {
		kfree( req );
		return( NULL );
	}

	req->re_req->context = req;

	return( req );
}

/*
 * gs_free_req_entry
 *
 * Frees a request and its buffer.
 */
static void gs_free_req_entry( struct usb_ep *ep, struct gs_req_entry *req )
{
	if( ep != NULL && req != NULL ) {
		if( req->re_req != NULL )
			gs_free_req( ep, req->re_req );
		kfree( req );
	}
}

/*
 * gs_alloc_ports
 *
 * Allocate all ports and set the gs_dev struct to point to them.
 * Return 0 if successful, or a negative error number.
 *
 * The device lock is normally held when calling this function.
 */
static int gs_alloc_ports( struct gs_dev *dev, int kmalloc_flags )
{
	int i;
	struct gs_port *port;

	if( dev == NULL )
		return( -EIO );

	for( i=0; i<GS_NUM_PORTS; i++ ) {
		if( (port=(struct gs_port *)kmalloc( sizeof(struct gs_port),
				kmalloc_flags )) == NULL )
			return( -ENOMEM );

		memset( port, 0, sizeof( struct gs_port ) );
		port->port_dev = dev;
		port->port_num = i;
		spin_lock_init( &port->port_lock );
		init_waitqueue_head( &port->port_write_wait );

		dev->dev_port[i] = port;
	}

	return( 0 );
}
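/*
 * Illustration only, compiled out: a minimal sketch of how the request
 * helpers above are combined, mirroring the read-request setup done in
 * gs_set_config().  The function name is made up for this example; the
 * endpoint and completion handler come from the surrounding driver.
 */
#if 0
static int gs_example_queue_one_read( struct gs_dev *dev )
{
	struct usb_ep *ep = dev->dev_out_ep;
	struct usb_request *req;
	int ret;

	/* allocate a request plus a buffer sized to one packet */
	req = gs_alloc_req( ep, ep->maxpacket, GFP_ATOMIC );
	if( req == NULL )
		return( -ENOMEM );

	/* the completion handler requeues or frees the request later */
	req->complete = gs_read_complete;

	ret = usb_ep_queue( ep, req, GFP_ATOMIC );
	if( ret != 0 )
		gs_free_req( ep, req );	/* undo the allocation on failure */

	return( ret );
}
#endif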
/*
 * gs_free_ports
 *
 * Free all closed ports.  Open ports are disconnected by
 * freeing their write buffers, setting their device pointers
 * and the pointers to them in the device to NULL.  These
 * ports will be freed when closed.
 *
 * The device lock is normally held when calling this function.
 */
static void gs_free_ports( struct gs_dev *dev )
{
	int i;
	unsigned long flags;
	struct gs_port *port;

	if( dev == NULL )
		return;

	for( i=0; i<GS_NUM_PORTS; i++ ) {
		if( (port=dev->dev_port[i]) != NULL ) {
			dev->dev_port[i] = NULL;

			spin_lock_irqsave( &port->port_lock, flags );

			if( port->port_write_buf != NULL ) {
				gs_buf_free( port->port_write_buf );
				port->port_write_buf = NULL;
			}

			if( port->port_open_count > 0 || port->port_in_use ) {
				port->port_dev = NULL;
				wake_up_interruptible( &port->port_write_wait );
				wake_up_interruptible( &port->port_tty->read_wait );
				wake_up_interruptible( &port->port_tty->write_wait );
			} else {
				kfree( port );
			}

			spin_unlock_irqrestore( &port->port_lock, flags );
		}
	}
}

/* Circular Buffer */

/*
 * gs_buf_alloc
 *
 * Allocate a circular buffer and all associated memory.
 */
static struct gs_buf *gs_buf_alloc( unsigned int size, int kmalloc_flags )
{
	struct gs_buf *gb;

	if( size == 0 )
		return( NULL );

	gb = (struct gs_buf *)kmalloc( sizeof(struct gs_buf), kmalloc_flags );
	if( gb == NULL )
		return( NULL );

	gb->buf_buf = kmalloc( size, kmalloc_flags );
	if( gb->buf_buf == NULL ) {
		kfree( gb );
		return( NULL );
	}

	gb->buf_size = size;
	gb->buf_get = gb->buf_put = gb->buf_buf;

	return( gb );
}

/*
 * gs_buf_free
 *
 * Free the buffer and all associated memory.
 */
void gs_buf_free( struct gs_buf *gb )
{
	if( gb != NULL ) {
		if( gb->buf_buf != NULL )
			kfree( gb->buf_buf );
		kfree( gb );
	}
}

/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.
 */
void gs_buf_clear( struct gs_buf *gb )
{
	if( gb != NULL )
		gb->buf_get = gb->buf_put;
		/* equivalent to a get of all data available */
}

/*
 * gs_buf_data_avail
 *
 * Return the number of bytes of data available in the circular
 * buffer.
 */
unsigned int gs_buf_data_avail( struct gs_buf *gb )
{
	if( gb != NULL )
		return( (gb->buf_size + gb->buf_put - gb->buf_get)
			% gb->buf_size );
	else
		return( 0 );
}

/*
 * gs_buf_space_avail
 *
 * Return the number of bytes of space available in the circular
 * buffer.
 */
unsigned int gs_buf_space_avail( struct gs_buf *gb )
{
	if( gb != NULL )
		return( (gb->buf_size + gb->buf_get - gb->buf_put - 1)
			% gb->buf_size );
	else
		return( 0 );
}
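/*
 * Worked example (illustration only): with buf_size = 8, buf_put at
 * offset 2 and buf_get at offset 6 into buf_buf, the helpers above give
 *
 *	data_avail  = (8 + 2 - 6) % 8     = 4 bytes of queued data
 *	space_avail = (8 + 6 - 2 - 1) % 8 = 3 bytes of free space
 *
 * One byte is always left unused (the "- 1" in gs_buf_space_avail) so
 * that a full buffer (put just behind get) can be distinguished from an
 * empty one (put == get).
 */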
/*
 * gs_buf_put
 *
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
unsigned int gs_buf_put( struct gs_buf *gb, const char *buf,
	unsigned int count )
{
	unsigned int len;

	if( gb == NULL )
		return( 0 );

	len = gs_buf_space_avail( gb );
	if( count > len )
		count = len;

	if( count == 0 )
		return( 0 );

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if( count > len ) {
		memcpy( gb->buf_put, buf, len );
		memcpy( gb->buf_buf, buf+len, count - len );
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy( gb->buf_put, buf, count );
		if( count < len )
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return( count );
}

/*
 * gs_buf_get
 *
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
unsigned int gs_buf_get( struct gs_buf *gb, char *buf, unsigned int count )
{
	unsigned int len;

	if( gb == NULL )
		return( 0 );

	len = gs_buf_data_avail( gb );
	if( count > len )
		count = len;

	if( count == 0 )
		return( 0 );

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if( count > len ) {
		memcpy( buf, gb->buf_get, len );
		memcpy( buf+len, gb->buf_buf, count - len );
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy( buf, gb->buf_get, count );
		if( count < len )
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return( count );
}
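/*
 * Illustration only, compiled out: a minimal sketch of the circular
 * buffer helpers above, including the wrap-around case handled by the
 * split memcpy() calls in gs_buf_put() and gs_buf_get().  The function
 * name and the 8-byte size are made up for this example.
 */
#if 0
static void gs_example_buf_wrap( void )
{
	struct gs_buf *gb;
	char out[8];
	unsigned int n;

	gb = gs_buf_alloc( 8, GFP_KERNEL );
	if( gb == NULL )
		return;

	/* fill most of the buffer, then drain it to advance the pointers */
	n = gs_buf_put( gb, "abcdef", 6 );	/* n == 6 */
	n = gs_buf_get( gb, out, 6 );		/* n == 6, put/get at offset 6 */

	/* this put wraps: 2 bytes fit at the end, 2 go to the start */
	n = gs_buf_put( gb, "wxyz", 4 );	/* n == 4 */
	n = gs_buf_get( gb, out, sizeof(out) );	/* n == 4, "wxyz" recovered */

	gs_buf_free( gb );
}
#endif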