📄 rawtx.c
字号:
return -EINVAL; hhlen = 0; if ( priv->proto != RAWTX_PROTO_RAW ) hhlen = priv->dev->hard_header_len; nhlen = 0; if ( priv->proto == RAWTX_PROTO_UDPIP || priv->proto == RAWTX_PROTO_UDPIP_NOSUM ) nhlen = UDP_IP_HEADER_LEN; /* Check packet length */ if (count + nhlen > dev->mtu) return -EMSGSIZE; /* Allocate sk_buff */#ifdef WITH_KIOBUF skb = alloc_skb(hhlen + nhlen, GFP_KERNEL);#else skb = alloc_skb(hhlen + nhlen + count, GFP_KERNEL);#endif if (skb == NULL) { printk(KERN_ERR MODNAME "alloc_skb failed\n"); return -ENOBUFS; } /* Allocate a position in the transmission queue */// Log queue length// printk(KERN_NOTICE MODNAME "rawtx_write: queue_sem=%d\n",// atomic_read(&priv->queue_sem.count)); down(&priv->queue_sem); /* Fill in skb structure */ skb->sk = priv; /* Can we do this ?? */ skb->destructor = rawtx_kiobuf_skb_destructor; skb->priority = 0; skb->dst = NULL; skb->dev = dev; skb->protocol = htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; skb_reserve(skb, hhlen + nhlen);#ifdef WITH_KIOBUF /* Only one thread at a time may use the global kiobuf */ down(&global_iobuf_mutex); iobuf = global_iobuf; /* Map data in the kiobuf */ err = map_user_kiobuf(WRITE, iobuf, (unsigned long) buf, count); if (err) { printk(KERN_ERR MODNAME "map_user_kiobuf failed"); up(&global_iobuf_mutex); kfree(skb); return err; } /* Setup packet fragment(s) */ setup_fragments(iobuf, skb); /* Release kiobuf */ unmap_kiobuf(iobuf); iobuf = NULL; up(&global_iobuf_mutex);#else /* Copy data to skb */ skb->h.raw = skb_put(skb, count); if ( copy_from_user(skb->h.raw, buf, count) ) { printk(KERN_ERR MODNAME "rawtx_write: copy_from_user failed\n"); kfree(skb); return -EFAULT; }#endif /* Build IP header */ if ( priv->proto == RAWTX_PROTO_UDPIP || priv->proto == RAWTX_PROTO_UDPIP_NOSUM ) { skb_push(skb, UDP_IP_HEADER_LEN); build_udp_ip_header(skb->data, count, &priv->srcaddr, &priv->dstaddr, (priv->proto == RAWTX_PROTO_UDPIP) ); } /* Add ethernet header */ if ( priv->proto != RAWTX_PROTO_RAW ) { err = 
dev->hard_header(skb, dev, ntohs(skb->protocol),
				       priv->daddr, priv->saddr,
				       nhlen + count);
		if (err < 0) {
			printk(KERN_ERR MODNAME "hard_header failed");
			kfree_skb(skb);
			return err;
		}
	}

// tell them what we're doing
// printk(KERN_NOTICE MODNAME "rawtx_write: nr_frags=%d, len=%d, data_len=%d, frags[0].size=%d, frags[0].page=%p\n",
// skb_shinfo(skb)->nr_frags, skb->len, skb->data_len,
// skb_shinfo(skb)->frags[0].size, page_address(skb_shinfo(skb)->frags[0].page));

	/* Send packet */
	err = dev_queue_xmit(skb);

	/* On success report the payload size written; xmit failures are
	 * reported to the caller as a zero-byte write. */
	return (err == 0) ? count : 0;
}

/*
 * Modify the queue length limit.
 * This is not thread safe.
 *
 * Shrinking consumes semaphore slots one at a time (each down() may
 * block until an in-flight packet is freed); growing releases slots.
 * Called with newlen == 0 on release to drain the queue.
 */
static int set_tx_queue_len(struct rawtx_private_data *priv, int newlen)
{
	while (priv->queue_len > newlen) {
		down(&priv->queue_sem);
		priv->queue_len--;
	}
	while (priv->queue_len < newlen) {
		up(&priv->queue_sem);
		priv->queue_len++;
	}
	return 0;
}

/*
 * ioctl handler: configures the target device, protocol mode,
 * link-level and IP-level addresses, queue length, and triggers
 * transmission of a buffer from the mmap'ed pool.
 */
static int rawtx_ioctl(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg)
{
	struct rawtx_private_data *priv = filp->private_data;
	char devicename[IFNAMSIZ];
	struct net_device *dev;
	int err;

	switch (cmd) {
	case RAWTX_IOCTL_SETDEVICE:
		/* NOTE(review): strncpy_from_user() does not guarantee
		 * NUL-termination when the user string is IFNAMSIZ bytes
		 * or longer; devicename may be unterminated here —
		 * consider forcing devicename[IFNAMSIZ-1] = '\0'. */
		err = strncpy_from_user(devicename, (char *) arg,
					sizeof(devicename));
		if (err < 0)
			return err;
		/* dev_get_by_name() takes a reference; dropped either
		 * below (on replacement) or in rawtx_release(). */
		dev = dev_get_by_name(devicename);
		if (dev == NULL)
			return -ENOENT;
		if (priv->dev != NULL)
			dev_put(priv->dev);
		priv->dev = dev;
		/* Default source MAC is the device's own address. */
		memcpy(priv->saddr, dev->dev_addr, sizeof(priv->saddr));
		return 0;

	case RAWTX_IOCTL_SETPROTO:
		if ( arg != RAWTX_PROTO_ETHERNET &&
		     arg != RAWTX_PROTO_UDPIP &&
		     arg != RAWTX_PROTO_UDPIP_NOSUM )
			return -EINVAL;
		priv->proto = arg;
		return 0;

	case RAWTX_IOCTL_SETSADDR:
		/* Source link-level (MAC) address. */
		if ( copy_from_user(priv->saddr, (void *) arg,
				    sizeof(priv->saddr)) )
			return -EFAULT;
		return 0;

	case RAWTX_IOCTL_SETDADDR:
		/* Destination link-level (MAC) address. */
		if ( copy_from_user(priv->daddr, (void *) arg,
				    sizeof(priv->daddr)) )
			return -EFAULT;
		return 0;

	case RAWTX_IOCTL_SETSRCADDR:
		/* Source IP/UDP address for the built UDP/IP header. */
		if ( copy_from_user(&priv->srcaddr, (void *) arg,
				    sizeof(priv->srcaddr)) )
			return -EFAULT;
		return 0;

	case RAWTX_IOCTL_SETDSTADDR:
		/* Destination IP/UDP address for the built UDP/IP header. */
		if (
copy_from_user(&priv->dstaddr, (void *) arg,
			    sizeof(priv->dstaddr)) )
			return -EFAULT;
		return 0;

	case RAWTX_IOCTL_SETQUEUELEN:
		/* Bound the transmit queue depth; 0 is reserved for the
		 * drain performed in rawtx_release(). */
		if (arg < 1 || arg > MAX_TXQUEUE_LEN)
			return -EINVAL;
		return set_tx_queue_len(priv, arg);

	case RAWTX_IOCTL_SENDBUFFER:
		return rawtx_send_buffer(priv, arg);
	}

	return -EINVAL;
}

/*
 * Allocate skb buffer pool and map it into process memory space.
 *
 * First mmap() allocates the pool (size taken from the VMA, capped at
 * RAWTX_DBUFFER_MAX_SIZE) and marks its pages reserved so the 2.4
 * remap_page_range() will map them; later mmap() calls may only remap
 * the existing pool at up to its original size.
 */
static int rawtx_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct rawtx_private_data *priv = filp->private_data;
	unsigned long start = (unsigned long) vma->vm_start;
	unsigned long size = (unsigned long) (vma->vm_end - vma->vm_start);
	unsigned long pos, err;

	if (priv->dbuffer == NULL) {
		/* Must allocate new buffer */
		mem_map_t *page, *lastpage;

		/* Check requested size */
		if (size > RAWTX_DBUFFER_MAX_SIZE)
			return -EINVAL;

		/* Allocate buffer */
		priv->dbuffer = kmalloc(size, GFP_KERNEL);
		if (priv->dbuffer == NULL) {
			printk(KERN_ERR MODNAME "Cannot allocate skb buffer pool.\n");
			return -ENOMEM;
		}
		priv->dbuffer_size = size;

		/* Loop through buffer pages and lock them in main memory.
		 * NOTE(review): if dbuffer + size ends mid-page, the final
		 * partial page is excluded here (page < lastpage) yet may
		 * still be remapped below — confirm size is always a
		 * multiple of PAGE_SIZE. */
		lastpage = virt_to_page(priv->dbuffer + priv->dbuffer_size);
		for (page = virt_to_page(priv->dbuffer); page < lastpage; page++)
			mem_map_reserve(page);
	}

	/* Check mmap size */
	if (size > priv->dbuffer_size)
		return -EINVAL;

	/* Loop through buffer pages and map them into process space
	 * (2.4-style remap_page_range, physical address variant). */
	for (pos = 0; pos < size; pos += PAGE_SIZE) {
		unsigned long page;
		page = virt_to_phys(((void *)priv->dbuffer) + pos);
		err = remap_page_range(start + pos, page, PAGE_SIZE,
				       PAGE_SHARED);
		if (err < 0)
			return err;
	}

	return 0;
}

/*
 * Open handler: allocate and initialize per-descriptor private data.
 *
 * NOTE(review): saddr/daddr/srcaddr/dstaddr and dbuffer_size are left
 * uninitialized here; they are expected to be set via ioctl / mmap
 * before use — a memset of priv would be safer.
 */
static int rawtx_open(struct inode *inode, struct file *filp)
{
	struct rawtx_private_data *priv;

	priv = kmalloc(sizeof(struct rawtx_private_data), GFP_KERNEL);
	if (priv == NULL) {
		printk(KERN_ERR MODNAME "Cannot allocate rawtx_private_data\n");
		return -ENOMEM;
	}
	filp->private_data = priv;

	priv->dev = NULL;
	priv->proto = RAWTX_PROTO_ETHERNET;
	priv->queue_len = 1;
	priv->dbuffer = NULL;
	/* Semaphore counts free transmit-queue slots. */
	sema_init(&priv->queue_sem,
priv->queue_len); MOD_INC_USE_COUNT; printk(KERN_INFO MODNAME "opened\n"); return 0;}/* * Wait until transmission queue is empty, release memory mapped * buffer, free private data, update module use count. * Called when process closes the device. */static int rawtx_release(struct inode *inode, struct file *filp){ struct rawtx_private_data *priv = filp->private_data; set_tx_queue_len(priv, 0); if (priv->dbuffer != NULL) kfree(priv->dbuffer); if (priv->dev != NULL) dev_put(priv->dev); kfree(priv); printk(KERN_INFO MODNAME "released\n"); MOD_DEC_USE_COUNT; return 0;}/* Device file operations structure */static struct file_operations rawtx_fops = { write: rawtx_write, ioctl: rawtx_ioctl, mmap: rawtx_mmap, open: rawtx_open, release: rawtx_release};int rawtx_init(void){ int err; printk(KERN_INFO MODNAME "Initializing.\n"); /* Allocate kiobuf */ if ( alloc_kiovec(1, &global_iobuf) ) { printk(KERN_ERR MODNAME "Cannot allocate kiobuf\n"); return -ENOMEM; } /* Allocate master sk_buff */ master_skb = alloc_skb(0, GFP_KERNEL); if (master_skb == NULL) { printk(KERN_ERR MODNAME "Cannot allocate master skb.\n"); free_kiovec(1, &global_iobuf); return -ENOBUFS; } /* Register character device */ err = register_chrdev(RAWTX_MAJOR, "rawtx", &rawtx_fops); if (err < 0) { printk(KERN_ERR MODNAME "Cannot register character device %d.\n", RAWTX_MAJOR); kfree_skb(master_skb); free_kiovec(1, &global_iobuf); return err; } printk(KERN_INFO MODNAME "Ready.\n"); return 0;}void rawtx_cleanup(void){ printk(KERN_INFO MODNAME "Cleaning up.\n"); unregister_chrdev(RAWTX_MAJOR, "rawtx"); kfree_skb(master_skb); free_kiovec(1, &global_iobuf); printk(KERN_INFO MODNAME "Done.\n");}module_init(rawtx_init);module_exit(rawtx_cleanup);/* end */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -