📄 dev.c
/*
 * dev.c
 * linqianghe@163.com
 * 2006-09-18
 */
#include "dev_dummy.h"
#include "dev.h"
#include "log.h"
#include <linux/netpoll.h>
#include <linux/list.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <linux/inetdevice.h>

extern struct neigh_table myarp_tbl;

static gifconf_func_t *gifconf_list[NPROTO];

static DEFINE_SPINLOCK(myptype_lock);
static struct list_head myptype_base[16];
static struct list_head myptype_all;

DEFINE_PER_CPU(struct softnet_data, mysoftnet_data) = { NULL };
DEFINE_PER_CPU(struct netif_rx_stats, mynetdev_rx_stat) = { 0, };

int mynetdev_max_backlog = 1000;
int mynetdev_budget = 300;
int myweight_p = 64;		/* old backlog weight */
int mynetdev_nit;

/* Register a packet handler: ETH_P_ALL taps go on myptype_all, everything
 * else is hashed into myptype_base[]. */
void mydev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&myptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		mynetdev_nit++;
		list_add_rcu(&pt->list, &myptype_all);
	} else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &myptype_base[hash]);
	}
	spin_unlock_bh(&myptype_lock);
}

void __mydev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&myptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		mynetdev_nit--;
		head = &myptype_all;
	} else
		head = &myptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}
	PR_WARN("dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&myptype_lock);
}

void mydev_remove_pack(struct packet_type *pt)
{
	__mydev_remove_pack(pt);
	synchronize_net();
}

static inline void __mynetif_rx_schedule(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	dev_hold(dev);
	list_add_tail(&dev->poll_list,
		      &__get_cpu_var(mysoftnet_data).poll_list);
	if (dev->quota < 0)
		dev->quota += dev->weight;
	else
		dev->quota = dev->weight;
	__raise_softirq_irqoff(MYNET_RX_SOFTIRQ);
	local_irq_restore(flags);
}

static inline void mynetif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__mynetif_rx_schedule(dev);
}

static atomic_t mynetstamp_needed = ATOMIC_INIT(0);

void __mynet_timestamp(struct sk_buff *skb)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	skb_set_timestamp(skb, &tv);
}

static inline void mynet_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&mynetstamp_needed))
		__mynet_timestamp(skb);
	else {
		skb->tstamp.off_sec = 0;
		skb->tstamp.off_usec = 0;
	}
}

/* Queue an incoming skb on this CPU's backlog queue and raise the
 * MYNET_RX softirq. */
int mynetif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* test hook: silently drop IP frames arriving on "myeth0" */
	if (strcmp(skb->dev->name, "myeth0") == 0 &&
	    skb->protocol == __constant_htons(ETH_P_IP)) {
		kfree_skb(skb);		/* an sk_buff must be freed with kfree_skb(), not kfree() */
		return 0;
	}

	if (!skb->tstamp.off_sec)
		mynet_timestamp(skb);

	local_irq_save(flags);

	queue = &__get_cpu_var(mysoftnet_data);
	__get_cpu_var(mynetdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= mynetdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}
		mynetif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	__get_cpu_var(mynetdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

static __inline__ int mydeliver_skb(struct sk_buff *skb,
				    struct packet_type *pt_prev,
				    struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline struct net_device *myskb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master)
		skb->dev = dev->master;

	return dev;
}

/* Deliver an skb to every matching protocol handler under rcu_read_lock(). */
int mynetif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	int ret = 0;
	unsigned short type;

	if (!skb->tstamp.off_sec)
		mynet_timestamp(skb);

	if (!skb->input_dev)
		skb->input_dev = skb->dev;

	__get_cpu_var(mynetdev_rx_stat).total++;

	orig_dev = myskb_bond(skb);

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;
	rcu_read_lock();

	list_for_each_entry_rcu(ptype, &myptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = mydeliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &myptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = mydeliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		ret = NET_RX_DROP;
	}

	rcu_read_unlock();
	return ret;
}

/* poll() routine of the per-CPU backlog device: drain input_pkt_queue
 * within the quota/budget/jiffy limits. */
static int myprocess_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(mysoftnet_data);
	unsigned long start_time = jiffies;

	backlog_dev->weight = myweight_p;
	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		mynetif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;
	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	local_irq_enable();
	return 0;
}

static void mynet_tx_action(struct softirq_action *h)
{
}

/* RX softirq: walk this CPU's poll list and invoke each device's ->poll()
 * until the budget or the jiffy limit is exhausted. */
static void mynet_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(mysoftnet_data);
	unsigned long start_time = jiffies;
	int budget = mynetdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);
		have = netpoll_poll_lock(dev);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			netpoll_poll_unlock(have);
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			netpoll_poll_unlock(have);
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(mynetdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(MYNET_RX_SOFTIRQ);
	goto out;
}

int myregister_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}

void mydev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

unsigned mydev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | IFF_RUNNING)) |
		(dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));

	if (netif_running(dev) && netif_carrier_ok(dev))
		flags |= IFF_RUNNING;

	return flags;
}

/* Per-interface ioctl worker, called with the device found by name. */
static int mydev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	int err = 0;
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:
		ifr->ifr_flags = mydev_get_flags(dev);
		return 0;

	case SIOCGIFMTU:
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = 0;
		return 0;

	case SIOCSIFFLAGS:
		return -ENOIOCTLCMD;

	case SIOCSIFMTU:
		return -ENOIOCTLCMD;

	case SIOCSIFMETRIC:
		return -EOPNOTSUPP;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0,
			       sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data,
				   (size_t)dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq = dev->irq;
		ifr->ifr_map.dma = dev->dma;
		ifr->ifr_map.port = dev->if_port;
		return 0;

	case SIOCSIFMAP:
		if (dev->set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev->set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCSIFHWADDR:
		return -ENOIOCTLCMD;

	case SIOCSIFNAME:
		return -ENOIOCTLCMD;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
		return -ENOIOCTLCMD;

	default:
		if ((cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (dev->do_ioctl) {
				if (netif_device_present(dev))
					err = dev->do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
	}
	return err;
}

static int mydev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	total = 0;
	for (dev = dev_base; dev; dev = dev->next) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	ifc.ifc_len = total;

	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

static int mydev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/* Entry point for interface ioctls, dispatching to mydev_ifsioc() under
 * the appropriate locks. */
int mydev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	if (cmd == SIOCGIFCONF) {
		rtnl_shlock();
		ret = mydev_ifconf((char __user *)arg);
		rtnl_shunlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return mydev_ifname((struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ - 1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	switch (cmd) {
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		mydev_load(ifr.ifr_name);
		read_lock(&dev_base_lock);
		ret = mydev_ifsioc(&ifr, cmd);
		read_unlock(&dev_base_lock);
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(&ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(ifr.ifr_name);
		rtnl_lock();
		ret = mydev_ifsioc(&ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(ifr.ifr_name);
		rtnl_lock();
		ret = mydev_ifsioc(&ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
	case SIOCSIFMEM:
	case SIOCSIFLINK:
		return -EINVAL;

	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = mydev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			if (!ret &&
			    copy_to_user(arg, &ifr, sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		return -EINVAL;
	}
	return 0;
}

int mydev_queue_xmit(struct sk_buff *skb)
{
	return dev_queue_xmit_dummy(skb);
}

/* Set up the per-CPU backlog devices and register the TX/RX softirqs. */
int __init mynet_dev_init(void)
{
	int i;

	INIT_LIST_HEAD(&myptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&myptype_base[i]);

	PR_DEBUG("init the softnet_data queue...\n");
	for_each_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(mysoftnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = myweight_p;
		queue->backlog_dev.poll = myprocess_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

	open_softirq(MYNET_TX_SOFTIRQ, mynet_tx_action, NULL);
	open_softirq(MYNET_RX_SOFTIRQ, mynet_rx_action, NULL);

	return 0;
}

void __exit mynet_dev_exit(void)
{
}

EXPORT_SYMBOL_GPL(mynetif_rx);
EXPORT_SYMBOL_GPL(mydev_add_pack);
EXPORT_SYMBOL_GPL(mydev_remove_pack);
EXPORT_SYMBOL_GPL(mydev_queue_xmit);
EXPORT_SYMBOL_GPL(myregister_gifconf);
EXPORT_SYMBOL_GPL(mydev_ioctl);
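
For reference, a minimal, hypothetical usage sketch (not part of dev.c): it shows how a protocol handler might hook into this receive path through mydev_add_pack()/mydev_remove_pack(), assuming the 2.6.x-era struct packet_type layout that mydeliver_skb() above already relies on. The handler name myproto_rcv and the packet_type instance are illustrative only.

/* Handler invoked by mynetif_receive_skb() for IP frames on any device. */
static int myproto_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... inspect skb->nh.raw / skb->data here ... */
	kfree_skb(skb);			/* consume the buffer when done */
	return NET_RX_SUCCESS;
}

static struct packet_type myproto_pack = {
	.type = __constant_htons(ETH_P_IP),	/* hashed into myptype_base[] */
	.dev  = NULL,				/* NULL matches any device */
	.func = myproto_rcv,
};

/* register:   mydev_add_pack(&myproto_pack);    */
/* unregister: mydev_remove_pack(&myproto_pack); */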