📄 rtnetlink.h
字号:
/*
 * NOTE(review): this chunk begins mid-file — the opening of struct ndmsg
 * (its leading members) lies before the visible region; only the last two
 * members and the closing brace appear here.
 */
	__u8		ndm_flags;
	__u8		ndm_type;
};

/* Attribute types carried after the ndmsg header in neighbour messages. */
enum
{
	NDA_UNSPEC,
	NDA_DST,
	NDA_LLADDR,
	NDA_CACHEINFO
};

#define NDA_MAX NDA_CACHEINFO

/* First rtattr behind the fixed-size ndmsg header, and the number of
 * attribute bytes that follow it. */
#define NDA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
#define NDA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndmsg))

/*
 *	Neighbor Cache Entry Flags
 */

#define NTF_PROXY	0x08	/* == ATF_PUBL */
#define NTF_ROUTER	0x80

/*
 *	Neighbor Cache Entry States.
 */

#define NUD_INCOMPLETE	0x01
#define NUD_REACHABLE	0x02
#define NUD_STALE	0x04
#define NUD_DELAY	0x08
#define NUD_PROBE	0x10
#define NUD_FAILED	0x20

/* Dummy states */
#define NUD_NOARP	0x40
#define NUD_PERMANENT	0x80
#define NUD_NONE	0x00

/* Payload of the NDA_CACHEINFO attribute.
 * NOTE(review): field names suggest confirmation/use/update timestamps and a
 * reference count for a neighbour entry — units (presumably jiffies) are not
 * visible from this header; confirm against the producer. */
struct nda_cacheinfo
{
	__u32		ndm_confirmed;
	__u32		ndm_used;
	__u32		ndm_updated;
	__u32		ndm_refcnt;
};

/****
 *		General form of address family dependent message.
 ****/

struct rtgenmsg
{
	unsigned char		rtgen_family;
};

/*****************************************************************
 *		Link layer specific messages.
 ****/

/* struct ifinfomsg
 * passes link level specific information, not dependent
 * on network protocol.
 */

struct ifinfomsg
{
	unsigned char	ifi_family;
	unsigned char	__ifi_pad;	/* explicit padding byte */
	unsigned short	ifi_type;	/* ARPHRD_* */
	int		ifi_index;	/* Link index */
	unsigned	ifi_flags;	/* IFF_* flags */
	unsigned	ifi_change;	/* IFF_* change mask */
};

/* Attribute types carried after the ifinfomsg header in link messages. */
enum
{
	IFLA_UNSPEC,
	IFLA_ADDRESS,
	IFLA_BROADCAST,
	IFLA_IFNAME,
	IFLA_MTU,
	IFLA_LINK,
	IFLA_QDISC,
	IFLA_STATS
};

#define IFLA_MAX IFLA_STATS

/* First rtattr behind the fixed-size ifinfomsg header, and the number of
 * attribute bytes that follow it. */
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))

/* ifi_flags.

   IFF_* flags.

   The only change is:
   IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are
   more not changeable by user. They describe link media
   characteristics and set by device driver.

   Comments:
   - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid
   - If neither of these three flags are set,
     the interface is NBMA.

   - IFF_MULTICAST does not mean anything special:
     multicasts can be used on all not-NBMA links.
     IFF_MULTICAST means that this media uses special encapsulation
     for multicast frames. Apparently, all IFF_POINTOPOINT and
     IFF_BROADCAST devices are able to use multicasts too.
 */

/* ifi_link.
   For usual devices it is equal ifi_index.
   If it is a "virtual interface" (f.e. tunnel), ifi_link
   can point to real physical interface (f.e. for bandwidth
   calculations), or maybe 0, what means, that real media
   is unknown (usual for IPIP tunnels, when route to
   endpoint is allowed to change)
 */

/*****************************************************************
 *		Traffic control messages.
 ****/

struct tcmsg
{
	unsigned char	tcm_family;
	unsigned char	tcm__pad1;	/* explicit padding */
	unsigned short	tcm__pad2;	/* explicit padding */
	int		tcm_ifindex;
	__u32		tcm_handle;
	__u32		tcm_parent;
	__u32		tcm_info;
};

/* Attribute types carried after the tcmsg header in tc messages. */
enum
{
	TCA_UNSPEC,
	TCA_KIND,
	TCA_OPTIONS,
	TCA_STATS,
	TCA_XSTATS,
	TCA_RATE,
};

#define TCA_MAX TCA_RATE

/* First rtattr behind the fixed-size tcmsg header, and the number of
 * attribute bytes that follow it. */
#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg))))
#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg))

/* SUMMARY: maximal rtattr understood by kernel */

#define RTATTR_MAX RTA_MAX

/* RTnetlink multicast groups (bitmask values for group subscription) */

#define RTMGRP_LINK		1
#define RTMGRP_NOTIFY		2
#define RTMGRP_NEIGH		4
#define RTMGRP_TC		8
#define RTMGRP_IPV4_IFADDR	0x10
#define RTMGRP_IPV4_MROUTE	0x20
#define RTMGRP_IPV4_ROUTE	0x40
#define RTMGRP_IPV6_IFADDR	0x100
#define RTMGRP_IPV6_MROUTE	0x200
#define RTMGRP_IPV6_ROUTE	0x400

/* End of information exported to user level */

#ifdef __KERNEL__

extern atomic_t rtnl_rlockct;
extern struct wait_queue *rtnl_wait;

/*
 * Compare an attribute's payload against the NUL-terminated string str.
 * Returns 0 on match, non-zero otherwise (strcmp-like truth value).
 * NOTE(review): len (strlen + 1) is checked against rta->rta_len, which by
 * rtattr convention includes the attribute header as well as the payload,
 * so the length guard is looser than a strict payload-length check —
 * verify against struct rtattr's definition (not visible in this chunk).
 */
extern __inline__ int rtattr_strcmp(struct rtattr *rta, char *str)
{
	int len = strlen(str) + 1;
	return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
}

extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len);

#ifdef CONFIG_RTNETLINK
extern struct sock *rtnl;

/* Per-message-type handler pair: doit processes a request, dumpit
 * produces a (possibly multi-part) dump into skb. */
struct rtnetlink_link
{
	int (*doit)(struct sk_buff *, struct nlmsghdr*, void *attr);
	int (*dumpit)(struct sk_buff *, struct netlink_callback *cb);
};

extern struct rtnetlink_link * rtnetlink_links[NPROTO];
extern
int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb);
extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo);
extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);

/*
 * Append one rtattr to skb.  On insufficient tailroom this jumps to a
 * local label `rtattr_failure`, which every caller must provide.
 */
#define RTA_PUT(skb, attrtype, attrlen, data) \
({	if (skb_tailroom(skb) < (int)RTA_SPACE(attrlen)) \
		goto rtattr_failure; \
	__rta_fill(skb, attrtype, attrlen, data); })

extern unsigned long rtnl_wlockct;

/* NOTE: these locks are not interrupt safe, are not SMP safe,
 * they are even not atomic. 8)8)8) ... and it is not a bug.
 * Really, if these locks will be programmed correctly,
 * all the addressing/routing machine would become SMP safe,
 * but is absolutely useless at the moment, because all the kernel
 * is not reenterable in any case. --ANK
 *
 * Well, atomic_* and set_bit provide the only thing here:
 * gcc is confused not to overoptimize them, that's all.
 * I remember as gcc splitted ++ operation, but cannot reproduce
 * it with gcc-2.7.*. --ANK
 *
 * One more note: rwlock facility should be written and put
 * to a kernel wide location: f.e. current implementation of semaphores
 * (especially, for x86) looks like a wonder. It would be good
 * to have something similar for rwlock. Recursive lock could be also
 * useful thing. --ANK
 */

/*
 * Try to take the shared (reader) lock: bump the reader count first,
 * then back out and return -EAGAIN if the exclusive bit is already set.
 * Returns 0 on success.
 */
extern __inline__ int rtnl_shlock_nowait(void)
{
	atomic_inc(&rtnl_rlockct);
	if (test_bit(0, &rtnl_wlockct)) {
		atomic_dec(&rtnl_rlockct);
		return -EAGAIN;
	}
	return 0;
}

/* Take the shared lock, sleeping on rtnl_wait until it is available. */
extern __inline__ void rtnl_shlock(void)
{
	while (rtnl_shlock_nowait())
		sleep_on(&rtnl_wait);
}

/* Check for possibility to PROMOTE shared lock to exclusive.
   Shared lock must be already grabbed with rtnl_shlock*().
 */
/*
 * Promote succeeds only when we are the sole reader (count == 1, our own
 * shared hold) and the exclusive bit was clear; the bit is set atomically
 * here on success.  Returns -EAGAIN otherwise.
 */
extern __inline__ int rtnl_exlock_nowait(void)
{
	if (atomic_read(&rtnl_rlockct) > 1)
		return -EAGAIN;
	if (test_and_set_bit(0, &rtnl_wlockct))
		return -EAGAIN;
	return 0;
}

/* Promote to exclusive, sleeping on rtnl_wait until promotion succeeds. */
extern __inline__ void rtnl_exlock(void)
{
	while (rtnl_exlock_nowait())
		sleep_on(&rtnl_wait);
}

#if 0
extern __inline__ void rtnl_shunlock(void)
{
	atomic_dec(&rtnl_rlockct);
	if (atomic_read(&rtnl_rlockct) <= 1) {
		wake_up(&rtnl_wait);
		if (rtnl && rtnl->receive_queue.qlen)
			rtnl->data_ready(rtnl, 0);
	}
}
#else

/* The problem: an inline requires to include <net/sock.h> and, hence,
   almost all of net includes :-( — so the same body is a macro instead. */

#define rtnl_shunlock() ({ \
	atomic_dec(&rtnl_rlockct); \
	if (atomic_read(&rtnl_rlockct) <= 1) { \
		wake_up(&rtnl_wait); \
		if (rtnl && rtnl->receive_queue.qlen) \
			rtnl->data_ready(rtnl, 0); \
	} \
})
#endif

/* Release exclusive lock. Note, that we do not wake up rtnetlink socket,
 * it will be done later after releasing shared lock.
 */
extern __inline__ void rtnl_exunlock(void)
{
	clear_bit(0, &rtnl_wlockct);
	wake_up(&rtnl_wait);
}

#else

/* !CONFIG_RTNETLINK fallback: the "shared" lock degenerates into a plain
 * counting lock taken when the count is zero, and the exclusive
 * lock/unlock pair become no-ops. */

extern __inline__ void rtnl_shlock(void)
{
	while (atomic_read(&rtnl_rlockct))
		sleep_on(&rtnl_wait);
	atomic_inc(&rtnl_rlockct);
}

extern __inline__ void rtnl_shunlock(void)
{
	if (atomic_dec_and_test(&rtnl_rlockct))
		wake_up(&rtnl_wait);
}

extern __inline__ void rtnl_exlock(void)
{
}

extern __inline__ void rtnl_exunlock(void)
{
}

#endif

extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern void rtnetlink_init(void);

#endif /* __KERNEL__ */

#endif	/* __LINUX_RTNETLINK_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -