📄 sock.h
字号:
/* Stub for protocols that do not support sendpage(); falls back to sendmsg. */
extern ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

/*
 * Default socket callbacks and setup code
 */

/* Default sk->destruct callback; releases per-sock resources at free time. */
extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/* Helpers for maintaining a simple linked list of sockets. */
extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);

#ifdef CONFIG_FILTER
/**
 *	sk_filter - run a packet through a socket filter
 *	@skb: buffer to filter
 *	@filter: filter to apply
 *
 *	Run the filter code and then cut skb->data to correct size returned by
 *	sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 *	than pkt_len we keep whole skb->data. This is the socket level
 *	wrapper to sk_run_filter. It returns 0 if the packet should
 *	be accepted or 1 if the packet should be tossed.
 */
static inline int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
{
	int pkt_len;

	pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	if(!pkt_len)
		return 1;		/* Toss Packet */
	else
		skb_trim(skb, pkt_len);

	return 0;
}

/**
 *	sk_filter_release: Release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	/* Uncharge the filter's size from the socket's option memory,
	 * then drop the filter reference; free on last reference. */
	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

/* Take a reference on filter @fp and charge its size against @sk's
 * option-memory accounting (the inverse of sk_filter_release). */
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}
#endif /* CONFIG_FILTER */

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (an hash table bucket, reference from a list,
 *   running timer, skb in flight MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhashing is made by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets
 *   use separate SMP lock, so that they are prone too.
 */

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->callback_lock);
	sk->dead = 1;
	sk->socket = NULL;
	sk->sleep = NULL;
	write_unlock_bh(&sk->callback_lock);
}

/* Attach @sk to the BSD-level socket @parent: wire up the wait queue
 * and the sk <-> socket back-pointers, under callback_lock. */
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->callback_lock);
	sk->sleep = &parent->wait;
	parent->sk = sk;
	sk->socket = parent;
	write_unlock_bh(&sk->callback_lock);
}

/* Owner uid taken from the parent socket's inode; 0 if the sock
 * has been orphaned (no sk->socket). */
static inline int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->callback_lock);
	uid = sk->socket ? sk->socket->inode->i_uid : 0;
	read_unlock(&sk->callback_lock);
	return uid;
}

/* Inode number of the parent socket's inode; 0 if orphaned. */
static inline unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->callback_lock);
	ino = sk->socket ? sk->socket->inode->i_ino : 0;
	read_unlock(&sk->callback_lock);
	return ino;
}

/* Lockless peek at the cached route; no reference is taken.
 * NOTE(review): presumably the caller must hold sk->dst_lock or
 * otherwise guarantee stability — confirm against callers. */
static inline struct dst_entry *__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}

/* Return the cached route with a reference held (dst_hold),
 * taking dst_lock for the read. Caller must dst_release() it. */
static inline struct dst_entry *sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}

/* Install @dst as the cached route, releasing the old one.
 * Lockless variant of sk_dst_set. */
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}

/* Install @dst as the cached route under dst_lock. */
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}

/* Drop the cached route (and its reference). Lockless variant. */
static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}

/* Drop the cached route under dst_lock. */
static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}

/* Validate the cached route against @cookie; if it has gone obsolete
 * and fails its ->check(), forget it and return NULL. Lockless —
 * note the old dst is NOT released here, unlike sk_dst_check. */
static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}

/* Locked version of __sk_dst_check: takes a reference via sk_dst_get
 * and fully resets the cache (releasing the stale dst) on failure. */
static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		return NULL;
	}

	return dst;
}

/*
 * 	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 * 	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

/* Charge @skb to @sk's write memory; holds a sock reference that
 * sock_wfree (the destructor set here) will drop. */
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}

/* Charge @skb to @sk's read memory; note: no sock_hold here (see the
 * reference-counting postulates above — queued packets must not pin
 * the socket). sock_rfree undoes the rmem charge. */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}

/* Queue @skb on @sk's receive queue if rcvbuf allows, running the
 * socket filter (if any) first. Returns 0, -ENOMEM if over rcvbuf,
 * or -EPERM if the filter tossed the packet. */
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		int err = 0;
		struct sk_filter *filter;

		/* It would be deadlock, if sock_queue_rcv_skb is used
		   with socket lock! We assume that users of this
		   function are lock free.
		 */
		bh_lock_sock(sk);
		/* Re-read sk->filter under the BH lock before running it. */
		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
			err = -EPERM;
		bh_unlock_sock(sk);
		if (err)
			return err;	/* Toss packet */
	}
#endif /* CONFIG_FILTER */

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk,skb->len);
	return 0;
}

/* Queue @skb on @sk's error queue if rcvbuf allows; same accounting
 * as sock_queue_rcv_skb but no filtering. */
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue,skb);
	if (!sk->dead)
		sk->data_ready(sk,skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

/* xchg swaps sk->err with 0 atomically; returns it negated
 * (0 if no error was pending). */
static inline int sock_error(struct sock *sk)
{
	int err=xchg(&sk->err,0);
	return -err;
}

/* Remaining send-buffer space, clamped at 0; always 0 after
 * SEND_SHUTDOWN. */
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Deliver SIGIO/SIGURG-style async notification to the owning BSD
 * socket, but only if someone registered via fasync. */
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->socket && sk->socket->fasync_list)
		sock_wake_async(sk->socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256
/* Must be less or equal SOCK_MIN_SNDBUF */
#define SOCK_MIN_WRITE_SPACE	SOCK_MIN_SNDBUF

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 *	Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
 */
static inline int sock_writeable(struct sock *sk)
{
	return sock_wspace(sk) >= SOCK_MIN_WRITE_SPACE;
}

/* Pick an allocation type safe for the current context:
 * GFP_ATOMIC in softirq, GFP_KERNEL otherwise. */
static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

/* Effective receive timeout: 0 (don't block) if @noblock. */
static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->rcvtimeo;
}

/* Effective send timeout: 0 (don't block) if @noblock. */
static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sndtimeo;
}

/* Low-water mark for a receive: @len when waiting for all data,
 * else min(sk->rcvlowat, len); the GNU "?:"" ensures at least 1. */
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

/* Hand the packet's timestamp to the receiver: as a SO_TIMESTAMP
 * cmsg when the socket asked for it, otherwise just remember it
 * in sk->stamp (for SIOCGSTAMP). */
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	if (sk->rcvtstamp)
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
	else
		sk->stamp = skb->stamp;
}

/*
 *	Enable debug/info messages
 *	(with "#if 0" the second definition is active, i.e. NETDEBUG(x)
 *	executes x; flip to "#if 1" to compile the messages out)
 */
#if 0
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif

/*
 *      Macros for sleeping on a socket. Use them like this:
 *
 *      SOCK_SLEEP_PRE(sk)
 *      if (condition)
 *              schedule();
 *      SOCK_SLEEP_POST(sk)
 *
 *	N.B. these two macros deliberately form one brace-balanced pair:
 *	PRE opens a block (and drops the socket lock), POST closes it
 *	(and retakes the lock). Always use them together.
 */
#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				lock_sock(sk); \
				}

/* sysctl-tunable ceilings for SO_SNDBUF / SO_RCVBUF. */
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#endif	/* _SOCK_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -