/*
 * sock.h
 * (code-viewer banner, preserved as a comment: "from the Linux kernel
 * source tree · C header file · 1,417 lines total · page 1 of 3")
 */
filter = rcu_dereference(sk->sk_filter); if (filter) { unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; } rcu_read_unlock_bh(); return err;}/** * sk_filter_release: Release a socket filter * @sk: socket * @fp: filter to remove * * Remove a filter from a socket and release its resources. */static inline void sk_filter_release(struct sk_filter *fp){ if (atomic_dec_and_test(&fp->refcnt)) kfree(fp);}static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp){ unsigned int size = sk_filter_len(fp); atomic_sub(size, &sk->sk_omem_alloc); sk_filter_release(fp);}static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp){ atomic_inc(&fp->refcnt); atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);}/* * Socket reference counting postulates. * * * Each user of socket SHOULD hold a reference count. * * Each access point to socket (an hash table bucket, reference from a list, * running timer, skb in flight MUST hold a reference count. * * When reference count hits 0, it means it will never increase back. * * When reference count hits 0, it means that no references from * outside exist to this socket and current process on current CPU * is last user and may/should destroy this socket. * * sk_free is called from any context: process, BH, IRQ. When * it is called, socket has no references from outside -> sk_free * may release descendant resources allocated by the socket, but * to the time when it is called, socket is NOT referenced by any * hash tables, lists etc. * * Packets, delivered from outside (from network or from another process) * and enqueued on receive/error queues SHOULD NOT grab reference count, * when they sit in queue. Otherwise, packets will leak to hole, when * socket is looked up by one cpu and unhasing is made by another CPU. * It is true for udp/raw, netlink (leak to receive and error queues), tcp * (leak to backlog). 
Packet socket does all the processing inside * BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets * use separate SMP lock, so that they are prone too. *//* Ungrab socket and destroy it, if it was the last reference. */static inline void sock_put(struct sock *sk){ if (atomic_dec_and_test(&sk->sk_refcnt)) sk_free(sk);}extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);/* Detach socket from process context. * Announce socket dead, detach it from wait queue and inode. * Note that parent inode held reference count on this struct sock, * we do not release it in this function, because protocol * probably wants some additional cleanups or even continuing * to work with this socket (TCP). */static inline void sock_orphan(struct sock *sk){ write_lock_bh(&sk->sk_callback_lock); sock_set_flag(sk, SOCK_DEAD); sk->sk_socket = NULL; sk->sk_sleep = NULL; write_unlock_bh(&sk->sk_callback_lock);}static inline void sock_graft(struct sock *sk, struct socket *parent){ write_lock_bh(&sk->sk_callback_lock); sk->sk_sleep = &parent->wait; parent->sk = sk; sk->sk_socket = parent; security_sock_graft(sk, parent); write_unlock_bh(&sk->sk_callback_lock);}extern int sock_i_uid(struct sock *sk);extern unsigned long sock_i_ino(struct sock *sk);static inline struct dst_entry *__sk_dst_get(struct sock *sk){ return sk->sk_dst_cache;}static inline struct dst_entry *sk_dst_get(struct sock *sk){ struct dst_entry *dst; read_lock(&sk->sk_dst_lock); dst = sk->sk_dst_cache; if (dst) dst_hold(dst); read_unlock(&sk->sk_dst_lock); return dst;}static inline void__sk_dst_set(struct sock *sk, struct dst_entry *dst){ struct dst_entry *old_dst; old_dst = sk->sk_dst_cache; sk->sk_dst_cache = dst; dst_release(old_dst);}static inline voidsk_dst_set(struct sock *sk, struct dst_entry *dst){ write_lock(&sk->sk_dst_lock); __sk_dst_set(sk, dst); write_unlock(&sk->sk_dst_lock);}static inline void__sk_dst_reset(struct sock *sk){ struct dst_entry *old_dst; old_dst = 
sk->sk_dst_cache; sk->sk_dst_cache = NULL; dst_release(old_dst);}static inline voidsk_dst_reset(struct sock *sk){ write_lock(&sk->sk_dst_lock); __sk_dst_reset(sk); write_unlock(&sk->sk_dst_lock);}extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);static inline int sk_can_gso(const struct sock *sk){ return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);}extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb){ sk->sk_wmem_queued += skb->truesize; sk->sk_forward_alloc -= skb->truesize;}static inline int skb_copy_to_page(struct sock *sk, char __user *from, struct sk_buff *skb, struct page *page, int off, int copy){ if (skb->ip_summed == CHECKSUM_NONE) { int err = 0; __wsum csum = csum_and_copy_from_user(from, page_address(page) + off, copy, 0, &err); if (err) return err; skb->csum = csum_block_add(skb->csum, csum, skb->len); } else if (copy_from_user(page_address(page) + off, from, copy)) return -EFAULT; skb->len += copy; skb->data_len += copy; skb->truesize += copy; sk->sk_wmem_queued += copy; sk->sk_forward_alloc -= copy; return 0;}/* * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in * and play with them. * * Inlined as it's very short and called for pretty much every * packet ever received. 
*/static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk){ sock_hold(sk); skb->sk = sk; skb->destructor = sock_wfree; atomic_add(skb->truesize, &sk->sk_wmem_alloc);}static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk){ skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->sk_rmem_alloc);}extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, unsigned long expires);extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb){ /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces number of warnings when compiling with -W --ANK */ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned)sk->sk_rcvbuf) return -ENOMEM; skb_set_owner_r(skb, sk); skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 0;}/* * Recover an error report and clear atomically */ static inline int sock_error(struct sock *sk){ int err; if (likely(!sk->sk_err)) return 0; err = xchg(&sk->sk_err, 0); return -err;}static inline unsigned long sock_wspace(struct sock *sk){ int amt = 0; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); if (amt < 0) amt = 0; } return amt;}static inline void sk_wake_async(struct sock *sk, int how, int band){ if (sk->sk_socket && sk->sk_socket->fasync_list) sock_wake_async(sk->sk_socket, how, band);}#define SOCK_MIN_SNDBUF 2048#define SOCK_MIN_RCVBUF 256static inline void sk_stream_moderate_sndbuf(struct sock *sk){ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2); sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); }}static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, int size, int mem, gfp_t gfp){ struct sk_buff *skb; /* The TCP header must be at least 32-bit 
aligned. */ size = ALIGN(size, 4); skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); if (skb) { skb->truesize += mem; if (sk_stream_wmem_schedule(sk, skb->truesize)) { /* * Make sure that we have exactly size bytes * available to the caller, no more, no less. */ skb_reserve(skb, skb_tailroom(skb) - size); return skb; } __kfree_skb(skb); } else { sk->sk_prot->enter_memory_pressure(); sk_stream_moderate_sndbuf(sk); } return NULL;}static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp){ return sk_stream_alloc_pskb(sk, size, 0, gfp);}static inline struct page *sk_stream_alloc_page(struct sock *sk){ struct page *page = NULL; page = alloc_pages(sk->sk_allocation, 0); if (!page) { sk->sk_prot->enter_memory_pressure(); sk_stream_moderate_sndbuf(sk); } return page;}/* * Default write policy as shown to user space via poll/select/SIGIO */static inline int sock_writeable(const struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);}static inline gfp_t gfp_any(void){ return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;}static inline long sock_rcvtimeo(const struct sock *sk, int noblock){ return noblock ? 0 : sk->sk_rcvtimeo;}static inline long sock_sndtimeo(const struct sock *sk, int noblock){ return noblock ? 0 : sk->sk_sndtimeo;}static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len){ return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;}/* Alas, with timeout socket operations are not restartable. * Compare this to poll(). */static inline int sock_intr_errno(long timeo){ return timeo == MAX_SCHEDULE_TIMEOUT ? 
-ERESTARTSYS : -EINTR;}extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);static __inline__ voidsock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb){ ktime_t kt = skb->tstamp; if (sock_flag(sk, SOCK_RCVTSTAMP)) __sock_recv_timestamp(msg, sk, skb); else sk->sk_stamp = kt;}/** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from * @skb: socket buffer to eat * @copied_early: flag indicating whether DMA operations copied this data early * * This routine must be called with interrupts disabled or with the socket * locked so that the sk_buff queue operation is ok.*/#ifdef CONFIG_NET_DMAstatic inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early){ __skb_unlink(skb, &sk->sk_receive_queue); if (!copied_early) __kfree_skb(skb); else __skb_queue_tail(&sk->sk_async_wait_queue, skb);}#elsestatic inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early){ __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb);}#endifextern void sock_enable_timestamp(struct sock *sk);extern int sock_get_timestamp(struct sock *, struct timeval __user *);extern int sock_get_timestampns(struct sock *, struct timespec __user *);/* * Enable debug/info messages */extern int net_msg_warn;#define NETDEBUG(fmt, args...) \ do { if (net_msg_warn) printk(fmt,##args); } while (0)#define LIMIT_NETDEBUG(fmt, args...) \ do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)/* * Macros for sleeping on a socket. Use them like this: * * SOCK_SLEEP_PRE(sk) * if (condition) * schedule(); * SOCK_SLEEP_POST(sk) * * N.B. These are now obsolete and were, afaik, only ever used in DECnet * and when the last use of them in DECnet has gone, I'm intending to * remove them. 
*/#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \ DECLARE_WAITQUEUE(wait, tsk); \ tsk->state = TASK_INTERRUPTIBLE; \ add_wait_queue((sk)->sk_sleep, &wait); \ release_sock(sk);#define SOCK_SLEEP_POST(sk) tsk->state = TASK_RUNNING; \ remove_wait_queue((sk)->sk_sleep, &wait); \ lock_sock(sk); \ }static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool){ if (valbool) sock_set_flag(sk, bit); else sock_reset_flag(sk, bit);}extern __u32 sysctl_wmem_max;extern __u32 sysctl_rmem_max;extern void sk_init(void);#ifdef CONFIG_SYSCTLextern struct ctl_table core_table[];#endifextern int sysctl_optmem_max;extern __u32 sysctl_wmem_default;extern __u32 sysctl_rmem_default;#endif /* _SOCK_H */
/*
 * (code-viewer UI residue, not part of sock.h — keyboard-shortcut legend:
 *  Copy code: Ctrl+C · Search code: Ctrl+F · Fullscreen: F11 ·
 *  Increase font size: Ctrl+= · Decrease font size: Ctrl+- ·
 *  Show shortcuts: ?)
 */