af_unix.c
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	wake_up_interruptible(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	siocb->scm->creds = *UNIXCREDS(skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

/*
 *	Sleep until data has arrive. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_rlock(sk);

	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (skb_queue_len(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_runlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_rlock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk->sk_sleep, &wait);
	unix_state_runlock(sk);
	return timeo;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	down(&u->readsem);

	do {
		int chunk;
		struct sk_buff *skb;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			if (copied >= target)
				break;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			if ((err = sock_error(sk)) != 0)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			err = -EAGAIN;
			if (!timeo)
				break;
			up(&u->readsem);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			down(&u->readsem);
			continue;
		}

		if (check_creds) {
			/* Never glue messages from different writers */
			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
				   sizeof(siocb->scm->creds)) != 0) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			siocb->scm->creds = *UNIXCREDS(skb);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			kfree_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	up(&u->readsem);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode + 1) & (RCV_SHUTDOWN | SEND_SHUTDOWN);

	if (mode) {
		unix_state_wlock(sk);
		sk->sk_shutdown |= mode;
		other = unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_wunlock(sk);
		sk->sk_state_change(sk);

		if (other &&
		    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode & RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode & SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_wlock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_wunlock(other);
			other->sk_state_change(other);
			read_lock(&other->sk_callback_lock);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other, 1, POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other, 1, POLL_IN);
			read_unlock(&other->sk_callback_lock);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = atomic_read(&sk->sk_wmem_alloc);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
	{
		struct sk_buff *skb;

		if (sk->sk_state == TCP_LISTEN) {
			err = -EINVAL;
			break;
		}

		spin_lock(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock(&sk->sk_receive_queue.lock);
		err = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		err = dev_ioctl(cmd, (void __user *)arg);
		break;
	}
	return err;
}

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

#ifdef CONFIG_PROC_FS
static struct sock *unix_seq_idx(int *iter, loff_t pos)
{
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == (void *)1)
		return first_unix_socket(seq->private);
	return next_unix_socket(seq->private, v);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == (void *)1)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_rlock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_runlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static struct seq_operations unix_seq_ops = {
	.start	= unix_seq_start,
	.next	= unix_seq_next,
	.stop	= unix_seq_stop,
	.show	= unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	int *iter = kmalloc(sizeof(int), GFP_KERNEL);

	if (!iter)
		goto out;

	rc = seq_open(file, &unix_seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = iter;
	*iter = 0;
out:
	return rc;
out_kfree:
	kfree(iter);
	goto out;
}

static struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif

static struct net_proto_family unix_family_ops = {
	.family	= PF_UNIX,
	.create	= unix_create,
	.owner	= THIS_MODULE,
};

#ifdef CONFIG_SYSCTL
extern void unix_sysctl_register(void);
extern void unix_sysctl_unregister(void);
#else
static inline void unix_sysctl_register(void) {}
static inline void unix_sysctl_unregister(void) {}
#endif

static int __init af_unix_init(void)
{
	struct sk_buff *dummy_skb;

	if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
		printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
		return -1;
	}
	/* allocate our sock slab cache */
	unix_sk_cachep = kmem_cache_create("unix_sock",
					   sizeof(struct unix_sock), 0,
					   SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!unix_sk_cachep)
		printk(KERN_CRIT
			"af_unix_init: Cannot create unix_sock SLAB cache!\n");

	sock_register(&unix_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("unix", 0, &unix_seq_fops);
#endif
	unix_sysctl_register();
	return 0;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	unix_sysctl_unregister();
	proc_net_remove("unix");
	kmem_cache_destroy(unix_sk_cachep);
}

module_init(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);
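
The comment in unix_dgram_recvmsg() above notes that on MSG_PEEK any passed file descriptors are cloned with scm_fp_dup(), so a later plain recvmsg() still receives its own copies. The userspace sketch below is my own illustration of that behaviour and is not part of af_unix.c; the file name peek_fds.c and the send_fd()/recv_fd() helpers are invented for the example, and error handling is kept minimal.

/* peek_fds.c - userspace sketch (not part of af_unix.c): pass an fd over an
 * AF_UNIX datagram socketpair, peek the message, then read it for real. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void send_fd(int sock, int fd_to_pass)
{
	char byte = 'x';
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } ctl;
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl.buf, .msg_controllen = sizeof(ctl.buf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
	sendmsg(sock, &msg, 0);
}

static int recv_fd(int sock, int flags)
{
	char byte;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } ctl;
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl.buf, .msg_controllen = sizeof(ctl.buf),
	};
	struct cmsghdr *cm;
	int fd = -1;

	if (recvmsg(sock, &msg, flags) < 0)
		return -1;
	cm = CMSG_FIRSTHDR(&msg);
	if (cm && cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
		memcpy(&fd, CMSG_DATA(cm), sizeof(int));
	return fd;
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
		return 1;

	send_fd(sv[0], 0);	/* pass a copy of stdin across the socket */

	/* MSG_PEEK: the fds attached to the queued skb are duplicated
	 * (the scm_fp_dup() branch above); the message stays queued. */
	printf("peeked fd:   %d\n", recv_fd(sv[1], MSG_PEEK));

	/* Plain read: the message is consumed and delivers its own fd copy. */
	printf("consumed fd: %d\n", recv_fd(sv[1], 0));
	return 0;
}

On Linux both recvmsg() calls should report a valid (distinct) descriptor, matching the "clone fds" strategy the kernel comment describes for MSG_PEEK.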