
📄 af_netlink.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
	/* (tail of netlink_connect(), continued from the preceding part of the file) */
	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(ssk->sk_net, ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static __inline__ int netlink_broadcast_deliver(struct sock *sk,
						struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if ((sk->sk_net != p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct net *net = ssk->sk_net;
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (sk->sk_net != p->exclude_sk->sk_net)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
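
For orientation on how the unicast path above is consumed, here is a minimal, hedged sketch of a kernel-side caller from the same kernel era. It is not part of af_netlink.c; the socket pointer, helper names nlmsg_new()/nlmsg_put(), message type and payload are illustrative assumptions, while netlink_unicast() and NETLINK_CB() are the interfaces defined on this page.

/*
 * Usage sketch (assumption, not from af_netlink.c): a kernel module sending a
 * unicast message to a userspace pid through netlink_unicast() above.
 */
#include <linux/string.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/sock.h>

static struct sock *example_nl_sk;	/* hypothetical; created elsewhere via netlink_kernel_create() */

static int example_send_to_pid(u32 dst_pid, const char *text)
{
	size_t payload = strlen(text) + 1;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* pid 0 marks the kernel as sender; NLMSG_DONE used as a generic type */
	nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, payload, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), text, payload);

	NETLINK_CB(skb).pid = 0;		/* sending pid (kernel) */
	NETLINK_CB(skb).dst_group = 0;		/* unicast, no multicast group */

	/* On success this returns the queued length (see netlink_sendskb()) */
	return netlink_unicast(example_nl_sk, skb, dst_pid, MSG_DONTWAIT);
}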
