xdr.c
/*
 *  linux/net/sunrpc/xdr.c
 *
 *  Generic XDR support.
 *
 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
{
	if (ntohl(*p++) != len)
		return NULL;
	memcpy(obj, p, len);
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

/*
 * Realign the kvec if the server missed out some reply elements
 * (such as post-op attributes,...)
 * Note: This is a simple implementation that assumes that
 *       len <= iov->iov_len !!!
 * The RPC header (assumed to be the 1st element in the iov array)
 * is not shifted.
 */
void xdr_shift_iovec(struct kvec *iov, int nr, size_t len)
{
	struct kvec *pvec;

	for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
		struct kvec *svec = pvec - 1;

		if (len > pvec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memmove((char *)pvec->iov_base + len, pvec->iov_base,
			pvec->iov_len - len);

		if (len > svec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memcpy(pvec->iov_base,
		       (char *)svec->iov_base + svec->iov_len - len, len);
	}
}

/*
 * Map a struct xdr_buf into a kvec array.
 */
int xdr_kmap(struct kvec *iov_base, struct xdr_buf *xdr, size_t base)
{
	struct kvec	*iov = iov_base;
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;

	len = xdr->head[0].iov_len;
	if (base < len) {
		iov->iov_len = len - base;
		iov->iov_base = (char *)xdr->head[0].iov_base + base;
		iov++;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto map_tail;
	if (base >= pglen) {
		base -= pglen;
		goto map_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		len = PAGE_CACHE_SIZE;
		iov->iov_base = kmap(*ppage);
		if (base) {
			iov->iov_base += base;
			len -= base;
			base = 0;
		}
		if (pglen < len)
			len = pglen;
		iov->iov_len = len;
		iov++;
		ppage++;
	} while ((pglen -= len) != 0);
map_tail:
	if (xdr->tail[0].iov_len) {
		iov->iov_len = xdr->tail[0].iov_len - base;
		iov->iov_base = (char *)xdr->tail[0].iov_base + base;
		iov++;
	}
	return (iov - iov_base);
}

void xdr_kunmap(struct xdr_buf *xdr, size_t base)
{
	struct page	**ppage = xdr->pages;
	unsigned int	pglen = xdr->page_len;

	if (!pglen)
		return;
	if (base > xdr->head[0].iov_len)
		base -= xdr->head[0].iov_len;
	else
		base = 0;

	if (base >= pglen)
		return;
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		/* Note: The offset means that the length of the first
		 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
		 * In order to avoid an extra test inside the loop,
		 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
		pglen += base & ~PAGE_CACHE_MASK;
	}
	for (;;) {
		flush_dcache_page(*ppage);
		kunmap(*ppage);
		if (pglen <= PAGE_CACHE_SIZE)
			break;
		pglen -= PAGE_CACHE_SIZE;
		ppage++;
	}
}

void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	int		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		if (ret != len || !desc->count)
			return;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		if (ret != len || !desc->count)
			return;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}

int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
		struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_name = addr,
			.msg_namelen = addrlen,
			.msg_flags = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_flags = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/*
 * Helper routines for doing 'memmove' like operations on a struct
 * xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
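The kernel-doc for xdr_encode_opaque_fixed() and xdr_encode_opaque() describes the RFC 1832 layout: a 4-byte big-endian length word, the raw bytes, then zero padding up to the next 32-bit boundary. The following is a minimal user-space sketch (not part of xdr.c; the sketch_* names and the local SKETCH_QUADLEN macro are made up here) that reproduces that layout so it can be compiled and inspected outside the kernel.

/* Standalone sketch, assuming RFC 1832 opaque encoding as described above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define SKETCH_QUADLEN(len)	(((len) + 3) >> 2)	/* mirrors XDR_QUADLEN() */

static uint32_t *sketch_encode_opaque(uint32_t *p, const void *ptr, unsigned int nbytes)
{
	unsigned int quadlen = SKETCH_QUADLEN(nbytes);
	unsigned int padding = (quadlen << 2) - nbytes;

	*p++ = htonl(nbytes);				/* length word, network order */
	if (nbytes != 0) {
		memcpy(p, ptr, nbytes);			/* payload bytes */
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);	/* zero pad to 32-bit boundary */
		p += quadlen;
	}
	return p;
}

int main(void)
{
	uint32_t buf[8];
	uint32_t *end = sketch_encode_opaque(buf, "hello", 5);

	/* 4 bytes of length + 5 data bytes padded to 8 => 12 bytes total */
	printf("encoded %ld bytes\n", (long)((char *)end - (char *)buf));
	return 0;
}

Encoding "hello" therefore consumes three XDR quads (12 bytes), which is exactly what xdr_encode_opaque() advances the buffer position by.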
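xdr_kmap(), xdr_partial_copy_from_skb() and xdr_sendpages() all walk the same logical layout: a struct xdr_buf is head | pages | tail, and each routine first consumes the caller's base offset against the head, then against the page data, and lets any remainder land in the tail. The sketch below models only that offset bookkeeping in user space; the struct, the sketch_* names, and the segment sizes are illustrative assumptions, not kernel API.

/* Standalone sketch of the head/pages/tail offset walk used by the routines above. */
#include <stdio.h>

struct sketch_xdr_layout {
	unsigned int head_len;	/* plays the role of xdr->head[0].iov_len */
	unsigned int page_len;	/* plays the role of xdr->page_len */
	unsigned int tail_len;	/* plays the role of xdr->tail[0].iov_len */
};

/* Return the segment that 'base' falls into and the in-segment offset. */
static const char *sketch_locate(const struct sketch_xdr_layout *b,
				 unsigned int base, unsigned int *off)
{
	if (base < b->head_len) {	/* mirrors: if (base < len) ... */
		*off = base;
		return "head";
	}
	base -= b->head_len;		/* mirrors: base -= len; */
	if (base < b->page_len) {	/* mirrors: if (base >= pglen) goto ...tail; */
		*off = base;
		return "pages";
	}
	base -= b->page_len;		/* mirrors: base -= pglen; */
	*off = base;
	return "tail";
}

int main(void)
{
	struct sketch_xdr_layout buf = { .head_len = 100, .page_len = 8192, .tail_len = 4 };
	unsigned int offsets[] = { 0, 99, 100, 4200, 8292, 8293 };
	unsigned int i, off;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		const char *seg = sketch_locate(&buf, offsets[i], &off);
		printf("base %5u -> %-5s + %u\n", offsets[i], seg, off);
	}
	return 0;
}

The kernel routines add one further step not modeled here: once the offset lands in the page area, xdr->page_base is added and the result is split into a page index (base >> PAGE_CACHE_SHIFT) and an in-page offset (base & ~PAGE_CACHE_MASK).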