1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/net/sunrpc/xdr.c
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/string.h>
14 #include <linux/kernel.h>
15 #include <linux/pagemap.h>
16 #include <linux/errno.h>
17 #include <linux/sunrpc/xdr.h>
18 #include <linux/sunrpc/msg_prot.h>
19 #include <linux/bvec.h>
20 #include <trace/events/sunrpc.h>
23 * XDR functions for basic NFS types
26 xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
28 unsigned int quadlen = XDR_QUADLEN(obj->len);
30 p[quadlen] = 0; /* zero trailing bytes */
31 *p++ = cpu_to_be32(obj->len);
32 memcpy(p, obj->data, obj->len);
33 return p + XDR_QUADLEN(obj->len);
35 EXPORT_SYMBOL_GPL(xdr_encode_netobj);
38 xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
42 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
46 return p + XDR_QUADLEN(len);
48 EXPORT_SYMBOL_GPL(xdr_decode_netobj);
51 * xdr_encode_opaque_fixed - Encode fixed length opaque data
52 * @p: pointer to current position in XDR buffer.
53 * @ptr: pointer to data to encode (or NULL)
54 * @nbytes: size of data.
56 * Copy the array of data of length nbytes at ptr to the XDR buffer
57 * at position p, then align to the next 32-bit boundary by padding
58 * with zero bytes (see RFC1832).
59 * Note: if ptr is NULL, only the padding is performed.
61 * Returns the updated current XDR buffer position
64 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
66 if (likely(nbytes != 0)) {
67 unsigned int quadlen = XDR_QUADLEN(nbytes);
68 unsigned int padding = (quadlen << 2) - nbytes;
71 memcpy(p, ptr, nbytes);
73 memset((char *)p + nbytes, 0, padding);
78 EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
81 * xdr_encode_opaque - Encode variable length opaque data
82 * @p: pointer to current position in XDR buffer.
83 * @ptr: pointer to data to encode (or NULL)
84 * @nbytes: size of data.
86 * Returns the updated current XDR buffer position
88 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
90 *p++ = cpu_to_be32(nbytes);
91 return xdr_encode_opaque_fixed(p, ptr, nbytes);
93 EXPORT_SYMBOL_GPL(xdr_encode_opaque);
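/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical use of xdr_encode_opaque().  It writes the 4-byte length,
 * copies the data, and zero-pads to the next 32-bit boundary, so the
 * hypothetical 5-byte "verifier" below consumes 4 + 8 = 12 bytes of XDR.
 */
static __maybe_unused __be32 *example_encode_verifier(__be32 *p)
{
	static const u8 verifier[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };

	/* Emits the length (5), 5 data bytes, then 3 zero padding bytes. */
	return xdr_encode_opaque(p, verifier, sizeof(verifier));
}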
96 xdr_encode_string(__be32 *p, const char *string)
98 return xdr_encode_array(p, string, strlen(string));
100 EXPORT_SYMBOL_GPL(xdr_encode_string);
103 xdr_decode_string_inplace(__be32 *p, char **sp,
104 unsigned int *lenp, unsigned int maxlen)
108 len = be32_to_cpu(*p++);
113 return p + XDR_QUADLEN(len);
115 EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
118 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
119 * @buf: XDR buffer where string resides
120 * @len: length of string, in bytes
124 xdr_terminate_string(struct xdr_buf *buf, const u32 len)
128 kaddr = kmap_atomic(buf->pages[0]);
129 kaddr[buf->page_base + len] = '\0';
130 kunmap_atomic(kaddr);
132 EXPORT_SYMBOL_GPL(xdr_terminate_string);
135 xdr_buf_pagecount(struct xdr_buf *buf)
139 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
143 xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
145 size_t i, n = xdr_buf_pagecount(buf);
147 if (n != 0 && buf->bvec == NULL) {
148 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
151 for (i = 0; i < n; i++) {
152 buf->bvec[i].bv_page = buf->pages[i];
153 buf->bvec[i].bv_len = PAGE_SIZE;
154 buf->bvec[i].bv_offset = 0;
161 xdr_free_bvec(struct xdr_buf *buf)
168 * xdr_inline_pages - Prepare receive buffer for a large reply
169 * @xdr: xdr_buf into which reply will be placed
170 * @offset: expected offset where data payload will start, in bytes
171 * @pages: vector of struct page pointers
172 * @base: offset in first page where receive should start, in bytes
173 * @len: expected size of the upper layer data payload, in bytes
177 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
178 struct page **pages, unsigned int base, unsigned int len)
180 struct kvec *head = xdr->head;
181 struct kvec *tail = xdr->tail;
182 char *buf = (char *)head->iov_base;
183 unsigned int buflen = head->iov_len;
185 head->iov_len = offset;
188 xdr->page_base = base;
191 tail->iov_base = buf + offset;
192 tail->iov_len = buflen - offset;
193 if ((xdr->page_len & 3) == 0)
194 tail->iov_len -= sizeof(__be32);
198 EXPORT_SYMBOL_GPL(xdr_inline_pages);
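/*
 * Illustrative sketch (editorial addition): preparing a receive buffer so
 * that a large payload lands directly in the supplied pages.  The reply
 * header is expected to fill the first "hdrsize" bytes of head[0]; the
 * next "count" bytes are received into @pages.  All names here are
 * hypothetical.
 */
static __maybe_unused void example_prepare_large_reply(struct xdr_buf *rcvbuf,
							unsigned int hdrsize,
							struct page **pages,
							unsigned int count)
{
	/* Payload starts at reply offset "hdrsize", at offset 0 of pages[0]. */
	xdr_inline_pages(rcvbuf, hdrsize, pages, 0, count);
}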
201 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
205 * _shift_data_right_pages
206 * @pages: vector of pages containing both the source and dest memory area.
207 * @pgto_base: page vector address of destination
208 * @pgfrom_base: page vector address of source
209 * @len: number of bytes to copy
211 * Note: the addresses pgto_base and pgfrom_base are both calculated in
213 * if a memory area starts at byte 'base' in page 'pages[i]',
214 * then its address is given as (i << PAGE_SHIFT) + base
215 * Also note: pgfrom_base must be < pgto_base, but the memory areas
216 * they point to may overlap.
219 _shift_data_right_pages(struct page **pages, size_t pgto_base,
220 size_t pgfrom_base, size_t len)
222 struct page **pgfrom, **pgto;
226 BUG_ON(pgto_base <= pgfrom_base);
231 pgto = pages + (pgto_base >> PAGE_SHIFT);
232 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
234 pgto_base &= ~PAGE_MASK;
235 pgfrom_base &= ~PAGE_MASK;
238 /* Are any pointers crossing a page boundary? */
239 if (pgto_base == 0) {
240 pgto_base = PAGE_SIZE;
243 if (pgfrom_base == 0) {
244 pgfrom_base = PAGE_SIZE;
249 if (copy > pgto_base)
251 if (copy > pgfrom_base)
256 vto = kmap_atomic(*pgto);
257 if (*pgto != *pgfrom) {
258 vfrom = kmap_atomic(*pgfrom);
259 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
260 kunmap_atomic(vfrom);
262 memmove(vto + pgto_base, vto + pgfrom_base, copy);
263 flush_dcache_page(*pgto);
266 } while ((len -= copy) != 0);
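	/*
	 * Editorial note: with the addressing convention described above,
	 * a byte at offset 300 into pages[2] has page-vector address
	 * (2 << PAGE_SHIFT) + 300, i.e. 8492 with 4KB pages.
	 */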
271 * @pages: array of pages
272 * @pgbase: page vector address of destination
273 * @p: pointer to source data
276 * Copies data from an arbitrary memory location into an array of pages
277 * The copy is assumed to be non-overlapping.
280 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
286 pgto = pages + (pgbase >> PAGE_SHIFT);
287 pgbase &= ~PAGE_MASK;
290 copy = PAGE_SIZE - pgbase;
294 vto = kmap_atomic(*pgto);
295 memcpy(vto + pgbase, p, copy);
303 if (pgbase == PAGE_SIZE) {
304 flush_dcache_page(*pgto);
310 flush_dcache_page(*pgto);
315 * @p: pointer to destination
316 * @pages: array of pages
317 * @pgbase: offset of source data
320 * Copies data into an arbitrary memory location from an array of pages
321 * The copy is assumed to be non-overlapping.
324 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
326 struct page **pgfrom;
330 pgfrom = pages + (pgbase >> PAGE_SHIFT);
331 pgbase &= ~PAGE_MASK;
334 copy = PAGE_SIZE - pgbase;
338 vfrom = kmap_atomic(*pgfrom);
339 memcpy(p, vfrom + pgbase, copy);
340 kunmap_atomic(vfrom);
343 if (pgbase == PAGE_SIZE) {
349 } while ((len -= copy) != 0);
351 EXPORT_SYMBOL_GPL(_copy_from_pages);
356 * @len: bytes to remove from buf->head[0]
358 * Shrinks XDR buffer's header kvec buf->head[0] by
359 * 'len' bytes. The extra data is not lost, but is instead
360 * moved into the inlined pages and/or the tail.
363 xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
365 struct kvec *head, *tail;
367 unsigned int pglen = buf->page_len;
374 WARN_ON_ONCE(len > head->iov_len);
375 if (len > head->iov_len)
378 /* Shift the tail first */
379 if (tail->iov_len != 0) {
380 if (tail->iov_len > len) {
381 copy = tail->iov_len - len;
382 memmove((char *)tail->iov_base + len,
383 tail->iov_base, copy);
386 /* Copy from the inlined pages into the tail */
391 if (offs >= tail->iov_len)
393 else if (copy > tail->iov_len - offs)
394 copy = tail->iov_len - offs;
396 _copy_from_pages((char *)tail->iov_base + offs,
398 buf->page_base + pglen + offs - len,
402 /* Do we also need to copy data from the head into the tail ? */
404 offs = copy = len - pglen;
405 if (copy > tail->iov_len)
406 copy = tail->iov_len;
407 memcpy(tail->iov_base,
408 (char *)head->iov_base +
409 head->iov_len - offs,
414 /* Now handle pages */
417 _shift_data_right_pages(buf->pages,
418 buf->page_base + len,
424 _copy_to_pages(buf->pages, buf->page_base,
425 (char *)head->iov_base + head->iov_len - len,
429 head->iov_len -= len;
431 /* Have we truncated the message? */
432 if (buf->len > buf->buflen)
433 buf->len = buf->buflen;
439 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
441 * @len: bytes to remove from buf->pages
443 * The extra data is not lost, but is instead moved into buf->tail.
444 * Returns the actual number of bytes moved.
447 xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
451 unsigned int pglen = buf->page_len;
452 unsigned int tailbuf_len;
457 if (len > buf->page_len)
458 len = buf->page_len;
459 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
461 /* Shift the tail first */
462 if (tailbuf_len != 0) {
463 unsigned int free_space = tailbuf_len - tail->iov_len;
465 if (len < free_space)
467 tail->iov_len += free_space;
470 if (tail->iov_len > len) {
471 char *p = (char *)tail->iov_base + len;
472 memmove(p, tail->iov_base, tail->iov_len - len);
473 result += tail->iov_len - len;
475 copy = tail->iov_len;
476 /* Copy from the inlined pages into the tail */
477 _copy_from_pages((char *)tail->iov_base,
478 buf->pages, buf->page_base + pglen - len,
482 buf->page_len -= len;
484 /* Have we truncated the message? */
485 if (buf->len > buf->buflen)
486 buf->len = buf->buflen;
492 xdr_shift_buf(struct xdr_buf *buf, size_t len)
494 xdr_shrink_bufhead(buf, len);
496 EXPORT_SYMBOL_GPL(xdr_shift_buf);
499 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
500 * @xdr: pointer to struct xdr_stream
502 unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
504 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
506 EXPORT_SYMBOL_GPL(xdr_stream_pos);
509 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
510 * @xdr: pointer to xdr_stream struct
511 * @buf: pointer to XDR buffer in which to encode data
512 * @p: current pointer inside XDR buffer
513 * @rqst: pointer to controlling rpc_rqst, for debugging
515 * Note: at the moment the RPC client only passes the length of our
516 * scratch buffer in the xdr_buf's header kvec. Previously this
517 * meant we needed to call xdr_adjust_iovec() after encoding the
518 * data. With the new scheme, the xdr_stream manages the details
519 * of the buffer length, and takes care of adjusting the kvec
522 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
523 struct rpc_rqst *rqst)
525 struct kvec *iov = buf->head;
526 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
528 xdr_set_scratch_buffer(xdr, NULL, 0);
529 BUG_ON(scratch_len < 0);
532 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
533 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
534 BUG_ON(iov->iov_len > scratch_len);
536 if (p != xdr->p && p != NULL) {
539 BUG_ON(p < xdr->p || p > xdr->end);
540 len = (char *)p - (char *)xdr->p;
547 EXPORT_SYMBOL_GPL(xdr_init_encode);
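/*
 * Illustrative sketch (editorial addition): a minimal encode set-up.  The
 * caller is assumed to have filled in buf->head[0].iov_base and buf->buflen;
 * passing a NULL @p starts encoding at the current end of head[0].  The
 * function name is hypothetical.
 */
static __maybe_unused void example_start_encode(struct xdr_stream *xdr,
						struct xdr_buf *buf)
{
	xdr_init_encode(xdr, buf, NULL, NULL);
}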
550 * xdr_commit_encode - Ensure all data is written to buffer
551 * @xdr: pointer to xdr_stream
553 * We handle encoding across page boundaries by giving the caller a
554 * temporary location to write to, then later copying the data into
555 * place; xdr_commit_encode does that copying.
557 * Normally the caller doesn't need to call this directly, as the
558 * following xdr_reserve_space will do it. But an explicit call may be
559 * required at the end of encoding, or any other time when the xdr_buf
560 * data might be read.
562 inline void xdr_commit_encode(struct xdr_stream *xdr)
564 int shift = xdr->scratch.iov_len;
569 page = page_address(*xdr->page_ptr);
570 memcpy(xdr->scratch.iov_base, page, shift);
571 memmove(page, page + shift, (void *)xdr->p - page);
572 xdr->scratch.iov_len = 0;
574 EXPORT_SYMBOL_GPL(xdr_commit_encode);
576 static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
581 int frag1bytes, frag2bytes;
583 if (nbytes > PAGE_SIZE)
584 goto out_overflow; /* Bigger buffers require special handling */
585 if (xdr->buf->len + nbytes > xdr->buf->buflen)
586 goto out_overflow; /* Sorry, we're totally out of space */
587 frag1bytes = (xdr->end - xdr->p) << 2;
588 frag2bytes = nbytes - frag1bytes;
590 xdr->iov->iov_len += frag1bytes;
592 xdr->buf->page_len += frag1bytes;
596 * If the last encode didn't end exactly on a page boundary, the
597 * next one will straddle boundaries. Encode into the next
598 * page, then copy it back later in xdr_commit_encode. We use
599 * the "scratch" iov to track any temporarily unused fragment of
600 * space at the end of the previous buffer:
602 xdr->scratch.iov_base = xdr->p;
603 xdr->scratch.iov_len = frag1bytes;
604 p = page_address(*xdr->page_ptr);
606 * Note this is where the next encode will start after we've
607 * shifted this one back:
609 xdr->p = (void *)p + frag2bytes;
610 space_left = xdr->buf->buflen - xdr->buf->len;
611 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
612 xdr->buf->page_len += frag2bytes;
613 xdr->buf->len += nbytes;
616 trace_rpc_xdr_overflow(xdr, nbytes);
621 * xdr_reserve_space - Reserve buffer space for sending
622 * @xdr: pointer to xdr_stream
623 * @nbytes: number of bytes to reserve
625 * Checks that we have enough buffer space to encode 'nbytes' more
626 * bytes of data. If so, update the total xdr_buf length, and
627 * adjust the length of the current kvec.
629 __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
634 xdr_commit_encode(xdr);
635 /* align nbytes on the next 32-bit boundary */
638 q = p + (nbytes >> 2);
639 if (unlikely(q > xdr->end || q < p))
640 return xdr_get_next_encode_buffer(xdr, nbytes);
643 xdr->iov->iov_len += nbytes;
645 xdr->buf->page_len += nbytes;
646 xdr->buf->len += nbytes;
649 EXPORT_SYMBOL_GPL(xdr_reserve_space);
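/*
 * Illustrative sketch (editorial addition): the usual reserve-then-fill
 * pattern.  xdr_reserve_space() returns NULL once the xdr_buf is out of
 * room, so every caller must check the result before writing.  The
 * function name is hypothetical.
 */
static __maybe_unused int example_encode_u32(struct xdr_stream *xdr, u32 value)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(value));
	if (!p)
		return -EMSGSIZE;	/* no space left in the send buffer */
	*p = cpu_to_be32(value);
	return 0;
}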
652 * xdr_truncate_encode - truncate an encode buffer
653 * @xdr: pointer to xdr_stream
654 * @len: new length of buffer
656 * Truncates the xdr stream, so that xdr->buf->len == len,
657 * and xdr->p points at offset len from the start of the buffer, and
658 * head, tail, and page lengths are adjusted to correspond.
660 * If this means moving xdr->p to a different buffer, we assume that
661 * the end pointer should be set to the end of the current page,
662 * except in the case of the head buffer when we assume the head
663 * buffer's current length represents the end of the available buffer.
665 * This is *not* safe to use on a buffer that already has inlined page
666 * cache pages (as in a zero-copy server read reply), except for the
667 * simple case of truncating from one position in the tail to another.
670 void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
672 struct xdr_buf *buf = xdr->buf;
673 struct kvec *head = buf->head;
674 struct kvec *tail = buf->tail;
678 if (len > buf->len) {
682 xdr_commit_encode(xdr);
684 fraglen = min_t(int, buf->len - len, tail->iov_len);
685 tail->iov_len -= fraglen;
688 xdr->p = tail->iov_base + tail->iov_len;
689 WARN_ON_ONCE(!xdr->end);
690 WARN_ON_ONCE(!xdr->iov);
693 WARN_ON_ONCE(fraglen);
694 fraglen = min_t(int, buf->len - len, buf->page_len);
695 buf->page_len -= fraglen;
698 new = buf->page_base + buf->page_len;
700 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
703 xdr->p = page_address(*xdr->page_ptr);
704 xdr->end = (void *)xdr->p + PAGE_SIZE;
705 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
706 WARN_ON_ONCE(xdr->iov);
710 xdr->end = head->iov_base + head->iov_len;
711 /* (otherwise assume xdr->end is already set) */
715 xdr->p = head->iov_base + head->iov_len;
716 xdr->iov = buf->head;
718 EXPORT_SYMBOL(xdr_truncate_encode);
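/*
 * Illustrative sketch (editorial addition): the common "roll back on
 * failure" use of xdr_truncate_encode().  A server-side encoder notes the
 * current length, attempts to encode an optional item, and truncates back
 * to the saved position if that fails.  The callback and function name are
 * hypothetical.
 */
static __maybe_unused int example_encode_optional(struct xdr_stream *xdr,
						  int (*encode_item)(struct xdr_stream *))
{
	size_t start = xdr->buf->len;
	int err;

	err = encode_item(xdr);
	if (err)
		xdr_truncate_encode(xdr, start);	/* discard the partial item */
	return err;
}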
721 * xdr_restrict_buflen - decrease available buffer space
722 * @xdr: pointer to xdr_stream
723 * @newbuflen: new maximum number of bytes available
725 * Adjust our idea of how much space is available in the buffer.
726 * If we've already used too much space in the buffer, returns -1.
727 * If the available space is already smaller than newbuflen, returns 0
728 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
729 * and ensures xdr->end is set at most offset newbuflen from the start
732 int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
734 struct xdr_buf *buf = xdr->buf;
735 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
736 int end_offset = buf->len + left_in_this_buf;
738 if (newbuflen < 0 || newbuflen < buf->len)
740 if (newbuflen > buf->buflen)
742 if (newbuflen < end_offset)
743 xdr->end = (void *)xdr->end + newbuflen - end_offset;
744 buf->buflen = newbuflen;
747 EXPORT_SYMBOL(xdr_restrict_buflen);
750 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
751 * @xdr: pointer to xdr_stream
752 * @pages: list of pages
753 * @base: offset of first byte
754 * @len: length of data in bytes
757 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
760 struct xdr_buf *buf = xdr->buf;
761 struct kvec *iov = buf->tail;
763 buf->page_base = base;
766 iov->iov_base = (char *)xdr->p;
771 unsigned int pad = 4 - (len & 3);
773 BUG_ON(xdr->p >= xdr->end);
774 iov->iov_base = (char *)xdr->p + (len & 3);
782 EXPORT_SYMBOL_GPL(xdr_write_pages);
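/*
 * Illustrative sketch (editorial addition): building a READ-style send
 * buffer.  Once the fixed header has been encoded through the stream,
 * xdr_write_pages() splices "count" bytes of page data in behind it and
 * points the tail past the pad word.  All names are hypothetical.
 */
static __maybe_unused void example_encode_page_data(struct xdr_stream *xdr,
						    struct page **pages,
						    unsigned int count)
{
	/* Data begins at offset 0 of the first page. */
	xdr_write_pages(xdr, pages, 0, count);
}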
784 static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
787 if (len > iov->iov_len)
789 xdr->p = (__be32*)iov->iov_base;
790 xdr->end = (__be32*)(iov->iov_base + len);
792 xdr->page_ptr = NULL;
795 static int xdr_set_page_base(struct xdr_stream *xdr,
796 unsigned int base, unsigned int len)
804 maxlen = xdr->buf->page_len;
811 base += xdr->buf->page_base;
813 pgnr = base >> PAGE_SHIFT;
814 xdr->page_ptr = &xdr->buf->pages[pgnr];
815 kaddr = page_address(*xdr->page_ptr);
817 pgoff = base & ~PAGE_MASK;
818 xdr->p = (__be32*)(kaddr + pgoff);
821 if (pgend > PAGE_SIZE)
823 xdr->end = (__be32*)(kaddr + pgend);
828 static void xdr_set_next_page(struct xdr_stream *xdr)
830 unsigned int newbase;
832 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
833 newbase -= xdr->buf->page_base;
835 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
836 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
839 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
841 if (xdr->page_ptr != NULL)
842 xdr_set_next_page(xdr);
843 else if (xdr->iov == xdr->buf->head) {
844 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
845 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
847 return xdr->p != xdr->end;
851 * xdr_init_decode - Initialize an xdr_stream for decoding data.
852 * @xdr: pointer to xdr_stream struct
853 * @buf: pointer to XDR buffer from which to decode data
854 * @p: current pointer inside XDR buffer
855 * @rqst: pointer to controlling rpc_rqst, for debugging
857 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
858 struct rpc_rqst *rqst)
861 xdr->scratch.iov_base = NULL;
862 xdr->scratch.iov_len = 0;
863 xdr->nwords = XDR_QUADLEN(buf->len);
864 if (buf->head[0].iov_len != 0)
865 xdr_set_iov(xdr, buf->head, buf->len);
866 else if (buf->page_len != 0)
867 xdr_set_page_base(xdr, 0, buf->len);
869 xdr_set_iov(xdr, buf->head, buf->len);
870 if (p != NULL && p > xdr->p && xdr->end >= p) {
871 xdr->nwords -= p - xdr->p;
876 EXPORT_SYMBOL_GPL(xdr_init_decode);
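/*
 * Illustrative sketch (editorial addition): a minimal decode set-up
 * followed by reading one 32-bit word.  Passing buf->head[0].iov_base as
 * @p starts decoding at the first byte of the head.  The function name is
 * hypothetical.
 */
static __maybe_unused int example_start_decode(struct xdr_stream *xdr,
					       struct xdr_buf *buf, u32 *word)
{
	__be32 *p;

	xdr_init_decode(xdr, buf, buf->head[0].iov_base, NULL);
	p = xdr_inline_decode(xdr, sizeof(*word));
	if (!p)
		return -EBADMSG;	/* message shorter than expected */
	*word = be32_to_cpup(p);
	return 0;
}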
879 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
880 * @xdr: pointer to xdr_stream struct
881 * @buf: pointer to XDR buffer from which to decode data
882 * @pages: list of pages to decode into
883 * @len: length in bytes of buffer in pages
885 void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
886 struct page **pages, unsigned int len)
888 memset(buf, 0, sizeof(*buf));
893 xdr_init_decode(xdr, buf, NULL, NULL);
895 EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
897 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
899 unsigned int nwords = XDR_QUADLEN(nbytes);
901 __be32 *q = p + nwords;
903 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
906 xdr->nwords -= nwords;
911 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
912 * @xdr: pointer to xdr_stream struct
913 * @buf: pointer to an empty buffer
914 * @buflen: size of 'buf'
916 * The scratch buffer is used when decoding from an array of pages.
917 * If an xdr_inline_decode() call spans across page boundaries, then
918 * we copy the data into the scratch buffer in order to allow linear
921 void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
923 xdr->scratch.iov_base = buf;
924 xdr->scratch.iov_len = buflen;
926 EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
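/*
 * Illustrative sketch (editorial addition): attaching a scratch buffer
 * before decoding a variable-length object that may straddle a page
 * boundary.  The scratch memory must stay valid for as long as decode
 * calls may use it; the names and buffer sizes are hypothetical.
 */
static __maybe_unused ssize_t example_decode_name(struct xdr_stream *xdr,
						  void *scratch, size_t scratch_len,
						  char *name, size_t namelen)
{
	/* Any xdr_inline_decode() that crosses a page boundary is
	 * linearized into "scratch". */
	xdr_set_scratch_buffer(xdr, scratch, scratch_len);
	return xdr_stream_decode_opaque(xdr, name, namelen);
}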
928 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
931 char *cpdest = xdr->scratch.iov_base;
932 size_t cplen = (char *)xdr->end - (char *)xdr->p;
934 if (nbytes > xdr->scratch.iov_len)
936 p = __xdr_inline_decode(xdr, cplen);
939 memcpy(cpdest, p, cplen);
940 if (!xdr_set_next_buffer(xdr))
944 p = __xdr_inline_decode(xdr, nbytes);
947 memcpy(cpdest, p, nbytes);
948 return xdr->scratch.iov_base;
950 trace_rpc_xdr_overflow(xdr, nbytes);
955 * xdr_inline_decode - Retrieve XDR data to decode
956 * @xdr: pointer to xdr_stream struct
957 * @nbytes: number of bytes of data to decode
959 * Check if the input buffer is long enough to enable us to decode
960 * 'nbytes' more bytes of data starting at the current position.
961 * If so return the current pointer, then update the current
964 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
968 if (unlikely(nbytes == 0))
970 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
972 p = __xdr_inline_decode(xdr, nbytes);
975 return xdr_copy_to_scratch(xdr, nbytes);
977 trace_rpc_xdr_overflow(xdr, nbytes);
980 EXPORT_SYMBOL_GPL(xdr_inline_decode);
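/*
 * Illustrative sketch (editorial addition): pulling a fixed-size field out
 * of the stream.  xdr_inline_decode() either returns a pointer to "nbytes"
 * of linear data (possibly via the scratch buffer) or NULL on a short or
 * malformed message.  The 16-byte "cookie" is hypothetical.
 */
static __maybe_unused int example_decode_cookie(struct xdr_stream *xdr,
						u8 cookie[16])
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 16);
	if (!p)
		return -EBADMSG;
	memcpy(cookie, p, 16);
	return 0;
}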
982 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
984 struct xdr_buf *buf = xdr->buf;
986 unsigned int nwords = XDR_QUADLEN(len);
987 unsigned int cur = xdr_stream_pos(xdr);
988 unsigned int copied, offset;
990 if (xdr->nwords == 0)
993 /* Realign pages to current pointer position */
995 if (iov->iov_len > cur) {
996 offset = iov->iov_len - cur;
997 copied = xdr_shrink_bufhead(buf, offset);
998 trace_rpc_xdr_alignment(xdr, offset, copied);
999 xdr->nwords = XDR_QUADLEN(buf->len - cur);
1002 if (nwords > xdr->nwords) {
1003 nwords = xdr->nwords;
1006 if (buf->page_len <= len)
1007 len = buf->page_len;
1008 else if (nwords < xdr->nwords) {
1009 /* Truncate page data and move it into the tail */
1010 offset = buf->page_len - len;
1011 copied = xdr_shrink_pagelen(buf, offset);
1012 trace_rpc_xdr_alignment(xdr, offset, copied);
1013 xdr->nwords = XDR_QUADLEN(buf->len - cur);
1019 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
1020 * @xdr: pointer to xdr_stream struct
1021 * @len: number of bytes of page data
1023 * Moves data beyond the current pointer position from the XDR head[] buffer
1024 * into the page list. Any data that lies beyond current position + "len"
1025 * bytes is moved into the XDR tail[].
1027 * Returns the number of XDR encoded bytes now contained in the pages
1029 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1031 struct xdr_buf *buf = xdr->buf;
1033 unsigned int nwords;
1035 unsigned int padding;
1037 len = xdr_align_pages(xdr, len);
1040 nwords = XDR_QUADLEN(len);
1041 padding = (nwords << 2) - len;
1042 xdr->iov = iov = buf->tail;
1043 /* Compute remaining message length. */
1044 end = ((xdr->nwords - nwords) << 2) + padding;
1045 if (end > iov->iov_len)
1049 * Position current pointer at beginning of tail, and
1050 * set remaining message length.
1052 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
1053 xdr->end = (__be32 *)((char *)iov->iov_base + end);
1054 xdr->page_ptr = NULL;
1055 xdr->nwords = XDR_QUADLEN(end - padding);
1058 EXPORT_SYMBOL_GPL(xdr_read_pages);
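/*
 * Illustrative sketch (editorial addition): a READ-style reply decoder.
 * The on-the-wire byte count is decoded first, then xdr_read_pages()
 * aligns the page data at the current position and returns how many of
 * those bytes are actually present.  All names are hypothetical.
 */
static __maybe_unused int example_decode_read_data(struct xdr_stream *xdr,
						   unsigned int *countp)
{
	__be32 *p;
	u32 count;

	p = xdr_inline_decode(xdr, sizeof(count));
	if (!p)
		return -EBADMSG;
	count = be32_to_cpup(p);
	/* May be less than "count" if the reply was truncated. */
	*countp = xdr_read_pages(xdr, count);
	return 0;
}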
1061 * xdr_enter_page - decode data from the XDR page
1062 * @xdr: pointer to xdr_stream struct
1063 * @len: number of bytes of page data
1065 * Moves data beyond the current pointer position from the XDR head[] buffer
1066 * into the page list. Any data that lies beyond current position + "len"
1067 * bytes is moved into the XDR tail[]. The current pointer is then
1068 * repositioned at the beginning of the first XDR page.
1070 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
1072 len = xdr_align_pages(xdr, len);
1074 * Position current pointer at beginning of tail, and
1075 * set remaining message length.
1078 xdr_set_page_base(xdr, 0, len);
1080 EXPORT_SYMBOL_GPL(xdr_enter_page);
1082 static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1085 xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1087 buf->head[0] = *iov;
1088 buf->tail[0] = empty_iov;
1090 buf->buflen = buf->len = iov->iov_len;
1092 EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1095 * xdr_buf_subsegment - set subbuf to a portion of buf
1096 * @buf: an xdr buffer
1097 * @subbuf: the result buffer
1098 * @base: beginning of range in bytes
1099 * @len: length of range in bytes
1101 * sets @subbuf to an xdr buffer representing the portion of @buf of
1102 * length @len starting at offset @base.
1104 * @buf and @subbuf may be pointers to the same struct xdr_buf.
1106 * Returns -1 if base or length are out of bounds.
1109 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1110 unsigned int base, unsigned int len)
1112 subbuf->buflen = subbuf->len = len;
1113 if (base < buf->head[0].iov_len) {
1114 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1115 subbuf->head[0].iov_len = min_t(unsigned int, len,
1116 buf->head[0].iov_len - base);
1117 len -= subbuf->head[0].iov_len;
1120 base -= buf->head[0].iov_len;
1121 subbuf->head[0].iov_len = 0;
1124 if (base < buf->page_len) {
1125 subbuf->page_len = min(buf->page_len - base, len);
1126 base += buf->page_base;
1127 subbuf->page_base = base & ~PAGE_MASK;
1128 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1129 len -= subbuf->page_len;
1132 base -= buf->page_len;
1133 subbuf->page_len = 0;
1136 if (base < buf->tail[0].iov_len) {
1137 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1138 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1139 buf->tail[0].iov_len - base);
1140 len -= subbuf->tail[0].iov_len;
1143 base -= buf->tail[0].iov_len;
1144 subbuf->tail[0].iov_len = 0;
1151 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
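/*
 * Illustrative sketch (editorial addition): taking a zero-copy view of the
 * payload that follows a fixed-size header, wherever it happens to live
 * (head, pages or tail).  The "hdrlen" split is hypothetical.
 */
static __maybe_unused int example_payload_view(struct xdr_buf *buf,
					       struct xdr_buf *payload,
					       unsigned int hdrlen)
{
	if (hdrlen > buf->len)
		return -EINVAL;
	/* "payload" shares the memory of "buf"; nothing is copied. */
	return xdr_buf_subsegment(buf, payload, hdrlen, buf->len - hdrlen);
}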
1153 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1155 unsigned int this_len;
1157 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1158 memcpy(obj, subbuf->head[0].iov_base, this_len);
1161 this_len = min_t(unsigned int, len, subbuf->page_len);
1163 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1166 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1167 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1170 /* obj is assumed to point to allocated memory of size at least len: */
1171 int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1173 struct xdr_buf subbuf;
1176 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1179 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1182 EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
1184 static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1186 unsigned int this_len;
1188 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1189 memcpy(subbuf->head[0].iov_base, obj, this_len);
1192 this_len = min_t(unsigned int, len, subbuf->page_len);
1194 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1197 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1198 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1201 /* obj is assumed to point to allocated memory of size at least len: */
1202 int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1204 struct xdr_buf subbuf;
1207 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1210 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1213 EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
1216 xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1221 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1224 *obj = be32_to_cpu(raw);
1227 EXPORT_SYMBOL_GPL(xdr_decode_word);
1230 xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
1232 __be32 raw = cpu_to_be32(obj);
1234 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1236 EXPORT_SYMBOL_GPL(xdr_encode_word);
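/*
 * Illustrative sketch (editorial addition): back-filling a 32-bit count at
 * a known offset once the number of entries following it is finally known.
 * The offset bookkeeping is hypothetical.
 */
static __maybe_unused int example_backfill_count(struct xdr_buf *buf,
						 unsigned int offset, u32 count)
{
	return xdr_encode_word(buf, offset, count);
}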
1238 /* Returns 0 on success, or else a negative error code. */
1240 xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1241 struct xdr_array2_desc *desc, int encode)
1243 char *elem = NULL, *c;
1244 unsigned int copied = 0, todo, avail_here;
1245 struct page **ppages = NULL;
1249 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1252 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1253 desc->array_len > desc->array_maxlen ||
1254 (unsigned long) base + 4 + desc->array_len *
1255 desc->elem_size > buf->len)
1263 todo = desc->array_len * desc->elem_size;
1266 if (todo && base < buf->head->iov_len) {
1267 c = buf->head->iov_base + base;
1268 avail_here = min_t(unsigned int, todo,
1269 buf->head->iov_len - base);
1272 while (avail_here >= desc->elem_size) {
1273 err = desc->xcode(desc, c);
1276 c += desc->elem_size;
1277 avail_here -= desc->elem_size;
1281 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1287 err = desc->xcode(desc, elem);
1290 memcpy(c, elem, avail_here);
1292 memcpy(elem, c, avail_here);
1293 copied = avail_here;
1295 base = buf->head->iov_len; /* align to start of pages */
1298 /* process pages array */
1299 base -= buf->head->iov_len;
1300 if (todo && base < buf->page_len) {
1301 unsigned int avail_page;
1303 avail_here = min(todo, buf->page_len - base);
1306 base += buf->page_base;
1307 ppages = buf->pages + (base >> PAGE_SHIFT);
1309 avail_page = min_t(unsigned int, PAGE_SIZE - base,
1311 c = kmap(*ppages) + base;
1313 while (avail_here) {
1314 avail_here -= avail_page;
1315 if (copied || avail_page < desc->elem_size) {
1316 unsigned int l = min(avail_page,
1317 desc->elem_size - copied);
1319 elem = kmalloc(desc->elem_size,
1327 err = desc->xcode(desc, elem);
1331 memcpy(c, elem + copied, l);
1333 if (copied == desc->elem_size)
1336 memcpy(elem + copied, c, l);
1338 if (copied == desc->elem_size) {
1339 err = desc->xcode(desc, elem);
1348 while (avail_page >= desc->elem_size) {
1349 err = desc->xcode(desc, c);
1352 c += desc->elem_size;
1353 avail_page -= desc->elem_size;
1356 unsigned int l = min(avail_page,
1357 desc->elem_size - copied);
1359 elem = kmalloc(desc->elem_size,
1367 err = desc->xcode(desc, elem);
1371 memcpy(c, elem + copied, l);
1373 if (copied == desc->elem_size)
1376 memcpy(elem + copied, c, l);
1378 if (copied == desc->elem_size) {
1379 err = desc->xcode(desc, elem);
1392 avail_page = min(avail_here,
1393 (unsigned int) PAGE_SIZE);
1395 base = buf->page_len; /* align to start of tail */
1399 base -= buf->page_len;
1401 c = buf->tail->iov_base + base;
1403 unsigned int l = desc->elem_size - copied;
1406 memcpy(c, elem + copied, l);
1408 memcpy(elem + copied, c, l);
1409 err = desc->xcode(desc, elem);
1417 err = desc->xcode(desc, c);
1420 c += desc->elem_size;
1421 todo -= desc->elem_size;
1434 xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1435 struct xdr_array2_desc *desc)
1437 if (base >= buf->len)
1440 return xdr_xcode_array2(buf, base, desc, 0);
1442 EXPORT_SYMBOL_GPL(xdr_decode_array2);
1445 xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1446 struct xdr_array2_desc *desc)
1448 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1449 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1452 return xdr_xcode_array2(buf, base, desc, 1);
1454 EXPORT_SYMBOL_GPL(xdr_encode_array2);
1457 xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1458 int (*actor)(struct scatterlist *, void *), void *data)
1461 unsigned int page_len, thislen, page_offset;
1462 struct scatterlist sg[1];
1464 sg_init_table(sg, 1);
1466 if (offset >= buf->head[0].iov_len) {
1467 offset -= buf->head[0].iov_len;
1469 thislen = buf->head[0].iov_len - offset;
1472 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1473 ret = actor(sg, data);
1482 if (offset >= buf->page_len) {
1483 offset -= buf->page_len;
1485 page_len = buf->page_len - offset;
1489 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
1490 i = (offset + buf->page_base) >> PAGE_SHIFT;
1491 thislen = PAGE_SIZE - page_offset;
1493 if (thislen > page_len)
1495 sg_set_page(sg, buf->pages[i], thislen, page_offset);
1496 ret = actor(sg, data);
1499 page_len -= thislen;
1502 thislen = PAGE_SIZE;
1503 } while (page_len != 0);
1508 if (offset < buf->tail[0].iov_len) {
1509 thislen = buf->tail[0].iov_len - offset;
1512 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1513 ret = actor(sg, data);
1521 EXPORT_SYMBOL_GPL(xdr_process_buf);
1524 * xdr_stream_decode_opaque - Decode variable length opaque
1525 * @xdr: pointer to xdr_stream
1526 * @ptr: location to store opaque data
1527 * @size: size of storage buffer @ptr
1530 * On success, returns size of object stored in *@ptr
1531 * %-EBADMSG on XDR buffer overflow
1532 * %-EMSGSIZE on overflow of storage buffer @ptr
1534 ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1539 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1542 memcpy(ptr, p, ret);
1545 EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1548 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1549 * @xdr: pointer to xdr_stream
1550 * @ptr: location to store pointer to opaque data
1551 * @maxlen: maximum acceptable object size
1552 * @gfp_flags: GFP mask to use
1555 * On success, returns size of object stored in *@ptr
1556 * %-EBADMSG on XDR buffer overflow
1557 * %-EMSGSIZE if the size of the object would exceed @maxlen
1558 * %-ENOMEM on memory allocation failure
1560 ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1561 size_t maxlen, gfp_t gfp_flags)
1566 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1568 *ptr = kmemdup(p, ret, gfp_flags);
1576 EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1579 * xdr_stream_decode_string - Decode variable length string
1580 * @xdr: pointer to xdr_stream
1581 * @str: location to store string
1582 * @size: size of storage buffer @str
1585 * On success, returns length of NUL-terminated string stored in *@str
1586 * %-EBADMSG on XDR buffer overflow
1587 * %-EMSGSIZE on overflow of storage buffer @str
1589 ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1594 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1596 memcpy(str, p, ret);
1603 EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
1606 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1607 * @xdr: pointer to xdr_stream
1608 * @str: location to store pointer to string
1609 * @maxlen: maximum acceptable string length
1610 * @gfp_flags: GFP mask to use
1613 * On success, returns length of NUL-terminated string stored in *@str
1614 * %-EBADMSG on XDR buffer overflow
1615 * %-EMSGSIZE if the size of the string would exceed @maxlen
1616 * %-ENOMEM on memory allocation failure
1618 ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1619 size_t maxlen, gfp_t gfp_flags)
1624 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1626 char *s = kmalloc(ret + 1, gfp_flags);
1638 EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
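/*
 * Illustrative sketch (editorial addition): duplicating a decoded string.
 * On success, *name points to a NUL-terminated copy owned by the caller,
 * which must eventually kfree() it; the 256-byte limit is hypothetical.
 */
static __maybe_unused ssize_t example_decode_owner(struct xdr_stream *xdr,
						   char **name)
{
	return xdr_stream_decode_string_dup(xdr, name, 256, GFP_KERNEL);
}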