/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
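/*
 * Illustrative sketch, not part of the original file: round-trip a netobj
 * through a caller-supplied, quad-aligned buffer.  The "example_" name is
 * hypothetical; on success @out->data points into @scratch.
 */
static inline bool example_netobj_roundtrip(__be32 *scratch,
					    const struct xdr_netobj *in,
					    struct xdr_netobj *out)
{
	__be32 *end = xdr_encode_netobj(scratch, in);

	/* decode re-reads the length word written by the encode above */
	return xdr_decode_netobj(scratch, out) == end;
}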
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
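/*
 * Illustrative sketch, not part of the original file: emit a counted byte
 * array followed by a fixed-size verifier, showing the difference between
 * the two opaque helpers.  The "example_" name and the 8-byte verifier are
 * hypothetical.
 */
static inline __be32 *example_encode_two_opaques(__be32 *p, const u8 *data,
						 unsigned int len,
						 const u8 verf[8])
{
	p = xdr_encode_opaque(p, data, len);	    /* length word + data + pad */
	return xdr_encode_opaque_fixed(p, verf, 8); /* data + pad, no length word */
}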
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
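/*
 * Illustrative sketch, not part of the original file: splice a page vector
 * into a reply buffer the way a READ-style caller might.  After the call,
 * head[0] covers the first 'offset' bytes, the pages carry 'count' payload
 * bytes, and the rest of the original head kvec becomes tail[0].  The
 * "example_" name is hypothetical.
 */
static inline void example_splice_pages(struct xdr_buf *buf, unsigned int offset,
					struct page **pages, unsigned int count)
{
	xdr_inline_pages(buf, offset, pages, 0, count);
}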
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
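/*
 * Illustrative encode sketch, not part of the original file: reserve
 * stream space for one 32-bit word plus a variable-length opaque, then
 * emit both.  The "example_" name is hypothetical; -EMSGSIZE mirrors how
 * callers typically report a failed xdr_reserve_space().
 */
static inline int example_encode_u32_and_opaque(struct xdr_stream *xdr, u32 value,
						const void *data, unsigned int len)
{
	__be32 *p;

	/* 4 bytes for 'value', 4 for the opaque length, quad-aligned data */
	p = xdr_reserve_space(xdr, 4 + 4 + (XDR_QUADLEN(len) << 2));
	if (p == NULL)
		return -EMSGSIZE;
	*p++ = cpu_to_be32(value);
	xdr_encode_opaque(p, data, len);
	return 0;
}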
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
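/*
 * Illustrative decode sketch, not part of the original file: read a 32-bit
 * length and then the opaque body it announces.  If a scratch buffer was
 * attached via xdr_set_scratch_buffer(), the second call succeeds even when
 * the body straddles a page boundary.  The "example_" name is hypothetical.
 */
static inline int example_decode_counted_opaque(struct xdr_stream *xdr,
						void *dst, u32 maxlen)
{
	__be32 *p;
	u32 len;

	p = xdr_inline_decode(xdr, 4);
	if (p == NULL)
		return -EIO;
	len = be32_to_cpup(p);
	if (len > maxlen)
		return -EIO;
	p = xdr_inline_decode(xdr, len);
	if (p == NULL)
		return -EIO;
	memcpy(dst, p, len);
	return 0;
}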
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
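/*
 * Illustrative sketch, not part of the original file: the usual
 * xdr_read_pages() calling pattern, where a decoder reads the announced
 * byte count and then shifts that payload into the page vector.  The
 * "example_" name is hypothetical.
 */
static inline int example_decode_read_body(struct xdr_stream *xdr,
					   unsigned int *count)
{
	__be32 *p = xdr_inline_decode(xdr, 4);

	if (p == NULL)
		return -EIO;
	*count = be32_to_cpup(p);
	/* may return less than requested if the reply was short */
	*count = xdr_read_pages(xdr, *count);
	return 0;
}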
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of page data, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
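/*
 * Illustrative sketch, not part of the original file: pull an 8-byte
 * big-endian cookie out of an xdr_buf at a given offset, wherever it
 * happens to live (head, pages, or tail).  The "example_" name is
 * hypothetical.
 */
static inline int example_read_cookie(struct xdr_buf *buf, unsigned int base,
				      u64 *cookie)
{
	__be64 raw;
	int err = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(raw));

	if (err)
		return err;
	*cookie = be64_to_cpu(raw);
	return 0;
}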
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
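/*
 * Illustrative sketch, not part of the original file: patch a previously
 * reserved length field once its final value is known, then read it back
 * as a sanity check.  The "example_" name is hypothetical.
 */
static inline int example_fixup_length_field(struct xdr_buf *buf,
					     unsigned int base, u32 final_len)
{
	u32 check;
	int err = xdr_encode_word(buf, base, final_len);

	if (err)
		return err;
	err = xdr_decode_word(buf, base, &check);
	if (err)
		return err;
	return check == final_len ? 0 : -EIO;
}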
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * whole buffer.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			} else {
				while (avail_page >= desc->elem_size) {
					err = desc->xcode(desc, c);
					if (err)
						goto out;
					c += desc->elem_size;
					avail_page -= desc->elem_size;
				}
				if (avail_page) {
					unsigned int l = min(avail_page,
						desc->elem_size - copied);

					if (!elem) {
						elem = kmalloc(desc->elem_size,
							       GFP_KERNEL);
						err = -ENOMEM;
						if (!elem)
							goto out;
					}
					if (encode) {
						if (!copied) {
							err = desc->xcode(desc,
									  elem);
							if (err)
								goto out;
						}
						memcpy(c, elem + copied, l);
						copied += l;
						if (copied == desc->elem_size)
							copied = 0;
					} else {
						memcpy(elem + copied, c, l);
						copied += l;
						if (copied == desc->elem_size) {
							err = desc->xcode(desc,
									  elem);
							if (err)
								goto out;
							copied = 0;
						}
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied != 0) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
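/*
 * Illustrative sketch, not part of the original file: decode an XDR array
 * of 32-bit words with xdr_decode_array2().  The xcode callback is invoked
 * once per element with a linear pointer to it, even when the element
 * straddles buffer segments.  All "example_" names are hypothetical.
 */
struct example_u32_sum {
	struct xdr_array2_desc desc;	/* must be first: callbacks get &desc */
	u32 sum;
};

static int example_xcode_u32(struct xdr_array2_desc *desc, void *elem)
{
	struct example_u32_sum *ctx = (struct example_u32_sum *)desc;

	ctx->sum += be32_to_cpup(elem);
	return 0;			/* non-zero aborts the walk */
}

static inline int example_sum_u32_array(struct xdr_buf *buf, unsigned int base,
					u32 *sum)
{
	struct example_u32_sum ctx = {
		.desc = {
			.elem_size = 4,
			.array_maxlen = 1024,	/* arbitrary sanity cap */
			.xcode = example_xcode_u32,
		},
	};
	int err = xdr_decode_array2(buf, base, &ctx.desc);

	if (err)
		return err;
	*sum = ctx.sum;
	return 0;
}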
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist      sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
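/*
 * Illustrative sketch, not part of the original file: an actor that merely
 * tallies how many bytes xdr_process_buf() presents, demonstrating the
 * one-scatterlist-entry-at-a-time contract.  "example_" names are
 * hypothetical; real callers typically feed each entry to a hash.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	*(unsigned int *)data += sg->length;
	return 0;	/* returning non-zero would abort the walk */
}

static inline int example_count_bytes(struct xdr_buf *buf, unsigned int offset,
				      unsigned int len, unsigned int *total)
{
	*total = 0;
	return xdr_process_buf(buf, offset, len, example_count_actor, total);
}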