[linux.git] / net / sunrpc / xdr.c
1da177e4
LT
1/*
2 * linux/net/sunrpc/xdr.c
3 *
4 * Generic XDR support.
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
7 */
8
a246b010 9#include <linux/module.h>
5a0e3ad6 10#include <linux/slab.h>
1da177e4 11#include <linux/types.h>
1da177e4
LT
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/pagemap.h>
15#include <linux/errno.h>
1da177e4
LT
16#include <linux/sunrpc/xdr.h>
17#include <linux/sunrpc/msg_prot.h>
9d96acbc 18#include <linux/bvec.h>
1da177e4
LT
19
20/*
21 * XDR functions for basic NFS types
22 */
d8ed029d
AD
23__be32 *
24xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
1da177e4
LT
25{
26 unsigned int quadlen = XDR_QUADLEN(obj->len);
27
28 p[quadlen] = 0; /* zero trailing bytes */
9f162d2a 29 *p++ = cpu_to_be32(obj->len);
1da177e4
LT
30 memcpy(p, obj->data, obj->len);
31 return p + XDR_QUADLEN(obj->len);
32}
468039ee 33EXPORT_SYMBOL_GPL(xdr_encode_netobj);
1da177e4 34
d8ed029d
AD
35__be32 *
36xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
1da177e4
LT
37{
38 unsigned int len;
39
98866b5a 40 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
1da177e4
LT
41 return NULL;
42 obj->len = len;
43 obj->data = (u8 *) p;
44 return p + XDR_QUADLEN(len);
45}
468039ee 46EXPORT_SYMBOL_GPL(xdr_decode_netobj);
1da177e4
LT
47
48/**
49 * xdr_encode_opaque_fixed - Encode fixed length opaque data
4dc3b16b
PP
50 * @p: pointer to current position in XDR buffer.
51 * @ptr: pointer to data to encode (or NULL)
52 * @nbytes: size of data.
1da177e4
LT
53 *
54 * Copy the array of data of length nbytes at ptr to the XDR buffer
55 * at position p, then align to the next 32-bit boundary by padding
56 * with zero bytes (see RFC1832).
57 * Note: if ptr is NULL, only the padding is performed.
58 *
59 * Returns the updated current XDR buffer position
60 *
61 */
d8ed029d 62__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4
LT
63{
64 if (likely(nbytes != 0)) {
65 unsigned int quadlen = XDR_QUADLEN(nbytes);
66 unsigned int padding = (quadlen << 2) - nbytes;
67
68 if (ptr != NULL)
69 memcpy(p, ptr, nbytes);
70 if (padding != 0)
71 memset((char *)p + nbytes, 0, padding);
72 p += quadlen;
73 }
74 return p;
75}
468039ee 76EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
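A minimal illustration of the padding rule described in the comment above (the caller, its buffer pointer p, and the sample data are hypothetical, not part of this file): a 5-byte object occupies XDR_QUADLEN(5) == 2 quads, so three trailing zero bytes are written.

	static const u8 data[5] = { 1, 2, 3, 4, 5 };
	__be32 *next;

	/* p must already have room for XDR_QUADLEN(sizeof(data)) quads */
	next = xdr_encode_opaque_fixed(p, data, sizeof(data));
	/* next == p + 2; the last three bytes before 'next' are zero padding */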
1da177e4
LT
77
78/**
79 * xdr_encode_opaque - Encode variable length opaque data
4dc3b16b
PP
80 * @p: pointer to current position in XDR buffer.
81 * @ptr: pointer to data to encode (or NULL)
82 * @nbytes: size of data.
1da177e4
LT
83 *
84 * Returns the updated current XDR buffer position
85 */
d8ed029d 86__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4 87{
9f162d2a 88 *p++ = cpu_to_be32(nbytes);
1da177e4
LT
89 return xdr_encode_opaque_fixed(p, ptr, nbytes);
90}
468039ee 91EXPORT_SYMBOL_GPL(xdr_encode_opaque);
1da177e4 92
d8ed029d
AD
93__be32 *
94xdr_encode_string(__be32 *p, const char *string)
1da177e4
LT
95{
96 return xdr_encode_array(p, string, strlen(string));
97}
468039ee 98EXPORT_SYMBOL_GPL(xdr_encode_string);
1da177e4 99
d8ed029d 100__be32 *
e5cff482
CL
101xdr_decode_string_inplace(__be32 *p, char **sp,
102 unsigned int *lenp, unsigned int maxlen)
1da177e4 103{
e5cff482 104 u32 len;
1da177e4 105
98866b5a 106 len = be32_to_cpu(*p++);
e5cff482 107 if (len > maxlen)
1da177e4
LT
108 return NULL;
109 *lenp = len;
110 *sp = (char *) p;
111 return p + XDR_QUADLEN(len);
112}
468039ee 113EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
1da177e4 114
b4687da7
CL
115/**
116 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
117 * @buf: XDR buffer where string resides
118 * @len: length of string, in bytes
119 *
120 */
121void
122xdr_terminate_string(struct xdr_buf *buf, const u32 len)
123{
124 char *kaddr;
125
b8541786 126 kaddr = kmap_atomic(buf->pages[0]);
b4687da7 127 kaddr[buf->page_base + len] = '\0';
b8541786 128 kunmap_atomic(kaddr);
b4687da7 129}
0d961aa9 130EXPORT_SYMBOL_GPL(xdr_terminate_string);
b4687da7 131
9d96acbc
TM
132size_t
133xdr_buf_pagecount(struct xdr_buf *buf)
134{
135 if (!buf->page_len)
136 return 0;
137 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
138}
139
140int
141xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
142{
143 size_t i, n = xdr_buf_pagecount(buf);
144
145 if (n != 0 && buf->bvec == NULL) {
146 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
147 if (!buf->bvec)
148 return -ENOMEM;
149 for (i = 0; i < n; i++) {
150 buf->bvec[i].bv_page = buf->pages[i];
151 buf->bvec[i].bv_len = PAGE_SIZE;
152 buf->bvec[i].bv_offset = 0;
153 }
154 }
155 return 0;
156}
157
158void
159xdr_free_bvec(struct xdr_buf *buf)
160{
161 kfree(buf->bvec);
162 buf->bvec = NULL;
163}
164
1da177e4
LT
165void
166xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
167 struct page **pages, unsigned int base, unsigned int len)
168{
169 struct kvec *head = xdr->head;
170 struct kvec *tail = xdr->tail;
171 char *buf = (char *)head->iov_base;
172 unsigned int buflen = head->iov_len;
173
174 head->iov_len = offset;
175
176 xdr->pages = pages;
177 xdr->page_base = base;
178 xdr->page_len = len;
179
180 tail->iov_base = buf + offset;
181 tail->iov_len = buflen - offset;
182
183 xdr->buflen += len;
184}
468039ee 185EXPORT_SYMBOL_GPL(xdr_inline_pages);
1da177e4 186
1da177e4
LT
187/*
188 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
2c53040f
BH
189 */
190
191/**
1da177e4
LT
192 * _shift_data_right_pages
193 * @pages: vector of pages containing both the source and dest memory area.
194 * @pgto_base: page vector address of destination
195 * @pgfrom_base: page vector address of source
196 * @len: number of bytes to copy
197 *
198 * Note: the addresses pgto_base and pgfrom_base are both calculated in
199 * the same way:
200 * if a memory area starts at byte 'base' in page 'pages[i]',
ea1754a0 201 * then its address is given as (i << PAGE_SHIFT) + base
1da177e4
LT
202 * Also note: pgfrom_base must be < pgto_base, but the memory areas
203 * they point to may overlap.
204 */
205static void
206_shift_data_right_pages(struct page **pages, size_t pgto_base,
207 size_t pgfrom_base, size_t len)
208{
209 struct page **pgfrom, **pgto;
210 char *vfrom, *vto;
211 size_t copy;
212
213 BUG_ON(pgto_base <= pgfrom_base);
214
215 pgto_base += len;
216 pgfrom_base += len;
217
09cbfeaf
KS
218 pgto = pages + (pgto_base >> PAGE_SHIFT);
219 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
1da177e4 220
09cbfeaf
KS
221 pgto_base &= ~PAGE_MASK;
222 pgfrom_base &= ~PAGE_MASK;
1da177e4
LT
223
224 do {
225 /* Are any pointers crossing a page boundary? */
226 if (pgto_base == 0) {
09cbfeaf 227 pgto_base = PAGE_SIZE;
1da177e4
LT
228 pgto--;
229 }
230 if (pgfrom_base == 0) {
09cbfeaf 231 pgfrom_base = PAGE_SIZE;
1da177e4
LT
232 pgfrom--;
233 }
234
235 copy = len;
236 if (copy > pgto_base)
237 copy = pgto_base;
238 if (copy > pgfrom_base)
239 copy = pgfrom_base;
240 pgto_base -= copy;
241 pgfrom_base -= copy;
242
b8541786 243 vto = kmap_atomic(*pgto);
347e2233
TM
244 if (*pgto != *pgfrom) {
245 vfrom = kmap_atomic(*pgfrom);
246 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
247 kunmap_atomic(vfrom);
248 } else
249 memmove(vto + pgto_base, vto + pgfrom_base, copy);
bce3481c 250 flush_dcache_page(*pgto);
b8541786 251 kunmap_atomic(vto);
1da177e4
LT
252
253 } while ((len -= copy) != 0);
1da177e4
LT
254}
255
2c53040f 256/**
1da177e4
LT
257 * _copy_to_pages
258 * @pages: array of pages
259 * @pgbase: page vector address of destination
260 * @p: pointer to source data
261 * @len: length
262 *
263 * Copies data from an arbitrary memory location into an array of pages
264 * The copy is assumed to be non-overlapping.
265 */
266static void
267_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
268{
269 struct page **pgto;
270 char *vto;
271 size_t copy;
272
09cbfeaf
KS
273 pgto = pages + (pgbase >> PAGE_SHIFT);
274 pgbase &= ~PAGE_MASK;
1da177e4 275
daeba89d 276 for (;;) {
09cbfeaf 277 copy = PAGE_SIZE - pgbase;
1da177e4
LT
278 if (copy > len)
279 copy = len;
280
b8541786 281 vto = kmap_atomic(*pgto);
1da177e4 282 memcpy(vto + pgbase, p, copy);
b8541786 283 kunmap_atomic(vto);
1da177e4 284
daeba89d
TM
285 len -= copy;
286 if (len == 0)
287 break;
288
1da177e4 289 pgbase += copy;
09cbfeaf 290 if (pgbase == PAGE_SIZE) {
1da177e4
LT
291 flush_dcache_page(*pgto);
292 pgbase = 0;
293 pgto++;
294 }
295 p += copy;
daeba89d 296 }
1da177e4
LT
297 flush_dcache_page(*pgto);
298}
299
2c53040f 300/**
1da177e4
LT
301 * _copy_from_pages
302 * @p: pointer to destination
303 * @pages: array of pages
304 * @pgbase: offset of source data
305 * @len: length
306 *
307 * Copies data into an arbitrary memory location from an array of pages
308 * The copy is assumed to be non-overlapping.
309 */
bf118a34 310void
1da177e4
LT
311_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
312{
313 struct page **pgfrom;
314 char *vfrom;
315 size_t copy;
316
09cbfeaf
KS
317 pgfrom = pages + (pgbase >> PAGE_SHIFT);
318 pgbase &= ~PAGE_MASK;
1da177e4
LT
319
320 do {
09cbfeaf 321 copy = PAGE_SIZE - pgbase;
1da177e4
LT
322 if (copy > len)
323 copy = len;
324
b8541786 325 vfrom = kmap_atomic(*pgfrom);
1da177e4 326 memcpy(p, vfrom + pgbase, copy);
b8541786 327 kunmap_atomic(vfrom);
1da177e4
LT
328
329 pgbase += copy;
09cbfeaf 330 if (pgbase == PAGE_SIZE) {
1da177e4
LT
331 pgbase = 0;
332 pgfrom++;
333 }
334 p += copy;
335
336 } while ((len -= copy) != 0);
337}
bf118a34 338EXPORT_SYMBOL_GPL(_copy_from_pages);
1da177e4 339
2c53040f 340/**
1da177e4
LT
341 * xdr_shrink_bufhead
342 * @buf: xdr_buf
343 * @len: bytes to remove from buf->head[0]
344 *
cca5172a 345 * Shrinks XDR buffer's header kvec buf->head[0] by
1da177e4
LT
346 * 'len' bytes. The extra data is not lost, but is instead
347 * moved into the inlined pages and/or the tail.
348 */
349static void
350xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
351{
352 struct kvec *head, *tail;
353 size_t copy, offs;
354 unsigned int pglen = buf->page_len;
355
356 tail = buf->tail;
357 head = buf->head;
18e624ad
WAA
358
359 WARN_ON_ONCE(len > head->iov_len);
360 if (len > head->iov_len)
361 len = head->iov_len;
1da177e4
LT
362
363 /* Shift the tail first */
364 if (tail->iov_len != 0) {
365 if (tail->iov_len > len) {
366 copy = tail->iov_len - len;
367 memmove((char *)tail->iov_base + len,
368 tail->iov_base, copy);
369 }
370 /* Copy from the inlined pages into the tail */
371 copy = len;
372 if (copy > pglen)
373 copy = pglen;
374 offs = len - copy;
375 if (offs >= tail->iov_len)
376 copy = 0;
377 else if (copy > tail->iov_len - offs)
378 copy = tail->iov_len - offs;
379 if (copy != 0)
380 _copy_from_pages((char *)tail->iov_base + offs,
381 buf->pages,
382 buf->page_base + pglen + offs - len,
383 copy);
384 /* Do we also need to copy data from the head into the tail ? */
385 if (len > pglen) {
386 offs = copy = len - pglen;
387 if (copy > tail->iov_len)
388 copy = tail->iov_len;
389 memcpy(tail->iov_base,
390 (char *)head->iov_base +
391 head->iov_len - offs,
392 copy);
393 }
394 }
395 /* Now handle pages */
396 if (pglen != 0) {
397 if (pglen > len)
398 _shift_data_right_pages(buf->pages,
399 buf->page_base + len,
400 buf->page_base,
401 pglen - len);
402 copy = len;
403 if (len > pglen)
404 copy = pglen;
405 _copy_to_pages(buf->pages, buf->page_base,
406 (char *)head->iov_base + head->iov_len - len,
407 copy);
408 }
409 head->iov_len -= len;
410 buf->buflen -= len;
411 /* Have we truncated the message? */
412 if (buf->len > buf->buflen)
413 buf->len = buf->buflen;
414}
415
2c53040f 416/**
1da177e4
LT
417 * xdr_shrink_pagelen
418 * @buf: xdr_buf
419 * @len: bytes to remove from buf->pages
420 *
cca5172a 421 * Shrinks XDR buffer's page array buf->pages by
1da177e4
LT
422 * 'len' bytes. The extra data is not lost, but is instead
423 * moved into the tail.
424 */
425static void
426xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
427{
428 struct kvec *tail;
429 size_t copy;
1da177e4 430 unsigned int pglen = buf->page_len;
cf187c2d 431 unsigned int tailbuf_len;
1da177e4
LT
432
433 tail = buf->tail;
434 BUG_ON(len > pglen);
435
cf187c2d
TM
436 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
437
1da177e4 438 /* Shift the tail first */
cf187c2d
TM
439 if (tailbuf_len != 0) {
440 unsigned int free_space = tailbuf_len - tail->iov_len;
441
442 if (len < free_space)
443 free_space = len;
444 tail->iov_len += free_space;
445
42d6d8ab 446 copy = len;
1da177e4 447 if (tail->iov_len > len) {
0fe62a35 448 char *p = (char *)tail->iov_base + len;
2e29ebb8 449 memmove(p, tail->iov_base, tail->iov_len - len);
42d6d8ab 450 } else
1da177e4 451 copy = tail->iov_len;
42d6d8ab 452 /* Copy from the inlined pages into the tail */
1da177e4
LT
453 _copy_from_pages((char *)tail->iov_base,
454 buf->pages, buf->page_base + pglen - len,
455 copy);
456 }
457 buf->page_len -= len;
458 buf->buflen -= len;
459 /* Have we truncated the message? */
460 if (buf->len > buf->buflen)
461 buf->len = buf->buflen;
462}
463
464void
465xdr_shift_buf(struct xdr_buf *buf, size_t len)
466{
467 xdr_shrink_bufhead(buf, len);
468}
468039ee 469EXPORT_SYMBOL_GPL(xdr_shift_buf);
1da177e4 470
4517d526
TM
471/**
472 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
473 * @xdr: pointer to struct xdr_stream
474 */
475unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
476{
477 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
478}
479EXPORT_SYMBOL_GPL(xdr_stream_pos);
480
1da177e4
LT
481/**
482 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
483 * @xdr: pointer to xdr_stream struct
484 * @buf: pointer to XDR buffer in which to encode data
485 * @p: current pointer inside XDR buffer
486 *
487 * Note: at the moment the RPC client only passes the length of our
488 * scratch buffer in the xdr_buf's header kvec. Previously this
489 * meant we needed to call xdr_adjust_iovec() after encoding the
490 * data. With the new scheme, the xdr_stream manages the details
491 * of the buffer length, and takes care of adjusting the kvec
492 * length for us.
493 */
d8ed029d 494void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
1da177e4
LT
495{
496 struct kvec *iov = buf->head;
334ccfd5 497 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
1da177e4 498
2825a7f9 499 xdr_set_scratch_buffer(xdr, NULL, 0);
334ccfd5 500 BUG_ON(scratch_len < 0);
1da177e4
LT
501 xdr->buf = buf;
502 xdr->iov = iov;
d8ed029d
AD
503 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
504 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
334ccfd5
TM
505 BUG_ON(iov->iov_len > scratch_len);
506
507 if (p != xdr->p && p != NULL) {
508 size_t len;
509
510 BUG_ON(p < xdr->p || p > xdr->end);
511 len = (char *)p - (char *)xdr->p;
512 xdr->p = p;
513 buf->len += len;
514 iov->iov_len += len;
515 }
1da177e4 516}
468039ee 517EXPORT_SYMBOL_GPL(xdr_init_encode);
1da177e4 518
2825a7f9
BF
519/**
520 * xdr_commit_encode - Ensure all data is written to buffer
521 * @xdr: pointer to xdr_stream
522 *
523 * We handle encoding across page boundaries by giving the caller a
524 * temporary location to write to, then later copying the data into
525 * place; xdr_commit_encode does that copying.
526 *
527 * Normally the caller doesn't need to call this directly, as the
528 * following xdr_reserve_space will do it. But an explicit call may be
529 * required at the end of encoding, or any other time when the xdr_buf
530 * data might be read.
531 */
532void xdr_commit_encode(struct xdr_stream *xdr)
533{
534 int shift = xdr->scratch.iov_len;
535 void *page;
536
537 if (shift == 0)
538 return;
539 page = page_address(*xdr->page_ptr);
540 memcpy(xdr->scratch.iov_base, page, shift);
541 memmove(page, page + shift, (void *)xdr->p - page);
542 xdr->scratch.iov_len = 0;
543}
544EXPORT_SYMBOL_GPL(xdr_commit_encode);
545
22cb4385
TM
546static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
547 size_t nbytes)
2825a7f9 548{
025911a5 549 __be32 *p;
2825a7f9
BF
550 int space_left;
551 int frag1bytes, frag2bytes;
552
553 if (nbytes > PAGE_SIZE)
554 return NULL; /* Bigger buffers require special handling */
555 if (xdr->buf->len + nbytes > xdr->buf->buflen)
556 return NULL; /* Sorry, we're totally out of space */
557 frag1bytes = (xdr->end - xdr->p) << 2;
558 frag2bytes = nbytes - frag1bytes;
559 if (xdr->iov)
560 xdr->iov->iov_len += frag1bytes;
05638dc7 561 else
2825a7f9 562 xdr->buf->page_len += frag1bytes;
05638dc7 563 xdr->page_ptr++;
2825a7f9
BF
564 xdr->iov = NULL;
565 /*
566 * If the last encode didn't end exactly on a page boundary, the
567 * next one will straddle boundaries. Encode into the next
568 * page, then copy it back later in xdr_commit_encode. We use
569 * the "scratch" iov to track any temporarily unused fragment of
570 * space at the end of the previous buffer:
571 */
572 xdr->scratch.iov_base = xdr->p;
573 xdr->scratch.iov_len = frag1bytes;
574 p = page_address(*xdr->page_ptr);
575 /*
576 * Note this is where the next encode will start after we've
577 * shifted this one back:
578 */
579 xdr->p = (void *)p + frag2bytes;
580 space_left = xdr->buf->buflen - xdr->buf->len;
581 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
582 xdr->buf->page_len += frag2bytes;
583 xdr->buf->len += nbytes;
584 return p;
585}
586
1da177e4
LT
587/**
588 * xdr_reserve_space - Reserve buffer space for sending
589 * @xdr: pointer to xdr_stream
590 * @nbytes: number of bytes to reserve
591 *
592 * Checks that we have enough buffer space to encode 'nbytes' more
593 * bytes of data. If so, update the total xdr_buf length, and
594 * adjust the length of the current kvec.
595 */
d8ed029d 596__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
1da177e4 597{
d8ed029d
AD
598 __be32 *p = xdr->p;
599 __be32 *q;
1da177e4 600
2825a7f9 601 xdr_commit_encode(xdr);
1da177e4
LT
602 /* align nbytes on the next 32-bit boundary */
603 nbytes += 3;
604 nbytes &= ~3;
605 q = p + (nbytes >> 2);
606 if (unlikely(q > xdr->end || q < p))
2825a7f9 607 return xdr_get_next_encode_buffer(xdr, nbytes);
1da177e4 608 xdr->p = q;
2825a7f9
BF
609 if (xdr->iov)
610 xdr->iov->iov_len += nbytes;
611 else
612 xdr->buf->page_len += nbytes;
1da177e4
LT
613 xdr->buf->len += nbytes;
614 return p;
615}
468039ee 616EXPORT_SYMBOL_GPL(xdr_reserve_space);
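A sketch of the usual encode-side calling pattern built on xdr_reserve_space() and the opaque encoders above (the argument structure and error code are hypothetical; real callers are the RPC client/server encode routines):

	__be32 *p;

	/* room for a 32-bit length word plus the quad-aligned body */
	p = xdr_reserve_space(xdr, 4 + (XDR_QUADLEN(arg->len) << 2));
	if (unlikely(!p))
		return -EMSGSIZE;	/* send buffer exhausted */
	xdr_encode_opaque(p, arg->data, arg->len);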
1da177e4 617
3e19ce76
BF
618/**
619 * xdr_truncate_encode - truncate an encode buffer
620 * @xdr: pointer to xdr_stream
621 * @len: new length of buffer
622 *
623 * Truncates the xdr stream, so that xdr->buf->len == len,
624 * and xdr->p points at offset len from the start of the buffer, and
625 * head, tail, and page lengths are adjusted to correspond.
626 *
627 * If this means moving xdr->p to a different buffer, we assume that
628 * the end pointer should be set to the end of the current page,
629 * except in the case of the head buffer when we assume the head
630 * buffer's current length represents the end of the available buffer.
631 *
632 * This is *not* safe to use on a buffer that already has inlined page
633 * cache pages (as in a zero-copy server read reply), except for the
634 * simple case of truncating from one position in the tail to another.
635 *
636 */
637void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
638{
639 struct xdr_buf *buf = xdr->buf;
640 struct kvec *head = buf->head;
641 struct kvec *tail = buf->tail;
642 int fraglen;
49a068f8 643 int new;
3e19ce76
BF
644
645 if (len > buf->len) {
646 WARN_ON_ONCE(1);
647 return;
648 }
2825a7f9 649 xdr_commit_encode(xdr);
3e19ce76
BF
650
651 fraglen = min_t(int, buf->len - len, tail->iov_len);
652 tail->iov_len -= fraglen;
653 buf->len -= fraglen;
ed38c069 654 if (tail->iov_len) {
3e19ce76 655 xdr->p = tail->iov_base + tail->iov_len;
280caac0
BF
656 WARN_ON_ONCE(!xdr->end);
657 WARN_ON_ONCE(!xdr->iov);
3e19ce76
BF
658 return;
659 }
660 WARN_ON_ONCE(fraglen);
661 fraglen = min_t(int, buf->len - len, buf->page_len);
662 buf->page_len -= fraglen;
663 buf->len -= fraglen;
664
665 new = buf->page_base + buf->page_len;
49a068f8
BF
666
667 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
3e19ce76 668
ed38c069 669 if (buf->page_len) {
3e19ce76
BF
670 xdr->p = page_address(*xdr->page_ptr);
671 xdr->end = (void *)xdr->p + PAGE_SIZE;
672 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
280caac0 673 WARN_ON_ONCE(xdr->iov);
3e19ce76
BF
674 return;
675 }
5d7a5bcb 676 if (fraglen)
3e19ce76
BF
677 xdr->end = head->iov_base + head->iov_len;
678 /* (otherwise assume xdr->end is already set) */
5d7a5bcb 679 xdr->page_ptr--;
3e19ce76
BF
680 head->iov_len = len;
681 buf->len = len;
682 xdr->p = head->iov_base + head->iov_len;
683 xdr->iov = buf->head;
684}
685EXPORT_SYMBOL(xdr_truncate_encode);
686
db3f58a9
BF
687/**
688 * xdr_restrict_buflen - decrease available buffer space
689 * @xdr: pointer to xdr_stream
690 * @newbuflen: new maximum number of bytes available
691 *
692 * Adjust our idea of how much space is available in the buffer.
693 * If we've already used too much space in the buffer, returns -1.
694 * If the available space is already smaller than newbuflen, returns 0
695 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
696 * and ensures xdr->end is set at most offset newbuflen from the start
697 * of the buffer.
698 */
699int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
700{
701 struct xdr_buf *buf = xdr->buf;
702 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
703 int end_offset = buf->len + left_in_this_buf;
704
705 if (newbuflen < 0 || newbuflen < buf->len)
706 return -1;
707 if (newbuflen > buf->buflen)
708 return 0;
709 if (newbuflen < end_offset)
710 xdr->end = (void *)xdr->end + newbuflen - end_offset;
711 buf->buflen = newbuflen;
712 return 0;
713}
714EXPORT_SYMBOL(xdr_restrict_buflen);
715
1da177e4
LT
716/**
717 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
718 * @xdr: pointer to xdr_stream
719 * @pages: list of pages
720 * @base: offset of first byte
721 * @len: length of data in bytes
722 *
723 */
724void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
725 unsigned int len)
726{
727 struct xdr_buf *buf = xdr->buf;
728 struct kvec *iov = buf->tail;
729 buf->pages = pages;
730 buf->page_base = base;
731 buf->page_len = len;
732
733 iov->iov_base = (char *)xdr->p;
734 iov->iov_len = 0;
735 xdr->iov = iov;
736
737 if (len & 3) {
738 unsigned int pad = 4 - (len & 3);
739
740 BUG_ON(xdr->p >= xdr->end);
741 iov->iov_base = (char *)xdr->p + (len & 3);
742 iov->iov_len += pad;
743 len += pad;
744 *xdr->p++ = 0;
745 }
746 buf->buflen += len;
747 buf->len += len;
748}
468039ee 749EXPORT_SYMBOL_GPL(xdr_write_pages);
1da177e4 750
6650239a 751static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
1537693c 752 unsigned int len)
6650239a
TM
753{
754 if (len > iov->iov_len)
755 len = iov->iov_len;
1537693c 756 xdr->p = (__be32*)iov->iov_base;
6650239a
TM
757 xdr->end = (__be32*)(iov->iov_base + len);
758 xdr->iov = iov;
759 xdr->page_ptr = NULL;
760}
761
762static int xdr_set_page_base(struct xdr_stream *xdr,
763 unsigned int base, unsigned int len)
764{
765 unsigned int pgnr;
766 unsigned int maxlen;
767 unsigned int pgoff;
768 unsigned int pgend;
769 void *kaddr;
770
771 maxlen = xdr->buf->page_len;
772 if (base >= maxlen)
773 return -EINVAL;
774 maxlen -= base;
775 if (len > maxlen)
776 len = maxlen;
777
778 base += xdr->buf->page_base;
779
780 pgnr = base >> PAGE_SHIFT;
781 xdr->page_ptr = &xdr->buf->pages[pgnr];
782 kaddr = page_address(*xdr->page_ptr);
783
784 pgoff = base & ~PAGE_MASK;
785 xdr->p = (__be32*)(kaddr + pgoff);
786
787 pgend = pgoff + len;
788 if (pgend > PAGE_SIZE)
789 pgend = PAGE_SIZE;
790 xdr->end = (__be32*)(kaddr + pgend);
791 xdr->iov = NULL;
792 return 0;
793}
794
795static void xdr_set_next_page(struct xdr_stream *xdr)
796{
797 unsigned int newbase;
798
799 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
800 newbase -= xdr->buf->page_base;
801
802 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
a6cebd41 803 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
6650239a
TM
804}
805
806static bool xdr_set_next_buffer(struct xdr_stream *xdr)
807{
808 if (xdr->page_ptr != NULL)
809 xdr_set_next_page(xdr);
810 else if (xdr->iov == xdr->buf->head) {
811 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
a6cebd41 812 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
6650239a
TM
813 }
814 return xdr->p != xdr->end;
815}
816
1da177e4
LT
817/**
818 * xdr_init_decode - Initialize an xdr_stream for decoding data.
819 * @xdr: pointer to xdr_stream struct
820 * @buf: pointer to XDR buffer from which to decode data
821 * @p: current pointer inside XDR buffer
822 */
d8ed029d 823void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
1da177e4 824{
1da177e4 825 xdr->buf = buf;
6650239a
TM
826 xdr->scratch.iov_base = NULL;
827 xdr->scratch.iov_len = 0;
bfeea1dc 828 xdr->nwords = XDR_QUADLEN(buf->len);
6650239a 829 if (buf->head[0].iov_len != 0)
1537693c 830 xdr_set_iov(xdr, buf->head, buf->len);
6650239a
TM
831 else if (buf->page_len != 0)
832 xdr_set_page_base(xdr, 0, buf->len);
06ef26a0
BC
833 else
834 xdr_set_iov(xdr, buf->head, buf->len);
bfeea1dc
TM
835 if (p != NULL && p > xdr->p && xdr->end >= p) {
836 xdr->nwords -= p - xdr->p;
1537693c 837 xdr->p = p;
bfeea1dc 838 }
1da177e4 839}
468039ee 840EXPORT_SYMBOL_GPL(xdr_init_decode);
1da177e4 841
f7da7a12 842/**
7ecce75f 843 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
f7da7a12
BH
844 * @xdr: pointer to xdr_stream struct
845 * @buf: pointer to XDR buffer from which to decode data
846 * @pages: list of pages to decode into
847 * @len: length in bytes of buffer in pages
848 */
849void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
850 struct page **pages, unsigned int len)
851{
852 memset(buf, 0, sizeof(*buf));
853 buf->pages = pages;
854 buf->page_len = len;
855 buf->buflen = len;
856 buf->len = len;
857 xdr_init_decode(xdr, buf, NULL);
858}
859EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
860
6650239a 861static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
ba8e452a 862{
bfeea1dc 863 unsigned int nwords = XDR_QUADLEN(nbytes);
ba8e452a 864 __be32 *p = xdr->p;
bfeea1dc 865 __be32 *q = p + nwords;
ba8e452a 866
bfeea1dc 867 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
ba8e452a 868 return NULL;
6650239a 869 xdr->p = q;
bfeea1dc 870 xdr->nwords -= nwords;
ba8e452a
TM
871 return p;
872}
ba8e452a 873
1da177e4 874/**
6650239a
TM
875 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
876 * @xdr: pointer to xdr_stream struct
877 * @buf: pointer to an empty buffer
878 * @buflen: size of 'buf'
879 *
880 * The scratch buffer is used when decoding from an array of pages.
881 * If an xdr_inline_decode() call spans across page boundaries, then
882 * we copy the data into the scratch buffer in order to allow linear
883 * access.
884 */
885void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
886{
887 xdr->scratch.iov_base = buf;
888 xdr->scratch.iov_len = buflen;
889}
890EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
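A sketch of how a decoder might attach scratch space before pulling items that can straddle page boundaries (the allocation strategy and error handling are hypothetical; callers commonly dedicate a whole page):

	char *scratch = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!scratch)
		return -ENOMEM;
	xdr_set_scratch_buffer(xdr, scratch, PAGE_SIZE);
	/* subsequent xdr_inline_decode() calls may now span two pages */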
891
892static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
893{
894 __be32 *p;
ace0e14f 895 char *cpdest = xdr->scratch.iov_base;
6650239a
TM
896 size_t cplen = (char *)xdr->end - (char *)xdr->p;
897
898 if (nbytes > xdr->scratch.iov_len)
899 return NULL;
ace0e14f
TM
900 p = __xdr_inline_decode(xdr, cplen);
901 if (p == NULL)
902 return NULL;
903 memcpy(cpdest, p, cplen);
6650239a
TM
904 cpdest += cplen;
905 nbytes -= cplen;
906 if (!xdr_set_next_buffer(xdr))
907 return NULL;
908 p = __xdr_inline_decode(xdr, nbytes);
909 if (p == NULL)
910 return NULL;
911 memcpy(cpdest, p, nbytes);
912 return xdr->scratch.iov_base;
913}
914
915/**
916 * xdr_inline_decode - Retrieve XDR data to decode
1da177e4
LT
917 * @xdr: pointer to xdr_stream struct
918 * @nbytes: number of bytes of data to decode
919 *
920 * Check if the input buffer is long enough to enable us to decode
921 * 'nbytes' more bytes of data starting at the current position.
922 * If so return the current pointer, then update the current
923 * pointer position.
924 */
d8ed029d 925__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1da177e4 926{
6650239a 927 __be32 *p;
1da177e4 928
6650239a
TM
929 if (nbytes == 0)
930 return xdr->p;
931 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1da177e4 932 return NULL;
6650239a
TM
933 p = __xdr_inline_decode(xdr, nbytes);
934 if (p != NULL)
935 return p;
936 return xdr_copy_to_scratch(xdr, nbytes);
1da177e4 937}
468039ee 938EXPORT_SYMBOL_GPL(xdr_inline_decode);
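A sketch of the usual decode-side pattern: fixed-size items are pulled with xdr_inline_decode() and converted with the be32 helpers (the surrounding reply structure and error code are hypothetical):

	__be32 *p;
	u32 count;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -EBADMSG;	/* ran off the end of the receive buffer */
	count = be32_to_cpup(p);
	p = xdr_inline_decode(xdr, count);
	if (unlikely(!p))
		return -EBADMSG;
	/* p now addresses 'count' contiguous bytes, possibly in scratch */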
1da177e4 939
3994ee6f 940static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
1da177e4
LT
941{
942 struct xdr_buf *buf = xdr->buf;
943 struct kvec *iov;
bfeea1dc 944 unsigned int nwords = XDR_QUADLEN(len);
b760b313 945 unsigned int cur = xdr_stream_pos(xdr);
1da177e4 946
bfeea1dc 947 if (xdr->nwords == 0)
c337d365 948 return 0;
1da177e4
LT
949 /* Realign pages to current pointer position */
950 iov = buf->head;
a11a2bf4 951 if (iov->iov_len > cur) {
b760b313 952 xdr_shrink_bufhead(buf, iov->iov_len - cur);
a11a2bf4
TM
953 xdr->nwords = XDR_QUADLEN(buf->len - cur);
954 }
1da177e4 955
a11a2bf4
TM
956 if (nwords > xdr->nwords) {
957 nwords = xdr->nwords;
958 len = nwords << 2;
959 }
960 if (buf->page_len <= len)
8a9a8b83 961 len = buf->page_len;
a11a2bf4
TM
962 else if (nwords < xdr->nwords) {
963 /* Truncate page data and move it into the tail */
964 xdr_shrink_pagelen(buf, buf->page_len - len);
965 xdr->nwords = XDR_QUADLEN(buf->len - cur);
966 }
3994ee6f
TM
967 return len;
968}
bd00f84b 969
1da177e4
LT
970/**
971 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
972 * @xdr: pointer to xdr_stream struct
973 * @len: number of bytes of page data
974 *
975 * Moves data beyond the current pointer position from the XDR head[] buffer
976 * into the page list. Any data that lies beyond current position + "len"
8b23ea7b 977 * bytes is moved into the XDR tail[].
3994ee6f
TM
978 *
979 * Returns the number of XDR encoded bytes now contained in the pages
1da177e4 980 */
3994ee6f 981unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1da177e4
LT
982{
983 struct xdr_buf *buf = xdr->buf;
984 struct kvec *iov;
3994ee6f 985 unsigned int nwords;
1da177e4 986 unsigned int end;
3994ee6f 987 unsigned int padding;
1da177e4 988
3994ee6f
TM
989 len = xdr_align_pages(xdr, len);
990 if (len == 0)
991 return 0;
992 nwords = XDR_QUADLEN(len);
bfeea1dc 993 padding = (nwords << 2) - len;
1da177e4
LT
994 xdr->iov = iov = buf->tail;
995 /* Compute remaining message length. */
bd00f84b
TM
996 end = ((xdr->nwords - nwords) << 2) + padding;
997 if (end > iov->iov_len)
998 end = iov->iov_len;
999
1da177e4
LT
1000 /*
1001 * Position current pointer at beginning of tail, and
1002 * set remaining message length.
1003 */
d8ed029d
AD
1004 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
1005 xdr->end = (__be32 *)((char *)iov->iov_base + end);
76cacaab 1006 xdr->page_ptr = NULL;
bfeea1dc 1007 xdr->nwords = XDR_QUADLEN(end - padding);
c337d365 1008 return len;
1da177e4 1009}
468039ee 1010EXPORT_SYMBOL_GPL(xdr_read_pages);
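A sketch of how a READ-style reply decoder might use xdr_read_pages() after decoding the on-the-wire byte count (the result structure and error code are hypothetical):

	__be32 *p = xdr_inline_decode(xdr, 4);
	u32 count;

	if (unlikely(!p))
		return -EBADMSG;
	count = be32_to_cpup(p);
	/* align page data at the current position; excess moves to tail[] */
	res->count = xdr_read_pages(xdr, count);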
1da177e4 1011
8b23ea7b
TM
1012/**
1013 * xdr_enter_page - decode data from the XDR page
1014 * @xdr: pointer to xdr_stream struct
1015 * @len: number of bytes of page data
1016 *
1017 * Moves data beyond the current pointer position from the XDR head[] buffer
1018 * into the page list. Any data that lies beyond current position + "len"
1019 * bytes is moved into the XDR tail[]. The current pointer is then
1020 * repositioned at the beginning of the first XDR page.
1021 */
1022void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
1023{
f8bb7f08 1024 len = xdr_align_pages(xdr, len);
8b23ea7b
TM
1025 /*
1026 * Position current pointer at beginning of tail, and
1027 * set remaining message length.
1028 */
f8bb7f08
TM
1029 if (len != 0)
1030 xdr_set_page_base(xdr, 0, len);
8b23ea7b 1031}
468039ee 1032EXPORT_SYMBOL_GPL(xdr_enter_page);
8b23ea7b 1033
1da177e4
LT
1034static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1035
1036void
1037xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1038{
1039 buf->head[0] = *iov;
1040 buf->tail[0] = empty_iov;
1041 buf->page_len = 0;
1042 buf->buflen = buf->len = iov->iov_len;
1043}
468039ee 1044EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1da177e4 1045
de4aee2e
BF
1046/**
1047 * xdr_buf_subsegment - set subbuf to a portion of buf
1048 * @buf: an xdr buffer
1049 * @subbuf: the result buffer
1050 * @base: beginning of range in bytes
1051 * @len: length of range in bytes
1052 *
1053 * sets @subbuf to an xdr buffer representing the portion of @buf of
1054 * length @len starting at offset @base.
1055 *
1056 * @buf and @subbuf may be pointers to the same struct xdr_buf.
1057 *
1058 * Returns -1 if base or length are out of bounds.
1059 */
1da177e4
LT
1060int
1061xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1e78957e 1062 unsigned int base, unsigned int len)
1da177e4 1063{
1da177e4 1064 subbuf->buflen = subbuf->len = len;
1e78957e
TM
1065 if (base < buf->head[0].iov_len) {
1066 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1067 subbuf->head[0].iov_len = min_t(unsigned int, len,
1068 buf->head[0].iov_len - base);
1069 len -= subbuf->head[0].iov_len;
1070 base = 0;
1071 } else {
1e78957e 1072 base -= buf->head[0].iov_len;
de4aee2e 1073 subbuf->head[0].iov_len = 0;
1e78957e 1074 }
1da177e4
LT
1075
1076 if (base < buf->page_len) {
1e78957e
TM
1077 subbuf->page_len = min(buf->page_len - base, len);
1078 base += buf->page_base;
09cbfeaf
KS
1079 subbuf->page_base = base & ~PAGE_MASK;
1080 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1da177e4
LT
1081 len -= subbuf->page_len;
1082 base = 0;
1083 } else {
1084 base -= buf->page_len;
1085 subbuf->page_len = 0;
1086 }
1087
1e78957e
TM
1088 if (base < buf->tail[0].iov_len) {
1089 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1090 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1091 buf->tail[0].iov_len - base);
1092 len -= subbuf->tail[0].iov_len;
1093 base = 0;
1094 } else {
1e78957e 1095 base -= buf->tail[0].iov_len;
de4aee2e 1096 subbuf->tail[0].iov_len = 0;
1e78957e
TM
1097 }
1098
1da177e4
LT
1099 if (base || len)
1100 return -1;
1101 return 0;
1102}
468039ee 1103EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
1da177e4 1104
4c190e2f
JL
1105/**
1106 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1107 * @buf: buf to be trimmed
1108 * @len: number of bytes to reduce "buf" by
1109 *
1110 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1111 * that it's possible that we'll trim less than that amount if the xdr_buf is
1112 * too small, or if (for instance) it's all in the head and the parser has
1113 * already read too far into it.
1114 */
1115void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1116{
1117 size_t cur;
1118 unsigned int trim = len;
1119
1120 if (buf->tail[0].iov_len) {
1121 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1122 buf->tail[0].iov_len -= cur;
1123 trim -= cur;
1124 if (!trim)
1125 goto fix_len;
1126 }
1127
1128 if (buf->page_len) {
1129 cur = min_t(unsigned int, buf->page_len, trim);
1130 buf->page_len -= cur;
1131 trim -= cur;
1132 if (!trim)
1133 goto fix_len;
1134 }
1135
1136 if (buf->head[0].iov_len) {
1137 cur = min_t(size_t, buf->head[0].iov_len, trim);
1138 buf->head[0].iov_len -= cur;
1139 trim -= cur;
1140 }
1141fix_len:
1142 buf->len -= (len - trim);
1143}
1144EXPORT_SYMBOL_GPL(xdr_buf_trim);
1145
4e3e43ad 1146static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1da177e4 1147{
1e78957e 1148 unsigned int this_len;
1da177e4 1149
4e3e43ad
TM
1150 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1151 memcpy(obj, subbuf->head[0].iov_base, this_len);
1da177e4
LT
1152 len -= this_len;
1153 obj += this_len;
4e3e43ad 1154 this_len = min_t(unsigned int, len, subbuf->page_len);
1da177e4 1155 if (this_len)
4e3e43ad 1156 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1da177e4
LT
1157 len -= this_len;
1158 obj += this_len;
4e3e43ad
TM
1159 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1160 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1da177e4
LT
1161}
1162
bd8100e7 1163/* obj is assumed to point to allocated memory of size at least len: */
4e3e43ad 1164int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
bd8100e7
AG
1165{
1166 struct xdr_buf subbuf;
bd8100e7
AG
1167 int status;
1168
1169 status = xdr_buf_subsegment(buf, &subbuf, base, len);
4e3e43ad
TM
1170 if (status != 0)
1171 return status;
1172 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1173 return 0;
1174}
468039ee 1175EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
4e3e43ad
TM
1176
1177static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1178{
1179 unsigned int this_len;
1180
1181 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1182 memcpy(subbuf->head[0].iov_base, obj, this_len);
bd8100e7
AG
1183 len -= this_len;
1184 obj += this_len;
4e3e43ad 1185 this_len = min_t(unsigned int, len, subbuf->page_len);
bd8100e7 1186 if (this_len)
4e3e43ad 1187 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
bd8100e7
AG
1188 len -= this_len;
1189 obj += this_len;
4e3e43ad
TM
1190 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1191 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1192}
1193
1194/* obj is assumed to point to allocated memory of size at least len: */
1195int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1196{
1197 struct xdr_buf subbuf;
1198 int status;
1199
1200 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1201 if (status != 0)
1202 return status;
1203 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1204 return 0;
bd8100e7 1205}
c43abaed 1206EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
bd8100e7
AG
1207
1208int
1e78957e 1209xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1da177e4 1210{
d8ed029d 1211 __be32 raw;
1da177e4
LT
1212 int status;
1213
1214 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1215 if (status)
1216 return status;
98866b5a 1217 *obj = be32_to_cpu(raw);
1da177e4
LT
1218 return 0;
1219}
468039ee 1220EXPORT_SYMBOL_GPL(xdr_decode_word);
1da177e4 1221
bd8100e7 1222int
1e78957e 1223xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
bd8100e7 1224{
9f162d2a 1225 __be32 raw = cpu_to_be32(obj);
bd8100e7
AG
1226
1227 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1228}
468039ee 1229EXPORT_SYMBOL_GPL(xdr_encode_word);
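A sketch of patching and re-reading a single 32-bit word at a byte offset into an already-built xdr_buf using the two helpers above (the offset and error code are hypothetical; both helpers return non-zero if the range falls outside the buffer):

	u32 val;

	if (xdr_encode_word(buf, offset, 42))
		return -EINVAL;
	if (xdr_decode_word(buf, offset, &val))
		return -EINVAL;
	/* val == 42 */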
bd8100e7 1230
1da177e4
LT
1231/* If the netobj starting offset bytes from the start of xdr_buf is contained
1232 * entirely in the head or the tail, set object to point to it; otherwise
1233 * try to find space for it at the end of the tail, copy it there, and
1234 * set obj to point to it. */
bee57c99 1235int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
1da177e4 1236{
bee57c99 1237 struct xdr_buf subbuf;
1da177e4 1238
bd8100e7 1239 if (xdr_decode_word(buf, offset, &obj->len))
bee57c99
TM
1240 return -EFAULT;
1241 if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
1242 return -EFAULT;
1243
1244 /* Is the obj contained entirely in the head? */
1245 obj->data = subbuf.head[0].iov_base;
1246 if (subbuf.head[0].iov_len == obj->len)
1247 return 0;
1248 /* ..or is the obj contained entirely in the tail? */
1249 obj->data = subbuf.tail[0].iov_base;
1250 if (subbuf.tail[0].iov_len == obj->len)
1251 return 0;
1252
1253 /* use end of tail as storage for obj:
1254 * (We don't copy to the beginning because then we'd have
1255 * to worry about doing a potentially overlapping copy.
1256 * This assumes the object is at most half the length of the
1257 * tail.) */
1258 if (obj->len > buf->buflen - buf->len)
1259 return -ENOMEM;
1260 if (buf->tail[0].iov_len != 0)
1261 obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
1262 else
1263 obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
1264 __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
1da177e4 1265 return 0;
1da177e4 1266}
468039ee 1267EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
bd8100e7
AG
1268
1269/* Returns 0 on success, or else a negative error code. */
1270static int
1271xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1272 struct xdr_array2_desc *desc, int encode)
1273{
1274 char *elem = NULL, *c;
1275 unsigned int copied = 0, todo, avail_here;
1276 struct page **ppages = NULL;
1277 int err;
1278
1279 if (encode) {
1280 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1281 return -EINVAL;
1282 } else {
1283 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
58fcb8df 1284 desc->array_len > desc->array_maxlen ||
bd8100e7
AG
1285 (unsigned long) base + 4 + desc->array_len *
1286 desc->elem_size > buf->len)
1287 return -EINVAL;
1288 }
1289 base += 4;
1290
1291 if (!desc->xcode)
1292 return 0;
1293
1294 todo = desc->array_len * desc->elem_size;
1295
1296 /* process head */
1297 if (todo && base < buf->head->iov_len) {
1298 c = buf->head->iov_base + base;
1299 avail_here = min_t(unsigned int, todo,
1300 buf->head->iov_len - base);
1301 todo -= avail_here;
1302
1303 while (avail_here >= desc->elem_size) {
1304 err = desc->xcode(desc, c);
1305 if (err)
1306 goto out;
1307 c += desc->elem_size;
1308 avail_here -= desc->elem_size;
1309 }
1310 if (avail_here) {
1311 if (!elem) {
1312 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1313 err = -ENOMEM;
1314 if (!elem)
1315 goto out;
1316 }
1317 if (encode) {
1318 err = desc->xcode(desc, elem);
1319 if (err)
1320 goto out;
1321 memcpy(c, elem, avail_here);
1322 } else
1323 memcpy(elem, c, avail_here);
1324 copied = avail_here;
1325 }
1326 base = buf->head->iov_len; /* align to start of pages */
1327 }
1328
1329 /* process pages array */
1330 base -= buf->head->iov_len;
1331 if (todo && base < buf->page_len) {
1332 unsigned int avail_page;
1333
1334 avail_here = min(todo, buf->page_len - base);
1335 todo -= avail_here;
1336
1337 base += buf->page_base;
09cbfeaf
KS
1338 ppages = buf->pages + (base >> PAGE_SHIFT);
1339 base &= ~PAGE_MASK;
1340 avail_page = min_t(unsigned int, PAGE_SIZE - base,
bd8100e7
AG
1341 avail_here);
1342 c = kmap(*ppages) + base;
1343
1344 while (avail_here) {
1345 avail_here -= avail_page;
1346 if (copied || avail_page < desc->elem_size) {
1347 unsigned int l = min(avail_page,
1348 desc->elem_size - copied);
1349 if (!elem) {
1350 elem = kmalloc(desc->elem_size,
1351 GFP_KERNEL);
1352 err = -ENOMEM;
1353 if (!elem)
1354 goto out;
1355 }
1356 if (encode) {
1357 if (!copied) {
1358 err = desc->xcode(desc, elem);
1359 if (err)
1360 goto out;
1361 }
1362 memcpy(c, elem + copied, l);
1363 copied += l;
1364 if (copied == desc->elem_size)
1365 copied = 0;
1366 } else {
1367 memcpy(elem + copied, c, l);
1368 copied += l;
1369 if (copied == desc->elem_size) {
1370 err = desc->xcode(desc, elem);
1371 if (err)
1372 goto out;
1373 copied = 0;
1374 }
1375 }
1376 avail_page -= l;
1377 c += l;
1378 }
1379 while (avail_page >= desc->elem_size) {
1380 err = desc->xcode(desc, c);
1381 if (err)
1382 goto out;
1383 c += desc->elem_size;
1384 avail_page -= desc->elem_size;
1385 }
1386 if (avail_page) {
1387 unsigned int l = min(avail_page,
1388 desc->elem_size - copied);
1389 if (!elem) {
1390 elem = kmalloc(desc->elem_size,
1391 GFP_KERNEL);
1392 err = -ENOMEM;
1393 if (!elem)
1394 goto out;
1395 }
1396 if (encode) {
1397 if (!copied) {
1398 err = desc->xcode(desc, elem);
1399 if (err)
1400 goto out;
1401 }
1402 memcpy(c, elem + copied, l);
1403 copied += l;
1404 if (copied == desc->elem_size)
1405 copied = 0;
1406 } else {
1407 memcpy(elem + copied, c, l);
1408 copied += l;
1409 if (copied == desc->elem_size) {
1410 err = desc->xcode(desc, elem);
1411 if (err)
1412 goto out;
1413 copied = 0;
1414 }
1415 }
1416 }
1417 if (avail_here) {
1418 kunmap(*ppages);
1419 ppages++;
1420 c = kmap(*ppages);
1421 }
1422
1423 avail_page = min(avail_here,
09cbfeaf 1424 (unsigned int) PAGE_SIZE);
bd8100e7
AG
1425 }
1426 base = buf->page_len; /* align to start of tail */
1427 }
1428
1429 /* process tail */
1430 base -= buf->page_len;
1431 if (todo) {
1432 c = buf->tail->iov_base + base;
1433 if (copied) {
1434 unsigned int l = desc->elem_size - copied;
1435
1436 if (encode)
1437 memcpy(c, elem + copied, l);
1438 else {
1439 memcpy(elem + copied, c, l);
1440 err = desc->xcode(desc, elem);
1441 if (err)
1442 goto out;
1443 }
1444 todo -= l;
1445 c += l;
1446 }
1447 while (todo) {
1448 err = desc->xcode(desc, c);
1449 if (err)
1450 goto out;
1451 c += desc->elem_size;
1452 todo -= desc->elem_size;
1453 }
1454 }
1455 err = 0;
1456
1457out:
a51482bd 1458 kfree(elem);
bd8100e7
AG
1459 if (ppages)
1460 kunmap(*ppages);
1461 return err;
1462}
1463
1464int
1465xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1466 struct xdr_array2_desc *desc)
1467{
1468 if (base >= buf->len)
1469 return -EINVAL;
1470
1471 return xdr_xcode_array2(buf, base, desc, 0);
1472}
468039ee 1473EXPORT_SYMBOL_GPL(xdr_decode_array2);
bd8100e7
AG
1474
1475int
1476xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1477 struct xdr_array2_desc *desc)
1478{
1479 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1480 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1481 return -EINVAL;
1482
1483 return xdr_xcode_array2(buf, base, desc, 1);
1484}
468039ee 1485EXPORT_SYMBOL_GPL(xdr_encode_array2);
37a4e6cb
OK
1486
1487int
1488xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
cca5172a 1489 int (*actor)(struct scatterlist *, void *), void *data)
37a4e6cb
OK
1490{
1491 int i, ret = 0;
95c96174 1492 unsigned int page_len, thislen, page_offset;
37a4e6cb
OK
1493 struct scatterlist sg[1];
1494
68e3f5dd
HX
1495 sg_init_table(sg, 1);
1496
37a4e6cb
OK
1497 if (offset >= buf->head[0].iov_len) {
1498 offset -= buf->head[0].iov_len;
1499 } else {
1500 thislen = buf->head[0].iov_len - offset;
1501 if (thislen > len)
1502 thislen = len;
1503 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1504 ret = actor(sg, data);
1505 if (ret)
1506 goto out;
1507 offset = 0;
1508 len -= thislen;
1509 }
1510 if (len == 0)
1511 goto out;
1512
1513 if (offset >= buf->page_len) {
1514 offset -= buf->page_len;
1515 } else {
1516 page_len = buf->page_len - offset;
1517 if (page_len > len)
1518 page_len = len;
1519 len -= page_len;
09cbfeaf
KS
1520 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
1521 i = (offset + buf->page_base) >> PAGE_SHIFT;
1522 thislen = PAGE_SIZE - page_offset;
37a4e6cb
OK
1523 do {
1524 if (thislen > page_len)
1525 thislen = page_len;
642f1490 1526 sg_set_page(sg, buf->pages[i], thislen, page_offset);
37a4e6cb
OK
1527 ret = actor(sg, data);
1528 if (ret)
1529 goto out;
1530 page_len -= thislen;
1531 i++;
1532 page_offset = 0;
09cbfeaf 1533 thislen = PAGE_SIZE;
37a4e6cb
OK
1534 } while (page_len != 0);
1535 offset = 0;
1536 }
1537 if (len == 0)
1538 goto out;
1539 if (offset < buf->tail[0].iov_len) {
1540 thislen = buf->tail[0].iov_len - offset;
1541 if (thislen > len)
1542 thislen = len;
1543 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1544 ret = actor(sg, data);
1545 len -= thislen;
1546 }
1547 if (len != 0)
1548 ret = -EINVAL;
1549out:
1550 return ret;
1551}
468039ee 1552EXPORT_SYMBOL_GPL(xdr_process_buf);
37a4e6cb 1553
0e779aa7
TM
1554/**
1555 * xdr_stream_decode_opaque - Decode variable length opaque
1556 * @xdr: pointer to xdr_stream
1557 * @ptr: location to store opaque data
1558 * @size: size of storage buffer @ptr
1559 *
1560 * Return values:
1561 * On success, returns size of object stored in *@ptr
1562 * %-EBADMSG on XDR buffer overflow
1563 * %-EMSGSIZE on overflow of storage buffer @ptr
1564 */
1565ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1566{
1567 ssize_t ret;
1568 void *p;
1569
1570 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1571 if (ret <= 0)
1572 return ret;
1573 memcpy(ptr, p, ret);
1574 return ret;
1575}
1576EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1577
1578/**
1579 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1580 * @xdr: pointer to xdr_stream
1581 * @ptr: location to store pointer to opaque data
1582 * @maxlen: maximum acceptable object size
1583 * @gfp_flags: GFP mask to use
1584 *
1585 * Return values:
1586 * On success, returns size of object stored in *@ptr
1587 * %-EBADMSG on XDR buffer overflow
1588 * %-EMSGSIZE if the size of the object would exceed @maxlen
1589 * %-ENOMEM on memory allocation failure
1590 */
1591ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1592 size_t maxlen, gfp_t gfp_flags)
1593{
1594 ssize_t ret;
1595 void *p;
1596
1597 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1598 if (ret > 0) {
1599 *ptr = kmemdup(p, ret, gfp_flags);
1600 if (*ptr != NULL)
1601 return ret;
1602 ret = -ENOMEM;
1603 }
1604 *ptr = NULL;
1605 return ret;
1606}
1607EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1608
1609/**
1610 * xdr_stream_decode_string - Decode variable length string
1611 * @xdr: pointer to xdr_stream
1612 * @str: location to store string
1613 * @size: size of storage buffer @str
1614 *
1615 * Return values:
1616 * On success, returns length of NUL-terminated string stored in *@str
1617 * %-EBADMSG on XDR buffer overflow
1618 * %-EMSGSIZE on overflow of storage buffer @str
1619 */
1620ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1621{
1622 ssize_t ret;
1623 void *p;
1624
1625 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1626 if (ret > 0) {
1627 memcpy(str, p, ret);
1628 str[ret] = '\0';
1629 return strlen(str);
1630 }
1631 *str = '\0';
1632 return ret;
1633}
1634EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
1635
5c741d4f
TM
1636/**
1637 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1638 * @xdr: pointer to xdr_stream
1639 * @str: location to store pointer to string
1640 * @maxlen: maximum acceptable string length
1641 * @gfp_flags: GFP mask to use
1642 *
1643 * Return values:
1644 * On success, returns length of NUL-terminated string stored in *@str
1645 * %-EBADMSG on XDR buffer overflow
1646 * %-EMSGSIZE if the size of the string would exceed @maxlen
1647 * %-ENOMEM on memory allocation failure
1648 */
1649ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1650 size_t maxlen, gfp_t gfp_flags)
1651{
1652 void *p;
1653 ssize_t ret;
1654
1655 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1656 if (ret > 0) {
1657 char *s = kmalloc(ret + 1, gfp_flags);
1658 if (s != NULL) {
1659 memcpy(s, p, ret);
1660 s[ret] = '\0';
1661 *str = s;
1662 return strlen(s);
1663 }
1664 ret = -ENOMEM;
1665 }
1666 *str = NULL;
1667 return ret;
1668}
1669EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);