/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>


/**
 * skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, to, len))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
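
/*
 * A minimal usage sketch, not part of the original file: it shows the
 * skb_reader_t contract assumed by the actors above - the caller fills
 * in skb, offset and count, and each call copies at most count bytes,
 * advancing offset and count as it goes.  The helper name
 * peek_xid_example and the idea of peeking at the RPC XID are
 * illustrative assumptions only.
 */
static int peek_xid_example(struct sk_buff *skb, u32 *xid)
{
	skb_reader_t desc;

	desc.skb = skb;
	desc.offset = sizeof(struct udphdr);	/* start past the UDP header */
	desc.count = skb->len - desc.offset;

	/* skb_read_bits() returns the number of bytes actually copied. */
	if (skb_read_bits(&desc, xid, sizeof(*xid)) != sizeof(*xid))
		return -1;
	return 0;
}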

/**
 * skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as skb_read_bits, but calculate a checksum at the same time.
 */
static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
{
	unsigned int csum2, pos;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Returns the number of bytes copied, or -ENOMEM if a receive page
 * could not be allocated before any data was copied.
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	int ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
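
/*
 * Illustrative sketch of the page arithmetic used above (hypothetical,
 * not part of the original file): a byte offset into the page list is
 * split into a page index and an offset within that page, exactly as
 * the "base >> PAGE_CACHE_SHIFT" / "base &= ~PAGE_CACHE_MASK" step
 * does.  The helper name xdr_page_pos_example is an assumption made
 * for illustration only.
 */
static void xdr_page_pos_example(struct xdr_buf *xdr, unsigned int base,
				 unsigned int *pgnr, unsigned int *pgoff)
{
	unsigned int pos = base + xdr->page_base;

	*pgnr = pos >> PAGE_CACHE_SHIFT;	/* index into xdr->pages[] */
	*pgoff = pos & ~PAGE_CACHE_MASK;	/* byte offset inside that page */
}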

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 *
 * Returns 0 on success, or -1 if the datagram could not be copied in
 * full or its checksum did not verify.
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	skb_reader_t desc;

	desc.skb = skb;
	desc.offset = sizeof(struct udphdr);
	desc.count = skb->len - desc.offset;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		unsigned int csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if ((unsigned short)csum_fold(desc.csum))
		return -1;
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
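
/*
 * Hypothetical caller sketch, not part of the original file: a UDP
 * receive path might hand a freshly dequeued datagram and the request's
 * receive buffer to csum_partial_copy_to_xdr, dropping the packet when
 * the copy comes up short or the checksum fails.  The function name
 * udp_copy_reply_example is an illustrative assumption only.
 */
static int udp_copy_reply_example(struct sk_buff *skb, struct xdr_buf *rcvbuf)
{
	/* Copies the payload and verifies the UDP checksum in one pass. */
	if (csum_partial_copy_to_xdr(rcvbuf, skb))
		return -1;	/* short copy or bad checksum: discard */
	return 0;
}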