/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>

18 | /** | |
19 | * skb_read_bits - copy some data bits from skb to internal buffer | |
20 | * @desc: sk_buff copy helper | |
21 | * @to: copy destination | |
22 | * @len: number of bytes to copy | |
23 | * | |
24 | * Possibly called several times to iterate over an sk_buff and copy | |
25 | * data out of it. | |
26 | */ | |
27 | static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) | |
28 | { | |
29 | if (len > desc->count) | |
30 | len = desc->count; | |
31 | if (skb_copy_bits(desc->skb, desc->offset, to, len)) | |
32 | return 0; | |
33 | desc->count -= len; | |
34 | desc->offset += len; | |
35 | return len; | |
36 | } | |
37 | ||
38 | /** | |
39 | * skb_read_and_csum_bits - copy and checksum from skb to buffer | |
40 | * @desc: sk_buff copy helper | |
41 | * @to: copy destination | |
42 | * @len: number of bytes to copy | |
43 | * | |
44 | * Same as skb_read_bits, but calculate a checksum at the same time. | |
45 | */ | |
46 | static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | |
47 | { | |
48 | unsigned int csum2, pos; | |
49 | ||
50 | if (len > desc->count) | |
51 | len = desc->count; | |
52 | pos = desc->offset; | |
53 | csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); | |
54 | desc->csum = csum_block_add(desc->csum, csum2, pos); | |
55 | desc->count -= len; | |
56 | desc->offset += len; | |
57 | return len; | |
58 | } | |
59 | ||
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Copies from @desc into the head iovec, the page array, and the tail
 * iovec of @xdr, in that order, starting @base bytes into the buffer.
 * Returns the number of bytes copied; returns -ENOMEM only when page
 * allocation fails before anything was copied.
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	int ret;

	/* Fill the head iovec first, starting @base bytes into it. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Short copy or skb data exhausted: nothing more to do. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	/* @base lands entirely beyond the page data: skip to the tail. */
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Translate the buffer offset into a starting page
		 * pointer plus an offset within that page. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				/* Report -ENOMEM only if nothing has
				 * been copied yet; otherwise return
				 * the partial byte count. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			/* First page may start mid-page. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		/* Stop on a short copy or when the skb runs dry. */
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
139 | ||
140 | /** | |
141 | * csum_partial_copy_to_xdr - checksum and copy data | |
142 | * @xdr: target XDR buffer | |
143 | * @skb: source skb | |
144 | * | |
145 | * We have set things up such that we perform the checksum of the UDP | |
146 | * packet in parallel with the copies into the RPC client iovec. -DaveM | |
147 | */ | |
148 | int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | |
149 | { | |
150 | skb_reader_t desc; | |
151 | ||
152 | desc.skb = skb; | |
153 | desc.offset = sizeof(struct udphdr); | |
154 | desc.count = skb->len - desc.offset; | |
155 | ||
156 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | |
157 | goto no_checksum; | |
158 | ||
159 | desc.csum = csum_partial(skb->data, desc.offset, skb->csum); | |
160 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) | |
161 | return -1; | |
162 | if (desc.offset != skb->len) { | |
163 | unsigned int csum2; | |
164 | csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); | |
165 | desc.csum = csum_block_add(desc.csum, csum2, desc.offset); | |
166 | } | |
167 | if (desc.count) | |
168 | return -1; | |
169 | if ((unsigned short)csum_fold(desc.csum)) | |
170 | return -1; | |
84fa7933 | 171 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
fb286bb2 | 172 | netdev_rx_csum_fault(skb->dev); |
094bb20b CL |
173 | return 0; |
174 | no_checksum: | |
175 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) | |
176 | return -1; | |
177 | if (desc.count) | |
178 | return -1; | |
179 | return 0; | |
180 | } |