]>
Commit | Line | Data |
---|---|---|
b97bf3fd PL |
1 | /* |
2 | * net/tipc/msg.c: TIPC message header routines | |
c4307285 | 3 | * |
cf2157f8 | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB |
741de3e9 | 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems |
b97bf3fd PL |
6 | * All rights reserved. |
7 | * | |
9ea1fd3c | 8 | * Redistribution and use in source and binary forms, with or without |
b97bf3fd PL |
9 | * modification, are permitted provided that the following conditions are met: |
10 | * | |
9ea1fd3c PL |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. Neither the names of the copyright holders nor the names of its | |
17 | * contributors may be used to endorse or promote products derived from | |
18 | * this software without specific prior written permission. | |
b97bf3fd | 19 | * |
9ea1fd3c PL |
20 | * Alternatively, this software may be distributed under the terms of the |
21 | * GNU General Public License ("GPL") version 2 as published by the Free | |
22 | * Software Foundation. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
b97bf3fd PL |
34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | |
36 | ||
c93d3baa | 37 | #include <net/sock.h> |
b97bf3fd | 38 | #include "core.h" |
b97bf3fd | 39 | #include "msg.h" |
5a379074 JPM |
40 | #include "addr.h" |
41 | #include "name_table.h" | |
fc1b6d6d | 42 | #include "crypto.h" |
b97bf3fd | 43 | |
8db1bae3 | 44 | #define MAX_FORWARD_SIZE 1024 |
fc1b6d6d TL |
45 | #ifdef CONFIG_TIPC_CRYPTO |
46 | #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16) | |
47 | #define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE) | |
48 | #else | |
27777daa JPM |
49 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) |
50 | #define BUF_TAILROOM 16 | |
fc1b6d6d | 51 | #endif |
8db1bae3 | 52 | |
/* Round @i up to the next multiple of 4 (TIPC message alignment unit).
 * Values already aligned are returned unchanged.
 */
static unsigned int align(unsigned int i)
{
	unsigned int rem = i & 3u;

	return rem ? i + (4u - rem) : i;
}
57 | ||
/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	/* With crypto, reserve tailroom for the AES-GCM authentication tag;
	 * total allocation is rounded up to a 4-byte multiple either way.
	 */
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	/* fclone variant: callers frequently clone these buffers */
	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
86 | ||
/* tipc_msg_init(): initialize a TIPC message header in place
 * @own_node: this node's address, written as previous node (and origin
 *            node for long headers)
 * @m: header area to initialize; first @hsize bytes are zeroed
 * @user: message user (protocol/subsystem id)
 * @type: message type within that user
 * @hsize: header size to set; must be a valid TIPC header length
 * @dnode: destination node, only stored when the header is long enough
 */
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	/* SHORT headers have no orignode/destnode fields to fill in */
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}
102 | ||
/* tipc_msg_create(): allocate and initialize a complete TIPC message buffer
 * @user/@type: message user and type, as for tipc_msg_init()
 * @hdr_sz: header size; @data_sz: payload size reserved after the header
 * @dnode/@onode: destination and origin node addresses
 * @dport/@oport: destination and origin port numbers
 * @errcode: error code to embed in the header
 * Return: new buffer of size hdr_sz + data_sz, or NULL on allocation failure
 */
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	/* GFP_ATOMIC: callers may be in non-sleepable context */
	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	/* tipc_msg_init() set size to hdr_sz only; extend to cover payload */
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
126 | ||
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 *
 * On any error both buffers are freed and both pointers reset to NULL.
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	/* Strip the fragment header; only payload is chained */
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		/* A first fragment while a reassembly is in progress is a
		 * protocol violation - drop everything.
		 */
		if (unlikely(head))
			goto err;
		*buf = NULL;
		/* Private copy needed: head skb will be modified below */
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		/* Cache the frag-list tail so appends stay O(1) */
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	/* Non-first fragment without a head in progress: drop */
	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		/* Fall back to chaining on the frag list; bookkeeping of
		 * truesize/data_len/len mirrors what coalescing would do.
		 */
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		/* Force revalidation of the reassembled message */
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
4f1688b2 | 205 | |
/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	/* Blocks already accounted for in the tail buffer are subtracted
	 * from the final result so only newly added blocks are reported.
	 */
	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		/* No tail buffer, or it is full: start a new one */
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			/* Seed the new buffer with a copy of the template hdr */
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		/* Copy as much as fits into the current buffer */
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
255 | ||
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 *
 * May replace *_skb with a compacted copy; the original is then freed.
 * Return: true if the message is well-formed, otherwise false.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	/* Skip work if an earlier pass already validated this buffer */
	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	/* Pull the full header into the linear area before reading it */
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
067608e9 | 309 | |
/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	/* Messages that already fit in one packet must not be fragmented */
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	/* NOTE(review): when dsz is an exact multiple of dmax the last
	 * fragment carries zero payload bytes - confirm this is intended.
	 */
	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
368 | ||
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			/* Retry with fallback MTU; safe single-level recursion */
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment; it carries the original header plus data */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	/* Mark the final fragment so the receiver knows when to reassemble */
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
485 | ||
/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	/* Inner messages start on 4-byte boundaries; pad covers the gap */
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}
516 | ||
/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is potential for bundling this time or
 * later, in the case a bundling has been done this time, the skb is consumed
 * (the skb pointer = NULL).
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	/* Grow headroom/tailroom so the bundle header and both msgs fit */
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
581 | ||
/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg.
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	/* No room for even a minimal inner header at this offset? */
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	/* Inner message must fit entirely within the outer payload */
	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	/* Inner messages are 4-byte aligned within the bundle */
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
626 | ||
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	/* Cap the amount of data echoed back to the sender */
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	/* Sender asked for silent drop on non-delivery */
	if (msg_dest_droppable(_hdr))
		goto exit;
	/* Never bounce a message that is itself a rejection */
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}
5a379074 | 686 | |
67879274 TN |
687 | bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) |
688 | { | |
689 | struct sk_buff *skb, *_skb; | |
690 | ||
691 | skb_queue_walk(msg, skb) { | |
692 | _skb = skb_clone(skb, GFP_ATOMIC); | |
693 | if (!_skb) { | |
694 | __skb_queue_purge(cpy); | |
695 | pr_err_ratelimited("Failed to clone buffer chain\n"); | |
696 | return false; | |
697 | } | |
698 | __skb_queue_tail(cpy, _skb); | |
699 | } | |
700 | return true; | |
701 | } | |
702 | ||
/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	/* Re-read header: linearization may have moved the data */
	msg = buf_msg(skb);
	/* Only one re-routing attempt is allowed per message */
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	return true;
}
078bec82 | 743 | |
4c94cc2d JM |
744 | /* tipc_msg_assemble() - assemble chain of fragments into one message |
745 | */ | |
746 | bool tipc_msg_assemble(struct sk_buff_head *list) | |
747 | { | |
748 | struct sk_buff *skb, *tmp = NULL; | |
749 | ||
750 | if (skb_queue_len(list) == 1) | |
751 | return true; | |
752 | ||
753 | while ((skb = __skb_dequeue(list))) { | |
754 | skb->next = NULL; | |
755 | if (tipc_buf_append(&tmp, &skb)) { | |
756 | __skb_queue_tail(list, skb); | |
757 | return true; | |
758 | } | |
759 | if (!tmp) | |
760 | break; | |
761 | } | |
762 | __skb_queue_purge(list); | |
763 | __skb_queue_head_init(list); | |
764 | pr_warn("Failed do assemble buffer\n"); | |
765 | return false; | |
766 | } | |
767 | ||
078bec82 JPM |
768 | /* tipc_msg_reassemble() - clone a buffer chain of fragments and |
769 | * reassemble the clones into one message | |
770 | */ | |
2f566124 | 771 | bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq) |
078bec82 | 772 | { |
2f566124 | 773 | struct sk_buff *skb, *_skb; |
a6ca1094 | 774 | struct sk_buff *frag = NULL; |
078bec82 | 775 | struct sk_buff *head = NULL; |
2f566124 | 776 | int hdr_len; |
078bec82 JPM |
777 | |
778 | /* Copy header if single buffer */ | |
a6ca1094 YX |
779 | if (skb_queue_len(list) == 1) { |
780 | skb = skb_peek(list); | |
2f566124 JPM |
781 | hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb)); |
782 | _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC); | |
783 | if (!_skb) | |
784 | return false; | |
785 | __skb_queue_tail(rcvq, _skb); | |
786 | return true; | |
078bec82 JPM |
787 | } |
788 | ||
789 | /* Clone all fragments and reassemble */ | |
a6ca1094 YX |
790 | skb_queue_walk(list, skb) { |
791 | frag = skb_clone(skb, GFP_ATOMIC); | |
078bec82 JPM |
792 | if (!frag) |
793 | goto error; | |
794 | frag->next = NULL; | |
795 | if (tipc_buf_append(&head, &frag)) | |
796 | break; | |
797 | if (!head) | |
798 | goto error; | |
078bec82 | 799 | } |
2f566124 JPM |
800 | __skb_queue_tail(rcvq, frag); |
801 | return true; | |
078bec82 JPM |
802 | error: |
803 | pr_warn("Failed do clone local mcast rcv buffer\n"); | |
804 | kfree_skb(head); | |
2f566124 | 805 | return false; |
078bec82 | 806 | } |
8306f99a | 807 | |
a853e4c6 JPM |
808 | bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, |
809 | struct sk_buff_head *cpy) | |
810 | { | |
811 | struct sk_buff *skb, *_skb; | |
812 | ||
813 | skb_queue_walk(msg, skb) { | |
814 | _skb = pskb_copy(skb, GFP_ATOMIC); | |
815 | if (!_skb) { | |
816 | __skb_queue_purge(cpy); | |
817 | return false; | |
818 | } | |
819 | msg_set_destnode(buf_msg(_skb), dst); | |
820 | __skb_queue_tail(cpy, _skb); | |
821 | } | |
822 | return true; | |
823 | } | |
824 | ||
/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Return: true if the buffer was inserted; false if a buffer with the same
 *         sequence number already exists (the new buffer is then freed)
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	/* Fast path: new lowest sequence number goes to the head */
	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	/* Fast path: new highest sequence number goes to the tail */
	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	/* Otherwise scan for the insertion point; equal seqno = duplicate */
	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
64ac5f59 JM |
856 | |
857 | void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb, | |
858 | struct sk_buff_head *xmitq) | |
859 | { | |
860 | if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) | |
861 | __skb_queue_tail(xmitq, skb); | |
862 | } |