/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
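
/* Usage sketch (editor-added, illustrative only; the example_* name and the
 * chosen header size/type are hypothetical): allocating a buffer and
 * stamping a basic header into it with tipc_msg_init() below. Assumes
 * process context; atomic callers would pass GFP_ATOMIC instead.
 */
static struct sk_buff * __maybe_unused example_acquire_hdr(u32 self, u32 peer)
{
	struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE, GFP_KERNEL);

	if (!skb)
		return NULL;
	/* Data pointers already span BASIC_H_SIZE bytes; fill in the header */
	tipc_msg_init(self, buf_msg(skb), TIPC_LOW_IMPORTANCE,
		      TIPC_DIRECT_MSG, BASIC_H_SIZE, peer);
	return skb;
}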

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
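
/* Usage sketch (editor-added; the example_* name and the particular error
 * code are illustrative): building a self-contained, data-free connection
 * message such as a shutdown indication toward a peer socket.
 */
static struct sk_buff * __maybe_unused
example_create_shutdown(u32 self, u32 peer, u32 peer_port, u32 own_port)
{
	/* data_sz = 0: header only; errcode tells the peer why */
	return tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
			       BASIC_H_SIZE, 0, peer, self,
			       peer_port, own_port, TIPC_CONN_SHUTDOWN);
}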

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
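
/* Usage sketch (editor-added; example_* is hypothetical): draining a queue
 * of fragments through tipc_buf_append() until a complete message emerges.
 * On a return of 1, "frag" holds the reassembled head; on 0 with both
 * pointers NULLed, the partial state was dropped due to an error.
 */
static struct sk_buff * __maybe_unused
example_reassemble(struct sk_buff_head *fragq)
{
	struct sk_buff *head = NULL;	/* reassembly state across calls */
	struct sk_buff *frag;

	while ((frag = __skb_dequeue(fragq))) {
		if (tipc_buf_append(&head, &frag))
			return frag;	/* reassembly complete */
		if (!head)
			return NULL;	/* error: state already freed */
	}
	kfree_skb(head);		/* out of fragments: drop partial */
	return NULL;
}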

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @mss: max allowable size of buffer
 * @dlen: size of data to be appended
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	while (rem) {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	}
	return total - accounted;
}
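
/* Usage sketch (editor-added; example_* is hypothetical): appending "len"
 * bytes from a kernel buffer to a socket transmit queue. iov_iter_kvec()
 * with WRITE marks the iterator as a data source for copy_from_iter();
 * newer kernels spell this direction ITER_SOURCE.
 */
static int __maybe_unused
example_queue_data(struct tipc_msg *hdr, void *data, int len, int mss,
		   struct sk_buff_head *txq)
{
	struct kvec iv = { .iov_base = data, .iov_len = len };
	struct msghdr m = {};

	iov_iter_kvec(&m.msg_iter, WRITE, &iv, 1, len);
	/* Result: number of 1k-block units added, or -ENOMEM/-EFAULT */
	return tipc_msg_append(hdr, &m, len, mss, txq);
}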

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
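
/* Usage sketch (editor-added; example_* is hypothetical): the typical guard
 * on the receive path. Note the double pointer: validation may replace the
 * skb when the truesize/len ratio forces a copy, so callers must reload
 * their pointer afterwards.
 */
static bool __maybe_unused example_rcv_check(struct sk_buff **skb)
{
	if (likely(tipc_msg_validate(skb)))
		return true;
	kfree_skb(*skb);
	*skb = NULL;
	return false;
}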

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
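
/* Usage sketch (editor-added and hypothetical; upstream drives this from the
 * link failover/tunnel path, and the header user chosen here mirrors that):
 * an oversized message is split into MTU-sized pieces, each prefixed with a
 * copy of the supplied INT_H_SIZE prototype header.
 */
static int __maybe_unused
example_fragment(struct sk_buff *skb, u32 self, u32 peer, int mtu,
		 struct sk_buff_head *frags)
{
	struct tipc_msg hdr;

	tipc_msg_init(self, &hdr, TUNNEL_PROTOCOL, FAILOVER_MSG,
		      INT_H_SIZE, peer);
	return tipc_msg_fragment(skb, &hdr, mtu, frags);
}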

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: offset into the user data
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;
		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;
	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
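
/* Usage sketch (editor-added; example_* is hypothetical): building a
 * ready-to-send chain from a kernel buffer. The chain is a single skb when
 * header + data fit in "mtu", otherwise a FIRST/FRAGMENT/LAST_FRAGMENT
 * sequence.
 */
static int __maybe_unused
example_build_chain(struct tipc_msg *mhdr, void *data, int dsz, int mtu,
		    struct sk_buff_head *list)
{
	struct kvec iv = { .iov_base = data, .iov_len = dsz };
	struct msghdr m = {};

	iov_iter_kvec(&m.msg_iter, WRITE, &iv, 1, dsz);
	return tipc_msg_build(mhdr, &m, 0, dsz, mtu, list);	/* dsz or errno */
}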

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Returns "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, now or
 * later; when bundling has been done this time, the skb is consumed and the
 * pointer set to NULL. Otherwise "false", i.e. the skb cannot be bundled
 * at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
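
/* Usage sketch (editor-added; example_* is hypothetical): the pattern used
 * on a transmit backlog. Try to merge the new skb into the queue's tail;
 * if it was consumed (bundled), there is nothing left to queue.
 */
static void __maybe_unused
example_xmit_bundle(struct sk_buff_head *backlogq, struct sk_buff *skb,
		    u32 mss, u32 dnode)
{
	bool new_bundle = false;

	if (tipc_msg_try_bundle(skb_peek_tail(backlogq), &skb, mss, dnode,
				&new_bundle)) {
		if (!skb)
			return;		/* consumed into the tail bundle */
	}
	__skb_queue_tail(backlogq, skb);
}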

/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
 *  @skb: buffer to be extracted from.
 *  @iskb: extracted inner buffer, to be returned
 *  @pos: position in outer message of msg to be extracted.
 *        Returns position of next msg
 *  Consumes outer buffer when last packet extracted
 *  Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
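
/* Usage sketch (editor-added; example_* is hypothetical, the loop mirrors
 * the receive path): unpacking a MSG_BUNDLER buffer. "pos" advances in
 * align()-ed steps, and the outer skb is consumed by the final call.
 */
static void __maybe_unused
example_unbundle(struct sk_buff *bskb, struct sk_buff_head *inputq)
{
	struct sk_buff *iskb;
	int pos = 0;

	while (tipc_msg_extract(bskb, &iskb, &pos))
		__skb_queue_tail(inputq, iskb);
	/* bskb has been freed by the last (failing) call */
}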

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+; the sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: the applicable net namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;
	return true;
}
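
/* Usage sketch (editor-added; example_* is hypothetical): the receive-path
 * pattern for a named message whose destination socket is gone: retry the
 * name-table lookup and either forward the buffer or reject it with the
 * error code the lookup selected.
 */
static void __maybe_unused
example_retranslate(struct net *net, struct sk_buff *skb,
		    struct sk_buff_head *xmitq)
{
	int err = TIPC_ERR_NO_PORT;

	if (tipc_msg_lookup_dest(net, skb, &err)) {
		__skb_queue_tail(xmitq, skb);	/* forward to new dest */
		return;
	}
	tipc_skb_reject(net, err, skb, xmitq);
}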

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 * Returns true if queued, false (and frees the buffer) on duplicate
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
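
/* Usage sketch (editor-added; example_* is hypothetical): inserting an
 * out-of-order arrival into a deferred-delivery queue; a duplicate sequence
 * number is freed by the helper and reported as false.
 */
static void __maybe_unused
example_defer(struct sk_buff_head *deferdq, struct sk_buff *skb)
{
	u16 seqno = msg_seqno(buf_msg(skb));

	if (!__tipc_skb_queue_sorted(deferdq, seqno, skb))
		pr_debug("Duplicate pkt %u dropped\n", seqno);
}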

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}