/*
 * net/tipc/group.c: TIPC group messaging code
 *
 * Copyright (c) 2017, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "name_table.h"

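/* Advertised window units, in FLOWCTL_BLK_SZ blocks:
 * ADV_UNIT:   blocks needed to hold one maximum-size message
 * ADV_IDLE:   window advertised to members in idle reception state
 * ADV_ACTIVE: window advertised to members in active reception state
 */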
#define ADV_UNIT (((MAX_MSG_SIZE + MAX_H_SIZE) / FLOWCTL_BLK_SZ) + 1)
#define ADV_IDLE ADV_UNIT
#define ADV_ACTIVE (ADV_UNIT * 12)

	struct rb_node tree_node;
	struct list_head list;
	struct list_head small_win;
	struct sk_buff_head deferredq;
	struct tipc_group *group;

	struct rb_root members;
	struct list_head small_win;
	struct list_head pending;
	struct list_head active;
	struct tipc_nlist dests;

static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
				  int mtyp, struct sk_buff_head *xmitq);

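/* tipc_group_open() - remove member from the small window list and mark the
 * owning socket as open for sending, waking it up if it was blocked
 */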
static void tipc_group_open(struct tipc_member *m, bool *wakeup)
{
	*wakeup = false;
	if (list_empty(&m->small_win))
		return;
	list_del_init(&m->small_win);
	*m->group->open = true;
	*wakeup = true;
}

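/* tipc_group_decr_active() - release an active slot if the member holds one */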
static void tipc_group_decr_active(struct tipc_group *grp,
				   struct tipc_member *m)
{
	if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
	    m->state == MBR_REMITTED)
		grp->active_cnt--;
}

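/* tipc_group_rcvbuf_limit() - calculate the receive buffer size needed to
 * honour the windows advertised to all other members of the group
 */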
static int tipc_group_rcvbuf_limit(struct tipc_group *grp)
{
	int max_active, active_pool, idle_pool;
	int mcnt = grp->member_cnt + 1;

	/* Limit simultaneous reception from other members */
	max_active = min(mcnt / 8, 64);
	max_active = max(max_active, 16);
	grp->max_active = max_active;

	/* Reserve blocks for active and idle members */
	active_pool = max_active * ADV_ACTIVE;
	idle_pool = (mcnt - max_active) * ADV_IDLE;

	/* Scale to bytes, considering worst-case truesize/msgsize ratio */
	return (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4;
}

u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
{
	return grp->bc_snd_nxt;
}

static bool tipc_group_is_receiver(struct tipc_member *m)
{
	return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
}

static bool tipc_group_is_sender(struct tipc_member *m)
{
	return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
}

u32 tipc_group_exclude(struct tipc_group *grp)
{
	if (!grp->loopback)
		return grp->portid;
	return 0;
}

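/* tipc_group_create() - allocate and initialize the group state for a socket
 * and subscribe to the group's name so that membership events are received
 */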
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
				     struct tipc_group_req *mreq,
				     bool *group_is_open)
{
	u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
	bool global = mreq->scope != TIPC_NODE_SCOPE;
	struct tipc_group *grp;
	u32 type = mreq->type;

	grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
	if (!grp)
		return NULL;
	tipc_nlist_init(&grp->dests, tipc_own_addr(net));
	INIT_LIST_HEAD(&grp->small_win);
	INIT_LIST_HEAD(&grp->active);
	INIT_LIST_HEAD(&grp->pending);
	grp->members = RB_ROOT;
	grp->net = net;
	grp->portid = portid;
	grp->type = type;
	grp->instance = mreq->instance;
	grp->scope = mreq->scope;
	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
	grp->open = group_is_open;

	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
				    filter, &grp->subid))
		return grp;
	kfree(grp);
	return NULL;
}

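/* tipc_group_join() - announce the joining socket to all known members and
 * adjust the socket's receive buffer limit to the group size
 */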
void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
{
	struct rb_root *tree = &grp->members;
	struct tipc_member *m, *tmp;
	struct sk_buff_head xmitq;

	skb_queue_head_init(&xmitq);
	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
		tipc_group_update_member(m, 0);
	}
	tipc_node_distr_xmit(net, &xmitq);
	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}

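/* tipc_group_delete() - send LEAVE to all members, release all member state
 * and cancel the group's topology subscription
 */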
void tipc_group_delete(struct net *net, struct tipc_group *grp)
{
	struct rb_root *tree = &grp->members;
	struct tipc_member *m, *tmp;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
		tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
		__skb_queue_purge(&m->deferredq);
		kfree(m);
	}
	tipc_node_distr_xmit(net, &xmitq);
	tipc_nlist_purge(&grp->dests);
	tipc_topsrv_kern_unsubscr(net, grp->subid);
	kfree(grp);
}

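/* tipc_group_find_member() - look up a member by <node, port> key in the
 * group's member rbtree
 */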
static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
						  u32 node, u32 port)
{
	struct rb_node *n = grp->members.rb_node;
	u64 nkey, key = (u64)node << 32 | port;
	struct tipc_member *m;

	while (n) {
		m = container_of(n, struct tipc_member, tree_node);
		nkey = (u64)m->node << 32 | m->port;
		if (key < nkey)
			n = n->rb_left;
		else if (key > nkey)
			n = n->rb_right;
		else
			return m;
	}
	return NULL;
}

static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
						u32 node, u32 port)
{
	struct tipc_member *m;

	m = tipc_group_find_member(grp, node, port);
	if (m && tipc_group_is_receiver(m))
		return m;
	return NULL;
}

static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
						u32 node)
{
	struct tipc_member *m;
	struct rb_node *n;

	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
		m = container_of(n, struct tipc_member, tree_node);
		if (m->node == node)
			return m;
	}
	return NULL;
}

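/* tipc_group_add_to_tree() - insert a member into the rbtree, keyed by
 * <node, port>; an already existing key is left untouched
 */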
static void tipc_group_add_to_tree(struct tipc_group *grp,
				   struct tipc_member *m)
{
	u64 nkey, key = (u64)m->node << 32 | m->port;
	struct rb_node **n, *parent = NULL;
	struct tipc_member *tmp;

	n = &grp->members.rb_node;
	while (*n) {
		tmp = container_of(*n, struct tipc_member, tree_node);
		parent = *n;
		tmp = container_of(parent, struct tipc_member, tree_node);
		nkey = (u64)tmp->node << 32 | tmp->port;
		if (key < nkey)
			n = &(*n)->rb_left;
		else if (key > nkey)
			n = &(*n)->rb_right;
		else
			return;
	}
	rb_link_node(&m->tree_node, parent, n);
	rb_insert_color(&m->tree_node, &grp->members);
}

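/* tipc_group_create_member() - allocate a member entry in the given state and
 * link it into the member tree and destination node list
 */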
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
						    u32 node, u32 port,
						    u32 instance, int state)
{
	struct tipc_member *m;

	m = kzalloc(sizeof(*m), GFP_ATOMIC);
	if (!m)
		return NULL;
	INIT_LIST_HEAD(&m->list);
	INIT_LIST_HEAD(&m->small_win);
	__skb_queue_head_init(&m->deferredq);
	m->group = grp;
	m->node = node;
	m->port = port;
	m->instance = instance;
	m->bc_acked = grp->bc_snd_nxt - 1;
	grp->member_cnt++;
	tipc_group_add_to_tree(grp, m);
	tipc_nlist_add(&grp->dests, m->node);
	m->state = state;
	return m;
}

void tipc_group_add_member(struct tipc_group *grp, u32 node,
			   u32 port, u32 instance)
{
	tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
}

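/* tipc_group_delete_member() - unlink a member from all group lists and free it */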
static void tipc_group_delete_member(struct tipc_group *grp,
				     struct tipc_member *m)
{
	rb_erase(&m->tree_node, &grp->members);
	grp->member_cnt--;

	/* Check if we were waiting for replicast ack from this member */
	if (grp->bc_ackers && less(m->bc_acked, grp->bc_snd_nxt - 1))
		grp->bc_ackers--;

	list_del_init(&m->list);
	list_del_init(&m->small_win);
	tipc_group_decr_active(grp, m);

	/* If last member on a node, remove node from dest list */
	if (!tipc_group_find_node(grp, m->node))
		tipc_nlist_del(&grp->dests, m->node);

	kfree(m);
}

struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
{
	return &grp->dests;
}

void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
		     int *scope)
{
	seq->type = grp->type;
	seq->lower = grp->instance;
	seq->upper = grp->instance;
	*scope = grp->scope;
}

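/* tipc_group_update_member() - charge a sent message against the member's
 * window and, if the window drops below the idle threshold, sort the member
 * into the group's small window list
 */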
void tipc_group_update_member(struct tipc_member *m, int len)
{
	struct tipc_group *grp = m->group;
	struct tipc_member *_m, *tmp;

	if (!tipc_group_is_receiver(m))
		return;

	m->window -= len;

	if (m->window >= ADV_IDLE)
		return;

	list_del_init(&m->small_win);

	/* Sort member into small_window members' list */
	list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
		if (_m->window > m->window)
			break;
	}
	list_add_tail(&m->small_win, &_m->small_win);
}

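/* tipc_group_update_bc_members() - charge a sent broadcast against all
 * receiving members' windows, note the number of expected acks and advance
 * the broadcast send sequence number
 */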
void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
{
	u16 prev = grp->bc_snd_nxt - 1;
	struct tipc_member *m;
	struct rb_node *n;
	u16 ackers = 0;

	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
		m = container_of(n, struct tipc_member, tree_node);
		if (tipc_group_is_receiver(m)) {
			tipc_group_update_member(m, len);
			m->bc_acked = prev;
			ackers++;
		}
	}

	/* Mark number of acknowledges to expect, if any */
	if (ack)
		grp->bc_ackers = ackers;
	grp->bc_snd_nxt++;
}

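/* tipc_group_cong() - check if a unicast of the given size to the given
 * member would exceed its window; if blocked, make sure the member is fully
 * advertised to prevent mutual blocking
 */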
bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
		     int len, struct tipc_member **mbr)
{
	struct sk_buff_head xmitq;
	struct tipc_member *m;
	int adv, state;

	m = tipc_group_find_dest(grp, dnode, dport);
	if (!tipc_group_is_receiver(m)) {
		*mbr = NULL;
		return false;
	}
	*mbr = m;

	if (m->window >= len)
		return false;

	*m->group->open = false;

	/* If not fully advertised, do it now to prevent mutual blocking */
	adv = m->advertised;
	state = m->state;
	if (state == MBR_JOINED && adv == ADV_IDLE)
		return true;
	if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
		return true;
	if (state == MBR_PENDING && adv == ADV_IDLE)
		return true;
	skb_queue_head_init(&xmitq);
	tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
	tipc_node_distr_xmit(grp->net, &xmitq);
	return true;
}

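/* tipc_group_bc_cong() - check if a broadcast of the given size would exceed
 * the window of the currently most congested member
 */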
bool tipc_group_bc_cong(struct tipc_group *grp, int len)
{
	struct tipc_member *m = NULL;

	/* If prev bcast was replicast, reject until all receivers have acked */
	if (grp->bc_ackers) {
		*grp->open = false;
		return true;
	}
	if (list_empty(&grp->small_win))
		return false;

	m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
	if (m->window >= len)
		return false;

	return tipc_group_cong(grp, m->node, m->port, len, &m);
}

/* tipc_group_sort_msg() - sort msg into queue by bcast sequence number
 */
static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
{
	struct tipc_msg *_hdr, *hdr = buf_msg(skb);
	u16 bc_seqno = msg_grp_bc_seqno(hdr);
	struct sk_buff *_skb, *tmp;
	int mtyp = msg_type(hdr);

	/* Bcast/mcast may be bypassed by ucast or other bcast, - sort it in */
	if (mtyp == TIPC_GRP_BCAST_MSG || mtyp == TIPC_GRP_MCAST_MSG) {
		skb_queue_walk_safe(defq, _skb, tmp) {
			_hdr = buf_msg(_skb);
			if (!less(bc_seqno, msg_grp_bc_seqno(_hdr)))
				continue;
			__skb_queue_before(defq, _skb, skb);
			return;
		}
		/* Bcast was not bypassed, - add to tail */
	}
	/* Unicasts are never bypassed, - always add to tail */
	__skb_queue_tail(defq, skb);
}

/* tipc_group_filter_msg() - determine if we should accept arriving message
 */
void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
			   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	bool ack, deliver, update, leave = false;
	struct sk_buff_head *defq;
	struct tipc_member *m;
	struct tipc_msg *hdr;
	u32 node, port;
	int mtyp, blks;

	if (!skb)
		return;

	hdr = buf_msg(skb);
	node = msg_orignode(hdr);
	port = msg_origport(hdr);

	if (!msg_in_group(hdr))
		goto drop;

	m = tipc_group_find_member(grp, node, port);
	if (!tipc_group_is_sender(m))
		goto drop;

	if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
		goto drop;

	TIPC_SKB_CB(skb)->orig_member = m->instance;
	defq = &m->deferredq;
	tipc_group_sort_msg(skb, defq);

	while ((skb = skb_peek(defq))) {
		hdr = buf_msg(skb);
		mtyp = msg_type(hdr);
		blks = msg_blocks(hdr);
		deliver = true;
		ack = false;
		update = false;

		if (more(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
			break;

		/* Decide what to do with message */
		switch (mtyp) {
		case TIPC_GRP_MCAST_MSG:
			if (msg_nameinst(hdr) != grp->instance) {
				update = true;
				deliver = false;
			}
			/* Fall thru */
		case TIPC_GRP_BCAST_MSG:
			m->bc_rcv_nxt++;
			ack = msg_grp_bc_ack_req(hdr);
			break;
		case TIPC_GRP_UCAST_MSG:
			break;
		case TIPC_GRP_MEMBER_EVT:
			if (m->state == MBR_LEAVING)
				leave = true;
			if (!grp->events)
				deliver = false;
			break;
		default:
			break;
		}

		/* Execute decisions */
		__skb_dequeue(defq);
		if (deliver)
			__skb_queue_tail(inputq, skb);
		else
			kfree_skb(skb);

		if (ack)
			tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);

		if (leave) {
			__skb_queue_purge(defq);
			tipc_group_delete_member(grp, m);
			break;
		}
		if (!update)
			continue;

		tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
	}
	return;
drop:
	kfree_skb(skb);
}

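/* tipc_group_update_rcv_win() - update advertised window and state for a
 * member we have received from, promoting pending members and reclaiming
 * windows from active members as the group grows
 */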
void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
			       u32 port, struct sk_buff_head *xmitq)
{
	struct list_head *active = &grp->active;
	int max_active = grp->max_active;
	int reclaim_limit = max_active * 3 / 4;
	int active_cnt = grp->active_cnt;
	struct tipc_member *m, *rm, *pm;

	m = tipc_group_find_member(grp, node, port);
	if (!m)
		return;

	m->advertised -= blks;

	switch (m->state) {
	case MBR_JOINED:
		/* First, decide if member can go active */
		if (active_cnt <= max_active) {
			m->state = MBR_ACTIVE;
			list_add_tail(&m->list, active);
			grp->active_cnt++;
			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		} else {
			m->state = MBR_PENDING;
			list_add_tail(&m->list, &grp->pending);
		}

		if (active_cnt < reclaim_limit)
			break;

		/* Reclaim from oldest active member, if possible */
		if (!list_empty(active)) {
			rm = list_first_entry(active, struct tipc_member, list);
			rm->state = MBR_RECLAIMING;
			list_del_init(&rm->list);
			tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
			break;
		}
		/* Nobody to reclaim from; - revert oldest pending to JOINED */
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		list_del_init(&pm->list);
		pm->state = MBR_JOINED;
		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		break;
	case MBR_ACTIVE:
		if (!list_is_last(&m->list, &grp->active))
			list_move_tail(&m->list, &grp->active);
		if (m->advertised > (ADV_ACTIVE * 3 / 4))
			break;
		tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		break;
	case MBR_REMITTED:
		if (m->advertised > ADV_IDLE)
			break;
		m->state = MBR_JOINED;
		grp->active_cnt--;
		if (m->advertised < ADV_IDLE) {
			pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		}

		if (list_empty(&grp->pending))
			return;

		/* Set oldest pending member to active and advertise */
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		pm->state = MBR_ACTIVE;
		list_move_tail(&pm->list, &grp->active);
		grp->active_cnt++;
		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		break;
	default:
		break;
	}
}

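/* tipc_group_create_event() - create a TIPC_PUBLISHED/TIPC_WITHDRAWN event
 * message about a member and queue it for local delivery to the socket owner
 */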
static void tipc_group_create_event(struct tipc_group *grp,
				    struct tipc_member *m,
				    u32 event, u16 seqno,
				    struct sk_buff_head *inputq)
{
	u32 dnode = tipc_own_addr(grp->net);
	struct tipc_event evt;
	struct sk_buff *skb;
	struct tipc_msg *hdr;

	memset(&evt, 0, sizeof(evt));
	evt.event = event;
	evt.found_lower = m->instance;
	evt.found_upper = m->instance;
	evt.port.ref = m->port;
	evt.port.node = m->node;
	evt.s.seq.type = grp->type;
	evt.s.seq.lower = m->instance;
	evt.s.seq.upper = m->instance;

	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
			      GROUP_H_SIZE, sizeof(evt), dnode, m->node,
			      grp->portid, m->port, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	msg_set_nametype(hdr, grp->type);
	msg_set_grp_evt(hdr, event);
	msg_set_dest_droppable(hdr, true);
	msg_set_grp_bc_seqno(hdr, seqno);
	memcpy(msg_data(hdr), &evt, sizeof(evt));
	TIPC_SKB_CB(skb)->orig_member = m->instance;
	__skb_queue_tail(inputq, skb);
}

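/* tipc_group_proto_xmit() - build and queue a group protocol message (JOIN,
 * LEAVE, ADV, ACK or REMIT) towards the given member
 */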
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
				  int mtyp, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	int adv = 0;

	skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
			      m->node, tipc_own_addr(grp->net),
			      m->port, grp->portid, 0);
	if (!skb)
		return;

	if (m->state == MBR_ACTIVE)
		adv = ADV_ACTIVE - m->advertised;
	else if (m->state == MBR_JOINED || m->state == MBR_PENDING)
		adv = ADV_IDLE - m->advertised;

	hdr = buf_msg(skb);

	if (mtyp == GRP_JOIN_MSG) {
		msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
		msg_set_adv_win(hdr, adv);
		m->advertised += adv;
	} else if (mtyp == GRP_LEAVE_MSG) {
		msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
	} else if (mtyp == GRP_ADV_MSG) {
		msg_set_adv_win(hdr, adv);
		m->advertised += adv;
	} else if (mtyp == GRP_ACK_MSG) {
		msg_set_grp_bc_acked(hdr, m->bc_rcv_nxt);
	} else if (mtyp == GRP_REMIT_MSG) {
		msg_set_grp_remitted(hdr, m->window);
	}
	msg_set_dest_droppable(hdr, true);
	__skb_queue_tail(xmitq, skb);
}

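/* tipc_group_proto_rcv() - process an incoming group protocol message and
 * update the corresponding member's state
 */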
void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
			  struct tipc_msg *hdr, struct sk_buff_head *inputq,
			  struct sk_buff_head *xmitq)
{
	u32 node = msg_orignode(hdr);
	u32 port = msg_origport(hdr);
	struct tipc_member *m, *pm;
	u16 remitted, in_flight;

	if (!grp)
		return;

	if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
		return;

	m = tipc_group_find_member(grp, node, port);

	switch (msg_type(hdr)) {
	case GRP_JOIN_MSG:
		if (!m)
			m = tipc_group_create_member(grp, node, port,
						     0, MBR_JOINING);
		if (!m)
			return;
		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
		m->bc_rcv_nxt = m->bc_syncpt;
		m->window += msg_adv_win(hdr);

		/* Wait until PUBLISH event is received if necessary */
		if (m->state != MBR_PUBLISHED)
			return;

		/* Member can be taken into service */
		m->state = MBR_JOINED;
		tipc_group_open(m, usr_wakeup);
		tipc_group_update_member(m, 0);
		tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
					m->bc_syncpt, inputq);
		return;
	case GRP_LEAVE_MSG:
		if (!m)
			return;
		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
		list_del_init(&m->list);
		tipc_group_open(m, usr_wakeup);
		tipc_group_decr_active(grp, m);
		m->state = MBR_LEAVING;
		tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
					m->bc_syncpt, inputq);
		return;
	case GRP_ADV_MSG:
		if (!m)
			return;
		m->window += msg_adv_win(hdr);
		tipc_group_open(m, usr_wakeup);
		return;
	case GRP_ACK_MSG:
		if (!m)
			return;
		m->bc_acked = msg_grp_bc_acked(hdr);
		if (--grp->bc_ackers)
			return;
		list_del_init(&m->small_win);
		*m->group->open = true;
		*usr_wakeup = true;
		tipc_group_update_member(m, 0);
		return;
	case GRP_RECLAIM_MSG:
		if (!m)
			return;
		tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
		m->window = ADV_IDLE;
		tipc_group_open(m, usr_wakeup);
		return;
	case GRP_REMIT_MSG:
		if (!m || m->state != MBR_RECLAIMING)
			return;

		remitted = msg_grp_remitted(hdr);

		/* Messages preceding the REMIT still in receive queue */
		if (m->advertised > remitted) {
			m->state = MBR_REMITTED;
			in_flight = m->advertised - remitted;
			m->advertised = ADV_IDLE + in_flight;
			return;
		}
		/* This should never happen */
		if (m->advertised < remitted)
			pr_warn_ratelimited("Unexpected REMIT msg\n");

		/* All messages preceding the REMIT have been read */
		m->state = MBR_JOINED;
		grp->active_cnt--;
		m->advertised = ADV_IDLE;

		/* Set oldest pending member to active and advertise */
		if (list_empty(&grp->pending))
			return;
		pm = list_first_entry(&grp->pending, struct tipc_member, list);
		pm->state = MBR_ACTIVE;
		list_move_tail(&pm->list, &grp->active);
		grp->active_cnt++;
		if (pm->advertised <= (ADV_ACTIVE * 3 / 4))
			tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
		return;
	default:
		pr_warn("Received unknown GROUP_PROTO message\n");
	}
}

/* tipc_group_member_evt() - receive and handle a member up/down event
 */
void tipc_group_member_evt(struct tipc_group *grp,
			   bool *usr_wakeup,
			   int *sk_rcvbuf,
			   struct tipc_msg *hdr,
			   struct sk_buff_head *inputq,
			   struct sk_buff_head *xmitq)
{
	struct tipc_event *evt = (void *)msg_data(hdr);
	u32 instance = evt->found_lower;
	u32 node = evt->port.node;
	u32 port = evt->port.ref;
	int event = evt->event;
	struct tipc_member *m;
	struct net *net;
	u32 self;

	if (!grp)
		return;

	net = grp->net;
	self = tipc_own_addr(net);
	if (!grp->loopback && node == self && port == grp->portid)
		return;

	m = tipc_group_find_member(grp, node, port);

	switch (event) {
	case TIPC_PUBLISHED:
		/* Send and wait for arrival of JOIN message if necessary */
		if (!m) {
			m = tipc_group_create_member(grp, node, port, instance,
						     MBR_PUBLISHED);
			if (!m)
				break;
			tipc_group_update_member(m, 0);
			tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
			break;
		}

		if (m->state != MBR_JOINING)
			break;

		/* Member can be taken into service */
		m->instance = instance;
		m->state = MBR_JOINED;
		tipc_group_open(m, usr_wakeup);
		tipc_group_update_member(m, 0);
		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
					m->bc_syncpt, inputq);
		break;
	case TIPC_WITHDRAWN:
		if (!m)
			break;

		tipc_group_decr_active(grp, m);
		m->state = MBR_LEAVING;
		list_del_init(&m->list);
		tipc_group_open(m, usr_wakeup);

		/* Only send event if no LEAVE message can be expected */
		if (!tipc_node_is_up(net, node))
			tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
						m->bc_rcv_nxt, inputq);
		break;
	default:
		break;
	}
	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}

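/* tipc_group_fill_sock_diag() - dump group attributes for the sock_diag
 * netlink interface
 */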
int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
{
	struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);

	if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
			grp->type) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
			grp->instance) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
			grp->bc_snd_nxt))
		goto group_msg_cancel;

	if (grp->scope == TIPC_NODE_SCOPE)
		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
			goto group_msg_cancel;

	if (grp->scope == TIPC_CLUSTER_SCOPE)
		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
			goto group_msg_cancel;

	if (*grp->open)
		if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
			goto group_msg_cancel;

	nla_nest_end(skb, group);
	return 0;

group_msg_cancel:
	nla_nest_cancel(skb, group);
	return -EMSGSIZE;
}