2 * net/tipc/node.c: TIPC node management routines
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
49 #define INVALID_NODE_SIG 0x10000
50 #define NODE_CLEANUP_AFTER 300000	/* ms (5 minutes) */
52 /* Flags used to take different actions according to flag type
53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 * TIPC_NOTIFY_LINK_UP/TIPC_NOTIFY_LINK_DOWN: notify link is up/down
58 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
59 TIPC_NOTIFY_NODE_UP = (1 << 4),
60 TIPC_NOTIFY_LINK_UP = (1 << 6),
61 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
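/* Illustrative note (not part of the original file): these bits are set on
 * n->action_flags while the node write lock is held, e.g.
 *
 *	n->action_flags |= TIPC_NOTIFY_LINK_UP;
 *	n->link_id = tipc_link_id(nl);
 *
 * and are consumed and cleared again by tipc_node_write_unlock() further
 * down, which performs the corresponding name table and monitor
 * notifications only after the lock has been dropped.
 */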
64 struct tipc_link_entry {
65 struct tipc_link *link;
66 spinlock_t lock; /* per link */
68 struct sk_buff_head inputq;
69 struct tipc_media_addr maddr;
72 struct tipc_bclink_entry {
73 struct tipc_link *link;
74 struct sk_buff_head inputq1;
75 struct sk_buff_head arrvq;
76 struct sk_buff_head inputq2;
77 struct sk_buff_head namedq;
83 * struct tipc_node - TIPC node structure
84 * @addr: network address of node
85 * @ref: reference counter to node object
86 * @lock: rwlock governing access to structure
87 * @net: the applicable net namespace
88 * @hash: links to adjacent nodes in unsorted hash chain
89 * @inputq: pointer to input queue containing messages for msg event
90 * @namedq: pointer to name table input queue with name table messages
91 * @active_links: bearer ids of active links, used as index into links[] array
92 * @links: array containing references to all links to node
93 * @action_flags: bit mask of different types of node actions
94 * @state: connectivity state vs peer node
95 * @preliminary: whether this node is still preliminary
96 * @sync_point: sequence number where synch/failover is finished
97 * @list: links to adjacent nodes in sorted list of cluster's nodes
98 * @working_links: number of working links to node (both active and standby)
99 * @link_cnt: number of links to node
100 * @capabilities: bitmap, indicating peer node's functional capabilities
101 * @signature: node instance identifier
102 * @link_id: local and remote bearer ids of changing link, if any
103 * @publ_list: list of publications
104 * @rcu: rcu struct for tipc_node
105 * @delete_at: indicates the time for deleting a down node
106 * @crypto_rx: RX crypto handler
113 struct hlist_node hash;
115 struct tipc_link_entry links[MAX_BEARERS];
116 struct tipc_bclink_entry bc_entry;
118 struct list_head list;
129 char peer_id_string[NODE_ID_STR_LEN];
130 struct list_head publ_list;
131 struct list_head conn_sks;
132 unsigned long keepalive_intv;
133 struct timer_list timer;
135 unsigned long delete_at;
136 struct net *peer_net;
138 #ifdef CONFIG_TIPC_CRYPTO
139 struct tipc_crypto *crypto_rx;
143 /* Node FSM states and events:
146 SELF_DOWN_PEER_DOWN = 0xdd,
147 SELF_UP_PEER_UP = 0xaa,
148 SELF_DOWN_PEER_LEAVING = 0xd1,
149 SELF_UP_PEER_COMING = 0xac,
150 SELF_COMING_PEER_UP = 0xca,
151 SELF_LEAVING_PEER_DOWN = 0x1d,
152 NODE_FAILINGOVER = 0xf0,
157 SELF_ESTABL_CONTACT_EVT = 0xece,
158 SELF_LOST_CONTACT_EVT = 0x1ce,
159 PEER_ESTABL_CONTACT_EVT = 0x9ece,
160 PEER_LOST_CONTACT_EVT = 0x91ce,
161 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
162 NODE_FAILOVER_END_EVT = 0xfee,
163 NODE_SYNCH_BEGIN_EVT = 0xcbe,
164 NODE_SYNCH_END_EVT = 0xcee
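/* Illustrative sketch (not part of the original file): the state and event
 * values above are opaque constants interpreted by the switch statement in
 * tipc_node_fsm_evt() below. A debug helper of this kind, assumed here
 * purely for tracing, shows how they could be mapped to readable names:
 */
static inline const char *tipc_node_state_name(int state)
{
	switch (state) {
	case SELF_DOWN_PEER_DOWN:	return "SELF_DOWN_PEER_DOWN";
	case SELF_UP_PEER_UP:		return "SELF_UP_PEER_UP";
	case SELF_DOWN_PEER_LEAVING:	return "SELF_DOWN_PEER_LEAVING";
	case SELF_UP_PEER_COMING:	return "SELF_UP_PEER_COMING";
	case SELF_COMING_PEER_UP:	return "SELF_COMING_PEER_UP";
	case SELF_LEAVING_PEER_DOWN:	return "SELF_LEAVING_PEER_DOWN";
	case NODE_FAILINGOVER:		return "NODE_FAILINGOVER";
	case NODE_SYNCHING:		return "NODE_SYNCHING";
	default:			return "UNKNOWN";
	}
}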
167 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
168 struct sk_buff_head *xmitq,
169 struct tipc_media_addr **maddr);
170 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
172 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
173 static void tipc_node_delete(struct tipc_node *node);
174 static void tipc_node_timeout(struct timer_list *t);
175 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
176 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
177 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
178 static bool node_is_up(struct tipc_node *n);
179 static void tipc_node_delete_from_list(struct tipc_node *node);
181 struct tipc_sock_conn {
185 struct list_head list;
188 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
190 int bearer_id = n->active_links[sel & 1];
192 if (unlikely(bearer_id == INVALID_BEARER_ID))
195 return n->links[bearer_id].link;
198 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
202 unsigned int mtu = MAX_MSG_SIZE;
204 n = tipc_node_find(net, addr);
208 /* Allow MAX_MSG_SIZE when building connection oriented messages
209 * if the two nodes are in the same core network
211 if (n->peer_net && connected) {
216 bearer_id = n->active_links[sel & 1];
217 if (likely(bearer_id != INVALID_BEARER_ID))
218 mtu = n->links[bearer_id].mtu;
223 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
225 u8 *own_id = tipc_own_id(net);
231 if (addr == tipc_own_addr(net)) {
232 memcpy(id, own_id, TIPC_NODEID_LEN);
235 n = tipc_node_find(net, addr);
239 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
244 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
249 n = tipc_node_find(net, addr);
251 return TIPC_NODE_CAPABILITIES;
252 caps = n->capabilities;
257 u32 tipc_node_get_addr(struct tipc_node *node)
259 return (node) ? node->addr : 0;
262 char *tipc_node_get_id_str(struct tipc_node *node)
264 return node->peer_id_string;
267 #ifdef CONFIG_TIPC_CRYPTO
269 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
270 * Note: the caller must already hold a node reference!
272 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
274 return (__n) ? __n->crypto_rx : NULL;
277 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
279 return container_of(pos, struct tipc_node, list)->crypto_rx;
283 static void tipc_node_free(struct rcu_head *rp)
285 struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
287 #ifdef CONFIG_TIPC_CRYPTO
288 tipc_crypto_stop(&n->crypto_rx);
293 static void tipc_node_kref_release(struct kref *kref)
295 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
297 kfree(n->bc_entry.link);
298 call_rcu(&n->rcu, tipc_node_free);
301 void tipc_node_put(struct tipc_node *node)
303 kref_put(&node->kref, tipc_node_kref_release);
306 static void tipc_node_get(struct tipc_node *node)
308 kref_get(&node->kref);
312 * tipc_node_find - locate specified node object, if it exists
314 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
316 struct tipc_net *tn = tipc_net(net);
317 struct tipc_node *node;
318 unsigned int thash = tipc_hashfn(addr);
321 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
322 if (node->addr != addr || node->preliminary)
324 if (!kref_get_unless_zero(&node->kref))
332 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
333 * Note: this function is called only when a discovery request failed
334 * to find the node by its 32-bit id, and is not time critical
336 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
338 struct tipc_net *tn = tipc_net(net);
343 list_for_each_entry_rcu(n, &tn->node_list, list) {
344 read_lock_bh(&n->lock);
345 if (!memcmp(id, n->peer_id, 16) &&
346 kref_get_unless_zero(&n->kref))
348 read_unlock_bh(&n->lock);
353 return found ? n : NULL;
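/* Illustrative sketch (not part of the original file): both lookup functions
 * above return with a node reference taken via kref_get_unless_zero(), so
 * every successful lookup must be balanced with tipc_node_put(). A
 * hypothetical caller therefore follows this pattern:
 */
static bool tipc_node_peer_known_example(struct net *net, u32 addr)
{
	struct tipc_node *n = tipc_node_find(net, addr);

	if (!n)
		return false;
	/* ... read or update node state here ... */
	tipc_node_put(n);	/* drop the reference taken by the lookup */
	return true;
}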
356 static void tipc_node_read_lock(struct tipc_node *n)
358 read_lock_bh(&n->lock);
361 static void tipc_node_read_unlock(struct tipc_node *n)
363 read_unlock_bh(&n->lock);
366 static void tipc_node_write_lock(struct tipc_node *n)
368 write_lock_bh(&n->lock);
371 static void tipc_node_write_unlock_fast(struct tipc_node *n)
373 write_unlock_bh(&n->lock);
376 static void tipc_node_write_unlock(struct tipc_node *n)
378 struct net *net = n->net;
380 u32 flags = n->action_flags;
383 struct list_head *publ_list;
385 if (likely(!flags)) {
386 write_unlock_bh(&n->lock);
391 link_id = n->link_id;
392 bearer_id = link_id & 0xffff;
393 publ_list = &n->publ_list;
395 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
396 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
398 write_unlock_bh(&n->lock);
400 if (flags & TIPC_NOTIFY_NODE_DOWN)
401 tipc_publ_notify(net, publ_list, addr, n->capabilities);
403 if (flags & TIPC_NOTIFY_NODE_UP)
404 tipc_named_node_up(net, addr, n->capabilities);
406 if (flags & TIPC_NOTIFY_LINK_UP) {
407 tipc_mon_peer_up(net, addr, bearer_id);
408 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
409 TIPC_NODE_SCOPE, link_id, link_id);
411 if (flags & TIPC_NOTIFY_LINK_DOWN) {
412 tipc_mon_peer_down(net, addr, bearer_id);
413 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
418 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
420 int net_id = tipc_netid(n->net);
421 struct tipc_net *tn_peer;
428 for_each_net_rcu(tmp) {
429 tn_peer = tipc_net(tmp);
432 /* Check whether this namespace matches the peer node */
433 if (tn_peer->net_id != net_id)
435 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
437 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
438 if (hash_mixes ^ hash_chk)
441 n->peer_hash_mix = hash_mixes;
446 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
447 u16 capabilities, u32 hash_mixes,
450 struct tipc_net *tn = net_generic(net, tipc_net_id);
451 struct tipc_node *n, *temp_node;
457 spin_lock_bh(&tn->node_list_lock);
458 n = tipc_node_find(net, addr) ?:
459 tipc_node_find_by_id(net, peer_id);
465 /* A preliminary node becomes "real" now, refresh its data */
466 tipc_node_write_lock(n);
467 n->preliminary = false;
469 hlist_del_rcu(&n->hash);
470 hlist_add_head_rcu(&n->hash,
471 &tn->node_htable[tipc_hashfn(addr)]);
472 list_del_rcu(&n->list);
473 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
474 if (n->addr < temp_node->addr)
477 list_add_tail_rcu(&n->list, &temp_node->list);
478 tipc_node_write_unlock_fast(n);
481 if (n->peer_hash_mix ^ hash_mixes)
482 tipc_node_assign_peer_net(n, hash_mixes);
483 if (n->capabilities == capabilities)
485 /* Same node may come back with new capabilities */
486 tipc_node_write_lock(n);
487 n->capabilities = capabilities;
488 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
489 l = n->links[bearer_id].link;
491 tipc_link_update_caps(l, capabilities);
493 tipc_node_write_unlock_fast(n);
495 /* Calculate cluster capabilities */
496 tn->capabilities = TIPC_NODE_CAPABILITIES;
497 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
498 tn->capabilities &= temp_node->capabilities;
501 tipc_bcast_toggle_rcast(net,
502 (tn->capabilities & TIPC_BCAST_RCAST));
506 n = kzalloc(sizeof(*n), GFP_ATOMIC);
508 pr_warn("Node creation failed, no memory\n");
511 tipc_nodeid2string(n->peer_id_string, peer_id);
512 #ifdef CONFIG_TIPC_CRYPTO
513 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
514 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
521 n->preliminary = preliminary;
522 memcpy(&n->peer_id, peer_id, 16);
525 n->peer_hash_mix = 0;
526 /* Assign the kernel-local peer namespace, if one exists */
527 tipc_node_assign_peer_net(n, hash_mixes);
528 n->capabilities = capabilities;
530 rwlock_init(&n->lock);
531 INIT_HLIST_NODE(&n->hash);
532 INIT_LIST_HEAD(&n->list);
533 INIT_LIST_HEAD(&n->publ_list);
534 INIT_LIST_HEAD(&n->conn_sks);
535 skb_queue_head_init(&n->bc_entry.namedq);
536 skb_queue_head_init(&n->bc_entry.inputq1);
537 __skb_queue_head_init(&n->bc_entry.arrvq);
538 skb_queue_head_init(&n->bc_entry.inputq2);
539 for (i = 0; i < MAX_BEARERS; i++)
540 spin_lock_init(&n->links[i].lock);
541 n->state = SELF_DOWN_PEER_LEAVING;
542 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
543 n->signature = INVALID_NODE_SIG;
544 n->active_links[0] = INVALID_BEARER_ID;
545 n->active_links[1] = INVALID_BEARER_ID;
546 n->bc_entry.link = NULL;
548 timer_setup(&n->timer, tipc_node_timeout, 0);
549 /* Start a slow timer anyway, crypto needs it */
550 n->keepalive_intv = 10000;
551 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
552 if (!mod_timer(&n->timer, intv))
554 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
555 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
556 if (n->addr < temp_node->addr)
559 list_add_tail_rcu(&n->list, &temp_node->list);
560 /* Calculate cluster capabilities */
561 tn->capabilities = TIPC_NODE_CAPABILITIES;
562 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
563 tn->capabilities &= temp_node->capabilities;
565 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
566 trace_tipc_node_create(n, true, " ");
568 spin_unlock_bh(&tn->node_list_lock);
572 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
574 unsigned long tol = tipc_link_tolerance(l);
575 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
577 /* Link with lowest tolerance determines timer interval */
578 if (intv < n->keepalive_intv)
579 n->keepalive_intv = intv;
581 /* Ensure link's abort limit corresponds to current tolerance */
582 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
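/* Worked example (illustrative): for the link with the lowest tolerance,
 * say 1500 ms, the interval above becomes min(1500 / 4, 500) = 375 ms and
 * the abort limit 1500 / 375 = 4 missed probes; a 3000 ms tolerance is
 * capped at a 500 ms interval and yields an abort limit of 6.
 */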
585 static void tipc_node_delete_from_list(struct tipc_node *node)
587 list_del_rcu(&node->list);
588 hlist_del_rcu(&node->hash);
592 static void tipc_node_delete(struct tipc_node *node)
594 trace_tipc_node_delete(node, true, " ");
595 tipc_node_delete_from_list(node);
597 del_timer_sync(&node->timer);
601 void tipc_node_stop(struct net *net)
603 struct tipc_net *tn = tipc_net(net);
604 struct tipc_node *node, *t_node;
606 spin_lock_bh(&tn->node_list_lock);
607 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
608 tipc_node_delete(node);
609 spin_unlock_bh(&tn->node_list_lock);
612 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
616 if (in_own_node(net, addr))
619 n = tipc_node_find(net, addr);
621 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
624 tipc_node_write_lock(n);
625 list_add_tail(subscr, &n->publ_list);
626 tipc_node_write_unlock_fast(n);
630 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
634 if (in_own_node(net, addr))
637 n = tipc_node_find(net, addr);
639 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
642 tipc_node_write_lock(n);
643 list_del_init(subscr);
644 tipc_node_write_unlock_fast(n);
648 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
650 struct tipc_node *node;
651 struct tipc_sock_conn *conn;
654 if (in_own_node(net, dnode))
657 node = tipc_node_find(net, dnode);
659 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
660 return -EHOSTUNREACH;
662 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
667 conn->peer_node = dnode;
669 conn->peer_port = peer_port;
671 tipc_node_write_lock(node);
672 list_add_tail(&conn->list, &node->conn_sks);
673 tipc_node_write_unlock(node);
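/* Illustrative sketch (not part of the original file): a socket holding a
 * connection to a remote node is expected to register it here, so that
 * node_lost_contact() can abort it with TIPC_ERR_NO_NODE if the node
 * disappears, and to unregister it again on normal shutdown. The port
 * numbers below are hypothetical:
 */
static int tipc_sock_conn_example(struct net *net, u32 dnode)
{
	u32 own_port = 100, peer_port = 101;	/* hypothetical port ids */
	int err;

	err = tipc_node_add_conn(net, dnode, own_port, peer_port);
	if (err)
		return err;
	/* ... connection in use ... */
	tipc_node_remove_conn(net, dnode, own_port);
	return 0;
}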
679 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
681 struct tipc_node *node;
682 struct tipc_sock_conn *conn, *safe;
684 if (in_own_node(net, dnode))
687 node = tipc_node_find(net, dnode);
691 tipc_node_write_lock(node);
692 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
693 if (port != conn->port)
695 list_del(&conn->list);
698 tipc_node_write_unlock(node);
702 static void tipc_node_clear_links(struct tipc_node *node)
706 for (i = 0; i < MAX_BEARERS; i++) {
707 struct tipc_link_entry *le = &node->links[i];
717 /* tipc_node_cleanup - delete nodes that have not
718 * had any active links for NODE_CLEANUP_AFTER time
720 static bool tipc_node_cleanup(struct tipc_node *peer)
722 struct tipc_node *temp_node;
723 struct tipc_net *tn = tipc_net(peer->net);
724 bool deleted = false;
726 /* If lock held by tipc_node_stop() the node will be deleted anyway */
727 if (!spin_trylock_bh(&tn->node_list_lock))
730 tipc_node_write_lock(peer);
732 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
733 tipc_node_clear_links(peer);
734 tipc_node_delete_from_list(peer);
737 tipc_node_write_unlock(peer);
740 spin_unlock_bh(&tn->node_list_lock);
744 /* Calculate cluster capabilities */
745 tn->capabilities = TIPC_NODE_CAPABILITIES;
746 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
747 tn->capabilities &= temp_node->capabilities;
749 tipc_bcast_toggle_rcast(peer->net,
750 (tn->capabilities & TIPC_BCAST_RCAST));
751 spin_unlock_bh(&tn->node_list_lock);
755 /* tipc_node_timeout - handle expiration of node timer
757 static void tipc_node_timeout(struct timer_list *t)
759 struct tipc_node *n = from_timer(n, t, timer);
760 struct tipc_link_entry *le;
761 struct sk_buff_head xmitq;
762 int remains = n->link_cnt;
766 trace_tipc_node_timeout(n, false, " ");
767 if (!node_is_up(n) && tipc_node_cleanup(n)) {
768 /* Drop the node reference held for the timer */
773 #ifdef CONFIG_TIPC_CRYPTO
774 /* Take any crypto key related actions first */
775 tipc_crypto_timeout(n->crypto_rx);
777 __skb_queue_head_init(&xmitq);
779 /* Start with a large node interval (10 seconds); it will then be
780 * recalculated from the lowest link tolerance
782 tipc_node_read_lock(n);
783 n->keepalive_intv = 10000;
784 tipc_node_read_unlock(n);
785 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
786 tipc_node_read_lock(n);
787 le = &n->links[bearer_id];
789 spin_lock_bh(&le->lock);
790 /* Link tolerance may change asynchronously: */
791 tipc_node_calculate_timer(n, le->link);
792 rc = tipc_link_timeout(le->link, &xmitq);
793 spin_unlock_bh(&le->lock);
796 tipc_node_read_unlock(n);
797 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
798 if (rc & TIPC_LINK_DOWN_EVT)
799 tipc_node_link_down(n, bearer_id, false);
801 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
805 * __tipc_node_link_up - handle addition of link
806 * Node lock must be held by caller
807 * Link becomes active (alone or shared) or standby, depending on its priority.
809 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
810 struct sk_buff_head *xmitq)
812 int *slot0 = &n->active_links[0];
813 int *slot1 = &n->active_links[1];
814 struct tipc_link *ol = node_active_link(n, 0);
815 struct tipc_link *nl = n->links[bearer_id].link;
817 if (!nl || tipc_link_is_up(nl))
820 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
821 if (!tipc_link_is_up(nl))
825 n->action_flags |= TIPC_NOTIFY_LINK_UP;
826 n->link_id = tipc_link_id(nl);
828 /* Leave room for tunnel header when returning 'mtu' to users: */
829 n->links[bearer_id].mtu = tipc_link_mss(nl);
831 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
832 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
834 pr_debug("Established link <%s> on network plane %c\n",
835 tipc_link_name(nl), tipc_link_plane(nl));
836 trace_tipc_node_link_up(n, true, " ");
838 /* Ensure that a STATE message goes first */
839 tipc_link_build_state_msg(nl, xmitq);
841 /* First link? => give it both slots */
845 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
846 n->action_flags |= TIPC_NOTIFY_NODE_UP;
847 tipc_link_set_active(nl, true);
848 tipc_bcast_add_peer(n->net, nl, xmitq);
852 /* Second link => redistribute slots */
853 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
854 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
857 tipc_link_set_active(nl, true);
858 tipc_link_set_active(ol, false);
859 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
860 tipc_link_set_active(nl, true);
863 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
866 /* Prepare synchronization with first link */
867 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
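/* Illustrative sketch (not part of the original file): the active_links
 * slots set up above drive per-message link selection. A single active link
 * owns both slots; two links of equal priority get one slot each, so the low
 * bit of the caller's selector load-shares between them, as in this assumed
 * helper:
 */
static struct tipc_link *tipc_node_select_link_example(struct tipc_node *n,
							u32 selector)
{
	/* node_active_link() maps selector bit 0 to a slot and handles the
	 * case where no link is up (INVALID_BEARER_ID)
	 */
	return node_active_link(n, selector);
}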
871 * tipc_node_link_up - handle addition of link
873 * Link becomes active (alone or shared) or standby, depending on its priority.
875 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
876 struct sk_buff_head *xmitq)
878 struct tipc_media_addr *maddr;
880 tipc_node_write_lock(n);
881 __tipc_node_link_up(n, bearer_id, xmitq);
882 maddr = &n->links[bearer_id].maddr;
883 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
884 tipc_node_write_unlock(n);
888 * tipc_node_link_failover() - start failover in case "half-failover"
890 * This function is only called in a very special situation where link
891 * failover can be already started on peer node but not on this node.
892 * This can happen when e.g.
893 * 1. Both links <1A-2A>, <1B-2B> down
894 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
895 * disturbance, wrong session, etc.)
897 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
898 * 5. Node 2 starts failover onto link <1B-2B>
900 * ==> Node 1 never starts link/node failover!
902 * @n: tipc node structure
903 * @l: link endpoint that is failing over (can be NULL)
905 * @xmitq: queue for messages to be transmitted later on the tunnel link
907 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
908 struct tipc_link *tnl,
909 struct sk_buff_head *xmitq)
911 /* Avoid a "self-failover" that can never end */
912 if (!tipc_link_is_up(tnl))
915 /* Don't rush; the failed link may still be in the process of resetting */
916 if (l && !tipc_link_is_reset(l))
919 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
920 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
922 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
923 tipc_link_failover_prepare(l, tnl, xmitq);
926 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
927 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
931 * __tipc_node_link_down - handle loss of link
933 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
934 struct sk_buff_head *xmitq,
935 struct tipc_media_addr **maddr)
937 struct tipc_link_entry *le = &n->links[*bearer_id];
938 int *slot0 = &n->active_links[0];
939 int *slot1 = &n->active_links[1];
940 int i, highest = 0, prio;
941 struct tipc_link *l, *_l, *tnl;
943 l = n->links[*bearer_id].link;
944 if (!l || tipc_link_is_reset(l))
948 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
949 n->link_id = tipc_link_id(l);
951 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
953 pr_debug("Lost link <%s> on network plane %c\n",
954 tipc_link_name(l), tipc_link_plane(l));
956 /* Select new active link if any available */
957 *slot0 = INVALID_BEARER_ID;
958 *slot1 = INVALID_BEARER_ID;
959 for (i = 0; i < MAX_BEARERS; i++) {
960 _l = n->links[i].link;
961 if (!_l || !tipc_link_is_up(_l))
965 prio = tipc_link_prio(_l);
968 if (prio > highest) {
977 if (!node_is_up(n)) {
978 if (tipc_link_peer_is_down(l))
979 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
980 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
981 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
982 tipc_link_fsm_evt(l, LINK_RESET_EVT);
984 tipc_link_build_reset_msg(l, xmitq);
985 *maddr = &n->links[*bearer_id].maddr;
986 node_lost_contact(n, &le->inputq);
987 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
990 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
992 /* There is still a working link => initiate failover */
993 *bearer_id = n->active_links[0];
994 tnl = n->links[*bearer_id].link;
995 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
996 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
997 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
998 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
999 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
1001 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1002 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1003 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
1004 *maddr = &n->links[*bearer_id].maddr;
1007 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
1009 struct tipc_link_entry *le = &n->links[bearer_id];
1010 struct tipc_media_addr *maddr = NULL;
1011 struct tipc_link *l = le->link;
1012 int old_bearer_id = bearer_id;
1013 struct sk_buff_head xmitq;
1018 __skb_queue_head_init(&xmitq);
1020 tipc_node_write_lock(n);
1021 if (!tipc_link_is_establishing(l)) {
1022 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1024 /* Defuse pending tipc_node_link_up() */
1026 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1033 trace_tipc_node_link_down(n, true, "node link down or deleted!");
1034 tipc_node_write_unlock(n);
1036 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
1037 if (!skb_queue_empty(&xmitq))
1038 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1039 tipc_sk_rcv(n->net, &le->inputq);
1042 static bool node_is_up(struct tipc_node *n)
1044 return n->active_links[0] != INVALID_BEARER_ID;
1047 bool tipc_node_is_up(struct net *net, u32 addr)
1049 struct tipc_node *n;
1050 bool retval = false;
1052 if (in_own_node(net, addr))
1055 n = tipc_node_find(net, addr);
1058 retval = node_is_up(n);
1063 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
1065 struct tipc_node *n;
1067 addr ^= tipc_net(net)->random;
1068 while ((n = tipc_node_find(net, addr))) {
1075 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
1076 * Returns suggested address if any, otherwise 0
1078 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
1080 struct tipc_net *tn = tipc_net(net);
1081 struct tipc_node *n;
1085 /* Suggest new address if some other peer is using this one */
1086 n = tipc_node_find(net, addr);
1088 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
1093 return tipc_node_suggest_addr(net, addr);
1096 /* Suggest previously used address if peer is known */
1097 n = tipc_node_find_by_id(net, id);
1099 sugg_addr = n->addr;
1100 preliminary = n->preliminary;
1106 /* Even this node may be in conflict */
1107 if (tn->trial_addr == addr)
1108 return tipc_node_suggest_addr(net, addr);
1113 void tipc_node_check_dest(struct net *net, u32 addr,
1114 u8 *peer_id, struct tipc_bearer *b,
1115 u16 capabilities, u32 signature, u32 hash_mixes,
1116 struct tipc_media_addr *maddr,
1117 bool *respond, bool *dupl_addr)
1119 struct tipc_node *n;
1120 struct tipc_link *l, *snd_l;
1121 struct tipc_link_entry *le;
1122 bool addr_match = false;
1123 bool sign_match = false;
1124 bool link_up = false;
1125 bool accept_addr = false;
1134 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
1139 tipc_node_write_lock(n);
1140 if (unlikely(!n->bc_entry.link)) {
1141 snd_l = tipc_bc_sndlink(net);
1142 if (!tipc_link_bc_create(net, tipc_own_addr(net),
1143 addr, peer_id, U16_MAX,
1144 tipc_link_min_win(snd_l),
1145 tipc_link_max_win(snd_l),
1147 &n->bc_entry.inputq1,
1148 &n->bc_entry.namedq, snd_l,
1149 &n->bc_entry.link)) {
1150 pr_warn("Broadcast rcv link creation failed, no mem\n");
1151 tipc_node_write_unlock_fast(n);
1157 le = &n->links[b->identity];
1159 /* Prepare to validate requesting node's signature and media address */
1161 link_up = l && tipc_link_is_up(l);
1162 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
1163 sign_match = (signature == n->signature);
1165 /* These three flags give us eight permutations: */
1167 if (sign_match && addr_match && link_up) {
1168 /* All is fine. Do nothing. */
1170 /* Peer node is not a container/local namespace */
1171 if (!n->peer_hash_mix)
1172 n->peer_hash_mix = hash_mixes;
1173 } else if (sign_match && addr_match && !link_up) {
1174 /* Respond. The link will come up in due time */
1176 } else if (sign_match && !addr_match && link_up) {
1177 /* Peer has changed i/f address without rebooting.
1178 * If so, the link will reset soon, and the next
1179 * discovery will be accepted. So we can ignore it.
1180 * It may also be a cloned or malicious peer having
1181 * chosen the same node address and signature as an
1183 * Ignore requests until the link goes down, if ever.
1186 } else if (sign_match && !addr_match && !link_up) {
1187 /* Peer link has changed i/f address without rebooting.
1188 * It may also be a cloned or malicious peer; we can't
1189 * distinguish between the two.
1190 * The signature is correct, so we must accept.
1194 } else if (!sign_match && addr_match && link_up) {
1195 /* Peer node rebooted. Two possibilities:
1196 * - Delayed re-discovery; this link endpoint has already
1197 * reset and re-established contact with the peer, before
1198 * receiving a discovery message from that node.
1199 * (The peer happened to receive one from this node first).
1200 * - The peer came back so fast that our side has not
1201 * discovered it yet. Probing from this side will soon
1202 * reset the link, since there can be no working link
1203 * endpoint at the peer end, and the link will re-establish.
1204 * Accept the signature, since it comes from a known peer.
1206 n->signature = signature;
1207 } else if (!sign_match && addr_match && !link_up) {
1208 /* The peer node has rebooted.
1209 * Accept signature, since it is a known peer.
1211 n->signature = signature;
1213 } else if (!sign_match && !addr_match && link_up) {
1214 /* Peer rebooted with new address, or a new/duplicate peer.
1215 * Ignore until the link goes down, if ever.
1218 } else if (!sign_match && !addr_match && !link_up) {
1219 /* Peer rebooted with new address, or it is a new peer.
1220 * Accept signature and address.
1222 n->signature = signature;
1230 /* Now create new link if not already existing */
1232 if (n->link_cnt == 2)
1235 if_name = strchr(b->name, ':') + 1;
1236 get_random_bytes(&session, sizeof(u16));
1237 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1238 b->net_plane, b->mtu, b->priority,
1239 b->min_win, b->max_win, session,
1240 tipc_own_addr(net), addr, peer_id,
1242 tipc_bc_sndlink(n->net), n->bc_entry.link,
1244 &n->bc_entry.namedq, &l)) {
1248 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1250 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1251 if (n->state == NODE_FAILINGOVER)
1252 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1255 tipc_node_calculate_timer(n, l);
1256 if (n->link_cnt == 1) {
1257 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
1258 if (!mod_timer(&n->timer, intv))
1262 memcpy(&le->maddr, maddr, sizeof(*maddr));
1264 tipc_node_write_unlock(n);
1265 if (reset && l && !tipc_link_is_reset(l))
1266 tipc_node_link_down(n, b->identity, false);
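/* Summary (illustrative, derived from the branch comments in
 * tipc_node_check_dest() above) of the eight signature/address/link-state
 * permutations:
 *
 *	sign	addr	link	resulting action
 *	match	match	up
 *	yes	yes	yes	all fine, do nothing
 *	yes	yes	no	respond; the link comes up in due time
 *	yes	no	yes	ignore until the link goes down (changed i/f
 *				address, or a cloned/malicious peer)
 *	yes	no	no	accept the new address; signature is trusted
 *	no	yes	yes	peer rebooted; accept the new signature
 *	no	yes	no	peer rebooted; accept the new signature
 *	no	no	yes	ignore until the link goes down, if ever
 *	no	no	no	new or rebooted peer; accept signature and address
 */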
1270 void tipc_node_delete_links(struct net *net, int bearer_id)
1272 struct tipc_net *tn = net_generic(net, tipc_net_id);
1273 struct tipc_node *n;
1276 list_for_each_entry_rcu(n, &tn->node_list, list) {
1277 tipc_node_link_down(n, bearer_id, true);
1282 static void tipc_node_reset_links(struct tipc_node *n)
1286 pr_warn("Resetting all links to %x\n", n->addr);
1288 trace_tipc_node_reset_links(n, true, " ");
1289 for (i = 0; i < MAX_BEARERS; i++) {
1290 tipc_node_link_down(n, i, false);
1294 /* tipc_node_fsm_evt - node finite state machine
1295 * Determines when contact is allowed with peer node
1297 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1299 int state = n->state;
1302 case SELF_DOWN_PEER_DOWN:
1304 case SELF_ESTABL_CONTACT_EVT:
1305 state = SELF_UP_PEER_COMING;
1307 case PEER_ESTABL_CONTACT_EVT:
1308 state = SELF_COMING_PEER_UP;
1310 case SELF_LOST_CONTACT_EVT:
1311 case PEER_LOST_CONTACT_EVT:
1313 case NODE_SYNCH_END_EVT:
1314 case NODE_SYNCH_BEGIN_EVT:
1315 case NODE_FAILOVER_BEGIN_EVT:
1316 case NODE_FAILOVER_END_EVT:
1321 case SELF_UP_PEER_UP:
1323 case SELF_LOST_CONTACT_EVT:
1324 state = SELF_DOWN_PEER_LEAVING;
1326 case PEER_LOST_CONTACT_EVT:
1327 state = SELF_LEAVING_PEER_DOWN;
1329 case NODE_SYNCH_BEGIN_EVT:
1330 state = NODE_SYNCHING;
1332 case NODE_FAILOVER_BEGIN_EVT:
1333 state = NODE_FAILINGOVER;
1335 case SELF_ESTABL_CONTACT_EVT:
1336 case PEER_ESTABL_CONTACT_EVT:
1337 case NODE_SYNCH_END_EVT:
1338 case NODE_FAILOVER_END_EVT:
1344 case SELF_DOWN_PEER_LEAVING:
1346 case PEER_LOST_CONTACT_EVT:
1347 state = SELF_DOWN_PEER_DOWN;
1349 case SELF_ESTABL_CONTACT_EVT:
1350 case PEER_ESTABL_CONTACT_EVT:
1351 case SELF_LOST_CONTACT_EVT:
1353 case NODE_SYNCH_END_EVT:
1354 case NODE_SYNCH_BEGIN_EVT:
1355 case NODE_FAILOVER_BEGIN_EVT:
1356 case NODE_FAILOVER_END_EVT:
1361 case SELF_UP_PEER_COMING:
1363 case PEER_ESTABL_CONTACT_EVT:
1364 state = SELF_UP_PEER_UP;
1366 case SELF_LOST_CONTACT_EVT:
1367 state = SELF_DOWN_PEER_DOWN;
1369 case SELF_ESTABL_CONTACT_EVT:
1370 case PEER_LOST_CONTACT_EVT:
1371 case NODE_SYNCH_END_EVT:
1372 case NODE_FAILOVER_BEGIN_EVT:
1374 case NODE_SYNCH_BEGIN_EVT:
1375 case NODE_FAILOVER_END_EVT:
1380 case SELF_COMING_PEER_UP:
1382 case SELF_ESTABL_CONTACT_EVT:
1383 state = SELF_UP_PEER_UP;
1385 case PEER_LOST_CONTACT_EVT:
1386 state = SELF_DOWN_PEER_DOWN;
1388 case SELF_LOST_CONTACT_EVT:
1389 case PEER_ESTABL_CONTACT_EVT:
1391 case NODE_SYNCH_END_EVT:
1392 case NODE_SYNCH_BEGIN_EVT:
1393 case NODE_FAILOVER_BEGIN_EVT:
1394 case NODE_FAILOVER_END_EVT:
1399 case SELF_LEAVING_PEER_DOWN:
1401 case SELF_LOST_CONTACT_EVT:
1402 state = SELF_DOWN_PEER_DOWN;
1404 case SELF_ESTABL_CONTACT_EVT:
1405 case PEER_ESTABL_CONTACT_EVT:
1406 case PEER_LOST_CONTACT_EVT:
1408 case NODE_SYNCH_END_EVT:
1409 case NODE_SYNCH_BEGIN_EVT:
1410 case NODE_FAILOVER_BEGIN_EVT:
1411 case NODE_FAILOVER_END_EVT:
1416 case NODE_FAILINGOVER:
1418 case SELF_LOST_CONTACT_EVT:
1419 state = SELF_DOWN_PEER_LEAVING;
1421 case PEER_LOST_CONTACT_EVT:
1422 state = SELF_LEAVING_PEER_DOWN;
1424 case NODE_FAILOVER_END_EVT:
1425 state = SELF_UP_PEER_UP;
1427 case NODE_FAILOVER_BEGIN_EVT:
1428 case SELF_ESTABL_CONTACT_EVT:
1429 case PEER_ESTABL_CONTACT_EVT:
1431 case NODE_SYNCH_BEGIN_EVT:
1432 case NODE_SYNCH_END_EVT:
1439 case SELF_LOST_CONTACT_EVT:
1440 state = SELF_DOWN_PEER_LEAVING;
1442 case PEER_LOST_CONTACT_EVT:
1443 state = SELF_LEAVING_PEER_DOWN;
1445 case NODE_SYNCH_END_EVT:
1446 state = SELF_UP_PEER_UP;
1448 case NODE_FAILOVER_BEGIN_EVT:
1449 state = NODE_FAILINGOVER;
1451 case NODE_SYNCH_BEGIN_EVT:
1452 case SELF_ESTABL_CONTACT_EVT:
1453 case PEER_ESTABL_CONTACT_EVT:
1455 case NODE_FAILOVER_END_EVT:
1461 pr_err("Unknown node fsm state %x\n", state);
1464 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1469 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1470 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
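/* Worked example (derived from the transitions above): when contact is
 * established on both endpoints the node walks
 *
 *	SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *	                    --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 *
 * and a later link failover takes it via NODE_FAILOVER_BEGIN_EVT into
 * NODE_FAILINGOVER and back to SELF_UP_PEER_UP on NODE_FAILOVER_END_EVT.
 */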
1473 static void node_lost_contact(struct tipc_node *n,
1474 struct sk_buff_head *inputq)
1476 struct tipc_sock_conn *conn, *safe;
1477 struct tipc_link *l;
1478 struct list_head *conns = &n->conn_sks;
1479 struct sk_buff *skb;
1482 pr_debug("Lost contact with %x\n", n->addr);
1483 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1484 trace_tipc_node_lost_contact(n, true, " ");
1486 /* Clean up broadcast state */
1487 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1488 __skb_queue_purge(&n->bc_entry.namedq);
1490 /* Abort any ongoing link failover */
1491 for (i = 0; i < MAX_BEARERS; i++) {
1492 l = n->links[i].link;
1494 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1497 /* Notify publications from this node */
1498 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1500 n->peer_hash_mix = 0;
1501 /* Notify sockets connected to node */
1502 list_for_each_entry_safe(conn, safe, conns, list) {
1503 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1504 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1505 conn->peer_node, conn->port,
1506 conn->peer_port, TIPC_ERR_NO_NODE);
1508 skb_queue_tail(inputq, skb);
1509 list_del(&conn->list);
1515 * tipc_node_get_linkname - get the name of a link
1517 * @bearer_id: id of the bearer
1518 * @addr: peer node address
1519 * @linkname: link name output buffer
1521 * Returns 0 on success
1523 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1524 char *linkname, size_t len)
1526 struct tipc_link *link;
1528 struct tipc_node *node = tipc_node_find(net, addr);
1533 if (bearer_id >= MAX_BEARERS)
1536 tipc_node_read_lock(node);
1537 link = node->links[bearer_id].link;
1539 strncpy(linkname, tipc_link_name(link), len);
1542 tipc_node_read_unlock(node);
1544 tipc_node_put(node);
1548 /* Caller should hold node lock for the passed node */
1549 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1552 struct nlattr *attrs;
1554 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1555 NLM_F_MULTI, TIPC_NL_NODE_GET);
1559 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
1563 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1565 if (node_is_up(node))
1566 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1569 nla_nest_end(msg->skb, attrs);
1570 genlmsg_end(msg->skb, hdr);
1575 nla_nest_cancel(msg->skb, attrs);
1577 genlmsg_cancel(msg->skb, hdr);
1582 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
1584 struct tipc_msg *hdr = buf_msg(skb_peek(list));
1585 struct sk_buff_head inputq;
1587 switch (msg_user(hdr)) {
1588 case TIPC_LOW_IMPORTANCE:
1589 case TIPC_MEDIUM_IMPORTANCE:
1590 case TIPC_HIGH_IMPORTANCE:
1591 case TIPC_CRITICAL_IMPORTANCE:
1592 if (msg_connected(hdr) || msg_named(hdr) ||
1594 tipc_loopback_trace(peer_net, list);
1595 spin_lock_init(&list->lock);
1596 tipc_sk_rcv(peer_net, list);
1599 if (msg_mcast(hdr)) {
1600 tipc_loopback_trace(peer_net, list);
1601 skb_queue_head_init(&inputq);
1602 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1603 __skb_queue_purge(list);
1604 skb_queue_purge(&inputq);
1608 case MSG_FRAGMENTER:
1609 if (tipc_msg_assemble(list)) {
1610 tipc_loopback_trace(peer_net, list);
1611 skb_queue_head_init(&inputq);
1612 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1613 __skb_queue_purge(list);
1614 skb_queue_purge(&inputq);
1617 case GROUP_PROTOCOL:
1619 tipc_loopback_trace(peer_net, list);
1620 spin_lock_init(&list->lock);
1621 tipc_sk_rcv(peer_net, list);
1624 case NAME_DISTRIBUTOR:
1625 case TUNNEL_PROTOCOL:
1626 case BCAST_PROTOCOL:
1634 * tipc_node_xmit() is the general link level function for message sending
1635 * @net: the applicable net namespace
1636 * @list: chain of buffers containing message
1637 * @dnode: address of destination node
1638 * @selector: a number used for deterministic link selection
1639 * Consumes the buffer chain.
1640 * Returns 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or -ENOBUFS
1642 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1643 u32 dnode, int selector)
1645 struct tipc_link_entry *le = NULL;
1646 struct tipc_node *n;
1647 struct sk_buff_head xmitq;
1648 bool node_up = false;
1652 if (in_own_node(net, dnode)) {
1653 tipc_loopback_trace(net, list);
1654 spin_lock_init(&list->lock);
1655 tipc_sk_rcv(net, list);
1659 n = tipc_node_find(net, dnode);
1661 __skb_queue_purge(list);
1662 return -EHOSTUNREACH;
1665 tipc_node_read_lock(n);
1666 node_up = node_is_up(n);
1667 if (node_up && n->peer_net && check_net(n->peer_net)) {
1668 /* xmit inner linux container */
1669 tipc_lxc_xmit(n->peer_net, list);
1670 if (likely(skb_queue_empty(list))) {
1671 tipc_node_read_unlock(n);
1677 bearer_id = n->active_links[selector & 1];
1678 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1679 tipc_node_read_unlock(n);
1681 __skb_queue_purge(list);
1682 return -EHOSTUNREACH;
1685 __skb_queue_head_init(&xmitq);
1686 le = &n->links[bearer_id];
1687 spin_lock_bh(&le->lock);
1688 rc = tipc_link_xmit(le->link, list, &xmitq);
1689 spin_unlock_bh(&le->lock);
1690 tipc_node_read_unlock(n);
1692 if (unlikely(rc == -ENOBUFS))
1693 tipc_node_link_down(n, bearer_id, false);
1695 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1702 /* tipc_node_xmit_skb(): send single buffer to destination
1703 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1704 * messages, which will not be rejected
1705 * The only exception is datagram messages rerouted after secondary
1706 * lookup, which are rare and safe to dispose of anyway.
1708 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1711 struct sk_buff_head head;
1713 __skb_queue_head_init(&head);
1714 __skb_queue_tail(&head, skb);
1715 tipc_node_xmit(net, &head, dnode, selector);
1719 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1720 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1722 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1724 struct sk_buff *skb;
1725 u32 selector, dnode;
1727 while ((skb = __skb_dequeue(xmitq))) {
1728 selector = msg_origport(buf_msg(skb));
1729 dnode = msg_destnode(buf_msg(skb));
1730 tipc_node_xmit_skb(net, skb, dnode, selector);
1735 void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1737 struct sk_buff_head xmitq;
1738 struct sk_buff *txskb;
1739 struct tipc_node *n;
1743 /* Use broadcast if all nodes support it */
1744 if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1745 __skb_queue_head_init(&xmitq);
1746 __skb_queue_tail(&xmitq, skb);
1747 tipc_bcast_xmit(net, &xmitq, &dummy);
1751 /* Otherwise use legacy replicast method */
1753 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1755 if (in_own_node(net, dst))
1759 txskb = pskb_copy(skb, GFP_ATOMIC);
1762 msg_set_destnode(buf_msg(txskb), dst);
1763 tipc_node_xmit_skb(net, txskb, dst, 0);
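/* Illustrative sketch (not part of the original file): whether true
 * broadcast is usable at all depends on the cluster-wide capability word,
 * which is the bitwise AND of every peer's capabilities, recomputed whenever
 * the node list changes (see tipc_node_create() and tipc_node_cleanup()
 * above) and handed to tipc_bcast_toggle_rcast(). The recomputation follows
 * this pattern (caller holds node_list_lock or rcu_read_lock):
 */
static u16 tipc_cluster_capabilities_example(struct tipc_net *tn)
{
	struct tipc_node *temp_node;
	u16 caps = TIPC_NODE_CAPABILITIES;

	list_for_each_entry_rcu(temp_node, &tn->node_list, list)
		caps &= temp_node->capabilities;
	/* a single older peer masks an optional feature cluster-wide */
	return caps;
}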
1769 static void tipc_node_mcast_rcv(struct tipc_node *n)
1771 struct tipc_bclink_entry *be = &n->bc_entry;
1773 /* 'arrvq' is under inputq2's lock protection */
1774 spin_lock_bh(&be->inputq2.lock);
1775 spin_lock_bh(&be->inputq1.lock);
1776 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1777 spin_unlock_bh(&be->inputq1.lock);
1778 spin_unlock_bh(&be->inputq2.lock);
1779 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1782 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1783 int bearer_id, struct sk_buff_head *xmitq)
1785 struct tipc_link *ucl;
1788 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1790 if (rc & TIPC_LINK_DOWN_EVT) {
1791 tipc_node_reset_links(n);
1795 if (!(rc & TIPC_LINK_SND_STATE))
1798 /* If probe message, a STATE response will be sent anyway */
1802 /* Produce a STATE message carrying broadcast NACK */
1803 tipc_node_read_lock(n);
1804 ucl = n->links[bearer_id].link;
1806 tipc_link_build_state_msg(ucl, xmitq);
1807 tipc_node_read_unlock(n);
1811 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1812 * @net: the applicable net namespace
1814 * @bearer_id: id of bearer message arrived on
1816 * Invoked with no locks held.
1818 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1821 struct sk_buff_head xmitq;
1822 struct tipc_bclink_entry *be;
1823 struct tipc_link_entry *le;
1824 struct tipc_msg *hdr = buf_msg(skb);
1825 int usr = msg_user(hdr);
1826 u32 dnode = msg_destnode(hdr);
1827 struct tipc_node *n;
1829 __skb_queue_head_init(&xmitq);
1831 /* If NACK for other node, let rcv link for that node peek into it */
1832 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1833 n = tipc_node_find(net, dnode);
1835 n = tipc_node_find(net, msg_prevnode(hdr));
1841 le = &n->links[bearer_id];
1843 rc = tipc_bcast_rcv(net, be->link, skb);
1845 /* Broadcast ACKs are sent on a unicast link */
1846 if (rc & TIPC_LINK_SND_STATE) {
1847 tipc_node_read_lock(n);
1848 tipc_link_build_state_msg(le->link, &xmitq);
1849 tipc_node_read_unlock(n);
1852 if (!skb_queue_empty(&xmitq))
1853 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1855 if (!skb_queue_empty(&be->inputq1))
1856 tipc_node_mcast_rcv(n);
1858 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1859 if (!skb_queue_empty(&n->bc_entry.namedq))
1860 tipc_named_rcv(net, &n->bc_entry.namedq,
1861 &n->bc_entry.named_rcv_nxt,
1862 &n->bc_entry.named_open);
1864 /* If reassembly or retransmission failure => reset all links to peer */
1865 if (rc & TIPC_LINK_DOWN_EVT)
1866 tipc_node_reset_links(n);
1872 * tipc_node_check_state - check and if necessary update node state
1874 * @bearer_id: identity of bearer delivering the packet
1875 * Returns true if state and msg are ok, otherwise false
1877 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1878 int bearer_id, struct sk_buff_head *xmitq)
1880 struct tipc_msg *hdr = buf_msg(skb);
1881 int usr = msg_user(hdr);
1882 int mtyp = msg_type(hdr);
1883 u16 oseqno = msg_seqno(hdr);
1884 u16 exp_pkts = msg_msgcnt(hdr);
1885 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1886 int state = n->state;
1887 struct tipc_link *l, *tnl, *pl = NULL;
1888 struct tipc_media_addr *maddr;
1891 if (trace_tipc_node_check_state_enabled()) {
1892 trace_tipc_skb_dump(skb, false, "skb for node state check");
1893 trace_tipc_node_check_state(n, true, " ");
1895 l = n->links[bearer_id].link;
1898 rcv_nxt = tipc_link_rcv_nxt(l);
1901 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1904 /* Find parallel link, if any */
1905 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1906 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1907 pl = n->links[pb_id].link;
1912 if (!tipc_link_validate_msg(l, hdr)) {
1913 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1914 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1918 /* Check and update node accessibility if applicable */
1919 if (state == SELF_UP_PEER_COMING) {
1920 if (!tipc_link_is_up(l))
1922 if (!msg_peer_link_is_up(hdr))
1924 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1927 if (state == SELF_DOWN_PEER_LEAVING) {
1928 if (msg_peer_node_is_up(hdr))
1930 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1934 if (state == SELF_LEAVING_PEER_DOWN)
1937 /* Ignore duplicate packets */
1938 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1941 /* Initiate or update failover mode if applicable */
1942 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1943 syncpt = oseqno + exp_pkts - 1;
1944 if (pl && !tipc_link_is_reset(pl)) {
1945 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1946 trace_tipc_node_link_down(n, true,
1947 "node link down <- failover!");
1948 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1949 tipc_link_inputq(l));
1952 /* If parallel link was already down, and this happened before
1953 * the tunnel link came up, node failover was never started.
1954 * Ensure that a FAILOVER_MSG is sent to get peer out of
1955 * NODE_FAILINGOVER state; this node must also accept
1956 * TUNNEL_MSGs from the peer.
1958 if (n->state != NODE_FAILINGOVER)
1959 tipc_node_link_failover(n, pl, l, xmitq);
1961 /* If pkts arrive out of order, use lowest calculated syncpt */
1962 if (less(syncpt, n->sync_point))
1963 n->sync_point = syncpt;
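/* Worked example (illustrative): sequence numbers are u16 and less()/more()
 * compare them modulo 2^16, i.e. "ahead by less than half the sequence
 * space". With rcv_nxt = 0xfff0 and sync_point = 0x0010, more(rcv_nxt,
 * sync_point) is still false: despite the numeric wraparound, 0x0010 lies
 * 0x20 steps ahead of 0xfff0, so failover/synch does not end until the
 * receive position has actually passed the synch point.
 */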
1966 /* Open parallel link when tunnel link reaches synch point */
1967 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
1968 if (!more(rcv_nxt, n->sync_point))
1970 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1972 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
1976 /* No synching needed if only one link */
1977 if (!pl || !tipc_link_is_up(pl))
1980 /* Initiate synch mode if applicable */
1981 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
1982 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
1983 syncpt = msg_syncpt(hdr);
1985 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
1986 if (!tipc_link_is_up(l))
1987 __tipc_node_link_up(n, bearer_id, xmitq);
1988 if (n->state == SELF_UP_PEER_UP) {
1989 n->sync_point = syncpt;
1990 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1991 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1995 /* Open tunnel link when parallel link reaches synch point */
1996 if (n->state == NODE_SYNCHING) {
1997 if (tipc_link_is_synching(l)) {
2003 inputq_len = skb_queue_len(tipc_link_inputq(pl));
2004 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
2005 if (more(dlv_nxt, n->sync_point)) {
2006 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2007 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2012 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2014 if (usr == LINK_PROTOCOL)
2022 * tipc_rcv - process TIPC packets/messages arriving from off-node
2023 * @net: the applicable net namespace
2025 * @b: pointer to bearer message arrived on
2027 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2028 * structure (i.e. cannot be NULL), but bearer can be inactive.
2030 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2032 struct sk_buff_head xmitq;
2033 struct tipc_link_entry *le;
2034 struct tipc_msg *hdr;
2035 struct tipc_node *n;
2036 int bearer_id = b->identity;
2037 u32 self = tipc_own_addr(net);
2040 #ifdef CONFIG_TIPC_CRYPTO
2041 struct tipc_ehdr *ehdr;
2043 /* Check if message must be decrypted first */
2044 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2047 ehdr = (struct tipc_ehdr *)skb->data;
2048 if (likely(ehdr->user != LINK_CONFIG)) {
2049 n = tipc_node_find(net, ntohl(ehdr->addr));
2053 n = tipc_node_find_by_id(net, ehdr->id);
2055 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2061 /* Ensure message is well-formed before touching the header */
2062 if (unlikely(!tipc_msg_validate(&skb)))
2064 __skb_queue_head_init(&xmitq);
2066 usr = msg_user(hdr);
2067 bc_ack = msg_bcast_ack(hdr);
2069 /* Handle arrival of discovery or broadcast packet */
2070 if (unlikely(msg_non_seq(hdr))) {
2071 if (unlikely(usr == LINK_CONFIG))
2072 return tipc_disc_rcv(net, skb, b);
2074 return tipc_node_bc_rcv(net, skb, bearer_id);
2077 /* Discard unicast link messages destined for another node */
2078 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2081 /* Locate neighboring node that sent packet */
2082 n = tipc_node_find(net, msg_prevnode(hdr));
2085 le = &n->links[bearer_id];
2087 /* Ensure broadcast reception is in synch with peer's send state */
2088 if (unlikely(usr == LINK_PROTOCOL)) {
2089 if (unlikely(skb_linearize(skb))) {
2094 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2095 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2096 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2099 /* Receive packet directly if conditions permit */
2100 tipc_node_read_lock(n);
2101 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2102 spin_lock_bh(&le->lock);
2104 rc = tipc_link_rcv(le->link, skb, &xmitq);
2107 spin_unlock_bh(&le->lock);
2109 tipc_node_read_unlock(n);
2111 /* Check/update node state before receiving */
2112 if (unlikely(skb)) {
2113 if (unlikely(skb_linearize(skb)))
2115 tipc_node_write_lock(n);
2116 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2118 rc = tipc_link_rcv(le->link, skb, &xmitq);
2122 tipc_node_write_unlock(n);
2125 if (unlikely(rc & TIPC_LINK_UP_EVT))
2126 tipc_node_link_up(n, bearer_id, &xmitq);
2128 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2129 tipc_node_link_down(n, bearer_id, false);
2131 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2132 tipc_named_rcv(net, &n->bc_entry.namedq,
2133 &n->bc_entry.named_rcv_nxt,
2134 &n->bc_entry.named_open);
2136 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2137 tipc_node_mcast_rcv(n);
2139 if (!skb_queue_empty(&le->inputq))
2140 tipc_sk_rcv(net, &le->inputq);
2142 if (!skb_queue_empty(&xmitq))
2143 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2151 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2154 struct tipc_net *tn = tipc_net(net);
2155 int bearer_id = b->identity;
2156 struct sk_buff_head xmitq;
2157 struct tipc_link_entry *e;
2158 struct tipc_node *n;
2160 __skb_queue_head_init(&xmitq);
2164 list_for_each_entry_rcu(n, &tn->node_list, list) {
2165 tipc_node_write_lock(n);
2166 e = &n->links[bearer_id];
2168 if (prop == TIPC_NLA_PROP_TOL)
2169 tipc_link_set_tolerance(e->link, b->tolerance,
2171 else if (prop == TIPC_NLA_PROP_MTU)
2172 tipc_link_set_mtu(e->link, b->mtu);
2174 tipc_node_write_unlock(n);
2175 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2181 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2183 struct net *net = sock_net(skb->sk);
2184 struct tipc_net *tn = net_generic(net, tipc_net_id);
2185 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2186 struct tipc_node *peer, *temp_node;
2190 /* We identify the peer by its net */
2191 if (!info->attrs[TIPC_NLA_NET])
2194 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2195 info->attrs[TIPC_NLA_NET],
2196 tipc_nl_net_policy, info->extack);
2200 if (!attrs[TIPC_NLA_NET_ADDR])
2203 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2205 if (in_own_node(net, addr))
2208 spin_lock_bh(&tn->node_list_lock);
2209 peer = tipc_node_find(net, addr);
2211 spin_unlock_bh(&tn->node_list_lock);
2215 tipc_node_write_lock(peer);
2216 if (peer->state != SELF_DOWN_PEER_DOWN &&
2217 peer->state != SELF_DOWN_PEER_LEAVING) {
2218 tipc_node_write_unlock(peer);
2223 tipc_node_clear_links(peer);
2224 tipc_node_write_unlock(peer);
2225 tipc_node_delete(peer);
2227 /* Calculate cluster capabilities */
2228 tn->capabilities = TIPC_NODE_CAPABILITIES;
2229 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2230 tn->capabilities &= temp_node->capabilities;
2232 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2235 tipc_node_put(peer);
2236 spin_unlock_bh(&tn->node_list_lock);
2241 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2244 struct net *net = sock_net(skb->sk);
2245 struct tipc_net *tn = net_generic(net, tipc_net_id);
2246 int done = cb->args[0];
2247 int last_addr = cb->args[1];
2248 struct tipc_node *node;
2249 struct tipc_nl_msg msg;
2255 msg.portid = NETLINK_CB(cb->skb).portid;
2256 msg.seq = cb->nlh->nlmsg_seq;
2260 node = tipc_node_find(net, last_addr);
2263 /* We never set seq or call nl_dump_check_consistent(),
2264 * which means that setting prev_seq here will cause the
2265 * consistency check to fail in the netlink callback
2266 * handler, resulting in the NLMSG_DONE message having
2267 * the NLM_F_DUMP_INTR flag set if the node state
2268 * changed while we released the lock.
2273 tipc_node_put(node);
2276 list_for_each_entry_rcu(node, &tn->node_list, list) {
2277 if (node->preliminary)
2280 if (node->addr == last_addr)
2286 tipc_node_read_lock(node);
2287 err = __tipc_nl_add_node(&msg, node);
2289 last_addr = node->addr;
2290 tipc_node_read_unlock(node);
2294 tipc_node_read_unlock(node);
2299 cb->args[1] = last_addr;
2305 /* tipc_node_find_by_name - locate owner node of link by link's name
2306 * @net: the applicable net namespace
2307 * @link_name: pointer to link name string
2308 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2310 * Returns pointer to node owning the link, or NULL if no matching link is found.
2312 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2313 const char *link_name,
2314 unsigned int *bearer_id)
2316 struct tipc_net *tn = net_generic(net, tipc_net_id);
2317 struct tipc_link *l;
2318 struct tipc_node *n;
2319 struct tipc_node *found_node = NULL;
2324 list_for_each_entry_rcu(n, &tn->node_list, list) {
2325 tipc_node_read_lock(n);
2326 for (i = 0; i < MAX_BEARERS; i++) {
2327 l = n->links[i].link;
2328 if (l && !strcmp(tipc_link_name(l), link_name)) {
2334 tipc_node_read_unlock(n);
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}
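
/* Example (illustrative only): the per-link properties handled above arrive
 * as TIPC_NLA_PROP_TOL/PRIO/WIN attributes nested under TIPC_NLA_LINK_PROP.
 * With the iproute2 tipc tool, setting them typically looks like:
 *
 *	tipc link set tolerance 1500 link <link-name>
 *	tipc link set priority 20 link <link-name>
 *	tipc link set window 50 link <link-name>
 *
 * (tool syntax shown as an assumption; see tipc(8) for the exact form)
 */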
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);

	return 0;
}
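
/* Example (illustrative only): resetting link counters from user space is
 * typically done with something like:
 *
 *	tipc link stat reset link <link-name>
 *	tipc link stat reset link broadcast-link
 *
 * "broadcast-link" (tipc_bclink_name) selects the broadcast send link; a
 * name that merely contains that string selects a per-peer broadcast
 * receive link, which is what the strstr() branch above handles.
 * (tool syntax is an assumption)
 */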
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;
		if (!node->links[i].link)
			continue;
		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;
	return 0;
}

int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;
			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;
			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}
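
/* The activation threshold set above decides at which cluster size the
 * neighbour monitor stops doing full-mesh supervision and switches to the
 * overlapping-ring algorithm. Example (illustrative only, tool syntax is an
 * assumption):
 *
 *	tipc link monitor set threshold 32
 */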
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **key)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];

	if (!attr)
		return -ENODATA;

	*key = (struct tipc_aead_key *)nla_data(attr);
	if (nla_len(attr) < tipc_aead_key_size(*key))
		return -EINVAL;

	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}
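
/* For reference: the TIPC_NLA_NODE_KEY payload checked above is a
 * struct tipc_aead_key as defined in the TIPC uapi header, roughly:
 *
 *	struct tipc_aead_key {
 *		char alg_name[TIPC_AEAD_ALG_NAME];	// e.g. "gcm(aes)"
 *		unsigned int keylen;			// key length in bytes
 *		char key[];				// keylen bytes follow
 *	};
 *
 * so tipc_aead_key_size() amounts to sizeof(struct tipc_aead_key) + keylen,
 * which is what the nla_len() check above guards against.
 */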
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	struct tipc_crypto *c;
	u8 *id, *own_id;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		/* Cluster key mode */
		rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
		break;
	case 0:
		/* Per-node key mode */
		if (!memcmp(id, own_id, NODE_ID_LEN)) {
			c = tn->crypto_tx;
		} else {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n)) {
				rc = -ENOMEM;
				break;
			}
			c = n->crypto_rx;
		}

		rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
		if (n)
			tipc_node_put(n);
		break;
	default:
		break;
	}

	return (rc < 0) ? rc : 0;
}
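
/* Example (illustrative only): with the iproute2 tipc tool, the two modes
 * distinguished above roughly correspond to:
 *
 *	tipc node set key <key> algname "gcm(aes)"
 *	tipc node set key <key> algname "gcm(aes)" nodeid <peer id>
 *
 * i.e. omitting the node identity selects CLUSTER_KEY and supplying one
 * selects PER_NODE_KEY. (tool syntax is an assumption; see tipc(8))
 */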
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	pr_info("All keys are flushed!\n");
	return 0;
}

int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
#endif
/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *        - false: dump only tipc node data
 *        - true: dump node link data as well
 * @buf: returned buffer of dump data in format
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}