1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
6 #include <linux/skbuff.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/pkt_sched.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <linux/timer.h>
14 #include <linux/ipv6.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_ether.h>
17 #include <linux/if_bonding.h>
18 #include <linux/if_vlan.h>
23 #include <asm/byteorder.h>
24 #include <net/bonding.h>
25 #include <net/bond_alb.h>
27 static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
28 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
30 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
37 u8 padding[ETH_ZLEN - ETH_HLEN];
42 __be16 prot_addr_space;
46 u8 mac_src[ETH_ALEN]; /* sender hardware address */
47 __be32 ip_src; /* sender IP address */
48 u8 mac_dst[ETH_ALEN]; /* target hardware address */
49 __be32 ip_dst; /* target IP address */
53 /* Forward declaration */
54 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
56 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
57 static void rlb_src_unlink(struct bonding *bond, u32 index);
58 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
61 static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
66 for (i = 0; i < hash_size; i++)
67 hash ^= hash_start[i];
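/* A worked example of the fold above (illustrative only): hashing the IPv4
 * address 10.0.0.5 XORs its bytes, 0x0a ^ 0x00 ^ 0x00 ^ 0x05 = 0x0f, and
 * that byte-sized result is what indexes the TLB and RLB client hash
 * tables used throughout this file.
 */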
72 /*********************** tlb specific functions ***************************/
74 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
77 entry->load_history = 1 + entry->tx_bytes /
78 BOND_TLB_REBALANCE_INTERVAL;
82 entry->tx_slave = NULL;
83 entry->next = TLB_NULL_INDEX;
84 entry->prev = TLB_NULL_INDEX;
87 static inline void tlb_init_slave(struct slave *slave)
89 SLAVE_TLB_INFO(slave).load = 0;
90 SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
93 static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
96 struct tlb_client_info *tx_hash_table;
99 /* clear slave from tx_hashtbl */
100 tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
102 /* skip this if we've already freed the tx hash table */
104 index = SLAVE_TLB_INFO(slave).head;
105 while (index != TLB_NULL_INDEX) {
106 u32 next_index = tx_hash_table[index].next;
108 tlb_init_table_entry(&tx_hash_table[index], save_load);
113 tlb_init_slave(slave);
116 static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
119 spin_lock_bh(&bond->mode_lock);
120 __tlb_clear_slave(bond, slave, save_load);
121 spin_unlock_bh(&bond->mode_lock);
124 /* Must be called before starting the monitor timer */
125 static int tlb_initialize(struct bonding *bond)
127 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
128 int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
129 struct tlb_client_info *new_hashtbl;
132 new_hashtbl = kzalloc(size, GFP_KERNEL);
136 spin_lock_bh(&bond->mode_lock);
138 bond_info->tx_hashtbl = new_hashtbl;
140 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
141 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
143 spin_unlock_bh(&bond->mode_lock);
148 /* Must be called only after all slaves have been released */
149 static void tlb_deinitialize(struct bonding *bond)
151 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
153 spin_lock_bh(&bond->mode_lock);
155 kfree(bond_info->tx_hashtbl);
156 bond_info->tx_hashtbl = NULL;
158 spin_unlock_bh(&bond->mode_lock);
161 static long long compute_gap(struct slave *slave)
163 return (s64) (slave->speed << 20) - /* Convert Mbps to bits per sec */
164 (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
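/* A rough worked example, assuming speed is reported in Mbps and load in
 * bytes per rebalance interval: a 1000 Mb/s slave that carried 50 MB gives
 * gap = (1000 << 20) - (50000000 << 3) = 1048576000 - 400000000 bits, so
 * faster and less-loaded slaves expose a larger gap and win in
 * tlb_get_least_loaded_slave().
 */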
167 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
169 struct slave *slave, *least_loaded;
170 struct list_head *iter;
176 /* Find the slave with the largest gap */
177 bond_for_each_slave_rcu(bond, slave, iter) {
178 if (bond_slave_can_tx(slave)) {
179 long long gap = compute_gap(slave);
182 least_loaded = slave;
191 static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
194 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
195 struct tlb_client_info *hash_table;
196 struct slave *assigned_slave;
198 hash_table = bond_info->tx_hashtbl;
199 assigned_slave = hash_table[hash_index].tx_slave;
200 if (!assigned_slave) {
201 assigned_slave = tlb_get_least_loaded_slave(bond);
203 if (assigned_slave) {
204 struct tlb_slave_info *slave_info =
205 &(SLAVE_TLB_INFO(assigned_slave));
206 u32 next_index = slave_info->head;
208 hash_table[hash_index].tx_slave = assigned_slave;
209 hash_table[hash_index].next = next_index;
210 hash_table[hash_index].prev = TLB_NULL_INDEX;
212 if (next_index != TLB_NULL_INDEX)
213 hash_table[next_index].prev = hash_index;
215 slave_info->head = hash_index;
217 hash_table[hash_index].load_history;
222 hash_table[hash_index].tx_bytes += skb_len;
224 return assigned_slave;
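/* The bookkeeping above keeps, per slave, a doubly linked list of the hash
 * table entries currently assigned to it (SLAVE_TLB_INFO(slave).head plus
 * the next/prev indices in each entry); that list is what lets
 * __tlb_clear_slave() walk and reset only that slave's entries.
 */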
227 static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
230 struct slave *tx_slave;
232 /* We don't need to disable softirq here, because
233 * tlb_choose_channel() is only called by bond_alb_xmit()
234 * which already has softirq disabled.
236 spin_lock(&bond->mode_lock);
237 tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
238 spin_unlock(&bond->mode_lock);
243 /*********************** rlb specific functions ***************************/
245 /* when an ARP REPLY is received from a client update its info
248 static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
250 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
251 struct rlb_client_info *client_info;
254 spin_lock_bh(&bond->mode_lock);
256 hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
257 client_info = &(bond_info->rx_hashtbl[hash_index]);
259 if ((client_info->assigned) &&
260 (client_info->ip_src == arp->ip_dst) &&
261 (client_info->ip_dst == arp->ip_src) &&
262 (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
263 /* update the clients MAC address */
264 ether_addr_copy(client_info->mac_dst, arp->mac_src);
265 client_info->ntt = 1;
266 bond_info->rx_ntt = 1;
269 spin_unlock_bh(&bond->mode_lock);
272 static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
275 struct arp_pkt *arp, _arp;
277 if (skb->protocol != cpu_to_be16(ETH_P_ARP))
280 arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
284 /* We received an ARP from arp->ip_src.
285 * We might have used this IP address previously (on the bonding host
286 * itself or on a system that is bridged together with the bond).
287 * However, if arp->mac_src is different than what is stored in
288 * rx_hashtbl, some other host is now using the IP and we must prevent
289 * sending out client updates with this IP address and the old MAC
291 * Clean up all hash table entries that have this address as ip_src but
292 * have a different mac_src.
294 rlb_purge_src_ip(bond, arp);
296 if (arp->op_code == htons(ARPOP_REPLY)) {
297 /* update rx hash table for this ARP */
298 rlb_update_entry_from_arp(bond, arp);
299 slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n");
302 return RX_HANDLER_ANOTHER;
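/* rlb_arp_recv() is the receive-side half of RLB: it is registered as
 * bond->recv_probe (see rlb_initialize()), so every incoming ARP first has
 * stale source entries purged and, for replies, refreshes the client's MAC
 * in rx_hashtbl so later updates go to the right address.
 */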
305 /* Caller must hold rcu_read_lock() */
306 static struct slave *__rlb_next_rx_slave(struct bonding *bond)
308 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
309 struct slave *before = NULL, *rx_slave = NULL, *slave;
310 struct list_head *iter;
313 bond_for_each_slave_rcu(bond, slave, iter) {
314 if (!bond_slave_can_tx(slave))
317 if (!before || before->speed < slave->speed)
320 if (!rx_slave || rx_slave->speed < slave->speed)
323 if (slave == bond_info->rx_slave)
326 /* we didn't find anything after the current or we have something
327 * better before and up to the current slave
329 if (!rx_slave || (before && rx_slave->speed < before->speed))
333 bond_info->rx_slave = rx_slave;
338 /* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */
339 static struct slave *rlb_next_rx_slave(struct bonding *bond)
341 struct slave *rx_slave;
346 rx_slave = __rlb_next_rx_slave(bond);
352 /* teach the switch the mac of a disabled slave
353 * on the primary for fault tolerance
355 * Caller must hold RTNL
357 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
359 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
364 if (!bond->alb_info.primary_is_promisc) {
365 if (!dev_set_promiscuity(curr_active->dev, 1))
366 bond->alb_info.primary_is_promisc = 1;
368 bond->alb_info.primary_is_promisc = 0;
371 bond->alb_info.rlb_promisc_timeout_counter = 0;
373 alb_send_learning_packets(curr_active, addr, true);
376 /* slave being removed should not be active at this point
378 * Caller must hold rtnl.
380 static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
382 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
383 struct rlb_client_info *rx_hash_table;
384 u32 index, next_index;
386 /* clear slave from rx_hashtbl */
387 spin_lock_bh(&bond->mode_lock);
389 rx_hash_table = bond_info->rx_hashtbl;
390 index = bond_info->rx_hashtbl_used_head;
391 for (; index != RLB_NULL_INDEX; index = next_index) {
392 next_index = rx_hash_table[index].used_next;
393 if (rx_hash_table[index].slave == slave) {
394 struct slave *assigned_slave = rlb_next_rx_slave(bond);
396 if (assigned_slave) {
397 rx_hash_table[index].slave = assigned_slave;
398 if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
399 bond_info->rx_hashtbl[index].ntt = 1;
400 bond_info->rx_ntt = 1;
401 /* A slave has been removed from the
402 * table because it is either disabled
403 * or being released. We must retry the
404 * update so that clients keep getting
405 * updates and do not disconnect when
408 bond_info->rlb_update_retry_counter =
411 } else { /* there is no active slave */
412 rx_hash_table[index].slave = NULL;
417 spin_unlock_bh(&bond->mode_lock);
419 if (slave != rtnl_dereference(bond->curr_active_slave))
420 rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
423 static void rlb_update_client(struct rlb_client_info *client_info)
427 if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
430 for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
433 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
435 client_info->slave->dev,
437 client_info->mac_dst,
438 client_info->slave->dev->dev_addr,
439 client_info->mac_dst);
441 slave_err(client_info->slave->bond->dev,
442 client_info->slave->dev,
443 "failed to create an ARP packet\n");
447 skb->dev = client_info->slave->dev;
449 if (client_info->vlan_id) {
450 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
451 client_info->vlan_id);
458 /* sends ARP REPLIES that update the clients that need updating */
459 static void rlb_update_rx_clients(struct bonding *bond)
461 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
462 struct rlb_client_info *client_info;
465 spin_lock_bh(&bond->mode_lock);
467 hash_index = bond_info->rx_hashtbl_used_head;
468 for (; hash_index != RLB_NULL_INDEX;
469 hash_index = client_info->used_next) {
470 client_info = &(bond_info->rx_hashtbl[hash_index]);
471 if (client_info->ntt) {
472 rlb_update_client(client_info);
473 if (bond_info->rlb_update_retry_counter == 0)
474 client_info->ntt = 0;
478 /* do not update the entries again until this counter is zero, so as
479 * not to confuse the clients.
481 bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
483 spin_unlock_bh(&bond->mode_lock);
486 /* The slave was assigned a new mac address - update the clients */
487 static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
489 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
490 struct rlb_client_info *client_info;
494 spin_lock_bh(&bond->mode_lock);
496 hash_index = bond_info->rx_hashtbl_used_head;
497 for (; hash_index != RLB_NULL_INDEX;
498 hash_index = client_info->used_next) {
499 client_info = &(bond_info->rx_hashtbl[hash_index]);
501 if ((client_info->slave == slave) &&
502 is_valid_ether_addr(client_info->mac_dst)) {
503 client_info->ntt = 1;
508 /* update the team's flag only after the whole iteration */
510 bond_info->rx_ntt = 1;
511 /* fasten the change */
512 bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
515 spin_unlock_bh(&bond->mode_lock);
518 /* mark all clients using src_ip to be updated */
519 static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
521 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
522 struct rlb_client_info *client_info;
525 spin_lock(&bond->mode_lock);
527 hash_index = bond_info->rx_hashtbl_used_head;
528 for (; hash_index != RLB_NULL_INDEX;
529 hash_index = client_info->used_next) {
530 client_info = &(bond_info->rx_hashtbl[hash_index]);
532 if (!client_info->slave) {
533 netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
536 /* update all clients using this src_ip that are not assigned
537 * to the team's address (curr_active_slave) and have a known
538 * unicast mac address.
540 if ((client_info->ip_src == src_ip) &&
541 !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
542 bond->dev->dev_addr) &&
543 is_valid_ether_addr(client_info->mac_dst)) {
544 client_info->ntt = 1;
545 bond_info->rx_ntt = 1;
549 spin_unlock(&bond->mode_lock);
552 static struct slave *rlb_choose_channel(struct sk_buff *skb,
553 struct bonding *bond,
554 const struct arp_pkt *arp)
556 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
557 struct slave *assigned_slave, *curr_active_slave;
558 struct rlb_client_info *client_info;
561 spin_lock(&bond->mode_lock);
563 curr_active_slave = rcu_dereference(bond->curr_active_slave);
565 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
566 client_info = &(bond_info->rx_hashtbl[hash_index]);
568 if (client_info->assigned) {
569 if ((client_info->ip_src == arp->ip_src) &&
570 (client_info->ip_dst == arp->ip_dst)) {
571 /* the entry is already assigned to this client */
572 if (!is_broadcast_ether_addr(arp->mac_dst)) {
573 /* update mac address from arp */
574 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
576 ether_addr_copy(client_info->mac_src, arp->mac_src);
578 assigned_slave = client_info->slave;
579 if (assigned_slave) {
580 spin_unlock(&bond->mode_lock);
581 return assigned_slave;
584 /* the entry is already assigned to some other client,
585 * move the old client to primary (curr_active_slave) so
586 * that the new client can be assigned to this entry.
588 if (curr_active_slave &&
589 client_info->slave != curr_active_slave) {
590 client_info->slave = curr_active_slave;
591 rlb_update_client(client_info);
595 /* assign a new slave */
596 assigned_slave = __rlb_next_rx_slave(bond);
598 if (assigned_slave) {
599 if (!(client_info->assigned &&
600 client_info->ip_src == arp->ip_src)) {
601 /* ip_src is going to be updated,
602 * fix the src hash list
604 u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
605 sizeof(arp->ip_src));
606 rlb_src_unlink(bond, hash_index);
607 rlb_src_link(bond, hash_src, hash_index);
610 client_info->ip_src = arp->ip_src;
611 client_info->ip_dst = arp->ip_dst;
612 /* arp->mac_dst is broadcast for arp requests.
613 * It will be updated with the client's actual unicast mac address
614 * upon receiving an arp reply.
616 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
617 ether_addr_copy(client_info->mac_src, arp->mac_src);
618 client_info->slave = assigned_slave;
620 if (is_valid_ether_addr(client_info->mac_dst)) {
621 client_info->ntt = 1;
622 bond->alb_info.rx_ntt = 1;
624 client_info->ntt = 0;
627 if (vlan_get_tag(skb, &client_info->vlan_id))
628 client_info->vlan_id = 0;
630 if (!client_info->assigned) {
631 u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
633 bond_info->rx_hashtbl_used_head = hash_index;
634 client_info->used_next = prev_tbl_head;
635 if (prev_tbl_head != RLB_NULL_INDEX) {
636 bond_info->rx_hashtbl[prev_tbl_head].used_prev =
639 client_info->assigned = 1;
643 spin_unlock(&bond->mode_lock);
645 return assigned_slave;
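/* Summary of the selection above: the destination IP is hashed into
 * rx_hashtbl; a matching, already-assigned entry reuses its slave,
 * otherwise __rlb_next_rx_slave() hands out receive slaves in a
 * round-robin-by-speed fashion and the entry is (re)linked on both the
 * "used" list and the per-source-IP chain so it can be updated or purged
 * later.
 */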
648 /* chooses (and returns) transmit channel for arp reply
649 * does not choose channel for other arp types since they are
650 * sent on the curr_active_slave
652 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
654 struct slave *tx_slave = NULL;
657 if (!pskb_network_may_pull(skb, sizeof(*arp)))
659 arp = (struct arp_pkt *)skb_network_header(skb);
661 /* Don't modify or load balance ARPs that do not originate locally
662 * (e.g., arrive via a bridge).
664 if (!bond_slave_has_mac_rx(bond, arp->mac_src))
667 if (arp->op_code == htons(ARPOP_REPLY)) {
668 /* the arp must be sent on the selected rx channel */
669 tx_slave = rlb_choose_channel(skb, bond, arp);
671 bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
672 tx_slave->dev->addr_len);
673 netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n",
674 tx_slave ? tx_slave->dev->name : "NULL");
675 } else if (arp->op_code == htons(ARPOP_REQUEST)) {
676 /* Create an entry in the rx_hashtbl for this client as a
678 * When the arp reply is received the entry will be updated
679 * with the correct unicast address of the client.
681 tx_slave = rlb_choose_channel(skb, bond, arp);
683 /* The ARP reply packets must be delayed so that
684 * they can cancel out the influence of the ARP request.
686 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
688 /* arp requests are broadcast and are sent on the primary;
689 * the arp request will collapse all clients on the subnet to
690 * the primary slave. We must register these clients to be
691 * updated with their assigned mac.
693 rlb_req_update_subnet_clients(bond, arp->ip_src);
694 netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n",
695 tx_slave ? tx_slave->dev->name : "NULL");
701 static void rlb_rebalance(struct bonding *bond)
703 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
704 struct slave *assigned_slave;
705 struct rlb_client_info *client_info;
709 spin_lock_bh(&bond->mode_lock);
712 hash_index = bond_info->rx_hashtbl_used_head;
713 for (; hash_index != RLB_NULL_INDEX;
714 hash_index = client_info->used_next) {
715 client_info = &(bond_info->rx_hashtbl[hash_index]);
716 assigned_slave = __rlb_next_rx_slave(bond);
717 if (assigned_slave && (client_info->slave != assigned_slave)) {
718 client_info->slave = assigned_slave;
719 if (!is_zero_ether_addr(client_info->mac_dst)) {
720 client_info->ntt = 1;
726 /* update the team's flag only after the whole iteration */
728 bond_info->rx_ntt = 1;
729 spin_unlock_bh(&bond->mode_lock);
732 /* Caller must hold mode_lock */
733 static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
735 entry->used_next = RLB_NULL_INDEX;
736 entry->used_prev = RLB_NULL_INDEX;
741 static void rlb_init_table_entry_src(struct rlb_client_info *entry)
743 entry->src_first = RLB_NULL_INDEX;
744 entry->src_prev = RLB_NULL_INDEX;
745 entry->src_next = RLB_NULL_INDEX;
748 static void rlb_init_table_entry(struct rlb_client_info *entry)
750 memset(entry, 0, sizeof(struct rlb_client_info));
751 rlb_init_table_entry_dst(entry);
752 rlb_init_table_entry_src(entry);
755 static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
757 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
758 u32 next_index = bond_info->rx_hashtbl[index].used_next;
759 u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
761 if (index == bond_info->rx_hashtbl_used_head)
762 bond_info->rx_hashtbl_used_head = next_index;
763 if (prev_index != RLB_NULL_INDEX)
764 bond_info->rx_hashtbl[prev_index].used_next = next_index;
765 if (next_index != RLB_NULL_INDEX)
766 bond_info->rx_hashtbl[next_index].used_prev = prev_index;
769 /* unlink a rlb hash table entry from the src list */
770 static void rlb_src_unlink(struct bonding *bond, u32 index)
772 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
773 u32 next_index = bond_info->rx_hashtbl[index].src_next;
774 u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
776 bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
777 bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
779 if (next_index != RLB_NULL_INDEX)
780 bond_info->rx_hashtbl[next_index].src_prev = prev_index;
782 if (prev_index == RLB_NULL_INDEX)
785 /* is prev_index pointing to the head of this list? */
786 if (bond_info->rx_hashtbl[prev_index].src_first == index)
787 bond_info->rx_hashtbl[prev_index].src_first = next_index;
789 bond_info->rx_hashtbl[prev_index].src_next = next_index;
793 static void rlb_delete_table_entry(struct bonding *bond, u32 index)
795 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
796 struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
798 rlb_delete_table_entry_dst(bond, index);
799 rlb_init_table_entry_dst(entry);
801 rlb_src_unlink(bond, index);
804 /* add the rx_hashtbl[ip_dst_hash] entry to the list
805 * of entries with identical ip_src_hash
807 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
809 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
812 bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
813 next = bond_info->rx_hashtbl[ip_src_hash].src_first;
814 bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
815 if (next != RLB_NULL_INDEX)
816 bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
817 bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
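/* rx_hashtbl is indexed by the hash of the destination IP, but each entry
 * is also threaded onto a second list keyed by the hash of its source IP
 * (src_first/src_next/src_prev). That second chain is what
 * rlb_purge_src_ip() walks when a host changes MAC and every entry using
 * the stale source mapping has to be deleted.
 */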
820 /* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
821 * not match arp->mac_src
823 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
825 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
826 u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
829 spin_lock_bh(&bond->mode_lock);
831 index = bond_info->rx_hashtbl[ip_src_hash].src_first;
832 while (index != RLB_NULL_INDEX) {
833 struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
834 u32 next_index = entry->src_next;
836 if (entry->ip_src == arp->ip_src &&
837 !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
838 rlb_delete_table_entry(bond, index);
841 spin_unlock_bh(&bond->mode_lock);
844 static int rlb_initialize(struct bonding *bond)
846 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
847 struct rlb_client_info *new_hashtbl;
848 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
851 new_hashtbl = kmalloc(size, GFP_KERNEL);
855 spin_lock_bh(&bond->mode_lock);
857 bond_info->rx_hashtbl = new_hashtbl;
859 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
861 for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
862 rlb_init_table_entry(bond_info->rx_hashtbl + i);
864 spin_unlock_bh(&bond->mode_lock);
866 /* register to receive ARPs */
867 bond->recv_probe = rlb_arp_recv;
872 static void rlb_deinitialize(struct bonding *bond)
874 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
876 spin_lock_bh(&bond->mode_lock);
878 kfree(bond_info->rx_hashtbl);
879 bond_info->rx_hashtbl = NULL;
880 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
882 spin_unlock_bh(&bond->mode_lock);
885 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
887 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
890 spin_lock_bh(&bond->mode_lock);
892 curr_index = bond_info->rx_hashtbl_used_head;
893 while (curr_index != RLB_NULL_INDEX) {
894 struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
895 u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
897 if (curr->vlan_id == vlan_id)
898 rlb_delete_table_entry(bond, curr_index);
900 curr_index = next_index;
903 spin_unlock_bh(&bond->mode_lock);
906 /*********************** tlb/rlb shared functions *********************/
908 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
909 __be16 vlan_proto, u16 vid)
911 struct learning_pkt pkt;
913 int size = sizeof(struct learning_pkt);
915 memset(&pkt, 0, size);
916 ether_addr_copy(pkt.mac_dst, mac_addr);
917 ether_addr_copy(pkt.mac_src, mac_addr);
918 pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
920 skb = dev_alloc_skb(size);
924 skb_put_data(skb, &pkt, size);
926 skb_reset_mac_header(skb);
927 skb->network_header = skb->mac_header + ETH_HLEN;
928 skb->protocol = pkt.type;
929 skb->priority = TC_PRIO_CONTROL;
930 skb->dev = slave->dev;
932 slave_dbg(slave->bond->dev, slave->dev,
933 "Send learning packet: mac %pM vlan %d\n", mac_addr, vid);
936 __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
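/* A learning packet is a tiny ETH_P_LOOPBACK frame whose source and
 * destination are both the MAC being advertised; switches that see it
 * update their forwarding tables toward the transmitting slave, which is
 * how TLB/RLB steer return traffic without any switch configuration.
 * The VLAN tag is added when the address lives on a tagged upper device.
 */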
941 struct alb_walk_data {
942 struct bonding *bond;
948 static int alb_upper_dev_walk(struct net_device *upper,
949 struct netdev_nested_priv *priv)
951 struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
952 bool strict_match = data->strict_match;
953 struct bonding *bond = data->bond;
954 struct slave *slave = data->slave;
955 u8 *mac_addr = data->mac_addr;
956 struct bond_vlan_tag *tags;
958 if (is_vlan_dev(upper) &&
959 bond->dev->lower_level == upper->lower_level - 1) {
960 if (upper->addr_assign_type == NET_ADDR_STOLEN) {
961 alb_send_lp_vid(slave, mac_addr,
962 vlan_dev_vlan_proto(upper),
963 vlan_dev_vlan_id(upper));
965 alb_send_lp_vid(slave, upper->dev_addr,
966 vlan_dev_vlan_proto(upper),
967 vlan_dev_vlan_id(upper));
971 /* If this is a macvlan device, then only send updates
972 * when strict_match is turned off.
974 if (netif_is_macvlan(upper) && !strict_match) {
975 tags = bond_verify_device_path(bond->dev, upper, 0);
976 if (IS_ERR_OR_NULL(tags))
978 alb_send_lp_vid(slave, upper->dev_addr,
979 tags[0].vlan_proto, tags[0].vlan_id);
986 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
989 struct bonding *bond = bond_get_bond_by_slave(slave);
990 struct netdev_nested_priv priv;
991 struct alb_walk_data data = {
992 .strict_match = strict_match,
993 .mac_addr = mac_addr,
998 priv.data = (void *)&data;
1000 alb_send_lp_vid(slave, mac_addr, 0, 0);
1002 /* loop through all devices and see if we need to send a packet
1006 netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv);
1010 static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
1013 struct net_device *dev = slave->dev;
1014 struct sockaddr_storage ss;
1016 if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
1017 memcpy(dev->dev_addr, addr, len);
1021 /* for rlb each slave must have a unique hw mac address so that
1022 * each slave will receive packets destined to a different mac
1024 memcpy(ss.__data, addr, len);
1025 ss.ss_family = dev->type;
1026 if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
1027 slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
1033 /* Swap MAC addresses between two slaves.
1035 * Called with RTNL held, and no other locks.
1037 static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
1039 u8 tmp_mac_addr[MAX_ADDR_LEN];
1041 bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
1042 slave1->dev->addr_len);
1043 alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
1044 slave2->dev->addr_len);
1045 alb_set_slave_mac_addr(slave2, tmp_mac_addr,
1046 slave1->dev->addr_len);
1050 /* Send learning packets after MAC address swap.
1052 * Called with RTNL and no other locks
1054 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1055 struct slave *slave2)
1057 int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
1058 struct slave *disabled_slave = NULL;
1062 /* fasten the change in the switch */
1063 if (bond_slave_can_tx(slave1)) {
1064 alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
1065 if (bond->alb_info.rlb_enabled) {
1066 /* inform the clients that the mac address
1069 rlb_req_update_slave_clients(bond, slave1);
1072 disabled_slave = slave1;
1075 if (bond_slave_can_tx(slave2)) {
1076 alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
1077 if (bond->alb_info.rlb_enabled) {
1078 /* inform the clients that the mac address
1081 rlb_req_update_slave_clients(bond, slave2);
1084 disabled_slave = slave2;
1087 if (bond->alb_info.rlb_enabled && slaves_state_differ) {
1088 /* A disabled slave was assigned an active mac addr */
1089 rlb_teach_disabled_mac_on_primary(bond,
1090 disabled_slave->dev->dev_addr);
1095 * alb_change_hw_addr_on_detach
1096 * @bond: bonding we're working on
1097 * @slave: the slave that was just detached
1099 * We assume that @slave was already detached from the slave list.
1101 * If @slave's permanent hw address is different both from its current
1102 * address and from @bond's address, then somewhere in the bond there's
1103 * a slave that has @slave's permanent address as its current address.
1104 * We'll make sure that slave no longer uses @slave's permanent address.
1106 * Caller must hold RTNL and no other locks
1108 static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
1112 struct slave *found_slave;
1114 perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1115 slave->dev->dev_addr);
1116 perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1117 bond->dev->dev_addr);
1119 if (perm_curr_diff && perm_bond_diff) {
1120 found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
1123 alb_swap_mac_addr(slave, found_slave);
1124 alb_fasten_mac_swap(bond, slave, found_slave);
1130 * alb_handle_addr_collision_on_attach
1131 * @bond: bonding we're working on
1132 * @slave: the slave that was just attached
1134 * checks uniqueness of slave's mac address and handles the case where the
1135 * new slave uses the bond's mac address.
1137 * If the permanent hw address of @slave is @bond's hw address, we need to
1138 * find a different hw address to give @slave, that isn't in use by any other
1139 * slave in the bond. This address must be, of course, one of the permanent
1140 * addresses of the other slaves.
1142 * We go over the slave list, and for each slave there we compare its
1143 * permanent hw address with the current address of all the other slaves.
1144 * If no match was found, then we've found a slave with a permanent address
1145 * that isn't used by any other slave in the bond, so we can assign it to
1148 * assumption: this function is called before @slave is attached to the
1151 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
1153 struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
1154 struct slave *tmp_slave1, *free_mac_slave = NULL;
1155 struct list_head *iter;
1157 if (!bond_has_slaves(bond)) {
1158 /* this is the first slave */
1162 /* if slave's mac address differs from bond's mac address
1163 * check uniqueness of slave's mac address against the other
1164 * slaves in the bond.
1166 if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
1167 if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
1170 /* Try setting slave mac to bond address and fall-through
1171 * to code handling that situation below...
1173 alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
1174 bond->dev->addr_len);
1177 /* The slave's address is equal to the address of the bond.
1178 * Search for a spare address in the bond for this slave.
1180 bond_for_each_slave(bond, tmp_slave1, iter) {
1181 if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
1182 /* no slave has tmp_slave1's perm addr
1185 free_mac_slave = tmp_slave1;
1189 if (!has_bond_addr) {
1190 if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
1191 bond->dev->dev_addr)) {
1193 has_bond_addr = tmp_slave1;
1198 if (free_mac_slave) {
1199 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
1200 free_mac_slave->dev->addr_len);
1202 slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n",
1203 free_mac_slave->dev->name);
1205 } else if (has_bond_addr) {
1206 slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n");
1214 * alb_set_mac_address
1215 * @bond: bonding we're working on
1216 * @addr: MAC address to set
1218 * In TLB mode all slaves are configured to the bond's hw address, but set
1219 * their dev_addr field to different addresses (based on their permanent hw
1222 * For each slave, this function sets the interface to the new address and then
1223 * changes its dev_addr field to its previous value.
1225 * Unwinding assumes bond's mac address has not yet changed.
1227 static int alb_set_mac_address(struct bonding *bond, void *addr)
1229 struct slave *slave, *rollback_slave;
1230 struct list_head *iter;
1231 struct sockaddr_storage ss;
1232 char tmp_addr[MAX_ADDR_LEN];
1235 if (bond->alb_info.rlb_enabled)
1238 bond_for_each_slave(bond, slave, iter) {
1239 /* save net_device's current hw address */
1240 bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
1241 slave->dev->addr_len);
1243 res = dev_set_mac_address(slave->dev, addr, NULL);
1245 /* restore net_device's hw address */
1246 bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
1247 slave->dev->addr_len);
1256 memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
1257 ss.ss_family = bond->dev->type;
1259 /* unwind from head to the slave that failed */
1260 bond_for_each_slave(bond, rollback_slave, iter) {
1261 if (rollback_slave == slave)
1263 bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
1264 rollback_slave->dev->addr_len);
1265 dev_set_mac_address(rollback_slave->dev,
1266 (struct sockaddr *)&ss, NULL);
1267 bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
1268 rollback_slave->dev->addr_len);
1274 /************************ exported alb functions ************************/
1276 int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
1280 res = tlb_initialize(bond);
1285 bond->alb_info.rlb_enabled = 1;
1286 res = rlb_initialize(bond);
1288 tlb_deinitialize(bond);
1292 bond->alb_info.rlb_enabled = 0;
1298 void bond_alb_deinitialize(struct bonding *bond)
1300 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1302 tlb_deinitialize(bond);
1304 if (bond_info->rlb_enabled)
1305 rlb_deinitialize(bond);
1308 static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
1309 struct slave *tx_slave)
1311 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1312 struct ethhdr *eth_data = eth_hdr(skb);
1315 /* unbalanced or unassigned, send through primary */
1316 tx_slave = rcu_dereference(bond->curr_active_slave);
1317 if (bond->params.tlb_dynamic_lb)
1318 bond_info->unbalanced_load += skb->len;
1321 if (tx_slave && bond_slave_can_tx(tx_slave)) {
1322 if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
1323 ether_addr_copy(eth_data->h_source,
1324 tx_slave->dev->dev_addr);
1327 return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
1330 if (tx_slave && bond->params.tlb_dynamic_lb) {
1331 spin_lock(&bond->mode_lock);
1332 __tlb_clear_slave(bond, tx_slave, 0);
1333 spin_unlock(&bond->mode_lock);
1336 /* no suitable interface, frame not sent */
1337 return bond_tx_drop(bond->dev, skb);
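/* Transmit fallback above: with no usable tx_slave the frame is sent on
 * curr_active_slave and its length is charged to unbalanced_load (dynamic
 * TLB only); if the chosen slave cannot transmit at all, its TLB entries
 * are cleared so the hash buckets get reassigned on the next packets.
 */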
1340 struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
1341 struct sk_buff *skb)
1343 struct slave *tx_slave = NULL;
1344 struct ethhdr *eth_data;
1347 skb_reset_mac_header(skb);
1348 eth_data = eth_hdr(skb);
1350 /* Do not TX balance any multicast or broadcast */
1351 if (!is_multicast_ether_addr(eth_data->h_dest)) {
1352 switch (skb->protocol) {
1353 case htons(ETH_P_IP):
1354 case htons(ETH_P_IPX):
1355 /* In case of IPX, it will fall back to L2 hash */
1356 case htons(ETH_P_IPV6):
1357 hash_index = bond_xmit_hash(bond, skb);
1358 if (bond->params.tlb_dynamic_lb) {
1359 tx_slave = tlb_choose_channel(bond,
1363 struct bond_up_slave *slaves;
1366 slaves = rcu_dereference(bond->usable_slaves);
1367 count = slaves ? READ_ONCE(slaves->count) : 0;
1369 tx_slave = slaves->arr[hash_index %
1378 netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1380 struct bonding *bond = netdev_priv(bond_dev);
1381 struct slave *tx_slave;
1383 tx_slave = bond_xmit_tlb_slave_get(bond, skb);
1384 return bond_do_alb_xmit(skb, bond, tx_slave);
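/* Two TLB flavours are visible above: with tlb_dynamic_lb the slave comes
 * from the byte-counting hash table (tlb_choose_channel), while the
 * non-dynamic variant simply indexes the precomputed usable_slaves array
 * with bond_xmit_hash(), avoiding mode_lock at the cost of load-based
 * rebalancing.
 */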
1387 struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
1388 struct sk_buff *skb)
1390 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1391 static const __be32 ip_bcast = htonl(0xffffffff);
1392 struct slave *tx_slave = NULL;
1393 const u8 *hash_start = NULL;
1394 bool do_tx_balance = true;
1395 struct ethhdr *eth_data;
1399 skb_reset_mac_header(skb);
1400 eth_data = eth_hdr(skb);
1402 switch (ntohs(skb->protocol)) {
1404 const struct iphdr *iph;
1406 if (is_broadcast_ether_addr(eth_data->h_dest) ||
1407 !pskb_network_may_pull(skb, sizeof(*iph))) {
1408 do_tx_balance = false;
1412 if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
1413 do_tx_balance = false;
1416 hash_start = (char *)&(iph->daddr);
1417 hash_size = sizeof(iph->daddr);
1421 const struct ipv6hdr *ip6hdr;
1423 /* IPv6 doesn't really use broadcast mac address, but leave
1424 * that here just in case.
1426 if (is_broadcast_ether_addr(eth_data->h_dest)) {
1427 do_tx_balance = false;
1431 /* IPv6 uses all-nodes multicast as an equivalent to
1432 * broadcasts in IPv4.
1434 if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
1435 do_tx_balance = false;
1439 if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
1440 do_tx_balance = false;
1443 /* Additionally, DAD probes should not be tx-balanced as that
1444 * will lead to false positives for duplicate addresses and
1445 * prevent address configuration from working.
1447 ip6hdr = ipv6_hdr(skb);
1448 if (ipv6_addr_any(&ip6hdr->saddr)) {
1449 do_tx_balance = false;
1453 hash_start = (char *)&ip6hdr->daddr;
1454 hash_size = sizeof(ip6hdr->daddr);
1458 const struct ipxhdr *ipxhdr;
1460 if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
1461 do_tx_balance = false;
1464 ipxhdr = (struct ipxhdr *)skb_network_header(skb);
1466 if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
1467 /* something is wrong with this packet */
1468 do_tx_balance = false;
1472 if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
1473 /* The only protocol worth balancing in
1474 * this family since it has an "ARP" like
1477 do_tx_balance = false;
1481 eth_data = eth_hdr(skb);
1482 hash_start = (char *)eth_data->h_dest;
1483 hash_size = ETH_ALEN;
1487 do_tx_balance = false;
1488 if (bond_info->rlb_enabled)
1489 tx_slave = rlb_arp_xmit(skb, bond);
1492 do_tx_balance = false;
1496 if (do_tx_balance) {
1497 if (bond->params.tlb_dynamic_lb) {
1498 hash_index = _simple_hash(hash_start, hash_size);
1499 tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
1502 * do_tx_balance means we are free to select the tx_slave
1503 * So we do exactly what tlb would do for hash selection
1506 struct bond_up_slave *slaves;
1509 slaves = rcu_dereference(bond->usable_slaves);
1510 count = slaves ? READ_ONCE(slaves->count) : 0;
1512 tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
1519 netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1521 struct bonding *bond = netdev_priv(bond_dev);
1522 struct slave *tx_slave = NULL;
1524 tx_slave = bond_xmit_alb_slave_get(bond, skb);
1525 return bond_do_alb_xmit(skb, bond, tx_slave);
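/* ALB transmit is therefore TLB plus receive steering: IPv4/IPv6 (and the
 * IPX special case) are balanced on a hash of the destination address,
 * anything unsuitable falls back to curr_active_slave, and ARP is handed
 * to rlb_arp_xmit() so replies pin individual clients to different slaves.
 */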
1528 void bond_alb_monitor(struct work_struct *work)
1530 struct bonding *bond = container_of(work, struct bonding,
1532 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1533 struct list_head *iter;
1534 struct slave *slave;
1536 if (!bond_has_slaves(bond)) {
1537 bond_info->tx_rebalance_counter = 0;
1538 bond_info->lp_counter = 0;
1544 bond_info->tx_rebalance_counter++;
1545 bond_info->lp_counter++;
1547 /* send learning packets */
1548 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1551 bond_for_each_slave_rcu(bond, slave, iter) {
1552 /* If updating current_active, use all currently
1553 * configured user mac addresses (!strict_match). Otherwise, only
1554 * use mac of the slave device.
1555 * In RLB mode, we always use strict matches.
1557 strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
1558 bond_info->rlb_enabled);
1559 alb_send_learning_packets(slave, slave->dev->dev_addr,
1562 bond_info->lp_counter = 0;
1565 /* rebalance tx traffic */
1566 if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
1567 bond_for_each_slave_rcu(bond, slave, iter) {
1568 tlb_clear_slave(bond, slave, 1);
1569 if (slave == rcu_access_pointer(bond->curr_active_slave)) {
1570 SLAVE_TLB_INFO(slave).load =
1571 bond_info->unbalanced_load /
1572 BOND_TLB_REBALANCE_INTERVAL;
1573 bond_info->unbalanced_load = 0;
1576 bond_info->tx_rebalance_counter = 0;
1579 if (bond_info->rlb_enabled) {
1580 if (bond_info->primary_is_promisc &&
1581 (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
1583 /* dev_set_promiscuity requires rtnl and
1584 * nothing else. Avoid race with bond_close.
1587 if (!rtnl_trylock())
1590 bond_info->rlb_promisc_timeout_counter = 0;
1592 /* If the primary was set to promiscuous mode
1593 * because a slave was disabled then
1594 * it can now leave promiscuous mode.
1596 dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
1598 bond_info->primary_is_promisc = 0;
1604 if (bond_info->rlb_rebalance) {
1605 bond_info->rlb_rebalance = 0;
1606 rlb_rebalance(bond);
1609 /* check if clients need updating */
1610 if (bond_info->rx_ntt) {
1611 if (bond_info->rlb_update_delay_counter) {
1612 --bond_info->rlb_update_delay_counter;
1614 rlb_update_rx_clients(bond);
1615 if (bond_info->rlb_update_retry_counter)
1616 --bond_info->rlb_update_retry_counter;
1618 bond_info->rx_ntt = 0;
1624 queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
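/* The monitor reschedules itself every alb_delta_in_ticks and drives all
 * periodic work from that tick: learning packets every BOND_ALB_LP_TICKS,
 * a TLB rebalance every BOND_TLB_REBALANCE_TICKS, dropping the primary out
 * of promiscuous mode after RLB_PROMISC_TIMEOUT, and flushing any pending
 * RLB client updates once the delay/retry counters allow it.
 */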
1627 /* assumption: called before the slave is attached to the bond
1628 * and not locked by the bond lock
1630 int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
1634 res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
1635 slave->dev->addr_len);
1639 res = alb_handle_addr_collision_on_attach(bond, slave);
1643 tlb_init_slave(slave);
1645 /* order a rebalance ASAP */
1646 bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
1648 if (bond->alb_info.rlb_enabled)
1649 bond->alb_info.rlb_rebalance = 1;
1654 /* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
1657 * Caller must hold RTNL and no other locks
1659 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
1661 if (bond_has_slaves(bond))
1662 alb_change_hw_addr_on_detach(bond, slave);
1664 tlb_clear_slave(bond, slave, 0);
1666 if (bond->alb_info.rlb_enabled) {
1667 bond->alb_info.rx_slave = NULL;
1668 rlb_clear_slave(bond, slave);
1673 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
1675 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1677 if (link == BOND_LINK_DOWN) {
1678 tlb_clear_slave(bond, slave, 0);
1679 if (bond->alb_info.rlb_enabled)
1680 rlb_clear_slave(bond, slave);
1681 } else if (link == BOND_LINK_UP) {
1682 /* order a rebalance ASAP */
1683 bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
1684 if (bond->alb_info.rlb_enabled) {
1685 bond->alb_info.rlb_rebalance = 1;
1686 /* If the updelay module parameter is smaller than the
1687 * forwarding delay of the switch, the rebalance will
1688 * not work because the rebalance arp replies will
1689 * not be forwarded to the clients.
1694 if (bond_is_nondyn_tlb(bond)) {
1695 if (bond_update_slave_arr(bond, NULL))
1696 pr_err("Failed to build slave-array for TLB mode.\n");
1701 * bond_alb_handle_active_change - assign new curr_active_slave
1702 * @bond: our bonding struct
1703 * @new_slave: new slave to assign
1705 * Set the bond->curr_active_slave to @new_slave and handle
1706 * mac address swapping and promiscuity changes as needed.
1708 * Caller must hold RTNL
1710 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
1712 struct slave *swap_slave;
1713 struct slave *curr_active;
1715 curr_active = rtnl_dereference(bond->curr_active_slave);
1716 if (curr_active == new_slave)
1719 if (curr_active && bond->alb_info.primary_is_promisc) {
1720 dev_set_promiscuity(curr_active->dev, -1);
1721 bond->alb_info.primary_is_promisc = 0;
1722 bond->alb_info.rlb_promisc_timeout_counter = 0;
1725 swap_slave = curr_active;
1726 rcu_assign_pointer(bond->curr_active_slave, new_slave);
1728 if (!new_slave || !bond_has_slaves(bond))
1731 /* set the new curr_active_slave to the bond's mac address
1732 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
1735 swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);
1737 /* Arrange for swap_slave and new_slave to temporarily be
1738 * ignored so we can mess with their MAC addresses without
1739 * fear of interference from transmit activity.
1742 tlb_clear_slave(bond, swap_slave, 1);
1743 tlb_clear_slave(bond, new_slave, 1);
1745 /* in TLB mode, the slave might flip down/up with the old dev_addr,
1746 * and thus filter bond->dev_addr's packets, so force bond's mac
1748 if (BOND_MODE(bond) == BOND_MODE_TLB) {
1749 struct sockaddr_storage ss;
1750 u8 tmp_addr[MAX_ADDR_LEN];
1752 bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
1753 new_slave->dev->addr_len);
1755 bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
1756 bond->dev->addr_len);
1757 ss.ss_family = bond->dev->type;
1758 /* we don't care if it can't change its mac, best effort */
1759 dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
1762 bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
1763 new_slave->dev->addr_len);
1766 /* curr_active_slave must be set before calling alb_swap_mac_addr */
1768 /* swap mac address */
1769 alb_swap_mac_addr(swap_slave, new_slave);
1770 alb_fasten_mac_swap(bond, swap_slave, new_slave);
1772 /* set the new_slave to the bond mac address */
1773 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
1774 bond->dev->addr_len);
1775 alb_send_learning_packets(new_slave, bond->dev->dev_addr,
1780 /* Called with RTNL */
1781 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1783 struct bonding *bond = netdev_priv(bond_dev);
1784 struct sockaddr_storage *ss = addr;
1785 struct slave *curr_active;
1786 struct slave *swap_slave;
1789 if (!is_valid_ether_addr(ss->__data))
1790 return -EADDRNOTAVAIL;
1792 res = alb_set_mac_address(bond, addr);
1796 bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
1798 /* If there is no curr_active_slave there is nothing else to do.
1799 * Otherwise we'll need to pass the new address to it and handle
1802 curr_active = rtnl_dereference(bond->curr_active_slave);
1806 swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
1809 alb_swap_mac_addr(swap_slave, curr_active);
1810 alb_fasten_mac_swap(bond, swap_slave, curr_active);
1812 alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
1813 bond_dev->addr_len);
1815 alb_send_learning_packets(curr_active,
1816 bond_dev->dev_addr, false);
1817 if (bond->alb_info.rlb_enabled) {
1818 /* inform clients mac address has changed */
1819 rlb_req_update_slave_clients(bond, curr_active);
1826 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
1828 if (bond->alb_info.rlb_enabled)
1829 rlb_clear_vlan(bond, vlan_id);