/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;
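
/*
 * GID table updates are never done in notifier context; each event is
 * packaged into one of the work structs below and run on gid_cache_wq,
 * an ordered workqueue, so updates are serialized and may sleep.
 */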
enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ		3
struct netdev_event_work_cmd {
	roce_netdev_callback	cb;
	roce_netdev_filter	filter;
	struct net_device	*ndev;
	struct net_device	*filter_ndev;
};

struct netdev_event_work {
	struct work_struct		work;
	struct netdev_event_work_cmd	cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
	bool (*is_supported)(const struct ib_device *device, u32 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);
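
/* Apply one add/del operation for every GID type the port supports,
 * e.g. both a RoCE v1 and a RoCE v2 entry for the same address.
 */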
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u32 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}

enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};

static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}
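
/*
 * Match a netdev that is served by this RDMA port: either the
 * (VLAN-stripped) event netdev is the port's netdev itself, or the
 * event netdev sits above the port's netdev while the port's netdev
 * is the bond's active slave or is not bonded at all.
 */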
#define REQUIRED_BOND_STATES		(BONDING_SLAVE_STATE_ACTIVE |	\
					 BONDING_SLAVE_STATE_NA)
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}
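
/*
 * Match only ports whose netdev is currently an inactive bond slave;
 * such ports must not carry default GIDs, which belong to the master.
 */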
static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
				  struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
		BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev:		IB device to check
 * @port:		Port to consider for adding default GID
 * @rdma_ndev:		rdma netdevice pointer
 * @cookie:		Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving the default RoCE GID, returns false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();

	/*
	 * When the rdma netdevice is used in bonding, the bonding master
	 * netdevice should be considered for default GIDs. Therefore, ignore
	 * slave rdma netdevices when bonding is considered.
	 * Additionally, when the event (cookie) netdevice is the bond master
	 * device, make sure that it is the upper netdevice of the rdma
	 * netdevice.
	 */
	res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
	       (netif_is_bond_master(cookie_ndev) &&
		rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

	rcu_read_unlock();
	return res;
}

static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
			    struct net_device *rdma_ndev, void *cookie)
{
	return true;
}

static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
				struct net_device *rdma_ndev, void *cookie)
{
	bool res;

	if (!rdma_ndev)
		return false;

	if (rdma_ndev == cookie)
		return true;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}

/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is bond master device of netdevice of the RDMA device of port.
 * @ib_dev:		IB device to check
 * @port:		Port to consider for adding default GID
 * @rdma_ndev:		Pointer to rdma netdevice
 * @cookie:		Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if the cookie netdev
 * is a bond master device and rdma_ndev is its lower netdevice. It might
 * not have been established as a slave device yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev,
				 void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool match = false;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	if (netif_is_bond_master(cookie_ndev) &&
	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
		match = true;
	rcu_read_unlock();

	return match;
}

static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u32 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
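
/*
 * Delete the default GIDs of a port whose netdev is an inactive bond
 * slave under (or equal to) event_ndev; while inactive, the default
 * GIDs belong on the bond master only.
 */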
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u32 port,
					    struct net_device *rdma_ndev,
					    struct net_device *event_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
	unsigned long gid_type_mask;

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) !=
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
				     gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
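
/*
 * Snapshot the netdev's IPv4 addresses into a private list under RCU,
 * then add GIDs outside the lock: ib_cache_gid_add() may sleep and so
 * cannot be called from the RCU read-side critical section.
 */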
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u32 port, struct net_device *ndev)
{
	const struct in_ifaddr *ifa;
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}
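
/*
 * IPv6 counterpart of enum_netdev_ipv4_ips(): addresses are copied out
 * under the inet6_dev lock and converted to GIDs only after the lock
 * is dropped.
 */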
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u32 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head	list;
		struct sockaddr_in6	sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid	gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}

static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	_add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev:	RDMA device pointer
 * @port:	Port of the RDMA device whose GID table to consider
 * @rdma_ndev:	Unused rdma netdevice
 * @cookie:	Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void add_default_gids(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}
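
/*
 * Enumeration callback for the rescan helpers below: walk every
 * netdevice in every network namespace and (re)add the default and
 * IP-based GIDs that are relevant to this port.
 */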
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u32 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Hold net_rwsem to make sure the list of netns and their
	 * netdevs does not change under our feet.
	 */
	down_read(&net_rwsem);
	for_each_net(net)
		for_each_netdev(net, ndev) {
			/*
			 * Filter and add default GIDs of the primary netdevice
			 * when not in bonding mode, or add default GIDs
			 * of the bond master device, when in bonding mode.
			 */
			if (is_ndev_for_default_gid_filter(ib_dev, port,
							   rdma_ndev, ndev))
				add_default_gids(ib_dev, port, rdma_ndev, ndev);

			if (is_eth_port_of_netdev_filter(ib_dev, port,
							 rdma_ndev, ndev))
				_add_netdev_ips(ib_dev, port, ndev);
		}
	up_read(&net_rwsem);
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev:         the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

/**
 * rdma_roce_rescan_port - Rescan all of the network devices in the system
 * and add their gids if relevant to the port of the RoCE device.
 *
 * @ib_dev: IB device
 * @port: Port number
 */
void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port)
{
	struct net_device *ndev = NULL;

	if (rdma_protocol_roce(ib_dev, port)) {
		ndev = ib_device_get_netdev(ib_dev, port);
		if (!ndev)
			return;
		enum_all_gids_of_dev_cb(ib_dev, port, ndev, ndev);
		dev_put(ndev);
	}
}
EXPORT_SYMBOL(rdma_roce_rescan_port);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u32 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}
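
/*
 * Helpers to apply an operation to a netdev and every device stacked
 * above it (VLANs, bond masters). Each upper device is held while it
 * sits on the temporary list and put after it is handled.
 */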
struct upper_list {
	struct list_head list;
	struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper,
			     struct netdev_nested_priv *priv)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = (struct list_head *)priv->data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}

static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u32 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct netdev_nested_priv priv;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	priv.data = &upper_list;
	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}

void roce_del_all_netdev_gids(struct ib_device *ib_dev,
			      u32 port, struct net_device *ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
}
EXPORT_SYMBOL(roce_del_all_netdev_gids);

static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
						master_ndev);
		dev_put(master_ndev);
	}
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}
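
/*
 * Package the command array into a work item: any command without an
 * explicit ndev/filter_ndev defaults to the event netdev, and each
 * device is held here and put by netdevice_event_work_handler().
 */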
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
	.cb	= add_netdev_ips,
	.filter	= is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb	= add_netdev_upper_ips,
	.filter = is_eth_port_of_netdev_filter
};
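
/*
 * Build the command sequence for a CHANGEUPPER unlink event: drop the
 * IP-based GIDs derived from the old upper device, then re-add those
 * still valid for the port's own netdev.
 */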
static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
		  struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd
			upper_ips_del_cmd = {
		.cb	= del_netdev_upper_ips,
		.filter	= upper_device_filter
	};

	cmds[0] = upper_ips_del_cmd;
	cmds[0].ndev = changeupper_info->upper_dev;
	cmds[1] = add_cmd;
}

static const struct netdev_event_work_cmd bonding_default_add_cmd = {
	.cb	= add_default_gids,
	.filter	= is_upper_ndev_bond_master_filter
};

static void
ndev_event_link(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd
			bonding_default_del_cmd = {
		.cb	= del_default_gids,
		.filter	= is_upper_ndev_bond_master_filter
	};
	/*
	 * When a lower netdev is linked to its upper bonding
	 * netdev, delete the lower slave netdev's default GIDs.
	 */
	cmds[0] = bonding_default_del_cmd;
	cmds[0].ndev = event_ndev;
	cmds[0].filter_ndev = changeupper_info->upper_dev;

	/* Now add the bonding upper device's default GIDs */
	cmds[1] = bonding_default_add_cmd;
	cmds[1].ndev = changeupper_info->upper_dev;
	cmds[1].filter_ndev = changeupper_info->upper_dev;

	/* Now add the bonding upper device's IP-based GIDs */
	cmds[2] = add_cmd_upper_ips;
	cmds[2].ndev = changeupper_info->upper_dev;
	cmds[2].filter_ndev = changeupper_info->upper_dev;
}

static void netdevice_event_changeupper(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	if (changeupper_info->linking)
		ndev_event_link(event_ndev, changeupper_info, cmds);
	else
		ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
	.cb	= add_default_gids,
	.filter	= is_ndev_for_default_gid_filter,
};
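
/*
 * Main netdevice notifier: translate each event into an ordered list
 * of up to ROCE_NETDEV_CALLBACK_SZ filter+callback commands, executed
 * against every RoCE port from workqueue context.
 */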
static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd
			bonding_default_del_cmd_join = {
		.cb	= del_netdev_default_ips_join,
		.filter	= is_eth_port_inactive_slave_filter
	};
	static const struct netdev_event_work_cmd
			netdev_del_cmd = {
		.cb	= del_netdev_ips,
		.filter = is_eth_port_of_netdev_filter
	};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_default_gid_cmd;
		cmds[2] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = netdev_del_cmd;
		if (ndev->reg_state == NETREG_REGISTERED) {
			cmds[1] = add_default_gid_cmd;
			cmds[2] = add_cmd;
		}
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(ndev,
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		/* Add default GIDs of the bond device */
		cmds[1] = bonding_default_add_cmd;
		/* Add IP based GIDs of the bond device */
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}
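
/*
 * Address notifier path: addr_event() captured the address and
 * operation in a work item; here it is converted to a GID and applied
 * to every RoCE port that serves the address's netdev.
 */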
static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
				 work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}

static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in	in;
	struct net_device	*ndev;
	struct in_ifaddr	*ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6	in6;
	struct net_device	*ndev;
	struct inet6_ifaddr	*ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);
	/* We rely on the netdevice notifier to enumerate all existing
	 * devices in the system. Register to this notifier last to
	 * make sure we will not miss any IP add/del callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);
	/* Ensure all gid deletion tasks complete before we go down,
	 * to avoid any reference to freed memory. By the time ib-core
	 * is removed, all physical devices have been removed, so no
	 * issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}