The dynamic key update for addr_list_lock still causes trouble;
for example, the following race condition still exists:

CPU 0:				CPU 1:
(RCU read lock)			(RTNL lock)
dev_mc_seq_show()		netdev_update_lockdep_key()
				 -> lockdep_unregister_key()
 -> netif_addr_lock_bh()

because lockdep does not provide an API to update a key atomically.
Therefore, we have to move addr_list_lock back to static keys and
use subclasses for nest locking, as before.
In commit 1a33e10e4a95 ("net: partially revert dynamic lockdep key
changes"), I already reverted most parts of commit ab92d68fc22f
("net: core: add generic lockdep keys").

This patch reverts the rest and also part of commit f3b0a18bb6cb
("net: remove unnecessary variables and callback"). After this
patch, addr_list_lock changes back to using static keys and
subclasses to satisfy lockdep. Thanks to dev->lower_level, we do
not have to change back to ->ndo_get_lock_subclass().

And hopefully this reduces some syzbot lockdep noise too.
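
For reference, the pattern this patch restores is small. The sketch
below is illustrative only (the "foo" driver and its key name are
made up; the real per-driver hooks are in the diff that follows):
each stacked device type declares one static lockdep key for its
addr_list_lock and records dev->lower_level as the subclass, so two
locks of the same class may nest without lockdep complaining.

	/* Hypothetical "foo" driver: one static key per device type. */
	static struct lock_class_key foo_netdev_addr_lock_key;

	static void foo_set_lockdep_class(struct net_device *dev)
	{
		/* Same class for every foo device; the subclass encodes
		 * the device's depth in the stack, so nested acquisition
		 * (upper lock held, then lower lock) passes lockdep.
		 */
		lockdep_set_class_and_subclass(&dev->addr_list_lock,
					       &foo_netdev_addr_lock_key,
					       dev->lower_level);
	}

At run time, dev_uc_sync() and friends then take the lower device's
lock with netif_addr_lock_nested(), i.e. spin_lock_nested() with
dev->lower_level as the subclass, while the caller already holds the
upper device's addr_list_lock.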
Reported-by: [email protected]
Cc: Taehee Yoo <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
17 files changed:
 	case BOND_RELEASE_OLD:
 	case SIOCBONDRELEASE:
 		res = bond_release(bond_dev, slave_dev);
-		if (!res)
-			netdev_update_lockdep_key(slave_dev);
 		break;
 	case BOND_SETHWADDR_OLD:
 	case SIOCBONDSETHWADDR:
 	case '-':
 		slave_dbg(bond->dev, dev, "Releasing interface\n");
 		ret = bond_release(bond->dev, dev);
-		if (!ret)
-			netdev_update_lockdep_key(dev);
  * off into a separate class since they always nest.
  */
 static struct lock_class_key bpq_netdev_xmit_lock_key;
+static struct lock_class_key bpq_netdev_addr_lock_key;

 static void bpq_set_lockdep_class_one(struct net_device *dev,
 				      struct netdev_queue *txq,

 static void bpq_set_lockdep_class(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
 }
+static struct lock_class_key macsec_netdev_addr_lock_key;
+
 static int macsec_newlink(struct net *net, struct net_device *dev,
 			  struct nlattr *tb[], struct nlattr *data[],
 			  struct netlink_ext_ack *extack)

 		return err;

 	netdev_lockdep_set_classes(dev);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macsec_netdev_addr_lock_key,
+				       dev->lower_level);

 	err = netdev_upper_dev_link(real_dev, dev, extack);
 	if (err < 0)
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
  */
+static struct lock_class_key macvlan_netdev_addr_lock_key;
+
 #define ALWAYS_ON_OFFLOADS \
 	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
 	 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)

 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

+static void macvlan_set_lockdep_class(struct net_device *dev)
+{
+	netdev_lockdep_set_classes(dev);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       dev->lower_level);
+}
+
 static int macvlan_init(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);

 	dev->gso_max_size	= lowerdev->gso_max_size;
 	dev->gso_max_segs	= lowerdev->gso_max_segs;
 	dev->hard_header_len	= lowerdev->hard_header_len;
-
-	netdev_lockdep_set_classes(dev);
+	macvlan_set_lockdep_class(dev);

 	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan->pcpu_stats)
 		mod_timer(&vxlan->age_timer, jiffies);

 	netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
-	if (lowerdev && lowerdev != dst->remote_dev) {
+	if (lowerdev && lowerdev != dst->remote_dev)
 		dst->remote_dev = lowerdev;
-		netdev_update_lockdep_key(lowerdev);
-	}
 	vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
 	return 0;
 }
  * This is a natural nesting, which needs a split lock type.
  */
 static struct lock_class_key hostap_netdev_xmit_lock_key;
+static struct lock_class_key hostap_netdev_addr_lock_key;

 static void prism2_set_lockdep_class_one(struct net_device *dev,
 					 struct netdev_queue *txq,

 static void prism2_set_lockdep_class(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock,
+			  &hostap_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
 }
  *	for hardware timestamping
  * @sfp_bus:	attached &struct sfp_bus structure.
  *
- * @addr_list_lock_key:	lockdep class annotating
- *			net_device->addr_list_lock spinlock
  * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
  * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
  *

 #endif
 	struct phy_device	*phydev;
 	struct sfp_bus		*sfp_bus;
-	struct lock_class_key	addr_list_lock_key;
 	struct lock_class_key	*qdisc_tx_busylock;
 	struct lock_class_key	*qdisc_running_key;
 	bool			proto_down;
 	static struct lock_class_key qdisc_tx_busylock_key;	\
 	static struct lock_class_key qdisc_running_key;		\
 	static struct lock_class_key qdisc_xmit_lock_key;	\
+	static struct lock_class_key dev_addr_list_lock_key;	\
 	unsigned int i;						\
 								\
 	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
 	(dev)->qdisc_running_key = &qdisc_running_key;		\
+	lockdep_set_class(&(dev)->addr_list_lock,		\
+			  &dev_addr_list_lock_key);		\
 	for (i = 0; i < (dev)->num_tx_queues; i++)		\
 		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
 				  &qdisc_xmit_lock_key);	\
 }

 void netif_tx_stop_all_queues(struct net_device *dev);
-void netdev_update_lockdep_key(struct net_device *dev);

 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {

 	spin_lock(&dev->addr_list_lock);
 }

+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+	spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+}
+
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
 	spin_lock_bh(&dev->addr_list_lock);
  * separate class since they always nest.
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;

 static void vlan_dev_set_lockdep_one(struct net_device *dev,
 				     struct netdev_queue *txq,

 	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
 }

-static void vlan_dev_set_lockdep_class(struct net_device *dev)
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 {
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &vlan_netdev_addr_lock_key,
+				       subclass);
 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
 }

 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
-	vlan_dev_set_lockdep_class(dev);
+	vlan_dev_set_lockdep_class(dev, dev->lower_level);

 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan->vlan_pcpu_stats)
  * separate class since they always nest.
  */
 static struct lock_class_key batadv_netdev_xmit_lock_key;
+static struct lock_class_key batadv_netdev_addr_lock_key;

 /**
  * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue

  */
 static void batadv_set_lockdep_class(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
 }
+static struct lock_class_key bridge_netdev_addr_lock_key;
+
+static void br_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+}
+
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);

+	br_set_lockdep_class(dev);
"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {

 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 				   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+	int i;
+
+	i = netdev_lock_pos(dev->type);
+	lockdep_set_class_and_name(&dev->addr_list_lock,
+				   &netdev_addr_lock_key[i],
+				   netdev_lock_name[i]);
+}
 #else
 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 						 unsigned short dev_type)
 {
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+}
 #endif

 /*******************************************************************************
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);

-void netdev_update_lockdep_key(struct net_device *dev)
-{
-	lockdep_unregister_key(&dev->addr_list_lock_key);
-	lockdep_register_key(&dev->addr_list_lock_key);
-
-	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
-}
-EXPORT_SYMBOL(netdev_update_lockdep_key);
-
 /**
  *	register_netdevice - register a network device
  *	@dev: device to register

 		return ret;

 	spin_lock_init(&dev->addr_list_lock);
-	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+	netdev_set_addr_lockdep_class(dev);

 	ret = dev_get_valid_name(net, dev, dev->name);
 	if (ret < 0)

 	dev_net_set(dev, &init_net);

-	lockdep_register_key(&dev->addr_list_lock_key);
-
 	dev->gso_max_size = GSO_MAX_SIZE;
 	dev->gso_max_segs = GSO_MAX_SEGS;
 	dev->upper_level = 1;

 	free_percpu(dev->xdp_bulkq);
 	dev->xdp_bulkq = NULL;

-	lockdep_unregister_key(&dev->addr_list_lock_key);
-
 	/* Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

+	netif_addr_lock_nested(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);

 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

+	netif_addr_lock_nested(to);
 	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);

 		return;

 	netif_addr_lock_bh(from);
+	netif_addr_lock_nested(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);

 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

+	netif_addr_lock_nested(to);
 	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);

 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

+	netif_addr_lock_nested(to);
 	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);

 		return;

 	netif_addr_lock_bh(from);
+	netif_addr_lock_nested(to);
 	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);
 		err = ops->ndo_del_slave(upper_dev, dev);
 		if (err)
 			return err;
-		netdev_update_lockdep_key(dev);
 	} else {
 		return -EOPNOTSUPP;
 	}
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
 	int ret;

 	wmb();

 	dev->dsa_ptr = cpu_dp;
+	lockdep_set_class(&dev->addr_list_lock,
+			  &dsa_master_addr_list_lock_key);

 	ret = dsa_master_ethtool_setup(dev);
 	if (ret)
 		return ret;
  * separate class since they always nest.
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
+static struct lock_class_key nr_netdev_addr_lock_key;

 static void nr_set_lockdep_one(struct net_device *dev,
 			       struct netdev_queue *txq,

 static void nr_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
 }
  * separate class since they always nest.
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
+static struct lock_class_key rose_netdev_addr_lock_key;

 static void rose_set_lockdep_one(struct net_device *dev,
 				 struct netdev_queue *txq,

 static void rose_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
 }