From: Paolo Abeni Date: Thu, 8 Sep 2022 16:34:54 +0000 (+0200) Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net X-Git-Tag: v6.1-rc1~170^2~233 X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/9f8f1933dce555d3c246f447f54fca8de8889da9?hp=-c Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net drivers/net/ethernet/freescale/fec.h 7d650df99d52 ("net: fec: add pm_qos support on imx6q platform") 40c79ce13b03 ("net: fec: add stop mode support for imx8 platform") Signed-off-by: Paolo Abeni --- 9f8f1933dce555d3c246f447f54fca8de8889da9 diff --combined MAINTAINERS index 9688a27deef1,f1390b8270b2..74036b51911d --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -878,13 -878,6 +878,13 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/altera/ +ALTERA TSE PCS +M: Maxime Chevallier +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/pcs/pcs-altera-tse.c +F: include/linux/pcs-altera-tse.h + ALTERA UART/JTAG UART SERIAL DRIVERS M: Tobias Klauser L: linux-serial@vger.kernel.org @@@ -5729,6 -5722,13 +5729,6 @@@ F: include/linux/tfrc. F: include/uapi/linux/dccp.h F: net/dccp/ -DECnet NETWORK LAYER -L: linux-decnet-user@lists.sourceforge.net -S: Orphan -W: http://linux-decnet.sourceforge.net -F: Documentation/networking/decnet.rst -F: net/decnet/ - DECSTATION PLATFORM SUPPORT M: "Maciej W. Rozycki" L: linux-mips@vger.kernel.org @@@ -10032,6 -10032,7 +10032,7 @@@ F: Documentation/devicetree/bindings/in F: Documentation/devicetree/bindings/serio/ F: Documentation/input/ F: drivers/input/ + F: include/dt-bindings/input/ F: include/linux/input.h F: include/linux/input/ F: include/uapi/linux/input-event-codes.h @@@ -17531,9 -17532,19 +17532,19 @@@ M: Conor Dooley L: linux-riscv@lists.infradead.org S: Supported + F: Documentation/devicetree/bindings/clock/microchip,mpfs.yaml + F: Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml + F: Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml + F: Documentation/devicetree/bindings/mailbox/microchip,mpfs-mailbox.yaml + F: Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml + F: Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml + F: Documentation/devicetree/bindings/soc/microchip/microchip,mpfs-sys-controller.yaml + F: Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml + F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml F: arch/riscv/boot/dts/microchip/ F: drivers/char/hw_random/mpfs-rng.c F: drivers/clk/microchip/clk-mpfs.c + F: drivers/i2c/busses/i2c-microchip-core.c F: drivers/mailbox/mailbox-mpfs.c F: drivers/pci/controller/pcie-microchip-host.c F: drivers/rtc/rtc-mpfs.c @@@ -20764,6 -20775,7 +20775,7 @@@ UBLK USERSPACE BLOCK DRIVE M: Ming Lei L: linux-block@vger.kernel.org S: Maintained + F: Documentation/block/ublk.rst F: drivers/block/ublk_drv.c F: include/uapi/linux/ublk_cmd.h @@@ -21847,11 -21859,9 +21859,11 @@@ F: drivers/input/tablet/wacom_serial4. 
WANGXUN ETHERNET DRIVER M: Jiawen Wu +M: Mengyuan Lou +W: https://www.net-swift.com L: netdev@vger.kernel.org S: Maintained -F: Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst +F: Documentation/networking/device_drivers/ethernet/wangxun/* F: drivers/net/ethernet/wangxun/ WATCHDOG DEVICE DRIVERS @@@ -22307,7 -22317,7 +22319,7 @@@ M: Shubhrajyoti Datta R: Michal Simek S: Maintained - F: Documentation/devicetree/bindings/gpio/gpio-xilinx.txt + F: Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml F: Documentation/devicetree/bindings/gpio/gpio-zynq.yaml F: drivers/gpio/gpio-xilinx.c F: drivers/gpio/gpio-zynq.c diff --combined drivers/net/bonding/bond_main.c index dc618bf51c5e,5c2febe94428..ddd07395827a --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -3167,6 -3167,9 +3167,9 @@@ static void bond_ns_send_all(struct bon found: if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr)) bond_ns_send(slave, &targets[i], &saddr, tags); + else + bond_ns_send(slave, &targets[i], &in6addr_any, tags); + dst_release(dst); kfree(tags); } @@@ -3198,12 -3201,19 +3201,19 @@@ static bool bond_has_this_ip6(struct bo return ret; } - static void bond_validate_ns(struct bonding *bond, struct slave *slave, + static void bond_validate_na(struct bonding *bond, struct slave *slave, struct in6_addr *saddr, struct in6_addr *daddr) { int i; - if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) { + /* Ignore NAs that: + * 1. Source address is unspecified address. + * 2. Dest address is neither all-nodes multicast address nor + * exist on bond interface. + */ + if (ipv6_addr_any(saddr) || + (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) && + !bond_has_this_ip6(bond, daddr))) { slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", __func__, saddr, daddr); return; @@@ -3246,14 -3256,14 +3256,14 @@@ static int bond_na_rcv(const struct sk_ * see bond_arp_rcv(). 
*/ if (bond_is_active_slave(slave)) - bond_validate_ns(bond, slave, saddr, daddr); + bond_validate_na(bond, slave, saddr, daddr); else if (curr_active_slave && time_after(slave_last_rx(bond, curr_active_slave), curr_active_slave->last_link_up)) - bond_validate_ns(bond, slave, saddr, daddr); + bond_validate_na(bond, slave, saddr, daddr); else if (curr_arp_slave && bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) - bond_validate_ns(bond, slave, saddr, daddr); + bond_validate_na(bond, slave, saddr, daddr); out: return RX_HANDLER_ANOTHER; @@@ -5619,7 -5629,7 +5629,7 @@@ static int bond_ethtool_get_link_ksetti static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d", BOND_ABI_VERSION); } diff --combined drivers/net/dsa/microchip/ksz_common.c index 986b7c8f5269,872aba63e7d4..3b3b2046da2e --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@@ -170,6 -170,13 +170,13 @@@ static const struct ksz_dev_ops ksz8_de .exit = ksz8_switch_exit, }; + static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, + bool rx_pause); + static const struct ksz_dev_ops ksz9477_dev_ops = { .setup = ksz9477_setup, .get_port_addr = ksz9477_get_port_addr, @@@ -196,6 -203,7 +203,7 @@@ .mdb_del = ksz9477_mdb_del, .change_mtu = ksz9477_change_mtu, .max_mtu = ksz9477_max_mtu, + .phylink_mac_link_up = ksz9477_phylink_mac_link_up, .config_cpu_port = ksz9477_config_cpu_port, .enable_stp_addr = ksz9477_enable_stp_addr, .reset = ksz9477_reset_switch, @@@ -205,7 -213,6 +213,7 @@@ static const struct ksz_dev_ops lan937x_dev_ops = { .setup = lan937x_setup, + .teardown = lan937x_teardown, .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, @@@ -231,6 -238,7 +239,7 @@@ .mdb_del = ksz9477_mdb_del, .change_mtu = lan937x_change_mtu, .max_mtu = ksz9477_max_mtu, + .phylink_mac_link_up = ksz9477_phylink_mac_link_up, .config_cpu_port = lan937x_config_cpu_port, .enable_stp_addr = ksz9477_enable_stp_addr, .reset = lan937x_reset_switch, @@@ -413,636 -421,7 +422,636 @@@ static const u8 lan937x_shifts[] = [ALU_STAT_INDEX] = 8, }; +static const struct regmap_range ksz8563_valid_regs[] = { + regmap_reg_range(0x0000, 0x0003), + regmap_reg_range(0x0006, 0x0006), + regmap_reg_range(0x000f, 0x001f), + regmap_reg_range(0x0100, 0x0100), + regmap_reg_range(0x0104, 0x0107), + regmap_reg_range(0x010d, 0x010d), + regmap_reg_range(0x0110, 0x0113), + regmap_reg_range(0x0120, 0x012b), + regmap_reg_range(0x0201, 0x0201), + regmap_reg_range(0x0210, 0x0213), + regmap_reg_range(0x0300, 0x0300), + regmap_reg_range(0x0302, 0x031b), + regmap_reg_range(0x0320, 0x032b), + regmap_reg_range(0x0330, 0x0336), + regmap_reg_range(0x0338, 0x033e), + regmap_reg_range(0x0340, 0x035f), + regmap_reg_range(0x0370, 0x0370), + regmap_reg_range(0x0378, 0x0378), + regmap_reg_range(0x037c, 0x037d), + regmap_reg_range(0x0390, 0x0393), + regmap_reg_range(0x0400, 0x040e), + regmap_reg_range(0x0410, 0x042f), + regmap_reg_range(0x0500, 0x0519), + regmap_reg_range(0x0520, 0x054b), + regmap_reg_range(0x0550, 0x05b3), + + /* port 1 */ + regmap_reg_range(0x1000, 0x1001), + regmap_reg_range(0x1004, 
0x100b), + regmap_reg_range(0x1013, 0x1013), + regmap_reg_range(0x1017, 0x1017), + regmap_reg_range(0x101b, 0x101b), + regmap_reg_range(0x101f, 0x1021), + regmap_reg_range(0x1030, 0x1030), + regmap_reg_range(0x1100, 0x1111), + regmap_reg_range(0x111a, 0x111d), + regmap_reg_range(0x1122, 0x1127), + regmap_reg_range(0x112a, 0x112b), + regmap_reg_range(0x1136, 0x1139), + regmap_reg_range(0x113e, 0x113f), + regmap_reg_range(0x1400, 0x1401), + regmap_reg_range(0x1403, 0x1403), + regmap_reg_range(0x1410, 0x1417), + regmap_reg_range(0x1420, 0x1423), + regmap_reg_range(0x1500, 0x1507), + regmap_reg_range(0x1600, 0x1612), + regmap_reg_range(0x1800, 0x180f), + regmap_reg_range(0x1900, 0x1907), + regmap_reg_range(0x1914, 0x191b), + regmap_reg_range(0x1a00, 0x1a03), + regmap_reg_range(0x1a04, 0x1a08), + regmap_reg_range(0x1b00, 0x1b01), + regmap_reg_range(0x1b04, 0x1b04), + regmap_reg_range(0x1c00, 0x1c05), + regmap_reg_range(0x1c08, 0x1c1b), + + /* port 2 */ + regmap_reg_range(0x2000, 0x2001), + regmap_reg_range(0x2004, 0x200b), + regmap_reg_range(0x2013, 0x2013), + regmap_reg_range(0x2017, 0x2017), + regmap_reg_range(0x201b, 0x201b), + regmap_reg_range(0x201f, 0x2021), + regmap_reg_range(0x2030, 0x2030), + regmap_reg_range(0x2100, 0x2111), + regmap_reg_range(0x211a, 0x211d), + regmap_reg_range(0x2122, 0x2127), + regmap_reg_range(0x212a, 0x212b), + regmap_reg_range(0x2136, 0x2139), + regmap_reg_range(0x213e, 0x213f), + regmap_reg_range(0x2400, 0x2401), + regmap_reg_range(0x2403, 0x2403), + regmap_reg_range(0x2410, 0x2417), + regmap_reg_range(0x2420, 0x2423), + regmap_reg_range(0x2500, 0x2507), + regmap_reg_range(0x2600, 0x2612), + regmap_reg_range(0x2800, 0x280f), + regmap_reg_range(0x2900, 0x2907), + regmap_reg_range(0x2914, 0x291b), + regmap_reg_range(0x2a00, 0x2a03), + regmap_reg_range(0x2a04, 0x2a08), + regmap_reg_range(0x2b00, 0x2b01), + regmap_reg_range(0x2b04, 0x2b04), + regmap_reg_range(0x2c00, 0x2c05), + regmap_reg_range(0x2c08, 0x2c1b), + + /* port 3 */ + regmap_reg_range(0x3000, 0x3001), + regmap_reg_range(0x3004, 0x300b), + regmap_reg_range(0x3013, 0x3013), + regmap_reg_range(0x3017, 0x3017), + regmap_reg_range(0x301b, 0x301b), + regmap_reg_range(0x301f, 0x3021), + regmap_reg_range(0x3030, 0x3030), + regmap_reg_range(0x3300, 0x3301), + regmap_reg_range(0x3303, 0x3303), + regmap_reg_range(0x3400, 0x3401), + regmap_reg_range(0x3403, 0x3403), + regmap_reg_range(0x3410, 0x3417), + regmap_reg_range(0x3420, 0x3423), + regmap_reg_range(0x3500, 0x3507), + regmap_reg_range(0x3600, 0x3612), + regmap_reg_range(0x3800, 0x380f), + regmap_reg_range(0x3900, 0x3907), + regmap_reg_range(0x3914, 0x391b), + regmap_reg_range(0x3a00, 0x3a03), + regmap_reg_range(0x3a04, 0x3a08), + regmap_reg_range(0x3b00, 0x3b01), + regmap_reg_range(0x3b04, 0x3b04), + regmap_reg_range(0x3c00, 0x3c05), + regmap_reg_range(0x3c08, 0x3c1b), +}; + +static const struct regmap_access_table ksz8563_register_set = { + .yes_ranges = ksz8563_valid_regs, + .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs), +}; + +static const struct regmap_range ksz9477_valid_regs[] = { + regmap_reg_range(0x0000, 0x0003), + regmap_reg_range(0x0006, 0x0006), + regmap_reg_range(0x0010, 0x001f), + regmap_reg_range(0x0100, 0x0100), + regmap_reg_range(0x0103, 0x0107), + regmap_reg_range(0x010d, 0x010d), + regmap_reg_range(0x0110, 0x0113), + regmap_reg_range(0x0120, 0x012b), + regmap_reg_range(0x0201, 0x0201), + regmap_reg_range(0x0210, 0x0213), + regmap_reg_range(0x0300, 0x0300), + regmap_reg_range(0x0302, 0x031b), + regmap_reg_range(0x0320, 0x032b), + 
regmap_reg_range(0x0330, 0x0336), + regmap_reg_range(0x0338, 0x033b), + regmap_reg_range(0x033e, 0x033e), + regmap_reg_range(0x0340, 0x035f), + regmap_reg_range(0x0370, 0x0370), + regmap_reg_range(0x0378, 0x0378), + regmap_reg_range(0x037c, 0x037d), + regmap_reg_range(0x0390, 0x0393), + regmap_reg_range(0x0400, 0x040e), + regmap_reg_range(0x0410, 0x042f), + regmap_reg_range(0x0444, 0x044b), + regmap_reg_range(0x0450, 0x046f), + regmap_reg_range(0x0500, 0x0519), + regmap_reg_range(0x0520, 0x054b), + regmap_reg_range(0x0550, 0x05b3), + regmap_reg_range(0x0604, 0x060b), + regmap_reg_range(0x0610, 0x0612), + regmap_reg_range(0x0614, 0x062c), + regmap_reg_range(0x0640, 0x0645), + regmap_reg_range(0x0648, 0x064d), + + /* port 1 */ + regmap_reg_range(0x1000, 0x1001), + regmap_reg_range(0x1013, 0x1013), + regmap_reg_range(0x1017, 0x1017), + regmap_reg_range(0x101b, 0x101b), + regmap_reg_range(0x101f, 0x1020), + regmap_reg_range(0x1030, 0x1030), + regmap_reg_range(0x1100, 0x1115), + regmap_reg_range(0x111a, 0x111f), + regmap_reg_range(0x1122, 0x1127), + regmap_reg_range(0x112a, 0x112b), + regmap_reg_range(0x1136, 0x1139), + regmap_reg_range(0x113e, 0x113f), + regmap_reg_range(0x1400, 0x1401), + regmap_reg_range(0x1403, 0x1403), + regmap_reg_range(0x1410, 0x1417), + regmap_reg_range(0x1420, 0x1423), + regmap_reg_range(0x1500, 0x1507), + regmap_reg_range(0x1600, 0x1613), + regmap_reg_range(0x1800, 0x180f), + regmap_reg_range(0x1820, 0x1827), + regmap_reg_range(0x1830, 0x1837), + regmap_reg_range(0x1840, 0x184b), + regmap_reg_range(0x1900, 0x1907), + regmap_reg_range(0x1914, 0x191b), + regmap_reg_range(0x1920, 0x1920), + regmap_reg_range(0x1923, 0x1927), + regmap_reg_range(0x1a00, 0x1a03), + regmap_reg_range(0x1a04, 0x1a07), + regmap_reg_range(0x1b00, 0x1b01), + regmap_reg_range(0x1b04, 0x1b04), + regmap_reg_range(0x1c00, 0x1c05), + regmap_reg_range(0x1c08, 0x1c1b), + + /* port 2 */ + regmap_reg_range(0x2000, 0x2001), + regmap_reg_range(0x2013, 0x2013), + regmap_reg_range(0x2017, 0x2017), + regmap_reg_range(0x201b, 0x201b), + regmap_reg_range(0x201f, 0x2020), + regmap_reg_range(0x2030, 0x2030), + regmap_reg_range(0x2100, 0x2115), + regmap_reg_range(0x211a, 0x211f), + regmap_reg_range(0x2122, 0x2127), + regmap_reg_range(0x212a, 0x212b), + regmap_reg_range(0x2136, 0x2139), + regmap_reg_range(0x213e, 0x213f), + regmap_reg_range(0x2400, 0x2401), + regmap_reg_range(0x2403, 0x2403), + regmap_reg_range(0x2410, 0x2417), + regmap_reg_range(0x2420, 0x2423), + regmap_reg_range(0x2500, 0x2507), + regmap_reg_range(0x2600, 0x2613), + regmap_reg_range(0x2800, 0x280f), + regmap_reg_range(0x2820, 0x2827), + regmap_reg_range(0x2830, 0x2837), + regmap_reg_range(0x2840, 0x284b), + regmap_reg_range(0x2900, 0x2907), + regmap_reg_range(0x2914, 0x291b), + regmap_reg_range(0x2920, 0x2920), + regmap_reg_range(0x2923, 0x2927), + regmap_reg_range(0x2a00, 0x2a03), + regmap_reg_range(0x2a04, 0x2a07), + regmap_reg_range(0x2b00, 0x2b01), + regmap_reg_range(0x2b04, 0x2b04), + regmap_reg_range(0x2c00, 0x2c05), + regmap_reg_range(0x2c08, 0x2c1b), + + /* port 3 */ + regmap_reg_range(0x3000, 0x3001), + regmap_reg_range(0x3013, 0x3013), + regmap_reg_range(0x3017, 0x3017), + regmap_reg_range(0x301b, 0x301b), + regmap_reg_range(0x301f, 0x3020), + regmap_reg_range(0x3030, 0x3030), + regmap_reg_range(0x3100, 0x3115), + regmap_reg_range(0x311a, 0x311f), + regmap_reg_range(0x3122, 0x3127), + regmap_reg_range(0x312a, 0x312b), + regmap_reg_range(0x3136, 0x3139), + regmap_reg_range(0x313e, 0x313f), + regmap_reg_range(0x3400, 0x3401), + 
regmap_reg_range(0x3403, 0x3403), + regmap_reg_range(0x3410, 0x3417), + regmap_reg_range(0x3420, 0x3423), + regmap_reg_range(0x3500, 0x3507), + regmap_reg_range(0x3600, 0x3613), + regmap_reg_range(0x3800, 0x380f), + regmap_reg_range(0x3820, 0x3827), + regmap_reg_range(0x3830, 0x3837), + regmap_reg_range(0x3840, 0x384b), + regmap_reg_range(0x3900, 0x3907), + regmap_reg_range(0x3914, 0x391b), + regmap_reg_range(0x3920, 0x3920), + regmap_reg_range(0x3923, 0x3927), + regmap_reg_range(0x3a00, 0x3a03), + regmap_reg_range(0x3a04, 0x3a07), + regmap_reg_range(0x3b00, 0x3b01), + regmap_reg_range(0x3b04, 0x3b04), + regmap_reg_range(0x3c00, 0x3c05), + regmap_reg_range(0x3c08, 0x3c1b), + + /* port 4 */ + regmap_reg_range(0x4000, 0x4001), + regmap_reg_range(0x4013, 0x4013), + regmap_reg_range(0x4017, 0x4017), + regmap_reg_range(0x401b, 0x401b), + regmap_reg_range(0x401f, 0x4020), + regmap_reg_range(0x4030, 0x4030), + regmap_reg_range(0x4100, 0x4115), + regmap_reg_range(0x411a, 0x411f), + regmap_reg_range(0x4122, 0x4127), + regmap_reg_range(0x412a, 0x412b), + regmap_reg_range(0x4136, 0x4139), + regmap_reg_range(0x413e, 0x413f), + regmap_reg_range(0x4400, 0x4401), + regmap_reg_range(0x4403, 0x4403), + regmap_reg_range(0x4410, 0x4417), + regmap_reg_range(0x4420, 0x4423), + regmap_reg_range(0x4500, 0x4507), + regmap_reg_range(0x4600, 0x4613), + regmap_reg_range(0x4800, 0x480f), + regmap_reg_range(0x4820, 0x4827), + regmap_reg_range(0x4830, 0x4837), + regmap_reg_range(0x4840, 0x484b), + regmap_reg_range(0x4900, 0x4907), + regmap_reg_range(0x4914, 0x491b), + regmap_reg_range(0x4920, 0x4920), + regmap_reg_range(0x4923, 0x4927), + regmap_reg_range(0x4a00, 0x4a03), + regmap_reg_range(0x4a04, 0x4a07), + regmap_reg_range(0x4b00, 0x4b01), + regmap_reg_range(0x4b04, 0x4b04), + regmap_reg_range(0x4c00, 0x4c05), + regmap_reg_range(0x4c08, 0x4c1b), + + /* port 5 */ + regmap_reg_range(0x5000, 0x5001), + regmap_reg_range(0x5013, 0x5013), + regmap_reg_range(0x5017, 0x5017), + regmap_reg_range(0x501b, 0x501b), + regmap_reg_range(0x501f, 0x5020), + regmap_reg_range(0x5030, 0x5030), + regmap_reg_range(0x5100, 0x5115), + regmap_reg_range(0x511a, 0x511f), + regmap_reg_range(0x5122, 0x5127), + regmap_reg_range(0x512a, 0x512b), + regmap_reg_range(0x5136, 0x5139), + regmap_reg_range(0x513e, 0x513f), + regmap_reg_range(0x5400, 0x5401), + regmap_reg_range(0x5403, 0x5403), + regmap_reg_range(0x5410, 0x5417), + regmap_reg_range(0x5420, 0x5423), + regmap_reg_range(0x5500, 0x5507), + regmap_reg_range(0x5600, 0x5613), + regmap_reg_range(0x5800, 0x580f), + regmap_reg_range(0x5820, 0x5827), + regmap_reg_range(0x5830, 0x5837), + regmap_reg_range(0x5840, 0x584b), + regmap_reg_range(0x5900, 0x5907), + regmap_reg_range(0x5914, 0x591b), + regmap_reg_range(0x5920, 0x5920), + regmap_reg_range(0x5923, 0x5927), + regmap_reg_range(0x5a00, 0x5a03), + regmap_reg_range(0x5a04, 0x5a07), + regmap_reg_range(0x5b00, 0x5b01), + regmap_reg_range(0x5b04, 0x5b04), + regmap_reg_range(0x5c00, 0x5c05), + regmap_reg_range(0x5c08, 0x5c1b), + + /* port 6 */ + regmap_reg_range(0x6000, 0x6001), + regmap_reg_range(0x6013, 0x6013), + regmap_reg_range(0x6017, 0x6017), + regmap_reg_range(0x601b, 0x601b), + regmap_reg_range(0x601f, 0x6020), + regmap_reg_range(0x6030, 0x6030), + regmap_reg_range(0x6300, 0x6301), + regmap_reg_range(0x6400, 0x6401), + regmap_reg_range(0x6403, 0x6403), + regmap_reg_range(0x6410, 0x6417), + regmap_reg_range(0x6420, 0x6423), + regmap_reg_range(0x6500, 0x6507), + regmap_reg_range(0x6600, 0x6613), + regmap_reg_range(0x6800, 0x680f), + 
regmap_reg_range(0x6820, 0x6827), + regmap_reg_range(0x6830, 0x6837), + regmap_reg_range(0x6840, 0x684b), + regmap_reg_range(0x6900, 0x6907), + regmap_reg_range(0x6914, 0x691b), + regmap_reg_range(0x6920, 0x6920), + regmap_reg_range(0x6923, 0x6927), + regmap_reg_range(0x6a00, 0x6a03), + regmap_reg_range(0x6a04, 0x6a07), + regmap_reg_range(0x6b00, 0x6b01), + regmap_reg_range(0x6b04, 0x6b04), + regmap_reg_range(0x6c00, 0x6c05), + regmap_reg_range(0x6c08, 0x6c1b), + + /* port 7 */ + regmap_reg_range(0x7000, 0x7001), + regmap_reg_range(0x7013, 0x7013), + regmap_reg_range(0x7017, 0x7017), + regmap_reg_range(0x701b, 0x701b), + regmap_reg_range(0x701f, 0x7020), + regmap_reg_range(0x7030, 0x7030), + regmap_reg_range(0x7200, 0x7203), + regmap_reg_range(0x7206, 0x7207), + regmap_reg_range(0x7300, 0x7301), + regmap_reg_range(0x7400, 0x7401), + regmap_reg_range(0x7403, 0x7403), + regmap_reg_range(0x7410, 0x7417), + regmap_reg_range(0x7420, 0x7423), + regmap_reg_range(0x7500, 0x7507), + regmap_reg_range(0x7600, 0x7613), + regmap_reg_range(0x7800, 0x780f), + regmap_reg_range(0x7820, 0x7827), + regmap_reg_range(0x7830, 0x7837), + regmap_reg_range(0x7840, 0x784b), + regmap_reg_range(0x7900, 0x7907), + regmap_reg_range(0x7914, 0x791b), + regmap_reg_range(0x7920, 0x7920), + regmap_reg_range(0x7923, 0x7927), + regmap_reg_range(0x7a00, 0x7a03), + regmap_reg_range(0x7a04, 0x7a07), + regmap_reg_range(0x7b00, 0x7b01), + regmap_reg_range(0x7b04, 0x7b04), + regmap_reg_range(0x7c00, 0x7c05), + regmap_reg_range(0x7c08, 0x7c1b), +}; + +static const struct regmap_access_table ksz9477_register_set = { + .yes_ranges = ksz9477_valid_regs, + .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs), +}; + +static const struct regmap_range ksz9896_valid_regs[] = { + regmap_reg_range(0x0000, 0x0003), + regmap_reg_range(0x0006, 0x0006), + regmap_reg_range(0x0010, 0x001f), + regmap_reg_range(0x0100, 0x0100), + regmap_reg_range(0x0103, 0x0107), + regmap_reg_range(0x010d, 0x010d), + regmap_reg_range(0x0110, 0x0113), + regmap_reg_range(0x0120, 0x0127), + regmap_reg_range(0x0201, 0x0201), + regmap_reg_range(0x0210, 0x0213), + regmap_reg_range(0x0300, 0x0300), + regmap_reg_range(0x0302, 0x030b), + regmap_reg_range(0x0310, 0x031b), + regmap_reg_range(0x0320, 0x032b), + regmap_reg_range(0x0330, 0x0336), + regmap_reg_range(0x0338, 0x033b), + regmap_reg_range(0x033e, 0x033e), + regmap_reg_range(0x0340, 0x035f), + regmap_reg_range(0x0370, 0x0370), + regmap_reg_range(0x0378, 0x0378), + regmap_reg_range(0x037c, 0x037d), + regmap_reg_range(0x0390, 0x0393), + regmap_reg_range(0x0400, 0x040e), + regmap_reg_range(0x0410, 0x042f), + + /* port 1 */ + regmap_reg_range(0x1000, 0x1001), + regmap_reg_range(0x1013, 0x1013), + regmap_reg_range(0x1017, 0x1017), + regmap_reg_range(0x101b, 0x101b), + regmap_reg_range(0x101f, 0x1020), + regmap_reg_range(0x1030, 0x1030), + regmap_reg_range(0x1100, 0x1115), + regmap_reg_range(0x111a, 0x111f), + regmap_reg_range(0x1122, 0x1127), + regmap_reg_range(0x112a, 0x112b), + regmap_reg_range(0x1136, 0x1139), + regmap_reg_range(0x113e, 0x113f), + regmap_reg_range(0x1400, 0x1401), + regmap_reg_range(0x1403, 0x1403), + regmap_reg_range(0x1410, 0x1417), + regmap_reg_range(0x1420, 0x1423), + regmap_reg_range(0x1500, 0x1507), + regmap_reg_range(0x1600, 0x1612), + regmap_reg_range(0x1800, 0x180f), + regmap_reg_range(0x1820, 0x1827), + regmap_reg_range(0x1830, 0x1837), + regmap_reg_range(0x1840, 0x184b), + regmap_reg_range(0x1900, 0x1907), + regmap_reg_range(0x1914, 0x1915), + regmap_reg_range(0x1a00, 0x1a03), + 
regmap_reg_range(0x1a04, 0x1a07), + regmap_reg_range(0x1b00, 0x1b01), + regmap_reg_range(0x1b04, 0x1b04), + + /* port 2 */ + regmap_reg_range(0x2000, 0x2001), + regmap_reg_range(0x2013, 0x2013), + regmap_reg_range(0x2017, 0x2017), + regmap_reg_range(0x201b, 0x201b), + regmap_reg_range(0x201f, 0x2020), + regmap_reg_range(0x2030, 0x2030), + regmap_reg_range(0x2100, 0x2115), + regmap_reg_range(0x211a, 0x211f), + regmap_reg_range(0x2122, 0x2127), + regmap_reg_range(0x212a, 0x212b), + regmap_reg_range(0x2136, 0x2139), + regmap_reg_range(0x213e, 0x213f), + regmap_reg_range(0x2400, 0x2401), + regmap_reg_range(0x2403, 0x2403), + regmap_reg_range(0x2410, 0x2417), + regmap_reg_range(0x2420, 0x2423), + regmap_reg_range(0x2500, 0x2507), + regmap_reg_range(0x2600, 0x2612), + regmap_reg_range(0x2800, 0x280f), + regmap_reg_range(0x2820, 0x2827), + regmap_reg_range(0x2830, 0x2837), + regmap_reg_range(0x2840, 0x284b), + regmap_reg_range(0x2900, 0x2907), + regmap_reg_range(0x2914, 0x2915), + regmap_reg_range(0x2a00, 0x2a03), + regmap_reg_range(0x2a04, 0x2a07), + regmap_reg_range(0x2b00, 0x2b01), + regmap_reg_range(0x2b04, 0x2b04), + + /* port 3 */ + regmap_reg_range(0x3000, 0x3001), + regmap_reg_range(0x3013, 0x3013), + regmap_reg_range(0x3017, 0x3017), + regmap_reg_range(0x301b, 0x301b), + regmap_reg_range(0x301f, 0x3020), + regmap_reg_range(0x3030, 0x3030), + regmap_reg_range(0x3100, 0x3115), + regmap_reg_range(0x311a, 0x311f), + regmap_reg_range(0x3122, 0x3127), + regmap_reg_range(0x312a, 0x312b), + regmap_reg_range(0x3136, 0x3139), + regmap_reg_range(0x313e, 0x313f), + regmap_reg_range(0x3400, 0x3401), + regmap_reg_range(0x3403, 0x3403), + regmap_reg_range(0x3410, 0x3417), + regmap_reg_range(0x3420, 0x3423), + regmap_reg_range(0x3500, 0x3507), + regmap_reg_range(0x3600, 0x3612), + regmap_reg_range(0x3800, 0x380f), + regmap_reg_range(0x3820, 0x3827), + regmap_reg_range(0x3830, 0x3837), + regmap_reg_range(0x3840, 0x384b), + regmap_reg_range(0x3900, 0x3907), + regmap_reg_range(0x3914, 0x3915), + regmap_reg_range(0x3a00, 0x3a03), + regmap_reg_range(0x3a04, 0x3a07), + regmap_reg_range(0x3b00, 0x3b01), + regmap_reg_range(0x3b04, 0x3b04), + + /* port 4 */ + regmap_reg_range(0x4000, 0x4001), + regmap_reg_range(0x4013, 0x4013), + regmap_reg_range(0x4017, 0x4017), + regmap_reg_range(0x401b, 0x401b), + regmap_reg_range(0x401f, 0x4020), + regmap_reg_range(0x4030, 0x4030), + regmap_reg_range(0x4100, 0x4115), + regmap_reg_range(0x411a, 0x411f), + regmap_reg_range(0x4122, 0x4127), + regmap_reg_range(0x412a, 0x412b), + regmap_reg_range(0x4136, 0x4139), + regmap_reg_range(0x413e, 0x413f), + regmap_reg_range(0x4400, 0x4401), + regmap_reg_range(0x4403, 0x4403), + regmap_reg_range(0x4410, 0x4417), + regmap_reg_range(0x4420, 0x4423), + regmap_reg_range(0x4500, 0x4507), + regmap_reg_range(0x4600, 0x4612), + regmap_reg_range(0x4800, 0x480f), + regmap_reg_range(0x4820, 0x4827), + regmap_reg_range(0x4830, 0x4837), + regmap_reg_range(0x4840, 0x484b), + regmap_reg_range(0x4900, 0x4907), + regmap_reg_range(0x4914, 0x4915), + regmap_reg_range(0x4a00, 0x4a03), + regmap_reg_range(0x4a04, 0x4a07), + regmap_reg_range(0x4b00, 0x4b01), + regmap_reg_range(0x4b04, 0x4b04), + + /* port 5 */ + regmap_reg_range(0x5000, 0x5001), + regmap_reg_range(0x5013, 0x5013), + regmap_reg_range(0x5017, 0x5017), + regmap_reg_range(0x501b, 0x501b), + regmap_reg_range(0x501f, 0x5020), + regmap_reg_range(0x5030, 0x5030), + regmap_reg_range(0x5100, 0x5115), + regmap_reg_range(0x511a, 0x511f), + regmap_reg_range(0x5122, 0x5127), + regmap_reg_range(0x512a, 
0x512b), + regmap_reg_range(0x5136, 0x5139), + regmap_reg_range(0x513e, 0x513f), + regmap_reg_range(0x5400, 0x5401), + regmap_reg_range(0x5403, 0x5403), + regmap_reg_range(0x5410, 0x5417), + regmap_reg_range(0x5420, 0x5423), + regmap_reg_range(0x5500, 0x5507), + regmap_reg_range(0x5600, 0x5612), + regmap_reg_range(0x5800, 0x580f), + regmap_reg_range(0x5820, 0x5827), + regmap_reg_range(0x5830, 0x5837), + regmap_reg_range(0x5840, 0x584b), + regmap_reg_range(0x5900, 0x5907), + regmap_reg_range(0x5914, 0x5915), + regmap_reg_range(0x5a00, 0x5a03), + regmap_reg_range(0x5a04, 0x5a07), + regmap_reg_range(0x5b00, 0x5b01), + regmap_reg_range(0x5b04, 0x5b04), + + /* port 6 */ + regmap_reg_range(0x6000, 0x6001), + regmap_reg_range(0x6013, 0x6013), + regmap_reg_range(0x6017, 0x6017), + regmap_reg_range(0x601b, 0x601b), + regmap_reg_range(0x601f, 0x6020), + regmap_reg_range(0x6030, 0x6030), + regmap_reg_range(0x6100, 0x6115), + regmap_reg_range(0x611a, 0x611f), + regmap_reg_range(0x6122, 0x6127), + regmap_reg_range(0x612a, 0x612b), + regmap_reg_range(0x6136, 0x6139), + regmap_reg_range(0x613e, 0x613f), + regmap_reg_range(0x6300, 0x6301), + regmap_reg_range(0x6400, 0x6401), + regmap_reg_range(0x6403, 0x6403), + regmap_reg_range(0x6410, 0x6417), + regmap_reg_range(0x6420, 0x6423), + regmap_reg_range(0x6500, 0x6507), + regmap_reg_range(0x6600, 0x6612), + regmap_reg_range(0x6800, 0x680f), + regmap_reg_range(0x6820, 0x6827), + regmap_reg_range(0x6830, 0x6837), + regmap_reg_range(0x6840, 0x684b), + regmap_reg_range(0x6900, 0x6907), + regmap_reg_range(0x6914, 0x6915), + regmap_reg_range(0x6a00, 0x6a03), + regmap_reg_range(0x6a04, 0x6a07), + regmap_reg_range(0x6b00, 0x6b01), + regmap_reg_range(0x6b04, 0x6b04), +}; + +static const struct regmap_access_table ksz9896_register_set = { + .yes_ranges = ksz9896_valid_regs, + .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs), +}; + const struct ksz_chip_data ksz_switch_chips[] = { + [KSZ8563] = { + .chip_id = KSZ8563_CHIP_ID, + .dev_name = "KSZ8563", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x07, /* can be configured as cpu port */ + .port_cnt = 3, /* total port count */ + .ops = &ksz9477_dev_ops, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, + .regs = ksz9477_regs, + .masks = ksz9477_masks, + .shifts = ksz9477_shifts, + .xmii_ctrl0 = ksz9477_xmii_ctrl0, + .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */ + .supports_mii = {false, false, true}, + .supports_rmii = {false, false, true}, + .supports_rgmii = {false, false, true}, + .internal_phy = {true, true, false}, + .gbit_capable = {false, false, true}, + .wr_table = &ksz8563_register_set, + .rd_table = &ksz8563_register_set, + }, + [KSZ8795] = { .chip_id = KSZ8795_CHIP_ID, .dev_name = "KSZ8795", @@@ -1175,40 -554,6 +1184,40 @@@ false, true, false}, .internal_phy = {true, true, true, true, true, false, false}, + .gbit_capable = {true, true, true, true, true, true, true}, + .wr_table = &ksz9477_register_set, + .rd_table = &ksz9477_register_set, + }, + + [KSZ9896] = { + .chip_id = KSZ9896_CHIP_ID, + .dev_name = "KSZ9896", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x3F, /* can be configured as cpu port */ + .port_cnt = 6, /* total physical port count */ + .ops = &ksz9477_dev_ops, + .phy_errata_9477 = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, + .regs = ksz9477_regs, + .masks = ksz9477_masks, + .shifts = 
ksz9477_shifts, + .xmii_ctrl0 = ksz9477_xmii_ctrl0, + .xmii_ctrl1 = ksz9477_xmii_ctrl1, + .supports_mii = {false, false, false, false, + false, true}, + .supports_rmii = {false, false, false, false, + false, true}, + .supports_rgmii = {false, false, false, false, + false, true}, + .internal_phy = {true, true, true, true, + true, false}, + .gbit_capable = {true, true, true, true, true, true}, + .wr_table = &ksz9896_register_set, + .rd_table = &ksz9896_register_set, }, [KSZ9897] = { @@@ -1237,7 -582,6 +1246,7 @@@ false, true, true}, .internal_phy = {true, true, true, true, true, false, false}, + .gbit_capable = {true, true, true, true, true, true, true}, }, [KSZ9893] = { @@@ -1261,7 -605,6 +1270,7 @@@ .supports_rmii = {false, false, true}, .supports_rgmii = {false, false, true}, .internal_phy = {true, true, false}, + .gbit_capable = {true, true, true}, }, [KSZ9567] = { @@@ -1290,7 -633,6 +1299,7 @@@ false, true, true}, .internal_phy = {true, true, true, true, true, false, false}, + .gbit_capable = {true, true, true, true, true, true, true}, }, [LAN9370] = { @@@ -1690,14 -1032,6 +1699,14 @@@ static int ksz_setup(struct dsa_switch return 0; } +static void ksz_teardown(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + + if (dev->dev_ops->teardown) + dev->dev_ops->teardown(ds); +} + static void port_r_cnt(struct ksz_device *dev, int port) { struct ksz_port_mib *mib = &dev->ports[port].mib; @@@ -1779,11 -1113,8 +1788,11 @@@ static int ksz_phy_read16(struct dsa_sw { struct ksz_device *dev = ds->priv; u16 val = 0xffff; + int ret; - dev->dev_ops->r_phy(dev, addr, reg, &val); + ret = dev->dev_ops->r_phy(dev, addr, reg, &val); + if (ret) + return ret; return val; } @@@ -1791,11 -1122,8 +1800,11 @@@ static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) { struct ksz_device *dev = ds->priv; + int ret; - dev->dev_ops->w_phy(dev, addr, reg, val); + ret = dev->dev_ops->w_phy(dev, addr, reg, val); + if (ret) + return ret; return 0; } @@@ -2047,12 -1375,10 +2056,12 @@@ static enum dsa_tag_protocol ksz_get_ta proto = DSA_TAG_PROTO_KSZ8795; if (dev->chip_id == KSZ8830_CHIP_ID || + dev->chip_id == KSZ8563_CHIP_ID || dev->chip_id == KSZ9893_CHIP_ID) proto = DSA_TAG_PROTO_KSZ9893; if (dev->chip_id == KSZ9477_CHIP_ID || + dev->chip_id == KSZ9896_CHIP_ID || dev->chip_id == KSZ9897_CHIP_ID || dev->chip_id == KSZ9567_CHIP_ID) proto = DSA_TAG_PROTO_KSZ9477; @@@ -2167,8 -1493,7 +2176,8 @@@ static void ksz_set_xmii(struct ksz_dev case PHY_INTERFACE_MODE_RGMII_RXID: data8 |= bitval[P_RGMII_SEL]; /* On KSZ9893, disable RGMII in-band status support */ - if (dev->features & IS_9893) + if (dev->chip_id == KSZ9893_CHIP_ID || + dev->chip_id == KSZ8563_CHIP_ID) data8 &= ~P_MII_MAC_MODE; break; default: @@@ -2340,13 -1665,13 +2349,13 @@@ static void ksz_duplex_flowctrl(struct ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val); } - static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, bool rx_pause) + static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, + bool rx_pause) { - struct ksz_device *dev = ds->priv; struct ksz_port *p; p = &dev->ports[port]; @@@ -2360,6 -1685,15 +2369,15 @@@ ksz_port_set_xmii_speed(dev, port, speed); ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause); + } + + static void 
ksz_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, bool rx_pause) + { + struct ksz_device *dev = ds->priv; if (dev->dev_ops->phylink_mac_link_up) dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, @@@ -2369,7 -1703,7 +2387,7 @@@ static int ksz_switch_detect(struct ksz_device *dev) { - u8 id1, id2; + u8 id1, id2, id4; u16 id16; u32 id32; int ret; @@@ -2414,8 -1748,8 +2432,8 @@@ switch (id32) { case KSZ9477_CHIP_ID: + case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: - case KSZ9893_CHIP_ID: case KSZ9567_CHIP_ID: case LAN9370_CHIP_ID: case LAN9371_CHIP_ID: @@@ -2423,18 -1757,6 +2441,18 @@@ case LAN9373_CHIP_ID: case LAN9374_CHIP_ID: dev->chip_id = id32; + break; + case KSZ9893_CHIP_ID: + ret = ksz_read8(dev, REG_CHIP_ID4, + &id4); + if (ret) + return ret; + + if (id4 == SKU_ID_KSZ8563) + dev->chip_id = KSZ8563_CHIP_ID; + else + dev->chip_id = KSZ9893_CHIP_ID; + break; default: dev_err(dev->dev, @@@ -2449,7 -1771,6 +2467,7 @@@ static const struct dsa_switch_ops ksz_ .get_tag_protocol = ksz_get_tag_protocol, .get_phy_flags = ksz_get_phy_flags, .setup = ksz_setup, + .teardown = ksz_teardown, .phy_read = ksz_phy_read16, .phy_write = ksz_phy_write16, .phylink_get_caps = ksz_phylink_get_caps, @@@ -2614,9 -1935,6 +2632,9 @@@ int ksz_switch_register(struct ksz_devi GFP_KERNEL); if (!dev->ports[i].mib.counters) return -ENOMEM; + + dev->ports[i].ksz_dev = dev; + dev->ports[i].num = i; } /* set the real number of ports */ diff --combined drivers/net/ethernet/freescale/fec.h index 68bc16058bae,d77ee8936c6a..99cfe6ab41fc --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -16,10 -16,9 +16,11 @@@ #include #include + #include #include #include +#include +#include #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ @@@ -500,6 -499,9 +501,9 @@@ struct bufdesc_ex /* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. 
*/ #define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22) + /* i.MX6Q adds pm_qos support */ + #define FEC_QUIRK_HAS_PMQOS BIT(23) + struct bufdesc_prop { int qid; /* Address of Rx and Tx buffers */ @@@ -559,7 -561,6 +563,6 @@@ struct fec_enet_private struct clk *clk_2x_txclk; bool ptp_clk_on; - struct mutex ptp_clk_mutex; unsigned int num_tx_queues; unsigned int num_rx_queues; @@@ -610,6 -611,7 +613,7 @@@ struct delayed_work time_keep; struct regulator *reg_phy; struct fec_stop_mode_gpr stop_gpr; + struct pm_qos_request pm_qos_req; unsigned int tx_align; unsigned int rx_align; @@@ -643,8 -645,6 +647,8 @@@ u8 at_inc_corr; } ptp_saved_state; + struct imx_sc_ipc *ipc_handle; + u64 ethtool_stats[]; }; diff --combined drivers/net/ethernet/freescale/fec_main.c index 8ba8eb340b92,6152f6dbf1bc..ad01db156972 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -111,7 -111,8 +111,8 @@@ static const struct fec_devinfo fec_imx .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII | + FEC_QUIRK_HAS_PMQOS, }; static const struct fec_devinfo fec_mvf600_info = { @@@ -1181,34 -1182,6 +1182,34 @@@ fec_restart(struct net_device *ndev } +static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) +{ + if (!(of_machine_is_compatible("fsl,imx8qm") || + of_machine_is_compatible("fsl,imx8qxp") || + of_machine_is_compatible("fsl,imx8dxl"))) + return 0; + + return imx_scu_get_handle(&fep->ipc_handle); +} + +static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) +{ + struct device_node *np = fep->pdev->dev.of_node; + u32 rsrc_id, val; + int idx; + + if (!np || !fep->ipc_handle) + return; + + idx = of_alias_get_id(np, "ethernet"); + if (idx < 0) + idx = 0; + rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; + + val = enabled ? 
1 : 0; + imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); +} + static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) { struct fec_platform_data *pdata = fep->pdev->dev.platform_data; @@@ -1224,8 -1197,6 +1225,8 @@@ BIT(stop_gpr->bit), 0); } else if (pdata && pdata->sleep_mode_enable) { pdata->sleep_mode_enable(enabled); + } else { + fec_enet_ipg_stop_set(fep, enabled); } } @@@ -2058,6 -2029,7 +2059,7 @@@ static void fec_enet_phy_reset_after_cl static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); + unsigned long flags; int ret; if (enable) { @@@ -2066,15 -2038,15 +2068,15 @@@ return ret; if (fep->clk_ptp) { - mutex_lock(&fep->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); ret = clk_prepare_enable(fep->clk_ptp); if (ret) { - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); goto failed_clk_ptp; } else { fep->ptp_clk_on = true; } - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } ret = clk_prepare_enable(fep->clk_ref); @@@ -2089,10 -2061,10 +2091,10 @@@ } else { clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { - mutex_lock(&fep->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } clk_disable_unprepare(fep->clk_ref); clk_disable_unprepare(fep->clk_2x_txclk); @@@ -2105,10 -2077,10 +2107,10 @@@ failed_clk_2x_txclk clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { - mutex_lock(&fep->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } failed_clk_ptp: clk_disable_unprepare(fep->clk_enet_out); @@@ -2168,13 -2140,13 +2170,13 @@@ static int fec_enet_mii_probe(struct ne continue; if (dev_id--) continue; - strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; } if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; } @@@ -2358,9 -2330,9 +2360,9 @@@ static void fec_enet_get_drvinfo(struc { struct fec_enet_private *fep = netdev_priv(ndev); - strlcpy(info->driver, fep->pdev->dev.driver->name, + strscpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); + strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); } static int fec_enet_get_regs_len(struct net_device *ndev) @@@ -3274,6 -3246,9 +3276,9 @@@ fec_enet_open(struct net_device *ndev if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_used(); + if (fep->quirks & FEC_QUIRK_HAS_PMQOS) + cpu_latency_qos_add_request(&fep->pm_qos_req, 0); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@@ -3315,6 -3290,9 +3320,9 @@@ fec_enet_close(struct net_device *ndev fec_enet_update_ethtool_stats(ndev); fec_enet_clk_enable(ndev, false); + if (fep->quirks & FEC_QUIRK_HAS_PMQOS) + cpu_latency_qos_remove_request(&fep->pm_qos_req); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); 
pm_runtime_put_autosuspend(&fep->pdev->dev); @@@ -3881,10 -3859,6 +3889,10 @@@ fec_probe(struct platform_device *pdev !of_property_read_bool(np, "fsl,err006687-workaround-present")) fep->quirks |= FEC_QUIRK_ERR006687; + ret = fec_enet_ipc_handle_init(fep); + if (ret) + goto failed_ipc_init; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; @@@ -3941,7 -3915,7 +3949,7 @@@ } fep->ptp_clk_on = false; - mutex_init(&fep->ptp_clk_mutex); + spin_lock_init(&fep->tmreg_lock); /* clk_ref is optional, depends on board */ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); @@@ -4082,7 -4056,6 +4090,7 @@@ failed_rgmii_delay of_phy_deregister_fixed_link(np); of_node_put(phy_node); failed_stop_mode: +failed_ipc_init: failed_phy: dev_id--; failed_ioremap: diff --combined drivers/net/ethernet/freescale/fec_ptp.c index dc856eb1ce60,8dd5a2615a89..7be97ab84e50 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@@ -365,21 -365,19 +365,19 @@@ static int fec_ptp_adjtime(struct ptp_c */ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct fec_enet_private *adapter = + struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); u64 ns; unsigned long flags; - mutex_lock(&adapter->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); /* Check the ptp clock */ - if (!adapter->ptp_clk_on) { - mutex_unlock(&adapter->ptp_clk_mutex); + if (!fep->ptp_clk_on) { + spin_unlock_irqrestore(&fep->tmreg_lock, flags); return -EINVAL; } - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->tc); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - mutex_unlock(&adapter->ptp_clk_mutex); + ns = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@@ -404,10 -402,10 +402,10 @@@ static int fec_ptp_settime(struct ptp_c unsigned long flags; u32 counter; - mutex_lock(&fep->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); /* Check the ptp clock */ if (!fep->ptp_clk_on) { - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); return -EINVAL; } @@@ -417,11 -415,9 +415,9 @@@ */ counter = ns & fep->cc.mask; - spin_lock_irqsave(&fep->tmreg_lock, flags); writel(counter, fep->hwp + FEC_ATIME); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); - mutex_unlock(&fep->ptp_clk_mutex); return 0; } @@@ -518,13 -514,11 +514,11 @@@ static void fec_time_keep(struct work_s struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); unsigned long flags; - mutex_lock(&fep->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); if (fep->ptp_clk_on) { - spin_lock_irqsave(&fep->tmreg_lock, flags); timecounter_read(&fep->tc); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); } - mutex_unlock(&fep->ptp_clk_mutex); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); schedule_delayed_work(&fep->time_keep, HZ); } @@@ -578,7 -572,7 +572,7 @@@ void fec_ptp_init(struct platform_devic int ret; fep->ptp_caps.owner = THIS_MODULE; - strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; @@@ -599,8 -593,6 +593,6 @@@ } fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; - spin_lock_init(&fep->tmreg_lock); - fec_ptp_start_cyclecounter(ndev); 
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 9b2f18dfd0c4,10c1e1ea83a1..5e60ff79450d --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -66,7 -66,6 +66,7 @@@ static const struct pci_device_id i40e_ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, @@@ -3879,7 -3878,7 +3879,7 @@@ static void i40e_vsi_configure_msix(str wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); - /* Linked list for the queuepairs assigned to this vector */ + /* begin of linked list for RX queue assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; @@@ -3895,7 -3894,6 +3895,7 @@@ wr32(hw, I40E_QINT_RQCTL(qp), val); if (has_xdp) { + /* TX queue with next queue set to TX */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | @@@ -3905,7 -3903,7 +3905,7 @@@ wr32(hw, I40E_QINT_TQCTL(nextqp), val); } - + /* TX queue with next RX or end of linked list */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | @@@ -3974,6 -3972,7 +3974,6 @@@ static void i40e_configure_msi_and_lega struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - u32 val; /* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; @@@ -3990,20 -3989,28 +3990,20 @@@ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); - /* Associate the queue pair to the vector and enable the queue int */ - val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | - (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | - (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - - wr32(hw, I40E_QINT_RQCTL(0), val); + /* Associate the queue pair to the vector and enable the queue + * interrupt RX queue in linked list with next queue set to TX + */ + wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX)); if (i40e_enabled_xdp_vsi(vsi)) { - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| - (I40E_QUEUE_TYPE_TX - << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(nextqp), val); + /* TX queue in linked list with next queue set to TX */ + wr32(hw, I40E_QINT_TQCTL(nextqp), + I40E_QINT_TQCTL_VAL(nextqp, 0, TX)); } - val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | - (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); - - wr32(hw, I40E_QINT_TQCTL(0), val); + /* last TX queue so the next RX queue doesn't matter */ + wr32(hw, I40E_QINT_TQCTL(0), + I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX)); i40e_flush(hw); } @@@ -6652,6 -6659,9 +6652,9 @@@ static int i40e_configure_queue_channel vsi->tc_seid_map[i] = ch->seid; } } + + /* reset to reconfigure TX queue contexts */ + i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); return ret; err_free: @@@ -10694,7 -10704,7 +10697,7 @@@ static void i40e_send_version(struct i4 
dv.minor_version = 0xff; dv.build_version = 0xff; dv.subbuild_version = 0; - strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); + strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } @@@ -16042,23 -16052,23 +16045,23 @@@ static int i40e_probe(struct pci_dev *p switch (hw->bus.speed) { case i40e_bus_speed_8000: - strlcpy(speed, "8.0", PCI_SPEED_SIZE); break; + strscpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: - strlcpy(speed, "5.0", PCI_SPEED_SIZE); break; + strscpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: - strlcpy(speed, "2.5", PCI_SPEED_SIZE); break; + strscpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: - strlcpy(width, "8", PCI_WIDTH_SIZE); break; + strscpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: - strlcpy(width, "4", PCI_WIDTH_SIZE); break; + strscpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: - strlcpy(width, "2", PCI_WIDTH_SIZE); break; + strscpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: - strlcpy(width, "1", PCI_WIDTH_SIZE); break; + strscpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } diff --combined drivers/net/ethernet/intel/iavf/iavf_main.c index b62bf4eb6870,10aa99dfdcdb..1671e52b6ba2 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@@ -1270,138 -1270,66 +1270,138 @@@ static void iavf_up_complete(struct iav } /** - * iavf_down - Shutdown the connection processing + * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF + * yet and mark other to be removed. * @adapter: board private structure - * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void iavf_down(struct iavf_adapter *adapter) +static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct iavf_vlan_filter *vlf; - struct iavf_cloud_filter *cf; - struct iavf_fdir_fltr *fdir; - struct iavf_mac_filter *f; - struct iavf_adv_rss *rss; - - if (adapter->state <= __IAVF_DOWN_PENDING) - return; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - adapter->link_up = false; - iavf_napi_disable_all(adapter); - iavf_irq_disable(adapter); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); - /* clear the sync flag on all filters */ __dev_uc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL); /* remove all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->remove = true; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { + if (f->add) { + list_del(&f->list); + kfree(f); + } else { + f->remove = true; + } } /* remove all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->remove = true; + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, + list) { + if (vlf->add) { + list_del(&vlf->list); + kfree(vlf); + } else { + vlf->remove = true; + } } - spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and + * mark other to be removed. 
+ * @adapter: board private structure + **/ +static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) +{ + struct iavf_cloud_filter *cf, *cftmp; /* remove all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->del = true; + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->add) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } else { + cf->del = true; + } } spin_unlock_bh(&adapter->cloud_filter_list_lock); +} + +/** + * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark + * other to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp; /* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } } spin_unlock_bh(&adapter->fdir_fltr_lock); +} + +/** + * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark + * other to be removed. + * @adapter: board private structure + **/ +static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) +{ + struct iavf_adv_rss *rss, *rsstmp; /* remove all advance RSS configuration */ spin_lock_bh(&adapter->adv_rss_lock); - list_for_each_entry(rss, &adapter->adv_rss_list_head, list) - rss->state = IAVF_ADV_RSS_DEL_REQUEST; + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + list_del(&rss->list); + kfree(rss); + } else { + rss->state = IAVF_ADV_RSS_DEL_REQUEST; + } + } spin_unlock_bh(&adapter->adv_rss_lock); +} + +/** + * iavf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + **/ +void iavf_down(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->state <= __IAVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + + iavf_clear_mac_vlan_filters(adapter); + iavf_clear_cloud_filters(adapter); + iavf_clear_fdir_filters(adapter); + iavf_clear_adv_rss_conf(adapter); if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ @@@ -1410,16 -1338,11 +1410,16 @@@ * here for this to complete. The watchdog is still running * and it will take care of this. 
*/ - adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; + if (!list_empty(&adapter->mac_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; + if (!list_empty(&adapter->vlan_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!list_empty(&adapter->cloud_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + if (!list_empty(&adapter->fdir_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + if (!list_empty(&adapter->adv_rss_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } @@@ -2954,6 -2877,11 +2954,11 @@@ static void iavf_reset_task(struct work int i = 0, err; bool running; + /* Detach interface to avoid subsequent NDO callbacks */ + rtnl_lock(); + netif_device_detach(netdev); + rtnl_unlock(); + /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ @@@ -2961,7 -2889,7 +2966,7 @@@ if (adapter->state != __IAVF_REMOVE) queue_work(iavf_wq, &adapter->reset_task); - return; + goto reset_finish; } while (!mutex_trylock(&adapter->client_lock)) @@@ -3031,7 -2959,6 +3036,6 @@@ continue_reset if (running) { netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); } @@@ -3161,7 -3088,7 +3165,7 @@@ mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - return; + goto reset_finish; reset_err: if (running) { set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); @@@ -3172,6 -3099,10 +3176,10 @@@ mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + reset_finish: + rtnl_lock(); + netif_device_attach(netdev); + rtnl_unlock(); } /** @@@ -4250,7 -4181,6 +4258,7 @@@ err_unlock static int iavf_close(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); + u64 aq_to_restore; int status; mutex_lock(&adapter->crit_lock); @@@ -4263,29 -4193,6 +4271,29 @@@ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; + /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before + * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl + * deadlock with adminq_task() until iavf_close timeouts. We must send + * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make + * disable queues possible for vf. Give only necessary flags to + * iavf_down and save other to set them right before iavf_close() + * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and + * iavf will be in DOWN state. + */ + aq_to_restore = adapter->aq_required; + adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; + + /* Remove flags which we do not want to send after close or we want to + * send before disable queues. 
+ */ + aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | + IAVF_FLAG_AQ_ENABLE_QUEUES | + IAVF_FLAG_AQ_CONFIGURE_QUEUES | + IAVF_FLAG_AQ_ADD_VLAN_FILTER | + IAVF_FLAG_AQ_ADD_MAC_FILTER | + IAVF_FLAG_AQ_ADD_CLOUD_FILTER | + IAVF_FLAG_AQ_ADD_FDIR_FILTER | + IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); iavf_down(adapter); iavf_change_state(adapter, __IAVF_DOWN_PENDING); @@@ -4309,10 -4216,6 +4317,10 @@@ msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); + + mutex_lock(&adapter->crit_lock); + adapter->aq_required |= aq_to_restore; + mutex_unlock(&adapter->crit_lock); return 0; } diff --combined drivers/net/ethernet/intel/ice/ice_base.c index 6f092e06054e,1e3243808178..1e97242a8f85 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@@ -7,18 -7,6 +7,6 @@@ #include "ice_dcb_lib.h" #include "ice_sriov.h" - static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring) - { - rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL); - return !!rx_ring->xdp_buf; - } - - static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring) - { - rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); - return !!rx_ring->rx_buf; - } - /** * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI * @qs_cfg: gathered variables needed for PF->VSI queues assignment @@@ -417,7 -405,7 +405,7 @@@ static int ice_setup_rx_ctx(struct ice_ /* Strip the Ethernet CRC bytes before the packet is posted to host * memory. */ - rlan_ctx.crcstrip = 1; + rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS); /* L2TSEL flag defines the reported L2 Tags in the receive descriptor * and it needs to remain 1 for non-DVM capable configurations to not @@@ -519,11 -507,8 +507,8 @@@ int ice_vsi_cfg_rxq(struct ice_rx_ring xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id); - kfree(ring->rx_buf); ring->xsk_pool = ice_xsk_pool(ring); if (ring->xsk_pool) { - if (!ice_alloc_rx_buf_zc(ring)) - return -ENOMEM; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->rx_buf_len = @@@ -538,8 -523,6 +523,6 @@@ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { - if (!ice_alloc_rx_buf(ring)) - return -ENOMEM; if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ xdp_rxq_info_reg(&ring->xdp_rxq, diff --combined drivers/net/ethernet/intel/ice/ice_main.c index 9d031271cfda,8c30eea61b6d..7f59050e4122 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -2898,10 -2898,18 +2898,18 @@@ ice_xdp_setup_prog(struct ice_vsi *vsi if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } + /* reallocate Rx queues that are used for zero-copy */ + xdp_ring_err = ice_realloc_zc_buf(vsi, true); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_ring_err = ice_destroy_xdp_rings(vsi); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); + /* reallocate Rx queues that were used for zero-copy */ + xdp_ring_err = ice_realloc_zc_buf(vsi, false); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); } else { /* safe to call even when prog == vsi->xdp_prog as * dev_xdp_install in net/core/dev.c incremented prog's @@@ -3385,11 -3393,6 +3393,11 @@@ static void ice_set_netdev_features(str if (is_dvm_ena) 
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + /* Leave CRC / FCS stripping enabled by default, but allow the value to + * be changed at runtime + */ + netdev->hw_features |= NETIF_F_RXFCS; } /** @@@ -3910,7 -3913,7 +3918,7 @@@ static int ice_init_pf(struct ice_pf *p pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); if (!pf->avail_rxqs) { - devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); + bitmap_free(pf->avail_txqs); pf->avail_txqs = NULL; return -ENOMEM; } @@@ -3921,135 -3924,88 +3929,135 @@@ return 0; } +/** + * ice_reduce_msix_usage - Reduce usage of MSI-X vectors + * @pf: board private structure + * @v_remain: number of remaining MSI-X vectors to be distributed + * + * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. + * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of + * remaining vectors. + */ +static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) +{ + int v_rdma; + + if (!ice_is_rdma_ena(pf)) { + pf->num_lan_msix = v_remain; + return; + } + + /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + + if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { + dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining vectors to LAN MSIX */ + pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; + pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; + } else { + /* Split remaining MSIX with RDMA after accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } +} + /** * ice_ena_msix_range - Request a range of MSIX vectors from the OS * @pf: board private structure * - * compute the number of MSIX vectors required (v_budget) and request from - * the OS. Return the number of vectors reserved or negative on failure + * Compute the number of MSIX vectors wanted and request from the OS. Adjust + * device usage if there are not enough vectors. Return the number of vectors + * reserved or negative on failure. 
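The reduction logic in ice_reduce_msix_usage() boils down to a small piece of arithmetic over the vectors that remain once the "other" interrupts (OICR, flow director, switchdev) are reserved. The sketch below redoes that arithmetic as a standalone function with illustrative minimum values; the real ICE_MIN_* and ICE_RDMA_NUM_AEQ_MSIX constants differ.

#include <stdbool.h>

#define MIN_LAN_TXRX	1	/* illustrative minimums, not the driver's */
#define MIN_RDMA	2
#define RDMA_AEQ	4

struct msix_budget { int lan; int rdma; };

static struct msix_budget reduce_msix(int v_remain, bool rdma_ena)
{
	struct msix_budget b = { 0, 0 };
	int v_rdma = RDMA_AEQ + 1;	/* AEQ vectors plus one extra */

	if (!rdma_ena) {
		b.lan = v_remain;
	} else if (v_remain < MIN_LAN_TXRX + MIN_RDMA) {
		b.lan = MIN_LAN_TXRX;	/* too tight: give up on RDMA */
	} else if (v_remain < MIN_LAN_TXRX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		b.rdma = MIN_RDMA;	/* minimum RDMA, rest to LAN */
		b.lan = v_remain - MIN_RDMA;
	} else {
		b.rdma = (v_remain - RDMA_AEQ) / 2 + RDMA_AEQ;
		b.lan = v_remain - b.rdma;
	}
	return b;
}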
*/ static int ice_ena_msix_range(struct ice_pf *pf) { - int num_cpus, v_left, v_actual, v_other, v_budget = 0; + int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; struct device *dev = ice_pf_to_dev(pf); - int needed, err, i; + int err, i; - v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; num_cpus = num_online_cpus(); - /* reserve for LAN miscellaneous handler */ - needed = ICE_MIN_LAN_OICR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* reserve for flow director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { - needed = ICE_FDIR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - } - - /* reserve for switchdev */ - needed = ICE_ESWITCH_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* total used for non-traffic vectors */ - v_other = v_budget; - - /* reserve vectors for LAN traffic */ - needed = num_cpus; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_lan_msix = needed; - v_budget += needed; - v_left -= needed; - - /* reserve vectors for RDMA auxiliary driver */ + /* LAN miscellaneous handler */ + v_other = ICE_MIN_LAN_OICR_MSIX; + + /* Flow Director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + v_other += ICE_FDIR_MSIX; + + /* switchdev */ + v_other += ICE_ESWITCH_MSIX; + + v_wanted = v_other; + + /* LAN traffic */ + pf->num_lan_msix = num_cpus; + v_wanted += pf->num_lan_msix; + + /* RDMA auxiliary driver */ if (ice_is_rdma_ena(pf)) { - needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_rdma_msix = needed; - v_budget += needed; - v_left -= needed; + pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + v_wanted += pf->num_rdma_msix; } - pf->msix_entries = devm_kcalloc(dev, v_budget, + if (v_wanted > hw_num_msix) { + int v_remain; + + dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", + v_wanted, hw_num_msix); + + if (hw_num_msix < ICE_MIN_MSIX) { + err = -ERANGE; + goto exit_err; + } + + v_remain = hw_num_msix - v_other; + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { + v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; + v_remain = ICE_MIN_LAN_TXRX_MSIX; + } + + ice_reduce_msix_usage(pf, v_remain); + v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; + + dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", + pf->num_lan_msix); + if (ice_is_rdma_ena(pf)) + dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); + } + + pf->msix_entries = devm_kcalloc(dev, v_wanted, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { err = -ENOMEM; goto exit_err; } - for (i = 0; i < v_budget; i++) + for (i = 0; i < v_wanted; i++) pf->msix_entries[i].entry = i; /* actually reserve the vectors */ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_budget); + ICE_MIN_MSIX, v_wanted); if (v_actual < 0) { dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; goto msix_err; } - if (v_actual < v_budget) { + if (v_actual < v_wanted) { dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", - v_budget, v_actual); + v_wanted, v_actual); if (v_actual < ICE_MIN_MSIX) { /* error if we can't get minimum vectors */ @@@ -4058,11 -4014,38 +4066,11 @@@ goto msix_err; } else { int v_remain = v_actual - v_other; - int v_rdma = 0, v_min_rdma = 0; - if (ice_is_rdma_ena(pf)) { - /* Need at least 1 interrupt in addition to - * AEQ MSIX - */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - v_min_rdma = ICE_MIN_RDMA_MSIX; - } + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) + v_remain = ICE_MIN_LAN_TXRX_MSIX; - if (v_actual == ICE_MIN_MSIX || - v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { - dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining - * vectors to LAN MSIX - */ - pf->num_rdma_msix = v_min_rdma; - pf->num_lan_msix = v_remain - v_min_rdma; - } else { - /* Split remaining MSIX with RDMA after - * accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } + ice_reduce_msix_usage(pf, v_remain); dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); @@@ -4077,7 -4060,12 +4085,7 @@@ msix_err: devm_kfree(dev, pf->msix_entries); - goto exit_err; -no_hw_vecs_left_err: - dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", - needed, v_left); - err = -ERANGE; exit_err: pf->num_rdma_msix = 0; pf->num_lan_msix = 0; @@@ -4696,6 -4684,8 +4704,6 @@@ ice_probe(struct pci_dev *pdev, const s ice_set_safe_mode_caps(hw); } - hw->ucast_shared = true; - err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); @@@ -5754,9 -5744,6 +5762,9 @@@ ice_fdb_del(struct ndmsg *ndm, __always NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX) +#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_STAG_FILTER) @@@ -5843,14 -5830,6 +5851,14 @@@ ice_fix_features(struct net_device *net NETIF_F_HW_VLAN_STAG_TX); } + if (!(netdev->features & NETIF_F_RXFCS) && + (features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES) && + !ice_vsi_has_non_zero_vlans(np->vsi)) { + netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + } + return features; } @@@ -5944,13 -5923,6 +5952,13 @@@ ice_set_vlan_features(struct net_devic current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; if (current_vlan_features ^ requested_vlan_features) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); + return -EIO; + } + err = ice_set_vlan_offload_features(vsi, features); if (err) return err; @@@ -6032,23 -6004,6 +6040,23 @@@ ice_set_features(struct net_device *net if (ret) return ret; + /* Turn on receive of FCS aka CRC, and after setting this + * flag the packet data will have the 4 byte CRC appended + */ + if (changed & NETIF_F_RXFCS) { + if ((features & NETIF_F_RXFCS) && + (features & 
NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); + return -EIO; + } + + ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); + ret = ice_down_up(vsi); + if (ret) + return ret; + } + if (changed & NETIF_F_NTUPLE) { bool ena = !!(features & NETIF_F_NTUPLE); @@@ -6752,31 -6707,6 +6760,31 @@@ int ice_down(struct ice_vsi *vsi return 0; } +/** + * ice_down_up - shutdown the VSI connection and bring it up + * @vsi: the VSI to be reconnected + */ +int ice_down_up(struct ice_vsi *vsi) +{ + int ret; + + /* if DOWN already set, nothing to do */ + if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) + return 0; + + ret = ice_down(vsi); + if (ret) + return ret; + + ret = ice_up(vsi); + if (ret) { + netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); + return ret; + } + + return 0; +} + /** * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources * @vsi: VSI having resources allocated diff --combined drivers/net/ethernet/mediatek/mtk_ppe.h index 0dc10ad4aecd,69ffce04d630..8f786c47b61a --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@@ -293,6 -293,9 +293,9 @@@ mtk_ppe_check_skb(struct mtk_ppe *ppe, if (!ppe) return; + if (hash > MTK_PPE_HASH_MASK) + return; + now = (u16)jiffies; diff = now - ppe->foe_check_time[hash]; if (diff < HZ / 10) @@@ -302,6 -305,17 +305,6 @@@ __mtk_ppe_check_skb(ppe, skb, hash); } -static inline int -mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash) -{ - u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1); - - if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) - return -1; - - return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1); -} - int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, u8 pse_port, u8 *src_mac, u8 *dest_mac); int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 7d3c7ca7caf4,9af25be42401..0a2afc1a3124 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@@ -610,6 -610,7 +610,6 @@@ static int intel_mgbe_common_data(struc plat->int_snapshot_num = AUX_SNAPSHOT1; plat->ext_snapshot_num = AUX_SNAPSHOT0; - plat->has_crossts = true; plat->crosststamp = intel_crosststamp; plat->int_snapshot_en = 0; @@@ -1135,8 -1136,6 +1135,6 @@@ static void intel_eth_pci_remove(struc clk_disable_unprepare(priv->plat->stmmac_clk); clk_unregister_fixed_rate(priv->plat->stmmac_clk); - - pcim_iounmap_regions(pdev, BIT(0)); } static int __maybe_unused intel_eth_pci_suspend(struct device *dev) diff --combined drivers/net/wireless/mac80211_hwsim.c index ad9330ea3c96,1f301a5fb396..ed3b1c84d547 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@@ -652,6 -652,7 +652,6 @@@ struct mac80211_hwsim_data u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; - struct ieee80211_chanctx_conf *chanctx; int channels, idx; bool use_chanctx; bool destroy_on_close; @@@ -1298,8 -1299,6 +1298,8 @@@ static void mac80211_hwsim_config_mac_n struct sk_buff *skb; void *msg_head; + WARN_ON(!is_valid_ether_addr(addr)); + if (!_portid && !hwsim_virtio_enabled) return; @@@ -1562,19 -1561,6 +1562,19 @@@ static void mac80211_hwsim_add_vendor_r #endif } +static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data, + struct ieee80211_rx_status *rx_status, + struct sk_buff *skb) +{ + 
memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); + + mac80211_hwsim_add_vendor_rtap(skb); + + data->rx_pkts++; + data->rx_bytes += skb->len; + ieee80211_rx_irqsafe(data->hw, skb); +} + static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) @@@ -1702,7 -1688,13 +1702,7 @@@ rx_status.mactime = now + data2->tsf_offset; - memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); - - mac80211_hwsim_add_vendor_rtap(nskb); - - data2->rx_pkts++; - data2->rx_bytes += nskb->len; - ieee80211_rx_irqsafe(data2->hw, nskb); + mac80211_hwsim_rx(data2, &rx_status, nskb); } spin_unlock(&hwsim_radio_lock); @@@ -1722,7 -1714,12 +1722,7 @@@ mac80211_hwsim_select_tx_link(struct ma if (!vif->valid_links) return &vif->bss_conf; - /* FIXME: handle multicast TX properly */ - if (is_multicast_ether_addr(hdr->addr1) || WARN_ON_ONCE(!sta)) { - unsigned int first_link = ffs(vif->valid_links) - 1; - - return rcu_dereference(vif->link_conf[first_link]); - } + WARN_ON(is_multicast_ether_addr(hdr->addr1)); if (WARN_ON_ONCE(!sta->valid_links)) return &vif->bss_conf; @@@ -2869,6 -2866,11 +2869,6 @@@ static int mac80211_hwsim_croc(struct i static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - hwsim->chanctx = ctx; - mutex_unlock(&hwsim->mutex); hwsim_set_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", @@@ -2880,6 -2882,11 +2880,6 @@@ static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - hwsim->chanctx = NULL; - mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, @@@ -2892,6 -2899,11 +2892,6 @@@ static void mac80211_hwsim_change_chanc struct ieee80211_chanctx_conf *ctx, u32 changed) { - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - hwsim->chanctx = ctx; - mutex_unlock(&hwsim->mutex); hwsim_check_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", @@@ -2983,15 -2995,10 +2983,15 @@@ static int mac80211_hwsim_change_vif_li u16 old_links, u16 new_links, struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) { - unsigned long rem = old_links & ~new_links ?: BIT(0); + unsigned long rem = old_links & ~new_links; unsigned long add = new_links & ~old_links; int i; + if (!old_links) + rem |= BIT(0); + if (!new_links) + add |= BIT(0); + for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS) mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false); @@@ -3014,8 -3021,6 +3014,8 @@@ static int mac80211_hwsim_change_sta_li struct ieee80211_sta *sta, u16 old_links, u16 new_links) { + hwsim_check_sta_magic(sta); + return 0; } @@@ -3203,112 -3208,8 +3203,112 @@@ out_err static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = { { - .types_mask = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_STATION), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + 
IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xffff), + .tx_mcs_160 = cpu_to_le16(0xffff), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE, + .phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, + .phy_cap_info[4] = + IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | + IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, + .phy_cap_info[6] = + IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | + IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, + .phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW, + }, + + /* For all MCS and bandwidth, set 8 NSS for both Tx and + * Rx + */ + .eht_mcs_nss_supp = { + /* + * Since B0, B1, B2 and B3 are not set in + * the supported channel width set field in the + * HE PHY capabilities information field the + * device is a 20MHz only device on 2.4GHz band. + */ + .only_20mhz = { + .rx_tx_mcs7_max_nss = 0x88, + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + }, + /* PPE threshold information is not supported */ + }, + }, + { + .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { @@@ -3455,132 -3356,9 +3455,132 @@@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = { { - /* TODO: should we support other types, e.g., P2P?*/ - .types_mask = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + /* TODO: should we support other types, e.g., P2P? 
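The repeated 0x88 values in the eht_mcs_nss_supp tables are two 4-bit NSS fields packed into one byte, one nibble for Rx and one for Tx (with 0x88 both nibbles read as 8 spatial streams, so the nibble order does not change the result here). A tiny helper makes the encoding explicit; the function name is ours, not mac80211's.

#include <stdint.h>

/* pack max Rx NSS in the low nibble and max Tx NSS in the high nibble */
static uint8_t pack_rx_tx_max_nss(unsigned int rx, unsigned int tx)
{
	return (uint8_t)(((tx & 0xf) << 4) | (rx & 0xf));
}

/* pack_rx_tx_max_nss(8, 8) == 0x88, as used in the tables above */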
*/ + .types_mask = BIT(NL80211_IFTYPE_STATION), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | + IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, + .phy_cap_info[1] = + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK, + .phy_cap_info[2] = + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK, + .phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, + .phy_cap_info[4] = + IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | + IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, + .phy_cap_info[6] = + IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | + IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, + .phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + 
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ, + }, + + /* For all MCS and bandwidth, set 8 NSS for both Tx and + * Rx + */ + .eht_mcs_nss_supp = { + /* + * As B1 and B2 are set in the supported + * channel width set field in the HE PHY + * capabilities information field include all + * the following MCS/NSS. + */ + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + }, + /* PPE threshold information is not supported */ + }, + }, + { + .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { @@@ -3751,153 -3529,9 +3751,153 @@@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { { - /* TODO: should we support other types, e.g., P2P?*/ - .types_mask = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + /* TODO: should we support other types, e.g., P2P? */ + .types_mask = BIT(NL80211_IFTYPE_STATION), + .he_6ghz_capa = { + .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | + IEEE80211_HE_6GHZ_CAP_SM_PS | + IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | + IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | + IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), + }, + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | + 
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, + .phy_cap_info[1] = + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK | + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK, + .phy_cap_info[2] = + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK | + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK, + .phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, + .phy_cap_info[4] = + IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | + IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, + .phy_cap_info[6] = + IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | + IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | + IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, + .phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ, + }, + + /* For all MCS and bandwidth, set 8 NSS for both Tx and + * Rx + */ + .eht_mcs_nss_supp = { + /* + * As B1 and B2 are set in the supported + * channel width set field in the HE PHY + * capabilities information field and 320MHz in + * 6GHz is supported include all the following + * MCS/NSS. 
+ */ + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + .bw._320 = { + .rx_tx_mcs9_max_nss = 0x88, + .rx_tx_mcs11_max_nss = 0x88, + .rx_tx_mcs13_max_nss = 0x88, + }, + }, + /* PPE threshold information is not supported */ + }, + }, + { + .types_mask = BIT(NL80211_IFTYPE_AP), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | @@@ -4262,6 -3896,7 +4262,6 @@@ static int mac80211_hwsim_new_radio(str hw->wiphy->max_remain_on_channel_duration = 1000; data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; - data->chanctx = NULL; } else { data->if_combination.num_different_channels = 1; data->if_combination.radar_detect_widths = @@@ -4836,9 -4471,13 +4836,9 @@@ static int hwsim_cloned_frame_received_ if (data2->use_chanctx) { if (data2->tmp_chan) channel = data2->tmp_chan; - else if (data2->chanctx) - channel = data2->chanctx->def.chan; } else { channel = data2->channel; } - if (!channel) - goto out; if (!hwsim_virtio_enabled) { if (hwsim_net_get_netgroup(genl_info_net(info)) != @@@ -4869,7 -4508,6 +4869,7 @@@ rx_status.freq); if (!iter_data.channel) goto out; + rx_status.band = iter_data.channel->band; mutex_lock(&data2->mutex); if (!hwsim_chans_compat(iter_data.channel, channel)) { @@@ -4882,13 -4520,11 +4882,13 @@@ } } mutex_unlock(&data2->mutex); + } else if (!channel) { + goto out; } else { rx_status.freq = channel->center_freq; + rx_status.band = channel->band; } - rx_status.band = channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); @@@ -4898,7 -4534,10 +4898,7 @@@ ieee80211_is_probe_resp(hdr->frame_control)) rx_status.boottime_ns = ktime_get_boottime_ns(); - memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); - data2->rx_pkts++; - data2->rx_bytes += skb->len; - ieee80211_rx_irqsafe(data2->hw, skb); + mac80211_hwsim_rx(data2, &rx_status, skb); return 0; err: @@@ -5273,7 -4912,6 +5273,7 @@@ static struct genl_family hwsim_genl_fa .module = THIS_MODULE, .small_ops = hwsim_ops, .n_small_ops = ARRAY_SIZE(hwsim_ops), + .resv_start_op = HWSIM_CMD_DEL_MAC_ADDR + 1, .mcgrps = hwsim_mcgrps, .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), }; @@@ -5422,6 -5060,10 +5422,10 @@@ static int hwsim_virtio_handle_cmd(stru nlh = nlmsg_hdr(skb); gnlh = nlmsg_data(nlh); + + if (skb->len < nlh->nlmsg_len) + return -EINVAL; + err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX, hwsim_genl_policy, NULL); if (err) { @@@ -5464,7 -5106,8 +5468,8 @@@ static void hwsim_virtio_rx_work(struc spin_unlock_irqrestore(&hwsim_virtio_lock, flags); skb->data = skb->head; - skb_set_tail_pointer(skb, len); + skb_reset_tail_pointer(skb); + skb_put(skb, len); hwsim_virtio_handle_cmd(skb); spin_lock_irqsave(&hwsim_virtio_lock, flags); diff --combined include/linux/ieee80211.h index 6f70394417ac,b6e6d5b40774..79690938d9a2 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@@ -310,9 -310,11 +310,11 @@@ static inline u16 ieee80211_sn_sub(u16 struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; + struct_group(addrs, + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + ); __le16 seq_ctrl; u8 addr4[ETH_ALEN]; } __packed __aligned(2); @@@ 
-2886,8 -2888,7 +2888,8 @@@ ieee80211_he_spr_size(const u8 *he_spr_ /* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */ static inline u8 ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap, - const struct ieee80211_eht_cap_elem_fixed *eht_cap) + const struct ieee80211_eht_cap_elem_fixed *eht_cap, + bool from_ap) { u8 count = 0; @@@ -2908,10 -2909,7 +2910,10 @@@ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) count += 3; - return count ? count : 4; + if (count) + return count; + + return from_ap ? 3 : 4; } /* 802.11be EHT PPE Thresholds */ @@@ -2947,8 -2945,7 +2949,8 @@@ ieee80211_eht_ppe_size(u16 ppe_thres_hd } static inline bool -ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len) +ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len, + bool from_ap) { const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data; u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed); @@@ -2957,8 -2954,7 +2959,8 @@@ return false; needed += ieee80211_eht_mcs_nss_size((const void *)he_capa, - (const void *)data); + (const void *)data, + from_ap); if (len < needed) return false; diff --combined include/linux/skbuff.h index 43c37385f1e9,18e163a3460d..f15d5b62539b --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -1195,8 -1195,7 +1195,8 @@@ static inline bool skb_unref(struct sk_ return true; } -void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason); +void __fix_address +kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason); /** * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason @@@ -1461,8 -1460,8 +1461,8 @@@ void skb_flow_dissector_init(struct flo unsigned int key_count); struct bpf_flow_dissector; -bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen, unsigned int flags); +u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, + __be16 proto, int nhoff, int hlen, unsigned int flags); bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, @@@ -2445,6 -2444,27 +2445,27 @@@ static inline void skb_fill_page_desc(s skb_shinfo(skb)->nr_frags = i + 1; } + /** + * skb_fill_page_desc_noacc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * Variant of skb_fill_page_desc() which does not deal with + * pfmemalloc, if page is not owned by us. 
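To make the intent of the _noacc variant concrete without relying on kernel types, here is a userspace-style sketch of the wrapper pattern (struct and function names invented): both helpers fill fragment slot i and bump the fragment count, but only the full variant propagates the page's emergency-pool ("pfmemalloc") status, which is exactly the bookkeeping the noacc version skips for pages the socket does not own.

#include <stdbool.h>
#include <stddef.h>

struct frag { void *page; size_t off; size_t size; };

struct shared_info {
	unsigned int nr_frags;
	bool pfmemalloc;
	struct frag frags[16];
};

static void __fill_frag(struct shared_info *sh, int i, void *page,
			size_t off, size_t size)
{
	sh->frags[i].page = page;
	sh->frags[i].off = off;
	sh->frags[i].size = size;
}

/* full variant: also records whether the page came from an emergency pool */
static void fill_frag(struct shared_info *sh, int i, void *page,
		      size_t off, size_t size, bool page_pfmemalloc)
{
	__fill_frag(sh, i, page, off, size);
	if (page_pfmemalloc)
		sh->pfmemalloc = true;
	sh->nr_frags = i + 1;
}

/* "noacc" variant: same slot fill, no pfmemalloc accounting */
static void fill_frag_noacc(struct shared_info *sh, int i, void *page,
			    size_t off, size_t size)
{
	__fill_frag(sh, i, page, off, size);
	sh->nr_frags = i + 1;
}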
+ */ + static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i, + struct page *page, int off, + int size) + { + struct skb_shared_info *shinfo = skb_shinfo(skb); + + __skb_fill_page_desc_noacc(shinfo, i, page, off, size); + shinfo->nr_frags = i + 1; + } + void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize); diff --combined net/core/skbuff.c index 48ecfbf29174,417463da4fac..f1b8b20fc20b --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -91,7 -91,11 +91,11 @@@ static struct kmem_cache *skbuff_ext_ca int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; EXPORT_SYMBOL(sysctl_max_skb_frags); - /* The array 'drop_reasons' is auto-generated in dropreason_str.c */ + #undef FN + #define FN(reason) [SKB_DROP_REASON_##reason] = #reason, + const char * const drop_reasons[] = { + DEFINE_DROP_REASON(FN, FN) + }; EXPORT_SYMBOL(drop_reasons); /** @@@ -777,10 -781,9 +781,10 @@@ EXPORT_SYMBOL(__kfree_skb) * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' * tracepoint. */ -void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +void __fix_address +kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) { - if (!skb_unref(skb)) + if (unlikely(!skb_unref(skb))) return; DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX); diff --combined net/ipv4/tcp.c index 52b8879e7d20,6cdfce6f2867..8230be00ecca --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -1015,7 -1015,7 +1015,7 @@@ new_segment skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { get_page(page); - skb_fill_page_desc(skb, i, page, offset, copy); + skb_fill_page_desc_noacc(skb, i, page, offset, copy); } if (!(flags & MSG_NO_SHARED_FRAGS)) @@@ -3199,7 -3199,7 +3199,7 @@@ EXPORT_SYMBOL(tcp_disconnect) static inline bool tcp_can_repair_sock(const struct sock *sk) { - return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && + return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && (sk->sk_state != TCP_LISTEN); } @@@ -3476,8 -3476,8 +3476,8 @@@ int tcp_set_window_clamp(struct sock *s /* * Socket option code for TCP. 
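The DEFINE_DROP_REASON(FN, FN) expansion used earlier in this hunk to build drop_reasons[] is the classic X-macro trick: the same reason list is expanded once into enum constants and once into designated string initializers, so the enum and the string table cannot drift out of sync. A self-contained miniature of the technique (reason names invented):

#include <stdio.h>

#define DEFINE_REASONS(FN)	\
	FN(NOT_SPECIFIED)	\
	FN(NO_SOCKET)		\
	FN(PKT_TOO_SMALL)

#define ENUM_ENTRY(r)	REASON_##r,
enum drop_reason { DEFINE_REASONS(ENUM_ENTRY) REASON_MAX };
#undef ENUM_ENTRY

#define STR_ENTRY(r)	[REASON_##r] = #r,
static const char * const reason_str[] = { DEFINE_REASONS(STR_ENTRY) };
#undef STR_ENTRY

int main(void)
{
	printf("%s\n", reason_str[REASON_NO_SOCKET]);	/* prints NO_SOCKET */
	return 0;
}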
*/ -static int do_tcp_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) +int do_tcp_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@@ -3499,11 -3499,11 +3499,11 @@@ return -EFAULT; name[val] = 0; - lock_sock(sk); - err = tcp_set_congestion_control(sk, name, true, - ns_capable(sock_net(sk)->user_ns, - CAP_NET_ADMIN)); - release_sock(sk); + sockopt_lock_sock(sk); + err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), + sockopt_ns_capable(sock_net(sk)->user_ns, + CAP_NET_ADMIN)); + sockopt_release_sock(sk); return err; } case TCP_ULP: { @@@ -3519,9 -3519,9 +3519,9 @@@ return -EFAULT; name[val] = 0; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_set_ulp(sk, name); - release_sock(sk); + sockopt_release_sock(sk); return err; } case TCP_FASTOPEN_KEY: { @@@ -3554,7 -3554,7 +3554,7 @@@ if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { case TCP_MAXSEG: @@@ -3776,7 -3776,7 +3776,7 @@@ break; } - release_sock(sk); + sockopt_release_sock(sk); return err; } @@@ -4040,15 -4040,15 +4040,15 @@@ struct sk_buff *tcp_get_timestamping_op return stats; } -static int do_tcp_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, int __user *optlen) +int do_tcp_getsockopt(struct sock *sk, int level, + int optname, sockptr_t optval, sockptr_t optlen) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); int val, len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); @@@ -4098,15 -4098,15 +4098,15 @@@ case TCP_INFO: { struct tcp_info info; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; tcp_get_info(sk, &info); len = min_t(unsigned int, len, sizeof(info)); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4116,7 -4116,7 +4116,7 @@@ size_t sz = 0; int attr; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; ca_ops = icsk->icsk_ca_ops; @@@ -4124,9 -4124,9 +4124,9 @@@ sz = ca_ops->get_info(sk, ~0U, &attr, &info); len = min_t(unsigned int, len, sz); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4135,28 -4135,27 +4135,28 @@@ break; case TCP_CONGESTION: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_CA_NAME_MAX); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) return -EFAULT; return 0; case TCP_ULP: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); if (!icsk->icsk_ulp_ops) { - if (put_user(0, optlen)) + len = 0; + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } - if (put_user(len, optlen)) + if 
(copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) return -EFAULT; return 0; @@@ -4164,15 -4163,15 +4164,15 @@@ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; unsigned int key_len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; key_len = tcp_fastopen_get_cipher(net, icsk, key) * TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, key, len)) + if (copy_to_sockptr(optval, key, len)) return -EFAULT; return 0; } @@@ -4198,7 -4197,7 +4198,7 @@@ case TCP_REPAIR_WINDOW: { struct tcp_repair_window opt; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len != sizeof(opt)) @@@ -4213,7 -4212,7 +4213,7 @@@ opt.rcv_wnd = tp->rcv_wnd; opt.rcv_wup = tp->rcv_wup; - if (copy_to_user(optval, &opt, len)) + if (copy_to_sockptr(optval, &opt, len)) return -EFAULT; return 0; } @@@ -4259,35 -4258,35 +4259,35 @@@ val = tp->save_syn; break; case TCP_SAVED_SYN: { - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; - lock_sock(sk); + sockopt_lock_sock(sk); if (tp->saved_syn) { if (len < tcp_saved_syn_len(tp->saved_syn)) { - if (put_user(tcp_saved_syn_len(tp->saved_syn), - optlen)) { - release_sock(sk); + len = tcp_saved_syn_len(tp->saved_syn); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - release_sock(sk); + sockopt_release_sock(sk); return -EINVAL; } len = tcp_saved_syn_len(tp->saved_syn); - if (put_user(len, optlen)) { - release_sock(sk); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - if (copy_to_user(optval, tp->saved_syn->data, len)) { - release_sock(sk); + if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { + sockopt_release_sock(sk); return -EFAULT; } tcp_saved_syn_free(tp); - release_sock(sk); + sockopt_release_sock(sk); } else { - release_sock(sk); + sockopt_release_sock(sk); len = 0; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } return 0; @@@ -4298,31 -4297,31 +4298,31 @@@ struct tcp_zerocopy_receive zc = {}; int err; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0 || len < offsetofend(struct tcp_zerocopy_receive, length)) return -EINVAL; if (unlikely(len > sizeof(zc))) { - err = check_zeroed_user(optval + sizeof(zc), - len - sizeof(zc)); + err = check_zeroed_sockptr(optval, sizeof(zc), + len - sizeof(zc)); if (err < 1) return err == 0 ? 
-EINVAL : err; len = sizeof(zc); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } - if (copy_from_user(&zc, optval, len)) + if (copy_from_sockptr(&zc, optval, len)) return -EFAULT; if (zc.reserved) return -EINVAL; if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) return -EINVAL; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_zerocopy_receive(sk, &zc, &tss); err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); - release_sock(sk); + sockopt_release_sock(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { @@@ -4352,7 -4351,7 +4352,7 @@@ zerocopy_rcv_sk_err zerocopy_rcv_inq: zc.inq = tcp_inq_hint(sk); zerocopy_rcv_out: - if (!err && copy_to_user(optval, &zc, len)) + if (!err && copy_to_sockptr(optval, &zc, len)) err = -EFAULT; return err; } @@@ -4361,9 -4360,9 +4361,9 @@@ return -ENOPROTOOPT; } - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; return 0; } @@@ -4388,8 -4387,7 +4388,8 @@@ int tcp_getsockopt(struct sock *sk, in if (level != SOL_TCP) return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); + return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); } EXPORT_SYMBOL(tcp_getsockopt); @@@ -4435,16 -4433,12 +4435,16 @@@ static void __tcp_alloc_md5sig_pool(voi * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). + */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex); if (!tcp_md5sig_pool_populated) { @@@ -4455,8 -4449,7 +4455,8 @@@ mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@@ -4472,8 -4465,7 +4472,8 @@@ struct tcp_md5sig_pool *tcp_get_md5sig_ { local_bh_disable(); - if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); @@@ -4744,12 -4736,6 +4744,12 @@@ void __init tcp_init(void SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); + tcp_hashinfo.bind2_bucket_cachep = + kmem_cache_create("tcp_bind2_bucket", + sizeof(struct inet_bind2_bucket), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT, + NULL); /* Size and allocate the main established and bind bucket * hash tables. 
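The tcp_md5sig_pool change above is a textbook lazily-initialised singleton: the fast path checks a "populated" flag without the lock (now annotated with READ_ONCE/WRITE_ONCE so the intentional data race is explicit), and only the slow path takes the mutex, re-checks, and publishes the flag once the pool is fully built. Below is a userspace rendering of the same shape using C11 atomics in place of the kernel annotations; it illustrates the pattern, it is not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool pool_populated;

static void __alloc_pool(void)
{
	/* ... one-time allocation work goes here ... */

	/* publish only after the pool is fully set up */
	atomic_store_explicit(&pool_populated, true, memory_order_release);
}

static bool alloc_pool(void)
{
	if (!atomic_load_explicit(&pool_populated, memory_order_acquire)) {
		pthread_mutex_lock(&pool_mutex);
		if (!atomic_load_explicit(&pool_populated,
					  memory_order_relaxed))
			__alloc_pool();
		pthread_mutex_unlock(&pool_mutex);
	}
	return atomic_load_explicit(&pool_populated, memory_order_acquire);
}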
@@@ -4773,7 -4759,7 +4773,7 @@@ panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", - sizeof(struct inet_bind_hashbucket), + 2 * sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, 17, /* one slot per 128 KB of memory */ 0, @@@ -4782,12 -4768,9 +4782,12 @@@ 0, 64 * 1024); tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; + tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); + spin_lock_init(&tcp_hashinfo.bhash2[i].lock); + INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); } diff --combined net/ipv6/seg6.c index 5421cc7c935f,0b0e34ddc64e..29346a6eec9f --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@@ -191,6 -191,11 +191,11 @@@ static int seg6_genl_sethmac(struct sk_ goto out_unlock; } + if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) { + err = -EINVAL; + goto out_unlock; + } + if (hinfo) { err = seg6_hmac_info_del(net, hmackeyid); if (err) @@@ -499,7 -504,6 +504,7 @@@ static struct genl_family seg6_genl_fam .parallel_ops = true, .ops = seg6_genl_ops, .n_ops = ARRAY_SIZE(seg6_genl_ops), + .resv_start_op = SEG6_CMD_GET_TUNSRC + 1, .module = THIS_MODULE, }; diff --combined net/mac80211/mlme.c index 84a3e08a7e84,5265d2b6db12..699e409ef45a --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -314,7 -314,7 +314,7 @@@ ieee80211_determine_chantype(struct iee if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) { struct cfg80211_chan_def eht_chandef = *chandef; - ieee80211_chandef_eht_oper(sdata, eht_oper, + ieee80211_chandef_eht_oper(eht_oper, eht_chandef.width == NL80211_CHAN_WIDTH_160, false, &eht_chandef); @@@ -695,7 -695,6 +695,7 @@@ static bool ieee80211_add_vht_ie(struc static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband, + enum ieee80211_smps_mode smps_mode, ieee80211_conn_flags_t conn_flags) { u8 *pos, *pre_he_pos; @@@ -720,7 -719,7 +720,7 @@@ /* trim excess if any */ skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos)); - ieee80211_ie_build_he_6ghz_cap(sdata, skb); + ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb); } static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata, @@@ -747,13 -746,11 +747,13 @@@ eht_cap_size = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, - &eht_cap->eht_cap_elem) + + &eht_cap->eht_cap_elem, + false) + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], eht_cap->eht_cap_elem.phy_cap_info); pos = skb_put(skb, eht_cap_size); - ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size, + false); } static void ieee80211_assoc_add_rates(struct sk_buff *skb, @@@ -1101,7 -1098,7 +1101,7 @@@ static size_t ieee80211_assoc_link_elem offset); if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) { - ieee80211_add_he_ie(sdata, skb, sband, + ieee80211_add_he_ie(sdata, skb, sband, smps_mode, assoc_data->link[link_id].conn_flags); ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY); } @@@ -1223,21 -1220,14 +1223,21 @@@ static void ieee80211_assoc_add_ml_elem ml_elem = skb_put(skb, sizeof(*ml_elem)); ml_elem->control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC | - IEEE80211_MLC_BASIC_PRES_EML_CAPA | IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP); common = skb_put(skb, sizeof(*common)); common->len = 
sizeof(*common) + - 2 + /* EML capabilities */ 2; /* MLD capa/ops */ memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN); - skb_put_data(skb, &eml_capa, sizeof(eml_capa)); + + /* add EML_CAPA only if needed, see Draft P802.11be_D2.1, 35.3.17 */ + if (eml_capa & + cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | + IEEE80211_EML_CAP_EMLMR_SUPPORT))) { + common->len += 2; /* EML capabilities */ + ml_elem->control |= + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA); + skb_put_data(skb, &eml_capa, sizeof(eml_capa)); + } /* need indication from userspace to support this */ mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP); skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); @@@ -1912,7 -1902,7 +1912,7 @@@ ieee80211_sta_process_chanswitch(struc IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&local->mtx); - cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, + cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0, csa_ie.count, csa_ie.mode); if (local->ops->channel_switch) { @@@ -2445,29 -2435,6 +2445,29 @@@ static void ieee80211_sta_handle_tspec_ ieee80211_sta_handle_tspec_ac_params(sdata); } +void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_tx_queue_params *params = link->tx_conf; + u8 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + mlme_dbg(sdata, + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", + ac, params[ac].acm, + params[ac].aifs, params[ac].cw_min, params[ac].cw_max, + params[ac].txop, params[ac].uapsd, + ifmgd->tx_tspec[ac].downgraded); + if (!ifmgd->tx_tspec[ac].downgraded && + drv_conf_tx(local, link, ac, ¶ms[ac])) + link_err(link, + "failed to set TX queue parameters for AC %d\n", + ac); + } +} + /* MLME */ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, @@@ -2599,10 -2566,20 +2599,10 @@@ } } - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - mlme_dbg(sdata, - "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", - ac, params[ac].acm, - params[ac].aifs, params[ac].cw_min, params[ac].cw_max, - params[ac].txop, params[ac].uapsd, - ifmgd->tx_tspec[ac].downgraded); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) link->tx_conf[ac] = params[ac]; - if (!ifmgd->tx_tspec[ac].downgraded && - drv_conf_tx(local, link, ac, ¶ms[ac])) - link_err(link, - "failed to set TX queue parameters for AC %d\n", - ac); - } + + ieee80211_mgd_set_link_qos_params(link); /* enable WMM or activate new settings */ link->conf->qos = true; @@@ -3443,11 -3420,11 +3443,11 @@@ static void ieee80211_destroy_auth_data ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; + mutex_lock(&sdata->local->mtx); ieee80211_link_release_channel(&sdata->deflink); - mutex_unlock(&sdata->local->mtx); - ieee80211_vif_set_links(sdata, 0); + mutex_unlock(&sdata->local->mtx); } cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss); @@@ -3485,10 -3462,6 +3485,6 @@@ static void ieee80211_destroy_assoc_dat sdata->u.mgd.flags = 0; sdata->vif.bss_conf.mu_mimo_owner = false; - mutex_lock(&sdata->local->mtx); - ieee80211_link_release_channel(&sdata->deflink); - mutex_unlock(&sdata->local->mtx); - if (status != ASSOC_REJECTED) { struct cfg80211_assoc_failure data = { .timeout = status == ASSOC_TIMEOUT, @@@ -3507,7 -3480,10 +3503,10 @@@ cfg80211_assoc_failure(sdata->dev, 
&data); } + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0); + mutex_unlock(&sdata->local->mtx); } kfree(assoc_data); @@@ -3928,7 -3904,6 +3927,7 @@@ static bool ieee80211_assoc_config_link .len = elem_len, .bss = cbss, .link_id = link == &sdata->deflink ? -1 : link->link_id, + .from_ap = true, }; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ; @@@ -4597,11 -4572,6 +4596,11 @@@ static int ieee80211_prep_channel(struc bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_elems_parse_params parse_params = { + .bss = cbss, + .link_id = -1, + .from_ap = true, + }; struct ieee802_11_elems *elems; const struct cfg80211_bss_ies *ies; int ret; @@@ -4611,9 -4581,7 +4610,9 @@@ rcu_read_lock(); ies = rcu_dereference(cbss->ies); - elems = ieee802_11_parse_elems(ies->data, ies->len, false, cbss); + parse_params.start = ies->data; + parse_params.len = ies->len; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) { rcu_read_unlock(); return -ENOMEM; @@@ -4968,11 -4936,6 +4967,11 @@@ static void ieee80211_rx_mgmt_assoc_res struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; + struct ieee80211_elems_parse_params parse_params = { + .bss = NULL, + .link_id = -1, + .from_ap = true, + }; struct ieee802_11_elems *elems; int ac; const u8 *elem_start; @@@ -5027,9 -4990,7 +5026,9 @@@ return; elem_len = len - (elem_start - (u8 *)mgmt); - elems = ieee802_11_parse_elems(elem_start, elem_len, false, NULL); + parse_params.start = elem_start; + parse_params.len = elem_len; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) goto notify_driver; @@@ -5162,7 -5123,7 +5161,7 @@@ resp.req_ies = ifmgd->assoc_req_ies; resp.req_ies_len = ifmgd->assoc_req_ies_len; if (sdata->vif.valid_links) - resp.ap_mld_addr = assoc_data->ap_addr; + resp.ap_mld_addr = sdata->vif.cfg.ap_addr; cfg80211_rx_assoc_resp(sdata->dev, &resp); notify_driver: drv_mgd_complete_tx(sdata->local, sdata, &info); @@@ -5394,10 -5355,6 +5393,10 @@@ static void ieee80211_rx_mgmt_beacon(st u32 ncrc = 0; u8 *bssid, *variable = mgmt->u.beacon.variable; u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; + struct ieee80211_elems_parse_params parse_params = { + .link_id = -1, + .from_ap = true, + }; sdata_assert_lock(sdata); @@@ -5416,9 -5373,6 +5415,9 @@@ if (baselen > len) return; + parse_params.start = variable; + parse_params.len = len - baselen; + rcu_read_lock(); chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (!chanctx_conf) { @@@ -5437,8 -5391,8 +5436,8 @@@ if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && !WARN_ON(sdata->vif.valid_links) && ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) { - elems = ieee802_11_parse_elems(variable, len - baselen, false, - ifmgd->assoc_data->link[0].bss); + parse_params.bss = ifmgd->assoc_data->link[0].bss; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; @@@ -5504,10 -5458,9 +5503,10 @@@ */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); - elems = ieee802_11_parse_elems_crc(variable, len - baselen, - false, care_about_ies, ncrc, - link->u.mgd.bss); + parse_params.bss = link->u.mgd.bss; + parse_params.filter = care_about_ies; + 
parse_params.crc = ncrc; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; ncrc = elems->crc; @@@ -5717,13 -5670,6 +5716,13 @@@ void ieee80211_sta_rx_queued_mgmt(struc sdata_lock(sdata); + if (rx_status->link_valid) { + link = sdata_dereference(sdata->link[rx_status->link_id], + sdata); + if (!link) + goto out; + } + switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_beacon(link, (void *)mgmt, @@@ -5800,7 -5746,6 +5799,7 @@@ } break; } +out: sdata_unlock(sdata); } @@@ -6336,8 -6281,6 +6335,8 @@@ void ieee80211_mgd_setup_link(struct ie if (sdata->u.mgd.assoc_data) ether_addr_copy(link->conf->addr, sdata->u.mgd.assoc_data->link[link_id].addr); + else if (!is_valid_ether_addr(link->conf->addr)) + eth_random_addr(link->conf->addr); } /* scan finished notification */ @@@ -6425,6 -6368,9 +6424,6 @@@ static int ieee80211_prep_connection(st goto out_err; } - if (mlo && !is_valid_ether_addr(link->conf->addr)) - eth_random_addr(link->conf->addr); - if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) { err = -EINVAL; goto out_err; @@@ -6562,6 -6508,7 +6561,7 @@@ return 0; out_err: + ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0); return err; } @@@ -6906,10 -6853,6 +6906,10 @@@ int ieee80211_mgd_assoc(struct ieee8021 } } + /* FIXME: no support for 4-addr MLO yet */ + if (sdata->u.mgd.use_4addr && req->link_id >= 0) + return -EOPNOTSUPP; + assoc_data = kzalloc(size, GFP_KERNEL); if (!assoc_data) return -ENOMEM; diff --combined net/mac80211/rx.c index cc139fe5fb78,45d7e71661e3..511c809e2c6b --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -215,19 -215,9 +215,19 @@@ ieee80211_rx_radiotap_hdrlen(struct iee } static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, struct sta_info *sta, struct sk_buff *skb) { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + if (link_id >= 0) { + status->link_valid = 1; + status->link_id = link_id; + } else { + status->link_valid = 0; + } + skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); if (sta) @@@ -235,12 -225,11 +235,12 @@@ } static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, struct sta_info *sta, struct sk_buff *skb) { skb->protocol = 0; - __ieee80211_queue_skb_to_iface(sdata, sta, skb); + __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb); } static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, @@@ -283,7 -272,7 +283,7 @@@ if (!skb) return; - ieee80211_queue_skb_to_iface(sdata, NULL, skb); + ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb); } /* @@@ -1405,7 -1394,7 +1405,7 @@@ static void ieee80211_rx_reorder_ampdu( /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); if (sc & IEEE80211_SCTL_FRAG) { - ieee80211_queue_skb_to_iface(rx->sdata, NULL, skb); + ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb); return; } @@@ -1865,6 -1854,7 +1865,6 @@@ static struct ieee80211_key ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) { struct ieee80211_key *key = NULL; - struct ieee80211_sub_if_data *sdata = rx->sdata; int idx2; /* Make sure key gets set if either BIGTK key index is set so that @@@ -1883,14 -1873,14 +1883,14 @@@ idx2 = idx - 1; } - if (rx->sta) - key = rcu_dereference(rx->sta->deflink.gtk[idx]); + if (rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx]); if (!key) - key = 
rcu_dereference(sdata->deflink.gtk[idx]); - if (!key && rx->sta) - key = rcu_dereference(rx->sta->deflink.gtk[idx2]); + key = rcu_dereference(rx->link->gtk[idx]); + if (!key && rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx2]); if (!key) - key = rcu_dereference(sdata->deflink.gtk[idx2]); + key = rcu_dereference(rx->link->gtk[idx2]); return key; } @@@ -1996,15 -1986,15 +1996,15 @@@ ieee80211_rx_h_decrypt(struct ieee80211 if (mmie_keyidx < NUM_DEFAULT_KEYS || mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) return RX_DROP_MONITOR; /* unexpected BIP keyidx */ - if (rx->sta) { + if (rx->link_sta) { if (ieee80211_is_group_privacy_action(skb) && test_sta_flag(rx->sta, WLAN_STA_MFP)) return RX_DROP_MONITOR; - rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); } if (!rx->key) - rx->key = rcu_dereference(rx->sdata->deflink.gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); } else if (!ieee80211_has_protected(fc)) { /* * The frame was not protected, so skip decryption. However, we @@@ -2013,24 -2003,25 +2013,24 @@@ * have been expected. */ struct ieee80211_key *key = NULL; - struct ieee80211_sub_if_data *sdata = rx->sdata; int i; if (ieee80211_is_beacon(fc)) { key = ieee80211_rx_get_bigtk(rx, -1); } else if (ieee80211_is_mgmt(fc) && is_multicast_ether_addr(hdr->addr1)) { - key = rcu_dereference(rx->sdata->deflink.default_mgmt_key); + key = rcu_dereference(rx->link->default_mgmt_key); } else { - if (rx->sta) { + if (rx->link_sta) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(rx->sta->deflink.gtk[i]); + key = rcu_dereference(rx->link_sta->gtk[i]); if (key) break; } } if (!key) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(sdata->deflink.gtk[i]); + key = rcu_dereference(rx->link->gtk[i]); if (key) break; } @@@ -2059,13 -2050,13 +2059,13 @@@ return RX_DROP_UNUSABLE; /* check per-station GTK first, if multicast packet */ - if (is_multicast_ether_addr(hdr->addr1) && rx->sta) - rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]); + if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) + rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); /* if not found, try default key */ if (!rx->key) { if (is_multicast_ether_addr(hdr->addr1)) - rx->key = rcu_dereference(rx->sdata->deflink.gtk[keyidx]); + rx->key = rcu_dereference(rx->link->gtk[keyidx]); if (!rx->key) rx->key = rcu_dereference(rx->sdata->keys[keyidx]); @@@ -3055,8 -3046,7 +3055,8 @@@ ieee80211_rx_h_data(struct ieee80211_rx (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); - __ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + __ieee80211_queue_skb_to_iface(sdata, rx->link_id, + rx->sta, rx->skb); return RX_QUEUED; } } @@@ -3646,7 -3636,7 +3646,7 @@@ ieee80211_rx_h_action(struct ieee80211_ return RX_QUEUED; queue: - ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@@ -3804,7 -3794,7 +3804,7 @@@ ieee80211_rx_h_ext(struct ieee80211_rx_ return RX_DROP_MONITOR; /* for now only beacons are ext, so queue them */ - ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@@ -3861,7 -3851,7 +3861,7 @@@ ieee80211_rx_h_mgmt(struct ieee80211_rx return RX_DROP_MONITOR; } - 
ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@@ -4084,6 -4074,7 +4084,7 @@@ void ieee80211_release_reorder_timeout( .link_id = -1, }; struct tid_ampdu_rx *tid_agg_rx; + u8 link_id; tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) @@@ -4103,6 -4094,9 +4104,9 @@@ }; drv_event_callback(rx.local, rx.sdata, &event); } + /* FIXME: statistics won't be right with this */ + link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; + rx.link = rcu_dereference(sta->sdata->link[link_id]); ieee80211_rx_handlers(&rx, &frames); } @@@ -4518,15 -4512,6 +4522,15 @@@ void ieee80211_check_fast_rx_iface(stru mutex_unlock(&local->sta_mtx); } +static bool +ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) +{ + if (!sta->mlo) + return false; + + return !!(sta->valid_links & BIT(link_id)); +} + static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, struct ieee80211_fast_rx *fast_rx, int orig_len) @@@ -4534,30 -4519,19 +4538,30 @@@ struct ieee80211_sta_rx_stats *stats; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); struct sta_info *sta = rx->sta; + struct link_sta_info *link_sta; struct sk_buff *skb = rx->skb; void *sa = skb->data + ETH_ALEN; void *da = skb->data; - stats = &sta->deflink.rx_stats; + if (rx->link_id >= 0) { + link_sta = rcu_dereference(sta->link[rx->link_id]); + if (WARN_ON_ONCE(!link_sta)) { + dev_kfree_skb(rx->skb); + return; + } + } else { + link_sta = &sta->deflink; + } + + stats = &link_sta->rx_stats; if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); + stats = this_cpu_ptr(link_sta->pcpu_rx_stats); /* statistics part of ieee80211_rx_h_sta_process() */ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->deflink.rx_stats_avg.signal, + ewma_signal_add(&link_sta->rx_stats_avg.signal, -status->signal); } @@@ -4573,7 -4547,7 +4577,7 @@@ stats->chain_signal_last[i] = signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], + ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], -signal); } } @@@ -4649,8 -4623,7 +4653,8 @@@ static bool ieee80211_invoke_fast_rx(st u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); - struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; + struct link_sta_info *link_sta; + struct ieee80211_sta_rx_stats *stats; /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue @@@ -4751,19 -4724,8 +4755,19 @@@ return true; drop: dev_kfree_skb(skb); + + if (rx->link_id >= 0) { + link_sta = rcu_dereference(sta->link[rx->link_id]); + if (!link_sta) + return true; + } else { + link_sta = &sta->deflink; + } + if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); + stats = this_cpu_ptr(link_sta->pcpu_rx_stats); + else + stats = &link_sta->rx_stats; stats->dropped++; return true; @@@ -4811,17 -4773,7 +4815,17 @@@ static bool ieee80211_prepare_and_rx_ha if (!link) return true; rx->link = link; + + if (rx->sta) { + rx->link_sta = + rcu_dereference(rx->sta->link[rx->link_id]); + if (!rx->link_sta) + return true; + } } else { + if (rx->sta) + rx->link_sta = &rx->sta->deflink; + rx->link = &sdata->deflink; } @@@ -4879,7 -4831,6 +4883,7 @@@ static void __ieee80211_rx_handle_8023( struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); + 
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_fast_rx *fast_rx; struct ieee80211_rx_data rx; @@@ -4900,31 -4851,7 +4904,31 @@@ rx.sta = container_of(pubsta, struct sta_info, sta); rx.sdata = rx.sta->sdata; - rx.link = &rx.sdata->deflink; + + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id)) + goto drop; + + /* + * TODO: Should the frame be dropped if the right link_id is not + * available? Or may be it is fine in the current form to proceed with + * the frame processing because with frame being in 802.3 format, + * link_id is used only for stats purpose and updating the stats on + * the deflink is fine? + */ + if (status->link_valid) + rx.link_id = status->link_id; + + if (rx.link_id >= 0) { + struct ieee80211_link_data *link; + + link = rcu_dereference(rx.sdata->link[rx.link_id]); + if (!link) + goto drop; + rx.link = link; + } else { + rx.link = &rx.sdata->deflink; + } fast_rx = rcu_dereference(rx.sta->fast_rx); if (!fast_rx) @@@ -4954,19 -4881,7 +4958,19 @@@ static bool ieee80211_rx_for_interface( rx->sta = link_sta->sta; rx->link_id = link_sta->link_id; } else { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2); + if (rx->sta) { + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, + status->link_id)) + return false; + + rx->link_id = status->link_valid ? status->link_id : -1; + } else { + rx->link_id = -1; + } } return ieee80211_prepare_and_rx_handle(rx, skb, consume); @@@ -4982,7 -4897,6 +4986,7 @@@ static void __ieee80211_rx_handle_packe struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr; __le16 fc; @@@ -5027,39 -4941,10 +5031,39 @@@ if (ieee80211_is_data(fc)) { struct sta_info *sta, *prev_sta; + u8 link_id = status->link_id; if (pubsta) { rx.sta = container_of(pubsta, struct sta_info, sta); rx.sdata = rx.sta->sdata; + + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id)) + goto out; + + if (status->link_valid) + rx.link_id = status->link_id; + + /* + * In MLO connection, fetch the link_id using addr2 + * when the driver does not pass link_id in status. + * When the address translation is already performed by + * driver/hw, the valid link_id must be passed in + * status. + */ + + if (!status->link_valid && pubsta->mlo) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct link_sta_info *link_sta; + + link_sta = link_sta_info_get_bss(rx.sdata, + hdr->addr2); + if (!link_sta) + goto out; + + rx.link_id = link_sta->link_id; + } + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; goto out; @@@ -5073,13 -4958,6 +5077,13 @@@ continue; } + if ((status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, + link_id)) || + (!status->link_valid && prev_sta->sta.mlo)) + continue; + + rx.link_id = status->link_valid ? link_id : -1; rx.sta = prev_sta; rx.sdata = prev_sta->sdata; ieee80211_prepare_and_rx_handle(&rx, skb, false); @@@ -5088,13 -4966,6 +5092,13 @@@ } if (prev_sta) { + if ((status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, + link_id)) || + (!status->link_valid && prev_sta->sta.mlo)) + goto out; + + rx.link_id = status->link_valid ? 
link_id : -1; rx.sta = prev_sta; rx.sdata = prev_sta->sdata; @@@ -5237,9 -5108,6 +5241,9 @@@ void ieee80211_rx_list(struct ieee80211 } } + if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED)) + goto drop; + status->rx_flags = 0; kcov_remote_start_common(skb_get_kcov_handle(skb)); diff --combined net/netfilter/nf_conntrack_core.c index da65c6e8eeeb,1357a2729a4b..c5851e1321e7 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@@ -1782,7 -1782,7 +1782,7 @@@ init_conntrack(struct net *net, struct } spin_unlock_bh(&nf_conntrack_expect_lock); } - if (!exp) + if (!exp && tmpl) __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); /* Other CPU might have obtained a pointer to this object before it was @@@ -2068,10 -2068,6 +2068,6 @@@ void nf_conntrack_alter_reply(struct nf ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; if (ct->master || (help && !hlist_empty(&help->expectations))) return; - - rcu_read_lock(); - __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); @@@ -2797,7 -2793,6 +2793,6 @@@ int nf_conntrack_init_net(struct net *n nf_conntrack_acct_pernet_init(net); nf_conntrack_tstamp_pernet_init(net); nf_conntrack_ecache_pernet_init(net); - nf_conntrack_helper_pernet_init(net); nf_conntrack_proto_pernet_init(net); return 0; @@@ -2807,6 -2802,10 +2802,6 @@@ err_expect return ret; } -#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ - (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES) || \ - IS_ENABLED(CONFIG_NF_CT_NETLINK)) - /* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */ int __nf_ct_change_timeout(struct nf_conn *ct, u64 timeout) @@@ -2862,3 -2861,5 +2857,3 @@@ int nf_ct_change_status_common(struct n return 0; } EXPORT_SYMBOL_GPL(nf_ct_change_status_common); - -#endif diff --combined net/sched/sch_sfb.c index 31717fa45a4f,2829455211f8..1be8d04d69dc --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@@ -135,15 -135,15 +135,15 @@@ static void increment_one_qlen(u32 sfbh } } - static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) + static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) { u32 sfbhash; - sfbhash = sfb_hash(skb, 0); + sfbhash = cb->hashes[0]; if (sfbhash) increment_one_qlen(sfbhash, 0, q); - sfbhash = sfb_hash(skb, 1); + sfbhash = cb->hashes[1]; if (sfbhash) increment_one_qlen(sfbhash, 1, q); } @@@ -281,8 -281,10 +281,10 @@@ static int sfb_enqueue(struct sk_buff * { struct sfb_sched_data *q = qdisc_priv(sch); + unsigned int len = qdisc_pkt_len(skb); struct Qdisc *child = q->qdisc; struct tcf_proto *fl; + struct sfb_skb_cb cb; int i; u32 p_min = ~0; u32 minqlen = ~0; @@@ -399,11 -401,12 +401,12 @@@ } enqueue: + memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; - increment_qlen(skb, q); + increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; qdisc_qstats_drop(sch); @@@ -453,6 -456,8 +456,6 @@@ static void sfb_reset(struct Qdisc *sch struct sfb_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q);