--- /dev/null
+SMSC/Microchip LAN9303 three port ethernet switch
+-------------------------------------------------
+
+Required properties:
+
+- compatible: should be
+ - "smsc,lan9303-i2c" for I2C managed mode
+ or
+ - "smsc,lan9303-mdio" for mdio managed mode
+
+Optional properties:
+
+- reset-gpios: GPIO to be used to reset the whole device
+- reset-duration: reset duration in milliseconds, defaults to 200 ms
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt. The CPU port of this switch is always port 0.
+
+Note: always use 'reg = <0/1/2>;' for the three DSA ports, even if the device is
+configured to use 1/2/3 instead. This hardware configuration will be
+auto-detected and mapped accordingly.
+
+Example:
+
+I2C managed mode:
+
+ master: masterdevice@X {
+ status = "okay";
+
+ fixed-link { /* RMII fixed link to LAN9303 */
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ switch: switch@a {
+ compatible = "smsc,lan9303-i2c";
+ reg = <0xa>;
+ status = "okay";
+ reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+ reset-duration = <200>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 { /* RMII fixed link to master */
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&master>;
+ };
+
+ port@1 { /* external port 1 */
+ reg = <1>;
+ label = "lan1;
+ };
+
+ port@2 { /* external port 2 */
+ reg = <2>;
+ label = "lan2";
+ };
+ };
+ };
+
+MDIO managed mode:
+
+ master: masterdevice@X {
+ status = "okay";
+ phy-handle = <&switch>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch: switch-phy@0 {
+ compatible = "smsc,lan9303-mdio";
+ reg = <0>;
+ reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+ reset-duration = <100>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&master>;
+ };
+
+ port@1 { /* external port 1 */
+ reg = <1>;
+ label = "lan1;
+ };
+
+ port@2 { /* external port 2 */
+ reg = <2>;
+ label = "lan2";
+ };
+ };
+ };
+ };
+ };
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupts: Should contain the STMMAC interrupts
-- interrupt-names: Should contain the interrupt names "macirq"
- "eth_wake_irq" if this interrupt is supported in the "interrupts"
- property
+- interrupt-names: Should contain a list of interrupt names corresponding to
+ the interrupts in the interrupts property, if available.
+ Valid interrupt names are:
+ - "macirq" (combined signal for various interrupt events)
+ - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
+ - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
- phy-mode: See ethernet.txt file in the same directory.
- snps,reset-gpio gpio number for phy reset.
- snps,reset-active-low boolean flag to indicate if phy reset is active low.
compatible = "st,spear600-gmac";
reg = <0xe0800000 0x8000>;
interrupt-parent = <&vic1>;
- interrupts = <24 23>;
- interrupt-names = "macirq", "eth_wake_irq";
+ interrupts = <24 23 22>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
mac-address = [000000000000]; /* Filled in by U-Boot */
max-frame-size = <3800>;
phy-mode = "gmii";
cfg80211 subsystem
==================
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Introduction
+
Device registration
===================
.. kernel-doc:: include/net/cfg80211.h
:functions: cfg80211_ibss_joined
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_resp_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_done
+
.. kernel-doc:: include/net/cfg80211.h
:functions: cfg80211_connect_result
Maximum number of packets taken from all interfaces in one polling cycle (NAPI
poll). In one polling cycle interfaces which are registered to polling are
-probed in a round-robin manner.
+probed in a round-robin manner. Also, a polling cycle may not exceed
+netdev_budget_usecs microseconds, even if netdev_budget has not been
+exhausted.
+
+netdev_budget_usecs
+-------------------
+
+Maximum number of microseconds in one NAPI polling cycle. Polling
+will exit when either netdev_budget_usecs have elapsed during the
+poll cycle or the number of packets processed reaches netdev_budget.
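+
+A minimal C sketch of how the two limits interact (an illustrative
+simplification only; now_usecs() and napi_poll_once() are hypothetical
+helpers, not the kernel's actual net_rx_action()):
+
+	#include <stdint.h>
+
+	extern uint64_t now_usecs(void);	/* hypothetical clock source */
+	extern int napi_poll_once(int weight);	/* returns packets processed */
+
+	void poll_cycle(int netdev_budget, uint64_t netdev_budget_usecs)
+	{
+		uint64_t deadline = now_usecs() + netdev_budget_usecs;
+
+		/* stop when either the packet or the time budget runs out */
+		while (netdev_budget > 0 && now_usecs() < deadline)
+			netdev_budget -= napi_poll_once(64);
+	}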
netdev_max_backlog
------------------
TC subsystem
S: Maintained
F: include/net/pkt_cls.h
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
phy1: ethernet-phy@1 {
reg = <7>;
+ eee-broken-100tx;
+ eee-broken-1000t;
};
};
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
+ keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;
&i2c3 {
clock-frequency = <400000>;
at24@50 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};
opp-microvolt = <1200000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
-
- opp@1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1320000>;
- clock-latency-ns = <244144>; /* 8 32k periods */
- };
};
cpus {
operating-points-v2 = <&cpu0_opp_table>;
};
+ cpu@1 {
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <2>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <3>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
};
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
if (omap_secure_apis_support())
- boot_cpu = omap_read_auxcoreboot0();
+ boot_cpu = omap_read_auxcoreboot0() >> 9;
else
boot_cpu =
readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
#include "prm-regbits-44xx.h"
static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
#if defined(CONFIG_PM) && defined(CONFIG_SMP)
{}
#endif
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+ return old_cpu1_ns_pa_addr;
+}
+
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
void __init omap4_mpuss_early_init(void)
{
unsigned long startup_pa;
+ void __iomem *ns_pa_addr;
- if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+ if (!(soc_is_omap44xx() || soc_is_omap54xx()))
return;
sar_base = omap4_get_sar_ram_base();
- if (cpu_is_omap443x())
+ /* Save old NS_PA_ADDR for validity checks later on */
+ if (soc_is_omap44xx())
+ ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ else
+ ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+ if (soc_is_omap443x())
startup_pa = __pa_symbol(omap4_secondary_startup);
- else if (cpu_is_omap446x())
+ else if (soc_is_omap446x())
startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
startup_pa = __pa_symbol(omap5_secondary_startup);
- if (cpu_is_omap44xx())
+ if (soc_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
else
ldr r12, =0x103
dsb
smc #0
- mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>
#define OMAP5_CORE_COUNT 0x2
+#define AUX_CORE_BOOT0_GP_RELEASE 0x020
+#define AUX_CORE_BOOT0_HS_RELEASE 0x200
+
struct omap_smp_config {
unsigned long cpu1_rstctrl_pa;
void __iomem *cpu1_rstctrl_va;
void __iomem *scu_base;
+ void __iomem *wakeupgen_base;
void *startup_addr;
};
static struct clockdomain *cpu1_clkdm;
static bool booted;
static struct powerdomain *cpu1_pwrdm;
- void __iomem *base = omap_get_wakeupgen_base();
/*
* Set synchronisation state between this boot processor
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
- omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+ 0xfffffdff);
else
- writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+ writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
if (!cpu1_clkdm && !cpu1_pwrdm) {
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
set_cpu_possible(i, true);
}
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+ if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+ return false;
+
+ return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+ unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+ bool needs_reset = false;
+ u32 released;
+
+ if (omap_secure_apis_support())
+ released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+ else
+ released = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_0) &
+ AUX_CORE_BOOT0_GP_RELEASE;
+ if (released) {
+ pr_warn("smp: CPU1 not parked?\n");
+
+ return;
+ }
+
+ cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_1);
+ cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+ /* Did the configured secondary_startup() get overwritten? */
+ if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+ needs_reset = true;
+
+ /*
+ * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+ * deeper idle state in WFI and will wake to an invalid address.
+ */
+ if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+ !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+ needs_reset = true;
+
+ if (!needs_reset || !c->cpu1_rstctrl_va)
+ return;
+
+ pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+ cpu1_startup_pa, cpu1_ns_pa_addr);
+
+ writel_relaxed(1, c->cpu1_rstctrl_va);
+ readl_relaxed(c->cpu1_rstctrl_va);
+ writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
- void __iomem *base = omap_get_wakeupgen_base();
const struct omap_smp_config *c = NULL;
if (soc_is_omap443x())
/* Must preserve cfg.scu_base set earlier */
cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
cfg.startup_addr = c->startup_addr;
+ cfg.wakeupgen_base = omap_get_wakeupgen_base();
if (soc_is_dra74x() || soc_is_omap54xx()) {
if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
if (cfg.scu_base)
scu_enable(cfg.scu_base);
- /*
- * Reset CPU1 before configuring, otherwise kexec will
- * end up trying to use old kernel startup address.
- */
- if (cfg.cpu1_rstctrl_va) {
- writel_relaxed(1, cfg.cpu1_rstctrl_va);
- readl_relaxed(cfg.cpu1_rstctrl_va);
- writel_relaxed(0, cfg.cpu1_rstctrl_va);
- }
+ omap4_smp_maybe_reset_cpu1(&cfg);
/*
* Write the address of secondary startup routine into the
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
writel_relaxed(__pa_symbol(cfg.startup_addr),
- base + OMAP_AUX_CORE_BOOT_1);
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {
dev_err(dev, "failed to idle\n");
}
break;
+ case BUS_NOTIFY_BIND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+ pm_runtime_status_suspended(dev)) {
+ od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+ pm_runtime_set_active(dev);
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
select GPIOLIB
select MVEBU_MBUS
select PCI
+ select PHYLIB if NETDEVICES
select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:
eth_data, &orion_ge11);
}
+#ifdef CONFIG_ARCH_ORION5X
/*****************************************************************************
* Ethernet switch
****************************************************************************/
struct mdio_board_info *bd;
unsigned int i;
+ if (!IS_BUILTIN(CONFIG_PHYLIB))
+ return;
+
for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
if (!strcmp(d->port_names[i], "cpu"))
break;
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
+#endif
/*****************************************************************************
* I2C
usbphy: phy@01c19400 {
compatible = "allwinner,sun50i-a64-usb-phy";
reg = <0x01c19400 0x14>,
+ <0x01c1a800 0x4>,
<0x01c1b800 0x4>;
reg-names = "phy_ctrl",
+ "pmu0",
"pmu1";
clocks = <&ccu CLK_USB_PHY0>,
<&ccu CLK_USB_PHY1>;
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr) __get_user_asm64(ptr)
+#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
-#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif
" mtsp %0,%%sr2\n\t" \
: : "r"(get_fs()) : )
-#define __get_user(x, ptr) \
-({ \
- register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val; \
- \
- load_sr2(); \
- switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm("ldb", ptr); break; \
- case 2: __get_user_asm("ldh", ptr); break; \
- case 4: __get_user_asm("ldw", ptr); break; \
- case 8: LDD_USER(ptr); break; \
- default: BUILD_BUG(); break; \
- } \
- \
- (x) = (__force __typeof__(*(ptr))) __gu_val; \
- __gu_err; \
+#define __get_user_internal(val, ptr) \
+({ \
+ register long __gu_err __asm__ ("r8") = 0; \
+ \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_asm(val, "ldb", ptr); break; \
+ case 2: __get_user_asm(val, "ldh", ptr); break; \
+ case 4: __get_user_asm(val, "ldw", ptr); break; \
+ case 8: LDD_USER(val, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
+ \
+ __gu_err; \
})
-#define __get_user_asm(ldx, ptr) \
+#define __get_user(val, ptr) \
+({ \
+ load_sr2(); \
+ __get_user_internal(val, ptr); \
+})
+
+#define __get_user_asm(val, ldx, ptr) \
+{ \
+ register long __gu_val; \
+ \
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err));
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = (__force __typeof__(*(ptr))) __gu_val; \
+}
#if !defined(CONFIG_64BIT)
-#define __get_user_asm64(ptr) \
+#define __get_user_asm64(val, ptr) \
+{ \
+ union { \
+ unsigned long long l; \
+ __typeof__(*(ptr)) t; \
+ } __gu_tmp; \
+ \
__asm__(" copy %%r0,%R0\n" \
"1: ldw 0(%%sr2,%2),%0\n" \
"2: ldw 4(%%sr2,%2),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err));
+ : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = __gu_tmp.t; \
+}
#endif /* !defined(CONFIG_64BIT) */
-#define __put_user(x, ptr) \
+#define __put_user_internal(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
- load_sr2(); \
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm("stb", __x, ptr); break; \
- case 2: __put_user_asm("sth", __x, ptr); break; \
- case 4: __put_user_asm("stw", __x, ptr); break; \
- case 8: STD_USER(__x, ptr); break; \
- default: BUILD_BUG(); break; \
- } \
+ case 1: __put_user_asm("stb", __x, ptr); break; \
+ case 2: __put_user_asm("sth", __x, ptr); break; \
+ case 4: __put_user_asm("stw", __x, ptr); break; \
+ case 8: STD_USER(__x, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
\
__pu_err; \
})
+#define __put_user(x, ptr) \
+({ \
+ load_sr2(); \
+ __put_user_internal(x, ptr); \
+})
+
+
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
add dst,len,end
/* short copy with less than 16 bytes? */
- cmpib,>>=,n 15,len,.Lbyte_loop
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
/* same alignment? */
xor src,dst,t0
/* loop until we are 64-bit aligned */
.Lalign_loop64:
extru dst,31,3,t1
- cmpib,=,n 0,t1,.Lcopy_loop_16
+ cmpib,=,n 0,t1,.Lcopy_loop_16_start
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop64
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+.Lcopy_loop_16_start:
ldi 31,t0
.Lcopy_loop_16:
cmpb,COND(>>=),n t0,len,.Lword_loop
/* loop until we are 32-bit aligned */
.Lalign_loop32:
extru dst,31,2,t1
- cmpib,=,n 0,t1,.Lcopy_loop_4
+ cmpib,=,n 0,t1,.Lcopy_loop_8
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop32
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
-.Lcopy_loop_4:
+.Lcopy_loop_8:
cmpib,COND(>>=),n 15,len,.Lbyte_loop
10: ldw 0(srcspc,src),t1
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
- b .Lcopy_loop_4
+ b .Lcopy_loop_8
ldo -16(len),len
.Lbyte_loop:
.Lunaligned_copy:
/* align until dst is 32bit-word-aligned */
extru dst,31,2,t1
- cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+ cmpib,=,n 0,t1,.Lcopy_dstaligned
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
cmpiclr,<> 1,t0,%r0
b,n .Lcase1
.Lcase0:
- cmpb,= %r0,len,.Lcda_finish
+ cmpb,COND(=) %r0,len,.Lcda_finish
nop
1: ldw,ma 4(srcspc,src), a3
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
ldo -1(len),len
- cmpb,=,n %r0,len,.Ldo0
+ cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
ldo -4(len),len
- cmpb,<> %r0,len,.Ldo4
+ cmpb,COND(<>) %r0,len,.Ldo4
nop
.Ldo0:
shrpw a2, a3, %sar, t0
/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
-10: b .Lcopy_done
- std,ma t1,8(dstspc,dst)
+ b .Lcopy_done
+10: std,ma t1,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif
.Lcopy8_fault:
-10: b .Lcopy_done
- stw,ma t1,4(dstspc,dst)
+ b .Lcopy_done
+10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
- select PROVE_LOCKING_SMALL if PROVE_LOCKING
+ select LOCKDEP_SMALL if LOCKDEP
select ARCH_WANT_RELAX_ORDER
config SPARC32
pgd_t *pgd;
unsigned long next;
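+	/*
+	 * Trim the range as free_pgd_range() does: round addr down to a
+	 * PMD boundary and clamp against floor/ceiling so that we never
+	 * free page-table pages outside the bounds the caller owns.
+	 */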
+ addr &= PMD_MASK;
+ if (addr < floor) {
+ addr += PMD_SIZE;
+ if (!addr)
+ return;
+ }
+ if (ceiling) {
+ ceiling &= PMD_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ end -= PMD_SIZE;
+ if (addr > end - 1)
+ return;
+
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
- if (__iter_needs_pmem_wb(i))
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ arch_wb_cache_pmem(addr, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
arch_wb_cache_pmem(addr, bytes);
return len;
crypto_completion_t complete;
void *data;
u8 *result;
+ u32 flags;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
+ priv->flags = req->base.flags;
+
/*
* WARNING: We do not backup req->priv here! The req->priv
* is for internal use of the Crypto API and the
return 0;
}
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
/* Restore the original crypto request. */
req->result = priv->result;
- req->base.complete = priv->complete;
- req->base.data = priv->data;
+
+ ahash_request_set_callback(req, priv->flags,
+ priv->complete, priv->data);
req->priv = NULL;
/* Free the req->priv.priv from the ADJUSTED request. */
kzfree(priv);
}
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
{
struct ahash_request_priv *priv = req->priv;
+ struct crypto_async_request oreq;
- if (err == -EINPROGRESS)
- return;
-
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ oreq.data = priv->data;
- ahash_restore_req(req);
+ priv->complete(&oreq, -EINPROGRESS);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
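+	/*
+	 * -EINPROGRESS here only signals that the backlogged request has
+	 * started running; notify the original callback and keep the
+	 * adjusted request alive until the final completion arrives.
+	 */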
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
/*
* Restore the original request, see ahash_op_unaligned() for what
* goes where.
*/
/* First copy req->result into req->priv.result */
- ahash_op_unaligned_finish(areq, err);
+ ahash_restore_req(areq, err);
/* Complete the ORIGINAL request. */
areq->base.complete(&areq->base, err);
return err;
err = op(req);
- ahash_op_unaligned_finish(req, err);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
+ ahash_restore_req(req, err);
return err;
}
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
- struct ahash_request_priv *priv = req->priv;
+ struct ahash_request *areq = req->data;
if (err == -EINPROGRESS)
return;
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
- struct ahash_request *areq = req->data;
-
- ahash_def_finup_finish2(areq, err);
+ ahash_restore_req(areq, err);
areq->base.complete(&areq->base, err);
}
goto out;
req->base.complete = ahash_def_finup_done2;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = crypto_ahash_reqtfm(req)->final(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
out:
- ahash_def_finup_finish2(req, err);
+ ahash_restore_req(req, err);
return err;
}
{
struct ahash_request *areq = req->data;
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = ahash_def_finup_finish1(areq, err);
+ if (areq->priv)
+ return;
areq->base.complete(&areq->base, err);
}
return err;
err = tfm->update(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
return ahash_def_finup_finish1(req, err);
}
struct aead_async_rsgl first_rsgl;
struct list_head list;
struct kiocb *iocb;
+ struct sock *sk;
unsigned int tsgls;
char iv[];
};
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
- struct sock *sk = _req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
- struct aead_request *req = aead_request_cast(_req);
+ struct aead_request *req = _req->data;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+ struct sock *sk = areq->sk;
struct scatterlist *sg = areq->tsgl;
struct aead_async_rsgl *rsgl;
struct kiocb *iocb = areq->iocb;
memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
INIT_LIST_HEAD(&areq->list);
areq->iocb = msg->msg_iocb;
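+	/* stash the socket: the async callback now receives the request,
+	 * not the socket, as its data pointer
+	 */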
+ areq->sk = sk;
memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
aead_request_set_tfm(req, tfm);
aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- aead_async_cb, sk);
+ aead_async_cb, req);
used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
struct rctx *rctx;
rctx = skcipher_request_ctx(req);
+
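+	/* A backlogged request has started running: report -EINPROGRESS
+	 * upstream only if no chunk has completed yet, swallow it otherwise
+	 */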
+ if (err == -EINPROGRESS) {
+ if (rctx->left != req->cryptlen)
+ return;
+ goto out;
+ }
+
subreq = &rctx->subreq;
subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
if (rctx->left)
return;
+out:
skcipher_request_complete(req, err);
}
struct rctx *rctx;
rctx = skcipher_request_ctx(req);
+
+ if (err == -EINPROGRESS) {
+ if (rctx->left != req->cryptlen)
+ return;
+ goto out;
+ }
+
subreq = &rctx->subreq;
subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
if (rctx->left)
return;
+out:
skcipher_request_complete(req, err);
}
struct rctx *rctx;
rctx = skcipher_request_ctx(req);
+
+ if (err == -EINPROGRESS) {
+ if (rctx->left != req->cryptlen)
+ return;
+ goto out;
+ }
+
subreq = &rctx->subreq;
subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
if (rctx->left)
return;
+out:
skcipher_request_complete(req, err);
}
struct rctx *rctx;
rctx = skcipher_request_ctx(req);
+
+ if (err == -EINPROGRESS) {
+ if (rctx->left != req->cryptlen)
+ return;
+ goto out;
+ }
+
subreq = &rctx->subreq;
subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
if (rctx->left)
return;
+out:
skcipher_request_complete(req, err);
}
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
- return map0->region_offset - map1->region_offset;
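+	/*
+	 * A plain "return a - b" would truncate the u64 difference to int,
+	 * so offsets differing only in the upper 32 bits could compare as
+	 * equal; compare explicitly instead.
+	 */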
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
}
/* Retrieve the nth entry referencing this spa */
tristate "DAX: direct access to differentiated memory"
default m if NVDIMM_DAX
depends on TRANSPARENT_HUGEPAGE
+ select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character
#include "dax.h"
static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
* @region - parent region
* @dev - device backing the character device
* @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
static int dax_dev_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
- int rc;
+ int rc, id;
struct file *filp = vmf->vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end);
- rcu_read_lock();
+ id = srcu_read_lock(&dax_srcu);
switch (pe_size) {
case PE_SIZE_PTE:
rc = __dax_dev_pte_fault(dax_dev, vmf);
default:
return VM_FAULT_FALLBACK;
}
- rcu_read_unlock();
+ srcu_read_unlock(&dax_srcu, id);
return rc;
}
* Note, rcu is not protecting the liveness of dax_dev, rcu is
* ensuring that any fault handlers that might have seen
* dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_rcu() has started will abort
+ * that start after synchronize_srcu() has started will abort
* upon seeing dax_dev->alive == false.
*/
dax_dev->alive = false;
- synchronize_rcu();
+ synchronize_srcu(&dax_srcu);
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
cdev_del(cdev);
device_unregister(dev);
This enables support for the Mediatek MT7530 Ethernet switch
chip.
+config NET_DSA_SMSC_LAN9303
+ tristate
+ select NET_DSA_TAG_LAN9303
+ ---help---
+ This enables support for the SMSC/Microchip LAN9303 3-port ethernet
+ switch chips.
+
+config NET_DSA_SMSC_LAN9303_I2C
+ tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
+ depends on NET_DSA
+ select NET_DSA_SMSC_LAN9303
+ select REGMAP_I2C
+ ---help---
+ Enable access functions if the SMSC/Microchip LAN9303 is configured
+ for I2C managed mode.
+
+config NET_DSA_SMSC_LAN9303_MDIO
+ tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ depends on NET_DSA
+ select NET_DSA_SMSC_LAN9303
+ ---help---
+ Enable access functions if the SMSC/Microchip LAN9303 is configured
+ for MDIO managed mode.
+
endmenu
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-y += b53/
obj-y += mv88e6xxx/
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
--- /dev/null
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/mii.h>
+
+#include "lan9303.h"
+
+#define LAN9303_CHIP_REV 0x14
+# define LAN9303_CHIP_ID 0x9303
+#define LAN9303_IRQ_CFG 0x15
+# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
+# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
+# define LAN9303_IRQ_CFG_IRQ_TYPE BIT(0)
+#define LAN9303_INT_STS 0x16
+# define LAN9303_INT_STS_PHY_INT2 BIT(27)
+# define LAN9303_INT_STS_PHY_INT1 BIT(26)
+#define LAN9303_INT_EN 0x17
+# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
+# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_HW_CFG 0x1D
+# define LAN9303_HW_CFG_READY BIT(27)
+# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
+# define LAN9303_HW_CFG_AMDX_EN_PORT1 BIT(25)
+#define LAN9303_PMI_DATA 0x29
+#define LAN9303_PMI_ACCESS 0x2A
+# define LAN9303_PMI_ACCESS_PHY_ADDR(x) (((x) & 0x1f) << 11)
+# define LAN9303_PMI_ACCESS_MIIRINDA(x) (((x) & 0x1f) << 6)
+# define LAN9303_PMI_ACCESS_MII_BUSY BIT(0)
+# define LAN9303_PMI_ACCESS_MII_WRITE BIT(1)
+#define LAN9303_MANUAL_FC_1 0x68
+#define LAN9303_MANUAL_FC_2 0x69
+#define LAN9303_MANUAL_FC_0 0x6a
+#define LAN9303_SWITCH_CSR_DATA 0x6b
+#define LAN9303_SWITCH_CSR_CMD 0x6c
+#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
+#define LAN9303_SWITCH_CSR_CMD_RW BIT(30)
+#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define LAN9303_VIRT_PHY_BASE 0x70
+#define LAN9303_VIRT_SPECIAL_CTRL 0x77
+
+#define LAN9303_SW_DEV_ID 0x0000
+#define LAN9303_SW_RESET 0x0001
+#define LAN9303_SW_RESET_RESET BIT(0)
+#define LAN9303_SW_IMR 0x0004
+#define LAN9303_SW_IPR 0x0005
+#define LAN9303_MAC_VER_ID_0 0x0400
+#define LAN9303_MAC_RX_CFG_0 0x0401
+# define LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES BIT(1)
+# define LAN9303_MAC_RX_CFG_X_RX_ENABLE BIT(0)
+#define LAN9303_MAC_RX_UNDSZE_CNT_0 0x0410
+#define LAN9303_MAC_RX_64_CNT_0 0x0411
+#define LAN9303_MAC_RX_127_CNT_0 0x0412
+#define LAN9303_MAC_RX_255_CNT_0 0x0413
+#define LAN9303_MAC_RX_511_CNT_0 0x0414
+#define LAN9303_MAC_RX_1023_CNT_0 0x0415
+#define LAN9303_MAC_RX_MAX_CNT_0 0x0416
+#define LAN9303_MAC_RX_OVRSZE_CNT_0 0x0417
+#define LAN9303_MAC_RX_PKTOK_CNT_0 0x0418
+#define LAN9303_MAC_RX_CRCERR_CNT_0 0x0419
+#define LAN9303_MAC_RX_MULCST_CNT_0 0x041a
+#define LAN9303_MAC_RX_BRDCST_CNT_0 0x041b
+#define LAN9303_MAC_RX_PAUSE_CNT_0 0x041c
+#define LAN9303_MAC_RX_FRAG_CNT_0 0x041d
+#define LAN9303_MAC_RX_JABB_CNT_0 0x041e
+#define LAN9303_MAC_RX_ALIGN_CNT_0 0x041f
+#define LAN9303_MAC_RX_PKTLEN_CNT_0 0x0420
+#define LAN9303_MAC_RX_GOODPKTLEN_CNT_0 0x0421
+#define LAN9303_MAC_RX_SYMBL_CNT_0 0x0422
+#define LAN9303_MAC_RX_CTLFRM_CNT_0 0x0423
+
+#define LAN9303_MAC_TX_CFG_0 0x0440
+# define LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT (21 << 2)
+# define LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE BIT(1)
+# define LAN9303_MAC_TX_CFG_X_TX_ENABLE BIT(0)
+#define LAN9303_MAC_TX_DEFER_CNT_0 0x0451
+#define LAN9303_MAC_TX_PAUSE_CNT_0 0x0452
+#define LAN9303_MAC_TX_PKTOK_CNT_0 0x0453
+#define LAN9303_MAC_TX_64_CNT_0 0x0454
+#define LAN9303_MAC_TX_127_CNT_0 0x0455
+#define LAN9303_MAC_TX_255_CNT_0 0x0456
+#define LAN9303_MAC_TX_511_CNT_0 0x0457
+#define LAN9303_MAC_TX_1023_CNT_0 0x0458
+#define LAN9303_MAC_TX_MAX_CNT_0 0x0459
+#define LAN9303_MAC_TX_UNDSZE_CNT_0 0x045a
+#define LAN9303_MAC_TX_PKTLEN_CNT_0 0x045c
+#define LAN9303_MAC_TX_BRDCST_CNT_0 0x045d
+#define LAN9303_MAC_TX_MULCST_CNT_0 0x045e
+#define LAN9303_MAC_TX_LATECOL_0 0x045f
+#define LAN9303_MAC_TX_EXCOL_CNT_0 0x0460
+#define LAN9303_MAC_TX_SNGLECOL_CNT_0 0x0461
+#define LAN9303_MAC_TX_MULTICOL_CNT_0 0x0462
+#define LAN9303_MAC_TX_TOTALCOL_CNT_0 0x0463
+
+#define LAN9303_MAC_VER_ID_1 0x0800
+#define LAN9303_MAC_RX_CFG_1 0x0801
+#define LAN9303_MAC_TX_CFG_1 0x0840
+#define LAN9303_MAC_VER_ID_2 0x0c00
+#define LAN9303_MAC_RX_CFG_2 0x0c01
+#define LAN9303_MAC_TX_CFG_2 0x0c40
+#define LAN9303_SWE_ALR_CMD 0x1800
+#define LAN9303_SWE_VLAN_CMD 0x180b
+# define LAN9303_SWE_VLAN_CMD_RNW BIT(5)
+# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4)
+#define LAN9303_SWE_VLAN_WR_DATA 0x180c
+#define LAN9303_SWE_VLAN_RD_DATA 0x180e
+# define LAN9303_SWE_VLAN_MEMBER_PORT2 BIT(17)
+# define LAN9303_SWE_VLAN_UNTAG_PORT2 BIT(16)
+# define LAN9303_SWE_VLAN_MEMBER_PORT1 BIT(15)
+# define LAN9303_SWE_VLAN_UNTAG_PORT1 BIT(14)
+# define LAN9303_SWE_VLAN_MEMBER_PORT0 BIT(13)
+# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12)
+#define LAN9303_SWE_VLAN_CMD_STS 0x1810
+#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840
+#define LAN9303_SWE_PORT_STATE 0x1843
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT1 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 BIT(2)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0)
+#define LAN9303_SWE_PORT_MIRROR 0x1846
+# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT1 BIT(6)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 BIT(5)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0)
+#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847
+#define LAN9303_BM_CFG 0x1c00
+#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0))
+
+#define LAN9303_PORT_0_OFFSET 0x400
+#define LAN9303_PORT_1_OFFSET 0x800
+#define LAN9303_PORT_2_OFFSET 0xc00
+
+/* the built-in PHYs are of type LAN911X */
+#define MII_LAN911X_SPECIAL_MODES 0x12
+#define MII_LAN911X_SPECIAL_CONTROL_STATUS 0x1f
+
+static const struct regmap_range lan9303_valid_regs[] = {
+ regmap_reg_range(0x14, 0x17), /* misc, interrupt */
+ regmap_reg_range(0x19, 0x19), /* endian test */
+ regmap_reg_range(0x1d, 0x1d), /* hardware config */
+ regmap_reg_range(0x23, 0x24), /* general purpose timer */
+ regmap_reg_range(0x27, 0x27), /* counter */
+ regmap_reg_range(0x29, 0x2a), /* PMI index regs */
+ regmap_reg_range(0x68, 0x6a), /* flow control */
+ regmap_reg_range(0x6b, 0x6c), /* switch fabric indirect regs */
+ regmap_reg_range(0x6d, 0x6f), /* misc */
+ regmap_reg_range(0x70, 0x77), /* virtual phy */
+ regmap_reg_range(0x78, 0x7a), /* GPIO */
+ regmap_reg_range(0x7c, 0x7e), /* MAC & reset */
+ regmap_reg_range(0x80, 0xb7), /* switch fabric direct regs (wr only) */
+};
+
+static const struct regmap_range lan9303_reserved_ranges[] = {
+ regmap_reg_range(0x00, 0x13),
+ regmap_reg_range(0x18, 0x18),
+ regmap_reg_range(0x1a, 0x1c),
+ regmap_reg_range(0x1e, 0x22),
+ regmap_reg_range(0x25, 0x26),
+ regmap_reg_range(0x28, 0x28),
+ regmap_reg_range(0x2b, 0x67),
+ regmap_reg_range(0x7b, 0x7b),
+ regmap_reg_range(0x7f, 0x7f),
+ regmap_reg_range(0xb8, 0xff),
+};
+
+const struct regmap_access_table lan9303_register_set = {
+ .yes_ranges = lan9303_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(lan9303_valid_regs),
+ .no_ranges = lan9303_reserved_ranges,
+ .n_no_ranges = ARRAY_SIZE(lan9303_reserved_ranges),
+};
+EXPORT_SYMBOL(lan9303_register_set);
+
+static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
+{
+ int ret, i;
+
+ /* We can lose arbitration in the I2C case, because the device
+ * tries to detect and read an external EEPROM after reset and acts as
+ * a master on the shared I2C bus itself. This conflicts with our
+ * attempts to access the device as a slave at the same moment.
+ */
+ for (i = 0; i < 5; i++) {
+ ret = regmap_read(regmap, offset, reg);
+ if (!ret)
+ return 0;
+ if (ret != -EAGAIN)
+ break;
+ msleep(500);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
+{
+ int ret;
+ u32 val;
+
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, &val);
+ if (ret)
+ return ret;
+
+ return val & 0xffff;
+}
+
+static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
+{
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val);
+}
+
+static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < 25; i++) {
+ ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read pmi access status: %d\n", ret);
+ return ret;
+ }
+ if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
+ return 0;
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum)
+{
+ int ret;
+ u32 val;
+
+ val = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ val |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* start the MII read cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, val);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* read the result of this operation */
+ ret = lan9303_read(chip->regmap, LAN9303_PMI_DATA, &val);
+ if (ret)
+ goto on_error;
+
+ mutex_unlock(&chip->indirect_mutex);
+
+ return val & 0xffff;
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum,
+ unsigned int val)
+{
+ int ret;
+ u32 reg;
+
+ reg = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ reg |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+ reg |= LAN9303_PMI_ACCESS_MII_WRITE;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* write the data first... */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_DATA, val);
+ if (ret)
+ goto on_error;
+
+ /* ...then start the MII write cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, reg);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < 25; i++) {
+ ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read csr command status: %d\n", ret);
+ return ret;
+ }
+ if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
+ return 0;
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr data reg: %d\n", ret);
+ goto on_error;
+ }
+
+ /* trigger write */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_RW;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* trigger read */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+ goto on_error;
+ }
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret)
+ dev_err(chip->dev, "Failed to read csr data reg: %d\n", ret);
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_detect_phy_setup(struct lan9303 *chip)
+{
+ int reg;
+
+ /* Depending on the 'phy_addr_sel_strap' setting, the three phys are
+ * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
+ * 'phy_addr_sel_strap' setting directly, so we need a test to tell
+ * which configuration is active:
+ * Special reg 18 of phy 3 reads as 0x0000 if 'phy_addr_sel_strap' is 0
+ * and the IDs are 0-1-2, else it contains something different from
+ * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3.
+ */
+ reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES);
+ if (reg < 0) {
+ dev_err(chip->dev, "Failed to detect phy config: %d\n", reg);
+ return reg;
+ }
+
+ if (reg != 0)
+ chip->phy_addr_sel_strap = 1;
+ else
+ chip->phy_addr_sel_strap = 0;
+
+ dev_dbg(chip->dev, "Phy setup '%s' detected\n",
+ chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+
+ return 0;
+}
+
+#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET)
+#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET)
+
+static int lan9303_disable_packet_processing(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+ /* disable RX, but otherwise keep the register's reset defaults */
+ ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES);
+ if (ret)
+ return ret;
+
+ /* disable TX, but otherwise keep the register's reset defaults */
+ return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE);
+}
+
+static int lan9303_enable_packet_processing(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+ /* enable RX and otherwise keep the register's reset defaults */
+ ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES |
+ LAN9303_MAC_RX_CFG_X_RX_ENABLE);
+ if (ret)
+ return ret;
+
+ /* enable TX and otherwise keep the register's reset defaults */
+ return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE |
+ LAN9303_MAC_TX_CFG_X_TX_ENABLE);
+}
+
+/* We want a special working switch:
+ * - do not forward packets between port 1 and 2
+ * - forward everything from port 1 to port 0
+ * - forward everything from port 2 to port 0
+ * - forward special tagged packets from port 0 to port 1 *or* port 2
+ */
+static int lan9303_separate_ports(struct lan9303 *chip)
+{
+ int ret;
+
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
+ LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 |
+ LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING |
+ LAN9303_SWE_PORT_MIRROR_SNIFF_ALL);
+ if (ret)
+ return ret;
+
+ /* enable defining the destination port via special VLAN tagging
+ * for port 0
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
+ 0x03);
+ if (ret)
+ return ret;
+
+ /* tag incoming packets at port 1 and 2 on their way to port 0 so
+ * that their source port can be discovered
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE,
+ LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0);
+ if (ret)
+ return ret;
+
+ /* prevent port 1 and 2 from forwarding packets on their own */
+ return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT2);
+}
+
+static int lan9303_handle_reset(struct lan9303 *chip)
+{
+ if (!chip->reset_gpio)
+ return 0;
+
+ if (chip->reset_duration != 0)
+ msleep(chip->reset_duration);
+
+ /* release (deassert) reset and activate the device */
+ gpiod_set_value_cansleep(chip->reset_gpio, 0);
+
+ return 0;
+}
+
+/* stop processing packets for all ports */
+static int lan9303_disable_processing(struct lan9303 *chip)
+{
+ int ret;
+
+ ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET);
+ if (ret)
+ return ret;
+ ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET);
+ if (ret)
+ return ret;
+ return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET);
+}
+
+static int lan9303_check_device(struct lan9303 *chip)
+{
+ int ret;
+ u32 reg;
+
+ ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip revision register: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
+ if ((reg >> 16) != LAN9303_CHIP_ID) {
+ dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ reg >> 16);
+ return -ENODEV;
+ }
+
+ /* The default state of the LAN9303 device is to forward packets between
+ * all ports (if not configured differently by an external EEPROM).
+ * The initial state of a DSA device must be forwarding packets only
+ * between the external and the internal ports and no forwarding
+ * between the external ports. In preparation, we stop all packet
+ * handling for now, until the LAN9303 device is re-programmed accordingly.
+ */
+ ret = lan9303_disable_processing(chip);
+ if (ret)
+ dev_warn(chip->dev, "failed to disable switching %d\n", ret);
+
+ dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+
+ ret = lan9303_detect_phy_setup(chip);
+ if (ret) {
+ dev_err(chip->dev,
+ "failed to discover phy bootstrap setup: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- DSA -----------------------------------*/
+
+static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_LAN9303;
+}
+
+static int lan9303_setup(struct dsa_switch *ds)
+{
+ struct lan9303 *chip = ds->priv;
+ int ret;
+
+ /* Make sure that port 0 is the cpu port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ dev_err(chip->dev, "port 0 is not the CPU port\n");
+ return -EINVAL;
+ }
+
+ ret = lan9303_separate_ports(chip);
+ if (ret)
+ dev_err(chip->dev, "failed to separate ports %d\n", ret);
+
+ ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET);
+ if (ret)
+ dev_err(chip->dev, "failed to re-enable switching %d\n", ret);
+
+ return 0;
+}
+
+struct lan9303_mib_desc {
+ unsigned int offset; /* offset of first MAC */
+ const char *name;
+};
+
+static const struct lan9303_mib_desc lan9303_mib[] = {
+ { .offset = LAN9303_MAC_RX_BRDCST_CNT_0, .name = "RxBroad", },
+ { .offset = LAN9303_MAC_RX_PAUSE_CNT_0, .name = "RxPause", },
+ { .offset = LAN9303_MAC_RX_MULCST_CNT_0, .name = "RxMulti", },
+ { .offset = LAN9303_MAC_RX_PKTOK_CNT_0, .name = "RxOk", },
+ { .offset = LAN9303_MAC_RX_CRCERR_CNT_0, .name = "RxCrcErr", },
+ { .offset = LAN9303_MAC_RX_ALIGN_CNT_0, .name = "RxAlignErr", },
+ { .offset = LAN9303_MAC_RX_JABB_CNT_0, .name = "RxJabber", },
+ { .offset = LAN9303_MAC_RX_FRAG_CNT_0, .name = "RxFragment", },
+ { .offset = LAN9303_MAC_RX_64_CNT_0, .name = "Rx64Byte", },
+ { .offset = LAN9303_MAC_RX_127_CNT_0, .name = "Rx128Byte", },
+ { .offset = LAN9303_MAC_RX_255_CNT_0, .name = "Rx256Byte", },
+ { .offset = LAN9303_MAC_RX_511_CNT_0, .name = "Rx512Byte", },
+ { .offset = LAN9303_MAC_RX_1023_CNT_0, .name = "Rx1024Byte", },
+ { .offset = LAN9303_MAC_RX_MAX_CNT_0, .name = "RxMaxByte", },
+ { .offset = LAN9303_MAC_RX_PKTLEN_CNT_0, .name = "RxByteCnt", },
+ { .offset = LAN9303_MAC_RX_SYMBL_CNT_0, .name = "RxSymbolCnt", },
+ { .offset = LAN9303_MAC_RX_CTLFRM_CNT_0, .name = "RxCfs", },
+ { .offset = LAN9303_MAC_RX_OVRSZE_CNT_0, .name = "RxOverFlow", },
+ { .offset = LAN9303_MAC_TX_UNDSZE_CNT_0, .name = "TxShort", },
+ { .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
+ { .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
+ { .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
+ { .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
+ { .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
+ { .offset = LAN9303_MAC_TX_511_CNT_0, .name = "Tx512Byte", },
+ { .offset = LAN9303_MAC_TX_1023_CNT_0, .name = "Tx1024Byte", },
+ { .offset = LAN9303_MAC_TX_MAX_CNT_0, .name = "TxMaxByte", },
+ { .offset = LAN9303_MAC_TX_PKTLEN_CNT_0, .name = "TxByteCnt", },
+ { .offset = LAN9303_MAC_TX_PKTOK_CNT_0, .name = "TxOk", },
+ { .offset = LAN9303_MAC_TX_TOTALCOL_CNT_0, .name = "TxCollision", },
+ { .offset = LAN9303_MAC_TX_MULTICOL_CNT_0, .name = "TxMultiCol", },
+ { .offset = LAN9303_MAC_TX_SNGLECOL_CNT_0, .name = "TxSingleCol", },
+ { .offset = LAN9303_MAC_TX_EXCOL_CNT_0, .name = "TxExcCol", },
+ { .offset = LAN9303_MAC_TX_DEFER_CNT_0, .name = "TxDefer", },
+ { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
+};
+
+static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct lan9303 *chip = ds->priv;
+ u32 reg;
+ unsigned int u, poff;
+ int ret;
+
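+	/* per-port counter blocks are 0x400 register addresses apart */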
+ poff = port * 0x400;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ ret = lan9303_read_switch_reg(chip,
+ lan9303_mib[u].offset + poff,
+ &reg);
+ if (ret)
+ dev_warn(chip->dev, "Reading status reg %u failed\n",
+ lan9303_mib[u].offset + poff);
+ data[u] = reg;
+ }
+}
+
+static int lan9303_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(lan9303_mib);
+}
+
+static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_sel_strap;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_read(chip, regnum);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return lan9303_port_phy_reg_read(chip, phy, regnum);
+}
+
+static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_sel_strap;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_write(chip, regnum, val);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return lan9303_phy_reg_write(chip, phy, regnum, val);
+}
+
+static int lan9303_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct lan9303 *chip = ds->priv;
+
+ /* enable internal packet processing */
+ switch (port) {
+ case 1:
+ return lan9303_enable_packet_processing(chip,
+ LAN9303_PORT_1_OFFSET);
+ case 2:
+ return lan9303_enable_packet_processing(chip,
+ LAN9303_PORT_2_OFFSET);
+ default:
+ dev_dbg(chip->dev,
+ "Error: request to power up invalid port %d\n", port);
+ }
+
+ return -ENODEV;
+}
+
+static void lan9303_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct lan9303 *chip = ds->priv;
+
+ /* disable internal packet processing */
+ switch (port) {
+ case 1:
+ lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET);
+ lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1,
+ MII_BMCR, BMCR_PDOWN);
+ break;
+ case 2:
+ lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET);
+ lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2,
+ MII_BMCR, BMCR_PDOWN);
+ break;
+ default:
+ dev_dbg(chip->dev,
+ "Error: request to power down invalid port %d\n", port);
+ }
+}
+
+static struct dsa_switch_ops lan9303_switch_ops = {
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+};
+
+static int lan9303_register_switch(struct lan9303 *chip)
+{
+ chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS);
+ if (!chip->ds)
+ return -ENOMEM;
+
+ chip->ds->priv = chip;
+ chip->ds->ops = &lan9303_switch_ops;
+ chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+
+ return dsa_register_switch(chip->ds, chip->dev);
+}
+
+static void lan9303_probe_reset_gpio(struct lan9303 *chip,
+ struct device_node *np)
+{
+ chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
+ GPIOD_OUT_LOW);
+
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev, "No reset GPIO defined\n");
+ return;
+ }
+
+ chip->reset_duration = 200;
+
+ if (np) {
+ of_property_read_u32(np, "reset-duration",
+ &chip->reset_duration);
+ } else {
+ dev_dbg(chip->dev, "reset duration defaults to 200 ms\n");
+ }
+
+ /* A sane reset duration should not be longer than 1s */
+ if (chip->reset_duration > 1000)
+ chip->reset_duration = 1000;
+}
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np)
+{
+ int ret;
+
+ mutex_init(&chip->indirect_mutex);
+
+ lan9303_probe_reset_gpio(chip, np);
+
+ ret = lan9303_handle_reset(chip);
+ if (ret)
+ return ret;
+
+ ret = lan9303_check_device(chip);
+ if (ret)
+ return ret;
+
+ ret = lan9303_register_switch(chip);
+ if (ret) {
+ dev_dbg(chip->dev, "Failed to register switch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_probe);
+
+int lan9303_remove(struct lan9303 *chip)
+{
+ int rc;
+
+ rc = lan9303_disable_processing(chip);
+ if (rc != 0)
+ dev_warn(chip->dev, "shutting down failed\n");
+
+ dsa_unregister_switch(chip->ds);
+
+ /* assert reset to the whole device to prevent it from doing anything */
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+ gpiod_unexport(chip->reset_gpio);
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_remove);
+
+MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include <net/dsa.h>
+
+struct lan9303 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct gpio_desc *reset_gpio;
+ u32 reset_duration; /* in [ms] */
+ bool phy_addr_sel_strap;
+ struct dsa_switch *ds;
+ struct mutex indirect_mutex; /* protect indexed register access */
+};
+
+extern const struct regmap_access_table lan9303_register_set;
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np);
+int lan9303_remove(struct lan9303 *chip);
--- /dev/null
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+struct lan9303_i2c {
+ struct i2c_client *device;
+ struct lan9303 chip;
+};
+
+static const struct regmap_config lan9303_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lan9303_i2c *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&client->dev, sizeof(struct lan9303_i2c),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init_i2c(client,
+ &lan9303_i2c_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = client;
+ i2c_set_clientdata(client, sw_dev);
+ sw_dev->chip.dev = &client->dev;
+
+ ret = lan9303_probe(&sw_dev->chip, client->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&client->dev, "LAN9303 I2C driver loaded successfully\n");
+
+ return 0;
+}
+
+static int lan9303_i2c_remove(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev;
+
+ sw_dev = i2c_get_clientdata(client);
+ if (!sw_dev)
+ return -ENODEV;
+
+ return lan9303_remove(&sw_dev->chip);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct i2c_device_id lan9303_i2c_id[] = {
+ { "lan9303", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
+
+static const struct of_device_id lan9303_i2c_of_match[] = {
+ { .compatible = "smsc,lan9303-i2c", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
+
+static struct i2c_driver lan9303_i2c_driver = {
+ .driver = {
+ .name = "LAN9303_I2C",
+ .of_match_table = of_match_ptr(lan9303_i2c_of_match),
+ },
+ .probe = lan9303_i2c_probe,
+ .remove = lan9303_i2c_remove,
+ .id_table = lan9303_i2c_id,
+};
+module_i2c_driver(lan9303_i2c_driver);
+
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in I2C managed mode");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ *
+ * Partially based on a patch from
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+/* Generate phy-addr and -reg from the input address */
+#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
+#define PHY_REG(x) (((x) >> 1) & 0x1f)
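+/* E.g. chip register offset 0x50 maps to MDIO PHY address
+ * ((0x50 >> 6) + 0x10) & 0x1f = 0x11 and MII register
+ * (0x50 >> 1) & 0x1f = 0x08.
+ */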
+
+struct lan9303_mdio {
+ struct mdio_device *device;
+ struct lan9303 chip;
+};
+
+static void lan9303_mdio_real_write(struct mdio_device *mdio, int reg, u16 val)
+{
+ mdio->bus->write(mdio->bus, PHY_ADDR(reg), PHY_REG(reg), val);
+}
+
+static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ mutex_lock(&sw_dev->device->bus->mdio_lock);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
+
+static u16 lan9303_mdio_real_read(struct mdio_device *mdio, int reg)
+{
+ return mdio->bus->read(mdio->bus, PHY_ADDR(reg), PHY_REG(reg));
+}
+
+static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ mutex_lock(&sw_dev->device->bus->mdio_lock);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
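+
+/* A 32-bit chip register is thus split across two 16-bit MDIO
+ * transactions: the low word at the register offset, the high word at
+ * offset + 2. E.g. a read of offset 0x50 fetches
+ * (PHY_ADDR(0x50), PHY_REG(0x50)) for bits 15:0 and
+ * (PHY_ADDR(0x52), PHY_REG(0x52)) for bits 31:16.
+ */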
+
+static const struct regmap_config lan9303_mdio_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .reg_read = lan9303_mdio_read,
+ .reg_write = lan9303_mdio_write,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&mdiodev->dev, sizeof(struct lan9303_mdio),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev,
+ &lan9303_mdio_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = mdiodev;
+ dev_set_drvdata(&mdiodev->dev, sw_dev);
+ sw_dev->chip.dev = &mdiodev->dev;
+
+ ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&mdiodev->dev, "LAN9303 MDIO driver loaded successfully\n");
+
+ return 0;
+}
+
+static void lan9303_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_remove(&sw_dev->chip);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct of_device_id lan9303_mdio_of_match[] = {
+ { .compatible = "smsc,lan9303-mdio" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
+
+static struct mdio_driver lan9303_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "LAN9303_MDIO",
+ .of_match_table = of_match_ptr(lan9303_mdio_of_match),
+ },
+ .probe = lan9303_mdio_probe,
+ .remove = lan9303_mdio_remove,
+};
+mdio_module_driver(lan9303_mdio_driver);
+
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in MDIO managed mode");
+MODULE_LICENSE("GPL v2");
ctx->status = status;
- oct_dev = lio_get_device(ctx->octeon_id);
-
WRITE_ONCE(ctx->cond, 1);
/* This barrier is required to be sure that the response has been
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
switch (sdevid) {
case PCI_SUBSYS_DEVID_81XX_BGX:
+ case PCI_SUBSYS_DEVID_81XX_RGX:
max_bgx_per_node = MAX_BGX_PER_CN81XX;
break;
case PCI_SUBSYS_DEVID_83XX_BGX:
/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX 0xA254
#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
if (!ltb->buff)
return;
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
if (!adapter->failover)
send_request_unmap(adapter, ltb->map_id);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
return 0;
}
-static void release_bounce_buffer(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
-
- if (!adapter->bounce_buffer)
- return;
-
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(dev, adapter->bounce_buffer_dma,
- adapter->bounce_buffer_size,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
-
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
-}
-
-static int init_bounce_buffer(struct net_device *netdev)
+static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
- char *buf;
- int buf_sz;
- dma_addr_t map_addr;
-
- buf_sz = (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
- buf = kmalloc(adapter->bounce_buffer_size, GFP_KERNEL);
- if (!buf)
- return -1;
+ struct ibmvnic_error_buff *error_buff, *tmp;
+ unsigned long flags;
- map_addr = dma_map_single(dev, buf, buf_sz, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, map_addr)) {
- dev_err(dev, "Couldn't map bounce buffer\n");
- kfree(buf);
- return -1;
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
+ list_del(&error_buff->list);
+ dma_unmap_single(dev, error_buff->dma, error_buff->len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ kfree(error_buff);
}
-
- adapter->bounce_buffer = buf;
- adapter->bounce_buffer_size = buf_sz;
- adapter->bounce_buffer_dma = map_addr;
- return 0;
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
static int ibmvnic_login(struct net_device *netdev)
static void release_resources(struct ibmvnic_adapter *adapter)
{
- release_bounce_buffer(adapter);
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_sub_crqs(adapter);
- release_crq_queue(adapter);
-
release_stats_token(adapter);
+ release_error_buffers(adapter);
}
static int ibmvnic_open(struct net_device *netdev)
if (rc)
goto ibmvnic_open_fail;
- rc = init_bounce_buffer(netdev);
- if (rc)
- goto ibmvnic_open_fail;
-
replenish_pools(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
return -ENOMEM;
}
+static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ disable_irq(adapter->tx_scrq[i]->irq);
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ disable_irq(adapter->rx_scrq[i]->irq);
+ }
+}
+
static int ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
adapter->closing = true;
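+ /* Quiesce the sub-CRQ interrupts first so that no further
+ * completion work is scheduled while the queues are torn down.
+ */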
+ disable_sub_crqs(adapter);
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
- bool used_bounce = false;
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
tx_buff->index = index;
tx_buff->pool_index = queue_num;
tx_buff->last_frag = true;
- tx_buff->used_bounce = used_bounce;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
goto out;
}
- atomic_inc(&tx_scrq->used);
-
- if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
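+ /* Increment and test in one atomic step so the check cannot race
+ * with the decrement in the completion path.
+ */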
+ if (atomic_inc_return(&tx_scrq->used)
+ >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
scrq->crq_num);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Failed to release sub-CRQ %16lx, rc = %ld\n",
+ scrq->crq_num, rc);
+ }
+
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
struct ibmvnic_sub_crq_queue *scrq;
int rc;
- scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
+ scrq = kzalloc(sizeof(*scrq), GFP_ATOMIC);
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
- memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ scrq->msgs =
+ (union sub_crq *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 2);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
goto zero_page_failed;
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
- scrq->cur = 0;
- atomic_set(&scrq->used, 0);
- scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
netdev_dbg(adapter->netdev,
continue;
txbuff->data_dma[j] = 0;
- txbuff->used_bounce = false;
}
/* if sub_crq was sent indirectly */
first = txbuff->indir_arr[0].generic.first;
}
if (txbuff->last_frag) {
- atomic_dec(&scrq->used);
-
- if (atomic_read(&scrq->used) <=
+ if (atomic_sub_return(next->tx_comp.num_comps,
+ &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
netif_subqueue_stopped(adapter->netdev,
txbuff->skb)) {
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
dma_addr_t rsp_buffer_token;
dma_addr_t buffer_token;
size_t rsp_buffer_size;
union ibmvnic_crq crq;
- unsigned long flags;
size_t buffer_size;
__be64 *tx_list_p;
__be64 *rx_list_p;
dev_err(dev, "Couldn't map login rsp buffer\n");
goto buf_rsp_map_failed;
}
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dev_err(dev, "Couldn't allocate inflight_cmd\n");
- goto inflight_alloc_failed;
- }
+
adapter->login_buf = login_buffer;
adapter->login_buf_token = buffer_token;
adapter->login_buf_sz = buffer_size;
crq.login.cmd = LOGIN;
crq.login.ioba = cpu_to_be32(buffer_token);
crq.login.len = cpu_to_be32(buffer_size);
-
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
ibmvnic_send_crq(adapter, &crq);
return;
-inflight_alloc_failed:
- dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
- DMA_FROM_DEVICE);
buf_rsp_map_failed:
kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
struct ibmvnic_adapter *adapter)
{
int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_error_buff *error_buff;
union ibmvnic_crq new_crq;
return;
}
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dma_unmap_single(dev, error_buff->dma, detail_len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
error_buff->len = detail_len;
error_buff->error_id = crq->error_indication.error_id;
new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
new_crq.request_error_info.len = cpu_to_be32(detail_len);
new_crq.request_error_info.error_id = crq->error_indication.error_id;
-
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
ibmvnic_send_crq(adapter, &new_crq);
}
}
}
-static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp2;
- unsigned long flags;
- unsigned long flags2;
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
- switch (inflight_cmd->crq.generic.cmd) {
- case LOGIN:
- dma_unmap_single(dev, adapter->login_buf_token,
- adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz,
- DMA_BIDIRECTIONAL);
- kfree(adapter->login_rsp_buf);
- kfree(adapter->login_buf);
- break;
- case REQUEST_ERROR_INFO:
- spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry_safe(error_buff, tmp2,
- &adapter->errors, list) {
- dma_unmap_single(dev, error_buff->dma,
- error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- list_del(&error_buff->list);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock,
- flags2);
- break;
- }
- list_del(&inflight_cmd->list);
- kfree(inflight_cmd);
- }
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-}
-
static void ibmvnic_xport_event(struct work_struct *work)
{
struct ibmvnic_adapter *adapter = container_of(work,
struct device *dev = &adapter->vdev->dev;
long rc;
- ibmvnic_free_inflight(adapter);
release_sub_crqs(adapter);
if (adapter->migrated) {
rc = ibmvnic_reenable_crq_queue(adapter);
struct ibmvnic_generic_crq *gen_crq = &crq->generic;
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
+ u64 *u64_crq = (u64 *)crq;
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- ((unsigned long int *)crq)[0],
- ((unsigned long int *)crq)[1]);
+ (unsigned long int)cpu_to_be64(u64_crq[0]),
+ (unsigned long int)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
struct ibmvnic_adapter *adapter = instance;
- unsigned long flags;
- spin_lock_irqsave(&adapter->crq.lock, flags);
- vio_disable_interrupts(adapter->vdev);
tasklet_schedule(&adapter->tasklet);
- spin_unlock_irqrestore(&adapter->crq.lock, flags);
return IRQ_HANDLED;
}
{
struct ibmvnic_adapter *adapter = data;
struct ibmvnic_crq_queue *queue = &adapter->crq;
- struct vio_dev *vdev = adapter->vdev;
union ibmvnic_crq *crq;
unsigned long flags;
bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- vio_disable_interrupts(vdev);
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
- vio_enable_interrupts(vdev);
- crq = ibmvnic_next_crq(adapter);
- if (crq) {
- vio_disable_interrupts(vdev);
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- } else {
- /* remain in tasklet until all
- * capabilities responses are received
- */
- if (!adapter->wait_capability)
- done = true;
- }
+
+ /* remain in tasklet until all
+ * capabilities responses are received
+ */
+ if (!adapter->wait_capability)
+ done = true;
}
/* if capabilities CRQ's were sent in this tasklet, the following
* tasklet must wait until all responses are received
spin_lock_init(&adapter->stats_lock);
INIT_LIST_HEAD(&adapter->errors);
- INIT_LIST_HEAD(&adapter->inflight);
spin_lock_init(&adapter->error_list_lock);
- spin_lock_init(&adapter->inflight_lock);
rc = ibmvnic_init(adapter);
if (rc) {
static int ibmvnic_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev_get_drvdata(&dev->dev);
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
+
+ release_resources(adapter);
+ release_sub_crqs(adapter);
+ release_crq_queue(adapter);
+
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
adapter = netdev_priv(netdev);
ret += PAGE_SIZE; /* the crq message queue */
- ret += adapter->bounce_buffer_size;
ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
u8 first;
u8 cmd;
u8 mac_addr[6];
- struct ibmvnic_rc rc;
u8 reserved[4];
+ struct ibmvnic_rc rc;
} __packed __aligned(8);
struct ibmvnic_multicast_ctrl {
int index;
int pool_index;
bool last_frag;
- bool used_bounce;
union sub_crq indir_arr[6];
u8 hdr_data[140];
dma_addr_t indir_dma;
__be32 error_id;
};
-struct ibmvnic_inflight_cmd {
- union ibmvnic_crq crq;
- struct list_head list;
-};
-
struct ibmvnic_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
dma_addr_t ip_offload_ctrl_tok;
bool migrated;
u32 msg_enable;
- void *bounce_buffer;
- int bounce_buffer_size;
- dma_addr_t bounce_buffer_dma;
/* Statistics */
struct ibmvnic_statistics stats;
struct completion fw_done;
- /* in-flight commands that allocate and/or map memory*/
- struct list_head inflight;
- spinlock_t inflight_lock;
-
/* partner capabilities */
u64 min_tx_queues;
u64 min_rx_queues;
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
+#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
-#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IXGBE_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
+
+static inline int ixgbe_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int ixgbe_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IXGBE_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ixgbe_compute_pad(rx_buf_len);
+}
+
+#define IXGBE_SKB_PAD ixgbe_skb_pad()
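+/* E.g. with 4K pages, 64-byte cache lines and NET_IP_ALIGN = 2:
+ * rx_buf_len = 1536 - 2 = 1534 and page_size = ALIGN(1534, 2048) = 2048,
+ * so the pad is SKB_WITH_OVERHEAD(2048) - 1534 -- roughly 190 bytes,
+ * the exact value depending on sizeof(struct skb_shared_info).
+ */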
#else
-#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/*
return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
if (ring_uses_build_skb(ring))
- return IXGBE_MAX_FRAME_BUILD_SKB;
+ return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
return SUPPORTED_10000baseKR_Full;
default:
return SUPPORTED_10000baseKX4_Full |
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
*/
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
- int i = 0, j = 0;
+ int i;
char rname[16];
u32 regs[64];
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
- pr_info("%-15s %08x\n", reginfo->name,
- IXGBE_READ_REG(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
- for (i = 0; i < 8; i++) {
- snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
- pr_err("%-15s", rname);
+ i = 0;
+ while (i < 64) {
+ int j;
+ char buf[9 * 8 + 1];
+ char *p = buf;
+
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
for (j = 0; j < 8; j++)
- pr_cont(" %08x", regs[i*8+j]);
- pr_cont("\n");
+ p += sprintf(p, " %08x", regs[i++]);
+ pr_err("%-15s%s\n", rname, buf);
}
}
struct ixgbe_ring *rx_ring;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer_info;
- u32 staterr;
int i = 0;
if (!netif_msg_hw(adapter))
tx_buffer = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (dma_unmap_len(tx_buffer, len) > 0) {
- pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
+ const char *ring_desc;
+
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ ring_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+ pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp,
- tx_buffer->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- pr_cont(" NTC/U\n");
- else if (i == tx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == tx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
+ tx_buffer->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
tx_buffer->skb)
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("%s%s%s",
+ pr_info("%s%s%s\n",
"R [desc] [ PktBuf A0] ",
"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
- "<-- Adv Rx Read format\n");
- pr_info("%s%s%s",
+ "<-- Adv Rx Read format");
+ pr_info("%s%s%s\n",
"RWB[desc] [PcsmIpSHl PtRs] ",
"[vl er S cks ln] ---------------- [bi->skb ] ",
- "<-- Adv Rx Write-Back format\n");
+ "<-- Adv Rx Write-Back format");
for (i = 0; i < rx_ring->count; i++) {
+ const char *ring_desc;
+
+ if (i == rx_ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+
rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- if (staterr & IXGBE_RXD_STAT_DD) {
+ if (rx_desc->wb.upper.length) {
/* Descriptor Done */
- pr_info("RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
} else {
- pr_info("R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)rx_buffer_info->dma,
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
rx_buffer_info->dma) {
ixgbe_rx_bufsz(rx_ring), true);
}
}
-
- if (i == rx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == rx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
-
}
}
}
/* Limit the maximum frame size so we don't overrun the skb */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
- rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
- if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
- (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
- adapter->num_vfs = 0;
+ max_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
- } else {
- adapter->num_vfs = max_vfs;
}
}
#endif /* CONFIG_PCI_IOV */
ixgbe_init_mbx_params_pf(hw);
hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
- ixgbe_enable_sriov(adapter);
+ ixgbe_enable_sriov(adapter, max_vfs);
skip_sriov:
#endif
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
/* Setup link based on the new speed settings */
- hw->phy.ops.setup_link(hw);
+ if (hw->phy.ops.setup_link)
+ hw->phy.ops.setup_link(hw);
return 0;
}
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
-
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
- e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
-
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
- adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+ int num_vf_macvlans, i;
num_vf_macvlans = hw->mac.num_rar_entries -
- (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+ (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return;
- adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
- sizeof(struct vf_macvlans),
- GFP_KERNEL);
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
- mv_list->vf = -1;
- mv_list->free = true;
- list_add(&mv_list->l, &adapter->vf_mvs.l);
- mv_list++;
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].l, &adapter->vf_mvs.l);
}
+ adapter->mv_list = mv_list;
}
+}
+
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ /* Allocate memory for per VF control structures */
+ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!adapter->vfinfo)
+ return -ENOMEM;
+
+ adapter->num_vfs = num_vfs;
+
+ ixgbe_alloc_vf_macvlans(adapter, num_vfs);
+ adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
/* Initialize default switching mode VEB */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* If call to enable VFs succeeded then allocate memory
- * for per VF control structures.
- */
- adapter->vfinfo =
- kcalloc(adapter->num_vfs,
- sizeof(struct vf_data_storage), GFP_KERNEL);
- if (adapter->vfinfo) {
- /* limit trafffic classes based on VFs enabled */
- if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (adapter->num_vfs < 16)) {
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- } else if (adapter->num_vfs < 32) {
- adapter->dcb_cfg.num_tcs.pg_tcs = 4;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
- } else {
- adapter->dcb_cfg.num_tcs.pg_tcs = 1;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
- }
-
- /* Disable RSC when in SR-IOV mode */
- adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
- IXGBE_FLAG2_RSC_ENABLED);
+ /* limit traffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
- for (i = 0; i < adapter->num_vfs; i++) {
- /* enable spoof checking for all VFs */
- adapter->vfinfo[i].spoofchk_enabled = true;
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
- /* We support VF RSS querying only for 82599 and x540
- * devices at the moment. These devices share RSS
- * indirection table and RSS hash key with PF therefore
- * we want to disable the querying by default.
- */
- adapter->vfinfo[i].rss_query_enabled = 0;
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ adapter->vfinfo[i].spoofchk_enabled = true;
- /* Untrust all VFs */
- adapter->vfinfo[i].trusted = false;
+ /* We support VF RSS querying only for 82599 and x540
+ * devices at the moment. These devices share RSS
+ * indirection table and RSS hash key with PF therefore
+ * we want to disable the querying by default.
+ */
+ adapter->vfinfo[i].rss_query_enabled = 0;
- /* set the default xcast mode */
- adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
- }
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
- return 0;
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
- return -ENOMEM;
+ e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
+ return 0;
}
/**
/* Note this function is called when the user wants to enable SR-IOV
* VFs using the now deprecated module parameter
*/
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
int pre_existing_vfs = 0;
+ unsigned int num_vfs;
pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
+ if (!pre_existing_vfs && !max_vfs)
return;
/* If there are pre-existing VFs then we have to force
* have been created via the new PCI SR-IOV sysfs interface.
*/
if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
+ num_vfs = pre_existing_vfs;
dev_warn(&adapter->pdev->dev,
"Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
} else {
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
+ num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
return;
}
}
- if (!__ixgbe_enable_sriov(adapter)) {
+ if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
ixgbe_get_vfs(adapter);
return;
}
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err = 0;
+ u8 num_tc;
int i;
int pre_existing_vfs = pci_num_vf(dev);
return err;
/* While the SR-IOV capability structure reports total VFs to be 64,
- * we have to limit the actual number allocated based on two factors.
+ * we limit the actual number allocated as below based on two factors.
+ * Num_TCs MAX_VFs
+ * 1 63
+ * <=4 31
+ * >4 15
* First, we reserve some transmit/receive resources for the PF.
* Second, VMDQ also uses the same pools that SR-IOV does. We need to
* account for this, so that we don't accidentally allocate more VFs
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
- return -EPERM;
+ num_tc = netdev_get_num_tc(adapter->netdev);
- adapter->num_vfs = num_vfs;
+ if (num_tc > 4) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
+ return -EPERM;
+ }
+ } else if ((num_tc > 1) && (num_tc <= 4)) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
+ return -EPERM;
+ }
+ } else {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
+ return -EPERM;
+ }
+ }
- err = __ixgbe_enable_sriov(adapter);
+ err = __ixgbe_enable_sriov(adapter, num_vfs);
if (err)
return err;
- for (i = 0; i < adapter->num_vfs; i++)
+ for (i = 0; i < num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
/* reset before enabling SRIOV to avoid mailbox issues */
* 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS
+#define IXGBE_MAX_VFS_4TC 32
+#define IXGBE_MAX_VFS_8TC 16
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
#endif
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
#define IXGBE_PHY_INIT_END_NL 0xFFFF
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
-#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
-
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
-#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21)
#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_2_5GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = true;
}
return 0;
return ixgbe_restart_an_internal_phy_x550em(hw);
}
-/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KX4 PHY.
- **/
-static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
- if (status)
- return status;
-
- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
-
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
-
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
-
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
-
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
-
- return status;
-}
-
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
/* Set functions pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
- phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* Save NW management interface connected on board. This is used
* to determine internal PHY mode
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
case ixgbe_phy_fw:
phy->ops.setup_link = ixgbe_setup_fw_link;
phy->ops.reset = ixgbe_reset_phy_fw;
/* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = ixgbe_get_link_capabilities_X550em,
.get_bus_info = ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-static int ixgbevf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbevf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = -1;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = -1;
hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
break;
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
- .get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
+ .get_link_ksettings = ixgbevf_get_link_ksettings,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
- struct mtk_tx_buf *tx_buf;
+ struct mtk_tx_buf *itx_buf, *tx_buf;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
txd4 |= fport;
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
- memset(tx_buf, 0, sizeof(*tx_buf));
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ memset(itx_buf, 0, sizeof(*itx_buf));
if (gso)
txd4 |= TX_DMA_TSO;
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+ itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+ dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
/* TX SG offload */
txd = itxd;
last_frag * TX_DMA_LS0));
WRITE_ONCE(txd->txd4, fport);
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
memset(tx_buf, 0, sizeof(*tx_buf));
-
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
frag_size -= frag_map_size;
}
/* store skb to cleanup */
- tx_buf->skb = skb;
+ itx_buf->skb = skb;
WRITE_ONCE(itxd->txd4, txd4);
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
- int mac;
+ int mac = 0;
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
- mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
- TX_DMA_FPORT_MASK;
- mac--;
-
tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+ mac = 1;
+
skb = tx_buf->skb;
if (!skb) {
condition = 1;
struct u64_stats_sync syncp;
};
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
enum mtk_tx_flags {
+ /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+ * track how memory was allocated so that it can be freed properly.
+ */
MTK_TX_FLAGS_SINGLE0 = 0x01,
MTK_TX_FLAGS_PAGE0 = 0x02,
+
+ /* MTK_TX_FLAGS_FPORTx allows tracking which port an SKB was
+ * transmitted out of, instead of looking it up in the hardware
+ * TX descriptor.
+ */
+ MTK_TX_FLAGS_FPORT0 = 0x04,
+ MTK_TX_FLAGS_FPORT1 = 0x08,
};
/* This enum allows us to identify how the clock is defined on the array of the
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
- struct res_mpt *mpt;
+ struct res_mpt *mpt = NULL;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
*/
return netdev;
-free_mdev_resources:
- mlx5e_destroy_mdev_resources(mdev);
err_free_netdev:
free_netdev(netdev);
+free_mdev_resources:
+ mlx5e_destroy_mdev_resources(mdev);
+
return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
+
+/* Virtual Router and Forwarding Domain Action
+ * -------------------------------------------
+ * The virtual switch action is used to manipulate the Virtual Router (VR),
+ * MPLS label space and the Forwarding Identifier (FID).
+ */
+
+#define MLXSW_AFA_VIRFWD_CODE 0x0E
+#define MLXSW_AFA_VIRFWD_SIZE 1
+
+enum mlxsw_afa_virfwd_fid_cmd {
+ /* Do nothing */
+ MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
+ /* Set the Forwarding Identifier (FID) to fid */
+ MLXSW_AFA_VIRFWD_FID_CMD_SET,
+};
+
+/* afa_virfwd_fid_cmd */
+MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);
+
+/* afa_virfwd_fid
+ * The FID value.
+ */
+MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);
+
+static inline void mlxsw_afa_virfwd_pack(char *payload,
+ enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
+ u16 fid)
+{
+ mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
+ mlxsw_afa_virfwd_fid_set(payload, fid);
+}
+
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VIRFWD_CODE,
+ MLXSW_AFA_VIRFWD_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
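+
+/* Typical use is to append the FID set action while building a rule:
+ *
+ *	err = mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+ *	if (err)
+ *		return err;
+ */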
u16 vid, u8 pcp, u8 et);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
u32 counter_index);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
#endif
memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
-int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
- u16 vid_end, bool is_member, bool untagged)
+static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvm_pl;
return err;
}
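+/* The underlying SPVM register takes at most
+ * MLXSW_REG_SPVM_REC_MAX_COUNT VLAN records per write, so the requested
+ * range is applied in chunks of that size.
+ */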
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
+ is_member, untagged);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
/* L3 traps */
MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
+
+static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
+}
+
+static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
+}
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
goto err_dpipe_init;
}
+ err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
+ goto err_dummy_fid_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
return 0;
err_ports_create:
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
+err_dummy_fid_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
+static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
+ MLXSW_REG_SPMS_STATE_DISCARDING;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+
+ for (vid = 0; vid < VLAN_N_VID; vid++)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ true, false);
+ if (err)
+ goto err_port_vlan_set;
+ return 0;
+
+err_port_vlan_set:
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ return err;
+}
+
+static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ false, false);
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
unsigned long event, void *ptr)
{
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_l3_master(upper_dev))
+ !netif_is_l3_master(upper_dev) &&
+ !netif_is_ovs_master(upper_dev))
return -EINVAL;
if (!info->linking)
break;
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
return -EINVAL;
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
else
mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
+ } else if (netif_is_ovs_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
+ else
+ mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
} else {
err = -EINVAL;
WARN_ON(1);
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
+#define MLXSW_SP_DUMMY_FID 15359
+
#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_MID_MAX 7000
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}
struct mlxsw_sp_sb_pr {
u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid);
struct mlxsw_sp_acl_rule;
rulei->counter_index);
}
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid)
+{
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
+ MLXSW_SP_DUMMY_FID);
+ if (err)
+ return err;
+
out_dev = __dev_get_by_index(dev_net(dev), ifindex);
if (out_dev == dev)
out_dev = NULL;
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event)
{
- if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ if (netif_is_bridge_port(port_dev) ||
+ netif_is_lag_port(port_dev) ||
+ netif_is_ovs_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
return err;
}
-static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool is_member,
- bool untagged)
-{
- u16 vid, vid_e;
- int err;
-
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
- vid_end);
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
- is_member, untagged);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
return err;
}
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, flag_untagged);
if (err) {
netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
vid_end);
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
err_port_vlans_set:
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
if (pvid >= vid_begin && pvid <= vid_end)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+ MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
return -ENOMEM;
}
- edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR));
+ edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
+ sizeof(long));
if (!edev->arfs->arfs_fltr_bmap) {
free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
edev->ndev->rx_cpu_rmap = NULL;
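
BITS_TO_LONGS() counts longs, not bytes, so the old call allocated roughly one eighth of the bitmap on 64-bit hosts. A userspace sketch of the arithmetic, assuming a hypothetical 256-entry filter table (the real QEDE_RFS_MAX_FLTR may differ):

    #include <assert.h>

    #define BITS_PER_LONG       (8 * sizeof(long))
    #define BITS_TO_LONGS(n)    (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned int nfltr = 256;       /* hypothetical QEDE_RFS_MAX_FLTR */

            /* a count of longs, not a byte count */
            assert(BITS_TO_LONGS(nfltr) == nfltr / BITS_PER_LONG);
            /* scaling by sizeof(long) yields the real bitmap size in bytes */
            assert(BITS_TO_LONGS(nfltr) * sizeof(long) == nfltr / 8);
            return 0;
    }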
.get_mdio_data = sh_get_mdio,
};
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+ int free_num = 0;
+ int entry;
+ bool sent;
+
+ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
+ txdesc = &mdp->tx_ring[entry];
+ sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+ if (sent_only && !sent)
+ break;
+ /* TACT bit must be checked before all the following reads */
+ dma_rmb();
+ netif_info(mdp, tx_done, ndev,
+ "tx entry %d status 0x%08x\n",
+ entry, le32_to_cpu(txdesc->status));
+ /* Free the original skb. */
+ if (mdp->tx_skbuff[entry]) {
+ dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+ le32_to_cpu(txdesc->len) >> 16,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+ mdp->tx_skbuff[entry] = NULL;
+ free_num++;
+ }
+ txdesc->status = cpu_to_le32(TD_TFP);
+ if (entry >= mdp->num_tx_ring - 1)
+ txdesc->status |= cpu_to_le32(TD_TDLE);
+
+ if (sent) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+ }
+ }
+ return free_num;
+}
+
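
The sent_only flag distinguishes the two kinds of caller that follow: the interrupt and transmit paths pass true and stop at the first descriptor the hardware still owns, while sh_eth_ring_free() passes false to force-free every slot at teardown. A minimal userspace model of that behaviour, with hypothetical ring contents:

    #include <assert.h>
    #include <stdbool.h>

    #define RING 4
    /* hypothetical descriptor ownership: last two still owned by hardware */
    static bool hw_owned[RING] = { false, false, true, true };

    static int tx_free(bool sent_only)
    {
            int freed = 0;

            for (int i = 0; i < RING; i++) {
                    if (sent_only && hw_owned[i])
                            break;          /* TACT still set: stop here */
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            assert(tx_free(true) == 2);     /* hot path: completed entries only */
            assert(tx_free(false) == RING); /* teardown: force-free the rest */
            return 0;
    }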
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ringsize, i;
+ if (mdp->rx_ring) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
+ if (mdp->rx_skbuff[i]) {
+ struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+ dma_unmap_single(&ndev->dev,
+ le32_to_cpu(rxdesc->addr),
+ ALIGN(mdp->rx_buf_sz, 32),
+ DMA_FROM_DEVICE);
+ }
+ }
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
for (i = 0; i < mdp->num_rx_ring; i++)
kfree(mdp->rx_skbuff);
mdp->rx_skbuff = NULL;
- /* Free Tx skb ringbuffer */
- if (mdp->tx_skbuff) {
- for (i = 0; i < mdp->num_tx_ring; i++)
- dev_kfree_skb(mdp->tx_skbuff[i]);
- }
- kfree(mdp->tx_skbuff);
- mdp->tx_skbuff = NULL;
-
- if (mdp->rx_ring) {
- ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
- mdp->rx_desc_dma);
- mdp->rx_ring = NULL;
- }
-
if (mdp->tx_ring) {
+ sh_eth_tx_free(ndev, false);
+
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
dma_free_coherent(NULL, ringsize, mdp->tx_ring,
mdp->tx_desc_dma);
mdp->tx_ring = NULL;
}
+
+ /* Free Tx skb ringbuffer */
+ kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
update_mac_address(ndev);
}
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- struct sh_eth_txdesc *txdesc;
- int free_num = 0;
- int entry;
-
- for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % mdp->num_tx_ring;
- txdesc = &mdp->tx_ring[entry];
- if (txdesc->status & cpu_to_le32(TD_TACT))
- break;
- /* TACT bit must be checked before all the following reads */
- dma_rmb();
- netif_info(mdp, tx_done, ndev,
- "tx entry %d status 0x%08x\n",
- entry, le32_to_cpu(txdesc->status));
- /* Free the original skb. */
- if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
- le32_to_cpu(txdesc->len) >> 16,
- DMA_TO_DEVICE);
- dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
- mdp->tx_skbuff[entry] = NULL;
- free_num++;
- }
- txdesc->status = cpu_to_le32(TD_TFP);
- if (entry >= mdp->num_tx_ring - 1)
- txdesc->status |= cpu_to_le32(TD_TDLE);
-
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
- }
- return free_num;
-}
-
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
intr_status, mdp->cur_tx, mdp->dirty_tx,
(u32)ndev->state, edtrr);
/* dirty buffer free */
- sh_eth_txfree(ndev);
+ sh_eth_tx_free(ndev, true);
/* SH7712 BUG */
if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
/* Clear Tx interrupts */
sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
- sh_eth_txfree(ndev);
+ sh_eth_tx_free(ndev, true);
netif_wake_queue(ndev);
}
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
- if (!sh_eth_txfree(ndev)) {
+ if (!sh_eth_tx_free(ndev, true)) {
netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
free_cpumask_var(thread_mask);
}
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
free_cpumask_var(thread_mask);
}
+ if (count > EF4_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EF4_MAX_RX_QUEUES);
+ count = EF4_MAX_RX_QUEUES;
+ }
+
return count;
}
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+ /* If the failure is because the channel is rescinded,
+ * ignore the failure, since we cannot send on a rescinded
+ * channel. This allows us to clean up properly even when
+ * the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
/*
* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+
+ /* If the failure is because the channel is rescinded,
+ * ignore the failure, since we cannot send on a rescinded
+ * channel. This allows us to clean up properly even when
+ * the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
+
/* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
*/
/* Now, we can close the channel safely */
vmbus_close(device->channel);
+ /* And disassociate NAPI context from device */
for (i = 0; i < net_device->num_chn; i++)
- napi_disable(&net_device->chan_table[i].napi);
+ netif_napi_del(&net_device->chan_table[i].napi);
/* Release all resources */
free_netvsc_device_rcu(net_device);
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->channel = device->channel;
- netif_napi_add(ndev, &nvchan->napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
}
/* Open the channel */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
/* Enable NAPI handler for init callbacks */
+ netif_napi_add(ndev, &net_device->chan_table[0].napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
napi_enable(&net_device->chan_table[0].napi);
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
return ret;
close:
- napi_disable(&net_device->chan_table[0].napi);
+ netif_napi_del(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
/* Set the channel before opening.*/
nvchan->channel = new_sc;
+ netif_napi_add(ndev, &nvchan->napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb, nvchan);
-
-
- napi_enable(&nvchan->napi);
+ if (ret == 0)
+ napi_enable(&nvchan->napi);
+ else
+ netdev_err(ndev, "sub channel open failed (%d)\n", ret);
if (refcount_dec_and_test(&nvscdev->sc_offered))
complete(&nvscdev->channel_init_wait);
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
- .get_sset_count = kszphy_get_sset_count,
- .get_strings = kszphy_get_strings,
- .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
- .get_sset_count = kszphy_get_sset_count,
- .get_strings = kszphy_get_strings,
- .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
.config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = ksz9031_read_status,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
- .get_sset_count = kszphy_get_sset_count,
- .get_strings = kszphy_get_strings,
- .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .get_sset_count = kszphy_get_sset_count,
- .get_strings = kszphy_get_strings,
- .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
- .get_sset_count = kszphy_get_sset_count,
- .get_strings = kszphy_get_strings,
- .get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
goto nla_put_failure;
/* rule only needs to appear once */
- nlh->nlmsg_flags &= NLM_F_EXCL;
+ nlh->nlmsg_flags |= NLM_F_EXCL;
frh = nlmsg_data(nlh);
memset(frh, 0, sizeof(*frh));
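
The one-character fix above matters more than it looks: '&=' masks the flag word down to the NLM_F_EXCL bit, clearing NLM_F_REQUEST and everything else, while '|=' adds the bit as intended. A userspace demonstration using the flag values from <linux/netlink.h>:

    #include <assert.h>
    #include <stdint.h>

    #define NLM_F_REQUEST 0x01      /* values as in <linux/netlink.h> */
    #define NLM_F_EXCL    0x200

    int main(void)
    {
            uint16_t flags = NLM_F_REQUEST;

            /* buggy "&=": every bit outside NLM_F_EXCL is cleared */
            assert((uint16_t)(flags & NLM_F_EXCL) == 0);
            /* fixed "|=": the bit is added, existing flags survive */
            assert((flags | NLM_F_EXCL) == (NLM_F_REQUEST | NLM_F_EXCL));
            return 0;
    }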
dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
err = ieee80211_register_hw(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot register device\n",
if (error)
goto out_cancel_rx_cmd;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
usb_set_intfdata(intf, hw);
error = ieee80211_register_hw(hw);
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
hw->extra_tx_headroom = 2;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/*
* Mark the device as detached to avoid processing
* interrupts until setup is complete.
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct ath6kl *ar = wiphy_priv(wiphy);
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct ath6kl_vif *vif = netdev_priv(ndev);
}
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
}
static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
ath9k_cmn_reload_chainmask(ah);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
return ar;
err_nomem:
wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+ wiphy_ext_feature_set(wcn->hw->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
return ret;
}
BIT(NL80211_STA_INFO_RX_DROP_MISC) |
BIT(NL80211_STA_INFO_TX_FAILED);
- sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
- sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->rx_bytes = stats->rx_bytes;
sinfo->rx_packets = stats->rx_packets;
wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct net_device *ndev = wil_to_ndev(wil);
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_MONITOR:
- if (flags)
- wil->monitor_flags = *flags;
- else
- wil->monitor_flags = 0;
-
+ if (params->flags)
+ wil->monitor_flags = params->flags;
break;
default:
return -EOPNOTSUPP;
wiphy->hw_version = priv->board_type;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
wl->hw_registred = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
else
SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/* Get and initialize struct b43legacy_wl */
wl = hw_to_b43legacy_wl(hw);
memset(wl, 0, sizeof(*wl));
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
- * @flags: not used.
* @params: contains mac address for AP device.
*/
static
struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct wireless_dev *wdev;
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
case NL80211_IFTYPE_AP:
- wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
+ wdev = brcmf_ap_add_vif(wiphy, name, params);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
- wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
+ wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, params);
break;
case NL80211_IFTYPE_UNSPECIFIED:
default:
static s32
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
- NULL, NULL);
+ NULL);
if (err)
goto default_conf_out;
* @name: name of the new interface.
* @name_assign_type: origin of the interface name
* @type: nl80211 interface type.
- * @flags: not used.
* @params: contains mac address for P2P device.
*/
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
* hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
*/
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
hw->rate_control_algorithm = "minstrel_ht";
hw->sta_data_size = 0;
il_leds_init(il);
+ wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(il->hw);
if (ret) {
IL_ERR("Failed to register hw (error %d)\n", ret);
rate_mask = sta->supp_rates[sband->band];
/* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
+ max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
max_rate_idx += IL_FIRST_OFDM_RATE;
if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
il_leds_init(il);
+ wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(il->hw);
if (ret) {
IL_ERR("Failed to register hw (error %d)\n", ret);
/* Get max rate if user set max rate */
if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if (sband->band == NL80211_BAND_5GHZ &&
lq_sta->max_rate_idx != -1)
lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
iwl_leds_init(priv);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
/* Get max rate if user set max rate */
if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if ((sband->band == NL80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
}
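
The three rate-scaling hunks above derive the user rate cap from the rate index mask instead of the old max_rate_idx field: fls() returns the 1-based position of the highest set bit, or 0 for an empty mask, so fls(mask) - 1 keeps the -1 "no limit" sentinel. A userspace sketch with a local stand-in for the kernel's fls():

    #include <assert.h>

    /* stand-in for the kernel's fls(): highest set bit, 1-based; 0 if none */
    static int fls_(unsigned int x)
    {
            int i = 0;

            while (x) {
                    i++;
                    x >>= 1;
            }
            return i;
    }

    int main(void)
    {
            assert(fls_(0x0f) - 1 == 3);    /* rates 0..3 usable, cap at 3 */
            assert(fls_(0x01) - 1 == 0);    /* only the lowest rate allowed */
            assert(fls_(0x00) - 1 == -1);   /* empty mask: no user limit */
            return 0;
    }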
static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
CHAN5G(5785), /* Channel 157 */
CHAN5G(5805), /* Channel 161 */
CHAN5G(5825), /* Channel 165 */
+ CHAN5G(5845), /* Channel 169 */
};
static const struct ieee80211_rate hwsim_rates[] = {
struct ieee80211_vif *hw_scan_vif;
int scan_chan_idx;
u8 scan_addr[ETH_ALEN];
+ struct {
+ struct ieee80211_channel *channel;
+ unsigned long next_start, start, end;
+ } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) +
+ ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
/* wmediumd portid responsible for netgroup of this radio */
u32 wmediumd;
- int power_level;
-
/* difference between this hw's clock and the real clock, in usecs */
s64 tsf_offset;
s64 bcn_delta;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.flag |= RX_FLAG_SHORT_GI;
/* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = data->power_level - 50;
+ rx_status.signal = -50;
+ if (info->control.vif)
+ rx_status.signal += info->control.vif->bss_conf.txpower;
if (data->ps != PS_DISABLED)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
[IEEE80211_SMPS_STATIC] = "static",
[IEEE80211_SMPS_DYNAMIC] = "dynamic",
};
+ int idx;
if (conf->chandef.chan)
wiphy_debug(hw->wiphy,
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- data->channel = conf->chandef.chan;
+ WARN_ON(conf->chandef.chan && data->use_chanctx);
+
+ mutex_lock(&data->mutex);
+ if (data->scanning && conf->chandef.chan) {
+ for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
+ if (data->survey_data[idx].channel == data->channel) {
+ data->survey_data[idx].start =
+ data->survey_data[idx].next_start;
+ data->survey_data[idx].end = jiffies;
+ break;
+ }
+ }
- WARN_ON(data->channel && data->use_chanctx);
+ data->channel = conf->chandef.chan;
+
+ for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
+ if (data->survey_data[idx].channel &&
+ data->survey_data[idx].channel != data->channel)
+ continue;
+ data->survey_data[idx].channel = data->channel;
+ data->survey_data[idx].next_start = jiffies;
+ break;
+ }
+ } else {
+ data->channel = conf->chandef.chan;
+ }
+ mutex_unlock(&data->mutex);
- data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
tasklet_hrtimer_cancel(&data->beacon_timer);
else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
return 0;
}
-static int mac80211_hwsim_get_survey(
- struct ieee80211_hw *hw, int idx,
- struct survey_info *survey)
+static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
{
- struct ieee80211_conf *conf = &hw->conf;
-
- wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx);
+ struct mac80211_hwsim_data *hwsim = hw->priv;
- if (idx != 0)
+ if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data))
return -ENOENT;
- /* Current channel */
- survey->channel = conf->chandef.chan;
+ mutex_lock(&hwsim->mutex);
+ survey->channel = hwsim->survey_data[idx].channel;
+ if (!survey->channel) {
+ mutex_unlock(&hwsim->mutex);
+ return -ENOENT;
+ }
/*
- * Magically conjured noise level --- this is only ok for simulated hardware.
+ * Magically conjured dummy values --- this is only ok for simulated hardware.
*
- * A real driver which cannot determine the real channel noise MUST NOT
- * report any noise, especially not a magically conjured one :-)
+ * A real driver which cannot determine real values MUST NOT
+ * report any, especially not magically conjured ones :-)
*/
- survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
survey->noise = -92;
+ survey->time =
+ jiffies_to_msecs(hwsim->survey_data[idx].end -
+ hwsim->survey_data[idx].start);
+ /* report 12.5% of channel time is used */
+ survey->time_busy = survey->time/8;
+ mutex_unlock(&hwsim->mutex);
return 0;
}
}
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
msecs_to_jiffies(dwell));
+ hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan;
+ hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies;
+ hwsim->survey_data[hwsim->scan_chan_idx].end =
+ jiffies + msecs_to_jiffies(dwell);
hwsim->scan_chan_idx++;
mutex_unlock(&hwsim->mutex);
}
hw_req->req.mac_addr_mask);
else
memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN);
+ memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
mutex_unlock(&hwsim->mutex);
wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
hwsim->scanning = true;
+ memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
out:
mutex_unlock(&hwsim->mutex);
"d_tx_failed",
"d_ps_mode",
"d_group",
- "d_tx_power",
};
#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
data[i++] = ar->tx_failed;
data[i++] = ar->ps;
data[i++] = ar->group;
- data[i++] = ar->power_level;
WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
}
goto failed;
}
+ /* ieee80211_alloc_hw_nm may have used a default name */
+ param->hwname = wiphy_name(hw->wiphy);
+
if (info)
net = genl_info_net(info);
else
if (param->no_vif)
ieee80211_hw_set(hw, NO_AUTO_VIF);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
* @HWSIM_CMD_REGISTER: request to register and received all broadcasted
* frames by any mac80211_hwsim radio device.
* @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user
- * space, uses:
+ * space, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
* %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
* %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional)
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
- * kernel, uses:
+ * kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
* @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
*/
static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct lbs_private *priv = wiphy_priv(wiphy);
BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
SET_IEEE80211_DEV(hw, dmdev);
INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
static int
mwifiex_change_vif_to_p2p(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
static int
mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
static int
mwifiex_change_vif_to_ap(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
static int
mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as IBSS\n", dev->name);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as STA\n", dev->name);
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
- type, flags,
- params);
+ type, params);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as AP\n", dev->name);
if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
- type, flags,
- params);
+ type, params);
break;
case NL80211_IFTYPE_AP:
if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as P2P\n", dev->name);
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
rtnl_lock();
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
- NL80211_IFTYPE_STATION, NULL, NULL);
+ NL80211_IFTYPE_STATION, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create default STA interface\n");
if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
- NL80211_IFTYPE_AP, NULL, NULL);
+ NL80211_IFTYPE_AP, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create AP interface\n");
if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM,
- NL80211_IFTYPE_P2P_CLIENT, NULL,
- NULL);
+ NL80211_IFTYPE_P2P_CLIENT, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create p2p client interface\n");
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params);
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
break;
}
- vht_oper->center_freq_seg1_idx =
+ vht_oper->center_freq_seg0_idx =
mwifiex_get_center_freq_index(priv, BAND_AAC,
bss_desc->channel,
chan_bw);
if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
rc = ieee80211_register_hw(hw);
if (rc) {
wiphy_err(hw->wiphy, "Cannot register device\n");
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = mt76_init_sband_2g(dev);
if (ret)
return ret;
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(rt2x00dev->hw->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/*
* Initialize ieee80211 structure.
*/
else
ieee80211_hw_set(dev, SIGNAL_UNSPEC);
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
rtl8180_eeprom_read(priv);
switch (priv->rf_type) {
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) ;
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
" info!\n");
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
dev_err(&udev->dev, "%s: Failed to register: %i\n",
*/
static int rndis_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
static int rndis_scan(struct wiphy *wiphy,
*/
static int rndis_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
wiphy->reg_notifier = rsi_reg_notify;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
status = ieee80211_register_hw(hw);
if (status)
return status;
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
hw->max_signal = 100;
hw->queues = 1;
hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
if (rc < 0)
goto out_unlock;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
+
+ vfree(buf);
+ return rc;
+
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
- if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+ /*
+ * FIXME: nsio_rw_bytes() may be called from atomic
+ * context in the btt case and nvdimm_clear_poison()
+ * takes a sleeping lock. Until the locking can be
+ * reworked this capability requires that the namespace
+ * is not claimed by btt.
+ */
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+ && (!ndns->claim || !is_nd_btt(ndns->claim))) {
long cleared;
cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
int alias_dpa_busy(struct device *dev, void *data)
{
- resource_size_t map_end, blk_start, new, busy;
+ resource_size_t map_end, blk_start, new;
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
retry:
/*
* Find the free dpa from the end of the last pmem allocation to
- * the end of the interleave-set mapping that is not already
- * covered by a blk allocation.
+ * the end of the interleave-set mapping.
*/
- busy = 0;
for_each_dpa_resource(ndd, res) {
+ if (strncmp(res->name, "pmem", 4) != 0)
+ continue;
if ((res->start >= blk_start && res->start < map_end)
|| (res->end >= blk_start
&& res->end <= map_end)) {
- if (strncmp(res->name, "pmem", 4) == 0) {
- new = max(blk_start, min(map_end + 1,
- res->end + 1));
- if (new != blk_start) {
- blk_start = new;
- goto retry;
- }
- } else
- busy += min(map_end, res->end)
- - max(nd_mapping->start, res->start) + 1;
- } else if (nd_mapping->start > res->start
- && map_end < res->end) {
- /* total eclipse of the PMEM region mapping */
- busy += nd_mapping->size;
- break;
+ new = max(blk_start, min(map_end + 1, res->end + 1));
+ if (new != blk_start) {
+ blk_start = new;
+ goto retry;
+ }
}
}
return 1;
}
- info->available -= blk_start - nd_mapping->start + busy;
+ info->available -= blk_start - nd_mapping->start;
return 0;
}
-static int blk_dpa_busy(struct device *dev, void *data)
-{
- struct blk_alloc_info *info = data;
- struct nd_mapping *nd_mapping;
- struct nd_region *nd_region;
- resource_size_t map_end;
- int i;
-
- if (!is_nd_pmem(dev))
- return 0;
-
- nd_region = to_nd_region(dev);
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- nd_mapping = &nd_region->mapping[i];
- if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
- break;
- }
-
- if (i >= nd_region->ndr_mappings)
- return 0;
-
- map_end = nd_mapping->start + nd_mapping->size - 1;
- if (info->res->start >= nd_mapping->start
- && info->res->start < map_end) {
- if (info->res->end <= map_end) {
- info->busy = 0;
- return 1;
- } else {
- info->busy -= info->res->end - map_end;
- return 0;
- }
- } else if (info->res->end >= nd_mapping->start
- && info->res->end <= map_end) {
- info->busy -= nd_mapping->start - info->res->start;
- return 0;
- } else {
- info->busy -= nd_mapping->size;
- return 0;
- }
-}
-
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "blk", 3) != 0)
continue;
-
- info.res = res;
- info.busy = resource_size(res);
- device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
- info.available -= info.busy;
+ info.available -= resource_size(res);
}
return info.available;
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
}
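
All three hunks above apply the same off-by-one fix: CAP.MQES and ctrl->sqsize are both zero-based values, so clamping against MQES + 1 could size the submission queue one entry beyond what the controller advertises. A worked userspace example with MQES = 127 (a 128-entry queue):

    #include <assert.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            int mqes = 127;         /* zero-based: 128 entries supported */
            int sqsize = 128;       /* requested, zero-based: 129 entries */

            assert(MIN(mqes + 1, sqsize) == 128);   /* old: 129 entries, too big */
            assert(MIN(mqes, sqsize) == 127);       /* new: 128 entries, correct */
            return 0;
    }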
EXPORT_SYMBOL_GPL(reset_control_status);
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
struct reset_controller_dev *rcdev,
unsigned int index, bool shared)
{
return rstc;
}
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
}
/* reset_list_mutex also protects the rcdev's reset_control list */
- rstc = __reset_control_get(rcdev, rstc_id, shared);
+ rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+ int index, bool shared, bool optional)
+{
+ if (dev->of_node)
+ return __of_reset_control_get(dev->of_node, id, index, shared,
+ optional);
+
+ return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
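
The new __reset_control_get() wrapper lets the devm and inline helpers below stop reaching into dev->of_node themselves: it forwards to the OF path when a node exists, and otherwise returns NULL for optional requests or fails. A userspace model of that dispatch (pointer and error handling simplified; the kernel returns ERR_PTR(-EINVAL)):

    #include <assert.h>
    #include <errno.h>
    #include <stddef.h>

    struct device { void *of_node; };

    static void *get_reset(struct device *dev, int optional, int *err)
    {
            *err = 0;
            if (dev->of_node)
                    return dev->of_node;    /* stands in for __of_reset_control_get() */
            if (!optional)
                    *err = -EINVAL;
            return NULL;
    }

    int main(void)
    {
            struct device no_of = { NULL };
            int err;

            assert(get_reset(&no_of, 1, &err) == NULL && err == 0);
            assert(get_reset(&no_of, 0, &err) == NULL && err == -EINVAL);
            return 0;
    }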
/**
* reset_control_put - free the reset controller
* @rstc: reset controller
return;
mutex_lock(&reset_list_mutex);
- __reset_control_put(rstc);
+ __reset_control_put_internal(rstc);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
if (!ptr)
return ERR_PTR(-ENOMEM);
- rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
- id, index, shared, optional);
+ rstc = __reset_control_get(dev, id, index, shared, optional);
if (!IS_ERR(rstc)) {
*ptr = rstc;
devres_add(dev, ptr);
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
-#define aac_adapter_check_health(dev) \
- (dev)->a_ops.adapter_check_health(dev)
-
#define aac_adapter_restart(dev, bled, reset_type) \
((dev)->a_ops.adapter_restart(dev, bled, reset_type))
return capacity;
}
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+ if (unlikely(pci_channel_offline(dev->pdev)))
+ return -1;
+
+ return (dev)->a_ops.adapter_check_health(dev);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) {
- printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+ printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+ aac->name, BlinkLED);
goto out;
}
break;
case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
case IPR_IOASA_IR_DUAL_IOA_DISABLED:
- scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+ /*
+ * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+ * so SCSI mid-layer and upper layers handle it accordingly.
+ */
+ if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+ scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case IPR_IOASC_BUS_WAS_RESET:
case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
- complete(&qedf->fipvlan_compl);
+ if (!completion_done(&qedf->fipvlan_compl))
+ complete(&qedf->fipvlan_compl);
}
}
atomic_set(&qedf->num_offloads, 0);
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
+ init_completion(&qedf->fipvlan_compl);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+ if (IS_P3P_TYPE(ha))
+ return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ else
+ return ((RD_REG_DWORD(&reg->host_status)) ==
+ ISP_REG_DISCONNECT);
}
/**************************************************************************
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
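
sd_addressable_capacity() shifts the capacity from logical blocks up to 512-byte sectors and rejects anything past U32_MAX when sector_t is 32 bits wide. A userspace sketch of the arithmetic, assuming a 4 KiB logical block size:

    #include <assert.h>
    #include <stdint.h>

    /* last 512-byte sector for a device of (lba + 1) logical blocks */
    static uint64_t last_sector(uint64_t lba, unsigned int block_size)
    {
            unsigned int shift = 0;

            while (block_size > 512) {      /* ilog2(block_size) - 9 */
                    block_size >>= 1;
                    shift++;
            }
            return (lba + 1ULL) << shift;
    }

    int main(void)
    {
            /* 2^29 4 KiB blocks = 2 TiB: fits in 64 bits, not in a
             * 32-bit sector_t */
            assert(last_sector((1ULL << 29) - 1, 4096) == (1ULL << 32));
            assert(last_sector((1ULL << 29) - 1, 4096) > UINT32_MAX);
            /* the same check passes for a device that still fits */
            assert(last_sector((1ULL << 32) - 2, 512) <= UINT32_MAX);
            return 0;
    }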
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
- rw_max = BLK_DEF_MAX_SECTORS;
+ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ (sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
}
static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type, u32 *flags, struct vif_params *params)
+ enum nl80211_iftype type, struct vif_params *params)
{
struct wilc_priv *priv;
struct wilc_vif *vif;
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct wilc_vif *vif;
/* The interface functions, called by the cfg80211 layer */
static int prism2_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct wlandevice *wlandev = dev->ml_priv;
tty_ldisc_debug(tty, "%p: closed\n", ld);
}
+/**
+ * tty_ldisc_restore - helper for tty ldisc change
+ * @tty: tty to recover
+ * @old: previous ldisc
+ *
+ * Restore the previous line discipline or N_TTY when a line discipline
+ * change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+ struct tty_ldisc *new_ldisc;
+ int r;
+
+ /* There is an outstanding reference here so this is safe */
+ old = tty_ldisc_get(tty, old->ops->num);
+ WARN_ON(IS_ERR(old));
+ tty->ldisc = old;
+ tty_set_termios_ldisc(tty, old->ops->num);
+ if (tty_ldisc_open(tty, old) < 0) {
+ tty_ldisc_put(old);
+ /* This driver is always present */
+ new_ldisc = tty_ldisc_get(tty, N_TTY);
+ if (IS_ERR(new_ldisc))
+ panic("n_tty: get");
+ tty->ldisc = new_ldisc;
+ tty_set_termios_ldisc(tty, N_TTY);
+ r = tty_ldisc_open(tty, new_ldisc);
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty), r);
+ }
+}
+
/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
- int retval, old_disc;
+ int retval;
+ struct tty_ldisc *old_ldisc, *new_ldisc;
+
+ new_ldisc = tty_ldisc_get(tty, disc);
+ if (IS_ERR(new_ldisc))
+ return PTR_ERR(new_ldisc);
tty_lock(tty);
retval = tty_ldisc_lock(tty, 5 * HZ);
}
/* Check the no-op case */
- old_disc = tty->ldisc->ops->num;
- if (old_disc == disc)
+ if (tty->ldisc->ops->num == disc)
goto out;
if (test_bit(TTY_HUPPED, &tty->flags)) {
goto out;
}
- retval = tty_ldisc_reinit(tty, disc);
+ old_ldisc = tty->ldisc;
+
+ /* Shutdown the old discipline. */
+ tty_ldisc_close(tty, old_ldisc);
+
+ /* Now set up the new line discipline. */
+ tty->ldisc = new_ldisc;
+ tty_set_termios_ldisc(tty, disc);
+
+ retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
/* Back to the old one or N_TTY if we can't */
- if (tty_ldisc_reinit(tty, old_disc) < 0) {
- pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
- if (tty_ldisc_reinit(tty, N_TTY) < 0) {
- /* At this point we have tty->ldisc == NULL. */
- pr_err("tty: reinitializing N_TTY failed\n");
- }
- }
+ tty_ldisc_put(new_ldisc);
+ tty_ldisc_restore(tty, old_ldisc);
}
- if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
- tty->ops->set_ldisc) {
+ if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
down_read(&tty->termios_rwsem);
tty->ops->set_ldisc(tty);
up_read(&tty->termios_rwsem);
}
+ /* At this point we hold a reference to the new ldisc and a
+ reference to the old ldisc, or we hold two references to
+ the old ldisc (if it was restored as part of error cleanup
+ above). In either case, releasing a single reference from
+ the old ldisc is correct. */
+ new_ldisc = old_ldisc;
out:
tty_ldisc_unlock(tty);
already running */
tty_buffer_restart_work(tty->port);
err:
+ tty_ldisc_put(new_ldisc); /* drop the extra reference */
tty_unlock(tty);
return retval;
}
int retval;
ld = tty_ldisc_get(tty, disc);
- if (IS_ERR(ld))
+ if (IS_ERR(ld)) {
+ BUG_ON(disc == N_TTY);
return PTR_ERR(ld);
+ }
if (tty->ldisc) {
tty_ldisc_close(tty, tty->ldisc);
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
+ if (!WARN_ON(disc == N_TTY)) {
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ }
}
return retval;
}
int retval = 0;
const char *s = nd->name->name;
+ if (!*s)
+ flags &= ~LOOKUP_RCU;
+
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
continue;
/*
* Skip ops whose filesystem we don't know about unless
- * it is being mounted.
+ * it is being mounted or unmounted. It is possible for
+ * a filesystem we don't know about to be unmounted if
+ * it fails to mount in the kernel after userspace has
+ * been sent the mount request.
*/
/* XXX: is there a better way to detect this? */
} else if (ret == -1 &&
!(op->upcall.type ==
ORANGEFS_VFS_OP_FS_MOUNT ||
op->upcall.type ==
- ORANGEFS_VFS_OP_GETATTR)) {
+ ORANGEFS_VFS_OP_GETATTR ||
+ op->upcall.type ==
+ ORANGEFS_VFS_OP_FS_UMOUNT)) {
gossip_debug(GOSSIP_DEV_DEBUG,
"orangefs: skipping op tag %llu %s\n",
llu(op->tag), get_opname_string(op));
char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
struct super_block *sb;
int mount_pending;
+ int no_list;
struct list_head list;
};
if (ret) {
d = ERR_PTR(ret);
- goto free_op;
+ goto free_sb_and_op;
}
/*
spin_unlock(&orangefs_superblocks_lock);
op_release(new_op);
+ /* Must be removed from the list now. */
+ ORANGEFS_SB(sb)->no_list = 0;
+
if (orangefs_userspace_version >= 20906) {
new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
if (!new_op)
return dget(sb->s_root);
+free_sb_and_op:
+ /* Will call orangefs_kill_sb with sb not in list. */
+ ORANGEFS_SB(sb)->no_list = 1;
+ deactivate_locked_super(sb);
free_op:
gossip_err("orangefs_mount: mount request failed with %d\n", ret);
if (ret == -EINVAL) {
*/
orangefs_unmount_sb(sb);
- /* remove the sb from our list of orangefs specific sb's */
-
- spin_lock(&orangefs_superblocks_lock);
- __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
- ORANGEFS_SB(sb)->list.prev = NULL;
- spin_unlock(&orangefs_superblocks_lock);
+ if (!ORANGEFS_SB(sb)->no_list) {
+ /* remove the sb from our list of orangefs specific sb's */
+ spin_lock(&orangefs_superblocks_lock);
+ /* not list_del_init */
+ __list_del_entry(&ORANGEFS_SB(sb)->list);
+ ORANGEFS_SB(sb)->list.prev = NULL;
+ spin_unlock(&orangefs_superblocks_lock);
+ }
/*
* make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
}
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+ return req->base.flags;
+}
+
static inline struct crypto_ahash *crypto_spawn_ahash(
struct crypto_ahash_spawn *spawn)
{
return true;
}
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
- struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq,
+ struct bio *prev,
+ struct bio *next)
{
if (bio_has_data(prev) && queue_virt_boundary(q)) {
struct bio_vec pb, nb;
+ /*
+ * don't merge if the 1st bio starts with non-zero
+ * offset, otherwise it is quite difficult to respect
+ * sg gap limit. We work hard to merge a huge number of small
+ * single bios in case of mkfs.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset)
+ return true;
+
+ /*
+ * We don't need to worry about the situation that the
+ * merged segment ends in unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, req->biotail, bio);
+ return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
- return bio_will_gap(req->q, bio, req->bio);
+ return bio_will_gap(req->q, NULL, bio, req->bio);
}
int kblockd_schedule_work(struct work_struct *work);
#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
/* for stbc_param */
#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
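
The new pair describes the Channel Center Frequency Segment 2 field, eight contiguous bits starting at bit 5 of the HT operation mode word (0x1fe0 covers bits 5..12). A sketch of the extraction a consumer would perform:

    #include <assert.h>
    #include <stdint.h>

    #define IEEE80211_HT_OP_MODE_CCFS2_SHIFT    5
    #define IEEE80211_HT_OP_MODE_CCFS2_MASK     0x1fe0

    int main(void)
    {
            /* hypothetical operation mode word carrying CCFS2 = 42 */
            uint16_t op_mode = 42u << IEEE80211_HT_OP_MODE_CCFS2_SHIFT;

            assert(((op_mode & IEEE80211_HT_OP_MODE_CCFS2_MASK)
                    >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT) == 42);
            return 0;
    }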
* This structure is the "VHT operation element" as
* described in 802.11ac D3.0 8.4.2.161
* @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
* @center_freq_seg1_idx: center freq segment 1 index
- * @center_freq_seg2_idx: center freq segment 2 index
* @basic_mcs_set: VHT Basic MCS rate set
*/
struct ieee80211_vht_operation {
u8 chan_width;
+ u8 center_freq_seg0_idx;
u8 center_freq_seg1_idx;
- u8 center_freq_seg2_idx;
__le16 basic_mcs_set;
} __packed;
WLAN_STATUS_REJECT_DSE_BAND = 96,
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+ /* 802.11ai */
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
};
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
+#define FILS_ERP_MAX_USERNAME_LEN 16
+#define FILS_ERP_MAX_REALM_LEN 253
+#define FILS_ERP_MAX_RRK_LEN 64
+
+#define PMK_MAX_LEN 48
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
/* AKM suite selectors */
#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4)
#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
#define WLAN_MAX_KEY_LEN 32
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
+extern unsigned int netdev_budget_usecs;
/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_ovs_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OVS_DATAPATH;
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+ int index, bool shared,
+ bool optional);
void reset_control_put(struct reset_control *rstc);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline struct reset_control *__reset_control_get(
+ struct device *dev, const char *id,
+ int index, bool shared, bool optional)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
#endif
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- false);
+ return __reset_control_get(dev, id, 0, false, false);
}
/**
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- false);
+ return __reset_control_get(dev, id, 0, true, false);
}
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
- true);
+ return __reset_control_get(dev, id, 0, false, true);
}
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
- true);
+ return __reset_control_get(dev, id, 0, true, true);
}
/**
/**
* struct vif_params - describes virtual interface parameters
+ * @flags: monitor interface flags, unchanged if 0, otherwise
+ * %MONITOR_FLAG_CHANGED will be set
* @use_4addr: use 4-address frames
* @macaddr: address to use for this virtual interface.
* If this parameter is set to zero address the driver may
* This feature is only fully supported by drivers that enable the
* %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
* only p2p devices with specified MAC.
- * @vht_mumimo_groups: MU-MIMO groupID. used for monitoring only
- * packets belonging to that MU-MIMO groupID.
+ * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets
+ * belonging to that MU-MIMO groupID; %NULL if not changed
+ * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring
+ * MU-MIMO packets going to the specified station; %NULL if not changed
*/
struct vif_params {
+ u32 flags;
int use_4addr;
u8 macaddr[ETH_ALEN];
- u8 vht_mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN];
+ const u8 *vht_mumimo_groups;
+ const u8 *vht_mumimo_follow_addr;
};
/**
* Monitor interface configuration flags. Note that these must be the bits
* according to the nl80211 flags.
*
+ * @MONITOR_FLAG_CHANGED: set if the flags were changed
* @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS
* @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @MONITOR_FLAG_CONTROL: pass control frames
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
*/
enum monitor_flags {
+ MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
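
With the flags folded into struct vif_params, a value of 0 now means "leave the monitor flags unchanged", and MONITOR_FLAG_CHANGED marks a request that actually carries flags; the wil6210 hunk above applies exactly this test. A minimal model with hypothetical driver state:

    #include <assert.h>
    #include <stdint.h>

    #define MONITOR_FLAG_CHANGED (1u << 0)  /* 1 << __NL80211_MNTR_FLAG_INVALID */
    #define MONITOR_FLAG_FCSFAIL (1u << 1)

    static uint32_t monitor_flags;          /* hypothetical driver state */

    static void change_iface(uint32_t flags)
    {
            if (flags)      /* 0 means the caller left the flags untouched */
                    monitor_flags = flags;
    }

    int main(void)
    {
            change_iface(MONITOR_FLAG_CHANGED | MONITOR_FLAG_FCSFAIL);
            assert(monitor_flags & MONITOR_FLAG_FCSFAIL);
            change_iface(0);                /* no monitor flags in request */
            assert(monitor_flags & MONITOR_FLAG_FCSFAIL);   /* unchanged */
            return 0;
    }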
/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
+ * @reqid: identifies this request.
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
* comparisons.
*/
struct cfg80211_sched_scan_request {
+ u64 reqid;
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
* the BSSID of the current association, i.e., to the value that is
* included in the Current AP address field of the Reassociation Request
* frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ * NAI or %NULL if not specified. This is used to construct FILS wrapped
+ * data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ * %NULL if not specified. This specifies the domain name of the ER server
+ * and is used to construct the FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ * messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ * keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
bool pbss;
struct cfg80211_bss_selection bss_select;
const u8 *prev_bssid;
+ const u8 *fils_erp_username;
+ size_t fils_erp_username_len;
+ const u8 *fils_erp_realm;
+ size_t fils_erp_realm_len;
+ u16 fils_erp_next_seq_num;
+ const u8 *fils_erp_rrk;
+ size_t fils_erp_rrk_len;
};
/**
* This structure is passed to the set/del_pmksa() method for PMKSA
* caching.
*
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier used to refer to a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ * derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ * the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ * cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ * %NULL).
*/
struct cfg80211_pmksa {
const u8 *bssid;
const u8 *pmkid;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *ssid;
+ size_t ssid_len;
+ const u8 *cache_id;
};
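
For a FILS PMKSA that is keyed by SSID plus cache identifier rather than by BSSID, the entry would be populated roughly as follows (a sketch; the local variables are assumptions):

	struct cfg80211_pmksa pmksa = {
		.ssid		= ssid,			/* ESS scope */
		.ssid_len	= ssid_len,
		.cache_id	= fils_cache_id,	/* 2 octets from the AP */
		.pmkid		= pmkid,
		.pmk		= pmk,		/* PTKSA derivation by a FILS STA */
		.pmk_len	= pmk_len,
	};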
/**
* the current level is above/below the configured threshold; this may
* need some care when the configuration is changed (without first being
* disabled).
+ * @set_cqm_rssi_range_config: Configure two RSSI thresholds in the
+ * connection quality monitor. An event is to be sent only when the
+ * signal level is found to be outside the two values. The driver should
+ * set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented.
+ * If it is provided, then there is no point in also providing
+ * @set_cqm_rssi_config.
* @set_cqm_txe_config: Configure connection quality monitor TX error
* thresholds.
* @sched_scan_start: Tell the driver to start a scheduled scan.
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params);
int (*del_virtual_intf)(struct wiphy *wiphy,
struct wireless_dev *wdev);
int (*change_virtual_intf)(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst);
+ int (*set_cqm_rssi_range_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_low, s32 rssi_high);
+
int (*set_cqm_txe_config)(struct wiphy *wiphy,
struct net_device *dev,
u32 rate, u32 pkts, u32 intvl);
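
A driver hook honoring the new range semantics might look like this (a sketch only; the mydrv_* names are assumptions, not from the patch):

	static int mydrv_set_cqm_rssi_range(struct wiphy *wiphy,
					    struct net_device *dev,
					    s32 rssi_low, s32 rssi_high)
	{
		struct mydrv *drv = wiphy_priv(wiphy);

		/* one event only when the signal leaves [rssi_low, rssi_high] */
		drv->rssi_low = rssi_low;
		drv->rssi_high = rssi_high;
		return mydrv_program_thresholds(drv);
	}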
struct cfg80211_conn;
struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
+struct cfg80211_cqm_config;
/**
* struct wireless_dev - wireless device state
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
+ * @cqm_config: (private) nl80211 RSSI monitor state
*/
struct wireless_dev {
struct wiphy *wiphy;
bool prev_bssid_valid;
} wext;
#endif
+
+ struct cfg80211_cqm_config *cqm_config;
};
static inline u8 *wdev_address(struct wireless_dev *wdev)
gfp);
}
+/**
+ * cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ */
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
enum ieee80211_bss_type bss_type,
- enum ieee80211_privacy);
+ enum ieee80211_privacy privacy);
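
A typical lookup, e.g. when resolving the @bss member of the connect response parameters introduced below (a sketch; chan/bssid/ssid are assumptions):

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
	if (!bss)
		report_by_bssid = true;	/* hypothetical fallback */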
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
#define CFG80211_TESTMODE_DUMP(cmd)
#endif
+/**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of the bss to which the STA is connected; can be obtained
+ * through cfg80211_get_bss() (may be %NULL). Only one of @bssid and @bss
+ * needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ * @fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ * FILS Authentication. This value should be specified irrespective of the
+ * status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ * used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+ int status;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ bool update_erp_next_seq_num;
+ u16 fils_erp_next_seq_num;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *pmkid;
+ enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp);
+
/**
* cfg80211_connect_bss - notify cfg80211 of connection result
*
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_result(), but with the option of identifying the exact bss
- * entry for the connection. Only one of these functions should be called.
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
- struct cfg80211_bss *bss, const u8 *req_ie,
- size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp,
- enum nl80211_timeout_reason timeout_reason);
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
+{
+ struct cfg80211_connect_resp_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.status = status;
+ params.bssid = bssid;
+ params.bss = bss;
+ params.req_ie = req_ie;
+ params.req_ie_len = req_ie_len;
+ params.resp_ie = resp_ie;
+ params.resp_ie_len = resp_ie_len;
+ params.timeout_reason = timeout_reason;
+
+ cfg80211_connect_done(dev, &params, gfp);
+}
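
A driver reporting a successful FILS connection might fill the structure like this (a sketch, not from the patch; struct mydrv and its fields are assumptions):

	static void mydrv_report_fils_connect(struct mydrv *drv,
					      struct net_device *dev)
	{
		struct cfg80211_connect_resp_params params;

		memset(&params, 0, sizeof(params));
		params.status = WLAN_STATUS_SUCCESS;
		params.bssid = drv->bssid;
		params.fils_kek = drv->kek;
		params.fils_kek_len = drv->kek_len;
		params.update_erp_next_seq_num = true;
		params.fils_erp_next_seq_num = drv->erp_seq;
		params.pmk = drv->pmk;
		params.pmk_len = drv->pmk_len;
		params.pmkid = drv->pmkid;

		cfg80211_connect_done(dev, &params, GFP_KERNEL);
	}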
/**
* cfg80211_connect_result - notify cfg80211 of connection result
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
- * one of these functions should be called.
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* in a sequence where no explicit authentication/association rejection was
* received from the AP. This could happen, e.g., due to not being able to send
* out the Authentication or Association Request frame or timing out while
- * waiting for the response.
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
DSA_TAG_PROTO_BRCM,
DSA_TAG_PROTO_QCA,
DSA_TAG_PROTO_MTK,
+ DSA_TAG_PROTO_LAN9303,
DSA_TAG_LAST, /* MUST BE LAST */
};
__be16 o_flags;
__be32 i_key;
__be32 o_key;
+
+ __u32 fwmark;
};
/* IPv6 tunnel */
unsigned int prl_count; /* # of entries in PRL */
unsigned int ip_tnl_net_id;
struct gro_cells gro_cells;
+ __u32 fwmark;
bool collect_md;
bool ignore_df;
};
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p);
+ struct ip_tunnel_parm *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p);
+ struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
struct ip_tunnel_encap_ops {
* implies disabled. As with the cfg80211 callback, a change here should
* cause an event to be sent indicating where the current value is in
* relation to the newly configured threshold.
+ * @cqm_rssi_low: Connection quality monitor RSSI lower threshold, a zero value
+ * implies disabled. This is an alternative mechanism to the single
+ * threshold event and can't be enabled simultaneously with it.
+ * @cqm_rssi_high: Connection quality monitor RSSI upper threshold.
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
* @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
* may filter ARP queries targeted for other addresses than listed here.
u16 ht_operation_mode;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
+ s32 cqm_rssi_low;
+ s32 cqm_rssi_high;
struct cfg80211_chan_def chandef;
struct ieee80211_mu_group_data mu_group;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
* RTS threshold
* @short_preamble: whether mac80211 will request short-preamble transmission
* if the selected rate supports it
- * @max_rate_idx: user-requested maximum (legacy) rate
- * (deprecated; this will be removed once drivers get updated to use
- * rate_idx_mask)
* @rate_idx_mask: user-requested (legacy) rate mask
* @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use)
* @bss: whether this frame is sent out in AP or IBSS mode
struct sk_buff *skb;
struct ieee80211_tx_rate reported_rate;
bool rts, short_preamble;
- u8 max_rate_idx;
u32 rate_idx_mask;
u8 *rate_idx_mcs_mask;
bool bss;
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- bool (*destroy)(struct tcf_proto*, bool);
+ void (*destroy)(struct tcf_proto*);
unsigned long (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
unsigned long *, bool);
- int (*delete)(struct tcf_proto*, unsigned long);
+ int (*delete)(struct tcf_proto*, unsigned long, bool*);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
/* rtnetlink specific */
__u32 tc_classid;
__u32 data;
__u32 data_end;
+ __u32 napi_id;
};
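
With the new field, a socket filter can key off the NAPI context of the receiving device; a sketch in restricted BPF C (the SEC() annotation comes from the usual BPF helper headers and is an assumption here):

	SEC("socket")
	int keep_only_with_napi_id(struct __sk_buff *skb)
	{
		/* reads as 0 when CONFIG_NET_RX_BUSY_POLL is off or the id
		 * is below MIN_NAPI_ID, per the filter.c hunk below
		 */
		return skb->napi_id ? skb->len : 0;
	}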
struct bpf_tunnel_key {
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
IFLA_IPTUN_COLLECT_METADATA,
+ IFLA_IPTUN_FWMARK,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
IFLA_GRE_ENCAP_DPORT,
IFLA_GRE_COLLECT_METADATA,
IFLA_GRE_IGNORE_DF,
+ IFLA_GRE_FWMARK,
__IFLA_GRE_MAX,
};
IFLA_VTI_OKEY,
IFLA_VTI_LOCAL,
IFLA_VTI_REMOTE,
+ IFLA_VTI_FWMARK,
__IFLA_VTI_MAX,
};
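
All three tunnel families gain the same NLA_U32 attribute carrying a firewall mark for the encapsulated flow; from userspace it is attached to the link's netlink data, roughly as follows (libnl-style sketch; message setup and error handling elided):

	nla_put_u32(msg, IFLA_IPTUN_FWMARK, mark);	/* ipip, sit, ip6tnl */
	nla_put_u32(msg, IFLA_GRE_FWMARK, mark);	/* gre, ip6gre */
	nla_put_u32(msg, IFLA_VTI_FWMARK, mark);	/* vti, vti6 */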
* Multiple such rules can be created.
*/
+/**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertised by drivers by
+ * setting the @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. Drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The below additional
+ * parameters should be given to the driver in %NL80211_CMD_CONNECT.
+ * %NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ * %NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, the driver needs to
+ * provide the below additional parameters to userspace.
+ * %NL80211_ATTR_FILS_KEK - used for key renewal
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ * %NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ * %NL80211_ATTR_PMK - used to update the PMKSA cache in userspace
+ * The PMKSA can be maintained in userspace persistently so that it can be
+ * reused later, e.g., across reboots or after turning wifi off and on.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on the FILS cache identifier. Additionally, %NL80211_ATTR_PMK is used with
+ * %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for the
+ * driver to use in a FILS shared key connection with PMKSA caching.
+ */
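
On the userspace side the ERP material rides on the CONNECT request, roughly as follows (libnl-style sketch; the buffers and lengths are assumptions):

	nla_put(msg, NL80211_ATTR_FILS_ERP_USERNAME, username_len, username);
	nla_put(msg, NL80211_ATTR_FILS_ERP_REALM, realm_len, realm);
	nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, erp_seq);
	nla_put(msg, NL80211_ATTR_FILS_ERP_RRK, rrk_len, rrk);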
+
/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
*
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ * (for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ * (PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ * using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ * %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ * authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ * advertised by a FILS capable AP identifying the scope of PMKSA in an
+ * ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ * %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ * authentication.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
* u32 attribute with an &enum nl80211_timeout_reason value. This is used,
* e.g., with %NL80211_CMD_CONNECT event.
*
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ * username part of NAI used to refer keys rRK and rIK. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ * of NAI specifying the domain name of the ER server. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ * to use in ERP messages. This is used in generating the FILS wrapped data
+ * for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ * NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ * %NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ * from successful FILS authentication and is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
+ * identifying the scope of PMKSAs. This is used with
+ * @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ * This is used with @NL80211_CMD_SET_PMKSA.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
NL80211_ATTR_TIMEOUT_REASON,
+ NL80211_ATTR_FILS_ERP_USERNAME,
+ NL80211_ATTR_FILS_ERP_REALM,
+ NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+ NL80211_ATTR_FILS_ERP_RRK,
+ NL80211_ATTR_FILS_CACHE_ID,
+
+ NL80211_ATTR_PMK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
* @__NL80211_ATTR_CQM_INVALID: invalid
* @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
* the threshold for the RSSI level at which an event will be sent. Zero
- * to disable.
+ * to disable. Alternatively, if %NL80211_EXT_FEATURE_CQM_RSSI_LIST is
+ * set, multiple values can be supplied as a low-to-high sorted array of
+ * threshold values in dBm. Events will be sent when the RSSI value
+ * crosses any of the thresholds.
* @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
* the minimum amount the RSSI level must change after an event before a
* new event may be issued (to reduce effects of RSSI oscillation).
* @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
* for reporting BSSs with better RSSI than the current connected BSS
* (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ * %NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ * RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ * authentication with %NL80211_CMD_CONNECT.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
* change to the channel status.
* @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is
* over, channel becomes usable.
+ * @NL80211_RADAR_PRE_CAC_EXPIRED: The Channel Availability Check done on this
+ * non-operating channel has expired and is no longer valid. A new CAC must
+ * be done on this channel before starting an operation. This is not
+ * applicable to the ETSI DFS domain, where pre-CAC stays valid forever.
*/
enum nl80211_radar_event {
NL80211_RADAR_DETECTED,
NL80211_RADAR_CAC_FINISHED,
NL80211_RADAR_CAC_ABORTED,
NL80211_RADAR_NOP_FINISHED,
+ NL80211_RADAR_PRE_CAC_EXPIRED,
};
/**
NET_CORE_AEVENT_ETIME=20,
NET_CORE_AEVENT_RSEQTH=21,
NET_CORE_WARNINGS=22,
+ NET_CORE_BUDGET_USECS=23,
};
/* /proc/sys/net/ethernet */
if (insn->imm == BPF_FUNC_xdp_adjust_head)
prog->xdp_adjust_head = 1;
if (insn->imm == BPF_FUNC_tail_call) {
+ /* If we tail call into other programs, we
+ * cannot make any assumptions since they can
+ * be replaced dynamically during runtime in
+ * the program array.
+ */
+ prog->cb_access = 1;
+ prog->xdp_adjust_head = 1;
+
/* mark bpf_tail_call as different opcode to avoid
* conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* path is super cold. Let's just sleep a bit and retry.
*/
pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
- if (IS_ERR_OR_NULL(pinned_sb) ||
+ if (IS_ERR(pinned_sb) ||
!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
mutex_unlock(&cgroup_mutex);
if (!IS_ERR_OR_NULL(pinned_sb))
(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
/*
- * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
* .data and .bss to fit in the required 32MB limit for the kernel. With
- * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
* So, reduce the static allocations for lockdep-related structures so that
* everything fits in the current required size limit.
*/
-#ifdef CONFIG_PROVE_LOCKING_SMALL
+#ifdef CONFIG_LOCKDEP_SMALL
/*
* MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
* we track.
{ CTL_INT, NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
{ CTL_INT, NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
{ CTL_INT, NET_CORE_WARNINGS, "warnings" },
+ { CTL_INT, NET_CORE_BUDGET_USECS, "netdev_budget_usecs" },
{},
};
ftrace_probe_registered = 1;
}
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
{
int i;
if (!ftrace_probe_registered)
- return;
+ return false;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
if (hhd->first)
- return;
+ return false;
}
/* no more funcs left */
ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
+ return true;
}
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int i, ret;
+ bool disabled;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL;
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash)
/* Hmm, should report this somehow */
}
}
mutex_lock(&ftrace_lock);
- __disable_ftrace_function_probe();
+ disabled = __disable_ftrace_function_probe();
/*
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+ /* still need to update the function call sites */
+ if (ftrace_enabled && !disabled)
+ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+ &old_hash_ops);
synchronize_sched();
if (!ret)
free_ftrace_hash_rcu(old_hash);
trace_free_pid_list(pid_list);
}
+void ftrace_clear_pids(struct trace_array *tr)
+{
+ mutex_lock(&ftrace_lock);
+
+ clear_ftrace_pids(tr);
+
+ mutex_unlock(&ftrace_lock);
+}
+
static void ftrace_pid_reset(struct trace_array *tr)
{
mutex_lock(&ftrace_lock);
tracing_set_nop(tr);
event_trace_del_tracer(tr);
+ ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove_recursive(tr->dir);
free_trace_buffers(tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
/* ftace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
For more details, see Documentation/locking/lockdep-design.txt.
-config PROVE_LOCKING_SMALL
- bool
-
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select KALLSYMS
select KALLSYMS_ALL
+config LOCKDEP_SMALL
+ bool
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
+unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
- unsigned long time_limit = jiffies + 2;
+ unsigned long time_limit = jiffies +
+ usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
LIST_HEAD(list);
LIST_HEAD(repoll);
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
+#include <net/busy_poll.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
* @new_prog: buffer where converted program will be stored
* @new_len: pointer to store length of converted program
*
- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
+ * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
+ * style extended BPF (eBPF).
* Conversion workflow:
*
* 1) First pass for calculating the new program length:
*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
else
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
+#endif
+ break;
+
+ case offsetof(struct __sk_buff, napi_id):
+#if defined(CONFIG_NET_RX_BUSY_POLL)
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, napi_id) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, napi_id));
+ *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
+ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
+#else
+ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
break;
}
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype;
serr->opt_stats = opt_stats;
+ serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk->sk_protocol == IPPROTO_TCP &&
.extra1 = &one,
.extra2 = &max_skb_frags,
},
+ {
+ .procname = "netdev_budget_usecs",
+ .data = &netdev_budget_usecs,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
{ }
};
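
The new knob is exposed as net.core.netdev_budget_usecs and bounds how long one net_rx_action() run may poll, complementing the packet-count budget. A minimal userspace sketch for raising it (the procfs path follows from the table entry above):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/core/netdev_budget_usecs", "w");

		if (!f)
			return 1;
		fputs("8000\n", f);	/* allow up to 8 ms of polling per run */
		return fclose(f) ? 1 : 0;
	}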
config NET_DSA_TAG_MTK
bool
+
+config NET_DSA_TAG_LAN9303
+ bool
+
endif
dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
[DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_LAN9303
+ [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
[DSA_TAG_PROTO_NONE] = &none_ops,
};
/* tag_mtk.c */
extern const struct dsa_device_ops mtk_netdev_ops;
+/* tag_lan9303.c */
+extern const struct dsa_device_ops lan9303_netdev_ops;
+
#endif
--- /dev/null
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/dsa.h>
+#include "dsa_priv.h"
+
+/* To define the outgoing port and to discover the incoming port, a regular
+ * VLAN tag is used by the LAN9303. But its VID meaning is 'special':
+ *
+ * Dest MAC Src MAC TAG Type
+ * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 | 1 2 |...
+ * |<------->|
+ * TAG:
+ * |<------------->|
+ * | 1 2 | 3 4 |
+ * TPID VID
+ * 0x8100
+ *
+ * VID bit 3 indicates a request for an ALR lookup.
+ *
+ * If VID bit 3 is zero, then bits 0 and 1 specify the destination port
+ * (0, 1, 2) or broadcast (3) or the source port (1, 2).
+ *
+ * VID bit 4 is used to specify if the STP port state should be overridden.
+ * Required when no forwarding between the external ports should happen.
+ */
+
+#define LAN9303_TAG_LEN 4
+#define LAN9303_MAX_PORTS 3
+
+static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ u16 *lan9303_tag;
+
+ /* insert a special VLAN tag between the MAC addresses
+ * and the current ethertype field.
+ */
+ if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
+ dev_dbg(&dev->dev,
+ "Cannot make room for the special tag. Dropping packet\n");
+ goto out_free;
+ }
+
+ /* provide 'LAN9303_TAG_LEN' bytes additional space */
+ skb_push(skb, LAN9303_TAG_LEN);
+
+ /* make room between MACs and Ether-Type */
+ memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);
+
+ lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN);
+ lan9303_tag[0] = htons(ETH_P_8021Q);
+ lan9303_tag[1] = htons(p->dp->index | BIT(4));
+
+ return skb;
+out_free:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ u16 *lan9303_tag;
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_switch *ds;
+ unsigned int source_port;
+
+ ds = dst->ds[0];
+
+ if (unlikely(!ds)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to missing DSA switch device\n");
+ return NULL;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
+ dev_warn_ratelimited(&dev->dev,
+ "Dropping packet, cannot pull\n");
+ return NULL;
+ }
+
+ /* '->data' points into the middle of our special VLAN tag information:
+ *
+ * ~ MAC src | 0x81 | 0x00 | 0xyy | 0xzz | ether type
+ * ^
+ * ->data
+ */
+ lan9303_tag = (u16 *)(skb->data - 2);
+
+ if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
+ return NULL;
+ }
+
+ source_port = ntohs(lan9303_tag[1]) & 0x3;
+
+ if (source_port >= LAN9303_MAX_PORTS) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
+ return NULL;
+ }
+
+ if (!ds->ports[source_port].netdev) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid netdev or device\n");
+ return NULL;
+ }
+
+ /* remove the special VLAN tag between the MAC addresses
+ * and the current ethertype field.
+ */
+ skb_pull_rcsum(skb, 2 + 2);
+ memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
+ 2 * ETH_ALEN);
+
+ /* forward the packet to the dedicated interface */
+ skb->dev = ds->ports[source_port].netdev;
+
+ return skb;
+}
+
+const struct dsa_device_ops lan9303_netdev_ops = {
+ .xmit = lan9303_xmit,
+ .rcv = lan9303_rcv,
+};
static int ipgre_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm *parms,
+ __u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
}
+ if (data[IFLA_GRE_FWMARK])
+ *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
+
return 0;
}
{
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ __u32 fwmark = 0;
int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) {
return err;
}
- err = ipgre_netlink_parms(dev, data, tb, &p);
+ err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
if (err < 0)
return err;
- return ip_tunnel_newlink(dev, tb, &p);
+ return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
+ struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ __u32 fwmark = t->fwmark;
int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) {
- struct ip_tunnel *t = netdev_priv(dev);
err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
- err = ipgre_netlink_parms(dev, data, tb, &p);
+ err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
if (err < 0)
return err;
- return ip_tunnel_changelink(dev, tb, &p);
+ return ip_tunnel_changelink(dev, tb, &p, fwmark);
}
static size_t ipgre_get_size(const struct net_device *dev)
nla_total_size(0) +
/* IFLA_GRE_IGNORE_DF */
nla_total_size(1) +
+ /* IFLA_GRE_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
nla_put_u8(skb, IFLA_GRE_PMTUDISC,
- !!(p->iph.frag_off & htons(IP_DF))))
+ !!(p->iph.frag_off & htons(IP_DF))) ||
+ nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
[IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 },
+ [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
return false;
/* Support IP_PKTINFO on tstamp packets if requested, to correlate
- * timestamp with egress dev. Not possible for packets without dev
+ * timestamp with egress dev. Not possible for packets without iif
* or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
*/
- if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
- (!skb->dev))
+ info = PKTINFO_SKB_CB(skb);
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+ !info->ipi_ifindex)
return false;
- info = PKTINFO_SKB_CB(skb);
info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
- info->ipi_ifindex = skb->dev->ifindex;
return true;
}
case MCAST_LEAVE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_UNBLOCK_SOURCE:
+ case IP_ROUTER_ALERT:
return true;
}
return false;
static inline void init_tunnel_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
- __be32 key, __u8 tos, int oif)
+ __be32 key, __u8 tos, int oif,
+ __u32 mark)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = oif;
fl4->flowi4_tos = tos;
fl4->flowi4_proto = proto;
fl4->fl4_gre_key = key;
+ fl4->flowi4_mark = mark;
}
static int ip_tunnel_bind_dev(struct net_device *dev)
init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- RT_TOS(iph->tos), tunnel->parms.link);
+ RT_TOS(iph->tos), tunnel->parms.link,
+ tunnel->fwmark);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
- RT_TOS(tos), tunnel->parms.link);
+ RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
rt = ip_route_output_key(tunnel->net, &fl4);
}
init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
- tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
+ tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+ tunnel->fwmark);
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
goto tx_error;
struct ip_tunnel *t,
struct net_device *dev,
struct ip_tunnel_parm *p,
- bool set_mtu)
+ bool set_mtu,
+ __u32 fwmark)
{
ip_tunnel_del(itn, t);
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.tos = p->iph.tos;
t->parms.iph.frag_off = p->iph.frag_off;
- if (t->parms.link != p->link) {
+ if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
t->parms.link = p->link;
+ t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
dev->mtu = mtu;
if (t) {
err = 0;
- ip_tunnel_update(itn, t, dev, p, true);
+ ip_tunnel_update(itn, t, dev, p, true, 0);
} else {
err = -ENOENT;
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p)
+ struct ip_tunnel_parm *p, __u32 fwmark)
{
struct ip_tunnel *nt;
struct net *net = dev_net(dev);
nt->net = net;
nt->parms = *p;
+ nt->fwmark = fwmark;
err = register_netdevice(dev);
if (err)
goto out;
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p)
+ struct ip_tunnel_parm *p, __u32 fwmark)
{
struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
}
}
- ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
+ ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
}
static void vti_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm *parms,
+ __u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
if (data[IFLA_VTI_REMOTE])
parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);
+ if (data[IFLA_VTI_FWMARK])
+ *fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}
static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ip_tunnel_parm parms;
+ __u32 fwmark = 0;
- vti_netlink_parms(data, &parms);
- return ip_tunnel_newlink(dev, tb, &parms);
+ vti_netlink_parms(data, &parms, &fwmark);
+ return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
+ struct ip_tunnel *t = netdev_priv(dev);
+ __u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
- vti_netlink_parms(data, &p);
- return ip_tunnel_changelink(dev, tb, &p);
+ vti_netlink_parms(data, &p, &fwmark);
+ return ip_tunnel_changelink(dev, tb, &p, fwmark);
}
static size_t vti_get_size(const struct net_device *dev)
nla_total_size(4) +
/* IFLA_VTI_REMOTE */
nla_total_size(4) +
+ /* IFLA_VTI_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr);
nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr);
+ nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark);
return 0;
}
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
[IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
[IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vti_link_ops __read_mostly = {
}
static void ipip_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms, bool *collect_md)
+ struct ip_tunnel_parm *parms, bool *collect_md,
+ __u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
if (data[IFLA_IPTUN_COLLECT_METADATA])
*collect_md = true;
+
+ if (data[IFLA_IPTUN_FWMARK])
+ *fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
/* This function returns true when ENCAP attributes are present in the nl msg */
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ __u32 fwmark = 0;
if (ipip_netlink_encap_parms(data, &ipencap)) {
int err = ip_tunnel_encap_setup(t, &ipencap);
return err;
}
- ipip_netlink_parms(data, &p, &t->collect_md);
- return ip_tunnel_newlink(dev, tb, &p);
+ ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
+ return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
+ struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
bool collect_md;
+ __u32 fwmark = t->fwmark;
if (ipip_netlink_encap_parms(data, &ipencap)) {
- struct ip_tunnel *t = netdev_priv(dev);
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
- ipip_netlink_parms(data, &p, &collect_md);
+ ipip_netlink_parms(data, &p, &collect_md, &fwmark);
if (collect_md)
return -EINVAL;
(!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
return -EINVAL;
- return ip_tunnel_changelink(dev, tb, &p);
+ return ip_tunnel_changelink(dev, tb, &p, fwmark);
}
static size_t ipip_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_IPTUN_COLLECT_METADATA */
nla_total_size(0) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
- !!(parm->iph.frag_off & htons(IP_DF))))
+ !!(parm->iph.frag_off & htons(IP_DF))) ||
+ nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops ipip_link_ops __read_mostly = {
struct net *net = sock_net(sk);
struct mr_table *mrt;
- rtnl_lock();
+ ASSERT_RTNL();
ipmr_for_each_table(mrt, net) {
if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
mroute_clean_tables(mrt, false);
}
}
- rtnl_unlock();
}
/* Socket options and virtual interface manipulation. The whole
if (sk != rcu_access_pointer(mrt->mroute_sk)) {
ret = -EACCES;
} else {
- /* We need to unlock here because mrtsock_destruct takes
- * care of rtnl itself and we can't change that due to
- * the IP_ROUTER_ALERT setsockopt which runs without it.
- */
- rtnl_unlock();
ret = ip_ra_control(sk, 0, NULL);
- goto out;
+ goto out_unlock;
}
break;
case MRT_ADD_VIF:
}
out_unlock:
rtnl_unlock();
-out:
return ret;
}
/*
* Raw sockets may have direct kernel references. Kill them.
*/
+ rtnl_lock();
ip_ra_control(sk, 0, NULL);
+ rtnl_unlock();
sk_common_release(sk);
}
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
-MODULE_PARM_DESC(hystart_detect, "hyrbrid slow start detection mechanisms"
+MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
" 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
/* This barrier is coupled with smp_rmb() in tcp_poll() */
smp_wmb();
+ tcp_done(sk);
+
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
-
- tcp_done(sk);
}
/*
else
tp->pred_flags = 0;
- if (!sock_flag(sk, SOCK_DEAD)) {
- sk->sk_state_change(sk);
- sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
- }
}
static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_cookie foc = { .len = -1 };
int saved_clamp = tp->rx_opt.mss_clamp;
+ bool fastopen_fail;
tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tcp_finish_connect(sk, skb);
- if ((tp->syn_fastopen || tp->syn_data) &&
- tcp_rcv_fastopen_synack(sk, skb, &foc))
- return -1;
+ fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
+ tcp_rcv_fastopen_synack(sk, skb, &foc);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+ }
+ if (fastopen_fail)
+ return -1;
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
* At one point, excluding local errors was a quick test to identify icmp/icmp6
* errors. This is no longer true, but the test remained, so the v6 stack,
* unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
*/
static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
struct sock_exterr_skb *serr)
if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
return false;
- if (!skb->dev)
+ if (!IP6CB(skb)->iif)
return false;
- if (skb->protocol == htons(ETH_P_IPV6))
- IP6CB(skb)->iif = skb->dev->ifindex;
- else
- PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
return true;
}
& IPV6_TCLASS_MASK;
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ else
+ fl6.flowi6_mark = t->parms.fwmark;
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ else
+ fl6.flowi6_mark = t->parms.fwmark;
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
t->parms.o_key = p->o_key;
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
+ t->parms.fwmark = p->fwmark;
dst_cache_reset(&t->dst_cache);
ip6gre_tnl_link_config(t, set_mtu);
return 0;
if (data[IFLA_GRE_FLAGS])
parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
+
+ if (data[IFLA_GRE_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
}
static int ip6gre_tap_init(struct net_device *dev)
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_GRE_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
- nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
+ nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
+ nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
* RFC4291 2.5.3
+ * The loopback address must not be used as the source address in IPv6
+ * packets that are sent outside of a single node. [..]
* A packet received on an interface with a destination address
* of loopback must be dropped.
*/
- if (!(dev->flags & IFF_LOOPBACK) &&
- ipv6_addr_loopback(&hdr->daddr))
+ if ((ipv6_addr_loopback(&hdr->saddr) ||
+ ipv6_addr_loopback(&hdr->daddr)) &&
+ !(dev->flags & IFF_LOOPBACK))
goto err;
/* RFC4291 Errata ID: 3480
& IPV6_TCLASS_MASK;
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ else
+ fl6.flowi6_mark = t->parms.fwmark;
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ else
+ fl6.flowi6_mark = t->parms.fwmark;
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
t->parms.flowinfo = p->flowinfo;
t->parms.link = p->link;
t->parms.proto = p->proto;
+ t->parms.fwmark = p->fwmark;
dst_cache_reset(&t->dst_cache);
ip6_tnl_link_config(t);
return 0;
if (data[IFLA_IPTUN_COLLECT_METADATA])
parms->collect_md = true;
+
+ if (data[IFLA_IPTUN_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
nla_total_size(2) +
/* IFLA_IPTUN_COLLECT_METADATA */
nla_total_size(0) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
- nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
+ nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
+ nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
if (parm->collect_md)
if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
goto nla_put_failure;
+
return 0;
nla_put_failure:
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
t->parms.proto = p->proto;
+ t->parms.fwmark = p->fwmark;
dst_cache_reset(&t->dst_cache);
vti6_link_config(t);
return 0;
if (data[IFLA_VTI_OKEY])
parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+
+ if (data[IFLA_VTI_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}
static int vti6_newlink(struct net *src_net, struct net_device *dev,
nla_total_size(4) +
/* IFLA_VTI_OKEY */
nla_total_size(4) +
+ /* IFLA_VTI_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) ||
nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) ||
nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
- nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
+ nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key) ||
+ nla_put_u32(skb, IFLA_VTI_FWMARK, parm->fwmark))
goto nla_put_failure;
return 0;
[IFLA_VTI_REMOTE] = { .len = sizeof(struct in6_addr) },
[IFLA_VTI_IKEY] = { .type = NLA_U32 },
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vti6_link_ops __read_mostly = {
goto tx_error;
}
- rt = ip_route_output_ports(tunnel->net, &fl4, NULL,
- dst, tiph->saddr,
- 0, 0,
- IPPROTO_IPV6, RT_TOS(tos),
- tunnel->parms.link);
+ flowi4_init_output(&fl4, tunnel->parms.link, tunnel->fwmark,
+ RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPV6,
+ 0, dst, tiph->saddr, 0, 0,
+ sock_net_uid(tunnel->net, NULL));
+ rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
+
if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
}
-static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
+static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
+ __u32 fwmark)
{
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
ipip6_tunnel_link(sitn, t);
t->parms.iph.ttl = p->iph.ttl;
t->parms.iph.tos = p->iph.tos;
- if (t->parms.link != p->link) {
+ if (t->parms.link != p->link || t->fwmark != fwmark) {
t->parms.link = p->link;
+ t->fwmark = fwmark;
ipip6_tunnel_bind_dev(t->dev);
}
dst_cache_reset(&t->dst_cache);
t = netdev_priv(dev);
}
- ipip6_tunnel_update(t, &p);
+ ipip6_tunnel_update(t, &p, t->fwmark);
}
if (t) {
}
static void ipip6_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm *parms,
+ __u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
if (data[IFLA_IPTUN_PROTO])
parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+ if (data[IFLA_IPTUN_FWMARK])
+ *fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
/* This function returns true when ENCAP attributes are present in the nl msg */
return err;
}
- ipip6_netlink_parms(data, &nt->parms);
+ ipip6_netlink_parms(data, &nt->parms, &nt->fwmark);
if (ipip6_tunnel_locate(net, &nt->parms, 0))
return -EEXIST;
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
+ __u32 fwmark = t->fwmark;
int err;
if (dev == sitn->fb_tunnel_dev)
return err;
}
- ipip6_netlink_parms(data, &p);
+ ipip6_netlink_parms(data, &p, &fwmark);
if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
(!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
} else
t = netdev_priv(dev);
- ipip6_tunnel_update(t, &p);
+ ipip6_tunnel_update(t, &p, fwmark);
#ifdef CONFIG_IPV6_SIT_6RD
if (ipip6_netlink_6rd_parms(data, &ip6rd))
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
0;
}
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
!!(parm->iph.frag_off & htons(IP_DF))) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
- nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags))
+ nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) ||
+ nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SIT_6RD
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
};
static void ipip6_dellink(struct net_device *dev, struct list_head *head)
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
return 0;
}
+
static struct sock *__udp6_lib_demux_lookup(struct net *net,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif)
{
+ unsigned short hnum = ntohs(loc_port);
+ unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+ unsigned int slot2 = hash2 & udp_table.mask;
+ struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+ const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
struct sock *sk;
- rcu_read_lock();
- sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
- dif, &udp_table, NULL);
- if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
- sk = NULL;
- rcu_read_unlock();
-
- return sk;
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+ return sk;
+ /* Only check first socket in chain */
+ break;
+ }
+ return NULL;
}
static void udp_v6_early_demux(struct sk_buff *skb)
else
return;
- if (!sk)
+ if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
return;
skb->sk = sk;
spin_lock_init(&tid_agg_rx->reorder_lock);
/* rx timer */
- tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
- tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer_deferrable(&tid_agg_rx->session_timer);
+ setup_deferrable_timer(&tid_agg_rx->session_timer,
+ sta_rx_agg_session_timer_expired,
+ (unsigned long)&sta->timer_to_tid[tid]);
/* rx reorder timer */
- tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
- tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&tid_agg_rx->reorder_timer);
+ setup_timer(&tid_agg_rx->reorder_timer,
+ sta_rx_agg_reorder_timer_expired,
+ (unsigned long)&sta->timer_to_tid[tid]);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
tid_tx->timeout = timeout;
/* response timer */
- tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
- tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&tid_tx->addba_resp_timer);
+ setup_timer(&tid_tx->addba_resp_timer,
+ sta_addba_resp_timer_expired,
+ (unsigned long)&sta->timer_to_tid[tid]);
/* tx timer */
- tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
- tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer_deferrable(&tid_tx->session_timer);
+ setup_deferrable_timer(&tid_tx->session_timer,
+ sta_tx_agg_session_timer_expired,
+ (unsigned long)&sta->timer_to_tid[tid]);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
*
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2015-2016 Intel Deutschland GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
*
* This file is GPLv2 as found in COPYING.
*/
#include "mesh.h"
#include "wme.h"
+static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata,
+ struct vif_params *params)
+{
+ bool mu_mimo_groups = false;
+ bool mu_mimo_follow = false;
+
+ if (params->vht_mumimo_groups) {
+ u64 membership;
+
+ BUILD_BUG_ON(sizeof(membership) != WLAN_MEMBERSHIP_LEN);
+
+ memcpy(sdata->vif.bss_conf.mu_group.membership,
+ params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
+ memcpy(sdata->vif.bss_conf.mu_group.position,
+ params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
+ WLAN_USER_POSITION_LEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+ /* don't care about endianness - just check for 0 */
+ memcpy(&membership, params->vht_mumimo_groups,
+ WLAN_MEMBERSHIP_LEN);
+ mu_mimo_groups = membership != 0;
+ }
+
+ if (params->vht_mumimo_follow_addr) {
+ mu_mimo_follow =
+ is_valid_ether_addr(params->vht_mumimo_follow_addr);
+ ether_addr_copy(sdata->u.mntr.mu_follow_addr,
+ params->vht_mumimo_follow_addr);
+ }
+
+ sdata->vif.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow;
+}
+
+static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
+ struct vif_params *params)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *monitor_sdata;
+
+ /* check flags first */
+ if (params->flags && ieee80211_sdata_running(sdata)) {
+ u32 mask = MONITOR_FLAG_COOK_FRAMES | MONITOR_FLAG_ACTIVE;
+
+ /*
+ * Prohibit MONITOR_FLAG_COOK_FRAMES and
+ * MONITOR_FLAG_ACTIVE from being changed while the
+ * interface is up.
+ * Otherwise we would need to add a lot of cruft
+ * to update everything:
+ * cooked_mntrs, monitor and all fif_* counters,
+ * and reconfigure the hardware.
+ */
+ if ((params->flags & mask) != (sdata->u.mntr.flags & mask))
+ return -EBUSY;
+ }
+
+ /* also validate MU-MIMO change */
+ monitor_sdata = rtnl_dereference(local->monitor_sdata);
+
+ if (!monitor_sdata &&
+ (params->vht_mumimo_groups || params->vht_mumimo_follow_addr))
+ return -EOPNOTSUPP;
+
+ /* apply all changes now - no failures allowed */
+
+ if (monitor_sdata)
+ ieee80211_set_mu_mimo_follow(monitor_sdata, params);
+
+ if (params->flags) {
+ if (ieee80211_sdata_running(sdata)) {
+ ieee80211_adjust_monitor_flags(sdata, -1);
+ sdata->u.mntr.flags = params->flags;
+ ieee80211_adjust_monitor_flags(sdata, 1);
+
+ ieee80211_configure_filter(local);
+ } else {
+ /*
+ * Because the interface is down, ieee80211_do_stop
+ * and ieee80211_do_open take care of "everything"
+ * mentioned in the comment above.
+ */
+ sdata->u.mntr.flags = params->flags;
+ }
+ }
+
+ return 0;
+}
+
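
The consolidated helper keeps the old semantics: while the interface is running, only flag changes outside the COOK_FRAMES/ACTIVE mask are accepted. The masked comparison in isolation (standalone sketch, illustrative flag values):

    #include <stdio.h>

    #define FLAG_COOK_FRAMES 0x1 /* illustrative values */
    #define FLAG_ACTIVE      0x2
    #define FLAG_OTHER       0x4

    /* Accept the change only if no bit inside the frozen mask flips. */
    static int mon_flags_ok(unsigned int oldf, unsigned int newf)
    {
            unsigned int mask = FLAG_COOK_FRAMES | FLAG_ACTIVE;

            return (newf & mask) == (oldf & mask);
    }

    int main(void)
    {
            printf("%d\n", mon_flags_ok(FLAG_ACTIVE, FLAG_ACTIVE | FLAG_OTHER)); /* 1 */
            printf("%d\n", mon_flags_ok(FLAG_ACTIVE, FLAG_OTHER));               /* 0: -EBUSY */
            return 0;
    }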
static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
if (err)
return ERR_PTR(err);
- if (type == NL80211_IFTYPE_MONITOR && flags) {
- sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- sdata->u.mntr.flags = *flags;
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ if (type == NL80211_IFTYPE_MONITOR) {
+ err = ieee80211_set_mon_options(sdata, params);
+ if (err) {
+ ieee80211_if_remove(sdata);
+ return NULL;
+ }
}
return wdev;
static int ieee80211_change_iface(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
}
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_sub_if_data *monitor_sdata;
- u32 mu_mntr_cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
-
- monitor_sdata = rtnl_dereference(local->monitor_sdata);
- if (monitor_sdata &&
- wiphy_ext_feature_isset(wiphy, mu_mntr_cap_flag)) {
- memcpy(monitor_sdata->vif.bss_conf.mu_group.membership,
- params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
- memcpy(monitor_sdata->vif.bss_conf.mu_group.position,
- params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
- WLAN_USER_POSITION_LEN);
- monitor_sdata->vif.mu_mimo_owner = true;
- ieee80211_bss_info_change_notify(monitor_sdata,
- BSS_CHANGED_MU_GROUPS);
-
- ether_addr_copy(monitor_sdata->u.mntr.mu_follow_addr,
- params->macaddr);
- }
-
- if (!flags)
- return 0;
-
- if (ieee80211_sdata_running(sdata)) {
- u32 mask = MONITOR_FLAG_COOK_FRAMES |
- MONITOR_FLAG_ACTIVE;
-
- /*
- * Prohibit MONITOR_FLAG_COOK_FRAMES and
- * MONITOR_FLAG_ACTIVE to be changed while the
- * interface is up.
- * Else we would need to add a lot of cruft
- * to update everything:
- * cooked_mntrs, monitor and all fif_* counters
- * reconfigure hardware
- */
- if ((*flags & mask) != (sdata->u.mntr.flags & mask))
- return -EBUSY;
-
- ieee80211_adjust_monitor_flags(sdata, -1);
- sdata->u.mntr.flags = *flags;
- ieee80211_adjust_monitor_flags(sdata, 1);
-
- ieee80211_configure_filter(local);
- } else {
- /*
- * Because the interface is down, ieee80211_do_stop
- * and ieee80211_do_open take care of "everything"
- * mentioned in the comment above.
- */
- sdata->u.mntr.flags = *flags;
- }
+ ret = ieee80211_set_mon_options(sdata, params);
+ if (ret)
+ return ret;
}
return 0;
params->basic_rates_len,
&sdata->vif.bss_conf.basic_rates);
changed |= BSS_CHANGED_BASIC_RATES;
+ ieee80211_check_rate_mask(sdata);
}
if (params->ap_isolate >= 0) {
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;
+ bss_conf->cqm_rssi_low = 0;
+ bss_conf->cqm_rssi_high = 0;
+ sdata->u.mgd.last_cqm_event_signal = 0;
+
+ /* tell the driver upon association, unless already associated */
+ if (sdata->u.mgd.associated &&
+ sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+
+ return 0;
+}
+
+static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_low, s32 rssi_high)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_vif *vif = &sdata->vif;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
+ return -EOPNOTSUPP;
+
+ bss_conf->cqm_rssi_low = rssi_low;
+ bss_conf->cqm_rssi_high = rssi_high;
+ bss_conf->cqm_rssi_thold = 0;
+ bss_conf->cqm_rssi_hyst = 0;
sdata->u.mgd.last_cqm_event_signal = 0;
/* tell the driver upon association, unless already associated */
return ret;
}
+ /*
+ * If active, validate the setting and reject it if it doesn't leave
+ * at least one basic rate usable, since we really have to be able
+ * to send something, and if we're an AP we have to be able to do
+ * so at a basic rate so that all clients can receive it.
+ */
+ if (rcu_access_pointer(sdata->vif.chanctx_conf) &&
+ sdata->vif.bss_conf.chandef.chan) {
+ u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+
+ if (!(mask->control[band].legacy & basic_rates))
+ return -EINVAL;
+ }
+
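
The new validation boils down to one bitwise test: the user-supplied legacy mask must share at least one bit with the BSS's basic rate set, otherwise nothing the network relies on could be transmitted. In isolation (illustrative masks):

    #include <stdio.h>

    int main(void)
    {
            unsigned int basic_rates = 0x150; /* BSS basic rate bits, illustrative */
            unsigned int user_mask   = 0x00f; /* user-requested legacy rates */

            if (!(user_mask & basic_rates))
                    printf("rejected: no usable basic rate (-EINVAL)\n");
            else
                    printf("accepted\n");
            return 0;
    }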
for (i = 0; i < NUM_NL80211_BANDS; i++) {
struct ieee80211_supported_band *sband = wiphy->bands[i];
int j;
.mgmt_tx = ieee80211_mgmt_tx,
.mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait,
.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
+ .set_cqm_rssi_range_config = ieee80211_set_cqm_rssi_range_config,
.mgmt_frame_register = ieee80211_mgmt_frame_register,
.set_antenna = ieee80211_set_antenna,
.get_antenna = ieee80211_get_antenna,
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
cfg80211_chandef_create(&chandef, cbss->channel,
- NL80211_CHAN_WIDTH_20_NOHT);
+ NL80211_CHAN_NO_HT);
chandef.width = sdata->u.ibss.chandef.width;
break;
case NL80211_CHAN_WIDTH_80:
default:
/* fall back to 20 MHz for unsupported modes */
cfg80211_chandef_create(&chandef, cbss->channel,
- NL80211_CHAN_WIDTH_20_NOHT);
+ NL80211_CHAN_NO_HT);
break;
}
struct ieee80211_if_mntr {
u32 flags;
u8 mu_follow_addr[ETH_ALEN] __aligned(2);
+
+ struct list_head list;
};
/**
/* see iface.c */
struct list_head interfaces;
+ struct list_head mon_list; /* only that are IFF_UP && !cooked */
struct mutex iflist_mtx;
/*
set_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (sdata->vif.type == NL80211_IFTYPE_WDS) {
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_WDS:
/* Create STA entry for the WDS peer */
sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
GFP_KERNEL);
rate_control_rate_init(sta);
netif_carrier_on(dev);
- } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
rcu_assign_pointer(local->p2p_sdata, sdata);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+ break;
+ list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
+ break;
+ default:
+ break;
}
/*
case NL80211_IFTYPE_AP:
cancel_work_sync(&sdata->u.ap.request_smps_work);
break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+ break;
+ list_del_rcu(&sdata->u.mntr.list);
+ break;
default:
break;
}
ARRAY_SIZE(local->ext_capa);
INIT_LIST_HEAD(&local->interfaces);
+ INIT_LIST_HEAD(&local->mon_list);
__hw_addr_init(&local->mc_list);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
return;
- if (mesh_matches_local(sdata, &elems))
- mesh_neighbour_update(sdata, mgmt->sa, &elems);
+ if (mesh_matches_local(sdata, &elems)) {
+ mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
+ sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
+ if (!sdata->u.mesh.user_mpm ||
+ sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
+ sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
+ mesh_neighbour_update(sdata, mgmt->sa, &elems);
+ }
if (ifmsh->sync_ops)
ifmsh->sync_ops->rx_bcn_presp(sdata,
#define TEST_FRAME_LEN 8192
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
+#define LINK_FAIL_THRESH 95
#define MAX_PREQ_QUEUE_LEN 64
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
- /* moving average, scaled to 100 */
- sta->mesh->fail_avg =
- ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
- if (sta->mesh->fail_avg > 95)
+ /* moving average, scaled to 100.
+ * feed failure as 100 and success as 0
+ */
+ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
+ if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
+ LINK_FAIL_THRESH)
mesh_plink_broken(sta);
}
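
DECLARE_EWMA(mesh_fail_avg, 20, 8) stores the average in fixed point (20 fractional bits) and folds in 1/8 of each new sample, which is why failures are now fed as 100 and successes as 0: the running value stays directly comparable with LINK_FAIL_THRESH. A standalone mimic of that arithmetic, assuming the precision/weight semantics of DECLARE_EWMA in this kernel generation:

    #include <stdio.h>

    #define EWMA_PRECISION 20 /* fixed-point fractional bits */
    #define EWMA_WEIGHT    8  /* fold in 1/8 of each sample */

    static unsigned long ewma_internal; /* value << EWMA_PRECISION */

    static void ewma_add(unsigned long val)
    {
            unsigned long scaled = val << EWMA_PRECISION;

            if (!ewma_internal)
                    ewma_internal = scaled;
            else
                    ewma_internal = (ewma_internal * (EWMA_WEIGHT - 1) +
                                     scaled) / EWMA_WEIGHT;
    }

    int main(void)
    {
            int i;

            ewma_add(1); /* the "0 start is tricky" seed */
            for (i = 0; i < 30; i++)
                    ewma_add(100); /* every frame failed */
            printf("fail_avg = %lu (plink broken above 95)\n",
                   ewma_internal >> EWMA_PRECISION);
            return 0;
    }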
int rate, err;
u32 tx_time, estimated_retx;
u64 result;
+ unsigned long fail_avg =
+ ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
/* Try to get rate based on HW/SW RC algorithm.
* Rate is returned in units of Kbps, correct this
if (rate) {
err = 0;
} else {
- if (sta->mesh->fail_avg >= 100)
+ if (fail_avg > LINK_FAIL_THRESH)
return MAX_METRIC;
sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
if (WARN_ON(!rate))
return MAX_METRIC;
- err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
+ err = (fail_avg << ARITH_SHIFT) / 100;
}
/* bitrate is in units of 100 Kbps, while we need rate in units of
? mpath->exp_time : exp_time;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
+ ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
+ /* init it at a low value - 0 start is tricky */
+ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
/* draft says preq_id should be saved too, but there does
* not seem to be any use for it, skipping for now
? mpath->exp_time : exp_time;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
+ ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
+ /* init it at a low value - 0 start is tricky */
+ ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
} else
spin_unlock_bh(&mpath->state_lock);
new_mpath->sdata = sdata;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
- new_mpath->timer.data = (unsigned long) new_mpath;
- new_mpath->timer.function = mesh_path_timer;
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
- init_timer(&new_mpath->timer);
+ setup_timer(&new_mpath->timer, mesh_path_timer,
+ (unsigned long) new_mpath);
return new_mpath;
}
mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
+ ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
+ /* init it at a low value - 0 start is tricky */
+ ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
mesh_path_tx_pending(mpath);
}
sdata->u.mgd.associated = cbss;
memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
+ ieee80211_check_rate_mask(sdata);
+
sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
if (sdata->vif.p2p ||
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
- mgmt->sa, reason_code);
+ sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
+ mgmt->sa, reason_code,
+ ieee80211_get_reason_code_string(reason_code));
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
*have_higher_than_11mbit = true;
/*
- * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
- * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+ * Skip HT and VHT BSS membership selectors since they're not
+ * rates.
*
- * Note: Even through the membership selector and the basic
+ * Note: Even though the membership selector and the basic
* rate flag share the same bit, they are not exactly
* the same.
*/
- if (!!(supp_rates[i] & 0x80) &&
- (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+ if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
+ supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY))
continue;
for (j = 0; j < sband->n_bitrates; j++) {
}
}
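
Membership selectors ride in the Supported Rates element with the top bit set, just like basic rates; the HT PHY selector value is 127 and the VHT PHY selector is 126, so the raw octets are 0xFF and 0xFE, and comparing the whole octet is both simpler and now also covers VHT. A standalone walk over a rate set:

    #include <stdio.h>

    #define BSS_MEMBERSHIP_SELECTOR_HT_PHY  127
    #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126

    int main(void)
    {
            /* basic rates 1/2/5.5/11 Mbps plus the two selectors */
            unsigned char rates[] = { 0x82, 0x84, 0x8b, 0x96,
                                      0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY,
                                      0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY };
            unsigned int i;

            for (i = 0; i < sizeof(rates); i++) {
                    if (rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
                        rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY)) {
                            printf("0x%02x: membership selector, skipped\n",
                                   rates[i]);
                            continue;
                    }
                    printf("0x%02x: %u.%u Mbps basic rate\n", rates[i],
                           (rates[i] & 0x7f) / 2, ((rates[i] & 0x7f) % 2) * 5);
            }
            return 0;
    }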
+ if (bss_conf->cqm_rssi_low &&
+ ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+ int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
+ int last_event = ifmgd->last_cqm_event_signal;
+ int low = bss_conf->cqm_rssi_low;
+ int high = bss_conf->cqm_rssi_high;
+
+ if (sig < low &&
+ (last_event == 0 || last_event >= low)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ sig, GFP_KERNEL);
+ } else if (sig > high &&
+ (last_event == 0 || last_event <= high)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ sig, GFP_KERNEL);
+ }
+ }
+
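
Unlike the threshold/hysteresis variant, the range trigger reports at most once per crossing: last_cqm_event_signal remembers the previous report, so a signal that keeps sinking below the low bound (or keeps rising above the high one) stays quiet. A standalone mimic of the trigger condition:

    #include <stdio.h>

    static int last_event; /* 0 = nothing reported yet */

    static void cqm_range_check(int sig, int low, int high)
    {
            if (sig < low && (last_event == 0 || last_event >= low)) {
                    last_event = sig;
                    printf("sig %d: LOW event\n", sig);
            } else if (sig > high && (last_event == 0 || last_event <= high)) {
                    last_event = sig;
                    printf("sig %d: HIGH event\n", sig);
            } else {
                    printf("sig %d: no event\n", sig);
            }
    }

    int main(void)
    {
            int trace[] = { -60, -82, -85, -60, -55 };
            unsigned int i;

            /* range: low = -80 dBm, high = -70 dBm */
            for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
                    cqm_range_check(trace[i], -80, -70);
            return 0;
    }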
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
mlme_dbg_ratelimited(sdata,
"cancelling AP probe due to a received beacon\n");
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
/* try default if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
- /* try built-in one if specific alg requested but not found */
- if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
+ /* Note: check for > 0 is intentional to avoid clang warning */
+ if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0))
+ /* try built-in one if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+
kernel_param_unlock(THIS_MODULE);
return ops;
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
if (!ref)
return NULL;
- ref->local = local;
ref->ops = ieee80211_rate_control_ops_get(name);
if (!ref->ops)
goto free;
return NULL;
}
-static void rate_control_free(struct rate_control_ref *ctrl_ref)
+static void rate_control_free(struct ieee80211_local *local,
+ struct rate_control_ref *ctrl_ref)
{
ctrl_ref->ops->free(ctrl_ref->priv);
#ifdef CONFIG_MAC80211_DEBUGFS
- debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir);
- ctrl_ref->local->debugfs.rcdir = NULL;
+ debugfs_remove_recursive(local->debugfs.rcdir);
+ local->debugfs.rcdir = NULL;
#endif
kfree(ctrl_ref);
}
+void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u32 user_mask, basic_rates = sdata->vif.bss_conf.basic_rates;
+ enum nl80211_band band;
+
+ if (WARN_ON(!sdata->vif.bss_conf.chandef.chan))
+ return;
+
+ if (WARN_ON_ONCE(!basic_rates))
+ return;
+
+ band = sdata->vif.bss_conf.chandef.chan->band;
+ user_mask = sdata->rc_rateidx_mask[band];
+ sband = local->hw.wiphy->bands[band];
+
+ if (user_mask & basic_rates)
+ return;
+
+ sdata_dbg(sdata,
+ "no overlap between basic rates (0x%x) and user mask (0x%x on band %d) - clearing the latter",
+ basic_rates, user_mask, band);
+ sdata->rc_rateidx_mask[band] = (1 << sband->n_bitrates) - 1;
+}
+
static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
return;
local->rate_ctrl = NULL;
- rate_control_free(ref);
+ rate_control_free(local, ref);
}
#include "driver-ops.h"
struct rate_control_ref {
- struct ieee80211_local *local;
const struct rate_control_ops *ops;
void *priv;
};
#endif
}
+void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata);
+
/* Get a reference to the rate control algorithm. If `name' is NULL, get the
* first available algorithm. */
int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
* This function cleans up the SKB, i.e. it removes all the stuff
* only useful for monitoring.
*/
-static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
- struct sk_buff *skb,
- unsigned int rtap_vendor_space)
+static void remove_monitor_info(struct sk_buff *skb,
+ unsigned int present_fcs_len,
+ unsigned int rtap_vendor_space)
{
- if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
- if (likely(skb->len > FCS_LEN))
- __pskb_trim(skb, skb->len - FCS_LEN);
- else {
- /* driver bug */
- WARN_ON(1);
- dev_kfree_skb(skb);
- return NULL;
- }
- }
-
+ if (present_fcs_len)
+ __pskb_trim(skb, skb->len - present_fcs_len);
__pskb_pull(skb, rtap_vendor_space);
-
- return skb;
}
static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
* the SKB because it has a bad FCS/PLCP checksum.
*/
- if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
+ if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
+ if (unlikely(origskb->len <= FCS_LEN)) {
+ /* driver bug */
+ WARN_ON(1);
+ dev_kfree_skb(origskb);
+ return NULL;
+ }
present_fcs_len = FCS_LEN;
+ }
/* ensure hdr->frame_control and vendor radiotap data are in skb head */
if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
return NULL;
}
- return remove_monitor_info(local, origskb, rtap_vendor_space);
+ remove_monitor_info(origskb, present_fcs_len,
+ rtap_vendor_space);
+ return origskb;
}
/* room for the radiotap header based on driver features */
* and FCS from the original.
*/
skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
-
- origskb = remove_monitor_info(local, origskb,
- rtap_vendor_space);
+ remove_monitor_info(origskb, present_fcs_len,
+ rtap_vendor_space);
if (!skb)
return origskb;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
- continue;
-
- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
- continue;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
-
+ list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
if (prev_dev) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
struct ieee80211_vht_operation vht_oper = {
.chan_width =
wide_bw_chansw_ie->new_channel_width,
- .center_freq_seg1_idx =
+ .center_freq_seg0_idx =
wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg2_idx =
+ .center_freq_seg1_idx =
wide_bw_chansw_ie->new_center_freq_seg1,
/* .basic_mcs_set doesn't matter */
};
rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >>
STA_STATS_RATE_BW_SHIFT;
- if (rate & STA_STATS_RATE_VHT) {
+ switch (rate & STA_STATS_RATE_TYPE_MASK) {
+ case STA_STATS_RATE_TYPE_VHT:
rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
rinfo->mcs = rate & 0xf;
rinfo->nss = (rate & 0xf0) >> 4;
- } else if (rate & STA_STATS_RATE_HT) {
+ break;
+ case STA_STATS_RATE_TYPE_HT:
rinfo->flags = RATE_INFO_FLAGS_MCS;
rinfo->mcs = rate & 0xff;
- } else if (rate & STA_STATS_RATE_LEGACY) {
+ break;
+ case STA_STATS_RATE_TYPE_LEGACY: {
struct ieee80211_supported_band *sband;
u16 brate;
unsigned int shift;
else
shift = 0;
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ break;
+ }
}
if (rate & STA_STATS_RATE_SGI)
struct rcu_head rcu_head;
};
+/* we use only values in the range 0-100, so pick a large precision */
+DECLARE_EWMA(mesh_fail_avg, 20, 8)
+
/**
* struct mesh_sta - mesh STA information
* @plink_lock: serialize access to plink fields
enum nl80211_mesh_power_mode nonpeer_pm;
/* moving percentage of failed MSDUs */
- unsigned int fail_avg;
+ struct ewma_mesh_fail_avg fail_avg;
};
DECLARE_EWMA(signal, 10, 8)
unsigned long ieee80211_sta_last_active(struct sta_info *sta);
#define STA_STATS_RATE_INVALID 0
-#define STA_STATS_RATE_VHT 0x8000
-#define STA_STATS_RATE_HT 0x4000
-#define STA_STATS_RATE_LEGACY 0x2000
+#define STA_STATS_RATE_TYPE_MASK 0xC000
+#define STA_STATS_RATE_TYPE_LEGACY 0x4000
+#define STA_STATS_RATE_TYPE_HT 0x8000
+#define STA_STATS_RATE_TYPE_VHT 0xC000
#define STA_STATS_RATE_SGI 0x1000
#define STA_STATS_RATE_BW_SHIFT 9
#define STA_STATS_RATE_BW_MASK (0x7 << STA_STATS_RATE_BW_SHIFT)
r |= STA_STATS_RATE_SGI;
if (s->flag & RX_FLAG_VHT)
- r |= STA_STATS_RATE_VHT | (s->vht_nss << 4);
+ r |= STA_STATS_RATE_TYPE_VHT | (s->vht_nss << 4);
else if (s->flag & RX_FLAG_HT)
- r |= STA_STATS_RATE_HT;
+ r |= STA_STATS_RATE_TYPE_HT;
else
- r |= STA_STATS_RATE_LEGACY | (s->band << 4);
+ r |= STA_STATS_RATE_TYPE_LEGACY | (s->band << 4);
return r;
}
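
The old scheme burned one flag bit per rate type, which suggested the types could be combined; the new encoding packs exactly one type into the two bits at 0xC000 (value 0 remains STA_STATS_RATE_INVALID), which is what makes the decoder a clean switch. Encode/decode in isolation:

    #include <stdio.h>

    #define STA_STATS_RATE_TYPE_MASK   0xC000
    #define STA_STATS_RATE_TYPE_LEGACY 0x4000
    #define STA_STATS_RATE_TYPE_HT     0x8000
    #define STA_STATS_RATE_TYPE_VHT    0xC000

    int main(void)
    {
            /* VHT, NSS 2, MCS 7 */
            unsigned int r = STA_STATS_RATE_TYPE_VHT | (2 << 4) | 7;

            switch (r & STA_STATS_RATE_TYPE_MASK) {
            case STA_STATS_RATE_TYPE_VHT:
                    printf("VHT: mcs %u, nss %u\n", r & 0xf, (r & 0xf0) >> 4);
                    break;
            case STA_STATS_RATE_TYPE_HT:
                    printf("HT: mcs %u\n", r & 0xff);
                    break;
            case STA_STATS_RATE_TYPE_LEGACY:
                    printf("legacy rate\n");
                    break;
            default:
                    printf("invalid\n");
            }
            return 0;
    }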
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
- if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
- txrc.max_rate_idx = -1;
- else
- txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
if (tx->sdata->rc_has_mcs_mask[info->band])
txrc.rate_idx_mcs_mask =
txrc.skb = skb;
txrc.reported_rate.idx = -1;
txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
- if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
- txrc.max_rate_idx = -1;
- else
- txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
*pos++ = WLAN_EID_VHT_OPERATION;
*pos++ = sizeof(struct ieee80211_vht_operation);
vht_oper = (struct ieee80211_vht_operation *)pos;
- vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(
+ vht_oper->center_freq_seg0_idx = ieee80211_frequency_to_channel(
chandef->center_freq1);
if (chandef->center_freq2)
- vht_oper->center_freq_seg2_idx =
+ vht_oper->center_freq_seg1_idx =
ieee80211_frequency_to_channel(chandef->center_freq2);
else
- vht_oper->center_freq_seg2_idx = 0x00;
+ vht_oper->center_freq_seg1_idx = 0x00;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_160:
* workaround.
*/
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
- vht_oper->center_freq_seg2_idx = vht_oper->center_freq_seg1_idx;
+ vht_oper->center_freq_seg1_idx = vht_oper->center_freq_seg0_idx;
if (chandef->chan->center_freq < chandef->center_freq1)
- vht_oper->center_freq_seg1_idx -= 8;
+ vht_oper->center_freq_seg0_idx -= 8;
else
- vht_oper->center_freq_seg1_idx += 8;
+ vht_oper->center_freq_seg0_idx += 8;
break;
case NL80211_CHAN_WIDTH_80P80:
/*
if (!oper)
return false;
- cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+ cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg0_idx,
chandef->chan->band);
- cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+ cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
chandef->chan->band);
switch (oper->chan_width) {
new.width = NL80211_CHAN_WIDTH_80;
new.center_freq1 = cf1;
/* If needed, adjust based on the newer interop workaround. */
- if (oper->center_freq_seg2_idx) {
+ if (oper->center_freq_seg1_idx) {
unsigned int diff;
- diff = abs(oper->center_freq_seg2_idx -
- oper->center_freq_seg1_idx);
+ diff = abs(oper->center_freq_seg1_idx -
+ oper->center_freq_seg0_idx);
if (diff == 8) {
new.width = NL80211_CHAN_WIDTH_160;
new.center_freq1 = cf2;
return ERR_PTR(err);
}
-static bool tcf_proto_destroy(struct tcf_proto *tp, bool force)
+static void tcf_proto_destroy(struct tcf_proto *tp)
{
- if (tp->ops->destroy(tp, force)) {
- module_put(tp->ops->owner);
- kfree_rcu(tp, rcu);
- return true;
- }
- return false;
+ tp->ops->destroy(tp);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
}
void tcf_destroy_chain(struct tcf_proto __rcu **fl)
while ((tp = rtnl_dereference(*fl)) != NULL) {
RCU_INIT_POINTER(*fl, tp->next);
- tcf_proto_destroy(tp, true);
+ tcf_proto_destroy(tp);
}
}
EXPORT_SYMBOL(tcf_destroy_chain);
RCU_INIT_POINTER(*back, next);
tfilter_notify(net, skb, n, tp, fh,
RTM_DELTFILTER, false);
- tcf_proto_destroy(tp, true);
+ tcf_proto_destroy(tp);
err = 0;
goto errout;
}
goto errout;
}
} else {
+ bool last;
+
switch (n->nlmsg_type) {
case RTM_NEWTFILTER:
if (n->nlmsg_flags & NLM_F_EXCL) {
if (tp_created)
- tcf_proto_destroy(tp, true);
+ tcf_proto_destroy(tp);
err = -EEXIST;
goto errout;
}
break;
case RTM_DELTFILTER:
- err = tp->ops->delete(tp, fh);
+ err = tp->ops->delete(tp, fh, &last);
if (err)
goto errout;
next = rtnl_dereference(tp->next);
tfilter_notify(net, skb, n, tp, t->tcm_handle,
RTM_DELTFILTER, false);
- if (tcf_proto_destroy(tp, false))
+ if (last) {
RCU_INIT_POINTER(*back, next);
+ tcf_proto_destroy(tp);
+ }
goto errout;
case RTM_GETTFILTER:
err = tfilter_notify(net, skb, n, tp, fh,
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
} else {
if (tp_created)
- tcf_proto_destroy(tp, true);
+ tcf_proto_destroy(tp);
}
errout:
kfree(f);
}
-static bool basic_destroy(struct tcf_proto *tp, bool force)
+static void basic_destroy(struct tcf_proto *tp)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f, *n;
- if (!force && !list_empty(&head->flist))
- return false;
-
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, basic_delete_filter);
}
kfree_rcu(head, rcu);
- return true;
}
-static int basic_delete(struct tcf_proto *tp, unsigned long arg)
+static int basic_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
+ struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f = (struct basic_filter *) arg;
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, basic_delete_filter);
+ *last = list_empty(&head->flist);
return 0;
}
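
Every classifier's ->delete() now reports through *last whether the filter chain just became empty, so the core decides when to unlink and destroy the tcf_proto instead of the old force/peek logic inside destroy(). The shape each classifier follows, as a standalone sketch with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    struct filter { struct filter *next; };
    struct head { struct filter *flist; };

    /* Unlink one filter and tell the caller whether it was the last. */
    static int demo_delete(struct head *head, struct filter *f, bool *last)
    {
            struct filter **pp;

            for (pp = &head->flist; *pp; pp = &(*pp)->next) {
                    if (*pp == f) {
                            *pp = f->next;
                            break;
                    }
            }
            *last = (head->flist == NULL);
            return 0;
    }

    int main(void)
    {
            struct filter b = { NULL }, a = { &b };
            struct head h = { &a };
            bool last;

            demo_delete(&h, &a, &last);
            printf("last after first delete:  %d\n", last); /* 0 */
            demo_delete(&h, &b, &last);
            printf("last after second delete: %d\n", last); /* 1 */
            return 0;
    }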
call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}
-static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
+static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
+
__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
+ *last = list_empty(&head->plist);
return 0;
}
-static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
+static void cls_bpf_destroy(struct tcf_proto *tp)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog, *tmp;
- if (!force && !list_empty(&head->plist))
- return false;
-
list_for_each_entry_safe(prog, tmp, &head->plist, link)
__cls_bpf_delete(tp, prog);
kfree_rcu(head, rcu);
- return true;
}
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
return err;
}
-static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
+static void cls_cgroup_destroy(struct tcf_proto *tp)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
- if (!force)
- return false;
/* Head can still be NULL due to cls_cgroup_init(). */
if (head)
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
-
- return true;
}
-static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
+static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
return -EOPNOTSUPP;
}
return err;
}
-static int flow_delete(struct tcf_proto *tp, unsigned long arg)
+static int flow_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
+ struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f = (struct flow_filter *)arg;
list_del_rcu(&f->list);
call_rcu(&f->rcu, flow_destroy_filter);
+ *last = list_empty(&head->filters);
return 0;
}
return 0;
}
-static bool flow_destroy(struct tcf_proto *tp, bool force)
+static void flow_destroy(struct tcf_proto *tp)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f, *next;
- if (!force && !list_empty(&head->filters))
- return false;
-
list_for_each_entry_safe(f, next, &head->filters, list) {
list_del_rcu(&f->list);
call_rcu(&f->rcu, flow_destroy_filter);
}
kfree_rcu(head, rcu);
- return true;
}
static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
schedule_work(&head->work);
}
-static bool fl_destroy(struct tcf_proto *tp, bool force)
+static void fl_destroy(struct tcf_proto *tp)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f, *next;
- if (!force && !list_empty(&head->filters))
- return false;
-
list_for_each_entry_safe(f, next, &head->filters, list)
__fl_delete(tp, f);
__module_get(THIS_MODULE);
call_rcu(&head->rcu, fl_destroy_rcu);
-
- return true;
}
static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
return err;
}
-static int fl_delete(struct tcf_proto *tp, unsigned long arg)
+static int fl_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
rhashtable_remove_fast(&head->ht, &f->ht_node,
head->ht_params);
__fl_delete(tp, f);
+ *last = list_empty(&head->filters);
return 0;
}
kfree(f);
}
-static bool fw_destroy(struct tcf_proto *tp, bool force)
+static void fw_destroy(struct tcf_proto *tp)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
int h;
if (head == NULL)
- return true;
-
- if (!force) {
- for (h = 0; h < HTSIZE; h++)
- if (rcu_access_pointer(head->ht[h]))
- return false;
- }
+ return;
for (h = 0; h < HTSIZE; h++) {
while ((f = rtnl_dereference(head->ht[h])) != NULL) {
call_rcu(&f->rcu, fw_delete_filter);
}
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
- return true;
}
-static int fw_delete(struct tcf_proto *tp, unsigned long arg)
+static int fw_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter __rcu **fp;
struct fw_filter *pfp;
+ int ret = -EINVAL;
+ int h;
if (head == NULL || f == NULL)
goto out;
RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, fw_delete_filter);
- return 0;
+ ret = 0;
+ break;
}
}
+
+ *last = true;
+ for (h = 0; h < HTSIZE; h++) {
+ if (rcu_access_pointer(head->ht[h])) {
+ *last = false;
+ break;
+ }
+ }
+
out:
- return -EINVAL;
+ return ret;
}
static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
&offload);
}
-static bool mall_destroy(struct tcf_proto *tp, bool force)
+static void mall_destroy(struct tcf_proto *tp)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
struct net_device *dev = tp->q->dev_queue->dev;
if (!head)
- return true;
+ return;
if (tc_should_offload(dev, tp, head->flags))
mall_destroy_hw_filter(tp, head, (unsigned long) head);
call_rcu(&head->rcu, mall_destroy_rcu);
- return true;
}
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
return err;
}
-static int mall_delete(struct tcf_proto *tp, unsigned long arg)
+static int mall_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
return -EOPNOTSUPP;
}
goto failure;
id = dst->tclassid;
- if (head == NULL)
- goto old_method;
iif = inet_iif(skb);
route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
return -1;
-
-old_method:
- if (id && (TC_H_MAJ(id) == 0 ||
- !(TC_H_MAJ(id^tp->q->handle)))) {
- res->classid = id;
- res->class = 0;
- return 0;
- }
- return -1;
}
static inline u32 to_hash(u32 id)
struct route4_filter *f;
unsigned int h1, h2;
- if (!head)
- return 0;
-
h1 = to_hash(handle);
if (h1 > 256)
return 0;
kfree(f);
}
-static bool route4_destroy(struct tcf_proto *tp, bool force)
+static void route4_destroy(struct tcf_proto *tp)
{
struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
if (head == NULL)
- return true;
-
- if (!force) {
- for (h1 = 0; h1 <= 256; h1++) {
- if (rcu_access_pointer(head->table[h1]))
- return false;
- }
- }
+ return;
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
kfree_rcu(b, rcu);
}
}
- RCU_INIT_POINTER(tp->root, NULL);
kfree_rcu(head, rcu);
- return true;
}
-static int route4_delete(struct tcf_proto *tp, unsigned long arg)
+static int route4_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter *f = (struct route4_filter *)arg;
struct route4_filter *nf;
struct route4_bucket *b;
unsigned int h = 0;
- int i;
+ int i, h1;
if (!head || !f)
return -EINVAL;
rt = rtnl_dereference(b->ht[i]);
if (rt)
- return 0;
+ goto out;
}
/* OK, session has no flows */
RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
kfree_rcu(b, rcu);
+ break;
+ }
+ }
- return 0;
+out:
+ *last = true;
+ for (h1 = 0; h1 <= 256; h1++) {
+ if (rcu_access_pointer(head->table[h1])) {
+ *last = false;
+ break;
}
}
+
return 0;
}
return -1;
nhptr = ip_hdr(skb);
#endif
- if (unlikely(!head))
- return -1;
restart:
#if RSVP_DST_LEN == 4
call_rcu(&f->rcu, rsvp_delete_filter_rcu);
}
-static bool rsvp_destroy(struct tcf_proto *tp, bool force)
+static void rsvp_destroy(struct tcf_proto *tp)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
int h1, h2;
if (data == NULL)
- return true;
-
- if (!force) {
- for (h1 = 0; h1 < 256; h1++) {
- if (rcu_access_pointer(data->ht[h1]))
- return false;
- }
- }
-
- RCU_INIT_POINTER(tp->root, NULL);
+ return;
for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
}
}
kfree_rcu(data, rcu);
- return true;
}
-static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
+static int rsvp_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct rsvp_head *head = rtnl_dereference(tp->root);
struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg;
unsigned int h = f->handle;
struct rsvp_session __rcu **sp;
struct rsvp_session *nsp, *s = f->sess;
- int i;
+ int i, h1;
fp = &s->ht[(h >> 8) & 0xFF];
for (nfp = rtnl_dereference(*fp); nfp;
for (i = 0; i <= 16; i++)
if (s->ht[i])
- return 0;
+ goto out;
/* OK, session has no flows */
sp = &head->ht[h & 0xFF];
if (nsp == s) {
RCU_INIT_POINTER(*sp, s->next);
kfree_rcu(s, rcu);
- return 0;
+ goto out;
}
}
- return 0;
+ break;
}
}
+
+out:
+ *last = true;
+ for (h1 = 0; h1 < 256; h1++) {
+ if (rcu_access_pointer(head->ht[h1])) {
+ *last = false;
+ break;
+ }
+ }
+
return 0;
}
kfree(f);
}
-static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
call_rcu(&f->rcu, tcindex_destroy_fexts);
else
call_rcu(&r->rcu, tcindex_destroy_rexts);
+
+ *last = false;
return 0;
}
unsigned long arg,
struct tcf_walker *walker)
{
- return tcindex_delete(tp, arg);
+ bool last;
+
+ return tcindex_delete(tp, arg, &last);
}
static void __tcindex_destroy(struct rcu_head *head)
}
}
-static bool tcindex_destroy(struct tcf_proto *tp, bool force)
+static void tcindex_destroy(struct tcf_proto *tp)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcf_walker walker;
- if (!force)
- return false;
-
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
walker.count = 0;
walker.skip = 0;
tcindex_walk(tp, &walker);
call_rcu(&p->rcu, __tcindex_destroy);
- return true;
}
return true;
}
-static bool u32_destroy(struct tcf_proto *tp, bool force)
+static void u32_destroy(struct tcf_proto *tp)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
WARN_ON(root_ht == NULL);
- if (!force) {
- if (root_ht) {
- if (root_ht->refcnt > 1)
- return false;
- if (root_ht->refcnt == 1) {
- if (!ht_empty(root_ht))
- return false;
- }
- }
-
- if (tp_c->refcnt > 1)
- return false;
-
- if (tp_c->refcnt == 1) {
- struct tc_u_hnode *ht;
-
- for (ht = rtnl_dereference(tp_c->hlist);
- ht;
- ht = rtnl_dereference(ht->next))
- if (!ht_empty(ht))
- return false;
- }
- }
-
if (root_ht && --root_ht->refcnt == 0)
u32_destroy_hnode(tp, root_ht);
}
tp->data = NULL;
- return true;
}
-static int u32_delete(struct tcf_proto *tp, unsigned long arg)
+static int u32_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
+ struct tc_u_common *tp_c = tp->data;
+ int ret = 0;
if (ht == NULL)
- return 0;
+ goto out;
if (TC_U32_KEY(ht->handle)) {
u32_remove_hw_knode(tp, ht->handle);
- return u32_delete_key(tp, (struct tc_u_knode *)ht);
+ ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
+ goto out;
}
if (root_ht == ht)
return -EBUSY;
}
- return 0;
+out:
+ *last = true;
+ if (root_ht) {
+ if (root_ht->refcnt > 1) {
+ *last = false;
+ goto ret;
+ }
+ if (root_ht->refcnt == 1) {
+ if (!ht_empty(root_ht)) {
+ *last = false;
+ goto ret;
+ }
+ }
+ }
+
+ if (tp_c->refcnt > 1) {
+ *last = false;
+ goto ret;
+ }
+
+ if (tp_c->refcnt == 1) {
+ struct tc_u_hnode *ht;
+
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next))
+ if (!ht_empty(ht)) {
+ *last = false;
+ break;
+ }
+ }
+
+ret:
+ return ret;
}
#define NR_U32_NODE (1<<12)
rdev_set_qos_map(rdev, dev, NULL);
if (notify)
nl80211_send_ap_stopped(wdev);
+
+ /* Should we also apply the grace period during beaconing
+ * interface shutdown?
+ */
+ cfg80211_sched_dfs_chan_update(rdev);
}
return err;
return (r1 + r2 > 0);
}
+/*
+ * Checks if the center frequency of chan falls within the bandwidth
+ * range of chandef.
+ */
+bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
+ struct ieee80211_channel *chan)
+{
+ int width;
+ u32 cf_offset, freq;
+
+ if (chandef->chan->center_freq == chan->center_freq)
+ return true;
+
+ width = cfg80211_chandef_get_width(chandef);
+ if (width <= 20)
+ return false;
+
+ cf_offset = width / 2 - 10;
+
+ for (freq = chandef->center_freq1 - width / 2 + 10;
+ freq <= chandef->center_freq1 + width / 2 - 10; freq += 20) {
+ if (chan->center_freq == freq)
+ return true;
+ }
+
+ if (!chandef->center_freq2)
+ return false;
+
+ for (freq = chandef->center_freq2 - width / 2 + 10;
+ freq <= chandef->center_freq2 + width / 2 - 10; freq += 20) {
+ if (chan->center_freq == freq)
+ return true;
+ }
+
+ return false;
+}
+
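
For each segment the loop enumerates the 20 MHz subchannel centers covered by the chandef. Worked example: an 80 MHz chandef with center_freq1 = 5210 MHz covers the channels centered at 5180, 5200, 5220 and 5240 MHz:

    #include <stdio.h>

    int main(void)
    {
            int width = 80, center_freq1 = 5210; /* 80 MHz around 5210 MHz */
            int freq;

            for (freq = center_freq1 - width / 2 + 10;
                 freq <= center_freq1 + width / 2 - 10; freq += 20)
                    printf("covers 20 MHz channel centered at %d MHz\n", freq);
            return 0;
    }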
+bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev)
+{
+ bool active = false;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!wdev->chandef.chan)
+ return false;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ active = wdev->beacon_interval != 0;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ active = wdev->ssid_len != 0;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ active = wdev->mesh_id_len != 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* Can the NAN type be considered a beaconing interface? */
+ case NL80211_IFTYPE_NAN:
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ WARN_ON(1);
+ }
+
+ return active;
+}
+
+static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan)
+{
+ struct wireless_dev *wdev;
+
+ list_for_each_entry(wdev, &wiphy->wdev_list, list) {
+ wdev_lock(wdev);
+ if (!cfg80211_beaconing_iface_active(wdev)) {
+ wdev_unlock(wdev);
+ continue;
+ }
+
+ if (cfg80211_is_sub_chan(&wdev->chandef, chan)) {
+ wdev_unlock(wdev);
+ return true;
+ }
+ wdev_unlock(wdev);
+ }
+
+ return false;
+}
+
+bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan)
+{
+ struct cfg80211_registered_device *rdev;
+
+ ASSERT_RTNL();
+
+ if (!(chan->flags & IEEE80211_CHAN_RADAR))
+ return false;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
+ continue;
+
+ if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan))
+ return true;
+ }
+
+ return false;
+}
static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
u32 center_freq,
rtnl_unlock();
}
+static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ propagate_radar_detect_wk);
+
+ rtnl_lock();
+
+ regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->radar_chandef,
+ NL80211_DFS_UNAVAILABLE,
+ NL80211_RADAR_DETECTED);
+
+ rtnl_unlock();
+}
+
+static void cfg80211_propagate_cac_done_wk(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ propagate_cac_done_wk);
+
+ rtnl_lock();
+
+ regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->cac_done_chandef,
+ NL80211_DFS_AVAILABLE,
+ NL80211_RADAR_CAC_FINISHED);
+
+ rtnl_unlock();
+}
+
/* exported functions */
struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
spin_lock_init(&rdev->destroy_list_lock);
INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk);
+ INIT_WORK(&rdev->propagate_radar_detect_wk,
+ cfg80211_propagate_radar_detect_wk);
+ INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
#ifdef CONFIG_CFG80211_DEFAULT_PS
rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
flush_work(&rdev->destroy_work);
flush_work(&rdev->sched_scan_stop_wk);
flush_work(&rdev->mlme_unreg_wk);
+ flush_work(&rdev->propagate_radar_detect_wk);
+ flush_work(&rdev->propagate_cac_done_wk);
#ifdef CONFIG_PM
if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
}
EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
+void cfg80211_cqm_config_free(struct wireless_dev *wdev)
+{
+ kfree(wdev->cqm_config);
+ wdev->cqm_config = NULL;
+}
+
void cfg80211_unregister_wdev(struct wireless_dev *wdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
WARN_ON_ONCE(1);
break;
}
+
+ cfg80211_cqm_config_free(wdev);
}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
INIT_LIST_HEAD(&wdev->mgmt_registrations);
spin_lock_init(&wdev->mgmt_registrations_lock);
- wdev->identifier = ++rdev->wdev_id;
+ /*
+ * We get here also when the interface changes network namespaces,
+ * as it's registered into the new one, but we don't want it to
+ * change ID in that case. Checking if the ID is already assigned
+ * works, because 0 isn't considered a valid ID and the memory is
+ * 0-initialized.
+ */
+ if (!wdev->identifier)
+ wdev->identifier = ++rdev->wdev_id;
list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
rdev->devlist_generation++;
/* can only change netns with wiphy */
*/
if ((wdev->iftype == NL80211_IFTYPE_STATION ||
wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
- rdev->ops->set_power_mgmt)
- if (rdev_set_power_mgmt(rdev, dev, wdev->ps,
- wdev->ps_timeout)) {
- /* assume this means it's off */
- wdev->ps = false;
- }
+ rdev->ops->set_power_mgmt &&
+ rdev_set_power_mgmt(rdev, dev, wdev->ps,
+ wdev->ps_timeout)) {
+ /* assume this means it's off */
+ wdev->ps = false;
+ }
break;
case NETDEV_UNREGISTER:
/*
kzfree(wdev->wext.keys);
#endif
flush_work(&wdev->disconnect_wk);
+ cfg80211_cqm_config_free(wdev);
}
/*
* synchronise (so that we won't find this netdev
struct work_struct sched_scan_stop_wk;
+ struct cfg80211_chan_def radar_chandef;
+ struct work_struct propagate_radar_detect_wk;
+
+ struct cfg80211_chan_def cac_done_chandef;
+ struct work_struct propagate_cac_done_wk;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);
enum cfg80211_event_type type;
union {
- struct {
- u8 bssid[ETH_ALEN];
- const u8 *req_ie;
- const u8 *resp_ie;
- size_t req_ie_len;
- size_t resp_ie_len;
- struct cfg80211_bss *bss;
- int status; /* -1 = failed; 0..65535 = status code */
- enum nl80211_timeout_reason timeout_reason;
- } cr;
+ struct cfg80211_connect_resp_params cr;
struct {
const u8 *req_ie;
const u8 *resp_ie;
u32 nlportid;
};
+struct cfg80211_cqm_config {
+ u32 rssi_hyst;
+ s32 last_rssi_event_value;
+ int n_rssi_thresholds;
+ s32 rssi_thresholds[0];
+};
+
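
rssi_thresholds[0] is the pre-C99 flexible-array idiom: the structure is allocated in one piece with the threshold array appended, along the lines of kzalloc(sizeof(*cqm) + n * sizeof(s32), GFP_KERNEL). A standalone C99 equivalent of that allocation pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct cqm_config {
            unsigned int rssi_hyst;
            int last_rssi_event_value;
            int n_rssi_thresholds;
            int rssi_thresholds[]; /* C99 spelling of the [0] idiom */
    };

    int main(void)
    {
            int n = 3, i;
            struct cqm_config *cqm;

            /* one allocation holds the header plus the trailing array */
            cqm = calloc(1, sizeof(*cqm) + n * sizeof(cqm->rssi_thresholds[0]));
            if (!cqm)
                    return 1;
            cqm->n_rssi_thresholds = n;
            for (i = 0; i < n; i++)
                    cqm->rssi_thresholds[i] = -60 - 10 * i;
            for (i = 0; i < n; i++)
                    printf("threshold[%d] = %d dBm\n", i, cqm->rssi_thresholds[i]);
            free(cqm);
            return 0;
    }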
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
/* free object */
struct cfg80211_connect_params *connect,
struct cfg80211_cached_keys *connkeys,
const u8 *prev_bssid);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, bool wextev,
- struct cfg80211_bss *bss,
- enum nl80211_timeout_reason timeout_reason);
+void __cfg80211_connect_result(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ bool wextev);
void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap);
int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
void cfg80211_upload_connect_keys(struct wireless_dev *wdev);
int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
- u32 *flags, struct vif_params *params);
+ struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
void cfg80211_process_wdev_events(struct wireless_dev *wdev);
cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
+void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev);
+
+bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan);
+
+bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev);
+
+bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
+ struct ieee80211_channel *chan);
+
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
unsigned long end = jiffies;
#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; })
#endif
+void cfg80211_cqm_config_free(struct wireless_dev *wdev);
+
#endif /* __NET_WIRELESS_CORE_H */
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
#endif
+ cfg80211_sched_dfs_chan_update(rdev);
}
void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
wdev->beacon_interval = 0;
memset(&wdev->chandef, 0, sizeof(wdev->chandef));
rdev_set_qos_map(rdev, dev, NULL);
+ cfg80211_sched_dfs_chan_update(rdev);
}
return err;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
- u8 *ie = mgmt->u.assoc_resp.variable;
- int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
- u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ cr.bssid = mgmt->bssid;
+ cr.bss = bss;
+ cr.resp_ie = mgmt->u.assoc_resp.variable;
+ cr.resp_ie_len =
+ len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+ cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
trace_cfg80211_send_rx_assoc(dev, bss);
* and got a reject -- we only try again with an assoc
* frame instead of reassoc.
*/
- if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+ if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) {
cfg80211_unhold_bss(bss_from_pub(bss));
cfg80211_put_bss(wiphy, bss);
return;
nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
/* update current_bss etc., consumes the bss reference */
- __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
- status_code,
- status_code == WLAN_STATUS_SUCCESS, bss,
- NL80211_TIMEOUT_UNSPECIFIED);
+ __cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
}
EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
}
EXPORT_SYMBOL(cfg80211_rx_mgmt);
+void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev)
+{
+ cancel_delayed_work(&rdev->dfs_update_channels_wk);
+ queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, 0);
+}
+
void cfg80211_dfs_channels_update_work(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct wiphy *wiphy;
bool check_again = false;
unsigned long timeout, next_time = 0;
+ unsigned long time_dfs_update;
+ enum nl80211_radar_event radar_event;
int bandid, i;
rdev = container_of(delayed_work, struct cfg80211_registered_device,
for (i = 0; i < sband->n_channels; i++) {
c = &sband->channels[i];
- if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
+ if (!(c->flags & IEEE80211_CHAN_RADAR))
+ continue;
+
+ if (c->dfs_state != NL80211_DFS_UNAVAILABLE &&
+ c->dfs_state != NL80211_DFS_AVAILABLE)
continue;
- timeout = c->dfs_state_entered + msecs_to_jiffies(
- IEEE80211_DFS_MIN_NOP_TIME_MS);
+ if (c->dfs_state == NL80211_DFS_UNAVAILABLE) {
+ time_dfs_update = IEEE80211_DFS_MIN_NOP_TIME_MS;
+ radar_event = NL80211_RADAR_NOP_FINISHED;
+ } else {
+ if (regulatory_pre_cac_allowed(wiphy) ||
+ cfg80211_any_wiphy_oper_chan(wiphy, c))
+ continue;
+
+ time_dfs_update = REG_PRE_CAC_EXPIRY_GRACE_MS;
+ radar_event = NL80211_RADAR_PRE_CAC_EXPIRED;
+ }
+
+ timeout = c->dfs_state_entered +
+ msecs_to_jiffies(time_dfs_update);
if (time_after_eq(jiffies, timeout)) {
c->dfs_state = NL80211_DFS_USABLE;
NL80211_CHAN_NO_HT);
nl80211_radar_notify(rdev, &chandef,
- NL80211_RADAR_NOP_FINISHED,
- NULL, GFP_ATOMIC);
+ radar_event, NULL,
+ GFP_ATOMIC);
+
+ regulatory_propagate_dfs_state(wiphy, &chandef,
+ c->dfs_state,
+ radar_event);
continue;
}
gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- unsigned long timeout;
trace_cfg80211_radar_event(wiphy, chandef);
*/
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
- timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
- queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
- timeout);
+ cfg80211_sched_dfs_chan_update(rdev);
nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
+
+ memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def));
+ queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk);
}
EXPORT_SYMBOL(cfg80211_radar_event);
msecs_to_jiffies(wdev->cac_time_ms);
WARN_ON(!time_after_eq(jiffies, timeout));
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
+ memcpy(&rdev->cac_done_chandef, chandef,
+ sizeof(struct cfg80211_chan_def));
+ queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk);
+ cfg80211_sched_dfs_chan_update(rdev);
break;
case NL80211_RADAR_CAC_ABORTED:
break;
.len = sizeof(struct nl80211_bss_select_rssi_adjust)
},
[NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
+ [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_USERNAME_LEN },
+ [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_REALM_LEN },
+ [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
+ [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_RRK_LEN },
+ [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+ [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
};
/* policy for the key attributes */
if (flags[flag])
*mntrflags |= (1<<flag);
+ *mntrflags |= MONITOR_FLAG_CHANGED;
+
return 0;
}
+static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype type,
+ struct genl_info *info,
+ struct vif_params *params)
+{
+ bool change = false;
+ int err;
+
+ if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
+ if (type != NL80211_IFTYPE_MONITOR)
+ return -EINVAL;
+
+ err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
+ &params->flags);
+ if (err)
+ return err;
+
+ change = true;
+ }
+
+ if (params->flags & MONITOR_FLAG_ACTIVE &&
+ !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) {
+ const u8 *mumimo_groups;
+ u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+ if (type != NL80211_IFTYPE_MONITOR)
+ return -EINVAL;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
+ return -EOPNOTSUPP;
+
+ mumimo_groups =
+ nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);
+
+ /* bits 0 and 63 are reserved and must be zero */
+ if ((mumimo_groups[0] & BIT(7)) ||
+ (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0)))
+ return -EINVAL;
+
+ params->vht_mumimo_groups = mumimo_groups;
+ change = true;
+ }
+
+ if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) {
+ u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+ if (type != NL80211_IFTYPE_MONITOR)
+ return -EINVAL;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
+ return -EOPNOTSUPP;
+
+ params->vht_mumimo_follow_addr =
+ nla_data(info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]);
+ change = true;
+ }
+
+ return change ? 1 : 0;
+}
+
static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u8 use_4addr,
enum nl80211_iftype iftype)
int err;
enum nl80211_iftype otype, ntype;
struct net_device *dev = info->user_ptr[1];
- u32 _flags, *flags = NULL;
bool change = false;
memset(&params, 0, sizeof(params));
params.use_4addr = -1;
}
- if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
- if (ntype != NL80211_IFTYPE_MONITOR)
- return -EINVAL;
- err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
- &_flags);
- if (err)
- return err;
-
- flags = &_flags;
- change = true;
- }
-
- if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) {
- const u8 *mumimo_groups;
- u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
-
- if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
- return -EOPNOTSUPP;
-
- mumimo_groups =
- nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);
-
- /* bits 0 and 63 are reserved and must be zero */
- if ((mumimo_groups[0] & BIT(7)) ||
- (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0)))
- return -EINVAL;
-
- memcpy(params.vht_mumimo_groups, mumimo_groups,
- VHT_MUMIMO_GROUPS_DATA_LEN);
- change = true;
- }
-
- if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) {
- u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
-
- if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
- return -EOPNOTSUPP;
-
- nla_memcpy(params.macaddr,
- info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR],
- ETH_ALEN);
+ err = nl80211_parse_mon_options(rdev, ntype, info, &params);
+ if (err < 0)
+ return err;
+ if (err > 0)
change = true;
- }
-
- if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
- !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
- return -EOPNOTSUPP;
if (change)
- err = cfg80211_change_iface(rdev, dev, ntype, flags, &params);
+ err = cfg80211_change_iface(rdev, dev, ntype, &params);
else
err = 0;
struct sk_buff *msg;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
- u32 flags;
/* to avoid failing a new interface creation due to pending removal */
cfg80211_destroy_ifaces(rdev);
return err;
}
- err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
- info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
- &flags);
-
- if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
- !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
- return -EOPNOTSUPP;
+ err = nl80211_parse_mon_options(rdev, type, info, &params);
+ if (err < 0)
+ return err;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
wdev = rdev_add_virtual_intf(rdev,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
- NET_NAME_USER, type, err ? NULL : &flags,
- &params);
+ NET_NAME_USER, type, &params);
if (WARN_ON(!wdev)) {
nlmsg_free(msg);
return -EPROTO;
return false;
return true;
case NL80211_CMD_CONNECT:
+ /* SAE not supported yet */
+ if (auth_type == NL80211_AUTHTYPE_SAE)
+ return false;
+ /* FILS with SK PFS or PK not supported yet */
+ if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+ auth_type == NL80211_AUTHTYPE_FILS_PK)
+ return false;
+ if (!wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ auth_type == NL80211_AUTHTYPE_FILS_SK)
+ return false;
+ return true;
case NL80211_CMD_START_AP:
/* SAE not supported yet */
if (auth_type == NL80211_AUTHTYPE_SAE)
struct nlattr *rate;
u32 bitrate;
u16 bitrate_compat;
- enum nl80211_attrs rate_flg;
+ enum nl80211_rate_info rate_flg;
rate = nla_nest_start(msg, attr);
if (!rate)
cur_params.dot11MeshGateAnnouncementProtocol) ||
nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
cur_params.dot11MeshForwarding) ||
- nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+ nla_put_s32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
cur_params.rssi_threshold) ||
nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
cur_params.ht_opmode) ||
return 0;
}
+static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev)
+{
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!cfg80211_beaconing_iface_active(wdev))
+ return true;
+
+ if (!(wdev->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return true;
+
+ return regulatory_pre_cac_allowed(wdev->wiphy);
+}
+
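
The predicate reduces to three questions: is the wdev beaconing at all, is its channel a radar channel, and if so does the regulatory domain permit pre-CAC. As a truth-table sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool off_channel_allowed(bool beaconing, bool radar_chan,
                                    bool pre_cac_allowed)
    {
            if (!beaconing)
                    return true;
            if (!radar_chan)
                    return true;
            return pre_cac_allowed;
    }

    int main(void)
    {
            printf("idle iface:           %d\n", off_channel_allowed(false, true, false));
            printf("beaconing, no radar:  %d\n", off_channel_allowed(true, false, false));
            printf("beaconing on radar:   %d\n", off_channel_allowed(true, true, false));
            printf("radar but pre-CAC ok: %d\n", off_channel_allowed(true, true, true));
            return 0;
    }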
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
request->n_channels = i;
+ wdev_lock(wdev);
+ if (!cfg80211_off_channel_oper_allowed(wdev)) {
+ struct ieee80211_channel *chan;
+
+ if (request->n_channels != 1) {
+ wdev_unlock(wdev);
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ chan = request->channels[0];
+ if (chan->center_freq != wdev->chandef.chan->center_freq) {
+ wdev_unlock(wdev);
+ err = -EBUSY;
+ goto out_free;
+ }
+ }
+ wdev_unlock(wdev);
+
i = 0;
if (n_ssids) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
rcu_assign_pointer(rdev->sched_scan_req, sched_scan_req);
- nl80211_send_sched_scan(rdev, dev,
- NL80211_CMD_START_SCHED_SCAN);
+ nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_START_SCHED_SCAN);
return 0;
out_free:
}
}
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ connect.fils_erp_username =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_username_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_realm =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_realm_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_next_seq_num =
+ nla_get_u16(
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+ connect.fils_erp_rrk =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ connect.fils_erp_rrk_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ kzfree(connkeys);
+ return -EINVAL;
+ }
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys,
memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
- if (!info->attrs[NL80211_ATTR_MAC])
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_PMKID])
return -EINVAL;
pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
- pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_MAC]) {
+ pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ } else if (info->attrs[NL80211_ATTR_SSID] &&
+ info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
+ (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
+ info->attrs[NL80211_ATTR_PMK])) {
+ pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ pmksa.cache_id =
+ nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
+ } else {
+ return -EINVAL;
+ }
+ if (info->attrs[NL80211_ATTR_PMK]) {
+ pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+ pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+ }
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
struct cfg80211_chan_def chandef;
+ const struct cfg80211_chan_def *compat_chandef;
struct sk_buff *msg;
void *hdr;
u64 cookie;
if (err)
return err;
+ wdev_lock(wdev);
+ if (!cfg80211_off_channel_oper_allowed(wdev) &&
+ !cfg80211_chandef_identical(&wdev->chandef, &chandef)) {
+ compat_chandef = cfg80211_chandef_compatible(&wdev->chandef,
+ &chandef);
+ if (compat_chandef != &chandef) {
+ wdev_unlock(wdev);
+ return -EBUSY;
+ }
+ }
+ wdev_unlock(wdev);
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (!chandef.chan && params.offchan)
return -EINVAL;
+ wdev_lock(wdev);
+ if (params.offchan && !cfg80211_off_channel_oper_allowed(wdev)) {
+ wdev_unlock(wdev);
+ return -EBUSY;
+ }
+ wdev_unlock(wdev);
+
params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
static const struct nla_policy
nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
- [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_BINARY },
[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 },
return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl);
}
+static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ s32 last, low, high;
+ u32 hyst;
+ int i, n;
+ int err;
+
+ /* RSSI reporting disabled? */
+ if (!wdev->cqm_config)
+ return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+
+ /*
+ * Obtain the current RSSI value if possible. If not, and no RSSI
+ * threshold event has been received yet, we should receive an event
+ * after a connection is established and enough beacons have been
+ * received to calculate the average.
+ */
+ if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
+ rdev->ops->get_station) {
+ struct station_info sinfo;
+ u8 *mac_addr;
+
+ mac_addr = wdev->current_bss->pub.bssid;
+
+ err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
+ if (err)
+ return err;
+
+ if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
+ wdev->cqm_config->last_rssi_event_value =
+ (s8) sinfo.rx_beacon_signal_avg;
+ }
+
+ last = wdev->cqm_config->last_rssi_event_value;
+ hyst = wdev->cqm_config->rssi_hyst;
+ n = wdev->cqm_config->n_rssi_thresholds;
+
+ for (i = 0; i < n; i++)
+ if (last < wdev->cqm_config->rssi_thresholds[i])
+ break;
+
+ low = i > 0 ?
+ (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
+ high = i < n ?
+ (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+
+ return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
+}
+
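For illustration, the window selection above can be modeled in isolation: find the pair of configured thresholds bracketing the last reported RSSI and widen it by the hysteresis. A minimal userspace C sketch of the same arithmetic (hypothetical names and example values, not part of the patch):

  #include <limits.h>
  #include <stdio.h>

  /* model of the low/high window computed by cfg80211_cqm_rssi_update() */
  static void cqm_window(int last, int hyst, const int *thr, int n,
                         int *low, int *high)
  {
      int i;

      /* first threshold strictly above the last reported value */
      for (i = 0; i < n; i++)
          if (last < thr[i])
              break;

      *low = i > 0 ? thr[i - 1] - hyst : INT_MIN;
      *high = i < n ? thr[i] + hyst - 1 : INT_MAX;
  }

  int main(void)
  {
      const int thr[] = { -80, -70, -60 }; /* dBm, negative, ascending */
      int low, high;

      cqm_window(-75, 2, thr, 3, &low, &high);
      printf("next event outside [%d, %d] dBm\n", low, high); /* [-82, -69] */
      return 0;
  }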
static int nl80211_set_cqm_rssi(struct genl_info *info,
- s32 threshold, u32 hysteresis)
+ const s32 *thresholds, int n_thresholds,
+ u32 hysteresis)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int i, err;
+ s32 prev = S32_MIN;
- if (threshold > 0)
- return -EINVAL;
-
- /* disabling - hysteresis should also be zero then */
- if (threshold == 0)
- hysteresis = 0;
+ /* Check that all values are negative and strictly ascending */
+ for (i = 0; i < n_thresholds; i++) {
+ if (thresholds[i] > 0 || thresholds[i] <= prev)
+ return -EINVAL;
- if (!rdev->ops->set_cqm_rssi_config)
- return -EOPNOTSUPP;
+ prev = thresholds[i];
+ }
if (wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
- return rdev_set_cqm_rssi_config(rdev, dev, threshold, hysteresis);
+ wdev_lock(wdev);
+ cfg80211_cqm_config_free(wdev);
+ wdev_unlock(wdev);
+
+ if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+ if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+ return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+
+ return rdev_set_cqm_rssi_config(rdev, dev,
+ thresholds[0], hysteresis);
+ }
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+ return -EOPNOTSUPP;
+
+ if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ n_thresholds = 0;
+
+ wdev_lock(wdev);
+ if (n_thresholds) {
+ struct cfg80211_cqm_config *cqm_config;
+
+ cqm_config = kzalloc(sizeof(struct cfg80211_cqm_config) +
+ n_thresholds * sizeof(s32), GFP_KERNEL);
+ if (!cqm_config) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ cqm_config->rssi_hyst = hysteresis;
+ cqm_config->n_rssi_thresholds = n_thresholds;
+ memcpy(cqm_config->rssi_thresholds, thresholds,
+ n_thresholds * sizeof(s32));
+
+ wdev->cqm_config = cqm_config;
+ }
+
+ err = cfg80211_cqm_rssi_update(rdev, dev);
+
+unlock:
+ wdev_unlock(wdev);
+
+ return err;
}
static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
- s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ const s32 *thresholds =
+ nla_data(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ int len = nla_len(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
- return nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ if (len % 4)
+ return -EINVAL;
+
+ return nl80211_set_cqm_rssi(info, thresholds, len / 4,
+ hysteresis);
}
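As the parsing above shows, NL80211_ATTR_CQM_RSSI_THOLD is now a binary attribute carrying a packed array of s32 values, so its byte length must be a multiple of 4; nl80211_set_cqm_rssi() additionally requires the values to be non-positive and strictly ascending (a single 0 is the disable case). A standalone sketch of that validation (hypothetical helper, plain userspace C):

  #include <limits.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* mirrors the checks applied to the threshold attribute: byte length
   * a multiple of 4, values non-positive and strictly ascending */
  static bool cqm_thresholds_valid(const void *payload, size_t len)
  {
      const int32_t *thr = payload;
      int32_t prev = INT32_MIN;
      size_t i;

      if (len % 4)
          return false;

      for (i = 0; i < len / 4; i++) {
          if (thr[i] > 0 || thr[i] <= prev)
              return false;
          prev = thr[i];
      }
      return true;
  }

  int main(void)
  {
      int32_t thr[] = { -85, -70 };

      printf("%s\n", cqm_thresholds_valid(thr, sizeof(thr)) ? "ok" : "bad");
      return 0;
  }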
if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
static int
nl80211_prep_sched_scan_msg(struct sk_buff *msg,
- struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- u32 portid, u32 seq, int flags, u32 cmd)
+ struct cfg80211_sched_scan_request *req, u32 cmd)
{
void *hdr;
- hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
if (!hdr)
return -1;
- if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY,
+ wiphy_to_rdev(req->wiphy)->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid,
+ NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
-void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 cmd)
+void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd)
{
struct sk_buff *msg;
if (!msg)
return;
- if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
+ if (nl80211_prep_sched_scan_msg(msg, req, cmd) < 0) {
nlmsg_free(msg);
return;
}
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(req->wiphy), msg, 0,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
}
void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status,
- enum nl80211_timeout_reason timeout_reason,
+ struct net_device *netdev,
+ struct cfg80211_connect_resp_params *cr,
gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
+ msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
+ cr->fils_kek_len + cr->pmk_len +
+ (cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
if (!msg)
return;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+ (cr->bssid &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
- status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
- status) ||
- (status < 0 &&
+ cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+ cr->status) ||
+ (cr->status < 0 &&
(nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
- nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
- (req_ie &&
- nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
- (resp_ie &&
- nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+ nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
+ cr->timeout_reason))) ||
+ (cr->req_ie &&
+ nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
+ (cr->resp_ie &&
+ nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
+ cr->resp_ie)) ||
+ (cr->update_erp_next_seq_num &&
+ nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+ cr->fils_erp_next_seq_num)) ||
+ (cr->status == WLAN_STATUS_SUCCESS &&
+ ((cr->fils_kek &&
+ nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
+ cr->fils_kek)) ||
+ (cr->pmk &&
+ nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
+ (cr->pmkid &&
+ nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
goto nla_put_failure;
genlmsg_end(msg, hdr);
s32 rssi_level, gfp_t gfp)
{
struct sk_buff *msg;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
return;
+ if (wdev->cqm_config) {
+ wdev->cqm_config->last_rssi_event_value = rssi_level;
+
+ cfg80211_cqm_rssi_update(rdev, dev);
+
+ if (rssi_level == 0)
+ rssi_level = wdev->cqm_config->last_rssi_event_value;
+ }
+
msg = cfg80211_prepare_cqm(dev, NULL, gfp);
if (!msg)
return;
struct wireless_dev *wdev, bool aborted);
void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
struct sk_buff *msg);
-void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 cmd);
+void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd);
void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
struct regulatory_request *request);
struct net_device *netdev,
const u8 *addr, gfp_t gfp);
void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status,
- enum nl80211_timeout_reason timeout_reason,
+ struct net_device *netdev,
+ struct cfg80211_connect_resp_params *params,
gfp_t gfp);
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
static inline struct wireless_dev
*rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name,
unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct wireless_dev *ret;
trace_rdev_add_virtual_intf(&rdev->wiphy, name, type);
ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type,
- type, flags, params);
+ type, params);
trace_rdev_return_wdev(&rdev->wiphy, ret);
return ret;
}
static inline int
rdev_change_virtual_intf(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype type,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
int ret;
trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type);
- ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, flags,
- params);
+ ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
return ret;
}
+static inline int
+rdev_set_cqm_rssi_range_config(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, s32 low, s32 high)
+{
+ int ret;
+ trace_rdev_set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high);
+ ret = rdev->ops->set_cqm_rssi_range_config(&rdev->wiphy, dev,
+ low, high);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int
rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev,
struct net_device *dev, u32 rate, u32 pkts, u32 intvl)
return REG_REQ_IGNORE;
}
+bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2)
+{
+ const struct ieee80211_regdomain *wiphy1_regd = NULL;
+ const struct ieee80211_regdomain *wiphy2_regd = NULL;
+ const struct ieee80211_regdomain *cfg80211_regd = NULL;
+ bool dfs_domain_same;
+
+ rcu_read_lock();
+
+ cfg80211_regd = rcu_dereference(cfg80211_regdomain);
+ wiphy1_regd = rcu_dereference(wiphy1->regd);
+ if (!wiphy1_regd)
+ wiphy1_regd = cfg80211_regd;
+
+ wiphy2_regd = rcu_dereference(wiphy2->regd);
+ if (!wiphy2_regd)
+ wiphy2_regd = cfg80211_regd;
+
+ dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region;
+
+ rcu_read_unlock();
+
+ return dfs_domain_same;
+}
+
+static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan,
+ struct ieee80211_channel *src_chan)
+{
+ if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) ||
+ !(src_chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ if (dst_chan->flags & IEEE80211_CHAN_DISABLED ||
+ src_chan->flags & IEEE80211_CHAN_DISABLED)
+ return;
+
+ if (src_chan->center_freq == dst_chan->center_freq &&
+ dst_chan->dfs_state == NL80211_DFS_USABLE) {
+ dst_chan->dfs_state = src_chan->dfs_state;
+ dst_chan->dfs_state_entered = src_chan->dfs_state_entered;
+ }
+}
+
+static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy,
+ struct wiphy *src_wiphy)
+{
+ struct ieee80211_supported_band *src_sband, *dst_sband;
+ struct ieee80211_channel *src_chan, *dst_chan;
+ int i, j, band;
+
+ if (!reg_dfs_domain_same(dst_wiphy, src_wiphy))
+ return;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ dst_sband = dst_wiphy->bands[band];
+ src_sband = src_wiphy->bands[band];
+ if (!dst_sband || !src_sband)
+ continue;
+
+ for (i = 0; i < dst_sband->n_channels; i++) {
+ dst_chan = &dst_sband->channels[i];
+ for (j = 0; j < src_sband->n_channels; j++) {
+ src_chan = &src_sband->channels[j];
+ reg_copy_dfs_chan_state(dst_chan, src_chan);
+ }
+ }
+ }
+}
+
+static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy)
+{
+ struct cfg80211_registered_device *rdev;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (wiphy == &rdev->wiphy)
+ continue;
+ wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy);
+ }
+}
+
/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
wiphy_update_regulatory(wiphy, reg_request->initiator);
+ wiphy_all_share_dfs_chan_state(wiphy);
reg_check_channels();
}
lr = get_last_request();
wiphy_update_regulatory(wiphy, lr->initiator);
+ wiphy_all_share_dfs_chan_state(wiphy);
}
void wiphy_regulatory_deregister(struct wiphy *wiphy)
return reg_is_indoor;
}
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
+{
+ const struct ieee80211_regdomain *regd = NULL;
+ const struct ieee80211_regdomain *wiphy_regd = NULL;
+ bool pre_cac_allowed = false;
+
+ rcu_read_lock();
+
+ regd = rcu_dereference(cfg80211_regdomain);
+ wiphy_regd = rcu_dereference(wiphy->regd);
+ if (!wiphy_regd) {
+ if (regd->dfs_region == NL80211_DFS_ETSI)
+ pre_cac_allowed = true;
+
+ rcu_read_unlock();
+
+ return pre_cac_allowed;
+ }
+
+ if (regd->dfs_region == wiphy_regd->dfs_region &&
+ wiphy_regd->dfs_region == NL80211_DFS_ETSI)
+ pre_cac_allowed = true;
+
+ rcu_read_unlock();
+
+ return pre_cac_allowed;
+}
+
+void regulatory_propagate_dfs_state(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_dfs_state dfs_state,
+ enum nl80211_radar_event event)
+{
+ struct cfg80211_registered_device *rdev;
+
+ ASSERT_RTNL();
+
+ if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+ return;
+
+ if (WARN_ON(!(chandef->chan->flags & IEEE80211_CHAN_RADAR)))
+ return;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (wiphy == &rdev->wiphy)
+ continue;
+
+ if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
+ continue;
+
+ if (!ieee80211_get_channel(&rdev->wiphy,
+ chandef->chan->center_freq))
+ continue;
+
+ cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state);
+
+ if (event == NL80211_RADAR_DETECTED ||
+ event == NL80211_RADAR_CAC_FINISHED)
+ cfg80211_sched_dfs_chan_update(rdev);
+
+ nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL);
+ }
+}
+
int __init regulatory_init(void)
{
int err = 0;
*/
bool regulatory_indoor_allowed(void);
+/*
+ * Grace period to time out pre-CAC results on DFS channels. This timeout
+ * value is used for non-ETSI domains.
+ * TODO: maybe make this timeout available through regdb?
+ */
+#define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
+
+/**
+ * regulatory_pre_cac_allowed - check if pre-CAC is allowed in the current DFS domain
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in ETSI domain.
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
+/**
+ * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
+ * @wiphy: wiphy on which the radar is detected; the event will be propagated
+ * to all other available wiphys with the same DFS domain
+ * @chandef: channel definition of the radar-detected channel
+ * @dfs_state: DFS channel state to be set
+ * @event: type of radar event which triggered this DFS state change
+ *
+ * This function should be called with rtnl lock held.
+ */
+void regulatory_propagate_dfs_state(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_dfs_state dfs_state,
+ enum nl80211_radar_event event);
+
+/**
+ * reg_dfs_domain_same - check whether two wiphys have the same DFS domain configured
+ * @wiphy1: wiphy whose dfs_region is checked against that of wiphy2
+ * @wiphy2: wiphy whose dfs_region is checked against that of wiphy1
+ */
+bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2);
#endif /* __NET_WIRELESS_REG_H */
spin_unlock_bh(&rdev->bss_lock);
request->scan_start = jiffies;
}
- nl80211_send_sched_scan(rdev, request->dev,
- NL80211_CMD_SCHED_SCAN_RESULTS);
+ nl80211_send_sched_scan(request, NL80211_CMD_SCHED_SCAN_RESULTS);
}
rtnl_unlock();
return err;
}
- nl80211_send_sched_scan(rdev, dev, NL80211_CMD_SCHED_SCAN_STOPPED);
+ nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_SCHED_SCAN_STOPPED);
RCU_INIT_POINTER(rdev->sched_scan_req, NULL);
kfree_rcu(sched_scan_req, rcu_head);
}
treason = NL80211_TIMEOUT_UNSPECIFIED;
if (cfg80211_conn_do_work(wdev, &treason)) {
- __cfg80211_connect_result(
- wdev->netdev, bssid,
- NULL, 0, NULL, 0, -1, false, NULL,
- treason);
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = -1;
+ cr.bssid = bssid;
+ cr.timeout_reason = treason;
+ __cfg80211_connect_result(wdev->netdev, &cr, false);
}
wdev_unlock(wdev);
}
wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
schedule_work(&rdev->conn_work);
} else if (status_code != WLAN_STATUS_SUCCESS) {
- __cfg80211_connect_result(wdev->netdev, mgmt->bssid,
- NULL, 0, NULL, 0,
- status_code, false, NULL,
- NL80211_TIMEOUT_UNSPECIFIED);
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = status_code;
+ cr.bssid = mgmt->bssid;
+ cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
+ __cfg80211_connect_result(wdev->netdev, &cr, false);
} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
schedule_work(&rdev->conn_work);
*/
/* This method must consume bss one way or another */
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, bool wextev,
- struct cfg80211_bss *bss,
- enum nl80211_timeout_reason timeout_reason)
+void __cfg80211_connect_result(struct net_device *dev,
+ struct cfg80211_connect_resp_params *cr,
+ bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const u8 *country_ie;
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, cr->bss);
return;
}
- nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
- bssid, req_ie, req_ie_len,
- resp_ie, resp_ie_len,
- status, timeout_reason, GFP_KERNEL);
+ nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
+ GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
if (wextev) {
- if (req_ie && status == WLAN_STATUS_SUCCESS) {
+ if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = req_ie_len;
- wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+ wrqu.data.length = cr->req_ie_len;
+ wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+ cr->req_ie);
}
- if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+ if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) {
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = resp_ie_len;
- wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+ wrqu.data.length = cr->resp_ie_len;
+ wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+ cr->resp_ie);
}
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (bssid && status == WLAN_STATUS_SUCCESS) {
- memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
- memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+ if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
+ memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
wdev->wext.prev_bssid_valid = true;
}
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
#endif
- if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+ if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
- wdev->conn_bss_type,
- IEEE80211_PRIVACY_ANY);
- if (bss)
- cfg80211_hold_bss(bss_from_pub(bss));
+ cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
+ wdev->ssid, wdev->ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY_ANY);
+ if (cr->bss)
+ cfg80211_hold_bss(bss_from_pub(cr->bss));
}
if (wdev->current_bss) {
wdev->current_bss = NULL;
}
- if (status != WLAN_STATUS_SUCCESS) {
+ if (cr->status != WLAN_STATUS_SUCCESS) {
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
wdev->conn_owner_nlportid = 0;
- if (bss) {
- cfg80211_unhold_bss(bss_from_pub(bss));
- cfg80211_put_bss(wdev->wiphy, bss);
+ if (cr->bss) {
+ cfg80211_unhold_bss(bss_from_pub(cr->bss));
+ cfg80211_put_bss(wdev->wiphy, cr->bss);
}
cfg80211_sme_free(wdev);
return;
}
- if (WARN_ON(!bss))
+ if (WARN_ON(!cr->bss))
return;
- wdev->current_bss = bss_from_pub(bss);
+ wdev->current_bss = bss_from_pub(cr->bss);
if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
- country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+ country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
if (!country_ie) {
rcu_read_unlock();
return;
* - country_ie + 2, the start of the country ie data, and
* - and country_ie[1] which is the IE length
*/
- regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+ regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
country_ie + 2, country_ie[1]);
kfree(country_ie);
}
/* Consumes bss object one way or another */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
- struct cfg80211_bss *bss, const u8 *req_ie,
- size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp,
- enum nl80211_timeout_reason timeout_reason)
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
+ u8 *next;
- if (bss) {
+ if (params->bss) {
/* Make sure the bss entry provided by the driver is valid. */
- struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+ struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
if (WARN_ON(list_empty(&ibss->list))) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, params->bss);
return;
}
}
- ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+ ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+ params->req_ie_len + params->resp_ie_len +
+ params->fils_kek_len + params->pmk_len +
+ (params->pmkid ? WLAN_PMKID_LEN : 0), gfp);
if (!ev) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, params->bss);
return;
}
ev->type = EVENT_CONNECT_RESULT;
- if (bssid)
- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
- if (req_ie_len) {
- ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
- ev->cr.req_ie_len = req_ie_len;
- memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+ next = ((u8 *)ev) + sizeof(*ev);
+ if (params->bssid) {
+ ev->cr.bssid = next;
+ memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+ next += ETH_ALEN;
}
- if (resp_ie_len) {
- ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
- ev->cr.resp_ie_len = resp_ie_len;
- memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ if (params->req_ie_len) {
+ ev->cr.req_ie = next;
+ ev->cr.req_ie_len = params->req_ie_len;
+ memcpy((void *)ev->cr.req_ie, params->req_ie,
+ params->req_ie_len);
+ next += params->req_ie_len;
}
- if (bss)
- cfg80211_hold_bss(bss_from_pub(bss));
- ev->cr.bss = bss;
- ev->cr.status = status;
- ev->cr.timeout_reason = timeout_reason;
+ if (params->resp_ie_len) {
+ ev->cr.resp_ie = next;
+ ev->cr.resp_ie_len = params->resp_ie_len;
+ memcpy((void *)ev->cr.resp_ie, params->resp_ie,
+ params->resp_ie_len);
+ next += params->resp_ie_len;
+ }
+ if (params->fils_kek_len) {
+ ev->cr.fils_kek = next;
+ ev->cr.fils_kek_len = params->fils_kek_len;
+ memcpy((void *)ev->cr.fils_kek, params->fils_kek,
+ params->fils_kek_len);
+ next += params->fils_kek_len;
+ }
+ if (params->pmk_len) {
+ ev->cr.pmk = next;
+ ev->cr.pmk_len = params->pmk_len;
+ memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len);
+ next += params->pmk_len;
+ }
+ if (params->pmkid) {
+ ev->cr.pmkid = next;
+ memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN);
+ next += WLAN_PMKID_LEN;
+ }
+ ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num;
+ if (params->update_erp_next_seq_num)
+ ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num;
+ if (params->bss)
+ cfg80211_hold_bss(bss_from_pub(params->bss));
+ ev->cr.bss = params->bss;
+ ev->cr.status = params->status;
+ ev->cr.timeout_reason = params->timeout_reason;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
}
-EXPORT_SYMBOL(cfg80211_connect_bss);
+EXPORT_SYMBOL(cfg80211_connect_done);
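cfg80211_connect_done() sizes a single allocation for the event plus all variable-length blobs, then carves the trailing storage with a moving next cursor, so the whole event can later be freed in one go. A self-contained userspace sketch of the same packing pattern (hypothetical struct, not the kernel one):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct event {
      const unsigned char *req_ie;
      size_t req_ie_len;
      const unsigned char *resp_ie;
      size_t resp_ie_len;
  };

  /* one allocation: header first, blobs carved from the tail */
  static struct event *pack_event(const unsigned char *req, size_t req_len,
                                  const unsigned char *resp, size_t resp_len)
  {
      struct event *ev = calloc(1, sizeof(*ev) + req_len + resp_len);
      unsigned char *next;

      if (!ev)
          return NULL;

      next = (unsigned char *)ev + sizeof(*ev);
      if (req_len) {
          ev->req_ie = next;
          ev->req_ie_len = req_len;
          memcpy(next, req, req_len);
          next += req_len;
      }
      if (resp_len) {
          ev->resp_ie = next;
          ev->resp_ie_len = resp_len;
          memcpy(next, resp, resp_len);
          next += resp_len;
      }
      return ev; /* released with a single free() */
  }

  int main(void)
  {
      unsigned char req[] = { 0x01, 0x02 }, resp[] = { 0x03 };
      struct event *ev = pack_event(req, sizeof(req), resp, sizeof(resp));

      if (ev) {
          printf("req %zu bytes, resp %zu bytes\n",
                 ev->req_ie_len, ev->resp_ie_len);
          free(ev);
      }
      return 0;
  }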
/* Consumes bss object one way or another */
void __cfg80211_roamed(struct wireless_dev *wdev,
__entry->rssi_thold, __entry->rssi_hyst)
);
+TRACE_EVENT(rdev_set_cqm_rssi_range_config,
+ TP_PROTO(struct wiphy *wiphy,
+ struct net_device *netdev, s32 low, s32 high),
+ TP_ARGS(wiphy, netdev, low, high),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(s32, rssi_low)
+ __field(s32, rssi_high)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->rssi_low = low;
+ __entry->rssi_high = high;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
+ ", range: %d - %d ",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->rssi_low, __entry->rssi_high)
+);
+
TRACE_EVENT(rdev_set_cqm_txe_config,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate,
u32 pkts, u32 intvl),
int offset, int len)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- const skb_frag_t *frag = &sh->frags[-1];
+ const skb_frag_t *frag = &sh->frags[0];
struct page *frag_page;
void *frag_ptr;
int frag_len, frag_size;
while (offset >= frag_size) {
offset -= frag_size;
- frag++;
frag_page = skb_frag_page(frag);
frag_ptr = skb_frag_address(frag);
frag_size = skb_frag_size(frag);
+ frag++;
}
frag_ptr += offset;
len -= cur_len;
while (len > 0) {
- frag++;
frag_len = skb_frag_size(frag);
cur_len = min(len, frag_len);
__frame_add_frag(frame, skb_frag_page(frag),
skb_frag_address(frag), cur_len, frag_len);
len -= cur_len;
+ frag++;
}
}
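The two hunks above fix an iteration bug: the old code started at &frags[-1], one element before the array, and advanced the pointer before reading it; the corrected loops read the current fragment first and advance only afterwards. A toy model of the corrected walk (hypothetical sizes, plain C; like the kernel code, it assumes the offset lies within the total size):

  #include <stdio.h>

  int main(void)
  {
      int sizes[] = { 4, 8, 2 }; /* stand-ins for skb fragment sizes */
      int offset = 9, i = 0;

      /* consume whole elements until the offset falls inside one */
      while (offset >= sizes[i]) {
          offset -= sizes[i];
          i++; /* advance only after consuming the current element */
      }
      printf("offset lands in element %d at byte %d\n", i, offset);
      return 0;
  }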
netdev_err(dev, "failed to set key %d\n", i);
continue;
}
- if (wdev->connect_keys->def == i)
- if (rdev_set_default_key(rdev, dev, i, true, true)) {
- netdev_err(dev, "failed to set defkey %d\n", i);
- continue;
- }
+ if (wdev->connect_keys->def == i &&
+ rdev_set_default_key(rdev, dev, i, true, true)) {
+ netdev_err(dev, "failed to set defkey %d\n", i);
+ continue;
+ }
}
kzfree(wdev->connect_keys);
{
struct cfg80211_event *ev;
unsigned long flags;
- const u8 *bssid = NULL;
spin_lock_irqsave(&wdev->event_lock, flags);
while (!list_empty(&wdev->event_list)) {
wdev_lock(wdev);
switch (ev->type) {
case EVENT_CONNECT_RESULT:
- if (!is_zero_ether_addr(ev->cr.bssid))
- bssid = ev->cr.bssid;
__cfg80211_connect_result(
- wdev->netdev, bssid,
- ev->cr.req_ie, ev->cr.req_ie_len,
- ev->cr.resp_ie, ev->cr.resp_ie_len,
- ev->cr.status,
- ev->cr.status == WLAN_STATUS_SUCCESS,
- ev->cr.bss, ev->cr.timeout_reason);
+ wdev->netdev,
+ &ev->cr,
+ ev->cr.status == WLAN_STATUS_SUCCESS);
break;
case EVENT_ROAMED:
__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
int err;
enum nl80211_iftype otype = dev->ieee80211_ptr->iftype;
cfg80211_process_rdev_events(rdev);
}
- err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params);
+ err = rdev_change_virtual_intf(rdev, dev, ntype, params);
WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
return err;
}
+static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate)
+{
+ int modulation, streams, bitrate;
+
+ /* the formula below only works for MCS values smaller than 32 */
+ if (WARN_ON_ONCE(rate->mcs >= 32))
+ return 0;
+
+ modulation = rate->mcs & 7;
+ streams = (rate->mcs >> 3) + 1;
+
+ bitrate = (rate->bw == RATE_INFO_BW_40) ? 13500000 : 6500000;
+
+ if (modulation < 4)
+ bitrate *= (modulation + 1);
+ else if (modulation == 4)
+ bitrate *= (modulation + 2);
+ else
+ bitrate *= (modulation + 3);
+
+ bitrate *= streams;
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ bitrate = (bitrate / 9) * 10;
+
+ /* do NOT round down here */
+ return (bitrate + 50000) / 100000;
+}
+
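The helper factored out above keeps the original HT formula intact. As a quick standalone check of the arithmetic (userspace sketch, hypothetical wrapper name): MCS 7 on a 40 MHz channel with a short guard interval should come out at 1500, i.e. 150.0 Mbps, since the result is in units of 100 kbit/s.

  #include <stdio.h>

  static unsigned int ht_bitrate(unsigned int mcs, int is_40mhz, int short_gi)
  {
      unsigned int modulation = mcs & 7;
      unsigned int streams = (mcs >> 3) + 1;
      unsigned int bitrate = is_40mhz ? 13500000 : 6500000;

      /* per-MCS modulation/coding multiplier, as in the kernel helper */
      if (modulation < 4)
          bitrate *= modulation + 1;
      else if (modulation == 4)
          bitrate *= modulation + 2;
      else
          bitrate *= modulation + 3;

      bitrate *= streams;
      if (short_gi)
          bitrate = bitrate / 9 * 10;

      /* round to the nearest 100 kbit/s rather than rounding down */
      return (bitrate + 50000) / 100000;
  }

  int main(void)
  {
      unsigned int r = ht_bitrate(7, 1, 1);

      printf("%u.%u Mbps\n", r / 10, r % 10); /* prints 150.0 Mbps */
      return 0;
  }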
static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
{
static const u32 __mcs2bitrate[] = {
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
{
- int modulation, streams, bitrate;
-
- if (!(rate->flags & RATE_INFO_FLAGS_MCS) &&
- !(rate->flags & RATE_INFO_FLAGS_VHT_MCS))
- return rate->legacy;
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ return cfg80211_calculate_bitrate_ht(rate);
if (rate->flags & RATE_INFO_FLAGS_60G)
return cfg80211_calculate_bitrate_60g(rate);
if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
return cfg80211_calculate_bitrate_vht(rate);
- /* the formula below does only work for MCS values smaller than 32 */
- if (WARN_ON_ONCE(rate->mcs >= 32))
- return 0;
-
- modulation = rate->mcs & 7;
- streams = (rate->mcs >> 3) + 1;
-
- bitrate = (rate->bw == RATE_INFO_BW_40) ? 13500000 : 6500000;
-
- if (modulation < 4)
- bitrate *= (modulation + 1);
- else if (modulation == 4)
- bitrate *= (modulation + 2);
- else
- bitrate *= (modulation + 3);
-
- bitrate *= streams;
-
- if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
- bitrate = (bitrate / 9) * 10;
-
- /* do NOT round down here */
- return (bitrate + 50000) / 100000;
+ return rate->legacy;
}
EXPORT_SYMBOL(cfg80211_calculate_bitrate);
memset(&vifparams, 0, sizeof(vifparams));
- return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
+ return cfg80211_change_iface(rdev, dev, type, &vifparams);
}
EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode);
__u32 tc_classid;
__u32 data;
__u32 data_end;
+ __u32 napi_id;
};
struct bpf_tunnel_key {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
--- /dev/null
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+ echo "no function tracer configured"
+ exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+ echo "set_ftrace_pid not found? Is function tracer not set?"
+ exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+ echo "set_ftrace_filter not found? Is function tracer not set?"
+ exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+ do_function_fork=0
+ echo "no option for function-fork found. Option will not be tested."
+fi
+
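+# this shell's PID is the first field of /proc/self/stat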
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+ # default value of function-fork option
+ orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+ reset_tracer
+ clear_trace
+ enable_tracing
+ echo > set_ftrace_filter
+ echo > set_ftrace_pid
+
+ if [ $do_function_fork -eq 0 ]; then
+ return
+ fi
+
+ echo $orig_value > trace_options
+}
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit $FAIL
+}
+
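+# fork and exec a child (ping) so the filtered fork/exec functions are hit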
+yield() {
+ ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+ disable_tracing
+
+ echo do_execve* > set_ftrace_filter
+ echo *do_fork >> set_ftrace_filter
+
+ echo $PID > set_ftrace_pid
+ echo function > current_tracer
+
+ if [ $do_function_fork -eq 1 ]; then
+ # don't allow children to be traced
+ echo nofunction-fork > trace_options
+ fi
+
+ enable_tracing
+ yield
+
+ count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+ count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+ # count_other should be 0
+ if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+ fail "PID filtering not working?"
+ fi
+
+ disable_tracing
+ clear_trace
+
+ if [ $do_function_fork -eq 0 ]; then
+ return
+ fi
+
+ # allow children to be traced
+ echo function-fork > trace_options
+
+ enable_tracing
+ yield
+
+ count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+ count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+ # count_other should NOT be 0
+ if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+ fail "PID filtering not following fork?"
+ fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0