struct pci_error_handlers
{
- int (*error_detected)(struct pci_dev *dev, enum pci_channel_state);
+ int (*error_detected)(struct pci_dev *dev, pci_channel_state_t);
int (*mmio_enabled)(struct pci_dev *dev);
int (*slot_reset)(struct pci_dev *dev);
void (*resume)(struct pci_dev *dev);
The possible channel states are::
- enum pci_channel_state {
+ typedef enum {
pci_channel_io_normal, /* I/O channel is in normal state */
pci_channel_io_frozen, /* I/O to channel is blocked */
pci_channel_io_perm_failure, /* PCI card is dead */
- };
+ } pci_channel_state_t;
Possible return values are::
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
-the platform will perform a slot reset on the requesting PCI device(s).
+platform will perform a slot reset on the requesting PCI device(s).
The actual steps taken by a platform to perform a slot reset
will be platform-dependent. Upon completion of slot reset, the
platform will call the device slot_reset() callback.
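For illustration, here is a minimal sketch of how a driver might wire up
these callbacks (the foo_* names are placeholders, not an existing
driver)::

  static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
  {
          if (state == pci_channel_io_perm_failure)
                  return PCI_ERS_RESULT_DISCONNECT;

          /* Quiesce the device and wait for slot_reset(). */
          return PCI_ERS_RESULT_NEED_RESET;
  }

  static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
  {
          /* The platform has reset the slot; reinitialize the device. */
          if (pci_enable_device(pdev))
                  return PCI_ERS_RESULT_DISCONNECT;
          pci_set_master(pdev);
          return PCI_ERS_RESULT_RECOVERED;
  }

  static const struct pci_error_handlers foo_err_handlers = {
          .error_detected = foo_error_detected,
          .slot_reset     = foo_slot_reset,
  };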
-------------------------
A "permanent failure" has occurred, and the platform cannot recover
the device. The platform will call error_detected() with a
- pci_channel_state value of pci_channel_io_perm_failure.
+ pci_channel_state_t value of pci_channel_io_perm_failure.
The device driver should, at this point, assume the worst. It should
cancel all pending I/O, refuse all new I/O, returning -EIO to
A more complete resource is the third edition of "Linux Device Drivers"
by Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman.
LDD3 is available for free (under Creative Commons License) from:
- http://lwn.net/Kernel/LDD3/.
+ https://lwn.net/Kernel/LDD3/.
However, keep in mind that all documents are subject to "bit rot".
Refer to the source code if things are not working as described here.
OS BUG: we don't check resource allocations before enabling those
resources. The sequence would make more sense if we called
pci_request_resources() before calling pci_enable_device().
- Currently, the device drivers can't detect the bug when when two
+ Currently, the device drivers can't detect the bug when two
devices have been allocated the same range. This is not a common
problem and unlikely to get fixed soon.
This has been discussed before but not changed as of 2.6.19:
- http://lkml.org/lkml/2006/3/2/194
pci_set_master() will enable DMA by setting the bus master bit
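As a hedged reference (foo is a placeholder, not a real driver), the
enable/request/master sequence in a probe routine typically looks like::

  static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
          int err;

          err = pci_enable_device(pdev);
          if (err)
                  return err;

          err = pci_request_regions(pdev, "foo");
          if (err)
                  goto err_disable;

          pci_set_master(pdev);   /* sets the bus master bit */
          return 0;

  err_disable:
          pci_disable_device(pdev);
          return err;
  }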
---------------------
.. note::
If anything below doesn't make sense, please refer to
- Documentation/DMA-API.txt. This section is just a reminder that
+ :doc:`/core-api/dma-api`. This section is just a reminder that
drivers need to indicate DMA capabilities of the device and is not
an authoritative source for DMA interfaces.
Setup shared control data
-------------------------
Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
-memory. See Documentation/DMA-API.txt for a full description of
+memory. See :doc:`/core-api/dma-api` for a full description of
the DMA APIs. This section is just a reminder that it needs to be done
before enabling DMA on the device.
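A minimal sketch, assuming a 64-bit capable device and a page-sized
control buffer (both assumptions, not requirements)::

  dma_addr_t ctrl_dma;
  void *ctrl;

  if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
          return -EIO;

  /* "Consistent" buffer shared between the CPU and the device. */
  ctrl = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ctrl_dma, GFP_KERNEL);
  if (!ctrl)
          return -ENOMEM;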
Then clean up "consistent" buffers which contain the control data.
-See Documentation/DMA-API.txt for details on unmapping interfaces.
+See :doc:`/core-api/dma-api` for details on unmapping interfaces.
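For instance, the control buffer from the sketch above would be released
with::

  dma_free_coherent(&pdev->dev, PAGE_SIZE, ctrl, ctrl_dma);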
Unregister from other subsystems
The device IDs are arbitrary hex numbers (vendor controlled) and normally used
only in a single location, the pci_device_id table.
- Please DO submit new vendor/device IDs to http://pci-ids.ucw.cz/.
- There are mirrors of the pci.ids file at http://pciids.sourceforge.net/
- and https://github.com/pciutils/pciids.
+ Please DO submit new vendor/device IDs to https://pci-ids.ucw.cz/.
+ There's a mirror of the pci.ids file at https://github.com/pciutils/pciids.
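A hedged example of such a table; the 0x1234/0x5678 vendor/device values
below are made up::

  static const struct pci_device_id foo_pci_tbl[] = {
          { PCI_DEVICE(0x1234, 0x5678) },
          { }     /* terminating entry */
  };
  MODULE_DEVICE_TABLE(pci, foo_pci_tbl);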
Obsolete functions
S: Odd Fixes
-F: Documentation/networking/device_drivers/3com/vortex.rst
+F: Documentation/networking/device_drivers/ethernet/3com/vortex.rst
F: drivers/net/ethernet/3com/3c59x.c
3CR990 NETWORK DRIVER
F: include/linux/mfd/altera-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Thor Thayer <thor.thayer@linux.intel.com>
+M: Joyce Ooi <joyce.ooi@intel.com>
S: Maintained
F: drivers/net/ethernet/altera/
S: Supported
-F: Documentation/networking/device_drivers/amazon/ena.rst
+F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst
F: drivers/net/ethernet/amazon/
AMAZON RDMA EFA DRIVER
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - SEV SUPPORT
+S: Supported
+F: drivers/crypto/ccp/sev*
+F: include/uapi/linux/psp-sev.h
+
AMD DISPLAY CORE
S: Supported
W: http://ez.analog.com/community/linux-device-drivers
F: drivers/media/i2c/adv7180.c
+F: Documentation/devicetree/bindings/media/i2c/adv7180.yaml
ANALOG DEVICES INC ADV748X DRIVER
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
S: Supported
W: https://www.marvell.com/
Q: http://patchwork.ozlabs.org/project/netdev/list/
-F: Documentation/networking/device_drivers/aquantia/atlantic.rst
+F: Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
F: drivers/net/ethernet/aquantia/atlantic/
AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: arch/arm/oprofile/common.c
-F: drivers/perf/*
+F: drivers/perf/
F: include/linux/perf/arm_pmu.h
ARM PORT
F: drivers/amba/
F: include/linux/amba/bus.h
-ARM PRIMECELL CLCD PL110 DRIVER
-S: Odd Fixes
-F: drivers/video/fbdev/amba-clcd.*
-
ARM PRIMECELL KMI PL050 DRIVER
S: Odd Fixes
ARM/Amlogic Meson SoC support
S: Maintained
S: Maintained
F: arch/arm/boot/dts/alpine*
F: arch/arm/mach-alpine/
-F: arch/arm64/boot/dts/al/
+F: arch/arm64/boot/dts/amazon/
F: drivers/*/*alpine*
ARM/ARTPEC MACHINE SUPPORT
F: include/linux/irqchip/irq-ixp4xx.h
F: include/linux/platform_data/timer-ixp4xx.h
+ARM/INTEL KEEMBAY ARCHITECTURE
+S: Maintained
+F: Documentation/devicetree/bindings/arm/intel,keembay.yaml
+F: arch/arm64/boot/dts/intel/keembay-evm.dts
+F: arch/arm64/boot/dts/intel/keembay-soc.dtsi
+
ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
N: at91
N: atmel
+ARM/Microchip Sparx5 SoC support
+S: Supported
+F: arch/arm64/boot/dts/microchip/
+N: sparx5
+
ARM/MIOA701 MACHINE SUPPORT
S: Maintained
F: arch/arm/mach-pxa/mioa701.c
+ARM/MStar/Sigmastar Armv7 SoC support
+S: Maintained
+W: http://linux-chenxing.org/
+F: Documentation/devicetree/bindings/arm/mstar/*
+F: arch/arm/boot/dts/infinity*.dtsi
+F: arch/arm/boot/dts/mercury*.dtsi
+F: arch/arm/boot/dts/mstar-v7.dtsi
+F: arch/arm/mach-mstar/
+
ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
S: Maintained
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
-F: drivers/scsi/ufs/ufs-qcom.*
+F: drivers/scsi/ufs/ufs-qcom*
F: drivers/spi/spi-geni-qcom.c
F: drivers/spi/spi-qcom-qspi.c
F: drivers/spi/spi-qup.c
S: Maintained
-F: Documentation/crypto/asymmetric-keys.txt
+F: Documentation/crypto/asymmetric-keys.rst
F: crypto/asymmetric_keys/
F: include/crypto/pkcs7.h
F: include/crypto/public_key.h
S: Odd fixes
W: http://sourceforge.net/projects/xscaleiop
-F: Documentation/crypto/async-tx-api.txt
+F: Documentation/crypto/async-tx-api.rst
F: crypto/async_tx/
F: drivers/dma/
F: include/linux/async_tx.h
F: drivers/net/wireless/ath/*
ATHEROS ATH5K WIRELESS DRIVER
-M: Jiri Slaby <jirislaby@gmail.com>
+M: Jiri Slaby <jirislaby@kernel.org>
ATMEL MACB ETHERNET DRIVER
S: Supported
F: drivers/net/ethernet/cadence/
BPF JIT for S390
-M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Heiko Carstens <hca@linux.ibm.com>
S: Supported
W: http://www.broadcom.com
S: Supported
F: drivers/char/hw_random/cctrng.c
F: drivers/char/hw_random/cctrng.h
-F: Documentation/devicetree/bindings/rng/arm-cctrng.txt
+F: Documentation/devicetree/bindings/rng/arm-cctrng.yaml
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
CEC FRAMEWORK
N: cros_ec
N: cros-ec
+CHRONTEL CH7322 CEC DRIVER
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
+F: drivers/media/cec/i2c/ch7322.c
+
CIRRUS LOGIC AUDIO CODEC DRIVERS
S: Maintained
F: drivers/connector/
+CONSOLE SUBSYSTEM
+S: Supported
+F: drivers/video/console/
+F: include/linux/console*
+
CONTROL GROUP (CGROUP)
F: Documentation/hwmon/coretemp.rst
F: drivers/hwmon/coretemp.c
+CORSAIR-CPRO HARDWARE MONITOR DRIVER
+S: Maintained
+F: drivers/hwmon/corsair-cpro.c
+
COSA/SRP SYNC SERIAL DRIVER
S: Maintained
DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
S: Orphan
-F: Documentation/networking/device_drivers/dec/dmfe.rst
+F: Documentation/networking/device_drivers/ethernet/dec/dmfe.rst
F: drivers/net/ethernet/dec/tulip/dmfe.c
DC390/AM53C974 SCSI driver
F: drivers/pinctrl/pinctrl-da90??.c
F: drivers/power/supply/da9052-battery.c
F: drivers/power/supply/da91??-*.c
-F: drivers/regulator/da903x.c
F: drivers/regulator/da9???-regulator.[ch]
F: drivers/regulator/slg51000-regulator.[ch]
F: drivers/rtc/rtc-da90??.c
DMA BUFFER SHARING FRAMEWORK
S: Maintained
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
-T: git git://git.infradead.org/users/vkoul/slave-dma.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/
F: drivers/dma/
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
F: drivers/media/i2c/dw9714.c
+DONGWOON DW9768 LENS VOICE COIL DRIVER
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
+F: drivers/media/i2c/dw9768.c
+
DONGWOON DW9807 LENS VOICE COIL DRIVER
S: Maintained
-F: Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
-F: Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
+F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
+F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
F: drivers/net/ethernet/freescale/dpaa2/Kconfig
F: drivers/net/ethernet/freescale/dpaa2/Makefile
F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
DRM DRIVER FOR RAYDIUM RM67191 PANELS
S: Maintained
-F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
+F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
-F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
F: Documentation/gpu/xen-front.rst
F: drivers/gpu/drm/xen/
+DRM DRIVERS FOR XILINX
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/xlnx/
+F: drivers/gpu/drm/xlnx/
+
DRM DRIVERS FOR ZTE ZX
S: Maintained
S: Maintained
F: drivers/usb/gadget/udc/fsl*
+FREESCALE USB PHY DRIVER
+S: Maintained
+F: drivers/usb/phy/phy-fsl-usb*
+
FREEVXFS FILESYSTEM
S: Maintained
F: include/uapi/linux/futex.h
F: kernel/futex.c
F: tools/perf/bench/futex*
-F: Documentation/locking/*futex*
+F: tools/testing/selftests/futex/
GATEWORKS SYSTEM CONTROLLER (GSC) DRIVER
S: Supported
-F: Documentation/networking/device_drivers/google/gve.rst
+F: Documentation/networking/device_drivers/ethernet/google/gve.rst
F: drivers/net/ethernet/google
GPD POCKET FAN DRIVER
S: Supported
-F: Documentation/networking/hinic.rst
+F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
F: drivers/net/ethernet/huawei/hinic/
HUGETLB FILESYSTEM
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
F: Documentation/ABI/stable/sysfs-bus-vmbus
F: Documentation/ABI/testing/debugfs-hyperv
-F: Documentation/networking/device_drivers/microsoft/netvsc.rst
+F: Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst
F: arch/x86/hyperv
F: arch/x86/include/asm/hyperv-tlfs.h
F: arch/x86/include/asm/mshyperv.h
S: Maintained
-W: http://wpan.cakelab.org/
+W: https://linux-wpan.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
F: Documentation/networking/ieee802154.rst
S: Maintained
F: drivers/platform/x86/intel_atomisp2_pm.c
+INTEL ATOMISP2 LED DRIVER
+S: Maintained
+F: drivers/platform/x86/intel_atomisp2_led.c
+
INTEL BROXTON PMC DRIVER
Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
-F: Documentation/networking/device_drivers/intel/e100.rst
-F: Documentation/networking/device_drivers/intel/e1000.rst
-F: Documentation/networking/device_drivers/intel/e1000e.rst
-F: Documentation/networking/device_drivers/intel/fm10k.rst
-F: Documentation/networking/device_drivers/intel/i40e.rst
-F: Documentation/networking/device_drivers/intel/iavf.rst
-F: Documentation/networking/device_drivers/intel/ice.rst
-F: Documentation/networking/device_drivers/intel/igb.rst
-F: Documentation/networking/device_drivers/intel/igbvf.rst
-F: Documentation/networking/device_drivers/intel/ixgb.rst
-F: Documentation/networking/device_drivers/intel/ixgbe.rst
-F: Documentation/networking/device_drivers/intel/ixgbevf.rst
+F: Documentation/networking/device_drivers/ethernet/intel/
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
S: Supported
F: Documentation/driver-api/mei/*
-F: drivers/misc/mei/*
+F: drivers/misc/mei/
F: drivers/watchdog/mei_wdt.c
F: include/linux/mei_cl_bus.h
F: include/uapi/linux/mei.h
S: Maintained
-F: Documentation/networking/device_drivers/intel/ipw2100.rst
-F: Documentation/networking/device_drivers/intel/ipw2200.rst
+F: Documentation/networking/device_drivers/wifi/intel/ipw2100.rst
+F: Documentation/networking/device_drivers/wifi/intel/ipw2200.rst
F: drivers/net/wireless/intel/ipw2x00/
INTEL PSTATE DRIVER
F: include/linux/interconnect-provider.h
F: include/linux/interconnect.h
+INVENSENSE ICM-426xx IMU DRIVER
+S: Maintained
+W: https://invensense.tdk.com/
+F: Documentation/devicetree/bindings/iio/imu/invensense,icm42600.yaml
+F: drivers/iio/imu/inv_icm42600/
+
INVENSENSE MPU-3050 GYROSCOPE DRIVER
F: scripts/Kconfig.include
F: scripts/kconfig/
+KCOV
+S: Maintained
+F: Documentation/dev-tools/kcov.rst
+F: include/linux/kcov.h
+F: include/uapi/linux/kcov.h
+F: kernel/kcov.c
+F: scripts/Makefile.kcov
+
KCSAN
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-S: Orphan
+S: Maintained
F: arch/mips/include/asm/kvm*
F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/kvm/
S: Maintained
-F: Documentation/kprobes.txt
+F: Documentation/trace/kprobes.rst
F: include/asm-generic/kprobes.h
F: include/linux/kprobes.h
F: kernel/kprobes.c
F: scripts/leaking_addresses.pl
LED SUBSYSTEM
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
F: Documentation/devicetree/bindings/leds/
F: drivers/leds/
S: Supported
F: Documentation/atomic_t.txt
F: Documentation/core-api/atomic_ops.rst
F: Documentation/core-api/refcount-vs-atomic.rst
+F: Documentation/litmus-tests/
F: Documentation/memory-barriers.txt
F: tools/memory-model/
S: Supported
-F: Documentation/networking/device_drivers/marvell/octeontx2.rst
+F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
+MAX9286 QUAD GMSL DESERIALIZER DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
+F: drivers/media/i2c/max9286.c
+
MAX9860 MONO AUDIO VOICE CODEC DRIVER
S: Supported
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/renesas,fcp.txt
+F: Documentation/devicetree/bindings/media/renesas,fcp.yaml
F: drivers/media/platform/rcar-fcp.c
F: include/media/rcar-fcp.h
S: Supported
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/renesas,fdp1.txt
+F: Documentation/devicetree/bindings/media/renesas,fdp1.yaml
F: drivers/media/platform/rcar_fdp1.c
MEDIA DRIVERS FOR RENESAS - VIN
S: Supported
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/renesas,vsp1.txt
+F: Documentation/devicetree/bindings/media/renesas,vsp1.yaml
F: drivers/media/platform/vsp1/
MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs
F: drivers/dma/mediatek/
MEDIATEK ETHERNET DRIVER
-M: Felix Fietkau <nbd@openwrt.org>
+M: Felix Fietkau <nbd@nbd.name>
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
-F: Documentation/networking/device_drivers/mellanox/
+F: Documentation/networking/device_drivers/ethernet/mellanox/
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
F: include/linux/memblock.h
F: mm/memblock.c
+MEMORY CONTROLLER DRIVERS
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
+F: Documentation/devicetree/bindings/memory-controllers/
+F: drivers/memory/
+
+MEMORY FREQUENCY SCALING DRIVERS FOR NVIDIA TEGRA
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
+S: Maintained
+F: drivers/devfreq/tegra20-devfreq.c
+F: drivers/devfreq/tegra30-devfreq.c
+
MEMORY MANAGEMENT
T: git git://git.monstr.eu/linux-2.6-microblaze.git
F: arch/microblaze/
+MICROCHIP AT91 DMA DRIVERS
+S: Supported
+F: Documentation/devicetree/bindings/dma/atmel-dma.txt
+F: drivers/dma/at_hdmac.c
+F: drivers/dma/at_hdmac_regs.h
+F: drivers/dma/at_xdmac.c
+F: include/dt-bindings/dma/at91.h
+F: include/linux/platform_data/dma-atmel.h
+
MICROCHIP AT91 SERIAL DRIVER
S: Maintained
S: Supported
F: sound/soc/atmel
-MICROCHIP DMA DRIVER
-S: Supported
-F: Documentation/devicetree/bindings/dma/atmel-dma.txt
-F: drivers/dma/at_hdmac.c
-F: drivers/dma/at_hdmac_regs.h
-F: include/dt-bindings/dma/at91.h
-F: include/linux/platform_data/dma-atmel.h
-
MICROCHIP ECC DRIVER
F: drivers/crypto/atmel-ecc.*
MICROCHIP I2C DRIVER
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
+M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
S: Supported
F: drivers/i2c/busses/i2c-at91-*.c
F: drivers/pwm/pwm-atmel.c
MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
S: Supported
F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
-M: Nicolas Ferre <nicolas.ferre@microchip.com>
+M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
MICROCHIP SPI DRIVER
-M: Nicolas Ferre <nicolas.ferre@microchip.com>
+M: Tudor Ambarus <tudor.ambarus@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*
MICROCHIP SSC DRIVER
-M: Nicolas Ferre <nicolas.ferre@microchip.com>
+M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
S: Supported
F: drivers/misc/atmel-ssc.c
S: Supported
F: drivers/usb/gadget/udc/atmel_usba_udc.*
-MICROCHIP XDMA DRIVER
-S: Supported
-F: drivers/dma/at_xdmac.c
-
-MICROSEMI ETHERNET SWITCH DRIVER
+MICROCHIP WILC1000 WIFI DRIVER
S: Supported
-F: drivers/net/ethernet/mscc/
-F: include/soc/mscc/ocelot*
+F: drivers/net/wireless/microchip/wilc1000/
MICROSEMI MIPS SOCS
F: drivers/clk/imgtec/clk-boston.c
F: include/dt-bindings/clock/boston-clock.h
+MIPS CORE DRIVERS
+S: Supported
+F: drivers/bus/mips_cdmm.c
+F: drivers/clocksource/mips-gic-timer.c
+F: drivers/cpuidle/cpuidle-cps.c
+F: drivers/irqchip/irq-mips-cpu.c
+F: drivers/irqchip/irq-mips-gic.c
+
MIPS GENERIC PLATFORM
F: include/uapi/linux/meye.h
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
-M: Jiri Slaby <jirislaby@gmail.com>
+M: Jiri Slaby <jirislaby@kernel.org>
S: Maintained
F: Documentation/driver-api/serial/moxa-smartio.rst
F: drivers/tty/mxser.*
S: Supported
-F: Documentation/networking/device_drivers/neterion/s2io.rst
-F: Documentation/networking/device_drivers/neterion/vxge.rst
+F: Documentation/networking/device_drivers/ethernet/neterion/s2io.rst
+F: Documentation/networking/device_drivers/ethernet/neterion/vxge.rst
F: drivers/net/ethernet/neterion/
NETFILTER
NVMEM FRAMEWORK
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/srini/nvmem.git
F: Documentation/ABI/stable/sysfs-bus-nvmem
F: Documentation/devicetree/bindings/nvmem/
F: drivers/nvmem/
S: Supported
F: tools/objtool/
+OCELOT ETHERNET SWITCH DRIVER
+S: Supported
+F: drivers/net/dsa/ocelot/*
+F: drivers/net/ethernet/mscc/
+F: include/soc/mscc/ocelot*
+F: net/dsa/tag_ocelot.c
+
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
OP-TEE DRIVER
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-optee-devices
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
S: Maintained
F: drivers/char/hw_random/optee-rng.c
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
F: drivers/pci/controller/dwc/*designware*
-PCI DRIVER FOR TI DRA7XX
+PCI DRIVER FOR TI DRA7XX/J721E
S: Supported
F: Documentation/devicetree/bindings/pci/ti-pci.txt
+F: drivers/pci/controller/cadence/pci-j721e.c
F: drivers/pci/controller/dwc/pci-dra7xx.c
PCI DRIVER FOR TI KEYSTONE
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
S: Supported
S: Supported
-F: Documentation/networking/device_drivers/pensando/ionic.rst
+F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
F: drivers/net/ethernet/pensando/
PER-CPU MEMORY ALLOCATOR
F: include/linux/pktcdvd.h
F: include/uapi/linux/pktcdvd.h
-PKUNITY SOC DRIVERS
-S: Maintained
-W: http://mprc.pku.edu.cn/~guanxuetao/linux
-T: git git://github.com/gxt/linux.git
-F: drivers/i2c/busses/i2c-puv3.c
-F: drivers/input/serio/i8042-unicore32io.h
-F: drivers/rtc/rtc-puv3.c
-F: drivers/video/fbdev/fb-puv3.c
-
PLANTOWER PMS7003 AIR POLLUTION SENSOR DRIVER
S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
F: drivers/iio/chemical/pms7003.c
+PLDMFW LIBRARY
+S: Maintained
+F: Documentation/driver-api/pldmfw/
+F: include/linux/pldmfw.h
+F: lib/pldmfw/
+
PLX DMA DRIVER
S: Maintained
S: Supported
-F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
+F: Documentation/networking/device_drivers/ethernet/qlogic/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLA4XXX iSCSI DRIVER
S: Maintained
F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-F: Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
+F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
F: drivers/bus/fsl-mc/
QT1010 MEDIA DRIVER
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
QUALCOMM GENERIC INTERFACE I2C DRIVER
S: Supported
S: Supported
F: drivers/dma/qcom/hidma*
+QUALCOMM I2C CCI DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
+F: drivers/i2c/busses/i2c-qcom-cci.c
+
QUALCOMM IOMMU
S: Maintained
-F: Documentation/networking/device_drivers/qualcomm/rmnet.rst
+F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
F: drivers/net/ethernet/qualcomm/rmnet/
F: include/linux/if_rmnet.h
QUALCOMM TSENS THERMAL DRIVER
-M: Amit Kucheria <amit.kucheria@linaro.org>
+M: Amit Kucheria <amitk@kernel.org>
S: Maintained
S: Orphan
F: drivers/net/wireless/ray*
+RC-CORE / LIRC FRAMEWORK
+S: Maintained
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/driver-api/media/rc-core.rst
+F: Documentation/userspace-api/media/rc/
+F: drivers/media/rc/
+F: include/media/rc-map.h
+F: include/media/rc-core.h
+F: include/uapi/linux/lirc.h
+
RCMM REMOTE CONTROLS DECODER
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: tools/testing/selftests/rcutorture
+RDACM20 Camera Sensor
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
+F: drivers/media/i2c/rdacm20.c
+F: drivers/media/i2c/max9271.c
+F: drivers/media/i2c/max9271.h
+
RDC R-321X SoC
S: Maintained
F: Documentation/RCU/
F: include/linux/rcu*
F: kernel/rcu/
-X: Documentation/RCU/torture.txt
+X: Documentation/RCU/torture.rst
X: include/linux/srcu*.h
X: kernel/rcu/srcu*.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
F: Documentation/ABI/testing/sysfs-class-remoteproc
F: Documentation/devicetree/bindings/remoteproc/
-F: Documentation/remoteproc.txt
+F: Documentation/staging/remoteproc.rst
F: drivers/remoteproc/
F: include/linux/remoteproc.h
F: include/linux/remoteproc/
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
F: Documentation/ABI/testing/sysfs-bus-rpmsg
-F: Documentation/rpmsg.txt
+F: Documentation/staging/rpmsg.rst
F: drivers/rpmsg/
F: include/linux/rpmsg.h
F: include/linux/rpmsg/
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS
-R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+R: Sergei Shtylyov <sergei.shtylyov@gmail.com>
F: Documentation/devicetree/bindings/net/renesas,*.txt
S: Supported
-F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
-F: Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+F: Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
F: drivers/thermal/rcar_gen3_thermal.c
F: drivers/thermal/rcar_thermal.c
F: drivers/video/fbdev/savage/
S390
-M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Heiko Carstens <hca@linux.ibm.com>
F: include/linux/dasd_mod.h
S390 IOMMU (PCI)
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
S390 PCI SUBSYSTEM
-M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: include/linux/mmc/sdhci*
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) MICROCHIP DRIVER
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
+M: Eugen Hristev <eugen.hristev@microchip.com>
S: Supported
F: drivers/mmc/host/sdhci-of-at91.c
F: security/selinux/
SENSABLE PHANTOM
-M: Jiri Slaby <jirislaby@gmail.com>
+M: Jiri Slaby <jirislaby@kernel.org>
S: Maintained
F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
+SENSIRION SCD30 CARBON DIOXIDE SENSOR DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/iio/chemical/sensirion,scd30.yaml
+F: drivers/iio/chemical/scd30.h
+F: drivers/iio/chemical/scd30_core.c
+F: drivers/iio/chemical/scd30_i2c.c
+F: drivers/iio/chemical/scd30_serial.c
+
SENSIRION SPS30 AIR POLLUTION SENSOR DRIVER
S: Maintained
F: drivers/net/phy/sfp*
F: include/linux/phylink.h
F: include/linux/sfp.h
-K: phylink
+K: phylink\.h|struct\s+phylink|\.phylink|>phylink_|phylink_(autoneg|clear|connect|create|destroy|disconnect|ethtool|helper|mac|mii|of|set|start|stop|test|validate)
SGI GRU DRIVER
S: Maintained
F: drivers/net/ethernet/smsc/smsc9420.*
-SOC-CAMERA V4L2 SUBSYSTEM
-S: Orphan
-T: git git://linuxtv.org/media_tree.git
-F: drivers/staging/media/soc_camera/
-F: include/media/soc_camera.h
-
SOCIONEXT (SNI) AVE NETWORK DRIVER
SOUNDWIRE SUBSYSTEM
-M: Sanyog Kale <sanyog.r.kale@intel.com>
+M: Bard Liao <yung-chuan.liao@linux.intel.com>
S: Supported
F: Documentation/driver-api/soundwire/
S: Maintained
-W: https://sparse.wiki.kernel.org/
+W: https://sparse.docs.kernel.org/
T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+Q: https://patchwork.kernel.org/project/linux-sparse/list/
+B: https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools
F: include/linux/compiler.h
+SPEAKUP CONSOLE SPEECH DRIVER
+S: Odd Fixes
+W: http://www.linux-speakup.org/
+F: drivers/accessibility/speakup/
+
SPEAR CLOCK FRAMEWORK SUPPORT
S: Supported
-F: Documentation/networking/device_drivers/toshiba/spider_net.rst
+F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
F: drivers/net/ethernet/toshiba/spider_net*
SPMI SUBSYSTEM
S: Maintained
F: drivers/staging/sm750fb/
-STAGING - SPEAKUP CONSOLE SPEECH DRIVER
-S: Odd Fixes
-W: http://www.linux-speakup.org/
-F: drivers/staging/speakup/
-
STAGING - VIA VT665X DRIVERS
S: Odd Fixes
F: drivers/staging/vt665?/
-STAGING - WILC1000 WIFI DRIVER
-S: Supported
-F: drivers/staging/wilc1000/
-
STAGING SUBSYSTEM
S: Supported
W: http://www.stlinux.com
-F: Documentation/networking/device_drivers/stmicro/
+F: Documentation/networking/device_drivers/ethernet/stmicro/
F: drivers/net/ethernet/stmicro/stmmac/
SUN3/3X
TEE SUBSYSTEM
S: Maintained
-F: Documentation/tee.txt
+F: Documentation/staging/tee.rst
F: drivers/tee/
F: include/linux/tee_drv.h
F: include/uapi/linux/tee.h
THERMAL
S: Supported
Q: https://patchwork.kernel.org/project/linux-pm/list/
F: drivers/thermal/cpuidle_cooling.c
F: include/linux/cpu_cooling.h
+THERMAL/POWER_ALLOCATOR
+S: Maintained
+F: Documentation/driver-api/thermal/power_allocator.rst
+F: drivers/thermal/gov_power_allocator.c
+F: include/trace/events/thermal_power_allocator.h
+
THINKPAD ACPI EXTRAS DRIVER
S: Maintained
W: http://sourceforge.net/projects/tlan/
-F: Documentation/networking/device_drivers/ti/tlan.rst
+F: Documentation/networking/device_drivers/ethernet/ti/tlan.rst
F: drivers/net/ethernet/ti/tlan.*
TM6000 VIDEO4LINUX DRIVER
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
-F: Documentation/RCU/torture.txt
+F: Documentation/RCU/torture.rst
F: kernel/locking/locktorture.c
F: kernel/rcu/rcuperf.c
F: kernel/rcu/rcutorture.c
TTY LAYER
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: Documentation/driver-api/serial/
F: fs/ufs/
UHID USERSPACE HID IO DRIVER
-M: David Herrmann <dh.herrmann@googlemail.com>
+M: David Rheinsberg <david.rheinsberg@gmail.com>
S: Maintained
F: drivers/hid/uhid.c
S: Supported
F: fs/unicode/
-UNICORE32 ARCHITECTURE
-S: Maintained
-W: http://mprc.pku.edu.cn/~guanxuetao/linux
-T: git git://github.com/gxt/linux.git
-F: arch/unicore32/
-
UNIFDEF
S: Maintained
F: drivers/input/serio/userio.c
F: include/uapi/linux/userio.h
-VITESSE FELIX ETHERNET SWITCH DRIVER
-S: Maintained
-F: drivers/net/dsa/ocelot/*
-F: net/dsa/tag_ocelot.c
-
VIVID VIRTUAL VIDEO DRIVER
F: drivers/rtc/rtc-sd3078.c
WIIMOTE HID DRIVER
-M: David Herrmann <dh.herrmann@googlemail.com>
+M: David Rheinsberg <david.rheinsberg@gmail.com>
S: Maintained
F: drivers/hid/hid-wiimote*
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
+XILINX ZYNQMP DPDMA DRIVER
+S: Supported
+F: Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
+F: drivers/dma/xilinx/xilinx_dpdma.c
+F: include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
+
+XILINX ZYNQMP PSGTR PHY DRIVER
+S: Supported
+T: git https://github.com/Xilinx/linux-xlnx.git
+F: Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
+F: drivers/phy/xilinx/phy-zynqmp.c
+
XILLYBUS DRIVER
S: Maintained
W: http://yaina.de/jreuter/
W: http://www.qsl.net/dl1bke/
-F: Documentation/networking/z8530drv.rst
+F: Documentation/networking/device_drivers/hamradio/z8530drv.rst
F: drivers/net/hamradio/*scc.c
F: drivers/net/hamradio/z8530.h
pci_save_state(pdev);
}
- static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s)
+ static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
- if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
- eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+ if (eeh_ops->notify_resume)
+ eeh_ops->notify_resume(edev);
#endif
return PCI_ERS_RESULT_NONE;
}
}
#ifdef CONFIG_PCI_IOV
- pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index);
+ pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
return NULL;
}
if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
-
- pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
+ pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
edev->pdev = NULL;
#endif
if (rmv_data)
continue;
edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
- eeh_rmv_from_parent_pe(edev);
+ eeh_pe_tree_remove(edev);
}
return NULL;
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
return 0;
}
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
int st;
failed_hw_buffers_init:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
failed_hw_setup:
rsxx_eeh_failure(dev);
/**
* ioat_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
+ * @c: the channel to be cleaned
*/
static void ioat_free_chan_resources(struct dma_chan *c)
{
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
if (err)
- return err;
+ return pcibios_err_to_errno(err);
/* clear relaxed ordering enable */
val16 &= ~IOAT_DEVCTRL_ROE;
err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
if (err)
- return err;
+ return pcibios_err_to_errno(err);
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
#define DRV_NAME "ioatdma"
static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
return g_iommus[iommu_id];
}
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+ return sm_supported(iommu) ?
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
for_each_domain_iommu(i, domain) {
found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
+ if (!iommu_paging_structure_coherency(g_iommus[i])) {
domain->iommu_coherency = 0;
break;
}
/* No hardware attached; use lowest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
+ if (!iommu_paging_structure_coherency(iommu)) {
domain->iommu_coherency = 0;
break;
}
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain_use_first_level(domain))
- pteval |= DMA_FL_PTE_XD;
+ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
- context->hi |= (1 << 20);
}
/*
context_set_fault_enable(context);
context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
/*
* It's a non-present to present mapping. If hardware doesn't cache
unsigned long nr_pages, int prot)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t uninitialized_var(pteval);
+ phys_addr_t pteval;
unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
if (domain_use_first_level(domain))
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (!sg) {
sg_res = nr_pages;
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
+ pci_pri_supported(pdev))
info->pri_supported = 1;
}
}
end >> agaw_to_width(si_domain->agaw)))
continue;
- ret = iommu_domain_identity_map(si_domain, start, end);
+ ret = iommu_domain_identity_map(si_domain,
+ mm_to_dma_pfn(start >> PAGE_SHIFT),
+ mm_to_dma_pfn(end >> PAGE_SHIFT));
if (ret)
return ret;
}
NULL,
};
- static inline bool has_untrusted_dev(void)
+ static inline bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev)
- if (pdev->untrusted)
+ if (pdev->external_facing)
return true;
return false;
static int __init platform_optin_force_iommu(void)
{
- if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
+ if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
return 0;
if (no_iommu || dmar_disabled)
return ret;
}
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+ if (pdev->untrusted) {
+ pci_info(pdev,
+ "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+ pdev->vendor, pdev->device);
+ pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+ return true;
+ }
+ return false;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
static void quirk_iommu_igfx(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
{
unsigned short ggc;
+ if (risky_device(dev))
+ return;
+
if (pci_read_config_word(dev, GGC, &ggc))
return;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
if (!pdev)
return;
+
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
pci_dev_put(pdev);
/* System Management Registers. Might be hidden, in which case
if (!pdev)
return;
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
pci_dev_put(pdev);
return;
/**
* genwqe_bus_reset() - Card recovery
+ * @cd: GenWQE device information
*
* pci_reset_function() will recover the device and ensure that the
* registers are accessible again when it completes with success. If
/**
 * genwqe_recovery_on_fatal_gfir_required() - Version-dependent actions
+ * @cd: GenWQE device information
*
* Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
* be ignored. This is e.g. true for the bitstream we gave to the card
/**
* genwqe_T_psec() - Calculate PF/VF timeout register content
+ * @cd: GenWQE device information
*
* Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
/**
* genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*
* Do this _after_ card_reset() is called. Otherwise the values will
* vanish. The settings need to be done when the queues are inactive.
/**
* genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*/
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
/**
* genwqe_stop() - Stop card operation
+ * @cd: GenWQE device information
*
* Recovery notes:
* As long as genwqe_thread runs we might access registers during
/**
* genwqe_recover_card() - Try to recover the card if it is possible
+ * @cd: GenWQE device information
+ * @fatal_err: Indicate whether to attempt soft reset
*
* If fatal_err is set no register access is possible anymore. It is
* likely that genwqe_start fails in that situation. Proper error
/**
* genwqe_fir_checking() - Check the fault isolation registers of the card
+ * @cd: GenWQE device information
*
 * If this code works ok, it can be tried out with the help of the genwqe_poke tool:
* sudo ./tools/genwqe_poke 0x8 0xfefefefefef
/**
* genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
+ * @pci_dev: PCI device information struct
*
* Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
* reset method will not work in all cases.
return rc;
}
-/*
+/**
* genwqe_reload_bistream() - reload card bitstream
+ * @cd: GenWQE device information
*
 * Set the appropriate register and call fundamental reset to reload the card
* bitstream.
/**
* genwqe_health_thread() - Health checking thread
+ * @data: GenWQE device information
*
* This thread is only started for the PF of the card.
*
static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
- int rc;
-
if (!genwqe_health_thread_running(cd))
return -EIO;
- rc = kthread_stop(cd->health_thread);
+ kthread_stop(cd->health_thread);
cd->health_thread = NULL;
return 0;
}
/**
* genwqe_pci_setup() - Allocate PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
/**
* genwqe_pci_remove() - Free PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
/**
* genwqe_probe() - Device initialization
- * @pdev: PCI device information struct
+ * @pci_dev: PCI device information struct
+ * @id: PCI device ID
*
* Callable for multiple cards. This function is called on bind.
*
/**
* genwqe_remove() - Called when device is removed (hot-plugable)
+ * @pci_dev: PCI device information struct
*
 * Or when the driver is unloaded, respectively when unbind is done.
*/
genwqe_dev_free(cd);
}
-/*
+/**
* genwqe_err_error_detected() - Error detection callback
+ * @pci_dev: PCI device information struct
+ * @state: PCI channel state
*
* This callback is called by the PCI subsystem whenever a PCI bus
* error is detected.
*/
static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
- enum pci_channel_state state)
+ pci_channel_state_t state)
{
struct genwqe_dev *cd;
return 0;
}
-static struct pci_error_handlers genwqe_err_handler = {
+static const struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
/**
* genwqe_devnode() - Set default access mode for genwqe devices.
+ * @dev: Pointer to device (unused)
+ * @mode: Carrier to pass-back given mode (permissions)
*
* Default mode should be rw for everybody. Do not change default
* device name.
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
+#include <generated/utsrelease.h>
/* Local includes */
#include "i40e.h"
static const char i40e_driver_string[] =
"Intel(R) Ethernet Connection XL710 Network Driver";
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 20
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) DRV_KERN
-const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* a bit of forward declarations */
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type);
-
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
/* i40e_pci_tbl - PCI Device ID Table
*
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *i40e_wq;
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* locate XDP ring */
+ p = READ_ONCE(vsi->xdp_rings[q]);
+ if (!p)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
+ tx_force_wb += p->tx_stats.tx_force_wb;
+ }
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
num_tc_qps);
break;
}
- /* fall through */
+ fallthrough;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
return err;
}
#endif /* CONFIG_I40E_DCB */
-#define SPEED_SIZE 14
-#define FC_SIZE 8
+
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
-/**
- * i40e_up - Bring the connection back up after being down
- * @vsi: the VSI being configured
- **/
-int i40e_up(struct i40e_vsi *vsi)
-{
- int err;
-
- err = i40e_vsi_configure(vsi);
- if (!err)
- err = i40e_up_complete(vsi);
-
- return err;
-}
-
/**
* i40e_force_link_state - Force the link status
* @pf: board private structure
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
+ bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
i40e_status err;
u64 mask;
/* If link needs to go up, but was not forced to go down,
* and its speed values are OK, no need for a flap
+ * if non_zero_phy_type was set, still need to force up
*/
- if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ non_zero_phy_type = true;
+ else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return I40E_SUCCESS;
/* To force link we need to set bits for all supported PHY types,
* across two fields.
*/
mask = I40E_PHY_TYPES_BITMASK;
- config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
- config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
+ config.phy_type =
+ non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+ config.phy_type_ext =
+ non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (is_up)
+ config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+ else
+ config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+ }
if (abilities.link_speed != 0)
config.link_speed = abilities.link_speed;
else
i40e_update_link_info(hw);
}
- i40e_aq_set_link_restart_an(hw, true, NULL);
+ i40e_aq_set_link_restart_an(hw, is_up, NULL);
return I40E_SUCCESS;
}
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ i40e_force_link_state(vsi->back, true);
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
/**
* i40e_down - Shutdown the connection processing
* @vsi: the VSI being stopped
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
if (vsi->type == I40E_VSI_MAIN &&
- vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
-/* We can see up to 256 filter programming desc in transit if the filters are
- * being applied really fast; before we see the first
- * filter miss error on Rx queue 0. Accumulating enough error messages before
- * reacting will make sure we don't cause flush too often.
- */
-#define I40E_MAX_FD_PROGRAM_ERROR 256
-
/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
{
struct i40e_driver_version dv;
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
+ dv.major_version = 0xff;
+ dv.minor_version = 0xff;
+ dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
return ret;
}
+/**
+ * i40e_is_total_port_shutdown_enabled - read NVM and return value
+ * if total port shutdown feature is enabled for this PF
+ * @pf: board private structure
+ **/
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+{
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
+#define I40E_FEATURES_ENABLE_PTR 0x2A
+#define I40E_CURRENT_SETTING_PTR 0x2B
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
+ i40e_status read_status = I40E_SUCCESS;
+ u16 sr_emp_sr_settings_ptr = 0;
+ u16 features_enable = 0;
+ u16 link_behavior = 0;
+ bool ret = false;
+
+ read_status = i40e_read_nvm_word(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ &sr_emp_sr_settings_ptr);
+ if (read_status)
+ goto err_nvm;
+ read_status = i40e_read_nvm_word(&pf->hw,
+ sr_emp_sr_settings_ptr +
+ I40E_FEATURES_ENABLE_PTR,
+ &features_enable);
+ if (read_status)
+ goto err_nvm;
+ if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
+ read_status = i40e_read_nvm_module_data(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ I40E_CURRENT_SETTING_PTR,
+ I40E_LINK_BEHAVIOR_WORD_OFFSET,
+ I40E_LINK_BEHAVIOR_WORD_LENGTH,
+ &link_behavior);
+ if (read_status)
+ goto err_nvm;
+ link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
+ ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
+ }
+ return ret;
+
+err_nvm:
+ dev_warn(&pf->pdev->dev,
+ "total-port-shutdown feature is off due to read nvm error: %s\n",
+ i40e_stat_str(&pf->hw, read_status));
+ return ret;
+}
+
/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
pf->tx_timeout_recovery_level = 1;
+ if (pf->hw.mac.type != I40E_MAC_X722 &&
+ i40e_is_total_port_shutdown_enabled(pf)) {
+ /* Link down on close must be on when total port shutdown
+ * is enabled for a given port
+ */
+ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ dev_info(&pf->pdev->dev,
+ "total-port-shutdown was enabled, link-down-on-close is forced on\n");
+ }
mutex_init(&pf->switch_mutex);
sw_init_done:
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
/* Setup DCB netlink interface */
i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
- /* fall through */
-
+ fallthrough;
case I40E_VSI_FDIR:
/* set up vectors and rings if needed */
ret = i40e_vsi_setup_vectors(vsi);
i40e_vsi_reset_stats(vsi);
break;
-
default:
/* no netdev or rings for the other VSI types */
break;
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
- bool is_recovery_mode = false;
-
- if (pf->hw.mac.type == I40E_MAC_XL710)
- is_recovery_mode =
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
- if (pf->hw.mac.type == I40E_MAC_X722)
- is_recovery_mode =
- val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
- val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
- if (is_recovery_mode) {
- dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
- dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
return true;
}
- if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
- dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
return false;
}
**/
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
- const unsigned short MAX_CNT = 1000;
- const unsigned short MSECS = 10;
+ /* wait max 10 seconds for PF reset to succeed */
+ const unsigned long time_end = jiffies + 10 * HZ;
+
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- int cnt;
- for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+ usleep_range(10000, 20000);
ret = i40e_pf_reset(hw);
- if (!ret)
- break;
- msleep(MSECS);
}
- if (cnt == MAX_CNT) {
+ if (ret == I40E_SUCCESS)
+ pf->pfr_count++;
+ else
dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
- return ret;
- }
- pf->pfr_count++;
return ret;
}
+/**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
+ * Every time when unexpected EMP Reset occurs the FW increments
+ * a counter of unexpected EMP Resets. When the counter reaches 10
+ * the FW should enter the Recovery mode
+ *
+ * Returns true if FW issued unexpected EMP Reset
+ **/
+static bool i40e_check_fw_empr(struct i40e_pf *pf)
+{
+ const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
+ I40E_GL_FWSTS_FWS1B_MASK;
+ return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
+ (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
+}
+
+/**
+ * i40e_handle_resets - handle EMP resets and PF resets
+ * @pf: board private structure
+ *
+ * Handle both EMP resets and PF resets and conclude whether there are
+ * any issues regarding these resets. If there are any issues, then
+ * generate a log entry.
+ *
+ * Return 0 if the NIC is healthy, or a negative value when there are
+ * issues with the resets
+ **/
+static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+{
+ const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const bool is_empr = i40e_check_fw_empr(pf);
+
+ if (is_empr || pfr != I40E_SUCCESS)
+ dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+
+ return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+}
+
/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
goto err_pf_reset;
}
- err = i40e_pf_loop_reset(pf);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ err = i40e_handle_resets(pf);
+ if (err)
goto err_pf_reset;
- }
i40e_check_recovery_mode(pf);
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* make sure the MFS hasn't been set lower than the default */
+#define MAX_FRAME_SIZE_DEFAULT 0x2600
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+ dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+ i, val);
+
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
* PAUSE or PFC frames and potentially controlling traffic for other
* remediation.
**/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
**/
static int __init i40e_init_module(void)
{
- pr_info("%s: %s - version %s\n", i40e_driver_name,
- i40e_driver_string, i40e_driver_version_str);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
/* There is no need to throttle the number of active tasks because
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 2
-
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) "-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
-const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
~IFF_PROMISC;
goto out_promisc;
}
+ ice_cfg_vlan_pruning(vsi, false, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
IFF_PROMISC;
goto out_promisc;
}
+ if (vsi->num_vlan > 1)
+ ice_cfg_vlan_pruning(vsi, true, false);
}
}
}
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
+ const char *an_advertised;
enum ice_status status;
const char *fec_req;
const char *speed;
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps) {
fec_req = "Unknown";
+ an_advertised = "Unknown";
goto done;
}
if (status)
netdev_info(vsi->netdev, "Get phy capability failed.\n");
+ an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
+
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
fec_req = "RS-FEC";
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
- speed, fec_req, fec, an, fc);
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
+ speed, fec_req, fec, an_advertised, an, fc);
ice_print_topo_conflict(vsi);
}
}
}
+/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function errors out at any point, the driver is still able to
+ * function. The main impact is that LFC may not operate as expected.
+ * Therefore an error state in this function is logged with a DBG
+ * message and the driver continues on with rebuild/re-enable.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u8 mib_type, *buf, *lldpmib = NULL;
+ u16 len, typelen, offset = 0;
+ struct ice_lldp_org_tlv *tlv;
+ struct ice_hw *hw;
+ u32 ouisubtype;
+
+ if (!pf) {
+ dev_dbg(dev, "%s NULL pf pointer\n", __func__);
+ return;
+ }
+
+ hw = &pf->hw;
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib) {
+ dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+ __func__);
+ return;
+ }
+
+ /* Add ETS CFG TLV */
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf = tlv->tlvinfo;
+ buf[0] = 0;
+
+ /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
+ * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add ETS REC TLV */
+ buf = tlv->tlvinfo;
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet of buf is reserved
+ * Octets 1 - 4 map UP to TC - all UPs map to zero
+ * Octets 5 - 12 are BW values - set TC 0 to 100%.
+ * Octets 13 - 20 are TSA value - leave as zeros
+ */
+ buf[5] = 0x64;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add PFC CFG TLV */
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Octet 1 left as all zeros - PFC disabled */
+ buf[0] = 0x08;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+
+ if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+ dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+ kfree(lldpmib);
+}
+
/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
- /* if the old link up/down and speed is the same as the new */
- if (link_up == old_link && link_speed == old_link_speed)
- return result;
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+ link_up = true;
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
}
}
- ice_dcb_rebuild(pf);
+ /* if the old link up/down and speed is the same as the new */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
+
+ if (ice_is_dcb_active(pf)) {
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+ } else {
+ if (link_up)
+ ice_set_dflt_mib(pf);
+ }
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
return status;
}
+enum ice_aq_task_state {
+ ICE_AQ_TASK_WAITING = 0,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+
+ u16 opcode;
+ struct ice_rq_event_info *event;
+ enum ice_aq_task_state state;
+};
+
+/**
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode to wait for
+ * @timeout: how long to wait, in jiffies
+ * @event: storage for the event info
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * To obtain only the descriptor contents, pass an event without an allocated
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * event->msg_buf with enough space ahead of time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ long ret;
+ int err;
+
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->event = event;
+ task->state = ICE_AQ_TASK_WAITING;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ timeout);
+ switch (task->state) {
+ case ICE_AQ_TASK_WAITING:
+ err = ret < 0 ? ret : -ETIMEDOUT;
+ break;
+ case ICE_AQ_TASK_CANCELED:
+ err = ret < 0 ? ret : -ECANCELED;
+ break;
+ case ICE_AQ_TASK_COMPLETE:
+ err = ret < 0 ? ret : 0;
+ break;
+ default:
+ WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ err = -EINVAL;
+ break;
+ }
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_del(&task->entry);
+ spin_unlock_bh(&pf->aq_wait_lock);
+ kfree(task);
+
+ return err;
+}
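
A minimal caller sketch for the wait API above. It assumes the ICE_AQ_MAX_BUF_LEN macro and the ice_aqc_opc_nvm_write opcode from the wider driver; the specific opcode and 3-second timeout are illustrative, not from this patch:

	static int example_wait_for_nvm_write(struct ice_pf *pf)
	{
		struct ice_rq_event_info event = {};
		u8 *buf;
		int err;

		buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Provide a destination buffer so the full message is copied
		 * back; without it only the descriptor and message length are
		 * returned.
		 */
		event.buf_len = ICE_AQ_MAX_BUF_LEN;
		event.msg_buf = buf;

		/* Sleep until firmware posts the completion or 3 seconds elapse */
		err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 3 * HZ,
					    &event);
		if (err == -ETIMEDOUT)
			dev_warn(ice_pf_to_dev(pf),
				 "timed out waiting for AdminQ completion\n");

		kfree(buf);
		return err;
	}
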
+
+/**
+ * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode of the event
+ * @event: the event to check
+ *
+ * Loops over the current list of pending threads waiting for an AdminQ event.
+ * For each matching task, copy the contents of the event into the task
+ * structure and wake up the thread.
+ *
+ * If multiple threads wait for the same opcode, they will all be woken up.
+ *
+ * Note that event->msg_buf will only be duplicated if the event has a buffer
+ * with enough space already allocated. Otherwise, only the descriptor and
+ * message length will be copied.
+ */
+static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ bool found = false;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
+ if (task->state || task->opcode != opcode)
+ continue;
+
+ memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+ task->event->msg_len = event->msg_len;
+
+ /* Only copy the data buffer if a destination was set */
+ if (task->event->msg_buf &&
+ task->event->buf_len > event->buf_len) {
+ memcpy(task->event->msg_buf, event->msg_buf,
+ event->buf_len);
+ task->event->buf_len = event->buf_len;
+ }
+
+ task->state = ICE_AQ_TASK_COMPLETE;
+ found = true;
+ }
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ if (found)
+ wake_up(&pf->aq_wait_queue);
+}
+
+/**
+ * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
+ * @pf: the PF private structure
+ *
+ * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
+ * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
+ */
+static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
+{
+ struct ice_aq_task *task;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry)
+ task->state = ICE_AQ_TASK_CANCELED;
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ wake_up(&pf->aq_wait_queue);
+}
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
opcode = le16_to_cpu(event.desc.opcode);
+ /* Notify any thread that might be waiting for this event */
+ ice_aq_check_events(pf, opcode, &event);
+
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
/**
* ice_service_task_stop - stop service task and cancel works
* @pf: board private structure
+ *
+ * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
+ * 1 otherwise.
*/
-static void ice_service_task_stop(struct ice_pf *pf)
+static int ice_service_task_stop(struct ice_pf *pf)
{
- set_bit(__ICE_SERVICE_DIS, pf->state);
+ int ret;
+
+ ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
cancel_work_sync(&pf->serv_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ return ret;
}
/**
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ /* Use the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from the PHY capabilities
+ * (software mode), and updated whenever the PHY configuration is set.
+ */
+ cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
}
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
if (link_up)
cfg->caps |= ICE_AQ_PHY_ENA_LINK;
else
cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
if (retcode) {
dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
vsi->vsi_num, retcode);
}
/**
- * ice_check_media_subtask - Check for media; bring link up if detected.
+ * ice_init_nvm_phy_type - Initialize the NVM PHY type
+ * @pi: port info structure
+ *
+ * Initialize nvm_phy_type_[low|high] for link lenient mode support
+ */
+static int ice_init_nvm_phy_type(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ int err = 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+ NULL);
+
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto out;
+ }
+
+ pf->nvm_phy_type_hi = pcaps->phy_type_high;
+ pf->nvm_phy_type_lo = pcaps->phy_type_low;
+
+out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_init_link_dflt_override - Initialize link default override
+ * @pi: port info structure
+ *
+ * Initialize link default override and PHY total port shutdown during probe
+ */
+static void ice_init_link_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+ if (ice_get_link_default_override(ldo, pi))
+ return;
+
+ if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
+ return;
+
+ /* Enable Total Port Shutdown (override/replace link-down-on-close
+ * ethtool private flag) for ports with Port Disable bit set.
+ */
+ set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+}
+
+/**
+ * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
+ * @pi: port info structure
+ *
+ * If default override is enabled, initialize the user PHY cfg speed and FEC
+ * settings using the default override mask from the NVM.
+ *
+ * The PHY should only be configured with the default override settings the
+ * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
+ * is used to indicate that the user PHY cfg default override is initialized
+ * and the PHY has not been configured with the default override settings. The
+ * state is set here, and cleared in ice_configure_phy the first time the PHY is
+ * configured.
+ */
+static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+
+ /* If link default override is enabled, use it to mask NVM PHY capabilities
+ * for speed and FEC default configuration.
+ */
+ cfg = &phy->curr_user_phy_cfg;
+
+ if (ldo->phy_type_low || ldo->phy_type_high) {
+ cfg->phy_type_low = pf->nvm_phy_type_lo &
+ cpu_to_le64(ldo->phy_type_low);
+ cfg->phy_type_high = pf->nvm_phy_type_hi &
+ cpu_to_le64(ldo->phy_type_high);
+ }
+ cfg->link_fec_opt = ldo->fec_options;
+ phy->curr_user_fec_req = ICE_FEC_AUTO;
+
+ set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
+}
+
+/**
+ * ice_init_phy_user_cfg - Initialize the PHY user configuration
+ * @pi: port info structure
+ *
+ * Initialize the current user PHY configuration, speed, FEC, and FC requested
+ * mode to default. The PHY defaults are from get PHY capabilities topology
+ * with media so call when media is first available. An error is returned if
+ * called when media is not available. The PHY initialization completed state is
+ * set here.
+ *
+ * These settings are used when setting the PHY configuration, and the
+ * user PHY configuration is updated whenever the PHY configuration is
+ * set. Returns 0 on success, negative on failure.
+ */
+static int ice_init_phy_user_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ int err = 0;
+
+ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EIO;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
+
+ /* check if lenient mode is supported and enabled */
+ if (ice_fw_supports_link_override(&vsi->back->hw) &&
+ !(pcaps->module_compliance_enforcement &
+ ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
+ set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
+
+ /* if link default override is enabled, initialize user PHY
+ * configuration with link default override values
+ */
+ if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
+ ice_init_phy_cfg_dflt_override(pi);
+ goto out;
+ }
+ }
+
+ /* if link default override is not enabled, initialize PHY using
+ * topology with media
+ */
+ phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
+ pcaps->link_fec_options);
+ phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
+
+out:
+ phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
+ set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
+err_out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_configure_phy - configure PHY
+ * @vsi: VSI of PHY
+ *
+ * Set the PHY configuration. If the current PHY configuration is the same as
+ * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
+ * configure based on the PHY capabilities reported for the topology with media.
+ */
+static int ice_configure_phy(struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+ if (!pi)
+ return -EINVAL;
+
+ /* Ensure we have media as we cannot configure a medialess port */
+ if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EPERM;
+
+ ice_print_topo_conflict(vsi);
+
+ if (vsi->port_info->phy.link_info.topo_media_conflict ==
+ ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ return -EPERM;
+
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
+ return ice_force_phys_link_state(vsi, true);
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ /* If PHY enable link is configured and configuration has not changed,
+ * there's nothing to do
+ */
+ if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
+ ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
+ goto done;
+
+ /* Use PHY topology as baseline for configuration */
+ memset(pcaps, 0, sizeof(*pcaps));
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
+
+ /* Speed - If default override pending, use curr_user_phy_cfg set in
+ * ice_init_phy_cfg_dflt_override.
+ */
+ if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ vsi->back->state)) {
+ cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
+ cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
+ } else {
+ u64 phy_low = 0, phy_high = 0;
+
+ ice_update_phy_type(&phy_low, &phy_high,
+ pi->phy.curr_user_speed_req);
+ cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
+ cfg->phy_type_high = pcaps->phy_type_high &
+ cpu_to_le64(phy_high);
+ }
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (!cfg->phy_type_low && !cfg->phy_type_high) {
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ }
+
+ /* FEC */
+ ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (cfg->link_fec_opt !=
+ (cfg->link_fec_opt & pcaps->link_fec_options)) {
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ }
+
+ /* Flow Control - always supported; no need to check against
+ * capabilities
+ */
+ ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
+
+ /* Enable link and link update */
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
+ if (status) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ }
+
+ kfree(cfg);
+done:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_check_media_subtask - Check for media
* @pf: pointer to PF struct
+ *
+ * If media is available, then initialize the PHY user configuration if it
+ * has not been done yet, and configure the PHY if the interface is up.
*/
static void ice_check_media_subtask(struct ice_pf *pf)
{
struct ice_vsi *vsi;
int err;
+ /* No need to check for media if it's already present */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
+ return;
+
vsi = ice_get_main_vsi(pf);
if (!vsi)
return;
- /* No need to check for media if it's already present or the interface
- * is down
- */
- if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
- test_bit(__ICE_DOWN, vsi->state))
- return;
-
/* Refresh link info and check if media is present */
pi = vsi->port_info;
err = ice_update_link_info(pi);
return;
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
- if (err)
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
+ ice_init_phy_user_cfg(pi);
+
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (test_bit(__ICE_DOWN, vsi->state) &&
+ test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return;
- clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ err = ice_configure_phy(vsi);
+ if (!err)
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- vsi->xdp_rings[i] = xdp_ring;
+ WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_QUERY_PROG:
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
return 0;
}
+/**
+ * ice_is_wol_supported - get NVM state of WoL
+ * @pf: board private structure
+ *
+ * Check if WoL is supported based on the HW configuration.
+ * Returns true if NVM supports and enables WoL for this port, false otherwise
+ */
+bool ice_is_wol_supported(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u16 wol_ctrl;
+
+ /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
+ * word) indicates WoL is not supported on the corresponding PF ID.
+ */
+ if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
+ return false;
+
+ return !(BIT(hw->pf_id) & wol_ctrl);
+}
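
A worked example of the bit test above: with wol_ctrl = 0x0009, bits 0 and 3 of the NVM word are set, so ice_is_wol_supported() returns false for hw->pf_id 0 or 3 (WoL disabled for those PFs) and true for all other PF IDs.
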
+
/**
* ice_vsi_recfg_qs - Change the number of queues on a VSI
* @vsi: VSI being changed
return err;
}
+/**
+ * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
+ * @pf: PF to configure
+ *
+ * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
+ * VSI can still Tx/Rx VLAN tagged packets.
+ */
+static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_ctx *ctxt;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!vsi)
+ return;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
+ hw = &pf->hw;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ /* disable VLAN anti-spoof */
+ ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ /* disable VLAN pruning and keep all other settings */
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ /* allow all VLANs on Tx and don't strip on Rx */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ }
+
+ kfree(ctxt);
+}
+
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
{
struct ice_driver_ver dv;
- dv.major_ver = DRV_VERSION_MAJOR;
- dv.minor_ver = DRV_VERSION_MINOR;
- dv.build_ver = DRV_VERSION_BUILD;
+ dv.major_ver = 0xff;
+ dv.minor_ver = 0xff;
+ dv.build_ver = 0xff;
dv.subbuild_ver = 0;
- strscpy((char *)dv.driver_string, DRV_VERSION,
+ strscpy((char *)dv.driver_string, UTS_RELEASE,
sizeof(dv.driver_string));
return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
release_firmware(firmware);
}
+/**
+ * ice_print_wake_reason - show the wake up cause in the log
+ * @pf: pointer to the PF struct
+ */
+static void ice_print_wake_reason(struct ice_pf *pf)
+{
+ u32 wus = pf->wakeup_reason;
+ const char *wake_str;
+
+ /* if no wake event, nothing to print */
+ if (!wus)
+ return;
+
+ if (wus & PFPM_WUS_LNKC_M)
+ wake_str = "Link\n";
+ else if (wus & PFPM_WUS_MAG_M)
+ wake_str = "Magic Packet\n";
+ else if (wus & PFPM_WUS_MNG_M)
+ wake_str = "Management\n";
+ else if (wus & PFPM_WUS_FW_RST_WK_M)
+ wake_str = "Firmware Reset\n";
+ else
+ wake_str = "Unknown\n";
+
+ dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
err = ice_send_version(pf);
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- ice_drv_ver, err);
- goto err_alloc_sw_unroll;
+ UTS_RELEASE, err);
+ goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
+ }
+
+ err = ice_init_nvm_phy_type(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ err = ice_update_link_info(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_update_link_info failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ ice_init_link_dflt_override(pf->hw.port_info);
+
+ /* if media available, initialize PHY settings */
+ if (pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_init_phy_user_cfg(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (vsi)
+ ice_configure_phy(vsi);
+ }
+ } else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, we are done with probe */
- if (ice_is_safe_mode(pf))
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(dev, false);
+
+ if (ice_is_safe_mode(pf)) {
+ ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
+ }
/* initialize DDP driven features */
clear_bit(__ICE_DOWN, pf->state);
return 0;
+err_send_version_unroll:
+ ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
err_exit_unroll:
ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
return err;
}
+/**
+ * ice_set_wake - enable or disable Wake on LAN
+ * @pf: pointer to the PF struct
+ *
+ * Simple helper for WoL control
+ */
+static void ice_set_wake(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool wol = pf->wol_ena;
+
+ /* clear wake state, otherwise new wake events won't fire */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* enable / disable APM wake up, no RMW needed */
+ wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
+
+ /* set magic packet filter enabled */
+ wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
+}
+
+/**
+ * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
+ * @pf: pointer to the PF struct
+ *
+ * Issue firmware command to enable multicast magic wake, making
+ * sure that any locally administered address (LAA) is used for
+ * wake, and that PF reset doesn't undo the LAA.
+ */
+static void ice_setup_mc_magic_wake(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 mac_addr[ETH_ALEN];
+ struct ice_vsi *vsi;
+ u8 flags;
+
+ if (!pf->wol_ena)
+ return;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Get current MAC address in case it's an LAA */
+ if (vsi->netdev)
+ ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
+ else
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+
+ flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
+ ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
+ ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
+
+ status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
+ if (status)
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+}
+
/**
* ice_remove - Device removal routine
* @pdev: PCI device information struct
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ ice_setup_mc_magic_wake(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
+ ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
pci_wait_for_pending_transaction(pdev);
ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ice_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void ice_shutdown(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ ice_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * ice_prepare_for_shutdown - prep for PCI shutdown
+ * @pf: board private structure
+ *
+ * Inform or close all dependent features in prep for PCI device shutdown
+ */
+static void ice_prepare_for_shutdown(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 v;
+
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+
+ dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
+
+ /* disable the VSIs and their queues that are not already DOWN */
+ ice_pf_dis_all_vsi(pf, false);
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ pf->vsi[v]->vsi_num = 0;
+
+ ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
+ * @pf: board private structure to reinitialize
+ *
+ * This routine reinitializes the interrupt scheme that was cleared during
+ * the power management suspend callback.
+ *
+ * This should be called during resume routine to re-allocate the q_vectors
+ * and reacquire interrupts.
+ */
+static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret, v;
+
+ /* Since we clear MSIX flag during suspend, we need to
+ * set it back during resume...
+ */
+
+ ret = ice_init_interrupt_scheme(pf);
+ if (ret) {
+ dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
+ return ret;
+ }
+
+ /* Remap vectors and rings, after successful re-init interrupts */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
+ if (ret)
+ goto err_reinit;
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+
+ ret = ice_req_irq_msix_misc(pf);
+ if (ret) {
+ dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
+ ret);
+ goto err_reinit;
+ }
+
+ return 0;
+
+err_reinit:
+ while (v--)
+ if (pf->vsi[v])
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+
+ return ret;
+}
+
+/**
+ * ice_suspend
+ * @dev: generic device information structure
+ *
+ * Power Management callback to quiesce the device and prepare
+ * for D3 transition.
+ */
+static int __maybe_unused ice_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ice_pf *pf;
+ int disabled, v;
+
+ pf = pci_get_drvdata(pdev);
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Device is not ready, no need to suspend it\n");
+ return -EBUSY;
+ }
+
+ /* Stop watchdog tasks until resume completion.
+ * Even though it is most likely that the service task is
+ * disabled if the device is suspended or down, the service task's
+ * state is controlled by a different state bit, and we should
+ * store and honor whatever state that bit is in at this point.
+ */
+ disabled = ice_service_task_stop(pf);
+
+ /* If already suspended, there is nothing to do */
+ if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "can't suspend device in reset or already down\n");
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ ice_setup_mc_magic_wake(pf);
+
+ ice_prepare_for_shutdown(pf);
+
+ ice_set_wake(pf);
+
+ /* Free vectors, clear the interrupt scheme and release IRQs
+ * for proper hibernation, especially with large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
+ ice_clear_interrupt_scheme(pf);
+
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+/**
+ * ice_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ */
+static int __maybe_unused ice_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum ice_reset_req reset_type;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot enable device after suspend\n");
+ return ret;
+ }
+
+ pf = pci_get_drvdata(pdev);
+ hw = &pf->hw;
+
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+ ice_print_wake_reason(pf);
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ ret = ice_reinit_interrupt_scheme(pf);
+ if (ret)
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+
+ clear_bit(__ICE_DOWN, pf->state);
+ /* Now perform PF reset and rebuild */
+ reset_type = ICE_RESET_PFR;
+ /* re-enable service task for reset, but allow reset to schedule it */
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (ice_schedule_reset(pf, reset_type))
+ dev_err(dev, "Reset during resume failed.\n");
+
+ clear_bit(__ICE_SUSPENDED, pf->state);
+ ice_service_task_restart(pf);
+
+ /* Restart the service task */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+ return 0;
}
+#endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
* is in progress. Allows the driver to gracefully prepare/handle PCI errors.
*/
static pci_ers_result_t
- ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
+ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
return;
}
+ ice_restore_all_vfs_msi_state(pdev);
+
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
.slot_reset = ice_pci_err_slot_reset,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &ice_pm_ops,
+#endif /* CONFIG_PM */
+ .shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.err_handler = &ice_pci_err_handler
};
{
int status;
- pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+ pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
+ vsi->rx_gro_dropped = 0;
rcu_read_lock();
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
if (err)
goto err_sched_init_port;
- err = ice_update_link_info(hw->port_info);
- if (err)
- dev_err(dev, "Get link status error %d\n", err);
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
+ err = ice_init_phy_user_cfg(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = ice_configure_phy(vsi);
if (err) {
netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
} else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
err = ice_aq_set_link_restart_an(pi, false, NULL);
if (err) {
netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
vsi->vsi_num, err);
return err;
}
- set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
}
err = ice_vsi_open(vsi);
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
-#define DRIVERNAPI "-NAPI"
-#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
-const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
#define IXGB_CB_LENGTH 256
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
- enum pci_channel_state state);
+ pci_channel_state_t state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
static int __init
ixgb_init_module(void)
{
- pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_driver_string);
pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
* a PCI bus error is detected.
*/
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
+ pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
/* We assume that efx->type->reconfigure_mac will always try to sync RX
* filters and therefore needs to read-lock the filter table against freeing
*/
-void efx_mac_reconfigure(struct efx_nic *efx)
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{
if (efx->type->reconfigure_mac) {
down_read(&efx->filter_sem);
- efx->type->reconfigure_mac(efx);
+ efx->type->reconfigure_mac(efx, mtu_only);
up_read(&efx->filter_sem);
}
}
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx_mac_reconfigure(efx);
+ efx_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock);
}
+int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EADDRNOTAVAIL;
+ }
+
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
+ ether_addr_copy(net_dev->dev_addr, new_addr);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ efx_mac_reconfigure(efx, false);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+void efx_set_rx_mode(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise efx_start_port() will do this */
+}
+
+int efx_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+ * If rx-fcs is changed, mac_reconfigure updates that too.
+ */
+ if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXFCS)) {
+ /* efx_set_rx_mode() will schedule MAC work to update filters
+ * when the new features are finally set in net_dev.
+ */
+ efx_set_rx_mode(net_dev);
+ }
+
+ return 0;
+}
+
/* This ensures that the kernel is kept informed (via
* netif_carrier_on/off) of the link status, and also maintains the
* link status's stop on the port's TX queue.
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx_mac_reconfigure(efx);
+ efx_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
*
**************************************************************************/
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
+ if (efx->link_advertising[0]) {
+ if (wanted_fc & EFX_FC_RX)
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EFX_FC_TX)
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
+ }
+}
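
The XOR on ADVERTISED_Asym_Pause above encodes the standard 802.3 pause advertisement: it turns "Pause | Asym_Pause" into plain "Pause" when TX flow control is also wanted. Spelled out for the four wanted_fc combinations (a sketch of the resulting bits, not driver code):

	/*   wanted_fc     link_advertising[0] pause bits
	 *   ---------     ------------------------------
	 *   RX only       ADVERTISED_Pause | ADVERTISED_Asym_Pause
	 *   RX + TX       ADVERTISED_Pause            (symmetric pause)
	 *   TX only       ADVERTISED_Asym_Pause
	 *   neither       (both bits cleared)
	 */
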
+
static void efx_start_port(struct efx_nic *efx)
{
netif_dbg(efx, ifup, efx->net_dev, "start port\n");
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx);
+ efx_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock);
}
efx->type->fini(efx);
}
+/* Context: netif_tx_lock held, BHs disabled. */
+void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
/* This function will always ensure that the locks acquired in
* efx_reset_down() are released. A failure return code indicates
* that we were unable to reinitialise the hardware, and the
*/
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
+ int rc, rc2 = 0;
bool disabled;
- int rc, rc2;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
+ /* efx_reset_down() grabs locks that prevent recovery on EF100.
+ * EF100 reset is handled in the efx_nic_type callback below.
+ */
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
disabled = rc ||
method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
INIT_LIST_HEAD(&efx->rss_context.list);
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
mutex_init(&efx->rss_lock);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
efx->num_mac_stats = MC_CMD_MAC_NSTATS;
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ efx->tx_queues_per_channel = 1;
+ efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+ efx->mem_bar = UINT_MAX;
+
rc = efx_init_channels(efx);
if (rc)
goto fail;
struct pci_dev *pci_dev = efx->pci_dev;
int rc;
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+ efx->mem_bar = UINT_MAX;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
rc = pci_enable_device(pci_dev);
if (rc) {
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
+ "request for memory BAR[%d] failed\n", bar);
rc = -EIO;
goto fail3;
}
-
+ efx->mem_bar = bar;
efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
(unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
(unsigned long long)efx->membase_phys, mem_map_size,
efx->membase);
return rc;
}
-void efx_fini_io(struct efx_nic *efx, int bar)
+void efx_fini_io(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
}
if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, bar);
+ pci_release_region(efx->pci_dev, efx->mem_bar);
efx->membase_phys = 0;
+ efx->mem_bar = UINT_MAX;
}
/* Don't disable bus-mastering if VFs are assigned */
device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
#endif
- enum pci_channel_state state)
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+const struct pci_error_handlers efx_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
+int efx_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->get_phys_port_id)
+ return efx->type->get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (snprintf(name, len, "p%u", efx->port_num) >= len)
+ return -EINVAL;
+ return 0;
+}
* membars, in order to allow proper address translation during
* resource assignment to enable guest virtualization
*/
- VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
+ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
/*
* Device may provide root port configuration information which limits
* bus numbering
*/
- VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+
+ /*
+ * Device contains physical location shadow registers in
+ * vendor-specific capability space
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2),
};
/*
}
}
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+ int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR);
+ u32 reg, regu;
+
+ pci_read_config_dword(vmd->dev, pos + 4, &reg);
+
+ /* "SHDW" */
+ if (pos && reg == 0x53484457) {
+ pci_read_config_dword(vmd->dev, pos + 8, &reg);
+ pci_read_config_dword(vmd->dev, pos + 12, &regu);
+ offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+ (((u64) regu << 32 | reg) &
+ PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_dword(vmd->dev, pos + 16, &reg);
+ pci_read_config_dword(vmd->dev, pos + 20, &regu);
+ offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+ (((u64) regu << 32 | reg) &
+ PCI_BASE_ADDRESS_MEM_MASK);
+ }
+ }
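
A sketch of the address arithmetic above, with made-up values: if the host assigned VMD_MEMBAR1 at 0xe0000000 while the shadow registers report a guest-visible base of 0xc0000000, then offset[0] is 0x20000000. pci_add_resource_offset() later applies that offset so BARs of devices behind the VMD translate correctly during resource assignment.
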
+
/*
* Certain VMD devices may have a root port configuration option which
* limits the bus range to between 0-127, 128-255, or 224-255
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
- if (!vmd->irq_domain)
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
return -ENODEV;
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
return -ENODEV;
}
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
}
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
- .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
- .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
- .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
struct irq_affinity *affd)
{
struct irq_affinity msi_default_affd = {0};
- int msix_vecs = -ENOSPC;
- int msi_vecs = -ENOSPC;
+ int nvecs = -ENOSPC;
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
}
if (flags & PCI_IRQ_MSIX) {
- msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
- max_vecs, affd, flags);
- if (msix_vecs > 0)
- return msix_vecs;
+ nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+ affd, flags);
+ if (nvecs > 0)
+ return nvecs;
}
if (flags & PCI_IRQ_MSI) {
- msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
- affd);
- if (msi_vecs > 0)
- return msi_vecs;
+ nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
+ if (nvecs > 0)
+ return nvecs;
}
/* use legacy IRQ if allowed */
}
}
- if (msix_vecs == -ENOSPC)
- return -ENOSPC;
- return msi_vecs;
+ return nvecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
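With MSI-X and MSI sharing a single ``nvecs``, the caller-visible behavior is
unchanged; a typical (illustrative) caller requesting 1 to 8 vectors with
fallback looks like::

    int nvecs, irq;

    nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
                    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (nvecs < 0)
            return nvecs;

    irq = pci_irq_vector(pdev, 0);  /* Linux IRQ number for vector 0 */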
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
- iort_msi_map_rid(&pdev->dev, rid);
+ rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ iort_msi_map_id(&pdev->dev, rid);
return rid;
}
u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- dom = of_msi_map_get_device_domain(&pdev->dev, rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
if (!dom)
- dom = iort_get_device_domain(&pdev->dev, rid);
+ dom = iort_get_device_domain(&pdev->dev, rid,
+ DOMAIN_BUS_PCI_MSI);
return dom;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
return;
break;
- case HPX_CFG_VEND_CAP: /* Fall through */
- case HPX_CFG_DVSEC: /* Fall through */
+ case HPX_CFG_VEND_CAP:
+ case HPX_CFG_DVSEC:
default:
pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
return;
error = -EBUSY;
break;
}
- /* Fall through */
+ fallthrough;
case PCI_D0:
case PCI_D1:
case PCI_D2:
ACPI_FREE(obj);
}
- static void pci_acpi_set_untrusted(struct pci_dev *dev)
+ static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
u8 val;
/*
* These root ports expose PCIe (including DMA) outside of the
- * system so make sure we treat them and everything behind as
- * untrusted.
+ * system. Everything downstream from them is external.
*/
if (val)
- dev->untrusted = 1;
+ dev->external_facing = 1;
}
static void pci_acpi_setup(struct device *dev)
return;
pci_acpi_optimize_delay(pci_dev, adev->handle);
- pci_acpi_set_untrusted(pci_dev);
+ pci_acpi_set_external_facing(pci_dev);
pci_acpi_add_edr_notifier(pci_dev);
pci_acpi_add_pm_notifier(adev, pci_dev);
return 0;
}
+ static int pci_acs_enable;
+
+ /**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+ void pci_request_acs(void)
+ {
+ pci_acs_enable = 1;
+ }
+
+ static const char *disable_acs_redir_param;
+
+ /**
+ * pci_disable_acs_redir - disable ACS redirect capabilities
+ * @dev: the PCI device
+ *
+ * Only for devices specified in the disable_acs_redir parameter.
+ */
+ static void pci_disable_acs_redir(struct pci_dev *dev)
+ {
+ int ret = 0;
+ const char *p;
+ int pos;
+ u16 ctrl;
+
+ if (!disable_acs_redir_param)
+ return;
+
+ p = disable_acs_redir_param;
+ while (*p) {
+ ret = pci_dev_str_match(dev, p, &p);
+ if (ret < 0) {
+ pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
+ disable_acs_redir_param);
+
+ break;
+ } else if (ret == 1) {
+ /* Found a match */
+ break;
+ }
+
+ if (*p != ';' && *p != ',') {
+ /* End of param or invalid format */
+ break;
+ }
+ p++;
+ }
+
+ if (ret != 1)
+ return;
+
+ if (!pci_dev_specific_disable_acs_redir(dev))
+ return;
+
+ pos = dev->acs_cap;
+ if (!pos) {
+ pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
+ return;
+ }
+
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* P2P Request & Completion Redirect */
+ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ pci_info(dev, "disabled ACS redirect\n");
+ }
+
+ /**
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
+ * @dev: the PCI device
+ */
+ static void pci_std_enable_acs(struct pci_dev *dev)
+ {
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ pos = dev->acs_cap;
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ }
+
+ /**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+ static void pci_enable_acs(struct pci_dev *dev)
+ {
+ if (!pci_acs_enable)
+ goto disable_acs_redir;
+
+ if (!pci_dev_specific_enable_acs(dev))
+ goto disable_acs_redir;
+
+ pci_std_enable_acs(dev);
+
+ disable_acs_redir:
+ /*
+ * Note: pci_disable_acs_redir() must be called even if ACS was not
+ * enabled by the kernel because it may have been enabled by
+ * platform firmware. So if we are told to disable it, we should
+ * always disable it after setting the kernel's default
+ * preferences.
+ */
+ pci_disable_acs_redir(dev);
+ }
+
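The ``disable_acs_redir_param`` string is parsed by pci_dev_str_match(), so
devices are listed by PCI address, separated by ';' or ','. An illustrative
boot parameter (the addresses are made up)::

    pci=disable_acs_redir=0000:00:1c.0;0000:00:1c.4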
/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+ void pcie_clear_device_status(struct pci_dev *dev)
+ {
+ u16 sta;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
+ }
+
/**
* pcie_clear_root_pme_status - Clear root port PME interrupt status.
* @dev: PCIe root port or event collector.
}
}
- static int pci_acs_enable;
-
- /**
- * pci_request_acs - ask for ACS to be enabled if supported
- */
- void pci_request_acs(void)
- {
- pci_acs_enable = 1;
- }
-
- static const char *disable_acs_redir_param;
-
- /**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
- static void pci_disable_acs_redir(struct pci_dev *dev)
- {
- int ret = 0;
- const char *p;
- int pos;
- u16 ctrl;
-
- if (!disable_acs_redir_param)
- return;
-
- p = disable_acs_redir_param;
- while (*p) {
- ret = pci_dev_str_match(dev, p, &p);
- if (ret < 0) {
- pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
- disable_acs_redir_param);
-
- break;
- } else if (ret == 1) {
- /* Found a match */
- break;
- }
-
- if (*p != ';' && *p != ',') {
- /* End of param or invalid format */
- break;
- }
- p++;
- }
-
- if (ret != 1)
- return;
-
- if (!pci_dev_specific_disable_acs_redir(dev))
- return;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
- if (!pos) {
- pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
- return;
- }
-
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
- /* P2P Request & Completion Redirect */
- ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
- pci_info(dev, "disabled ACS redirect\n");
- }
-
- /**
- * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
- * @dev: the PCI device
- */
- static void pci_std_enable_acs(struct pci_dev *dev)
- {
- int pos;
- u16 cap;
- u16 ctrl;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
- if (!pos)
- return;
-
- pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
- /* Source Validation */
- ctrl |= (cap & PCI_ACS_SV);
-
- /* P2P Request Redirect */
- ctrl |= (cap & PCI_ACS_RR);
-
- /* P2P Completion Redirect */
- ctrl |= (cap & PCI_ACS_CR);
-
- /* Upstream Forwarding */
- ctrl |= (cap & PCI_ACS_UF);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
- }
-
- /**
- * pci_enable_acs - enable ACS if hardware support it
- * @dev: the PCI device
- */
- void pci_enable_acs(struct pci_dev *dev)
- {
- if (!pci_acs_enable)
- goto disable_acs_redir;
-
- if (!pci_dev_specific_enable_acs(dev))
- goto disable_acs_redir;
-
- pci_std_enable_acs(dev);
-
- disable_acs_redir:
- /*
- * Note: pci_disable_acs_redir() must be called even if ACS was not
- * enabled by the kernel because it may have been enabled by
- * platform firmware. So if we are told to disable it, we should
- * always disable it after setting the kernel's default
- * preferences.
- */
- pci_disable_acs_redir(dev);
- }
-
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
{
int pos;
u16 cap, ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ pos = pdev->acs_cap;
if (!pos)
return false;
return true;
}
+ /**
+ * pci_acs_init - Initialize ACS if hardware supports it
+ * @dev: the PCI device
+ */
+ void pci_acs_init(struct pci_dev *dev)
+ {
+ dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+
+ if (dev->acs_cap)
+ pci_enable_acs(dev);
+ }
+
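Once pci_acs_init() has cached the offset, later users read ``dev->acs_cap``
directly instead of re-walking the extended capability list, as the quirk
hunks further below also show; a minimal sketch of the consumer side::

    u16 ctrl;
    int pos = pdev->acs_cap;        /* 0 if ACS is not implemented */

    if (pos)
            pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);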
/**
* pci_rebar_find_pos - find position of resize ctrl reg for BAR
* @pdev: PCI device
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms). Specify %0
- * for no delay.
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
msleep(10);
timeout -= 10;
}
- if (active && ret && delay)
+ if (active && ret)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
if (!pcie_downstream_port(dev))
return;
- /*
- * Per PCIe r5.0, sec 6.6.1, for downstream ports that support
- * speeds > 5 GT/s, we must wait for link training to complete
- * before the mandatory delay.
- *
- * We can only tell when link training completes via DLL Link
- * Active, which is required for downstream ports that support
- * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
- * devices do not implement Link Active reporting even when it's
- * required, so we'll check for that directly instead of checking
- * the supported link speed. We assume devices without Link Active
- * reporting can train in 100 ms regardless of speed.
- */
- if (dev->link_active_reporting) {
- pci_dbg(dev, "waiting for link to train\n");
- if (!pcie_wait_for_link_delay(dev, true, 0)) {
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
return;
}
}
- pci_dbg(child, "waiting %d ms to become accessible\n", delay);
- msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
+ int ret;
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
v = (ffs(rq) - 8) << 12;
- return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
+
+ return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
int pcie_set_mps(struct pci_dev *dev, int mps)
{
u16 v;
+ int ret;
if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
return -EINVAL;
return -EINVAL;
v <<= 5;
- return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD, v);
+
+ return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
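Both helpers now return 0 or a negative errno rather than a raw PCIBIOS_*
code, so callers can propagate the result directly; an illustrative use (the
sizes are arbitrary valid powers of two)::

    int err;

    err = pcie_set_readrq(pdev, 512);       /* Max_Read_Request_Size */
    if (!err)
            err = pcie_set_mps(pdev, 256);  /* Max_Payload_Size */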
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
+ int rc;
+
if (!pcie_aer_is_native(dev))
return -EIO;
- return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
+ rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
+ return pcibios_err_to_errno(rc);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
+ int rc;
+
if (!pcie_aer_is_native(dev))
return -EIO;
- return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
- PCI_EXP_AER_FLAGS);
+ rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
+ return pcibios_err_to_errno(rc);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
- void pci_aer_clear_device_status(struct pci_dev *dev)
- {
- u16 sta;
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
- pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
- }
-
int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
int aer = dev->aer_cap;
"Transaction Layer"
};
- static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
+ static const char *aer_correctable_error_string[] = {
"RxErr", /* Bit Position 0 */
NULL,
NULL,
"NonFatalErr", /* Bit Position 13 */
"CorrIntErr", /* Bit Position 14 */
"HeaderOF", /* Bit Position 15 */
+ NULL, /* Bit Position 16 */
+ NULL, /* Bit Position 17 */
+ NULL, /* Bit Position 18 */
+ NULL, /* Bit Position 19 */
+ NULL, /* Bit Position 20 */
+ NULL, /* Bit Position 21 */
+ NULL, /* Bit Position 22 */
+ NULL, /* Bit Position 23 */
+ NULL, /* Bit Position 24 */
+ NULL, /* Bit Position 25 */
+ NULL, /* Bit Position 26 */
+ NULL, /* Bit Position 27 */
+ NULL, /* Bit Position 28 */
+ NULL, /* Bit Position 29 */
+ NULL, /* Bit Position 30 */
+ NULL, /* Bit Position 31 */
};
- static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
+ static const char *aer_uncorrectable_error_string[] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
"PoisonTLPBlocked", /* Bit Position 26 */
+ NULL, /* Bit Position 27 */
+ NULL, /* Bit Position 28 */
+ NULL, /* Bit Position 29 */
+ NULL, /* Bit Position 30 */
+ NULL, /* Bit Position 31 */
};
static const char *aer_agent_string[] = {
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
+ const char **strings;
unsigned long status = info->status & ~info->mask;
- const char *errmsg = NULL;
+ const char *level, *errmsg;
int i;
+ if (info->severity == AER_CORRECTABLE) {
+ strings = aer_correctable_error_string;
+ level = KERN_WARNING;
+ } else {
+ strings = aer_uncorrectable_error_string;
+ level = KERN_ERR;
+ }
+
for_each_set_bit(i, &status, 32) {
- if (info->severity == AER_CORRECTABLE)
- errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
- aer_correctable_error_string[i] : NULL;
- else
- errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
- aer_uncorrectable_error_string[i] : NULL;
+ errmsg = strings[i];
+ if (!errmsg)
+ errmsg = "Unknown Error Bit";
- if (errmsg)
- pci_err(dev, " [%2d] %-22s%s\n", i, errmsg,
+ pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
- else
- pci_err(dev, " [%2d] Unknown Error Bit%s\n",
- i, info->first_error == i ? " (First)" : "");
}
pci_dev_aer_stats_incr(dev, info);
}
{
int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
+ const char *level;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
- aer_error_severity_string[info->severity],
- aer_error_layer[layer], aer_agent_string[agent]);
+ level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
+
+ pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], aer_agent_string[agent]);
- pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
- dev->vendor, dev->device,
- info->status, info->mask);
+ pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
if (aer)
pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
- pci_aer_clear_device_status(dev);
+ if (pcie_aer_is_native(dev))
+ pcie_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
{
struct pcie_device *dev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(dev);
- struct aer_err_source uninitialized_var(e_src);
+ struct aer_err_source e_src;
if (kfifo_is_empty(&rpc->aer_fifo))
return IRQ_NONE;
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
* and typically causes the system to hang or reset when access is attempted.
- * http://www.spinics.net/lists/linux-pci/msg34797.html
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
* redirect (CR) since all transactions are redirected to the upstream
* root complex.
*
- * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
- * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
- * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
*
* 1002:4385 SBx00 SMBus Controller
* 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
if (ACPI_FAILURE(status))
return -ENODEV;
+ acpi_put_table(header);
+
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
*
* 0x9d10-0x9d1b PCI Express Root port #{1-12}
*
- * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
- * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
- * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
- * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
- * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ * [1] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ * [3] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
+ * [4] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
+ * [5] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
* [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
* [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
*/
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ pos = dev->acs_cap;
if (!pos)
return -ENOTTY;
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ pos = dev->acs_cap;
if (!pos)
return -ENOTTY;
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ pos = dev->acs_cap;
if (!pos)
return -ENOTTY;
*/
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+ (pdev->device == 0x7340 && pdev->revision != 0xc5))
return;
pci_info(pdev, "disabling ATS\n");
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* AMD Iceland dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+ /* AMD Navi10 dGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
bool found;
struct pci_dev *bridge = bus->self;
- pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);
+ pos = bridge->acs_cap;
/* Disable ACS SV before initial config reads */
if (pos) {
/**
* aac_queuecommand - queue a SCSI command
+ * @shost: Scsi host to queue command on
* @cmd: SCSI command to queue
- * @done: Function to call on command completion
*
* Queues a command for execution by the associated Host Adapter.
*
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
if (num < 4 && end_sec == param->sectors) {
- if (param->cylinders != saved_cylinders)
+ if (param->cylinders != saved_cylinders) {
dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
param->heads, param->sectors, num));
+ }
} else if (end_head > 0 || end_sec > 0) {
dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
end_head + 1, end_sec, num));
/**
* aac_cfg_ioctl - AAC configuration request
- * @inode: inode of device
* @file: file handle
* @cmd: ioctl command code
* @arg: argument
}
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = shost_priv(shost);
* @state: current state of the PCI slot
*/
static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
+ pci_channel_state_t state)
{
/* If slot is permanently frozen, turn everything off */
if (state == pci_channel_io_perm_failure) {
/**
* sym2_reset_workarounds - hardware-specific work-arounds
+ * @pdev: pointer to PCI device
*
* This routine is similar to sym_set_workarounds(), except
* that, at this point, we already know that the device was
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
+ status = ql_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
return -ETIMEDOUT;
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
+ status = ql_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
struct iphdr *iph =
(struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
msi:
qdev->intr_count = 1;
if (qlge_irq_type == MSI_IRQ) {
- if (!pci_enable_msi(qdev->pdev)) {
+ if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"Running with MSI interrupts.\n");
*/
ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
/* Add the TX ring(s) serviced by this vector
- * to the mask. */
+ * to the mask.
+ */
for (j = 0; j < tx_rings_per_vector; j++) {
ctx->irq_mask |=
(1 << qdev->rx_ring[qdev->rss_ring_count +
"Failed to set magic packet on %s.\n",
qdev->ndev->name);
return status;
- } else
- netif_info(qdev, drv, qdev->ndev,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ }
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
return;
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
static int qlge_probe(struct pci_dev *pdev,
* the bus goes dead
*/
timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
* a PCI bus error is detected.
*/
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
+ pci_channel_state_t state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
netif_err(qdev, ifup, qdev->ndev,
"Device was not running prior to EEH.\n");
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
netif_device_attach(ndev);
}
.resume = qlge_io_resume,
};
-static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused qlge_suspend(struct device *dev_d)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev_d);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
}
ql_wol(qdev);
- err = pci_save_state(pdev);
- if (err)
- return err;
-
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-#ifdef CONFIG_PM
-static int qlge_resume(struct pci_dev *pdev)
+static int __maybe_unused qlge_resume(struct device *dev_d)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev_d);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
+ pci_set_master(to_pci_dev(dev_d));
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ device_wakeup_disable(dev_d);
if (netif_running(ndev)) {
err = ql_adapter_up(qdev);
return err;
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
netif_device_attach(ndev);
return 0;
}
-#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
- qlge_suspend(pdev, PMSG_SUSPEND);
+ qlge_suspend(&pdev->dev);
}
+static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
+
static struct pci_driver qlge_driver = {
.name = DRV_NAME,
.id_table = qlge_pci_tbl,
.probe = qlge_probe,
.remove = qlge_remove,
-#ifdef CONFIG_PM
- .suspend = qlge_suspend,
- .resume = qlge_resume,
-#endif
+ .driver.pm = &qlge_pm_ops,
.shutdown = qlge_shutdown,
.err_handler = &qlge_err_handler
};
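The qlge conversion above follows the generic power-management pattern: the
callbacks take a ``struct device`` and leave config-state save/restore and
power transitions to the PCI core. A minimal skeleton of that pattern (the
``foo_*`` names are hypothetical; a real driver also needs .id_table and
.probe)::

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* quiesce the device; the PCI core saves config state */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            /* re-initialize the device; the PCI core restores state */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
            .name = "foo",
            .driver.pm = &foo_pm_ops,
    };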
*/
typedef unsigned int __bitwise pci_channel_state_t;
- enum pci_channel_state {
+ enum {
/* I/O channel is in normal state */
pci_channel_io_normal = (__force pci_channel_state_t) 1,
* mappings to make sure they cannot access arbitrary memory.
*/
unsigned int untrusted:1;
+ /*
+ * Info from the platform, e.g., ACPI or device tree, may mark a
+ * device as "external-facing". An external-facing device is
+ * itself internal but devices downstream from it are external.
+ */
+ unsigned int external_facing:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
#ifdef CONFIG_PCI_P2PDMA
struct pci_p2pdma *p2pdma;
#endif
+ u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
char *driver_override; /* Driver name to force a match */
struct pci_error_handlers {
/* PCI bus error detected on this device */
pci_ers_result_t (*error_detected)(struct pci_dev *dev,
- enum pci_channel_state error);
+ pci_channel_state_t error);
/* MMIO has been re-enabled, but not DMA */
pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
/* Generic PCI functions exported to card drivers */
- enum pci_lost_interrupt_reason {
- PCI_LOST_IRQ_NO_INFORMATION = 0,
- PCI_LOST_IRQ_DISABLE_MSI,
- PCI_LOST_IRQ_DISABLE_MSIX,
- PCI_LOST_IRQ_DISABLE_ACPI,
- };
- enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
int pci_find_capability(struct pci_dev *dev, int cap);
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability(struct pci_dev *dev, int cap);
*/
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- struct pci_dev *bridge = pci_upstream_bridge(dev);
-
- while (bridge) {
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
- return bridge;
- bridge = pci_upstream_bridge(bridge);
+ while (dev) {
+ if (pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ dev = pci_upstream_bridge(dev);
}
return NULL;
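Note the semantic change: a device that is itself a Root Port now finds
itself instead of returning NULL. An illustrative caller::

    struct pci_dev *rp = pcie_find_root_port(pdev);

    if (rp)
            pci_info(rp, "Root Port for %s\n", pci_name(pdev));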
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
- int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
- static inline int
- pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range)
- {
- return -EINVAL;
- }
#endif /* CONFIG_OF */
static inline struct device_node *