"^colorful,.*":
description: Colorful GRP, Shenzhen Xueyushi Technology Ltd.
"^compulab,.*":
description: CompuLab Ltd.
+ "^comvetia,.*":
+ description: ComVetia AG
"^congatec,.*":
description: congatec GmbH
"^coolpi,.*":
description: DataImage, Inc.
"^davicom,.*":
description: DAVICOM Semiconductor, Inc.
+ "^deepcomputing,.*":
+ description: DeepComputing (HK) Limited
"^dell,.*":
description: Dell Inc.
"^delta,.*":
description: Japan Display Inc.
"^jedec,.*":
description: JEDEC Solid State Technology Association
+ "^jenson,.*":
+ description: Jenson Display Co. Ltd.
"^jesurun,.*":
description: Shenzhen Jesurun Electronics Business Dept.
"^jethome,.*":
description: Shanghai Neardi Technology Co., Ltd.
"^nec,.*":
description: NEC LCD Technologies, Ltd.
+ "^neofidelity,.*":
+ description: Neofidelity Inc.
"^neonode,.*":
description: Neonode Inc.
"^netgear,.*":
description: Nokia
"^nordic,.*":
description: Nordic Semiconductor
+ "^nothing,.*":
+ description: Nothing Technology Limited
"^novatek,.*":
description: Novatek
"^novtech,.*":
description: Unisoc Communications, Inc.
"^realtek,.*":
description: Realtek Semiconductor Corp.
+ "^relfor,.*":
+ description: Relfor Labs Pvt. Ltd.
"^remarkable,.*":
description: reMarkable AS
"^renesas,.*":
description: Sophgo Technology Inc.
"^sourceparts,.*":
description: Source Parts Inc.
+ "^spacemit,.*":
+ description: SpacemiT (Hangzhou) Technology Co. Ltd
"^spansion,.*":
description: Spansion Inc.
"^sparkfun,.*":
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-aimslab*
AIO
ALIENWARE WMI DRIVER
S: Maintained
+F: Documentation/wmi/devices/alienware-wmi.rst
F: drivers/platform/x86/dell/alienware-wmi.c
ALLEGRO DVT VIDEO IP CORE DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
F: drivers/media/platform/sunxi/sun4i-csi/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
F: drivers/media/platform/sunxi/sun6i-csi/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml
F: drivers/staging/media/sunxi/sun6i-isp/
F: drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-mipi-csi2.yaml
F: drivers/media/platform/sunxi/sun6i-mipi-csi2/
F: drivers/infiniband/hw/efa/
F: include/uapi/rdma/efa-abi.h
+AMD 3D V-CACHE PERFORMANCE OPTIMIZER DRIVER
+S: Supported
+F: Documentation/ABI/testing/sysfs-bus-platform-drivers-amd_x3d_vcache
+F: drivers/platform/x86/amd/x3d_vcache.c
+
AMD ADDRESS TRANSLATION LIBRARY (ATL)
F: Documentation/arch/x86/amd_hsmp.rst
F: arch/x86/include/asm/amd_hsmp.h
F: arch/x86/include/uapi/asm/amd_hsmp.h
-F: drivers/platform/x86/amd/hsmp.c
+F: drivers/platform/x86/amd/hsmp/
AMD IOMMU (AMD-VI)
S: Maintained
F: drivers/i2c/busses/i2c-amd-mp2*
+AMD ASF I2C DRIVER
+S: Supported
+F: drivers/i2c/busses/i2c-amd-asf-plat.c
+
AMD PDS CORE DRIVER
AMD PMF DRIVER
-S: Maintained
+S: Supported
F: Documentation/ABI/testing/sysfs-amd-pmf
F: drivers/platform/x86/amd/pmf/
F: drivers/hid/amd-sfh-hid/
AMD SPI DRIVER
-S: Maintained
+S: Supported
F: drivers/spi/spi-amd.c
AMD XGBE DRIVER
S: Supported
W: http://wiki.analog.com/
W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/sound/adi,*
F: sound/soc/codecs/ad1*
F: sound/soc/codecs/ad7*
F: sound/soc/codecs/adau*
F: Documentation/devicetree/bindings/net/actions,owl-emac.yaml
F: Documentation/devicetree/bindings/pinctrl/actions,*
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
-F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
+F: Documentation/devicetree/bindings/timer/actions,owl-timer.yaml
F: arch/arm/boot/dts/actions/
F: arch/arm/mach-actions/
F: arch/arm64/boot/dts/actions/
S: Maintained
F: Documentation/devicetree/bindings/sound/adi,ssm3515.yaml
+F: Documentation/devicetree/bindings/sound/cirrus,cs42l84.yaml
F: Documentation/devicetree/bindings/sound/apple,*
F: sound/soc/apple/*
F: sound/soc/codecs/cs42l83-i2c.c
+F: sound/soc/codecs/cs42l84.*
F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
ARM/QUALCOMM MAILING LIST
+C: irc://irc.oftc.net/linux-msm
F: Documentation/devicetree/bindings/*/qcom*
F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom/
S: Maintained
+C: irc://irc.oftc.net/linux-msm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
F: Documentation/devicetree/bindings/arm/qcom-soc.yaml
F: Documentation/devicetree/bindings/arm/qcom.yaml
F: Documentation/devicetree/bindings/bus/qcom*
F: Documentation/devicetree/bindings/cache/qcom,llcc.yaml
F: Documentation/devicetree/bindings/firmware/qcom,scm.yaml
-F: Documentation/devicetree/bindings/reserved-memory/qcom
+F: Documentation/devicetree/bindings/reserved-memory/qcom*
F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom/
F: arch/arm/configs/qcom_defconfig
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/asahi-kasei,ak7375.yaml
F: drivers/media/i2c/ak7375.c
AXI PWM GENERATOR
S: Supported
W: https://ez.analog.com/linux-software-drivers
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/dvb-usb-v2/az6007.c
AZTECH FM RADIO RECEIVER DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-aztech*
B43 WIRELESS DRIVER
S: Supported
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/platform/st/sti/bdisp
BECKHOFF CX5020 ETHERCAT MASTER DRIVER
S: Odd fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/driver-api/media/drivers/bttv*
F: drivers/media/pci/bt8xx/bttv*
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-cadet*
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/cafe_ccic*
F: drivers/media/platform/marvell/
S: Supported
W: http://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/ABI/testing/debugfs-cec-error-inj
F: Documentation/devicetree/bindings/media/cec/cec-common.yaml
F: Documentation/driver-api/media/cec-core.rst
S: Supported
W: http://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/cec/cec-gpio.yaml
F: drivers/media/cec/platform/cec-gpio/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
F: drivers/media/cec/i2c/ch7322.c
S: Supported
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/cobalt/
COCCINELLE/Semantic Patches (SmPL)
F: include/linux/configfs.h
F: samples/configfs/
+CONGATEC BOARD CONTROLLER MFD DRIVER
+S: Maintained
+F: drivers/gpio/gpio-cgbc.c
+F: drivers/i2c/busses/i2c-cgbc.c
+F: drivers/mfd/cgbc-core.c
+F: drivers/watchdog/cgbc_wdt.c
+F: include/linux/mfd/cgbc.h
+
CONSOLE SUBSYSTEM
S: Supported
CONTROL GROUP (CGROUP)
CONTROL GROUP - CPUSET
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Odd Fixes
W: http://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/cs3308.c
CS5535 Audio ALSA driver
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/cx18/
F: include/uapi/linux/ivtv*
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/common/cx2341x*
F: include/media/drv-intf/cx2341x.h
S: Odd fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/driver-api/media/drivers/cx88*
F: drivers/media/pci/cx88/
S: Maintained
-W: http://www.linux-mips.org/wiki/DECstation
F: arch/mips/dec/
F: arch/mips/include/asm/dec/
F: arch/mips/include/asm/mach-dec/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
F: drivers/media/platform/sunxi/sun8i-di/
S: Supported
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/platform/st/sti/delta
DENALI NAND DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
F: drivers/media/i2c/dw9714.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/dw9719.c
DONGWOON DW9768 LENS VOICE COIL DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
F: drivers/media/i2c/dw9768.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.yaml
F: drivers/media/i2c/dw9807-vcm.c
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
- F: drivers/gpu/drm/drm_aperture.c
F: drivers/gpu/drm/tiny/ofdrm.c
F: drivers/gpu/drm/tiny/simpledrm.c
F: drivers/video/aperture.c
F: drivers/video/nomodeset.c
- F: include/drm/drm_aperture.h
F: include/linux/aperture.h
F: include/video/nomodeset.h
F: Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+ DRM DRIVER FOR SAMSUNG S6E3HA8 PANELS
+ S: Maintained
+ F: Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
+ F: drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
+
+ DRM DRIVER FOR SHARP MEMORY LCD
+ S: Maintained
+ F: Documentation/devicetree/bindings/display/sharp,ls010b7dh04.yaml
+ F: drivers/gpu/drm/tiny/sharp-memory.c
+
DRM DRIVER FOR SITRONIX ST7586 PANELS
S: Maintained
F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
DRM DRIVERS FOR VC4
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/xlnx/
+ F: Documentation/gpu/zynqmp.rst
F: drivers/gpu/drm/xlnx/
DRM GPU SCHEDULER
F: drivers/gpu/drm/ci/
DSBR100 USB FM RADIO DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/dsbr100.c
DT3155 MEDIA DRIVER
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/dt3155/
DVB_USB_AF9015 MEDIA DRIVER
W: https://linuxtv.org
W: http://github.com/mkrufky
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/dvb-usb/cxusb*
DVB_USB_EC168 MEDIA DRIVER
F: drivers/edac/highbank*
EDAC-CAVIUM OCTEON
-S: Supported
+S: Maintained
F: drivers/edac/octeon_edac*
EDAC-CAVIUM THUNDERX
F: drivers/edac/e7xxx_edac.c
EDAC-FSL_DDR
S: Maintained
F: drivers/edac/fsl_ddr_edac.*
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/em28xx*
F: drivers/media/usb/em28xx/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/cec/usb/extron-da-hd-4k-plus/
EXYNOS DP DRIVER
FREESCALE ENETC ETHERNET DRIVERS
S: Maintained
+F: Documentation/devicetree/bindings/net/fsl,enetc*.yaml
+F: Documentation/devicetree/bindings/net/nxp,netc-blk-ctrl.yaml
F: drivers/net/ethernet/freescale/enetc/
+F: include/linux/fsl/enetc_mdio.h
+F: include/linux/fsl/netc_global.h
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml
F: drivers/media/i2c/gc2145.c
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-gemtek*
GENERIC ARCHITECTURE TOPOLOGY
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/spi/gs1662.c
GSPCA FINEPIX SUBDRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/finepix.c
GSPCA GL860 SUBDRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/gl860/
GSPCA M5602 SUBDRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/m5602/
GSPCA PAC207 SONIXB SUBDRIVER
S: Odd Fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/pac207.c
GSPCA SN9C20X SUBDRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/sn9c20x.c
GSPCA T613 SUBDRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/t613.c
GSPCA USB WEBCAM DRIVER
S: Odd Fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/gspca/
GTP (GPRS Tunneling Protocol)
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/hdpvr/
HEWLETT PACKARD ENTERPRISE ILO CHIF DRIVER
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: Documentation/timers/
F: include/linux/clockchips.h
+F: include/linux/delay.h
F: include/linux/hrtimer.h
F: include/linux/timer.h
F: kernel/time/clockevents.c
F: kernel/time/hrtimer.c
+F: kernel/time/sleep_timeout.c
F: kernel/time/timer.c
F: kernel/time/timer_list.c
F: kernel/time/timer_migration.*
W: http://www.hisilicon.com
F: drivers/net/ethernet/hisilicon/hns3/
+HISILICON NETWORK HIBMCGE DRIVER
+S: Maintained
+F: drivers/net/ethernet/hisilicon/hibmcge/
+
HISILICON NETWORK SUBSYSTEM DRIVER
F: Documentation/mm/vmemmap_dedup.rst
F: fs/hugetlbfs/
F: include/linux/hugetlb.h
+F: include/trace/events/hugetlbfs.h
F: mm/hugetlb.c
F: mm/hugetlb_vmemmap.c
F: mm/hugetlb_vmemmap.h
S: Supported
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/platform/st/sti/hva
HWPOISON MEMORY FAILURE HANDLING
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/hi556.c
HYNIX HI846 SENSOR DRIVER
F: drivers/i2c/busses/i2c-ali1535.c
F: drivers/i2c/busses/i2c-ali1563.c
F: drivers/i2c/busses/i2c-ali15x3.c
-F: drivers/i2c/busses/i2c-amd756-s4882.c
F: drivers/i2c/busses/i2c-amd756.c
F: drivers/i2c/busses/i2c-amd8111.c
F: drivers/i2c/busses/i2c-i801.c
F: drivers/i2c/busses/i2c-isch.c
-F: drivers/i2c/busses/i2c-nforce2-s4985.c
F: drivers/i2c/busses/i2c-nforce2.c
-F: drivers/i2c/busses/i2c-piix4.c
+F: drivers/i2c/busses/i2c-piix4.*
F: drivers/i2c/busses/i2c-sis5595.c
F: drivers/i2c/busses/i2c-sis630.c
F: drivers/i2c/busses/i2c-sis96x.c
F: drivers/dma/ioat*
INTEL IAA CRYPTO DRIVER
-M: Tom Zanussi <tom.zanussi@linux.intel.com>
+M: Kristen Accardi <kristen.c.accardi@intel.com>
S: Supported
F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
F: drivers/media/pci/intel/ipu3/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/ipu6-isys.rst
F: drivers/media/pci/intel/ipu6/
INTEL ISHTP ECLITE DRIVER
-M: Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
+M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
S: Supported
F: drivers/platform/x86/intel/ishtp_eclite.c
S: Maintained
F: Documentation/ABI/testing/debugfs-tpmi
-F: drivers/platform/x86/intel/tpmi.c
+F: drivers/platform/x86/intel/vsec_tpmi.c
F: include/linux/intel_tpmi.h
INTEL UNCORE FREQUENCY CONTROL
F: drivers/iio/gyro/mpu3050*
IOC3 ETHERNET DRIVER
S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-isa*
ISAPNP
F: drivers/isdn/hardware/
F: drivers/isdn/mISDN/
+ISL28022 HARDWARE MONITORING DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/hwmon/renesas,isl28022.yaml
+F: Documentation/hwmon/isl28022.rst
+F: drivers/hwmon/isl28022.c
+
ISOFS FILESYSTEM
Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/tuners/it913x*
+ ITE IT6263 LVDS TO HDMI BRIDGE DRIVER
+ S: Maintained
+ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+ F: Documentation/devicetree/bindings/display/bridge/ite,it6263.yaml
+ F: drivers/gpu/drm/bridge/ite-it6263.c
+
ITE IT66121 HDMI BRIDGE DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/ivtv*
F: drivers/media/pci/ivtv/
F: include/uapi/linux/ivtv*
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-keene*
KERNEL AUTOMOUNTER
KGDB / KDB /debug_core
-M: Daniel Thompson <daniel.thompson@linaro.org>
+M: Daniel Thompson <danielt@kernel.org>
S: Maintained
F: drivers/media/dvb-frontends/m88rs2000*
MA901 MASTERKIT USB FM RADIO DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-ma901.c
MAC80211
F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
+MARVELL PEM PMU DRIVER
+S: Supported
+F: drivers/perf/marvell_pem_pmu.c
+
MARVELL PRESTERA ETHERNET SWITCH DRIVER
S: Supported
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/max2175.txt
F: Documentation/userspace-api/media/drivers/max2175.rst
F: drivers/media/i2c/max2175*
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-maxiradio*
MAXLINEAR ETHERNET PHY DRIVER
S: Supported
W: https://www.linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/mc/
F: include/media/media-*.h
F: include/uapi/linux/media.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/platform/nxp/imx-pxp.[ch]
MEDIA DRIVERS FOR ASCOT2E
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/ascot2e*
MEDIA DRIVERS FOR CXD2099AR CI CONTROLLERS
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/cxd2099*
MEDIA DRIVERS FOR CXD2841ER
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/cxd2841er*
MEDIA DRIVERS FOR CXD2880
S: Supported
W: http://linuxtv.org/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/cxd2880/*
F: drivers/media/spi/cxd2880*
S: Orphan
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/ddbridge/*
MEDIA DRIVERS FOR FREESCALE IMX
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/imx.rst
F: Documentation/devicetree/bindings/media/imx.txt
F: drivers/staging/media/imx/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/imx7.rst
F: Documentation/devicetree/bindings/media/nxp,imx-mipi-csi2.yaml
F: Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/helene*
MEDIA DRIVERS FOR HORUS3A
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/horus3a*
MEDIA DRIVERS FOR LNBH25
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/lnbh25*
MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
S: Orphan
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/mxl5xx*
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/netup_unidvb/*
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/nvidia,tegra-vde.yaml
F: drivers/media/platform/nvidia/tegra-vde/
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,ceu.yaml
F: drivers/media/platform/renesas/renesas-ceu.c
F: include/media/drv-intf/renesas-ceu.h
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,drif.yaml
F: drivers/media/platform/renesas/rcar_drif.c
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,fcp.yaml
F: drivers/media/platform/renesas/rcar-fcp.c
F: include/media/rcar-fcp.h
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,fdp1.yaml
F: drivers/media/platform/renesas/rcar_fdp1.c
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/renesas,vsp1.yaml
F: drivers/media/platform/renesas/vsp1/
S: Orphan
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/stv0910*
MEDIA DRIVERS FOR ST STV6111 TUNER ICs
S: Orphan
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/stv6111*
MEDIA DRIVERS FOR STM32 - DCMI / DCMIPP
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
F: Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml
F: drivers/media/platform/st/stm32/stm32-dcmi.c
S: Maintained
W: https://linuxtv.org
Q: http://patchwork.kernel.org/project/linux-media/list/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/admin-guide/media/
F: Documentation/devicetree/bindings/media/
F: Documentation/driver-api/media/
S: Maintained
-F: drivers/net/phy/mediatek-ge-soc.c
-F: drivers/net/phy/mediatek-ge.c
+F: drivers/net/phy/mediatek/mtk-ge-soc.c
+F: drivers/net/phy/mediatek/mtk-phy-lib.c
+F: drivers/net/phy/mediatek/mtk-ge.c
+F: drivers/net/phy/mediatek/mtk.h
F: drivers/phy/mediatek/phy-mtk-xfi-tphy.c
MEDIATEK I2C CONTROLLER DRIVER
S: Supported
W: http://linux-meson.com/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/cec/amlogic,meson-gx-ao-cec.yaml
F: drivers/media/cec/platform/meson/ao-cec-g12a.c
F: drivers/media/cec/platform/meson/ao-cec.c
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
F: drivers/media/platform/amlogic/meson-ge2d/
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
F: drivers/staging/media/meson/vdec/
F: Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml
F: drivers/irqchip/irq-lan966x-oic.c
+MICROCHIP LAN966X PCI DRIVER
+S: Maintained
+F: drivers/misc/lan966x_pci.c
+F: drivers/misc/lan966x_pci.dtso
+
+MICROCHIP LAN969X ETHERNET DRIVER
+S: Maintained
+F: drivers/net/ethernet/microchip/lan969x/*
+
MICROCHIP LCDFB DRIVER
S: Maintained
-W: http://www.linux-mips.org/
Q: https://patchwork.kernel.org/project/linux-mips/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
F: Documentation/devicetree/bindings/mips/
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-miropcm20*
MITSUMI MM8013 FG DRIVER
F: drivers/hwmon/pmbus/mp9941.c
MR800 AVERMEDIA USB FM RADIO DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-mr800.c
MRF24J40 IEEE 802.15.4 RADIO DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
F: drivers/media/i2c/mt9m114.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml
F: drivers/media/i2c/mt9p031.c
-F: include/media/i2c/mt9p031.h
MT9T112 APTINA CAMERA SENSOR
S: Odd Fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/mt9t112.c
F: include/media/i2c/mt9t112.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt
F: drivers/media/i2c/mt9v032.c
F: include/media/i2c/mt9v032.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
F: drivers/media/i2c/mt9v111.c
F: Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml
F: drivers/hwmon/nct6775-i2c.c
+NCT7363 HARDWARE MONITOR DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/hwmon/nuvoton,nct7363.yaml
+F: Documentation/hwmon/nct7363.rst
+F: drivers/hwmon/nct7363.c
+
NETCONSOLE
S: Maintained
F: tools/testing/selftests/net/netfilter/
NETROM NETWORK LAYER
-S: Maintained
+S: Orphan
W: https://linux-ax25.in-berlin.de
F: include/net/netrom.h
F: include/uapi/linux/netrom.h
F: include/uapi/linux/cn_proc.h
F: include/uapi/linux/ethtool_netlink.h
F: include/uapi/linux/if_*
+F: include/uapi/linux/net_shaper.h
F: include/uapi/linux/netdev*
F: tools/testing/selftests/drivers/net/
X: Documentation/devicetree/bindings/net/bluetooth/
+X: Documentation/devicetree/bindings/net/can/
X: Documentation/devicetree/bindings/net/wireless/
+X: drivers/net/can/
X: drivers/net/wireless/
NETWORKING DRIVERS (WIRELESS)
NETWORKING [DSA]
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/
X: include/net/wext.h
X: net/9p/
X: net/bluetooth/
+X: net/can/
X: net/mac80211/
X: net/rfkill/
X: net/wireless/
F: include/trace/events/mptcp.h
F: include/uapi/linux/mptcp*.h
F: net/mptcp/
-F: tools/testing/selftests/bpf/*/*mptcp*.c
+F: tools/testing/selftests/bpf/*/*mptcp*.[ch]
F: tools/testing/selftests/net/mptcp/
NETWORKING [TCP]
F: Documentation/hwmon/nzxt-kraken3.rst
F: drivers/hwmon/nzxt-kraken3.c
-NZXT-SMART2 HARDWARE MONITORING DRIVER
-S: Maintained
-F: Documentation/hwmon/nzxt-smart2.rst
-F: drivers/hwmon/nzxt-smart2.c
-
OBJAGG
F: Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
F: drivers/i2c/busses/i2c-omap.c
-OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
-S: Maintained
-F: Documentation/devicetree/bindings/media/ti,omap3isp.txt
-F: drivers/media/platform/ti/omap3isp/
-F: drivers/staging/media/omap4iss/
-
OMAP MMC SUPPORT
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov01a10.c
OMNIVISION OV02A10 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
F: drivers/media/i2c/ov02a10.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov08d10.c
OMNIVISION OV08X40 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov08x40.c
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov08x40.yaml
OMNIVISION OV13858 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov13858.c
OMNIVISION OV13B10 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov13b10.c
OMNIVISION OV2680 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
F: drivers/media/i2c/ov2680.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml
F: drivers/media/i2c/ov2685.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov2740.c
OMNIVISION OV4689 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
F: drivers/media/i2c/ov4689.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov5640.c
OMNIVISION OV5647 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
F: drivers/media/i2c/ov5647.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov5670.yaml
F: drivers/media/i2c/ov5670.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov5675.yaml
F: drivers/media/i2c/ov5675.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
F: drivers/media/i2c/ov5693.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov5695.c
OMNIVISION OV64A40 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov64a40.yaml
F: drivers/media/i2c/ov64a40.c
OMNIVISION OV7670 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ov7670.txt
F: drivers/media/i2c/ov7670.c
S: Odd fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml
F: drivers/media/i2c/ov772x.c
F: include/media/i2c/ov772x.h
OMNIVISION OV7740 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ov7740.txt
F: drivers/media/i2c/ov7740.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml
F: drivers/media/i2c/ov8856.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
F: drivers/media/i2c/ov8858.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
F: drivers/media/i2c/ov9282.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/ov9650.txt
F: drivers/media/i2c/ov9650.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/ov9734.c
ONBOARD USB HUB DRIVER
F: Documentation/core-api/packing.rst
F: include/linux/packing.h
F: lib/packing.c
+F: lib/packing_test.c
PADATA PARALLEL EXECUTION MECHANISM
S: Maintained
+C: irc://irc.oftc.net/linux-msm
F: Documentation/devicetree/bindings/pinctrl/qcom,*
F: drivers/pinctrl/qcom/
F: drivers/ptp/ptp_vclock.c
F: net/ethtool/phc_vclocks.c
+PTP VMCLOCK SUPPORT
+S: Maintained
+F: drivers/ptp/ptp_vmclock.c
+F: include/uapi/linux/vmclock-abi.h
+
PTRACE SUPPORT
S: Maintained
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/cec/usb/pulse8/
PURELIFI PLFXLC DRIVER
S: Maintained
W: http://www.isely.net/pvrusb2/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/driver-api/media/drivers/pvrusb2*
F: drivers/media/usb/pvrusb2/
S: Odd Fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/pwc/*
F: include/trace/events/pwc.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-shark.c
RADIOSHARK2 RADIO DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-shark2.c
F: drivers/media/radio/radio-tea5777.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/cec/usb/rainshadow/
RALINK MIPS ARCHITECTURE
F: drivers/media/platform/raspberrypi/pisp_be/
F: include/uapi/linux/media/raspberrypi/
+RASPBERRY PI PISP CAMERA FRONT END
+S: Maintained
+F: Documentation/devicetree/bindings/media/raspberrypi,rp1-cfe.yaml
+F: drivers/media/platform/raspberrypi/rp1-cfe/
+
RC-CORE / LIRC FRAMEWORK
S: Maintained
W: http://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/driver-api/media/rc-core.rst
F: Documentation/userspace-api/media/rc/
F: drivers/media/rc/
F: Documentation/devicetree/bindings/net/dsa/realtek.yaml
F: drivers/net/dsa/realtek/*
+REALTEK SPI-NAND
+S: Maintained
+F: Documentation/devicetree/bindings/spi/realtek,rtl9301-snand.yaml
+F: drivers/spi/spi-realtek-rtl-snand.c
+
REALTEK WIRELESS DRIVER (rtlwifi family)
F: drivers/base/regmap/
F: include/linux/regmap.h
-REISERFS FILE SYSTEM
-S: Obsolete
-F: fs/reiserfs/
-
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
+RENESAS ETHERNET AVB DRIVER
+S: Supported
+F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/ravb*
+
RENESAS ETHERNET SWITCH DRIVER
F: Documentation/devicetree/bindings/sound/renesas,idt821034.yaml
F: sound/soc/codecs/idt821034.c
+RENESAS R-CAR & FSI AUDIO (ASoC) DRIVERS
+S: Supported
+F: Documentation/devicetree/bindings/sound/renesas,rsnd.*
+F: Documentation/devicetree/bindings/sound/renesas,fsi.yaml
+F: sound/soc/renesas/rcar/
+F: sound/soc/renesas/fsi.c
+F: include/sound/sh_fsi.h
+
RENESAS R-CAR GEN3 & RZ/N1 NAND CONTROLLER DRIVER
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
+RENESAS R-CAR SATA DRIVER
+S: Supported
+F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+F: drivers/ata/sata_rcar.c
+
RENESAS R-CAR THERMAL DRIVERS
F: Documentation/devicetree/bindings/i2c/renesas,riic.yaml
F: drivers/i2c/busses/i2c-riic.c
+RENESAS RZ AUDIO (ASoC) DRIVER
+S: Supported
+F: Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
+F: sound/soc/renesas/rz-ssi.c
+
RENESAS RZ/G2L A/D DRIVER
F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
F: drivers/i2c/busses/i2c-rzv2m.c
+RENESAS SUPERH ETHERNET DRIVER
+S: Supported
+F: Documentation/devicetree/bindings/net/renesas,ether.yaml
+F: drivers/net/ethernet/renesas/Kconfig
+F: drivers/net/ethernet/renesas/Makefile
+F: drivers/net/ethernet/renesas/sh_eth*
+F: include/linux/sh_eth.h
+
RENESAS USB PHY DRIVER
F: drivers/char/hw_random/mpfs-rng.c
F: drivers/clk/microchip/clk-mpfs*.c
F: drivers/firmware/microchip/mpfs-auto-update.c
+F: drivers/gpio/gpio-mpfs.c
F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/plda/pcie-microchip-host.c
S: Maintained
Q: https://patchwork.kernel.org/project/linux-riscv/list/
T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
-F: Documentation/devicetree/bindings/riscv/
-F: arch/riscv/boot/dts/
-X: arch/riscv/boot/dts/allwinner/
-X: arch/riscv/boot/dts/renesas/
-X: arch/riscv/boot/dts/sophgo/
-X: arch/riscv/boot/dts/thead/
+F: arch/riscv/boot/dts/canaan/
+F: arch/riscv/boot/dts/microchip/
+F: arch/riscv/boot/dts/sifive/
+F: arch/riscv/boot/dts/starfive/
RISC-V PMU DRIVERS
S: Maintained
T: git https://github.com/pdp7/linux.git
F: Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml
+F: Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
F: arch/riscv/boot/dts/thead/
F: drivers/clk/thead/clk-th1520-ap.c
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
F: include/dt-bindings/clock/thead,th1520-clk-ap.h
RNBD BLOCK DRIVERS
F: include/linux/mfd/rohm-shared.h
ROSE NETWORK LAYER
-S: Maintained
+S: Orphan
W: https://linux-ax25.in-berlin.de
F: include/net/rose.h
F: include/uapi/linux/rose.h
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
F: drivers/media/platform/sunxi/sun8i-rotate/
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl8xxxu/
+RTL9300 I2C DRIVER (rtl9300-i2c)
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/realtek,rtl9301-i2c.yaml
+F: drivers/i2c/busses/i2c-rtl9300.c
+
RTRS TRANSPORT DRIVERS
S: Supported
F: drivers/s390/cio/
+S390 CRYPTO MODULES, PRNG DRIVER, ARCH RANDOM
+S: Supported
+F: arch/s390/crypto/
+F: arch/s390/include/asm/archrandom.h
+F: arch/s390/include/asm/cpacf.h
+
S390 DASD DRIVER
F: drivers/s390/block/dasd*
F: include/linux/dasd_mod.h
+S390 HWRANDOM TRNG DRIVER
+S: Supported
+F: drivers/char/hw_random/s390-trng.c
+
S390 IOMMU (PCI)
F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
+S390 PTP DRIVER
+S: Supported
+F: drivers/ptp/ptp_s390.c
+
S390 SCM DRIVER
F: drivers/vfio/pci/vfio_pci_zdev.c
F: include/uapi/linux/vfio_zdev.h
-S390 ZCRYPT DRIVER
+S390 ZCRYPT AND PKEY DRIVER AND AP BUS
S: Supported
+F: arch/s390/include/asm/ap.h
+F: arch/s390/include/asm/pkey.h
+F: arch/s390/include/asm/trace/zcrypt.h
+F: arch/s390/include/uapi/asm/pkey.h
+F: arch/s390/include/uapi/asm/zcrypt.h
F: drivers/s390/crypto/
S390 ZFCP DRIVER
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER
S: Odd fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/driver-api/media/drivers/saa7134*
F: drivers/media/pci/saa7134/
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/common/saa7146/
F: drivers/media/pci/saa7146/
F: include/media/drv-intf/saa7146*
P: https://github.com/LinuxSecurityModule/kernel/blob/main/README.md
T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+F: include/linux/lsm/
F: include/linux/lsm_audit.h
F: include/linux/lsm_hook_defs.h
F: include/linux/lsm_hooks.h
S: Supported
F: net/smc/
S: Odd fixes
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/rj54n1cb0c.c
F: include/media/i2c/rj54n1cb0c.h
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/silabs,si470x.yaml
F: drivers/media/radio/si470x/radio-si470x-i2c.c
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/si470x/radio-si470x-common.c
F: drivers/media/radio/si470x/radio-si470x-usb.c
F: drivers/media/radio/si470x/radio-si470x.h
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/si4713/si4713.?
SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/si4713/radio-platform-si4713.c
SI4713 FM RADIO TRANSMITTER USB DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER
S: Odd fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/common/siano/
F: drivers/media/mmc/siano/
F: drivers/media/usb/siano/
SOFTWARE RAID (Multiple Disks) SUPPORT
S: Supported
Q: https://patchwork.kernel.org/project/linux-raid/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdraid/linux.git
F: drivers/md/Kconfig
F: drivers/md/Makefile
F: drivers/md/md*
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/imx208.c
SONY IMX214 SENSOR DRIVER
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
F: drivers/media/i2c/imx214.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/imx219.yaml
F: drivers/media/i2c/imx219.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml
F: drivers/media/i2c/imx258.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
F: drivers/media/i2c/imx274.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx283.yaml
F: drivers/media/i2c/imx283.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
F: drivers/media/i2c/imx290.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx296.yaml
F: drivers/media/i2c/imx296.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/imx319.c
SONY IMX334 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
F: drivers/media/i2c/imx334.c
SONY IMX335 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
F: drivers/media/i2c/imx335.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/imx355.c
SONY IMX412 SENSOR DRIVER
S: Orphan
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
F: drivers/media/i2c/imx412.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
F: drivers/media/i2c/imx415.c
W: https://github.com/thesofproject/linux/
F: sound/soc/sof/
+SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card)
+S: Supported
+F: sound/soc/generic/
+F: include/sound/simple_card*
+F: Documentation/devicetree/bindings/sound/simple-card.yaml
+F: Documentation/devicetree/bindings/sound/audio-graph*.yaml
+
SOUNDWIRE SUBSYSTEM
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
F: drivers/media/i2c/st-mipid02.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml
F: Documentation/userspace-api/media/drivers/vgxy61.rst
F: drivers/media/i2c/vgxy61.c
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/stk1160/
STM32 AUDIO (ASoC) DRIVERS
F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
F: drivers/input/keyboard/sun4i-lradc-keys.c
-SUNDANCE NETWORK DRIVER
-S: Maintained
-F: drivers/net/ethernet/dlink/sundance.c
-
SUNPLUS ETHERNET DRIVER
S: Maintained
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/tuners/tda18250*
TDA18271 MEDIA DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER
S: Odd fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/tuners/tea5767.*
TEA6415C MEDIA DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/tea6415c*
TEA6420 MEDIA DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/i2c/tea6420*
TEAM DRIVER
F: include/linux/dma/ti-cppi5.h
X: drivers/dma/ti/cppi41.c
+TEXAS INSTRUMENTS TPS25990 HARDWARE MONITOR DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/hwmon/pmbus/ti,tps25990.yaml
+
TEXAS INSTRUMENTS TPS23861 PoE PSE DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-raremono.c
THERMAL
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml
F: Documentation/userspace-api/media/drivers/thp7312.rst
F: drivers/media/i2c/thp7312.c
F: drivers/net/ethernet/ti/icssg/*
TI J721E CSI2RX DRIVER
S: Maintained
F: Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
TURBOCHANNEL SUBSYSTEM
S: Maintained
-Q: http://patchwork.linux-mips.org/project/linux-mips/list/
+Q: https://patchwork.kernel.org/project/linux-mips/list/
F: drivers/tc/
F: include/linux/tc.h
S: Odd Fixes
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/tw68/
TW686X VIDEO4LINUX DRIVER
S: Maintained
W: http://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/pci/tw686x/
U-BOOT ENVIRONMENT VARIABLES
USB VIDEO CLASS
S: Maintained
W: http://www.ideasonboard.org/uvc/
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/usb/uvc/
F: include/uapi/linux/uvcvideo.h
USERSPACE DMA BUFFER DRIVER
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
S: Maintained
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/v4l2-core/v4l2-async.c
F: drivers/media/v4l2-core/v4l2-fwnode.c
F: include/media/v4l2-async.h
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vicodec/*
VIDEO I2C POLLING DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vidtv/*
VIMC VIRTUAL MEDIA CONTROLLER DRIVER
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vimc/*
VIRT LIB
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vivid/*
VM SOCKETS (AF_VSOCK)
S: Maintained
W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: drivers/media/tuners/xc2028.*
XDP (eXpress Data Path)
S: Supported
-T: git git://linuxtv.org/media_tree.git
+T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/xilinx/
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+/* TC358768_DSICMD_TX (0x0600) register */
+#define TC358768_DSI_CMDTX_DC_START BIT(0)
+
static const char * const tc358768_supplies[] = {
"vddc", "vddmipi", "vddio"
};
tc358768_write(priv, reg, tmp);
}
+static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
+{
+ u32 val;
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
+ if (priv->error)
+ return;
+
+ /* wait transfer completion */
+ priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
+ (val & TC358768_DSI_CMDTX_DC_START) == 0,
+ 100, 100000);
+}
+
static int tc358768_sw_reset(struct tc358768_priv *priv)
{
/* Assert Reset */
ret = -EINVAL;
ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
if (ep) {
- ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ if (ret)
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
of_node_put(ep);
}
}
}
- /* start transfer */
- tc358768_write(priv, TC358768_DSICMD_TX, 1);
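+ /* start the transfer and wait for the DC_START bit to self-clear */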
+ tc358768_dsicmd_tx(priv);
ret = tc358768_clear_error(priv);
if (ret)
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO AYANEO 2 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Founder */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO GEEK */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
}, { /* Lenovo Yoga Tab 3 X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU
+ depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select I2C
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+ #include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct drm_display_mode mode = {};
adjusted_mode->crtc_clock /= 2;
/* pixel counter doesn't work on i965gm TV output */
- if (IS_I965GM(dev_priv))
+ if (display->platform.i965gm)
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
struct intel_atomic_state *state =
to_intel_atomic_state(pipe_config->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
adjusted_mode->name[0] = '\0';
/* pixel counter doesn't work on i965gm TV output */
- if (IS_I965GM(dev_priv))
+ if (display->platform.i965gm)
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev_priv))
+ if (display->platform.i915gm)
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(display, tv_mode, burst_ena);
* The TV sense state should be cleared to zero on cantiga platform. Otherwise
* the TV is misdetected. This is hardware requirement.
*/
- if (IS_GM45(dev_priv))
+ if (display->platform.gm45)
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
HSW_MI_PREDICATE_RESULT_2,
- IS_HASWELL_GT3(i915) ?
+ INTEL_INFO(i915)->gt == 3 ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 fault;
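+ /* PAGE_MASK is unsigned long; a matching type keeps the masking and the "%l" formats below consistent */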
+ unsigned long fault;
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
gt_dbg(gt, "Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
+ "\tSource ID: %ld\n"
+ "\tType: %ld\n",
fault & PAGE_MASK,
fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT",
/* protects the irq masks */
spinlock_t irq_lock;
+ bool irqs_enabled;
/* Sideband mailbox protection */
struct mutex sb_lock;
struct intel_pxp *pxp;
- bool irq_enabled;
-
struct i915_pmu pmu;
/* The TTM device structure. */
(IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
#define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE)
- #define IS_IVB_GT1(i915) (IS_IVYBRIDGE(i915) && \
- INTEL_INFO(i915)->gt == 1)
#define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW)
#define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL)
*/
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
+ #define IS_PANTHERLAKE(i915) (0 && i915)
-#define IS_ARROWLAKE(i915) \
- IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
+#define IS_ARROWLAKE_H(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
+#define IS_ARROWLAKE_U(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
+#define IS_ARROWLAKE_S(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BROADWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
- #define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \
- INTEL_INFO(i915)->gt == 3)
#define IS_HASWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
- #define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \
- INTEL_INFO(i915)->gt == 3)
- #define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \
- INTEL_INFO(i915)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HASWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KABYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
- #define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
- #define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
- #define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 4)
- #define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
- #define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
#define IS_COFFEELAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COFFEELAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
- #define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
- #define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
-
#define IS_COMETLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COMETLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
- #define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
#define IS_ICL_WITH_PORT_F(i915) \
IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
- #define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
- #define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
- #define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
+ #define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
+ #define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
- (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915))
+ (IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
#define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps)
+ #define HAS_PXP(i915) \
+ (IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp)
+
#define HAS_HECI_PXP(i915) \
(INTEL_INFO(i915)->has_heci_pxp)
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
- #define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \
+ #define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \
2 : HAS_L3_DPF(i915))
#define HAS_GUC_DEPRIVILEGE(i915) \
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
- #include <drm/intel/i915_pciids.h>
+ #include <drm/intel/pciids.h>
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
INTEL_DG2_G12_IDS(ID),
};
-static const u16 subplatform_arl_ids[] = {
- INTEL_ARL_IDS(ID),
+static const u16 subplatform_arl_h_ids[] = {
+ INTEL_ARL_H_IDS(ID),
+};
+
+static const u16 subplatform_arl_u_ids[] = {
+ INTEL_ARL_U_IDS(ID),
+};
+
+static const u16 subplatform_arl_s_ids[] = {
+ INTEL_ARL_S_IDS(ID),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
- } else if (find_devid(devid, subplatform_arl_ids,
- ARRAY_SIZE(subplatform_arl_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_ARL);
+ } else if (find_devid(devid, subplatform_arl_h_ids,
+ ARRAY_SIZE(subplatform_arl_h_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_H);
+ } else if (find_devid(devid, subplatform_arl_u_ids,
+ ARRAY_SIZE(subplatform_arl_u_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_U);
+ } else if (find_devid(devid, subplatform_arl_s_ids,
+ ARRAY_SIZE(subplatform_arl_s_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
-#define INTEL_SUBPLATFORM_ARL 0
+#define INTEL_SUBPLATFORM_ARL_H 0
+#define INTEL_SUBPLATFORM_ARL_U 1
+#define INTEL_SUBPLATFORM_ARL_S 2
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
- func(is_lp); \
func(require_force_probe); \
func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
#include <drm/drm_auth.h>
#include <drm/drm_managed.h>
+
+#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>
void *stream;
int err;
- stream = kzalloc(stream_size, GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
-
- if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
- err = -EFAULT;
- goto err_free;
- }
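+ /* memdup_user() allocates the kernel buffer and copies from user space in one call. */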
+ stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size);
+ if (IS_ERR(stream))
+ return PTR_ERR(stream);
err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
- if (err)
- goto err_free;
-
- kfree(stream);
-
- return 0;
- err_free:
kfree(stream);
return err;
return err;
}
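+ /* Track the context on the owning file so it can be found when the file is destroyed. */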
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_add_tail(&ctx->file_link, &pvr_file->contexts);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
return 0;
err_destroy_fw_obj:
container_of(ref_count, struct pvr_context, ref_count);
struct pvr_device *pvr_dev = ctx->pvr_dev;
+ WARN_ON(in_interrupt());
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_del(&ctx->file_link);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
pvr_context_destroy_queues(ctx);
pvr_fw_object_destroy(ctx->fw_obj);
*/
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct pvr_context *ctx;
unsigned long handle;
xa_for_each(&pvr_file->ctx_handles, handle, ctx)
pvr_context_destroy(pvr_file, handle);
+
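+ /*
+ * Contexts still referenced elsewhere keep their VM context alive.
+ * Unmap it here, dropping the list lock around the unmap since it
+ * may sleep.
+ */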
+ spin_lock(&pvr_dev->ctx_list_lock);
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+
+ while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
+ list_del_init(&ctx->file_link);
+
+ if (pvr_context_get_if_referenced(ctx)) {
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
+ pvr_vm_unmap_all(ctx->vm_ctx);
+
+ pvr_context_put(ctx);
+ spin_lock(&pvr_dev->ctx_list_lock);
+ }
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+ }
+ spin_unlock(&pvr_dev->ctx_list_lock);
}
/**
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+ spin_lock_init(&pvr_dev->ctx_list_lock);
}
/**
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
return ret;
}
- static __always_inline u64
+ static __always_inline __maybe_unused u64
pvr_fw_version_packed(u32 major, u32 minor)
{
return ((u64)major << 32) | minor;
*/
pvr_file->pvr_dev = pvr_dev;
+ INIT_LIST_HEAD(&pvr_file->contexts);
+
xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
+#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
}
/**
- * pvr_vm_context_release() - Teardown a VM context.
- * @ref_count: Pointer to reference counter of the VM context.
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
*
* This function ensures that no mappings are left dangling by unmapping them
* all in order of ascending device-virtual address.
*/
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range));
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function also ensures that no mappings are left dangling by calling
+ * pvr_vm_unmap_all().
+ */
static void
pvr_vm_context_release(struct kref *ref_count)
{
if (vm_ctx->fw_mem_ctx_obj)
pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
+ pvr_vm_unmap_all(vm_ctx);
pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
drm_gem_private_object_fini(&vm_ctx->dummy_gem);
xa_lock(&pvr_file->vm_ctx_handles);
vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
- if (vm_ctx)
- kref_get(&vm_ctx->ref_count);
-
+ pvr_vm_context_get(vm_ctx);
xa_unlock(&pvr_file->vm_ctx_handles);
return vm_ctx;
if (!adreno_gpu->info->fw[i])
continue;
- /* Skip loading GMU firwmare with GMU Wrapper */
+ /* Skip loading GMU firmware with GMU Wrapper */
if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
continue;
int adreno_hw_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
VERB("%s", gpu->name);
+ if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
+ qcom_scm_set_gpu_smmu_aperture_is_available()) {
+ /* We currently always use context bank 0, so hard code this */
+ ret = qcom_scm_set_gpu_smmu_aperture(0);
+ if (ret)
+ DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
+ }
+
for (int i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched);
+ drm_sched_start(&vm->sched, 0);
}
/**
if (!size)
break;
+
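+ /* Only the first scatterlist entry uses a non-zero offset; clear it for the rest. */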
+ offset = 0;
}
return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
{
struct panthor_vm *vm;
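+ /* Hold the xarray lock so the vm cannot be freed between the lookup and panthor_vm_get() taking its reference. */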
+ xa_lock(&pool->xa);
vm = panthor_vm_get(xa_load(&pool->xa, handle));
+ xa_unlock(&pool->xa);
return vm;
}
* which passes iova as an unsigned long. Patch the mmu_features to reflect this
* limitation.
*/
- if (sizeof(unsigned long) * 8 < va_bits) {
+ if (va_bits > BITS_PER_LONG) {
ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
- ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
+ ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
}
return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI && MMU && HAS_IOPORT
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/bug.h>
+ #include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
- #include <drm/drm_aperture.h>
+ #include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_client_setup.h>
+ #include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
- #include <drm/drm_fbdev_ttm.h>
+ #include <drm/drm_fbdev_shmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+ #include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
- #include <drm/drm_gem_vram_helper.h>
+ #include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
+ #include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
- #include <drm/drm_simple_kms_helper.h>
#include <video/vga.h>
};
struct bochs_device {
+ struct drm_device dev;
+
/* hw */
void __iomem *mmio;
int ioports;
u16 yres_virtual;
u32 stride;
u32 bpp;
- const struct drm_edid *drm_edid;
/* drm */
- struct drm_device *dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
};
+ static struct bochs_device *to_bochs_device(const struct drm_device *dev)
+ {
+ return container_of(dev, struct bochs_device, dev);
+ }
+
/* ---------------------------------------------------------------------- */
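+/* Prefer the MMIO BAR; without HAS_IOPORT it is the only way to reach the registers. */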
+static __always_inline bool bochs_uses_mmio(struct bochs_device *bochs)
+{
+ return !IS_ENABLED(CONFIG_HAS_IOPORT) || bochs->mmio;
+}
+
static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
{
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = ioport - 0x3c0 + 0x400;
writeb(val, bochs->mmio + offset);
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return 0xff;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = ioport - 0x3c0 + 0x400;
return readb(bochs->mmio + offset);
{
u16 ret = 0;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = 0x500 + (reg << 1);
ret = readw(bochs->mmio + offset);
static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
{
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = 0x500 + (reg << 1);
writew(val, bochs->mmio + offset);
#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
#endif
- static int bochs_get_edid_block(void *data, u8 *buf,
- unsigned int block, size_t len)
+ static int bochs_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
struct bochs_device *bochs = data;
size_t i, start = block * EDID_LENGTH;
+ if (!bochs->mmio)
+ return -1;
+
if (start + len > 0x400 /* vga register offset */)
return -1;
return 0;
}
- static int bochs_hw_load_edid(struct bochs_device *bochs)
+ static const struct drm_edid *bochs_hw_read_edid(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct bochs_device *bochs = to_bochs_device(dev);
u8 header[8];
- if (!bochs->mmio)
- return -1;
-
/* Check the header to detect whether EDID support is enabled in qemu. */
bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header));
if (drm_edid_header_is_valid(header) != 8)
- return -1;
+ return NULL;
- drm_edid_free(bochs->drm_edid);
- bochs->drm_edid = drm_edid_read_custom(&bochs->connector,
- bochs_get_edid_block, bochs);
- if (!bochs->drm_edid)
- return -1;
+ drm_dbg(dev, "Found EDID data blob.\n");
- return 0;
+ return drm_edid_read_custom(connector, bochs_get_edid_block, bochs);
}
- static int bochs_hw_init(struct drm_device *dev)
+ static int bochs_hw_init(struct bochs_device *bochs)
{
- struct bochs_device *bochs = dev->dev_private;
+ struct drm_device *dev = &bochs->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
if (pdev->resource[2].flags & IORESOURCE_MEM) {
+ ioaddr = pci_resource_start(pdev, 2);
+ iosize = pci_resource_len(pdev, 2);
/* mmio bar with vga and bochs registers present */
- if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+ if (!devm_request_mem_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) {
DRM_ERROR("Cannot request mmio region\n");
return -EBUSY;
}
- ioaddr = pci_resource_start(pdev, 2);
- iosize = pci_resource_len(pdev, 2);
- bochs->mmio = ioremap(ioaddr, iosize);
+ bochs->mmio = devm_ioremap(&pdev->dev, ioaddr, iosize);
if (bochs->mmio == NULL) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
}
- } else {
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
ioaddr = VBE_DISPI_IOPORT_INDEX;
iosize = 2;
- if (!request_region(ioaddr, iosize, "bochs-drm")) {
+ if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) {
DRM_ERROR("Cannot request ioports\n");
return -EBUSY;
}
bochs->ioports = 1;
+ } else {
+ dev_err(dev->dev, "I/O ports are not supported\n");
+ return -EIO;
}
id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
size = min(size, mem);
}
- if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+ if (!devm_request_mem_region(&pdev->dev, addr, size, "bochs-drm"))
DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
- bochs->fb_map = ioremap(addr, size);
+ bochs->fb_map = devm_ioremap_wc(&pdev->dev, addr, size);
if (bochs->fb_map == NULL) {
DRM_ERROR("Cannot map framebuffer\n");
return -ENOMEM;
return 0;
}
- static void bochs_hw_fini(struct drm_device *dev)
- {
- struct bochs_device *bochs = dev->dev_private;
-
- /* TODO: shot down existing vram mappings */
-
- if (bochs->mmio)
- iounmap(bochs->mmio);
- if (bochs->ioports)
- release_region(VBE_DISPI_IOPORT_INDEX, 2);
- if (bochs->fb_map)
- iounmap(bochs->fb_map);
- pci_release_regions(to_pci_dev(dev->dev));
- drm_edid_free(bochs->drm_edid);
- }
-
static void bochs_hw_blank(struct bochs_device *bochs, bool blank)
{
DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
{
int idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
bochs->xres = mode->hdisplay;
{
int idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
unsigned long offset;
unsigned int vx, vy, vwidth, idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
bochs->stride = stride;
/* ---------------------------------------------------------------------- */
- static const uint32_t bochs_formats[] = {
+ static const uint32_t bochs_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGRX8888,
};
- static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state)
+ static int bochs_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
- if (!state->fb || !bochs->stride)
+ return 0;
+ }
+
+ static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+ {
+ struct drm_device *dev = plane->dev;
+ struct bochs_device *bochs = to_bochs_device(dev);
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+
+ if (!fb || !bochs->stride)
return;
- gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
- gpu_addr = drm_gem_vram_offset(gbo);
- if (WARN_ON_ONCE(gpu_addr < 0))
- return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
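+ /* Copy only the damaged clip rectangles from the shadow buffer into VRAM. */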
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, &damage));
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
+ }
+
+ /* Always scanout image at VRAM offset 0 */
bochs_hw_setbase(bochs,
- state->crtc_x,
- state->crtc_y,
- state->fb->pitches[0],
- state->fb->offsets[0] + gpu_addr);
- bochs_hw_setformat(bochs, state->fb->format);
+ plane_state->crtc_x,
+ plane_state->crtc_y,
+ fb->pitches[0],
+ 0);
+ bochs_hw_setformat(bochs, fb->format);
}
- static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+ static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = bochs_primary_plane_helper_atomic_check,
+ .atomic_update = bochs_primary_plane_helper_atomic_update,
+ };
+
+ static const struct drm_plane_funcs bochs_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+ };
+
+ static void bochs_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
bochs_hw_setmode(bochs, &crtc_state->mode);
- bochs_plane_update(bochs, plane_state);
}
- static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe)
+ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- bochs_hw_blank(bochs, true);
+ if (!crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
}
- static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+ static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ }
- bochs_plane_update(bochs, pipe->plane.state);
+ static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *crtc_state)
+ {
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, true);
}
- static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
- .enable = bochs_pipe_enable,
- .disable = bochs_pipe_disable,
- .update = bochs_pipe_update,
- .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
- .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
+ static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = {
+ .mode_set_nofb = bochs_crtc_helper_mode_set_nofb,
+ .atomic_check = bochs_crtc_helper_atomic_check,
+ .atomic_enable = bochs_crtc_helper_atomic_enable,
+ .atomic_disable = bochs_crtc_helper_atomic_disable,
+ };
+
+ static const struct drm_crtc_funcs bochs_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
- static int bochs_connector_get_modes(struct drm_connector *connector)
+ static const struct drm_encoder_funcs bochs_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+ };
+
+ static int bochs_connector_helper_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *edid;
int count;
- count = drm_edid_connector_add_modes(connector);
+ edid = bochs_hw_read_edid(connector);
- if (!count) {
+ if (edid) {
+ drm_edid_connector_update(connector, edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
count = drm_add_modes_noedid(connector, 8192, 8192);
drm_set_preferred_mode(connector, defx, defy);
}
+
return count;
}
- static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
- .get_modes = bochs_connector_get_modes,
+ static const struct drm_connector_helper_funcs bochs_connector_helper_funcs = {
+ .get_modes = bochs_connector_helper_get_modes,
};
- static const struct drm_connector_funcs bochs_connector_connector_funcs = {
+ static const struct drm_connector_funcs bochs_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
- static void bochs_connector_init(struct drm_device *dev)
+ static enum drm_mode_status bochs_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_connector *connector = &bochs->connector;
-
- drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs);
-
- bochs_hw_load_edid(bochs);
- if (bochs->drm_edid) {
- DRM_INFO("Found EDID data blob.\n");
- drm_connector_attach_edid_property(connector);
- drm_edid_connector_update(&bochs->connector, bochs->drm_edid);
- }
- }
+ struct bochs_device *bochs = to_bochs_device(dev);
+ const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
+ u64 pitch;
- static struct drm_framebuffer *
- bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd)
- {
- if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
- mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(dev, !format))
+ return MODE_ERROR;
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
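+ /* Reject modes whose framebuffer would not fit into VRAM. */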
+ if (mode->vdisplay > DIV_ROUND_DOWN_ULL(bochs->fb_size, pitch))
+ return MODE_MEM;
- return drm_gem_fb_create(dev, file, mode_cmd);
+ return MODE_OK;
}
- static const struct drm_mode_config_funcs bochs_mode_funcs = {
- .fb_create = bochs_gem_fb_create,
- .mode_valid = drm_vram_helper_mode_valid,
+ static const struct drm_mode_config_funcs bochs_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = bochs_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int bochs_kms_init(struct bochs_device *bochs)
{
+ struct drm_device *dev = &bochs->dev;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
int ret;
- ret = drmm_mode_config_init(bochs->dev);
+ ret = drmm_mode_config_init(dev);
if (ret)
return ret;
- bochs->dev->mode_config.max_width = 8192;
- bochs->dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
+
+ dev->mode_config.funcs = &bochs_mode_config_funcs;
- bochs->dev->mode_config.preferred_depth = 24;
- bochs->dev->mode_config.prefer_shadow = 0;
- bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
+ primary_plane = &bochs->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &bochs_primary_plane_funcs,
+ bochs_primary_plane_formats,
+ ARRAY_SIZE(bochs_primary_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(primary_plane, &bochs_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ crtc = &bochs->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &bochs_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &bochs_crtc_helper_funcs);
- bochs->dev->mode_config.funcs = &bochs_mode_funcs;
+ encoder = &bochs->encoder;
+ ret = drm_encoder_init(dev, encoder, &bochs_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
- bochs_connector_init(bochs->dev);
- drm_simple_display_pipe_init(bochs->dev,
- &bochs->pipe,
- &bochs_pipe_funcs,
- bochs_formats,
- ARRAY_SIZE(bochs_formats),
- NULL,
- &bochs->connector);
+ connector = &bochs->connector;
+ ret = drm_connector_init(dev, connector, &bochs_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret)
+ return ret;
+ drm_connector_helper_add(connector, &bochs_connector_helper_funcs);
+ drm_connector_attach_edid_property(connector);
+ drm_connector_attach_encoder(connector, encoder);
- drm_mode_config_reset(bochs->dev);
+ drm_mode_config_reset(dev);
return 0;
}
/* ---------------------------------------------------------------------- */
/* drm interface */
- static int bochs_load(struct drm_device *dev)
+ static int bochs_load(struct bochs_device *bochs)
{
- struct bochs_device *bochs;
int ret;
- bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
- if (bochs == NULL)
- return -ENOMEM;
- dev->dev_private = bochs;
- bochs->dev = dev;
-
- ret = bochs_hw_init(dev);
+ ret = bochs_hw_init(bochs);
if (ret)
return ret;
- ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size);
- if (ret)
- goto err_hw_fini;
-
ret = bochs_kms_init(bochs);
if (ret)
- goto err_hw_fini;
+ return ret;
return 0;
-
- err_hw_fini:
- bochs_hw_fini(dev);
- return ret;
}
DEFINE_DRM_GEM_FOPS(bochs_fops);
.date = "20130925",
.major = 1,
.minor = 0,
- DRM_GEM_VRAM_DRIVER,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
/* ---------------------------------------------------------------------- */
static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bochs_device *bochs;
struct drm_device *dev;
- unsigned long fbsize;
int ret;
- fbsize = pci_resource_len(pdev, 0);
- if (fbsize < 4 * 1024 * 1024) {
- DRM_ERROR("less than 4 MB video memory, ignoring device\n");
- return -ENOMEM;
- }
-
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, bochs_driver.name);
if (ret)
return ret;
- dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev);
+ if (IS_ERR(bochs))
+ return PTR_ERR(bochs);
+ dev = &bochs->dev;
ret = pcim_enable_device(pdev);
if (ret)
pci_set_drvdata(pdev, dev);
- ret = bochs_load(dev);
+ ret = bochs_load(bochs);
if (ret)
goto err_free_dev;
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_hw_fini;
+ goto err_free_dev;
+
+ drm_client_setup(dev, NULL);
- drm_fbdev_ttm_setup(dev, 32);
return ret;
- err_hw_fini:
- bochs_hw_fini(dev);
err_free_dev:
drm_dev_put(dev);
return ret;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- bochs_hw_fini(dev);
drm_dev_put(dev);
}
*/
+ #include <linux/aperture.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <video/cirrus.h>
#include <video/vga.h>
- #include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
+ #include <drm/drm_client_setup.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
cirrus_mode_set(cirrus, &crtc_state->mode);
+#ifdef CONFIG_HAS_IOPORT
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W);
+#endif
drm_dev_exit(idx);
}
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, cirrus_driver.name);
if (ret)
return ret;
if (ret)
return ret;
- drm_fbdev_shmem_setup(dev, 16);
+ drm_client_setup(dev, NULL);
return 0;
}
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
struct vmw_plane_state *vps)
{
- struct vmw_private *dev_priv = vcp->base.dev->dev_private;
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
u32 i;
- vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ if (WARN_ON(!bo))
+ return -EINVAL;
return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}
select SHMEM
select TMPFS
select DRM_BUDDY
+ select DRM_CLIENT_SELECTION
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
select DRM_SUBALLOC_HELPER
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
config DRM_XE_DISPLAY
bool "Enable display support"
- depends on DRM_XE && DRM_XE=m
+ depends on DRM_XE && DRM_XE=m && HAS_IOPORT
select FB_IOMEM_HELPERS
select I2C
select I2C_ALGOBIT
#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
#define LTCDD_CLKGATE_DIS REG_BIT(10)
+ #define UNSLCGCTL9454 XE_REG(0x9454)
+ #define LSCFE_CLKGATE_DIS REG_BIT(4)
+
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define L3_CLKGATE_DIS REG_BIT(16)
#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
#define FORCEWAKE_RENDER XE_REG(0xa278)
+
+ #define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0)
+ #define MEDIA_SLICE3_AWAKE_STATUS REG_BIT(4)
+ #define MEDIA_SLICE2_AWAKE_STATUS REG_BIT(3)
+ #define MEDIA_SLICE1_AWAKE_STATUS REG_BIT(2)
+ #define RENDER_AWAKE_STATUS REG_BIT(1)
+ #define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0)
+
#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
#define FORCEWAKE_GSC XE_REG(0xa618)
* [4-6] RSVD
* [7] Disabled
*/
-#define CCS_MODE XE_REG(0x14804)
+#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED)
#define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */
#define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */
#define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
- /*
- * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
- * On newer platforms, VFs are using memory-based interrupts instead.
- * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
- */
-
- #define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
- #define INTR_GSC REG_BIT(31)
- #define INTR_GUC REG_BIT(25)
- #define INTR_MGUC REG_BIT(24)
- #define INTR_BCS8 REG_BIT(23)
- #define INTR_BCS(x) REG_BIT(15 - (x))
- #define INTR_CCS(x) REG_BIT(4 + (x))
- #define INTR_RCS0 REG_BIT(0)
- #define INTR_VECS(x) REG_BIT(31 - (x))
- #define INTR_VCS(x) REG_BIT(x)
-
- #define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
- #define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
- #define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
- #define ENGINE1_MASK REG_GENMASK(31, 16)
- #define ENGINE0_MASK REG_GENMASK(15, 0)
- #define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
- #define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
- #define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
-
- #define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
- #define INTR_DATA_VALID REG_BIT(31)
- #define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
- #define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
- #define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
- #define OTHER_GUC_INSTANCE 0
- #define OTHER_GSC_HECI2_INSTANCE 3
- #define OTHER_GSC_INSTANCE 6
-
- #define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
- #define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
- #define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
- #define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
- #define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
- #define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
- #define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
- #define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
- #define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
- #define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
- #define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
- #define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
- #define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
- #define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
- #define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
- #define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
- #define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
- #define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
- #define GSC_ER_COMPLETE REG_BIT(5)
- #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
- #define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
- #define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+ #define SFC_DONE(n) XE_REG(0x1cc000 + (n) * 0x1000)
#endif
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include <linux/random.h>
+ #include <linux/iosys-map.h>
+ #include <linux/math64.h>
+ #include <linux/prandom.h>
+ #include <linux/swap.h>
+
+ #include <uapi/linux/sysinfo.h>
+
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
evict_test_run_device(xe);
}
+ struct xe_bo_link {
+ struct list_head link;
+ struct xe_bo *bo;
+ u32 val;
+ };
+
+ #define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M)
+
+ static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state,
+ struct xe_bo_link *link)
+ {
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t __maybe_unused i;
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ iosys_map_wr(&map, i, u32, val);
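+ /* Record the first word so the verify pass can spot a PRNG desync. */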
+ if (i == 0)
+ link->val = val;
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+ return 0;
+ }
+
+ static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo,
+ unsigned int bo_nr, struct rnd_state *state,
+ struct xe_bo_link *link)
+ {
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t i;
+ bool failed = false;
+
+ if (ret) {
+ KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
+ return true;
+ }
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ if (iosys_map_rd(&map, i, u32) != val) {
+ KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
+ bo_nr, (unsigned long long)i);
+ kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
+ (unsigned int)iosys_map_rd(&map, i, u32), val);
+ if (i == 0 && val != link->val)
+ kunit_info(test, "Looks like PRNG is out of sync.\n");
+ failed = true;
+ break;
+ }
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+
+ return failed;
+ }
+
+ /*
+ * Try to create system bos corresponding to twice the amount
+ * of available system memory to test shrinker functionality.
+ * If no swap space is available to accommodate the
+ * memory overcommit, mark bos purgeable.
+ */
+ static int shrink_test_run_device(struct xe_device *xe)
+ {
+ struct kunit *test = kunit_get_current_test();
+ LIST_HEAD(bos);
+ struct xe_bo_link *link, *next;
+ struct sysinfo si;
+ u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit;
+ unsigned int interrupted = 0, successful = 0, count = 0;
+ struct rnd_state prng;
+ u64 rand_seed;
+ bool failed = false;
+
+ rand_seed = get_random_u64();
+ prandom_seed_state(&prng, rand_seed);
+ kunit_info(test, "Random seed is 0x%016llx.\n",
+ (unsigned long long)rand_seed);
+
+ /* Skip if execution time is expected to be too long. */
+
+ limit = SZ_32G;
+ /* IGFX with flat CCS needs to copy when swapping / shrinking */
+ if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+ limit = SZ_16G;
+
+ si_meminfo(&si);
+ ram = (size_t)si.freeram * si.mem_unit;
+ if (ram > limit) {
+ kunit_skip(test, "Expected execution time too long.\n");
+ return 0;
+ }
+ to_alloc = ram * 2;
+
+ ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
+ if (to_alloc > ram_and_swap)
+ purgeable = to_alloc - ram_and_swap;
+ purgeable += div64_u64(purgeable, 5);
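+ /* Add a 20% margin to the amount marked purgeable. */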
+
+ kunit_info(test, "Free ram is %lu bytes. Will allocate twice that amount.\n",
+ (unsigned long)ram);
+ for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) {
+ struct xe_bo *bo;
+ unsigned int mem_type;
+ struct xe_ttm_tt *xe_tt;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ KUNIT_FAIL(test, "Unexpected link allocation failure\n");
+ failed = true;
+ break;
+ }
+
+ INIT_LIST_HEAD(&link->link);
+
+ /* We could create the bos with WC caching here, but it is slower. */
+ bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ DRM_XE_GEM_CPU_CACHING_WB,
+ XE_BO_FLAG_SYSTEM);
+ if (IS_ERR(bo)) {
+ if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
+ bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
+ KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
+ kfree(link);
+ failed = true;
+ break;
+ }
+ xe_bo_lock(bo, false);
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+
+ /*
+ * Allocate purgeable bos first, because if we do it the
+ * other way around, they may not be subject to swapping...
+ */
+ if (alloced < purgeable) {
+ xe_tt->purgeable = true;
+ bo->ttm.priority = 0;
+ } else {
+ int ret = shrink_test_fill_random(bo, &prng, link);
+
+ if (ret) {
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
+ ERR_PTR(ret));
+ kfree(link);
+ failed = true;
+ break;
+ }
+ }
+
+ mem_type = bo->ttm.resource->mem_type;
+ xe_bo_unlock(bo);
+ link->bo = bo;
+ list_add_tail(&link->link, &bos);
+
+ if (mem_type != XE_PL_TT) {
+ KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n",
+ bo->ttm.resource->mem_type);
+ failed = true;
+ }
+ cond_resched();
+ if (signal_pending(current))
+ break;
+ }
+
+ /*
+ * Read back and destroy bos. Reset the pseudo-random seed to get an
+ * identical pseudo-random number sequence for readback.
+ */
+ prandom_seed_state(&prng, rand_seed);
+ list_for_each_entry_safe(link, next, &bos, link) {
+ static struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct xe_bo *bo = link->bo;
+ struct xe_ttm_tt *xe_tt;
+ int ret;
+
+ count++;
+ if (!signal_pending(current) && !failed) {
+ bool purgeable, intr = false;
+
+ xe_bo_lock(bo, false);
+
+ /* xe_tt->purgeable is cleared on validate. */
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+ purgeable = xe_tt->purgeable;
+ do {
+ ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx);
+ if (ret == -EINTR)
+ intr = true;
+ } while (ret == -EINTR && !signal_pending(current));
+
+ if (!ret && !purgeable)
+ failed = shrink_test_verify(test, bo, count, &prng, link);
+
+ xe_bo_unlock(bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Validation failed: %pe\n",
+ ERR_PTR(ret));
+ failed = true;
+ } else if (intr) {
+ interrupted++;
+ } else {
+ successful++;
+ }
+ }
+ xe_bo_put(link->bo);
+ list_del(&link->link);
+ kfree(link);
+ }
+ kunit_info(test, "Readbacks interrupted: %u successful: %u\n",
+ interrupted, successful);
+
+ return 0;
+ }
+
+ static void xe_bo_shrink_kunit(struct kunit *test)
+ {
+ struct xe_device *xe = test->priv;
+
+ shrink_test_run_device(xe);
+ }
+
static struct kunit_case xe_bo_tests[] = {
KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
{}
};
struct device *dev;
struct sg_table sgt;
struct sg_table *sg;
+ /** @purgeable: Whether the content of the pages of @ttm is purgeable. */
+ bool purgeable;
};
static int xe_tt_map_sg(struct ttm_tt *tt)
mem->bus.offset += vram->io_start;
mem->bus.is_iomem = true;
- #if !defined(CONFIG_X86)
+ #if !IS_ENABLED(CONFIG_X86)
mem->bus.caching = ttm_write_combined;
#endif
return 0;
if (xe_rpm_reclaim_safe(xe)) {
/*
* We might be called through swapout in the validation path of
- * another TTM device, so unconditionally acquire rpm here.
+ * another TTM device, so acquire rpm here.
*/
xe_pm_runtime_get(xe);
} else {
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(!xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (!xe_bo_is_vram(bo))
+ return 0;
ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
if (ret)
}
}
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
.interruptible = false,
};
struct ttm_resource *new_mem;
+ struct ttm_place *place = &bo->placements[0];
int ret;
xe_bo_assert_held(bo);
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+ if (WARN_ON(xe_bo_is_vram(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
return -EINVAL;
+ if (!mem_type_is_vram(place->mem_type))
+ return 0;
+
ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
if (ret)
return ret;
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
}
}
+ static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
+ {
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+
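+ /* Validating with an empty placement list tells TTM to drop the bo's backing store. */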
+ if (ttm_bo->ttm) {
+ struct ttm_placement place = {};
+ int ret = ttm_bo_validate(ttm_bo, &place, ctx);
+
+ drm_WARN_ON(&xe->drm, ret);
+ }
+ }
+
+ static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
+ {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false
+ };
+
+ if (ttm_bo->ttm) {
+ struct xe_ttm_tt *xe_tt =
+ container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_ttm_bo_purge(ttm_bo, &ctx);
+ }
+ }
+
const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
.release_notify = xe_ttm_bo_release_notify,
.eviction_valuable = ttm_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
+ .swap_notify = xe_ttm_bo_swap_notify,
};
static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
__xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+ u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+ u64 alignment)
{
struct xe_bo *bo = NULL;
int err;
if (IS_ERR(bo))
return bo;
+ bo->min_align = alignment;
+
/*
* Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
* to ensure the shared resv doesn't disappear under the bo, the bo
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags, u64 alignment)
{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags, alignment);
}
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
- return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
+ flags, 0);
}
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER);
+ flags | XE_BO_FLAG_USER, 0);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
struct xe_vm *vm,
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags)
+ {
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
+ type, flags, 0);
+ }
+
+ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment)
{
struct xe_bo *bo;
int err;
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ alignment);
if (IS_ERR(bo))
return bo;
int xe_bo_pin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
int err;
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
if (mem_type_is_vram(place->mem_type)) {
xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
}
}
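+ /* Pinned bos in VRAM or with a GGTT mapping must be saved and restored over suspend, so keep them on the pinned list. */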
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
+ }
+
ttm_bo_pin(&bo->ttm);
/*
void xe_bo_unpin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
- if (mem_type_is_vram(place->mem_type)) {
- spin_lock(&xe->pinned.lock);
- xe_assert(xe, !list_empty(&bo->pinned_link));
- list_del_init(&bo->pinned_link);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
}
-
ttm_bo_unpin(&bo->ttm);
}
#include "xe_device.h"
+ #include <linux/aperture.h>
#include <linux/delay.h>
+ #include <linux/fault-inject.h>
#include <linux/units.h>
- #include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
- spin_lock(&xe->clients.lock);
- xe->clients.count++;
- spin_unlock(&xe->clients.lock);
-
file->driver_priv = xef;
kref_init(&xef->refcount);
static void xe_file_destroy(struct kref *ref)
{
struct xe_file *xef = container_of(ref, struct xe_file, refcount);
- struct xe_device *xe = xef->xe;
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
xa_destroy(&xef->vm.xa);
mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
-
xe_drm_client_put(xef->client);
kfree(xef->process_name);
kfree(xef);
xe_display_driver_set_hooks(&driver);
- err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (err)
return ERR_PTR(err);
xe->info.force_execlist = xe_modparam.force_execlist;
spin_lock_init(&xe->irq.lock);
- spin_lock_init(&xe->clients.lock);
init_waitqueue_head(&xe->ufence_wq);
err:
return ERR_PTR(err);
}
+ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
+
+ static bool xe_driver_flr_disabled(struct xe_device *xe)
+ {
+ return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ }
/*
* The driver-initiated FLR is the highest level of reset that we can trigger
* if/when a new instance of i915 is bound to the device it will do a full
* re-init anyway.
*/
- static void xe_driver_flr(struct xe_device *xe)
+ static void __xe_driver_flr(struct xe_device *xe)
{
const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
- if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
- return;
- }
-
drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
/*
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
return;
}
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
/* Trigger the actual Driver-FLR */
- xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
+ xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
/* Wait for hardware teardown to complete */
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
}
/* Wait for hardware/firmware re-init to complete */
- ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
}
/* Clear sticky completion status */
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
+ }
+
+ static void xe_driver_flr(struct xe_device *xe)
+ {
+ if (xe_driver_flr_disabled(xe)) {
+ drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ return;
+ }
+
+ __xe_driver_flr(xe);
}
static void xe_driver_flr_fini(void *arg)
return err;
}
- static bool verify_lmem_ready(struct xe_gt *gt)
+ static bool verify_lmem_ready(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+ u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
return !!val;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
unsigned long timeout, start;
if (!IS_DGFX(xe))
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(gt))
+ if (verify_lmem_ready(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
msleep(20);
- } while (!verify_lmem_ready(gt));
+ } while (!verify_lmem_ready(xe));
drm_dbg(&xe->drm, "lmem ready after %ums",
jiffies_to_msecs(jiffies - start));
return 0;
}
+ ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
static void update_device_info(struct xe_device *xe)
{
return 0;
}
- static int xe_device_set_has_flat_ccs(struct xe_device *xe)
+ static int probe_has_flat_ccs(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ unsigned int fw_ref;
u32 reg;
- int err;
+ /* Always enabled/disabled, no runtime check to do */
if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
return 0;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ gt = xe_root_mmio_gt(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
drm_dbg(&xe->drm,
"Flat CCS has been disabled in bios, May lead to performance impact");
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
int xe_device_probe(struct xe_device *xe)
err = xe_gt_init_early(gt);
if (err)
return err;
+
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- if (IS_SRIOV_VF(xe)) {
- err = xe_memirq_init(&tile->sriov.vf.memirq);
- if (err)
- return err;
- }
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
}
for_each_gt(gt, xe, id) {
if (err)
goto err;
- err = xe_device_set_has_flat_ccs(xe);
+ err = probe_has_flat_ccs(xe);
if (err)
goto err;
void xe_device_shutdown(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ u8 id;
+
+ drm_dbg(&xe->drm, "Shutting down device\n");
+
+ if (xe_driver_flr_disabled(xe)) {
+ xe_display_pm_shutdown(xe);
+
+ xe_irq_suspend(xe);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_shutdown(gt);
+
+ xe_display_pm_shutdown_late(xe);
+ } else {
+ /* BOOM! */
+ __xe_driver_flr(xe);
+ }
}
/**
*/
void xe_device_wmb(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, VF_CAP_REG, 0);
+ xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
/**
void xe_device_td_flush(struct xe_device *xe)
{
struct xe_gt *gt;
+ unsigned int fw_ref;
u8 id;
if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
if (xe_gt_is_media_type(gt))
continue;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
- xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+ xe_mmio_write32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
/*
* FIXME: We can likely do better here with our choice of
* timeout. Currently we just assume the worst case, i.e. 150us,
* scenario on current platforms if all cache entries are
* transient and need to be flushed.
*/
- if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+ if (xe_mmio_wait32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
150, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
}
void xe_device_l2_flush(struct xe_device *xe)
{
struct xe_gt *gt;
- int err;
+ unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
spin_lock(>->global_invl_lock);
- xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+ xe_mmio_write32(>->mmio, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
+ if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(>->global_invl_lock);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
for_each_gt(gt, xe, id) {
drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tTile: %u\n", gt->tile->id);
drm_printf(p, "\tType: %s\n",
gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
drm_printf(p, "\tIP ver: %u.%u.%u\n",
#include <drm/drm_util.h>
#include "xe_device_types.h"
+ #include "xe_gt_types.h"
+ #include "xe_sriov.h"
static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
- return >->mmio.fw;
+ return >->pm.fw;
}
void xe_device_assert_mem_access(struct xe_device *xe);
return xe->info.has_sriov;
}
+ static inline bool xe_device_has_msix(struct xe_device *xe)
+ {
+ /* TODO: change this when MSI-X support is fully integrated */
+ return false;
+ }
+
static inline bool xe_device_has_memirq(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) >= 1250;
}
+ static inline bool xe_device_uses_memirq(struct xe_device *xe)
+ {
+ return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
+ }
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+/*
+ * Occasionally the G2H worker is seen to start running only after a delay of more than
+ * a second, even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to a G2H timeout error. The root cause of the issue lies with the scheduling
+ * latency of the Lunarlake Hybrid CPU. The issue disappears if the Lunarlake atom cores
+ * are disabled in the BIOS, which is beyond the control of the xe KMD.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+#define LNL_FLUSH_WORKQUEUE(wq__) \
+ flush_workqueue(wq__)
+#define LNL_FLUSH_WORK(wrk__) \
+ flush_work(wrk__)
+
#endif
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
- #include "xe_gt_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa.h"
void __iomem *mapping;
};
+ /**
+ * struct xe_mmio - register mmio structure
+ *
+ * Represents an MMIO region that the CPU may use to access registers. A
+ * region may share its IO map with other regions (e.g., all GTs within a
+ * tile share the same map with their parent tile, but represent different
+ * subregions of the overall IO space).
+ */
+ struct xe_mmio {
+ /** @tile: Backpointer to tile, used for tracing */
+ struct xe_tile *tile;
+
+ /** @regs: Map used to access registers. */
+ void __iomem *regs;
+
+ /**
+ * @sriov_vf_gt: Backpointer to GT.
+ *
+ * This pointer is only set for GT MMIO regions and only when running
+ * as an SRIOV VF device.
+ */
+ struct xe_gt *sriov_vf_gt;
+
+ /**
+ * @regs_size: Length of the register region within the map.
+ *
+ * The size of the iomap set in *regs is generally larger than the
+ * register mmio space since it includes unused regions and/or
+ * non-register regions such as the GGTT PTEs.
+ */
+ size_t regs_size;
+
+ /** @adj_limit: adjust MMIO address if address is below this value */
+ u32 adj_limit;
+
+ /** @adj_offset: offset to add to MMIO address when adjusting */
+ u32 adj_offset;
+ };
+
/**
* struct xe_tile - hardware tile structure
*
* * 4MB-8MB: reserved
* * 8MB-16MB: global GTT
*/
- struct {
- /** @mmio.size: size of tile's MMIO space */
- size_t size;
-
- /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
- void __iomem *regs;
- } mmio;
+ struct xe_mmio mmio;
/**
* @mmio_ext: MMIO-extension info for a tile.
*
* Each tile has its own additional 256MB (28-bit) MMIO-extension space.
*/
- struct {
- /** @mmio_ext.size: size of tile's additional MMIO-extension space */
- size_t size;
-
- /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
- void __iomem *regs;
- } mmio_ext;
+ struct xe_mmio mmio_ext;
/** @mem: memory management info for tile */
struct {
struct xe_lmtt lmtt;
} pf;
struct {
- /** @sriov.vf.memirq: Memory Based Interrupts. */
- struct xe_memirq memirq;
-
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
} vf;
} sriov;
+ /** @memirq: Memory Based Interrupts. */
+ struct xe_memirq memirq;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
struct workqueue_struct *wq;
} sriov;
- /** @clients: drm clients info */
- struct {
- /** @clients.lock: Protects drm clients info */
- spinlock_t lock;
-
- /** @clients.count: number of drm clients */
- u64 count;
- } clients;
-
/** @usm: unified memory state */
struct {
/** @usm.asid: convert a ASID to VM */
}
}
- xe_mmio_write32(gt, CCS_MODE, mode);
+ /*
+ * Mask bits need to be set for the register. Although only Xe2+
+ * platforms require the mask bits to be set, doing so is harmless on
+ * older platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(>->mmio, CCS_MODE, mode);
xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
mode, config, num_engines, num_slices);
}
/* CCS mode can only be updated when there are no drm clients */
- spin_lock(&xe->clients.lock);
- if (xe->clients.count) {
- spin_unlock(&xe->clients.lock);
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
return -EBUSY;
}
xe_gt_reset_async(gt);
}
- spin_unlock(&xe->clients.lock);
+ mutex_unlock(&xe->drm.filelist_mutex);
return count;
}
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
+ #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
/*
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
}
/* Return: number of configuration dwords written */
- static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
+ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
if (xe_ggtt_node_allocated(config->ggtt_region)) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
}
/* Return: number of configuration dwords written */
- static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
+ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
- n += encode_config_ggtt(cfg, config);
+ n += encode_config_ggtt(cfg, config, details);
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
- cfg[n++] = config->begin_ctx;
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
+ cfg[n++] = config->begin_ctx;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
cfg[n++] = config->num_ctxs;
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
- cfg[n++] = config->begin_db;
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
+ cfg[n++] = config->begin_db;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
cfg[n++] = config->num_dbs;
if (!cfg)
return -ENOMEM;
- num_dwords = encode_config(cfg, config);
+ num_dwords = encode_config(cfg, config, true);
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
if (xe_gt_is_media_type(gt)) {
struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
/* media-GT will never include a GGTT config */
- xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
/* the GGTT config must be taken from the primary-GT instead */
- num_dwords += encode_config_ggtt(cfg + num_dwords, other);
+ num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
* the xe_ggtt_clear() called by below xe_ggtt_remove_node().
*/
xe_ggtt_node_remove(node, false);
+ } else {
+ xe_ggtt_node_fini(node);
}
}
config->ggtt_region = node;
return 0;
err:
- xe_ggtt_node_fini(node);
+ pf_release_ggtt(tile, node);
return err;
}
valid_all = valid_all && valid_lmem;
}
- return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
+ return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}
/**
return empty;
}
+ /**
+ * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer to save a config to (or NULL to query the buf size)
+ * @size: the size of the buffer (or 0 to query the buf size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: minimum size of the buffer or the number of bytes saved,
+ * or a negative error code on failure.
+ */
+ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
+ {
+ struct xe_gt_sriov_config *config;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !(!buf ^ !size));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ ret = pf_validate_vf_config(gt, vfid);
+ if (!size) {
+ ret = ret ? 0 : SZ_4K;
+ } else if (!ret) {
+ if (size < SZ_4K) {
+ ret = -ENOBUFS;
+ } else {
+ config = pf_pick_vf_config(gt, vfid);
+ ret = encode_config(buf, config, false) * sizeof(u32);
+ }
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return ret;
+ }
+
+ static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
+ u32 key, u32 len, const u32 *value)
+ {
+ switch (key) {
+ case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ctxs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_dbs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
+ if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
+ return -EBADMSG;
+ return pf_provision_exec_quantum(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
+ if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
+ return -EBADMSG;
+ return pf_provision_preempt_timeout(gt, vfid, value[0]);
+
+ /* auto-generate case statements */
+ #define define_threshold_key_to_provision_case(TAG, ...) \
+ case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \
+ BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \
+ if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \
+ return -EBADMSG; \
+ return pf_provision_threshold(gt, vfid, \
+ MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
+ value[0]);
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
+ #undef define_threshold_key_to_provision_case
+ }
+
+ if (xe_gt_is_media_type(gt))
+ return -EKEYREJECTED;
+
+ switch (key) {
+ case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
+ if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
+
+ case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return -EKEYREJECTED;
+ if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
+ }
+
+ return -EKEYREJECTED;
+ }
+
+ static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
+ const u32 *klvs, size_t num_dwords)
+ {
+ int err;
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (num_dwords < len)
+ err = -EBADMSG;
+ else
+ err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
+
+ if (err) {
+ xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
+ return err;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ return pf_validate_vf_config(gt, vfid);
+ }
+
+ /**
+ * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer with config data
+ * @size: the size of the config data
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
+ {
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+
+ if (!size)
+ return -ENODATA;
+
+ if (size % sizeof(u32))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ drm_printf(&p, "restoring VF%u config:\n", vfid);
+ xe_guc_klv_print(buf, size / sizeof(u32), &p);
+ }
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err) {
+ pf_release_vf_config(gt, vfid);
+ err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+ }
+
/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
return 0;
}
+ /**
+ * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print LMEM allocations across all VFs.
+ * VFs without LMEM allocation are skipped.
+ *
+ * This function can only be called on PF.
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
+ {
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+ char buf[10];
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = >->sriov.pf.vfs[n].config;
+ if (!config->lmem_obj)
+ continue;
+
+ string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ drm_printf(p, "VF%u:\t%zu\t(%s)\n",
+ n, config->lmem_obj->size, buf);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+ }
+
/**
* xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
* @gt: the &xe_gt
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
+ LNL_FLUSH_WORK(>->uc.guc.ct.g2h_worker);
+
spin_lock_irq(>->tlb_invalidation.pending_lock);
list_for_each_entry_safe(fence, next,
>->tlb_invalidation.pending_fences, link) {
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
if (xe_guc_ct_enabled(>->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = >->mmio;
+
if (IS_SRIOV_VF(xe))
return 0;
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
PVC_GUC_TLB_INV_DESC0_VALID);
} else {
- xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
GUC_TLB_INV_CR_INVALIDATE);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
return 0;
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
+ #include <linux/fault-inject.h>
#include <kunit/static_stub.h>
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
+ #include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
+ #include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ enum {
+ /* Internal states, not error conditions */
+ CT_DEAD_STATE_REARM, /* 0x0001 */
+ CT_DEAD_STATE_CAPTURE, /* 0x0002 */
+
+ /* Error conditions */
+ CT_DEAD_SETUP, /* 0x0004 */
+ CT_DEAD_H2G_WRITE, /* 0x0008 */
+ CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
+ CT_DEAD_G2H_READ, /* 0x0020 */
+ CT_DEAD_G2H_RECV, /* 0x0040 */
+ CT_DEAD_G2H_RELEASE, /* 0x0080 */
+ CT_DEAD_DEADLOCK, /* 0x0100 */
+ CT_DEAD_PROCESS_FAILED, /* 0x0200 */
+ CT_DEAD_FAST_G2H, /* 0x0400 */
+ CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
+ CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
+ CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
+ CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+ };
+
+ static void ct_dead_worker_func(struct work_struct *w);
+ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+ #define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+ #else
+ #define CT_DEAD(ct, ctb, reason) \
+ do { \
+ struct guc_ctb *_ctb = (ctb); \
+ if (_ctb) \
+ _ctb->info.broken = true; \
+ } while (0)
+ #endif
+
/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
u32 *response_buffer;
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
- ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
if (!ct->g2h_wq)
return -ENOMEM;
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
- INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+ INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ spin_lock_init(&ct->dead.lock);
+ INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+ #endif
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
+ ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /*
+ * The CT has now been reset so the dumper can be re-armed
+ * after any existing dead state has been dumped.
+ */
+ spin_lock_irq(&ct->dead.lock);
+ if (ct->dead.reason)
+ ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ spin_unlock_irq(&ct->dead.lock);
+ #endif
+
return 0;
err_out:
xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, NULL, SETUP);
return err;
}
if (cmd_len > h2g->info.space) {
h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+ if (h2g->info.head > h2g->info.size) {
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 desc_status = desc_read(xe, h2g, status);
+
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+ xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
+ h2g->info.head, h2g->info.size);
+ CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+ return false;
+ }
+
h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
h2g->info.size) -
h2g->info.resv_space;
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
+ bool bad = false;
+
lockdep_assert_held(&ct->fast_lock);
- xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
- ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
- xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
+
+ bad = ct->ctbs.g2h.info.space + g2h_len >
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+ bad |= !ct->g2h_outstanding;
+
+ if (bad) {
+ xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+ ct->ctbs.g2h.info.space, g2h_len,
+ ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+ ct->ctbs.g2h.info.space + g2h_len,
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+ ct->g2h_outstanding);
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+ return;
+ }
ct->ctbs.g2h.info.space += g2h_len;
if (!--ct->g2h_outstanding)
u32 full_len;
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
tail * sizeof(u32));
+ u32 desc_status;
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
- xe_gt_assert(gt, tail <= h2g->info.size);
+
+ desc_status = desc_read(xe, h2g, status);
+ if (desc_status) {
+ xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, h2g, tail);
+ u32 desc_head = desc_read(xe, h2g, head);
+
+ if (tail != desc_tail) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+ goto corrupted;
+ }
+
+ if (tail > h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+ tail, h2g->info.size);
+ goto corrupted;
+ }
+
+ if (desc_head >= h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
+ desc_head, h2g->info.size);
+ goto corrupted;
+ }
+ }
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
+
+ corrupted:
+ CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+ return -EPIPE;
}
/*
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
int ret;
goto broken;
#undef g2h_avail
- if (dequeue_one_g2h(ct) < 0)
+ ret = dequeue_one_g2h(ct);
+ if (ret < 0) {
+ if (ret != -ECANCELED)
+ xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
+ ERR_PTR(ret));
goto broken;
+ }
goto try_again;
}
broken:
xe_gt_err(gt, "No forward process on H2G, reset required\n");
- xe_guc_ct_print(ct, &p, true);
- ct->ctbs.h2g.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
return -EDEADLK;
}
#define ct_alive(ct) \
(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
!ct->ctbs.g2h.info.broken)
- if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
return false;
#undef ct_alive
goto retry_same_fence;
if (!g2h_fence_needs_alloc(&g2h_fence))
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
return ret;
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
- /*
- * Occasionally it is seen that the G2H worker starts running after a delay of more than
- * a second even after being queued and activated by the Linux workqueue subsystem. This
- * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
- * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
- * and this is beyond xe kmd.
- *
- * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
- */
if (!ret) {
- flush_work(&ct->g2h_worker);
+ LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
if (!ret) {
xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
mutex_unlock(&ct->lock);
return -ETIME;
}
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+ CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
}
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
/* Don't tear down channel, as send could've timed out */
+ /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
return 0;
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
origin);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
return -EPROTO;
}
default:
xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
type);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
ret = -EOPNOTSUPP;
}
/* Selftest only at the moment */
break;
case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+ break;
case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
/* FIXME: Handle this */
break;
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
- if (ret)
+ if (ret) {
xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, PROCESS_FAILED);
+ }
return 0;
}
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
- u32 tail, head, len;
+ u32 tail, head, len, desc_status;
s32 avail;
u32 action;
u32 *hxg;
xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+ desc_status = desc_read(xe, g2h, status);
+ if (desc_status) {
+ if (desc_status & GUC_CTB_STATUS_DISABLED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+ desc_status &= ~GUC_CTB_STATUS_DISABLED;
+ }
+
+ if (desc_status) {
+ xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, g2h, tail);
+ /*
+ u32 desc_head = desc_read(xe, g2h, head);
+
+ * info.head and desc_head are updated back-to-back at the end of
+ * this function and nowhere else. Hence, they cannot be different
+ * unless two g2h_read calls are running concurrently. Which is not
+ * possible because it is guarded by ct->fast_lock. And yet, some
+ * discrete platforms are regularly hitting this error :(.
+ *
+ * desc_head rolling backwards shouldn't cause any noticeable
+ * problems - just a delay in GuC being allowed to proceed past that
+ * point in the queue. So for now, just disable the error until it
+ * can be root caused.
+ *
+ if (g2h->info.head != desc_head) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+ desc_head, g2h->info.head);
+ goto corrupted;
+ }
+ */
+
+ if (g2h->info.head > g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+ g2h->info.head, g2h->info.size);
+ goto corrupted;
+ }
+
+ if (desc_tail >= g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n",
+ desc_tail, g2h->info.size);
+ goto corrupted;
+ }
+ }
+
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->info.head;
if (len > avail) {
xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
avail, len);
- g2h->info.broken = true;
-
- return -EPROTO;
+ goto corrupted;
}
head = (g2h->info.head + 1) % g2h->info.size;
action, len, g2h->info.head, tail);
return len;
+
+ corrupted:
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+ return -EPROTO;
}
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
xe_gt_warn(gt, "NOT_POSSIBLE");
}
- if (ret)
+ if (ret) {
xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, FAST_G2H);
+ }
}
/**
static void receive_g2h(struct xe_guc_ct *ct)
{
- struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
- struct drm_printer p = xe_gt_info_printer(gt);
-
- xe_guc_ct_print(ct, &p, false);
+ xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
+ CT_DEAD(ct, NULL, G2H_RECV);
kick_reset(ct);
}
} while (ret == 1);
receive_g2h(ct);
}
- static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
- struct guc_ctb_snapshot *snapshot,
- bool atomic)
+ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
{
- u32 head, tail;
-
- xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
- sizeof(struct guc_ct_buffer_desc));
- memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+ struct xe_guc_ct_snapshot *snapshot;
- snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
- if (!snapshot->cmds) {
- drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
- return;
+ if (ct->bo && want_ctb) {
+ snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
-
- if (head != tail) {
- struct iosys_map map =
- IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
-
- while (head != tail) {
- snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
- ++head;
- if (head == ctb->info.size) {
- head = 0;
- map = ctb->cmds;
- } else {
- iosys_map_incr(&map, sizeof(u32));
- }
- }
- }
+ return snapshot;
+ }
+
+ static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ struct guc_ctb_snapshot *snapshot)
+ {
+ xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+ sizeof(struct guc_ct_buffer_desc));
+ memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
}
static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
struct drm_printer *p)
{
- u32 head, tail;
-
drm_printf(p, "\tsize: %d\n", snapshot->info.size);
drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
drm_printf(p, "\thead: %d\n", snapshot->info.head);
drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+ }
- if (!snapshot->cmds)
- return;
+ static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
+ {
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc_ct_snapshot *snapshot;
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
+ snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
+ if (!snapshot) {
+ xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+ return NULL;
+ }
- while (head != tail) {
- drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
- snapshot->cmds[head]);
- ++head;
- if (head == snapshot->info.size)
- head = 0;
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
+ snapshot->ct_enabled = true;
+ snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
}
- }
- static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
- {
- kfree(snapshot->cmds);
+ if (ct->bo && snapshot->ctb)
+ xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
+
+ return snapshot;
}
/**
* xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
* @ct: GuC CT object.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
*
* This can be printed out in a later stage like during dev_coredump
- * analysis.
+ * analysis. It is safe to call this function from atomic context.
*
* Returns: a GuC CT snapshot object that must be freed by the caller
* by using `xe_guc_ct_snapshot_free`.
*/
- struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
- bool atomic)
+ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
{
- struct xe_device *xe = ct_to_xe(ct);
- struct xe_guc_ct_snapshot *snapshot;
-
- snapshot = kzalloc(sizeof(*snapshot),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
-
- if (!snapshot) {
- drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
- return NULL;
- }
-
- if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
- snapshot->ct_enabled = true;
- snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
- &snapshot->h2g, atomic);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
- &snapshot->g2h, atomic);
- }
-
- return snapshot;
+ return guc_ct_snapshot_capture(ct, true, true);
}
/**
drm_puts(p, "H2G CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->h2g, p);
- drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
+ drm_puts(p, "G2H CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->g2h, p);
-
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
+
+ if (snapshot->ctb)
+ xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
} else {
drm_puts(p, "CT disabled\n");
}
if (!snapshot)
return;
- guc_ctb_snapshot_free(&snapshot->h2g);
- guc_ctb_snapshot_free(&snapshot->g2h);
+ kfree(snapshot->ctb);
kfree(snapshot);
}
* xe_guc_ct_print - GuC CT Print.
* @ct: GuC CT.
* @p: drm_printer where it will be printed out.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
+ * @want_ctb: Should the full CTB content be dumped (vs just the headers)
*
- * This function quickly capture a snapshot and immediately print it out.
+ * This function will quickly capture a snapshot of the CT state
+ * and immediately print it out.
*/
- void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
{
struct xe_guc_ct_snapshot *snapshot;
- snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+ snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
xe_guc_ct_snapshot_print(snapshot, p);
xe_guc_ct_snapshot_free(snapshot);
}
+
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+ {
+ struct xe_guc_log_snapshot *snapshot_log;
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ struct xe_guc *guc = ct_to_guc(ct);
+ unsigned long flags;
+ bool have_capture;
+
+ if (ctb)
+ ctb->info.broken = true;
+
+ /* Ignore further errors after the first dump until a reset */
+ if (ct->dead.reported)
+ return;
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ /* And only capture one dump at a time */
+ have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+ ct->dead.reason |= (1 << reason_code) |
+ (1 << CT_DEAD_STATE_CAPTURE);
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ if (have_capture)
+ return;
+
+ snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+ snapshot_ct = xe_guc_ct_snapshot_capture(ct);
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+ xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+ xe_guc_log_snapshot_free(snapshot_log);
+ xe_guc_ct_snapshot_free(snapshot_ct);
+ } else {
+ ct->dead.snapshot_log = snapshot_log;
+ ct->dead.snapshot_ct = snapshot_ct;
+ }
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ queue_work(system_unbound_wq, &ct->dead.worker);
+ }
+
+ static void ct_dead_print(struct xe_dead_ct *dead)
+ {
+ struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ if (!dead->reason) {
+ xe_gt_err(gt, "CTB is dead for no reason!?\n");
+ return;
+ }
+
+ drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
+
+ /* Can't generate a genuine core dump at this point, so just do the good bits */
+ drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ xe_device_snapshot_print(xe, &lp);
+
+ drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+ drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+ drm_puts(&lp, "**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+ drm_puts(&lp, "**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+ drm_puts(&lp, "Done.\n");
+ }
+
+ static void ct_dead_worker_func(struct work_struct *w)
+ {
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+ if (!ct->dead.reported) {
+ ct->dead.reported = true;
+ ct_dead_print(&ct->dead);
+ }
+
+ spin_lock_irq(&ct->dead.lock);
+
+ xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+ ct->dead.snapshot_log = NULL;
+ xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+ ct->dead.snapshot_ct = NULL;
+
+ if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+ /* A reset has occurred so re-arm the error reporting */
+ ct->dead.reason = 0;
+ ct->dead.reported = false;
+ }
+
+ spin_unlock_irq(&ct->dead.lock);
+ }
+ #endif
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+ #include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
+ struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
if (lr) {
xe_sched_job_set_error(job, -EOPNOTSUPP);
- return NULL;
- } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
- return job->fence;
+ dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
} else {
- return dma_fence_get(job->fence);
+ fence = job->fence;
}
+
+ return fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
- xe_exec_queue_update_run_ticks(job->q);
-
trace_xe_sched_job_free(job);
xe_sched_job_put(job);
}
- static int guc_read_stopped(struct xe_guc *guc)
+ int xe_guc_read_stopped(struct xe_guc *guc)
{
return atomic_read(&guc->submission_state.stopped);
}
set_min_preemption_timeout(guc, q);
smp_rmb();
ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
drm_warn(&xe->drm, "Schedule disable failed to respond");
xe_sched_submission_start(sched);
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
const char *process_name = "no process";
+ struct xe_device *xe = guc_to_xe(guc);
+ unsigned int fw_ref;
int err = -ETIME;
pid_t pid = -1;
int i = 0;
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
+ /*
+ * If a devcoredump has not been captured and GuC capture for the job
+ * is not ready, do a manual capture first and decide later if we need
+ * to use it.
+ */
+ if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
+ !xe_guc_capture_get_matching_and_lock(job)) {
+ /* take force wake before engine register manual capture */
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
+
+ xe_engine_snapshot_capture_for_job(job);
+
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
+ }
+
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
* modify scheduling state to read timestamp. We could read the
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc))
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
/*
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
struct xe_device *xe = guc_to_xe(guc);
xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
- guc_read_stopped(guc));
+ xe_guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
__suspend_fence_signal(q);
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
- guc_read_stopped(guc));
+ xe_guc_read_stopped(guc));
- if (!guc_read_stopped(guc)) {
+ if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
ktime_ms_delta(ktime_get(),
q->guc->resume_time);
q->entity = &ge->entity;
- if (guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
!READ_ONCE(q->guc->suspend_pending) ||
exec_queue_killed(q) ||
- guc_read_stopped(guc),
+ xe_guc_read_stopped(guc),
HZ * 5);
if (!ret) {
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
- !guc_read_stopped(guc));
+ !xe_guc_read_stopped(guc));
}
void xe_guc_submit_stop(struct xe_guc *guc)
unsigned long index;
struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_assert(xe, xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
unsigned long index;
struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_assert(xe, xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
- /* FIXME: Do error capture, most likely async */
-
trace_xe_exec_queue_reset(q);
/*
return 0;
}
+ /*
+ * xe_guc_error_capture_handler - Handler for the GuC error-capture message
+ * @guc: The GuC object
+ * @msg: Pointer to the message
+ * @len: The message length
+ *
+ * When GuC capture data is ready, the GuC sends an
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host. This function
+ * is called first to check the status before processing the data that comes
+ * with the message.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
+ {
+ u32 status;
+
+ if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN)) {
+ xe_gt_dbg(guc_to_gt(guc), "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space");
+
+ xe_guc_capture_process(guc);
+
+ return 0;
+ }
+
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len)
{
if (!snapshot)
return;
- drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
+ drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
drm_printf(p, "\tName: %s\n", snapshot->name);
drm_printf(p, "\tClass: %d\n", snapshot->class);
drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+ #include "xe_sync.h"
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+ enum xe_oa_submit_deps {
+ XE_OA_SUBMIT_NO_DEPS,
+ XE_OA_SUBMIT_ADD_DEPS,
+ };
+
+ enum xe_oa_user_extn_from {
+ XE_OA_USER_EXTN_FROM_OPEN,
+ XE_OA_USER_EXTN_FROM_CONFIG,
+ };
+
struct xe_oa_reg {
struct xe_reg addr;
u32 value;
};
struct xe_oa_open_param {
+ struct xe_file *xef;
u32 oa_unit_id;
bool sample;
u32 metric_set;
struct xe_exec_queue *exec_q;
struct xe_hw_engine *hwe;
bool no_preempt;
+ struct drm_xe_sync __user *syncs_user;
+ int num_syncs;
+ struct xe_sync_entry *syncs;
};
struct xe_oa_config_bo {
struct xe_bb *bb;
};
+ struct xe_oa_fence {
+ /* @base: dma fence base */
+ struct dma_fence base;
+ /* @lock: lock for the fence */
+ spinlock_t lock;
+ /* @work: work to signal @base */
+ struct delayed_work work;
+ /* @cb: callback to schedule @work */
+ struct dma_fence_cb cb;
+ };
+
#define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
static const struct xe_oa_format oa_formats[] = {
return oa_config;
}
- static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
+ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence)
{
xe_oa_config_put(oa_bo->oa_config);
- xe_bb_free(oa_bo->bb, NULL);
+ xe_bb_free(oa_bo->bb, last_fence);
kfree(oa_bo);
}
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
{
- return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) &
+ return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) &
OAG_OATAILPTR_MASK;
}
struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, oaheadptr,
+ xe_mmio_write32(&stream->gt->mmio, oaheadptr,
(head + gtt_offset) & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
unsigned long flags;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr,
gtt_offset & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = 0;
/*
* PRM says: "This MMIO must be set before the OATAILPTR register and after the
* OAHEADPTR register. This is to enable proper functionality of the overflow bit".
*/
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr,
gtt_offset & OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointer to read from */
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(stream->gt, regs->oa_ctrl, val);
+ xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0);
- if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl,
+ struct xe_mmio *mmio = &stream->gt->mmio;
+
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+ if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA to be disabled timed out\n");
if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) {
/* <= XE_METEORLAKE except XE_PVC */
- xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1);
- if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
+ xe_mmio_write32(mmio, OA_TLB_INV_CR, 1);
+ if (xe_mmio_wait32(mmio, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA tlb invalidate timed out\n");
}
size_t count, size_t *offset)
{
/* Only clear our bits to avoid side-effects */
- stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status,
+ stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status,
OASTATUS_RELEVANT_BITS, 0);
/*
* Signal to userspace that there is non-zero OA status to read via
return ret;
}
- static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
+ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
+ struct xe_bb *bb)
{
struct xe_sched_job *job;
struct dma_fence *fence;
- long timeout;
int err = 0;
/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
goto exit;
}
+ if (deps == XE_OA_SUBMIT_ADD_DEPS) {
+ for (int i = 0; i < stream->num_syncs && !err; i++)
+ err = xe_sync_entry_add_deps(&stream->syncs[i], job);
+ if (err) {
+ drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err);
+ goto err_put_job;
+ }
+ }
+
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
- if (timeout < 0)
- err = timeout;
- else if (!timeout)
- err = -ETIME;
+ return fence;
+ err_put_job:
+ xe_sched_job_put(job);
exit:
- return err;
+ return ERR_PTR(err);
}
static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
xe_oa_config_put(stream->oa_config);
llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
- free_oa_config_bo(oa_bo);
+ free_oa_config_bo(oa_bo, stream->last_fence);
+ dma_fence_put(stream->last_fence);
}
static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
const struct flex *flex, u32 count)
{
+ struct dma_fence *fence;
struct xe_bb *bb;
int err;
xe_oa_store_flex(stream, lrc, bb, flex, count);
- err = xe_oa_submit_bb(stream, bb);
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_bb;
+ }
+ xe_bb_free(bb, fence);
+ dma_fence_put(fence);
+
+ return 0;
+ free_bb:
xe_bb_free(bb, NULL);
exit:
return err;
static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
{
+ struct dma_fence *fence;
struct xe_bb *bb;
int err;
write_cs_mi_lri(bb, reg_lri, 1);
- err = xe_oa_submit_bb(stream, bb);
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_bb;
+ }
+ xe_bb_free(bb, fence);
+ dma_fence_put(fence);
+
+ return 0;
+ free_bb:
xe_bb_free(bb, NULL);
exit:
return err;
int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream));
+ xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
+ __oa_ccs_select(stream));
/* Modify stream hwe context image with regs_context */
err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 sqcnt1;
/*
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
}
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
oag_configure_mmio_trigger(stream, false));
/* disable the context save/restore or OAR counters */
xe_oa_configure_oa_context(stream, false);
/* Make sure we disable noa to save power. */
- xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0);
+ xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
/* Reset PMON Enable to save power. */
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_oa_free_oa_buffer(stream);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_pm_runtime_put(stream->oa->xe);
/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(>->uc.guc.pc));
xe_oa_free_configs(stream);
+ xe_file_put(stream->xef);
}
static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
return oa_bo;
}
+ static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence)
+ {
+ dma_fence_put(stream->last_fence);
+ stream->last_fence = dma_fence_get(fence);
+ }
+
+ static void xe_oa_fence_work_fn(struct work_struct *w)
+ {
+ struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
+
+ /* Signal fence to indicate new OA configuration is active */
+ dma_fence_signal(&ofence->base);
+ dma_fence_put(&ofence->base);
+ }
+
+ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+ {
+ /* Additional empirical delay needed for NOA programming after registers are written */
+ #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+
+ struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
+
+ INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
+ queue_delayed_work(system_unbound_wq, &ofence->work,
+ usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
+ dma_fence_put(fence);
+ }
+
+ static const char *xe_oa_get_driver_name(struct dma_fence *fence)
+ {
+ return "xe_oa";
+ }
+
+ static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
+ {
+ return "unbound";
+ }
+
+ static const struct dma_fence_ops xe_oa_fence_ops = {
+ .get_driver_name = xe_oa_get_driver_name,
+ .get_timeline_name = xe_oa_get_timeline_name,
+ };
+
static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
{
#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
struct xe_oa_config_bo *oa_bo;
- int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
+ struct xe_oa_fence *ofence;
+ int i, err, num_signal = 0;
+ struct dma_fence *fence;
+
+ ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
+ if (!ofence) {
+ err = -ENOMEM;
+ goto exit;
+ }
oa_bo = xe_oa_alloc_config_buffer(stream, config);
if (IS_ERR(oa_bo)) {
goto exit;
}
- err = xe_oa_submit_bb(stream, oa_bo->bb);
+ /* Emit OA configuration batch */
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto exit;
+ }
- /* Additional empirical delay needed for NOA programming after registers are written */
- usleep_range(us, 2 * us);
+ /* Point of no return: initialize and set fence to signal */
+ spin_lock_init(&ofence->lock);
+ dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+
+ for (i = 0; i < stream->num_syncs; i++) {
+ if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
+ num_signal++;
+ xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
+ }
+
+ /* Additional dma_fence_get in case we dma_fence_wait */
+ if (!num_signal)
+ dma_fence_get(&ofence->base);
+
+ /* Update last fence too before adding callback */
+ xe_oa_update_last_fence(stream, fence);
+
+ /* Add job fence callback to schedule work to signal ofence->base */
+ err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
+ xe_gt_assert(stream->gt, !err || err == -ENOENT);
+ if (err == -ENOENT)
+ xe_oa_config_cb(fence, &ofence->cb);
+
+ /* If nothing needs to be signaled we wait synchronously */
+ if (!num_signal) {
+ dma_fence_wait(&ofence->base, false);
+ dma_fence_put(&ofence->base);
+ }
+
+ /* Done with syncs */
+ for (i = 0; i < stream->num_syncs; i++)
+ xe_sync_entry_cleanup(&stream->syncs[i]);
+ kfree(stream->syncs);
+
+ return 0;
exit:
+ kfree(ofence);
return err;
}
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 oa_debug, sqcnt1;
int ret;
OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL |
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
_MASKED_BIT_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
oag_configure_mmio_trigger(stream, true));
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
(OAG_OAGLBCTXCTRL_COUNTER_RESUME |
OAG_OAGLBCTXCTRL_TIMER_ENABLE |
REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
/* Configure OAR/OAC */
if (stream->exec_q) {
return xe_oa_emit_oa_config(stream, stream->oa_config);
}
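+ /*
+ * Decode the field-encoded format value and match it against the table of
+ * supported OA formats, returning the format index via @name.
+ */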
+ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
+ {
+ u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
+ u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
+ u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
+ u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
+ int idx;
+
+ for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
+ const struct xe_oa_format *f = &oa->oa_formats[idx];
+
+ if (counter_size == f->counter_size && bc_report == f->bc_report &&
+ type == f->type && counter_sel == f->counter_select) {
+ *name = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ if (value >= oa->oa_unit_ids) {
+ drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
+ return -EINVAL;
+ }
+ param->oa_unit_id = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->sample = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->metric_set = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ int ret = decode_oa_format(oa, value, &param->oa_format);
+
+ if (ret) {
+ drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
+ return ret;
+ }
+ return 0;
+ }
+
+ static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ #define OA_EXPONENT_MAX 31
+
+ if (value > OA_EXPONENT_MAX) {
+ drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
+ return -EINVAL;
+ }
+ param->period_exponent = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->disabled = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->exec_queue_id = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->engine_instance = value;
+ return 0;
+ }
+
+ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->no_preempt = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->num_syncs = value;
+ return 0;
+ }
+
+ static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ param->syncs_user = u64_to_user_ptr(value);
+ return 0;
+ }
+
+ static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+ {
+ return -EINVAL;
+ }
+
+ typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param);
+ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ };
+
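+ /*
+ * While a stream is open, only the metric set and syncs may be changed;
+ * every other property is rejected with -EINVAL.
+ */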
+ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ };
+
+ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param)
+ {
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) !=
+ ARRAY_SIZE(xe_oa_set_property_funcs_config));
+
+ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+ XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
+
+ if (from == XE_OA_USER_EXTN_FROM_CONFIG)
+ return xe_oa_set_property_funcs_config[idx](oa, ext.value, param);
+ else
+ return xe_oa_set_property_funcs_open[idx](oa, ext.value, param);
+ }
+
+ typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param);
+ static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+ [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+ };
+
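+ /* Bound the extension chain so the recursive walk below cannot run unchecked */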
+ #define MAX_USER_EXTENSIONS 16
+ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension,
+ int ext_number, struct xe_oa_open_param *param)
+ {
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
+ XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+ err = xe_oa_user_extension_funcs[idx](oa, from, extension, param);
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param);
+
+ return 0;
+ }
+
+ static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+ {
+ int ret, num_syncs, num_ufence = 0;
+
+ if (param->num_syncs && !param->syncs_user) {
+ drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->num_syncs) {
+ param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
+ if (!param->syncs) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
+ ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
+ &param->syncs_user[num_syncs], 0);
+ if (ret)
+ goto err_syncs;
+
+ if (xe_sync_is_ufence(&param->syncs[num_syncs]))
+ num_ufence++;
+ }
+
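+ /* At most one user fence is allowed per sync array */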
+ if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
+ ret = -EINVAL;
+ goto err_syncs;
+ }
+
+ return 0;
+
+ err_syncs:
+ while (num_syncs--)
+ xe_sync_entry_cleanup(&param->syncs[num_syncs]);
+ kfree(param->syncs);
+ exit:
+ return ret;
+ }
+
static void xe_oa_stream_enable(struct xe_oa_stream *stream)
{
stream->pollin = false;
static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
{
- struct drm_xe_ext_set_property ext;
+ struct xe_oa_open_param param = {};
long ret = stream->oa_config->id;
struct xe_oa_config *config;
int err;
- err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
- if (XE_IOCTL_DBG(stream->oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
- return -EINVAL;
+ err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param);
+ if (err)
+ return err;
- config = xe_oa_get_oa_config(stream->oa, ext.value);
+ config = xe_oa_get_oa_config(stream->oa, param.metric_set);
if (!config)
return -ENODEV;
- if (config != stream->oa_config) {
- err = xe_oa_emit_oa_config(stream, config);
- if (!err)
- config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ param.xef = stream->xef;
+ err = xe_oa_parse_syncs(stream->oa, &param);
+ if (err)
+ goto err_config_put;
+
+ stream->num_syncs = param.num_syncs;
+ stream->syncs = param.syncs;
+
+ err = xe_oa_emit_oa_config(stream, config);
+ if (!err) {
+ config = xchg(&stream->oa_config, config);
+ drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n",
+ stream->oa_config->uuid);
}
+ err_config_put:
xe_oa_config_put(config);
- return ret;
+ return err ?: ret;
}
static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
struct xe_oa_stream *stream = file->private_data;
struct xe_gt *gt = stream->gt;
+ xe_pm_runtime_get(gt_to_xe(gt));
mutex_lock(&gt->oa.gt_lock);
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
+ xe_pm_runtime_put(gt_to_xe(gt));
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);
{
struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
+ unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
+ stream->xef = xe_file_get(param->xef);
+ stream->num_syncs = param->num_syncs;
+ stream->syncs = param->syncs;
+
/*
* For Xe2+, when overrun mode is enabled, there are no partial reports at the end
* of buffer, making the OA buffer effectively a non-power-of-2 size circular
/* Take runtime pm ref and forcewake to disable RC6 */
xe_pm_runtime_get(stream->oa->xe);
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ ret = -ETIMEDOUT;
+ goto err_fw_put;
+ }
ret = xe_oa_alloc_oa_buffer(stream);
if (ret)
err_free_oa_buf:
xe_oa_free_oa_buffer(stream);
err_fw_put:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(stream->oa->xe);
if (stream->override_gucrc)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
err_free_configs:
xe_oa_free_configs(stream);
exit:
+ xe_file_put(stream->xef);
return ret;
}
case XE_PVC:
case XE_METEORLAKE:
xe_pm_runtime_get(gt_to_xe(gt));
- reg = xe_mmio_read32(gt, RPM_CONFIG0);
+ reg = xe_mmio_read32(>->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
}
- static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
- {
- u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
- u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
- u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
- u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
- int idx;
-
- for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
- const struct xe_oa_format *f = &oa->oa_formats[idx];
-
- if (counter_size == f->counter_size && bc_report == f->bc_report &&
- type == f->type && counter_sel == f->counter_select) {
- *name = idx;
- return 0;
- }
- }
-
- return -EINVAL;
- }
-
/**
* xe_oa_unit_id - Return OA unit ID for a hardware engine
* @hwe: @xe_hw_engine
return ret;
}
- static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- if (value >= oa->oa_unit_ids) {
- drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
- return -EINVAL;
- }
- param->oa_unit_id = value;
- return 0;
- }
-
- static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->sample = value;
- return 0;
- }
-
- static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->metric_set = value;
- return 0;
- }
-
- static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- int ret = decode_oa_format(oa, value, &param->oa_format);
-
- if (ret) {
- drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
- return ret;
- }
- return 0;
- }
-
- static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- #define OA_EXPONENT_MAX 31
-
- if (value > OA_EXPONENT_MAX) {
- drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
- return -EINVAL;
- }
- param->period_exponent = value;
- return 0;
- }
-
- static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->disabled = value;
- return 0;
- }
-
- static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->exec_queue_id = value;
- return 0;
- }
-
- static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->engine_instance = value;
- return 0;
- }
-
- static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
- {
- param->no_preempt = value;
- return 0;
- }
-
- typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param);
- static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
- [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
- [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
- [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
- [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
- [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
- [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
- [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
- [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
- [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
- };
-
- static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param)
- {
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_ext_set_property ext;
- int err;
- u32 idx;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
- XE_IOCTL_DBG(oa->xe, ext.pad))
- return -EINVAL;
-
- idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
- return xe_oa_set_property_funcs[idx](oa, ext.value, param);
- }
-
- typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param);
- static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
- [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
- };
-
- #define MAX_USER_EXTENSIONS 16
- static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
- struct xe_oa_open_param *param)
- {
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_user_extension ext;
- int err;
- u32 idx;
-
- if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
- return -E2BIG;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
- XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
- return -EINVAL;
-
- idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
- err = xe_oa_user_extension_funcs[idx](oa, extension, param);
- if (XE_IOCTL_DBG(oa->xe, err))
- return err;
-
- if (ext.next_extension)
- return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
-
- return 0;
- }
-
/**
* xe_oa_stream_open_ioctl - Opens an OA stream
* @dev: @drm_device
return -ENODEV;
}
- ret = xe_oa_user_extensions(oa, data, 0, &param);
+ param.xef = xef;
+ ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
}
+ ret = xe_oa_parse_syncs(oa, &param);
+ if (ret)
+ goto err_exec_q;
+
mutex_lock(&param.hwe->gt->oa.gt_lock);
ret = xe_oa_stream_open_ioctl_locked(oa, &param);
mutex_unlock(&param.hwe->gt->oa.gt_lock);
+ if (ret < 0)
+ goto err_sync_cleanup;
+
+ return ret;
+
+ err_sync_cleanup:
+ while (param.num_syncs--)
+ xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
+ kfree(param.syncs);
err_exec_q:
- if (ret < 0 && param.exec_q)
+ if (param.exec_q)
xe_exec_queue_put(param.exec_q);
return ret;
}
}
/* Ensure MMIO trigger remains disabled till there is a stream */
- xe_mmio_write32(gt, u->regs.oa_debug,
+ xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
/* Set oa_unit_ids now to ensure ids remain contiguous */
--- /dev/null
+ /*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+ #ifndef __PCIIDS_H__
+ #define __PCIIDS_H__
+
+ #ifdef __KERNEL__
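+ /* Match a display-class Intel PCI device with the given device ID */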
+ #define INTEL_VGA_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+ }
+
+ #define INTEL_QUANTA_VGA_DEVICE(_info) { \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x16a, \
+ .subvendor = 0x152d, .subdevice = 0x8990, \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+ }
+ #endif /* __KERNEL__ */
+
+ #define INTEL_I810_IDS(MACRO__, ...) \
+ MACRO__(0x7121, ## __VA_ARGS__), /* I810 */ \
+ MACRO__(0x7123, ## __VA_ARGS__), /* I810_DC100 */ \
+ MACRO__(0x7125, ## __VA_ARGS__) /* I810_E */
+
+ #define INTEL_I815_IDS(MACRO__, ...) \
+ MACRO__(0x1132, ## __VA_ARGS__) /* I815*/
+
+ #define INTEL_I830_IDS(MACRO__, ...) \
+ MACRO__(0x3577, ## __VA_ARGS__)
+
+ #define INTEL_I845G_IDS(MACRO__, ...) \
+ MACRO__(0x2562, ## __VA_ARGS__)
+
+ #define INTEL_I85X_IDS(MACRO__, ...) \
+ MACRO__(0x3582, ## __VA_ARGS__), /* I855_GM */ \
+ MACRO__(0x358e, ## __VA_ARGS__)
+
+ #define INTEL_I865G_IDS(MACRO__, ...) \
+ MACRO__(0x2572, ## __VA_ARGS__) /* I865_G */
+
+ #define INTEL_I915G_IDS(MACRO__, ...) \
+ MACRO__(0x2582, ## __VA_ARGS__), /* I915_G */ \
+ MACRO__(0x258a, ## __VA_ARGS__) /* E7221_G */
+
+ #define INTEL_I915GM_IDS(MACRO__, ...) \
+ MACRO__(0x2592, ## __VA_ARGS__) /* I915_GM */
+
+ #define INTEL_I945G_IDS(MACRO__, ...) \
+ MACRO__(0x2772, ## __VA_ARGS__) /* I945_G */
+
+ #define INTEL_I945GM_IDS(MACRO__, ...) \
+ MACRO__(0x27a2, ## __VA_ARGS__), /* I945_GM */ \
+ MACRO__(0x27ae, ## __VA_ARGS__) /* I945_GME */
+
+ #define INTEL_I965G_IDS(MACRO__, ...) \
+ MACRO__(0x2972, ## __VA_ARGS__), /* I946_GZ */ \
+ MACRO__(0x2982, ## __VA_ARGS__), /* G35_G */ \
+ MACRO__(0x2992, ## __VA_ARGS__), /* I965_Q */ \
+ MACRO__(0x29a2, ## __VA_ARGS__) /* I965_G */
+
+ #define INTEL_G33_IDS(MACRO__, ...) \
+ MACRO__(0x29b2, ## __VA_ARGS__), /* Q35_G */ \
+ MACRO__(0x29c2, ## __VA_ARGS__), /* G33_G */ \
+ MACRO__(0x29d2, ## __VA_ARGS__) /* Q33_G */
+
+ #define INTEL_I965GM_IDS(MACRO__, ...) \
+ MACRO__(0x2a02, ## __VA_ARGS__), /* I965_GM */ \
+ MACRO__(0x2a12, ## __VA_ARGS__) /* I965_GME */
+
+ #define INTEL_GM45_IDS(MACRO__, ...) \
+ MACRO__(0x2a42, ## __VA_ARGS__) /* GM45_G */
+
+ #define INTEL_G45_IDS(MACRO__, ...) \
+ MACRO__(0x2e02, ## __VA_ARGS__), /* IGD_E_G */ \
+ MACRO__(0x2e12, ## __VA_ARGS__), /* Q45_G */ \
+ MACRO__(0x2e22, ## __VA_ARGS__), /* G45_G */ \
+ MACRO__(0x2e32, ## __VA_ARGS__), /* G41_G */ \
+ MACRO__(0x2e42, ## __VA_ARGS__), /* B43_G */ \
+ MACRO__(0x2e92, ## __VA_ARGS__) /* B43_G.1 */
+
+ #define INTEL_PNV_G_IDS(MACRO__, ...) \
+ MACRO__(0xa001, ## __VA_ARGS__)
+
+ #define INTEL_PNV_M_IDS(MACRO__, ...) \
+ MACRO__(0xa011, ## __VA_ARGS__)
+
+ #define INTEL_PNV_IDS(MACRO__, ...) \
+ INTEL_PNV_G_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_PNV_M_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_ILK_D_IDS(MACRO__, ...) \
+ MACRO__(0x0042, ## __VA_ARGS__)
+
+ #define INTEL_ILK_M_IDS(MACRO__, ...) \
+ MACRO__(0x0046, ## __VA_ARGS__)
+
+ #define INTEL_ILK_IDS(MACRO__, ...) \
+ INTEL_ILK_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ILK_M_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_SNB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0102, ## __VA_ARGS__), \
+ MACRO__(0x010A, ## __VA_ARGS__)
+
+ #define INTEL_SNB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0112, ## __VA_ARGS__), \
+ MACRO__(0x0122, ## __VA_ARGS__)
+
+ #define INTEL_SNB_D_IDS(MACRO__, ...) \
+ INTEL_SNB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_SNB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0106, ## __VA_ARGS__)
+
+ #define INTEL_SNB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0116, ## __VA_ARGS__), \
+ MACRO__(0x0126, ## __VA_ARGS__)
+
+ #define INTEL_SNB_M_IDS(MACRO__, ...) \
+ INTEL_SNB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_SNB_IDS(MACRO__, ...) \
+ INTEL_SNB_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_IVB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0156, ## __VA_ARGS__) /* GT1 mobile */
+
+ #define INTEL_IVB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0166, ## __VA_ARGS__) /* GT2 mobile */
+
+ #define INTEL_IVB_M_IDS(MACRO__, ...) \
+ INTEL_IVB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_IVB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0152, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x015a, ## __VA_ARGS__) /* GT1 server */
+
+ #define INTEL_IVB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0162, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x016a, ## __VA_ARGS__) /* GT2 server */
+
+ #define INTEL_IVB_D_IDS(MACRO__, ...) \
+ INTEL_IVB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_IVB_IDS(MACRO__, ...) \
+ INTEL_IVB_M_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_IVB_Q_IDS(MACRO__, ...) \
+ INTEL_QUANTA_VGA_DEVICE(__VA_ARGS__) /* Quanta transcode */
+
+ #define INTEL_HSW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A02, ## __VA_ARGS__), /* ULT GT1 desktop */ \
+ MACRO__(0x0A06, ## __VA_ARGS__), /* ULT GT1 mobile */ \
+ MACRO__(0x0A0A, ## __VA_ARGS__), /* ULT GT1 server */ \
+ MACRO__(0x0A0B, ## __VA_ARGS__) /* ULT GT1 reserved */
+
+ #define INTEL_HSW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A0E, ## __VA_ARGS__) /* ULX GT1 mobile */
+
+ #define INTEL_HSW_GT1_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0402, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x0406, ## __VA_ARGS__), /* GT1 mobile */ \
+ MACRO__(0x040A, ## __VA_ARGS__), /* GT1 server */ \
+ MACRO__(0x040B, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x040E, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x0C02, ## __VA_ARGS__), /* SDV GT1 desktop */ \
+ MACRO__(0x0C06, ## __VA_ARGS__), /* SDV GT1 mobile */ \
+ MACRO__(0x0C0A, ## __VA_ARGS__), /* SDV GT1 server */ \
+ MACRO__(0x0C0B, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0C0E, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0D02, ## __VA_ARGS__), /* CRW GT1 desktop */ \
+ MACRO__(0x0D06, ## __VA_ARGS__), /* CRW GT1 mobile */ \
+ MACRO__(0x0D0A, ## __VA_ARGS__), /* CRW GT1 server */ \
+ MACRO__(0x0D0B, ## __VA_ARGS__), /* CRW GT1 reserved */ \
+ MACRO__(0x0D0E, ## __VA_ARGS__) /* CRW GT1 reserved */
+
+ #define INTEL_HSW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0A12, ## __VA_ARGS__), /* ULT GT2 desktop */ \
+ MACRO__(0x0A16, ## __VA_ARGS__), /* ULT GT2 mobile */ \
+ MACRO__(0x0A1A, ## __VA_ARGS__), /* ULT GT2 server */ \
+ MACRO__(0x0A1B, ## __VA_ARGS__) /* ULT GT2 reserved */ \
+
+ #define INTEL_HSW_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0A1E, ## __VA_ARGS__) /* ULX GT2 mobile */ \
+
+ #define INTEL_HSW_GT2_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0412, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x0416, ## __VA_ARGS__), /* GT2 mobile */ \
+ MACRO__(0x041A, ## __VA_ARGS__), /* GT2 server */ \
+ MACRO__(0x041B, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x041E, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x0C12, ## __VA_ARGS__), /* SDV GT2 desktop */ \
+ MACRO__(0x0C16, ## __VA_ARGS__), /* SDV GT2 mobile */ \
+ MACRO__(0x0C1A, ## __VA_ARGS__), /* SDV GT2 server */ \
+ MACRO__(0x0C1B, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0C1E, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0D12, ## __VA_ARGS__), /* CRW GT2 desktop */ \
+ MACRO__(0x0D16, ## __VA_ARGS__), /* CRW GT2 mobile */ \
+ MACRO__(0x0D1A, ## __VA_ARGS__), /* CRW GT2 server */ \
+ MACRO__(0x0D1B, ## __VA_ARGS__), /* CRW GT2 reserved */ \
+ MACRO__(0x0D1E, ## __VA_ARGS__) /* CRW GT2 reserved */
+
+ #define INTEL_HSW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x0A22, ## __VA_ARGS__), /* ULT GT3 desktop */ \
+ MACRO__(0x0A26, ## __VA_ARGS__), /* ULT GT3 mobile */ \
+ MACRO__(0x0A2A, ## __VA_ARGS__), /* ULT GT3 server */ \
+ MACRO__(0x0A2B, ## __VA_ARGS__), /* ULT GT3 reserved */ \
+ MACRO__(0x0A2E, ## __VA_ARGS__) /* ULT GT3 reserved */
+
+ #define INTEL_HSW_GT3_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0422, ## __VA_ARGS__), /* GT3 desktop */ \
+ MACRO__(0x0426, ## __VA_ARGS__), /* GT3 mobile */ \
+ MACRO__(0x042A, ## __VA_ARGS__), /* GT3 server */ \
+ MACRO__(0x042B, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x042E, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x0C22, ## __VA_ARGS__), /* SDV GT3 desktop */ \
+ MACRO__(0x0C26, ## __VA_ARGS__), /* SDV GT3 mobile */ \
+ MACRO__(0x0C2A, ## __VA_ARGS__), /* SDV GT3 server */ \
+ MACRO__(0x0C2B, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0C2E, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0D22, ## __VA_ARGS__), /* CRW GT3 desktop */ \
+ MACRO__(0x0D26, ## __VA_ARGS__), /* CRW GT3 mobile */ \
+ MACRO__(0x0D2A, ## __VA_ARGS__), /* CRW GT3 server */ \
+ MACRO__(0x0D2B, ## __VA_ARGS__), /* CRW GT3 reserved */ \
+ MACRO__(0x0D2E, ## __VA_ARGS__) /* CRW GT3 reserved */
+
+ #define INTEL_HSW_IDS(MACRO__, ...) \
+ INTEL_HSW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_VLV_IDS(MACRO__, ...) \
+ MACRO__(0x0f30, ## __VA_ARGS__), \
+ MACRO__(0x0f31, ## __VA_ARGS__), \
+ MACRO__(0x0f32, ## __VA_ARGS__), \
+ MACRO__(0x0f33, ## __VA_ARGS__)
+
+ #define INTEL_BDW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1606, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160B, ## __VA_ARGS__) /* GT1 Iris */
+
+ #define INTEL_BDW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x160E, ## __VA_ARGS__) /* GT1 ULX */
+
+ #define INTEL_BDW_GT1_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1602, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160A, ## __VA_ARGS__), /* GT1 Server */ \
+ MACRO__(0x160D, ## __VA_ARGS__) /* GT1 Workstation */
+
+ #define INTEL_BDW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1616, ## __VA_ARGS__), /* GT2 ULT */ \
+ MACRO__(0x161B, ## __VA_ARGS__) /* GT2 ULT */
+
+ #define INTEL_BDW_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x161E, ## __VA_ARGS__) /* GT2 ULX */
+
+ #define INTEL_BDW_GT2_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1612, ## __VA_ARGS__), /* GT2 Halo */ \
+ MACRO__(0x161A, ## __VA_ARGS__), /* GT2 Server */ \
+ MACRO__(0x161D, ## __VA_ARGS__) /* GT2 Workstation */
+
+ #define INTEL_BDW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1626, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x162B, ## __VA_ARGS__) /* Iris */ \
+
+ #define INTEL_BDW_ULX_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x162E, ## __VA_ARGS__) /* ULX */
+
+ #define INTEL_BDW_GT3_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1622, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x162A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x162D, ## __VA_ARGS__) /* Workstation */
+
+ #define INTEL_BDW_ULT_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x1636, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163B, ## __VA_ARGS__) /* Iris */
+
+ #define INTEL_BDW_ULX_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x163E, ## __VA_ARGS__) /* ULX */
+
+ #define INTEL_BDW_RSVD_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1632, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x163D, ## __VA_ARGS__) /* Workstation */
+
+ #define INTEL_BDW_IDS(MACRO__, ...) \
+ INTEL_BDW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_RSVD_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_CHV_IDS(MACRO__, ...) \
+ MACRO__(0x22b0, ## __VA_ARGS__), \
+ MACRO__(0x22b1, ## __VA_ARGS__), \
+ MACRO__(0x22b2, ## __VA_ARGS__), \
+ MACRO__(0x22b3, ## __VA_ARGS__)
+
+ #define INTEL_SKL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x1913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+ #define INTEL_SKL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x190E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x1915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+ #define INTEL_SKL_GT1_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x190A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x190B, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x1917, ## __VA_ARGS__) /* DT GT1.5 */
+
+ #define INTEL_SKL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x1921, ## __VA_ARGS__) /* ULT GT2F */
+
+ #define INTEL_SKL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x191E, ## __VA_ARGS__) /* ULX GT2 */
+
+ #define INTEL_SKL_GT2_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x191A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x191B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x191D, ## __VA_ARGS__) /* WKS GT2 */
+
+ #define INTEL_SKL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x1926, ## __VA_ARGS__), /* ULT GT3e */ \
+ MACRO__(0x1927, ## __VA_ARGS__) /* ULT GT3e */
+
+ #define INTEL_SKL_GT3_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x192A, ## __VA_ARGS__), /* SRV GT3 */ \
+ MACRO__(0x192B, ## __VA_ARGS__), /* Halo GT3e */ \
+ MACRO__(0x192D, ## __VA_ARGS__) /* SRV GT3e */
+
+ #define INTEL_SKL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x1932, ## __VA_ARGS__), /* DT GT4 */ \
+ MACRO__(0x193A, ## __VA_ARGS__), /* SRV GT4e */ \
+ MACRO__(0x193B, ## __VA_ARGS__), /* Halo GT4e */ \
+ MACRO__(0x193D, ## __VA_ARGS__) /* WKS GT4e */
+
+ #define INTEL_SKL_IDS(MACRO__, ...) \
+ INTEL_SKL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT4_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_BXT_IDS(MACRO__, ...) \
+ MACRO__(0x0A84, ## __VA_ARGS__), \
+ MACRO__(0x1A84, ## __VA_ARGS__), \
+ MACRO__(0x1A85, ## __VA_ARGS__), \
+ MACRO__(0x5A84, ## __VA_ARGS__), /* APL HD Graphics 505 */ \
+ MACRO__(0x5A85, ## __VA_ARGS__) /* APL HD Graphics 500 */
+
+ #define INTEL_GLK_IDS(MACRO__, ...) \
+ MACRO__(0x3184, ## __VA_ARGS__), \
+ MACRO__(0x3185, ## __VA_ARGS__)
+
+ #define INTEL_KBL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x5906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x5913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+ #define INTEL_KBL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x590E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x5915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+ #define INTEL_KBL_GT1_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x5908, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x590A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x590B, ## __VA_ARGS__) /* Halo GT1 */
+
+ #define INTEL_KBL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x5916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x5921, ## __VA_ARGS__) /* ULT GT2F */
+
+ #define INTEL_KBL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591E, ## __VA_ARGS__) /* ULX GT2 */
+
+ #define INTEL_KBL_GT2_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x5917, ## __VA_ARGS__), /* Mobile GT2 */ \
+ MACRO__(0x591A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x591B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x591D, ## __VA_ARGS__) /* WKS GT2 */
+
+ #define INTEL_KBL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x5926, ## __VA_ARGS__) /* ULT GT3 */
+
+ #define INTEL_KBL_GT3_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x5927, ## __VA_ARGS__) /* ULT GT3 */
+
+ #define INTEL_KBL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x593B, ## __VA_ARGS__) /* Halo GT4 */
+
+ /* AML/KBL Y GT2 */
+ #define INTEL_AML_KBL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591C, ## __VA_ARGS__), /* ULX GT2 */ \
+ MACRO__(0x87C0, ## __VA_ARGS__) /* ULX GT2 */
+
+ /* AML/CFL Y GT2 */
+ #define INTEL_AML_CFL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x87CA, ## __VA_ARGS__)
+
+ /* CML GT1 */
+ #define INTEL_CML_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9BA2, ## __VA_ARGS__), \
+ MACRO__(0x9BA4, ## __VA_ARGS__), \
+ MACRO__(0x9BA5, ## __VA_ARGS__), \
+ MACRO__(0x9BA8, ## __VA_ARGS__)
+
+ #define INTEL_CML_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9B21, ## __VA_ARGS__), \
+ MACRO__(0x9BAA, ## __VA_ARGS__), \
+ MACRO__(0x9BAC, ## __VA_ARGS__)
+
+ /* CML GT2 */
+ #define INTEL_CML_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9BC2, ## __VA_ARGS__), \
+ MACRO__(0x9BC4, ## __VA_ARGS__), \
+ MACRO__(0x9BC5, ## __VA_ARGS__), \
+ MACRO__(0x9BC6, ## __VA_ARGS__), \
+ MACRO__(0x9BC8, ## __VA_ARGS__), \
+ MACRO__(0x9BE6, ## __VA_ARGS__), \
+ MACRO__(0x9BF6, ## __VA_ARGS__)
+
+ #define INTEL_CML_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9B41, ## __VA_ARGS__), \
+ MACRO__(0x9BCA, ## __VA_ARGS__), \
+ MACRO__(0x9BCC, ## __VA_ARGS__)
+
+ #define INTEL_CML_IDS(MACRO__, ...) \
+ INTEL_CML_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_KBL_IDS(MACRO__, ...) \
+ INTEL_KBL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT4_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* CFL S */
+ #define INTEL_CFL_S_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E90, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E93, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E99, ## __VA_ARGS__) /* SRV GT1 */
+
+ #define INTEL_CFL_S_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E91, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E92, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E96, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E98, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E9A, ## __VA_ARGS__) /* SRV GT2 */
+
+ /* CFL H */
+ #define INTEL_CFL_H_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E9C, ## __VA_ARGS__)
+
+ #define INTEL_CFL_H_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E94, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x3E9B, ## __VA_ARGS__) /* Halo GT2 */
+
+ /* CFL U GT2 */
+ #define INTEL_CFL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA9, ## __VA_ARGS__)
+
+ /* CFL U GT3 */
+ #define INTEL_CFL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA5, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA6, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA7, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA8, ## __VA_ARGS__) /* ULT GT3 */
+
+ #define INTEL_CFL_IDS(MACRO__, ...) \
+ INTEL_CFL_S_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_S_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_CFL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* WHL/CFL U GT1 */
+ #define INTEL_WHL_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3EA1, ## __VA_ARGS__), \
+ MACRO__(0x3EA4, ## __VA_ARGS__)
+
+ /* WHL/CFL U GT2 */
+ #define INTEL_WHL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA0, ## __VA_ARGS__), \
+ MACRO__(0x3EA3, ## __VA_ARGS__)
+
+ /* WHL/CFL U GT3 */
+ #define INTEL_WHL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA2, ## __VA_ARGS__)
+
+ #define INTEL_WHL_IDS(MACRO__, ...) \
+ INTEL_WHL_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* CNL */
+ #define INTEL_CNL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x5A44, ## __VA_ARGS__), \
+ MACRO__(0x5A4C, ## __VA_ARGS__), \
+ MACRO__(0x5A54, ## __VA_ARGS__), \
+ MACRO__(0x5A5C, ## __VA_ARGS__)
+
+ #define INTEL_CNL_IDS(MACRO__, ...) \
+ INTEL_CNL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5A40, ## __VA_ARGS__), \
+ MACRO__(0x5A41, ## __VA_ARGS__), \
+ MACRO__(0x5A42, ## __VA_ARGS__), \
+ MACRO__(0x5A49, ## __VA_ARGS__), \
+ MACRO__(0x5A4A, ## __VA_ARGS__), \
+ MACRO__(0x5A50, ## __VA_ARGS__), \
+ MACRO__(0x5A51, ## __VA_ARGS__), \
+ MACRO__(0x5A52, ## __VA_ARGS__), \
+ MACRO__(0x5A59, ## __VA_ARGS__), \
+ MACRO__(0x5A5A, ## __VA_ARGS__)
+
+ /* ICL */
+ #define INTEL_ICL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x8A50, ## __VA_ARGS__), \
+ MACRO__(0x8A52, ## __VA_ARGS__), \
+ MACRO__(0x8A53, ## __VA_ARGS__), \
+ MACRO__(0x8A54, ## __VA_ARGS__), \
+ MACRO__(0x8A56, ## __VA_ARGS__), \
+ MACRO__(0x8A57, ## __VA_ARGS__), \
+ MACRO__(0x8A58, ## __VA_ARGS__), \
+ MACRO__(0x8A59, ## __VA_ARGS__), \
+ MACRO__(0x8A5A, ## __VA_ARGS__), \
+ MACRO__(0x8A5B, ## __VA_ARGS__), \
+ MACRO__(0x8A5C, ## __VA_ARGS__), \
+ MACRO__(0x8A70, ## __VA_ARGS__), \
+ MACRO__(0x8A71, ## __VA_ARGS__)
+
+ #define INTEL_ICL_IDS(MACRO__, ...) \
+ INTEL_ICL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x8A51, ## __VA_ARGS__), \
+ MACRO__(0x8A5D, ## __VA_ARGS__)
+
+ /* EHL */
+ #define INTEL_EHL_IDS(MACRO__, ...) \
+ MACRO__(0x4541, ## __VA_ARGS__), \
+ MACRO__(0x4551, ## __VA_ARGS__), \
+ MACRO__(0x4555, ## __VA_ARGS__), \
+ MACRO__(0x4557, ## __VA_ARGS__), \
+ MACRO__(0x4570, ## __VA_ARGS__), \
+ MACRO__(0x4571, ## __VA_ARGS__)
+
+ /* JSL */
+ #define INTEL_JSL_IDS(MACRO__, ...) \
+ MACRO__(0x4E51, ## __VA_ARGS__), \
+ MACRO__(0x4E55, ## __VA_ARGS__), \
+ MACRO__(0x4E57, ## __VA_ARGS__), \
+ MACRO__(0x4E61, ## __VA_ARGS__), \
+ MACRO__(0x4E71, ## __VA_ARGS__)
+
+ /* TGL */
+ #define INTEL_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+ #define INTEL_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+ #define INTEL_TGL_IDS(MACRO__, ...) \
+ INTEL_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* RKL */
+ #define INTEL_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+ /* DG1 */
+ #define INTEL_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+ /* ADL-S */
+ #define INTEL_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+ /* ADL-P */
+ #define INTEL_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+ /* ADL-N */
+ #define INTEL_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__), \
+ MACRO__(0x46D3, ## __VA_ARGS__), \
+ MACRO__(0x46D4, ## __VA_ARGS__)
+
+ /* RPL-S */
+ #define INTEL_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+ /* RPL-U */
+ #define INTEL_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+ /* RPL-P */
+ #define INTEL_RPLP_IDS(MACRO__, ...) \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+ /* DG2 */
+ #define INTEL_DG2_G10_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__), \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__), \
+ MACRO__(0x56BE, ## __VA_ARGS__), \
+ MACRO__(0x56BF, ## __VA_ARGS__)
+
+ #define INTEL_DG2_G11_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__), \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+ #define INTEL_DG2_G12_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__), \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+ #define INTEL_DG2_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+ #define INTEL_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+ #define INTEL_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+ #define INTEL_ATS_M_IDS(MACRO__, ...) \
+ INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* ARL */
+ #define INTEL_ARL_H_IDS(MACRO__, ...) \
+ MACRO__(0x7D51, ## __VA_ARGS__), \
+ MACRO__(0x7DD1, ## __VA_ARGS__)
+
+ #define INTEL_ARL_U_IDS(MACRO__, ...) \
+ MACRO__(0x7D41, ## __VA_ARGS__)
+
+ #define INTEL_ARL_S_IDS(MACRO__, ...) \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0xB640, ## __VA_ARGS__)
+
+ #define INTEL_ARL_IDS(MACRO__, ...) \
+ INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
+
+ /* MTL */
+ #define INTEL_MTL_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+ /* PVC */
+ #define INTEL_PVC_IDS(MACRO__, ...) \
+ MACRO__(0x0B69, ## __VA_ARGS__), \
+ MACRO__(0x0B6E, ## __VA_ARGS__), \
+ MACRO__(0x0BD4, ## __VA_ARGS__), \
+ MACRO__(0x0BD5, ## __VA_ARGS__), \
+ MACRO__(0x0BD6, ## __VA_ARGS__), \
+ MACRO__(0x0BD7, ## __VA_ARGS__), \
+ MACRO__(0x0BD8, ## __VA_ARGS__), \
+ MACRO__(0x0BD9, ## __VA_ARGS__), \
+ MACRO__(0x0BDA, ## __VA_ARGS__), \
+ MACRO__(0x0BDB, ## __VA_ARGS__), \
+ MACRO__(0x0BE0, ## __VA_ARGS__), \
+ MACRO__(0x0BE1, ## __VA_ARGS__), \
+ MACRO__(0x0BE5, ## __VA_ARGS__)
+
+ /* LNL */
+ #define INTEL_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+ /* BMG */
+ #define INTEL_BMG_IDS(MACRO__, ...) \
+ MACRO__(0xE202, ## __VA_ARGS__), \
+ MACRO__(0xE20B, ## __VA_ARGS__), \
+ MACRO__(0xE20C, ## __VA_ARGS__), \
+ MACRO__(0xE20D, ## __VA_ARGS__), \
+ MACRO__(0xE212, ## __VA_ARGS__)
+
+ /* PTL */
+ #define INTEL_PTL_IDS(MACRO__, ...) \
+ MACRO__(0xB080, ## __VA_ARGS__), \
+ MACRO__(0xB081, ## __VA_ARGS__), \
+ MACRO__(0xB082, ## __VA_ARGS__), \
+ MACRO__(0xB090, ## __VA_ARGS__), \
+ MACRO__(0xB091, ## __VA_ARGS__), \
+ MACRO__(0xB092, ## __VA_ARGS__), \
+ MACRO__(0xB0A0, ## __VA_ARGS__), \
+ MACRO__(0xB0A1, ## __VA_ARGS__), \
+ MACRO__(0xB0A2, ## __VA_ARGS__)
+
+ #endif /* __PCIIDS_H__ */